From c71487f2d9f8798831c6a6e9614b3028adbb60aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 16 Oct 2024 22:41:41 +0200 Subject: [PATCH 001/312] Support multiple blocks in `ParachainBlockData` --- Cargo.lock | 1 + cumulus/client/collator/src/lib.rs | 15 +- cumulus/client/collator/src/service.rs | 6 +- cumulus/client/consensus/aura/src/collator.rs | 20 +- .../consensus/aura/src/collators/basic.rs | 7 +- .../consensus/aura/src/collators/lookahead.rs | 15 +- .../collators/slot_based/collation_task.rs | 8 +- cumulus/client/pov-recovery/src/lib.rs | 48 ++++- cumulus/client/pov-recovery/src/tests.rs | 33 ++- .../src/validate_block/implementation.rs | 201 ++++++++++-------- .../src/validate_block/tests.rs | 46 ++-- cumulus/primitives/core/Cargo.toml | 2 + cumulus/primitives/core/src/lib.rs | 58 +++-- cumulus/test/client/src/block_builder.rs | 8 +- cumulus/test/client/src/lib.rs | 37 ++-- .../test/service/benches/validate_block.rs | 9 +- .../service/benches/validate_block_glutton.rs | 21 +- 17 files changed, 295 insertions(+), 240 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1a1c10b9570a2..837e16909af86 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4512,6 +4512,7 @@ dependencies = [ "sp-runtime 31.0.1", "sp-trie 29.0.0", "staging-xcm", + "tracing", ] [[package]] diff --git a/cumulus/client/collator/src/lib.rs b/cumulus/client/collator/src/lib.rs index 91ff913f263d5..951929671aefd 100644 --- a/cumulus/client/collator/src/lib.rs +++ b/cumulus/client/collator/src/lib.rs @@ -31,7 +31,7 @@ use polkadot_node_subsystem::messages::{CollationGenerationMessage, CollatorProt use polkadot_overseer::Handle as OverseerHandle; use polkadot_primitives::{CollatorPair, Id as ParaId}; -use codec::{Decode, Encode}; +use codec::Decode; use futures::prelude::*; use std::sync::Arc; @@ -120,13 +120,7 @@ where let (collation, b) = self.service.build_collation(&last_head, block_hash, candidate)?; - tracing::info!( - target: LOG_TARGET, - "PoV size {{ header: 
{}kb, extrinsics: {}kb, storage_proof: {}kb }}", - b.header().encode().len() as f64 / 1024f64, - b.extrinsics().encode().len() as f64 / 1024f64, - b.storage_proof().encode().len() as f64 / 1024f64, - ); + b.log_size_info(); if let MaybeCompressedPoV::Compressed(ref pov) = collation.proof_of_validity { tracing::info!( @@ -336,6 +330,7 @@ pub fn start_collator_sync( mod tests { use super::*; use async_trait::async_trait; + use codec::Encode; use cumulus_client_consensus_common::ParachainCandidate; use cumulus_primitives_core::ParachainBlockData; use cumulus_test_client::{ @@ -458,10 +453,10 @@ mod tests { let block = ParachainBlockData::::decode(&mut &decompressed[..]).expect("Is a valid block"); - assert_eq!(1, *block.header().number()); + assert_eq!(1, *block.blocks().nth(0).unwrap().header().number()); // Ensure that we did not include `:code` in the proof. - let proof = block.storage_proof(); + let proof = block.proofs().nth(0).unwrap().clone(); let backend = sp_state_machine::create_proof_check_backend::( *header.state_root(), diff --git a/cumulus/client/collator/src/service.rs b/cumulus/client/collator/src/service.rs index c06be006fc17f..a121ac226a5be 100644 --- a/cumulus/client/collator/src/service.rs +++ b/cumulus/client/collator/src/service.rs @@ -219,7 +219,7 @@ where block_hash: Block::Hash, candidate: ParachainCandidate, ) -> Option<(Collation, ParachainBlockData)> { - let (header, extrinsics) = candidate.block.deconstruct(); + let block = candidate.block; let compact_proof = match candidate .proof @@ -234,7 +234,7 @@ where // Create the parachain block data for the validators. 
let collation_info = self - .fetch_collation_info(block_hash, &header) + .fetch_collation_info(block_hash, block.header()) .map_err(|e| { tracing::error!( target: LOG_TARGET, @@ -245,7 +245,7 @@ where .ok() .flatten()?; - let block_data = ParachainBlockData::::new(header, extrinsics, compact_proof); + let block_data = ParachainBlockData::::new(vec![(block, compact_proof)]); let pov = polkadot_node_primitives::maybe_compress_pov(PoV { block_data: BlockData(block_data.encode()), diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index dc830e463a4f5..84ac905407e81 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -24,7 +24,7 @@ //! This module also exposes some standalone functions for common operations when building //! aura-based collators. -use codec::{Codec, Encode}; +use codec::Codec; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{ self as consensus_common, ParachainBlockImportMarker, ParachainCandidate, @@ -228,10 +228,7 @@ where inherent_data: (ParachainInherentData, InherentData), proposal_duration: Duration, max_pov_size: usize, - ) -> Result< - Option<(Collation, ParachainBlockData, Block::Hash)>, - Box, - > { + ) -> Result)>, Box> { let maybe_candidate = self .build_block_and_import( parent_header, @@ -249,13 +246,7 @@ where if let Some((collation, block_data)) = self.collator_service.build_collation(parent_header, hash, candidate) { - tracing::info!( - target: crate::LOG_TARGET, - "PoV size {{ header: {}kb, extrinsics: {}kb, storage_proof: {}kb }}", - block_data.header().encode().len() as f64 / 1024f64, - block_data.extrinsics().encode().len() as f64 / 1024f64, - block_data.storage_proof().encode().len() as f64 / 1024f64, - ); + block_data.log_size_info(); if let MaybeCompressedPoV::Compressed(ref pov) = collation.proof_of_validity { tracing::info!( @@ -265,10 
+256,9 @@ where ); } - Ok(Some((collation, block_data, hash))) + Ok(Some((collation, block_data))) } else { - Err(Box::::from("Unable to produce collation") - as Box) + Err(Box::::from("Unable to produce collation")) } } diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs index d843483b79fa0..7c8c2ccea4487 100644 --- a/cumulus/client/consensus/aura/src/collators/basic.rs +++ b/cumulus/client/consensus/aura/src/collators/basic.rs @@ -260,9 +260,12 @@ where .await ); - if let Some((collation, _, post_hash)) = maybe_collation { + if let Some((collation, block_data)) = maybe_collation { + let Some(block_hash) = block_data.blocks().nth(0).map(|b| b.hash()) else { + continue + }; let result_sender = - Some(collator.collator_service().announce_with_barrier(post_hash)); + Some(collator.collator_service().announce_with_barrier(block_hash)); request.complete(Some(CollationResult { collation, result_sender })); } else { request.complete(None); diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 8ac43fbd116e5..de8d0f65f73f9 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -436,7 +436,16 @@ where ) .await { - Ok(Some((collation, block_data, new_block_hash))) => { + Ok(Some((collation, block_data))) => { + let Some(new_block_header) = + block_data.blocks().nth(0).map(|b| b.header().clone()) + else { + tracing::error!(target: crate::LOG_TARGET, "Produced PoV doesn't contain any blocks"); + break + }; + + let new_block_hash = new_block_header.hash(); + // Here we are assuming that the import logic protects against equivocations // and provides sybil-resistance, as it should. 
collator.collator_service().announce_block(new_block_hash, None); @@ -446,7 +455,7 @@ where export_pov.clone(), collation.proof_of_validity.clone().into_compressed(), new_block_hash, - *block_data.header().number(), + *new_block_header.number(), parent_header.clone(), *relay_parent_header.state_root(), *relay_parent_header.number(), @@ -475,7 +484,7 @@ where .await; parent_hash = new_block_hash; - parent_header = block_data.into_header(); + parent_header = new_block_header; }, Ok(None) => { tracing::debug!(target: crate::LOG_TARGET, "No block proposal"); diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs index 5b8151f6302c4..83df8876859cc 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs @@ -107,13 +107,7 @@ async fn handle_collation_message( }, }; - tracing::info!( - target: LOG_TARGET, - "PoV size {{ header: {:.2}kB, extrinsics: {:.2}kB, storage_proof: {:.2}kB }}", - block_data.header().encoded_size() as f64 / 1024f64, - block_data.extrinsics().encoded_size() as f64 / 1024f64, - block_data.storage_proof().encoded_size() as f64 / 1024f64, - ); + block_data.log_size_info(); if let MaybeCompressedPoV::Compressed(ref pov) = collation.proof_of_validity { tracing::info!( diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs index 043cba12d1937..2ec68eb5195af 100644 --- a/cumulus/client/pov-recovery/src/lib.rs +++ b/cumulus/client/pov-recovery/src/lib.rs @@ -398,9 +398,33 @@ where }, }; - let block = block_data.into_block(); + let blocks_and_proofs = block_data.into_inner(); - let parent = *block.header().parent_hash(); + if let Some((block, _)) = blocks_and_proofs.last() { + let last_block_hash = block.hash(); + if last_block_hash != block_hash { + tracing::debug!( + target: LOG_TARGET, + expected_block_hash = 
?block_hash, + got_block_hash = ?last_block_hash, + "Recovered candidate doesn't contain the expected block.", + ); + + self.reset_candidate(block_hash); + return; + } + } + + let Some(parent) = blocks_and_proofs.first().map(|(b, _)| *b.header().parent_hash()) else { + tracing::debug!( + target: LOG_TARGET, + ?block_hash, + "Recovered candidate doesn't contain any blocks.", + ); + + self.reset_candidate(block_hash); + return; + }; match self.parachain_client.block_status(parent) { Ok(BlockStatus::Unknown) => { @@ -418,7 +442,12 @@ where "Waiting for recovery of parent.", ); - self.waiting_for_parent.entry(parent).or_default().push(block); + blocks_and_proofs.into_iter().for_each(|(b, _)| { + self.waiting_for_parent + .entry(*b.header().parent_hash()) + .or_default() + .push(b); + }); return } else { tracing::debug!( @@ -447,17 +476,16 @@ where _ => (), } - self.import_block(block); + self.import_blocks(blocks_and_proofs.into_iter().map(|d| d.0)); } - /// Import the given `block`. + /// Import the given `blocks`. /// /// This will also recursively drain `waiting_for_parent` and import them as well. 
- fn import_block(&mut self, block: Block) { - let mut blocks = VecDeque::new(); + fn import_blocks(&mut self, blocks: impl Iterator) { + let mut blocks = VecDeque::from_iter(blocks); - tracing::debug!(target: LOG_TARGET, block_hash = ?block.hash(), "Importing block retrieved using pov_recovery"); - blocks.push_back(block); + tracing::trace!(target: LOG_TARGET, blocks = ?blocks.iter().map(|b| b.hash()), "Importing blocks retrieved using pov_recovery"); let mut incoming_blocks = Vec::new(); @@ -586,7 +614,7 @@ where if let Some(waiting_blocks) = self.waiting_for_parent.remove(&imported.hash) { for block in waiting_blocks { tracing::debug!(target: LOG_TARGET, block_hash = ?block.hash(), resolved_parent = ?imported.hash, "Found new waiting child block during import, queuing."); - self.import_block(block); + self.import_blocks(std::iter::once(block)); } }; diff --git a/cumulus/client/pov-recovery/src/tests.rs b/cumulus/client/pov-recovery/src/tests.rs index 94dec32485ccb..b32731776a99d 100644 --- a/cumulus/client/pov-recovery/src/tests.rs +++ b/cumulus/client/pov-recovery/src/tests.rs @@ -688,9 +688,7 @@ async fn single_pending_candidate_recovery_success(#[case] runtime_version: u32) AvailableData { pov: Arc::new(PoV { block_data: ParachainBlockData::::new( - header.clone(), - vec![], - CompactProof {encoded_nodes: vec![]} + vec![(Block::new(header.clone(), vec![]), CompactProof { encoded_nodes: vec![] })] ).encode().into() }), validation_data: dummy_pvd(), @@ -792,9 +790,7 @@ async fn single_pending_candidate_recovery_retry_succeeds() { AvailableData { pov: Arc::new(PoV { block_data: ParachainBlockData::::new( - header.clone(), - vec![], - CompactProof {encoded_nodes: vec![]} + vec![(Block::new(header.clone(), Vec::new()), CompactProof { encoded_nodes: vec![] })] ).encode().into() }), validation_data: dummy_pvd(), @@ -1098,11 +1094,10 @@ async fn candidate_is_imported_while_awaiting_recovery() { recovery_response_tx .send(Ok(AvailableData { pov: Arc::new(PoV { - 
block_data: ParachainBlockData::::new( - header.clone(), - vec![], + block_data: ParachainBlockData::::new(vec![( + Block::new(header.clone(), vec![]), CompactProof { encoded_nodes: vec![] }, - ) + )]) .encode() .into(), }), @@ -1196,11 +1191,10 @@ async fn candidate_is_finalized_while_awaiting_recovery() { recovery_response_tx .send(Ok(AvailableData { pov: Arc::new(PoV { - block_data: ParachainBlockData::::new( - header.clone(), - vec![], + block_data: ParachainBlockData::::new(vec![( + Block::new(header.clone(), vec![]), CompactProof { encoded_nodes: vec![] }, - ) + )]) .encode() .into(), }), @@ -1285,9 +1279,7 @@ async fn chained_recovery_success() { .send(Ok(AvailableData { pov: Arc::new(PoV { block_data: ParachainBlockData::::new( - header.clone(), - vec![], - CompactProof { encoded_nodes: vec![] }, + vec![(Block::new(header.clone(), vec![]), CompactProof { encoded_nodes: vec![] })] ) .encode() .into(), @@ -1401,11 +1393,10 @@ async fn chained_recovery_child_succeeds_before_parent() { recovery_response_sender .send(Ok(AvailableData { pov: Arc::new(PoV { - block_data: ParachainBlockData::::new( - header.clone(), - vec![], + block_data: ParachainBlockData::::new(vec![( + Block::new(header.clone(), vec![]), CompactProof { encoded_nodes: vec![] }, - ) + )]) .encode() .into(), }), diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index c4c8440e5187d..020eab5b7f595 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -29,7 +29,10 @@ use polkadot_parachain_primitives::primitives::{ use alloc::vec::Vec; use codec::Encode; -use frame_support::traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType}; +use frame_support::{ + traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType}, + BoundedVec, +}; use sp_core::storage::{ChildInfo, StateVersion}; use 
sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::KillStorageResult; @@ -89,7 +92,7 @@ pub fn validate_block< >( MemoryOptimizedValidationParams { block_data, - parent_head, + parent_head: parachain_head, relay_parent_number, relay_parent_storage_root, }: MemoryOptimizedValidationParams, @@ -98,46 +101,6 @@ where B::Extrinsic: ExtrinsicCall, ::Call: IsSubType>, { - let block_data = codec::decode_from_bytes::>(block_data) - .expect("Invalid parachain block data"); - - let parent_header = - codec::decode_from_bytes::(parent_head.clone()).expect("Invalid parent head"); - - let (header, extrinsics, storage_proof) = block_data.deconstruct(); - - let block = B::new(header, extrinsics); - assert!(parent_header.hash() == *block.header().parent_hash(), "Invalid parent hash"); - - let inherent_data = extract_parachain_inherent_data(&block); - - validate_validation_data( - &inherent_data.validation_data, - relay_parent_number, - relay_parent_storage_root, - parent_head, - ); - - // Create the db - let db = match storage_proof.to_memory_db(Some(parent_header.state_root())) { - Ok((db, _)) => db, - Err(_) => panic!("Compact proof decoding failure."), - }; - - core::mem::drop(storage_proof); - - let mut recorder = SizeOnlyRecorderProvider::new(); - let cache_provider = trie_cache::CacheProvider::new(); - // We use the storage root of the `parent_head` to ensure that it is the correct root. - // This is already being done above while creating the in-memory db, but let's be paranoid!! 
- let backend = sp_state_machine::TrieBackendBuilder::new_with_cache( - db, - *parent_header.state_root(), - cache_provider, - ) - .with_recorder(recorder.clone()) - .build(); - let _guard = ( // Replace storage calls with our own implementations sp_io::storage::host_read.replace_implementation(host_storage_read), @@ -179,59 +142,117 @@ where .replace_implementation(host_storage_proof_size), ); - run_with_externalities_and_recorder::(&backend, &mut recorder, || { - let relay_chain_proof = crate::RelayChainStateProof::new( - PSC::SelfParaId::get(), - inherent_data.validation_data.relay_parent_storage_root, - inherent_data.relay_chain_state.clone(), - ) - .expect("Invalid relay chain state proof"); + let block_data = codec::decode_from_bytes::>(block_data) + .expect("Invalid parachain block data"); - #[allow(deprecated)] - let res = CI::check_inherents(&block, &relay_chain_proof); + let mut parent_header = + codec::decode_from_bytes::(parachain_head.clone()).expect("Invalid parent head"); - if !res.ok() { - if log::log_enabled!(log::Level::Error) { - res.into_errors().for_each(|e| { - log::error!("Checking inherent with identifier `{:?}` failed", e.0) - }); - } + let blocks_and_proofs = block_data.into_inner(); - panic!("Checking inherents failed"); - } - }); + assert_eq!( + *blocks_and_proofs + .first() + .expect("BlockData should have at least one block") + .0 + .header() + .parent_hash(), + parent_header.hash(), + "Parachain head needs to be the parent of the first block" + ); - run_with_externalities_and_recorder::(&backend, &mut recorder, || { - let head_data = HeadData(block.header().encode()); + let mut processed_downward_messages = 0; + let mut upward_messages = BoundedVec::default(); + let mut horizontal_messages = BoundedVec::default(); + let mut hrmp_watermark = Default::default(); + let mut head_data = None; + let mut new_validation_code = None; + + for (block, storage_proof) in blocks_and_proofs { + let inherent_data = 
extract_parachain_inherent_data(&block); + + validate_validation_data( + &inherent_data.validation_data, + relay_parent_number, + relay_parent_storage_root, + ¶chain_head, + ); - E::execute_block(block); + // Create the db + let db = match storage_proof.to_memory_db(Some(parent_header.state_root())) { + Ok((db, _)) => db, + Err(_) => panic!("Compact proof decoding failure."), + }; + + core::mem::drop(storage_proof); + + let mut recorder = SizeOnlyRecorderProvider::new(); + let cache_provider = trie_cache::CacheProvider::new(); + // We use the storage root of the `parent_head` to ensure that it is the correct root. + // This is already being done above while creating the in-memory db, but let's be paranoid!! + let backend = sp_state_machine::TrieBackendBuilder::new_with_cache( + db, + *parent_header.state_root(), + cache_provider, + ) + .with_recorder(recorder.clone()) + .build(); + + run_with_externalities_and_recorder::(&backend, &mut recorder, || { + let relay_chain_proof = crate::RelayChainStateProof::new( + PSC::SelfParaId::get(), + inherent_data.validation_data.relay_parent_storage_root, + inherent_data.relay_chain_state.clone(), + ) + .expect("Invalid relay chain state proof"); + + #[allow(deprecated)] + let res = CI::check_inherents(&block, &relay_chain_proof); + + if !res.ok() { + if log::log_enabled!(log::Level::Error) { + res.into_errors().for_each(|e| { + log::error!("Checking inherent with identifier `{:?}` failed", e.0) + }); + } + + panic!("Checking inherents failed"); + } + }); + + run_with_externalities_and_recorder::(&backend, &mut recorder, || { + parent_header = block.header().clone(); + + E::execute_block(block); + + new_validation_code = + new_validation_code.take().or(crate::NewValidationCode::::get()); + upward_messages + .try_extend(crate::UpwardMessages::::get().into_iter()) + .expect( + "Number of upward messages should not be greater than `MAX_UPWARD_MESSAGE_NUM`", + ); + processed_downward_messages += 
crate::ProcessedDownwardMessages::::get(); + horizontal_messages.try_extend(crate::HrmpOutboundMessages::::get().into_iter()).expect( + "Number of horizontal messages should not be greater than `MAX_HORIZONTAL_MESSAGE_NUM`", + ); + hrmp_watermark = crate::HrmpWatermark::::get(); + + head_data = Some( + crate::CustomValidationHeadData::::get() + .map_or_else(|| HeadData(parent_header.encode()), HeadData), + ); + }) + } - let new_validation_code = crate::NewValidationCode::::get(); - let upward_messages = crate::UpwardMessages::::get().try_into().expect( - "Number of upward messages should not be greater than `MAX_UPWARD_MESSAGE_NUM`", - ); - let processed_downward_messages = crate::ProcessedDownwardMessages::::get(); - let horizontal_messages = crate::HrmpOutboundMessages::::get().try_into().expect( - "Number of horizontal messages should not be greater than `MAX_HORIZONTAL_MESSAGE_NUM`", - ); - let hrmp_watermark = crate::HrmpWatermark::::get(); - - let head_data = - if let Some(custom_head_data) = crate::CustomValidationHeadData::::get() { - HeadData(custom_head_data) - } else { - head_data - }; - - ValidationResult { - head_data, - new_validation_code: new_validation_code.map(Into::into), - upward_messages, - processed_downward_messages, - horizontal_messages, - hrmp_watermark, - } - }) + ValidationResult { + head_data: head_data.expect("HeadData not set"), + new_validation_code: new_validation_code.map(Into::into), + upward_messages, + processed_downward_messages, + horizontal_messages, + hrmp_watermark, + } } /// Extract the [`ParachainInherentData`]. 
@@ -263,7 +284,7 @@ fn validate_validation_data( validation_data: &PersistedValidationData, relay_parent_number: RelayChainBlockNumber, relay_parent_storage_root: RHash, - parent_head: bytes::Bytes, + parent_head: &[u8], ) { assert_eq!(parent_head, validation_data.parent_head.0, "Parent head doesn't match"); assert_eq!( diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 871ce5c1710e8..bff491a713320 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -115,7 +115,7 @@ fn validate_block_works() { build_block_with_witness(&client, Vec::new(), parent_head.clone(), Default::default()); let block = seal_block(block, slot, &client); - let header = block.header().clone(); + let header = block.blocks().nth(0).unwrap().header().clone(); let res_header = call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) .expect("Calls `validate_block`"); @@ -140,7 +140,7 @@ fn validate_block_with_extra_extrinsics() { Default::default(), ); let block = seal_block(block, slot, &client); - let header = block.header().clone(); + let header = block.blocks().nth(0).unwrap().header().clone(); let res_header = call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) @@ -173,7 +173,7 @@ fn validate_block_returns_custom_head_data() { parent_head.clone(), Default::default(), ); - let header = block.header().clone(); + let header = block.blocks().nth(0).unwrap().header().clone(); assert_ne!(expected_header, header.encode()); let block = seal_block(block, slot, &client); @@ -192,13 +192,16 @@ fn validate_block_invalid_parent_hash() { if env::var("RUN_TEST").is_ok() { let (client, parent_head) = create_test_client(); - let TestBlockData { block, validation_data, .. } = + let TestBlockData { mut block, validation_data, .. 
} = build_block_with_witness(&client, Vec::new(), parent_head.clone(), Default::default()); - let (mut header, extrinsics, witness) = block.deconstruct(); - header.set_parent_hash(Hash::from_low_u64_be(1)); + block + .blocks_mut() + .nth(0) + .unwrap() + .header + .set_parent_hash(Hash::from_low_u64_be(1)); - let block_data = ParachainBlockData::new(header, extrinsics, witness); - call_validate_block(parent_head, block_data, validation_data.relay_parent_storage_root) + call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) .unwrap_err(); } else { let output = Command::new(env::current_exe().unwrap()) @@ -208,7 +211,8 @@ fn validate_block_invalid_parent_hash() { .expect("Runs the test"); assert!(output.status.success()); - assert!(dbg!(String::from_utf8(output.stderr).unwrap()).contains("Invalid parent hash")); + assert!(dbg!(String::from_utf8(output.stderr).unwrap()) + .contains("Parachain head needs to be the parent of the first block")); } } @@ -242,19 +246,18 @@ fn check_inherents_are_unsigned_and_before_all_other_extrinsics() { if env::var("RUN_TEST").is_ok() { let (client, parent_head) = create_test_client(); - let TestBlockData { block, validation_data, .. } = + let TestBlockData { mut block, validation_data, .. 
} = build_block_with_witness(&client, Vec::new(), parent_head.clone(), Default::default()); - let (header, mut extrinsics, proof) = block.deconstruct(); - - extrinsics.insert(0, transfer(&client, Alice, Bob, 69)); + block + .blocks_mut() + .nth(0) + .unwrap() + .extrinsics + .insert(0, transfer(&client, Alice, Bob, 69)); - call_validate_block( - parent_head, - ParachainBlockData::new(header, extrinsics, proof), - validation_data.relay_parent_storage_root, - ) - .unwrap_err(); + call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) + .unwrap_err(); } else { let output = Command::new(env::current_exe().unwrap()) .args([ @@ -319,7 +322,8 @@ fn validate_block_works_with_child_tries() { parent_head.clone(), Default::default(), ); - let block = block.into_block(); + + let block = block.blocks().nth(0).unwrap().clone(); futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); @@ -333,7 +337,7 @@ fn validate_block_works_with_child_tries() { ); let block = seal_block(block, slot, &client); - let header = block.header().clone(); + let header = block.blocks().nth(0).unwrap().header().clone(); let res_header = call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) .expect("Calls `validate_block`"); diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index 533d368d3b00e..1ab0d448b9f44 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -12,6 +12,7 @@ workspace = true [dependencies] codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } +tracing.workspace = true # Substrate sp-api = { workspace = true } @@ -35,6 +36,7 @@ std = [ "sp-api/std", "sp-runtime/std", "sp-trie/std", + "tracing/std", "xcm/std", ] runtime-benchmarks = [ diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index dfb574ef33018..c371fac60ed81 100644 --- 
a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -42,6 +42,7 @@ pub use sp_runtime::{ ConsensusEngineId, }; +use sp_trie::CompactProof; pub use xcm::latest::prelude::*; /// A module that re-exports relevant relay chain definitions. @@ -201,53 +202,46 @@ pub enum ServiceQuality { /// This is send as PoV (proof of validity block) to the relay-chain validators. There it will be /// passed to the parachain validation Wasm blob to be validated. #[derive(codec::Encode, codec::Decode, Clone)] -pub struct ParachainBlockData { - /// The header of the parachain block. - header: B::Header, - /// The extrinsics of the parachain block. - extrinsics: alloc::vec::Vec, - /// The data that is required to emulate the storage accesses executed by all extrinsics. - storage_proof: sp_trie::CompactProof, +pub struct ParachainBlockData { + blocks: Vec<(Block, CompactProof)>, } -impl ParachainBlockData { +impl ParachainBlockData { /// Creates a new instance of `Self`. - pub fn new( - header: ::Header, - extrinsics: alloc::vec::Vec<::Extrinsic>, - storage_proof: sp_trie::CompactProof, - ) -> Self { - Self { header, extrinsics, storage_proof } + pub fn new(blocks: Vec<(Block, CompactProof)>) -> Self { + Self { blocks } } - /// Convert `self` into the stored block. - pub fn into_block(self) -> B { - B::new(self.header, self.extrinsics) + pub fn blocks(&self) -> impl Iterator { + self.blocks.iter().map(|e| &e.0) } - /// Convert `self` into the stored header. - pub fn into_header(self) -> B::Header { - self.header + pub fn blocks_mut(&mut self) -> impl Iterator { + self.blocks.iter_mut().map(|e| &mut e.0) } - /// Returns the header. - pub fn header(&self) -> &B::Header { - &self.header + pub fn into_blocks(self) -> impl Iterator { + self.blocks.into_iter().map(|d| d.0) } - /// Returns the extrinsics. 
- pub fn extrinsics(&self) -> &[B::Extrinsic] { - &self.extrinsics + pub fn proofs(&self) -> impl Iterator { + self.blocks.iter().map(|d| &d.1) } - /// Returns the [`CompactProof`](sp_trie::CompactProof). - pub fn storage_proof(&self) -> &sp_trie::CompactProof { - &self.storage_proof + /// Deconstruct into the inner parts. + pub fn into_inner(self) -> Vec<(Block, CompactProof)> { + self.blocks } - /// Deconstruct into the inner parts. - pub fn deconstruct(self) -> (B::Header, alloc::vec::Vec, sp_trie::CompactProof) { - (self.header, self.extrinsics, self.storage_proof) + /// Log the size of the individual components (header, extrinsics, storage proof) as info. + pub fn log_size_info(&self) { + tracing::info!( + target: "cumulus", + "PoV size {{ header: {}kb, extrinsics: {}kb, storage_proof: {}kb }}", + self.blocks().map(|b| b.header().encoded_size()).sum::() as f64 / 1024f64, + self.blocks().map(|b| b.extrinsics().encoded_size()).sum::() as f64 / 1024f64, + self.proofs().map(|p| p.encoded_size()).sum::() as f64 / 1024f64, + ); } } diff --git a/cumulus/test/client/src/block_builder.rs b/cumulus/test/client/src/block_builder.rs index c2e5a69dd9b55..82b1ffcbb6e54 100644 --- a/cumulus/test/client/src/block_builder.rs +++ b/cumulus/test/client/src/block_builder.rs @@ -24,10 +24,7 @@ use polkadot_primitives::{BlockNumber as PBlockNumber, Hash as PHash}; use sc_block_builder::BlockBuilderBuilder; use sp_api::ProvideRuntimeApi; use sp_consensus_aura::Slot; -use sp_runtime::{ - traits::{Block as BlockT, Header as HeaderT}, - Digest, DigestItem, -}; +use sp_runtime::{traits::Header as HeaderT, Digest, DigestItem}; /// A struct containing a block builder and support data required to build test scenarios. pub struct BlockBuilderAndSupportData<'a> { @@ -195,7 +192,6 @@ impl<'a> BuildParachainBlockData for sc_block_builder::BlockBuilder<'a, Block, C .into_compact_proof::<
::Hashing>(parent_state_root) .expect("Creates the compact proof"); - let (header, extrinsics) = built_block.block.deconstruct(); - ParachainBlockData::new(header, extrinsics, storage_proof) + ParachainBlockData::new(vec![(built_block.block, storage_proof)]) } } diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index f26413e441e72..5bf64c0a9dbdd 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -238,19 +238,28 @@ pub fn seal_block( parachain_slot: Slot, client: &Client, ) -> ParachainBlockData { - let parent_hash = block.header().parent_hash; - let authorities = client.runtime_api().authorities(parent_hash).unwrap(); - let expected_author = slot_author::<::Pair>(parachain_slot, &authorities) - .expect("Should be able to find author"); - - let (mut header, extrinsics, proof) = block.deconstruct(); - let keystore = get_keystore(); - let seal_digest = seal::<_, sp_consensus_aura::sr25519::AuthorityPair>( - &header.hash(), - expected_author, - &keystore, + ParachainBlockData::new( + block + .into_inner() + .into_iter() + .map(|(mut block, proof)| { + let parent_hash = block.header.parent_hash; + let authorities = client.runtime_api().authorities(parent_hash).unwrap(); + let expected_author = + slot_author::<::Pair>(parachain_slot, &authorities) + .expect("Should be able to find author"); + + let keystore = get_keystore(); + let seal_digest = seal::<_, sp_consensus_aura::sr25519::AuthorityPair>( + &block.header.hash(), + expected_author, + &keystore, + ) + .expect("Should be able to create seal"); + block.header.digest_mut().push(seal_digest); + + (block, proof) + }) + .collect::>(), ) - .expect("Should be able to create seal"); - header.digest_mut().push(seal_digest); - ParachainBlockData::new(header, extrinsics, proof) } diff --git a/cumulus/test/service/benches/validate_block.rs b/cumulus/test/service/benches/validate_block.rs index 34b09d99ce985..9ebfa47013b9c 100644 --- 
a/cumulus/test/service/benches/validate_block.rs +++ b/cumulus/test/service/benches/validate_block.rs @@ -116,7 +116,8 @@ fn benchmark_block_validation(c: &mut Criterion) { let parachain_block = block_builder.build_parachain_block(*parent_header.state_root()); - let proof_size_in_kb = parachain_block.storage_proof().encode().len() as f64 / 1024f64; + let proof_size_in_kb = + parachain_block.proofs().map(|p| p.encoded_size()).sum::() as f64 / 1024f64; let runtime = utils::get_wasm_module(); let (relay_parent_storage_root, _) = sproof_builder.into_state_root_and_proof(); @@ -131,7 +132,11 @@ fn benchmark_block_validation(c: &mut Criterion) { // This is not strictly necessary for this benchmark, but // let us make sure that the result of `validate_block` is what // we expect. - verify_expected_result(&runtime, &encoded_params, parachain_block.into_block()); + verify_expected_result( + &runtime, + &encoded_params, + parachain_block.blocks().nth(0).unwrap().clone(), + ); let mut group = c.benchmark_group("Block validation"); group.sample_size(20); diff --git a/cumulus/test/service/benches/validate_block_glutton.rs b/cumulus/test/service/benches/validate_block_glutton.rs index 6fe26519a3ebd..05ed11e3d672d 100644 --- a/cumulus/test/service/benches/validate_block_glutton.rs +++ b/cumulus/test/service/benches/validate_block_glutton.rs @@ -78,7 +78,11 @@ fn benchmark_block_validation(c: &mut Criterion) { set_glutton_parameters(&client, is_first, compute_ratio, storage_ratio); is_first = false; - runtime.block_on(import_block(&client, parachain_block.clone().into_block(), false)); + runtime.block_on(import_block( + &client, + parachain_block.blocks().nth(0).unwrap().clone(), + false, + )); // Build benchmark block let parent_hash = client.usage_info().chain.best_hash; @@ -92,8 +96,13 @@ fn benchmark_block_validation(c: &mut Criterion) { client.init_block_builder(Some(validation_data), Default::default()); let parachain_block = 
block_builder.build_parachain_block(*parent_header.state_root()); - let proof_size_in_kb = parachain_block.storage_proof().encode().len() as f64 / 1024f64; - runtime.block_on(import_block(&client, parachain_block.clone().into_block(), false)); + let proof_size_in_kb = + parachain_block.proofs().map(|p| p.encoded_size()).sum::() as f64 / 1024f64; + runtime.block_on(import_block( + &client, + parachain_block.blocks().nth(0).unwrap().clone(), + false, + )); let runtime = utils::get_wasm_module(); let sproof_builder: RelayStateSproofBuilder = Default::default(); @@ -109,7 +118,11 @@ fn benchmark_block_validation(c: &mut Criterion) { // This is not strictly necessary for this benchmark, but // let us make sure that the result of `validate_block` is what // we expect. - verify_expected_result(&runtime, &encoded_params, parachain_block.into_block()); + verify_expected_result( + &runtime, + &encoded_params, + parachain_block.blocks().nth(0).unwrap().clone(), + ); group.bench_function( format!( From 0cdb13d29b1a80dc491f6912f157cfafcab0be3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 16 Oct 2024 22:49:33 +0200 Subject: [PATCH 002/312] Comments --- cumulus/primitives/core/src/lib.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index c371fac60ed81..ba46179a3ec05 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -212,18 +212,22 @@ impl ParachainBlockData { Self { blocks } } + /// Returns an iterator yielding references to the stored blocks. pub fn blocks(&self) -> impl Iterator { self.blocks.iter().map(|e| &e.0) } + /// Returns an iterator yielding mutable references to the stored blocks. pub fn blocks_mut(&mut self) -> impl Iterator { self.blocks.iter_mut().map(|e| &mut e.0) } + /// Returns an iterator yielding the stored blocks. 
pub fn into_blocks(self) -> impl Iterator { self.blocks.into_iter().map(|d| d.0) } + /// Returns an iterator yielding references to the stored proofs. pub fn proofs(&self) -> impl Iterator { self.blocks.iter().map(|d| &d.1) } From 441553044feaab5c1e245ee4532db21d622188f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 18 Oct 2024 10:59:56 +0200 Subject: [PATCH 003/312] Add a test --- Cargo.lock | 1 - cumulus/pallets/parachain-system/Cargo.toml | 1 - .../src/validate_block/tests.rs | 142 ++++++++++++++++-- cumulus/test/client/src/block_builder.rs | 28 ++-- cumulus/test/client/src/lib.rs | 21 ++- 5 files changed, 157 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 837e16909af86..02f6cb68ec6e4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4364,7 +4364,6 @@ dependencies = [ "rand", "sc-client-api", "scale-info", - "sp-consensus-slots", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", "sp-externalities 0.25.0", diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 3cb0394c4b954..c2af4b9b40a8f 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -59,7 +59,6 @@ sp-keyring = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } -sp-consensus-slots = { workspace = true, default-features = true } # Cumulus cumulus-test-client = { workspace = true } diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index bff491a713320..0bf21c6de07b1 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -27,7 +27,6 @@ use cumulus_test_client::{ TestClientBuilder, TestClientBuilderExt, 
ValidationParams, }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; -use sp_consensus_slots::Slot; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::{env, process::Command}; @@ -35,6 +34,7 @@ use std::{env, process::Command}; use crate::validate_block::MemoryOptimizedValidationParams; fn call_validate_block_encoded_header( + validation_code: &[u8], parent_head: Header, block_data: ParachainBlockData, relay_parent_storage_root: Hash, @@ -46,7 +46,7 @@ fn call_validate_block_encoded_header( relay_parent_number: 1, relay_parent_storage_root, }, - WASM_BINARY.expect("You need to build the WASM binaries to run the tests!"), + validation_code, ) .map(|v| v.head_data.0) } @@ -56,8 +56,29 @@ fn call_validate_block( block_data: ParachainBlockData, relay_parent_storage_root: Hash, ) -> cumulus_test_client::ExecutorResult
{ - call_validate_block_encoded_header(parent_head, block_data, relay_parent_storage_root) - .map(|v| Header::decode(&mut &v[..]).expect("Decodes `Header`.")) + call_validate_block_encoded_header( + WASM_BINARY.expect("You need to build the WASM binaries to run the tests!"), + parent_head, + block_data, + relay_parent_storage_root, + ) + .map(|v| Header::decode(&mut &v[..]).expect("Decodes `Header`.")) +} + +/// Call `validate_block` in the runtime with `elastic-scaling` activated. +fn call_validate_block_elastic_scaling( + parent_head: Header, + block_data: ParachainBlockData, + relay_parent_storage_root: Hash, +) -> cumulus_test_client::ExecutorResult
{ + call_validate_block_encoded_header( + test_runtime::elastic_scaling::WASM_BINARY + .expect("You need to build the WASM binaries to run the tests!"), + parent_head, + block_data, + relay_parent_storage_root, + ) + .map(|v| Header::decode(&mut &v[..]).expect("Decodes `Header`.")) } fn create_test_client() -> (Client, Header) { @@ -72,10 +93,28 @@ fn create_test_client() -> (Client, Header) { (client, genesis_header) } +/// Create test client using the runtime with `elastic-scaling` feature enabled. +fn create_elastic_scaling_test_client() -> (Client, Header) { + let mut builder = TestClientBuilder::new(); + builder.genesis_init_mut().wasm = Some( + test_runtime::elastic_scaling::WASM_BINARY + .expect("You need to build the WASM binaries to run the tests!") + .to_vec(), + ); + let client = builder.enable_import_proof_recording().build(); + + let genesis_header = client + .header(client.chain_info().genesis_hash) + .ok() + .flatten() + .expect("Genesis header exists; qed"); + + (client, genesis_header) +} + struct TestBlockData { block: ParachainBlockData, validation_data: PersistedValidationData, - slot: Slot, } fn build_block_with_witness( @@ -96,14 +135,67 @@ fn build_block_with_witness( let cumulus_test_client::BlockBuilderAndSupportData { mut block_builder, persisted_validation_data, - slot, } = client.init_block_builder(Some(validation_data), sproof_builder); extra_extrinsics.into_iter().for_each(|e| block_builder.push(e).unwrap()); let block = block_builder.build_parachain_block(*parent_head.state_root()); - TestBlockData { block, validation_data: persisted_validation_data, slot } + TestBlockData { block, validation_data: persisted_validation_data } +} + +fn build_multiple_blocks_with_witness( + client: &Client, + mut parent_head: Header, + mut sproof_builder: RelayStateSproofBuilder, + num_blocks: usize, +) -> TestBlockData { + sproof_builder.para_id = test_runtime::PARACHAIN_ID.into(); + sproof_builder.included_para_head = 
Some(HeadData(parent_head.encode())); + sproof_builder.current_slot = (std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .expect("Time is always after UNIX_EPOCH; qed") + .as_millis() as u64 / + 6000) + .into(); + + let validation_data = PersistedValidationData { + relay_parent_number: 1, + parent_head: parent_head.encode().into(), + ..Default::default() + }; + + let mut persisted_validation_data = None; + let mut blocks = Vec::new(); + + for _ in 0..num_blocks { + let cumulus_test_client::BlockBuilderAndSupportData { + block_builder, + persisted_validation_data: p_v_data, + } = client.init_block_builder(Some(validation_data.clone()), sproof_builder.clone()); + + persisted_validation_data = Some(p_v_data); + + blocks.extend( + block_builder + .build_parachain_block(*parent_head.state_root()) + .into_inner() + .into_iter() + .inspect(|d| { + futures::executor::block_on( + client.import_as_best(BlockOrigin::Own, d.0.clone()), + ) + .unwrap(); + + parent_head = d.0.header.clone(); + }), + ); + } + + TestBlockData { + block: ParachainBlockData::new(blocks), + validation_data: persisted_validation_data.unwrap(), + } } #[test] @@ -111,10 +203,10 @@ fn validate_block_works() { sp_tracing::try_init_simple(); let (client, parent_head) = create_test_client(); - let TestBlockData { block, validation_data, slot } = + let TestBlockData { block, validation_data } = build_block_with_witness(&client, Vec::new(), parent_head.clone(), Default::default()); - let block = seal_block(block, slot, &client); + let block = seal_block(block, &client); let header = block.blocks().nth(0).unwrap().header().clone(); let res_header = call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) @@ -122,6 +214,25 @@ fn validate_block_works() { assert_eq!(header, res_header); } +#[test] +fn validate_multiple_blocks_work() { + sp_tracing::try_init_simple(); + + let (client, parent_head) = create_elastic_scaling_test_client(); + let 
TestBlockData { block, validation_data } = + build_multiple_blocks_with_witness(&client, parent_head.clone(), Default::default(), 4); + + let block = seal_block(block, &client); + let header = block.blocks().last().unwrap().header().clone(); + let res_header = call_validate_block_elastic_scaling( + parent_head, + block, + validation_data.relay_parent_storage_root, + ) + .expect("Calls `validate_block`"); + assert_eq!(header, res_header); +} + #[test] fn validate_block_with_extra_extrinsics() { sp_tracing::try_init_simple(); @@ -133,13 +244,13 @@ fn validate_block_with_extra_extrinsics() { transfer(&client, Charlie, Alice, 500), ]; - let TestBlockData { block, validation_data, slot } = build_block_with_witness( + let TestBlockData { block, validation_data } = build_block_with_witness( &client, extra_extrinsics, parent_head.clone(), Default::default(), ); - let block = seal_block(block, slot, &client); + let block = seal_block(block, &client); let header = block.blocks().nth(0).unwrap().header().clone(); let res_header = @@ -167,7 +278,7 @@ fn validate_block_returns_custom_head_data() { transfer(&client, Bob, Charlie, 100), ]; - let TestBlockData { block, validation_data, slot } = build_block_with_witness( + let TestBlockData { block, validation_data } = build_block_with_witness( &client, extra_extrinsics, parent_head.clone(), @@ -176,8 +287,9 @@ fn validate_block_returns_custom_head_data() { let header = block.blocks().nth(0).unwrap().header().clone(); assert_ne!(expected_header, header.encode()); - let block = seal_block(block, slot, &client); + let block = seal_block(block, &client); let res_header = call_validate_block_encoded_header( + WASM_BINARY.expect("You need to build the WASM binaries to run the tests!"), parent_head, block, validation_data.relay_parent_storage_root, @@ -329,14 +441,14 @@ fn validate_block_works_with_child_tries() { let parent_head = block.header().clone(); - let TestBlockData { block, validation_data, slot } = build_block_with_witness( + 
let TestBlockData { block, validation_data } = build_block_with_witness( &client, vec![generate_extrinsic(&client, Alice, TestPalletCall::read_and_write_child_tries {})], parent_head.clone(), Default::default(), ); - let block = seal_block(block, slot, &client); + let block = seal_block(block, &client); let header = block.blocks().nth(0).unwrap().header().clone(); let res_header = call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) diff --git a/cumulus/test/client/src/block_builder.rs b/cumulus/test/client/src/block_builder.rs index 82b1ffcbb6e54..75b8d6932cdf6 100644 --- a/cumulus/test/client/src/block_builder.rs +++ b/cumulus/test/client/src/block_builder.rs @@ -23,14 +23,13 @@ use cumulus_test_runtime::{Block, GetLastTimestamp, Hash, Header}; use polkadot_primitives::{BlockNumber as PBlockNumber, Hash as PHash}; use sc_block_builder::BlockBuilderBuilder; use sp_api::ProvideRuntimeApi; -use sp_consensus_aura::Slot; +use sp_consensus_aura::{AuraApi, Slot}; use sp_runtime::{traits::Header as HeaderT, Digest, DigestItem}; /// A struct containing a block builder and support data required to build test scenarios. pub struct BlockBuilderAndSupportData<'a> { pub block_builder: sc_block_builder::BlockBuilder<'a, Block, Client>, pub persisted_validation_data: PersistedValidationData, - pub slot: Slot, } /// An extension for the Cumulus test client to init a block builder. 
@@ -83,9 +82,12 @@ fn init_block_builder( mut relay_sproof_builder: RelayStateSproofBuilder, timestamp: u64, ) -> BlockBuilderAndSupportData<'_> { - // This slot will be used for both relay chain and parachain - let slot: Slot = (timestamp / cumulus_test_runtime::SLOT_DURATION).into(); - relay_sproof_builder.current_slot = slot; + let slot: Slot = + (timestamp / client.runtime_api().slot_duration(at).unwrap().as_millis()).into(); + + if relay_sproof_builder.current_slot == 0u64 { + relay_sproof_builder.current_slot = (timestamp / 6_000).into(); + } let aura_pre_digest = Digest { logs: vec![DigestItem::PreRuntime(sp_consensus_aura::AURA_ENGINE_ID, slot.encode())], @@ -130,7 +132,7 @@ fn init_block_builder( .into_iter() .for_each(|ext| block_builder.push(ext).expect("Pushes inherent")); - BlockBuilderAndSupportData { block_builder, persisted_validation_data: validation_data, slot } + BlockBuilderAndSupportData { block_builder, persisted_validation_data: validation_data } } impl InitBlockBuilder for Client { @@ -152,12 +154,16 @@ impl InitBlockBuilder for Client { let last_timestamp = self.runtime_api().get_last_timestamp(at).expect("Get last timestamp"); let timestamp = if last_timestamp == 0 { - std::time::SystemTime::now() - .duration_since(std::time::SystemTime::UNIX_EPOCH) - .expect("Time is always after UNIX_EPOCH; qed") - .as_millis() as u64 + if relay_sproof_builder.current_slot != 0u64 { + *relay_sproof_builder.current_slot * 6_000 + } else { + std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .expect("Time is always after UNIX_EPOCH; qed") + .as_millis() as u64 + } } else { - last_timestamp + cumulus_test_runtime::SLOT_DURATION + last_timestamp + self.runtime_api().slot_duration(at).unwrap().as_millis() }; init_block_builder(self, at, validation_data, relay_sproof_builder, timestamp) diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index 5bf64c0a9dbdd..a3d5982bac80b 100644 --- 
a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -28,14 +28,17 @@ use runtime::{ Balance, Block, BlockHashCount, Runtime, RuntimeCall, Signature, SignedExtra, SignedPayload, UncheckedExtrinsic, VERSION, }; -use sc_consensus_aura::standalone::{seal, slot_author}; +use sc_consensus_aura::{ + find_pre_digest, + standalone::{seal, slot_author}, +}; pub use sc_executor::error::Result as ExecutorResult; use sc_executor::HeapAllocStrategy; use sc_executor_common::runtime_blob::RuntimeBlob; use sp_api::ProvideRuntimeApi; use sp_application_crypto::AppCrypto; use sp_blockchain::HeaderBackend; -use sp_consensus_aura::{AuraApi, Slot}; +use sp_consensus_aura::AuraApi; use sp_core::Pair; use sp_io::TestExternalities; use sp_keystore::testing::MemoryKeystore; @@ -72,6 +75,7 @@ pub type Client = client::Client; #[derive(Default)] pub struct GenesisParameters { pub endowed_accounts: Vec, + pub wasm: Option>, } impl substrate_test_client::GenesisInit for GenesisParameters { @@ -79,7 +83,9 @@ impl substrate_test_client::GenesisInit for GenesisParameters { cumulus_test_service::chain_spec::get_chain_spec_with_extra_endowed( None, self.endowed_accounts.clone(), - cumulus_test_runtime::WASM_BINARY.expect("WASM binary not compiled!"), + self.wasm.as_deref().unwrap_or_else(|| { + cumulus_test_runtime::WASM_BINARY.expect("WASM binary not compiled!") + }), ) .build_storage() .expect("Builds test runtime genesis storage") @@ -233,16 +239,15 @@ fn get_keystore() -> sp_keystore::KeystorePtr { /// Given parachain block data and a slot, seal the block with an aura seal. Assumes that the /// authorities of the test runtime are present in the keyring. 
-pub fn seal_block( - block: ParachainBlockData, - parachain_slot: Slot, - client: &Client, -) -> ParachainBlockData { +pub fn seal_block(block: ParachainBlockData, client: &Client) -> ParachainBlockData { ParachainBlockData::new( block .into_inner() .into_iter() .map(|(mut block, proof)| { + let parachain_slot = + find_pre_digest::::Signature>(&block.header) + .unwrap(); let parent_hash = block.header.parent_hash; let authorities = client.runtime_api().authorities(parent_hash).unwrap(); let expected_author = From 109b3f2163d35f853c4d3d3510306eff267438ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 18 Oct 2024 12:00:52 +0200 Subject: [PATCH 004/312] Only set the head on the last block --- .../src/validate_block/implementation.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 020eab5b7f595..8f913779293b3 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -167,8 +167,9 @@ where let mut hrmp_watermark = Default::default(); let mut head_data = None; let mut new_validation_code = None; + let num_blocks = blocks_and_proofs.len(); - for (block, storage_proof) in blocks_and_proofs { + for (block_index, (block, storage_proof)) in blocks_and_proofs.into_iter().enumerate() { let inherent_data = extract_parachain_inherent_data(&block); validate_validation_data( @@ -238,10 +239,12 @@ where ); hrmp_watermark = crate::HrmpWatermark::::get(); - head_data = Some( - crate::CustomValidationHeadData::::get() - .map_or_else(|| HeadData(parent_header.encode()), HeadData), - ); + if block_index + 1 == num_blocks { + head_data = Some( + crate::CustomValidationHeadData::::get() + .map_or_else(|| HeadData(parent_header.encode()), HeadData), + ); + } }) } From 
6f7667d33d72a000e57ae0bea4d327850775df83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 18 Oct 2024 13:49:47 +0200 Subject: [PATCH 005/312] Handle versioning in collation generation --- cumulus/client/collator/src/service.rs | 26 +++-- cumulus/primitives/core/src/lib.rs | 64 ++--------- .../core/src/parachain_block_data.rs | 102 ++++++++++++++++++ 3 files changed, 130 insertions(+), 62 deletions(-) create mode 100644 cumulus/primitives/core/src/parachain_block_data.rs diff --git a/cumulus/client/collator/src/service.rs b/cumulus/client/collator/src/service.rs index a121ac226a5be..c6365184f0a92 100644 --- a/cumulus/client/collator/src/service.rs +++ b/cumulus/client/collator/src/service.rs @@ -175,13 +175,14 @@ where /// Fetch the collation info from the runtime. /// - /// Returns `Ok(Some(_))` on success, `Err(_)` on error or `Ok(None)` if the runtime api isn't - /// implemented by the runtime. + /// Returns `Ok(Some((CollationInfo, ApiVersion)))` on success, `Err(_)` on error or `Ok(None)` + /// if the runtime api isn't implemented by the runtime. `ApiVersion` being the version of the + /// [`CollectCollectionInfo`] runtime api. pub fn fetch_collation_info( &self, block_hash: Block::Hash, header: &Block::Header, - ) -> Result, sp_api::ApiError> { + ) -> Result, sp_api::ApiError> { let runtime_api = self.runtime_api.runtime_api(); let api_version = @@ -205,7 +206,7 @@ where runtime_api.collect_collation_info(block_hash, header)? }; - Ok(Some(collation_info)) + Ok(Some((collation_info, api_version))) } /// Build a full [`Collation`] from a given [`ParachainCandidate`]. This requires @@ -233,7 +234,7 @@ where }; // Create the parachain block data for the validators. 
- let collation_info = self + let (collation_info, api_version) = self .fetch_collation_info(block_hash, block.header()) .map_err(|e| { tracing::error!( @@ -248,7 +249,20 @@ where let block_data = ParachainBlockData::::new(vec![(block, compact_proof)]); let pov = polkadot_node_primitives::maybe_compress_pov(PoV { - block_data: BlockData(block_data.encode()), + block_data: BlockData(if api_version >= 3 { + block_data.encode() + } else { + let block_data = block_data.as_v0(); + + if block_data.is_none() { + tracing::error!( + target: LOG_TARGET, + "Trying to submit a collation with multiple blocks is not supported by the current runtime." + ); + } + + block_data?.encode() + }), }); let upward_messages = collation_info diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index ba46179a3ec05..04c68d847b1f9 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -26,6 +26,9 @@ use polkadot_parachain_primitives::primitives::HeadData; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; +pub mod parachain_block_data; + +pub use parachain_block_data::ParachainBlockData; pub use polkadot_core_primitives::InboundDownwardMessage; pub use polkadot_parachain_primitives::primitives::{ DmpMessageHandler, Id as ParaId, IsSystem, UpwardMessage, ValidationParams, XcmpMessageFormat, @@ -35,14 +38,11 @@ pub use polkadot_primitives::{ vstaging::{ClaimQueueOffset, CoreSelector}, AbridgedHostConfiguration, AbridgedHrmpChannel, PersistedValidationData, }; - pub use sp_runtime::{ generic::{Digest, DigestItem}, traits::Block as BlockT, ConsensusEngineId, }; - -use sp_trie::CompactProof; pub use xcm::latest::prelude::*; /// A module that re-exports relevant relay chain definitions. @@ -197,58 +197,6 @@ pub enum ServiceQuality { Fast, } -/// The parachain block that is created by a collator. -/// -/// This is send as PoV (proof of validity block) to the relay-chain validators. 
There it will be -/// passed to the parachain validation Wasm blob to be validated. -#[derive(codec::Encode, codec::Decode, Clone)] -pub struct ParachainBlockData { - blocks: Vec<(Block, CompactProof)>, -} - -impl ParachainBlockData { - /// Creates a new instance of `Self`. - pub fn new(blocks: Vec<(Block, CompactProof)>) -> Self { - Self { blocks } - } - - /// Returns an iterator yielding references to the stored blocks. - pub fn blocks(&self) -> impl Iterator { - self.blocks.iter().map(|e| &e.0) - } - - /// Returns an iterator yielding mutable references to the stored blocks. - pub fn blocks_mut(&mut self) -> impl Iterator { - self.blocks.iter_mut().map(|e| &mut e.0) - } - - /// Returns an iterator yielding the stored blocks. - pub fn into_blocks(self) -> impl Iterator { - self.blocks.into_iter().map(|d| d.0) - } - - /// Returns an iterator yielding references to the stored proofs. - pub fn proofs(&self) -> impl Iterator { - self.blocks.iter().map(|d| &d.1) - } - - /// Deconstruct into the inner parts. - pub fn into_inner(self) -> Vec<(Block, CompactProof)> { - self.blocks - } - - /// Log the size of the individual components (header, extrinsics, storage proof) as info. - pub fn log_size_info(&self) { - tracing::info!( - target: "cumulus", - "PoV size {{ header: {}kb, extrinsics: {}kb, storage_proof: {}kb }}", - self.blocks().map(|b| b.header().encoded_size()).sum::() as f64 / 1024f64, - self.blocks().map(|b| b.extrinsics().encoded_size()).sum::() as f64 / 1024f64, - self.proofs().map(|p| p.encoded_size()).sum::() as f64 / 1024f64, - ); - } -} - /// A consensus engine ID indicating that this is a Cumulus Parachain. pub const CUMULUS_CONSENSUS_ID: ConsensusEngineId = *b"CMLS"; @@ -387,7 +335,11 @@ pub struct CollationInfo { sp_api::decl_runtime_apis! { /// Runtime api to collect information about a collation. 
- #[api_version(2)] + /// + /// Version history: + /// - Version 2: Changed [`Self::collect_collation_info`] signature + /// - Version 3: Signals to the node to use version 1 of [`ParachainBlockData`]. + #[api_version(3)] pub trait CollectCollationInfo { /// Collect information about a collation. #[changed_in(2)] diff --git a/cumulus/primitives/core/src/parachain_block_data.rs b/cumulus/primitives/core/src/parachain_block_data.rs new file mode 100644 index 0000000000000..acf3a51b3e57b --- /dev/null +++ b/cumulus/primitives/core/src/parachain_block_data.rs @@ -0,0 +1,102 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Provides [`ParachainBlockData`] and its historical versions. + +use alloc::vec::Vec; +use codec::Encode; +use sp_runtime::traits::Block as BlockT; +use sp_trie::CompactProof; + +pub mod v0 { + use super::*; + + #[derive(codec::Encode, codec::Decode, Clone)] + pub struct ParachainBlockData { + /// The header of the parachain block. + pub header: B::Header, + /// The extrinsics of the parachain block. + pub extrinsics: alloc::vec::Vec, + /// The data that is required to emulate the storage accesses executed by all extrinsics. + pub storage_proof: sp_trie::CompactProof, + } +} + +/// The parachain block that is created by a collator. 
+/// +/// This is send as PoV (proof of validity block) to the relay-chain validators. There it will be +/// passed to the parachain validation Wasm blob to be validated. +#[derive(codec::Encode, codec::Decode, Clone)] +pub struct ParachainBlockData { + blocks: Vec<(Block, CompactProof)>, +} + +impl ParachainBlockData { + /// Creates a new instance of `Self`. + pub fn new(blocks: Vec<(Block, CompactProof)>) -> Self { + Self { blocks } + } + + /// Returns an iterator yielding references to the stored blocks. + pub fn blocks(&self) -> impl Iterator { + self.blocks.iter().map(|e| &e.0) + } + + /// Returns an iterator yielding mutable references to the stored blocks. + pub fn blocks_mut(&mut self) -> impl Iterator { + self.blocks.iter_mut().map(|e| &mut e.0) + } + + /// Returns an iterator yielding the stored blocks. + pub fn into_blocks(self) -> impl Iterator { + self.blocks.into_iter().map(|d| d.0) + } + + /// Returns an iterator yielding references to the stored proofs. + pub fn proofs(&self) -> impl Iterator { + self.blocks.iter().map(|d| &d.1) + } + + /// Deconstruct into the inner parts. + pub fn into_inner(self) -> Vec<(Block, CompactProof)> { + self.blocks + } + + /// Log the size of the individual components (header, extrinsics, storage proof) as info. + pub fn log_size_info(&self) { + tracing::info!( + target: "cumulus", + "PoV size {{ header: {}kb, extrinsics: {}kb, storage_proof: {}kb }}", + self.blocks().map(|b| b.header().encoded_size()).sum::() as f64 / 1024f64, + self.blocks().map(|b| b.extrinsics().encoded_size()).sum::() as f64 / 1024f64, + self.proofs().map(|p| p.encoded_size()).sum::() as f64 / 1024f64, + ); + } + + /// Converts into [`v0::ParachainBlockData`]. + /// + /// Returns `None` if there is not exactly one block. 
+ pub fn as_v0(&self) -> Option> { + if self.blocks.len() != 1 { + return None + } + + self.blocks.first().map(|(block, storage_proof)| { + let (header, extrinsics) = block.clone().deconstruct(); + v0::ParachainBlockData { header, extrinsics, storage_proof: storage_proof.clone() } + }) + } +} From a8a64bc0dd942bb3588b2346b62d3a5f1a5e0cc2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 18 Oct 2024 23:24:09 +0200 Subject: [PATCH 006/312] Write tests for pov-recovery --- cumulus/client/pov-recovery/src/lib.rs | 70 ++++++----- cumulus/client/pov-recovery/src/tests.rs | 118 +++++++++++++++++- .../core/src/parachain_block_data.rs | 9 ++ cumulus/test/runtime/build.rs | 4 +- 4 files changed, 167 insertions(+), 34 deletions(-) diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs index 2ec68eb5195af..3acd4408b2ed4 100644 --- a/cumulus/client/pov-recovery/src/lib.rs +++ b/cumulus/client/pov-recovery/src/lib.rs @@ -62,7 +62,7 @@ use polkadot_primitives::{ use cumulus_primitives_core::ParachainBlockData; use cumulus_relay_chain_interface::{RelayChainInterface, RelayChainResult}; -use codec::Decode; +use codec::{Decode, DecodeAll}; use futures::{ channel::mpsc::Receiver, select, stream::FuturesUnordered, Future, FutureExt, Stream, StreamExt, }; @@ -349,6 +349,43 @@ where self.clear_waiting_recovery(&hash); } + /// Try to decode [`ParachainBlockData`] from `data`. + /// + /// Internally it will handle the decoding of the different versions. 
+ fn decode_parachain_block_data( + data: &[u8], + expected_block_hash: Block::Hash, + ) -> Option> { + if let Ok(block_data) = ParachainBlockData::::decode_all(&mut &data[..]) { + if block_data.blocks().last().map_or(false, |b| b.hash() == expected_block_hash) { + return Some(block_data) + } + + tracing::debug!( + target: LOG_TARGET, + ?expected_block_hash, + "Could not find the expected block hash as latest block in `ParachainBlockData`" + ); + } + + if let Ok(block_data) = + cumulus_primitives_core::parachain_block_data::v0::ParachainBlockData::::decode_all( + &mut &data[..], + ) { + if block_data.header.hash() == expected_block_hash { + return Some(block_data.into()) + } + } + + tracing::warn!( + target: LOG_TARGET, + ?expected_block_hash, + "Could not decode `ParachainBlockData` from recovered PoV", + ); + + None + } + /// Handle a recovered candidate. async fn handle_candidate_recovered(&mut self, block_hash: Block::Hash, pov: Option<&PoV>) { let pov = match pov { @@ -384,37 +421,14 @@ where }, }; - let block_data = match ParachainBlockData::::decode(&mut &raw_block_data[..]) { - Ok(d) => d, - Err(error) => { - tracing::warn!( - target: LOG_TARGET, - ?error, - "Failed to decode parachain block data from recovered PoV", - ); - - self.reset_candidate(block_hash); - return - }, + let Some(block_data) = Self::decode_parachain_block_data(&raw_block_data, block_hash) + else { + self.reset_candidate(block_hash); + return }; let blocks_and_proofs = block_data.into_inner(); - if let Some((block, _)) = blocks_and_proofs.last() { - let last_block_hash = block.hash(); - if last_block_hash != block_hash { - tracing::debug!( - target: LOG_TARGET, - expected_block_hash = ?block_hash, - got_block_hash = ?last_block_hash, - "Recovered candidate doesn't contain the expected block.", - ); - - self.reset_candidate(block_hash); - return; - } - } - let Some(parent) = blocks_and_proofs.first().map(|(b, _)| *b.header().parent_hash()) else { tracing::debug!( target: LOG_TARGET, diff 
--git a/cumulus/client/pov-recovery/src/tests.rs b/cumulus/client/pov-recovery/src/tests.rs index b32731776a99d..9568ccba5cb1b 100644 --- a/cumulus/client/pov-recovery/src/tests.rs +++ b/cumulus/client/pov-recovery/src/tests.rs @@ -629,7 +629,10 @@ async fn pending_candidate_height_lower_than_latest_finalized() { #[case(RuntimeApiRequest::CANDIDATES_PENDING_AVAILABILITY_RUNTIME_REQUIREMENT)] #[case(10)] #[tokio::test] -async fn single_pending_candidate_recovery_success(#[case] runtime_version: u32) { +async fn single_pending_candidate_recovery_success( + #[case] runtime_version: u32, + #[values(true, false)] latest_block_data: bool, +) { sp_tracing::init_for_tests(); let (recovery_subsystem_tx, mut recovery_subsystem_rx) = @@ -687,9 +690,15 @@ async fn single_pending_candidate_recovery_success(#[case] runtime_version: u32) Ok( AvailableData { pov: Arc::new(PoV { - block_data: ParachainBlockData::::new( + block_data: if latest_block_data { ParachainBlockData::::new( vec![(Block::new(header.clone(), vec![]), CompactProof { encoded_nodes: vec![] })] - ).encode().into() + ).encode()} else { + cumulus_primitives_core::parachain_block_data::v0::ParachainBlockData:: { + header: header.clone(), + extrinsics: Vec::new(), + storage_proof: CompactProof { encoded_nodes: Vec::new() } + }.encode() + }.into() }), validation_data: dummy_pvd(), } @@ -1418,3 +1427,106 @@ async fn chained_recovery_child_succeeds_before_parent() { // No more import requests received assert_matches!(import_requests_rx.next().timeout(Duration::from_millis(100)).await, None); } + +#[tokio::test] +async fn recovery_multiple_blocks_per_candidate() { + sp_tracing::init_for_tests(); + + let (recovery_subsystem_tx, mut recovery_subsystem_rx) = + AvailabilityRecoverySubsystemHandle::new(); + let recovery_delay_range = + RecoveryDelayRange { min: Duration::from_millis(0), max: Duration::from_millis(0) }; + let (_explicit_recovery_chan_tx, explicit_recovery_chan_rx) = mpsc::channel(10); + let candidates = 
make_candidate_chain(1..4); + let candidate = candidates.last().clone().unwrap(); + let headers = candidates + .iter() + .map(|c| Header::decode(&mut &c.commitments.head_data.0[..]).unwrap()) + .collect::>(); + let header = headers.last().unwrap(); + + let relay_chain_client = Relaychain::new(vec![( + PHeader { + parent_hash: PHash::from_low_u64_be(0), + number: 1, + state_root: PHash::random(), + extrinsics_root: PHash::random(), + digest: Default::default(), + }, + vec![candidate.clone()], + )]); + let mut known_blocks = HashMap::new(); + known_blocks.insert(GENESIS_HASH, BlockStatus::InChainWithState); + let known_blocks = Arc::new(Mutex::new(known_blocks)); + let (parachain_client, import_notifications_tx, _finality_notifications_tx) = + ParachainClient::new(vec![dummy_usage_info(0)], known_blocks.clone()); + let (parachain_import_queue, mut import_requests_rx) = ParachainImportQueue::new(); + + let pov_recovery = PoVRecovery::::new( + Box::new(recovery_subsystem_tx), + recovery_delay_range, + Arc::new(parachain_client), + Box::new(parachain_import_queue), + relay_chain_client, + ParaId::new(1000), + explicit_recovery_chan_rx, + Arc::new(DummySyncOracle::default()), + ); + + task::spawn(pov_recovery.run()); + + // Candidates are recovered in the right order. 
+ assert_matches!( + recovery_subsystem_rx.next().await, + Some(AvailabilityRecoveryMessage::RecoverAvailableData( + receipt, + session_index, + None, + None, + response_tx + )) => { + assert_eq!(receipt.hash(), candidate.hash()); + assert_eq!(session_index, TEST_SESSION_INDEX); + response_tx + .send(Ok(AvailableData { + pov: Arc::new(PoV { + block_data: ParachainBlockData::::new( + headers.iter().map(|h| (Block::new(h.clone(), vec![]), CompactProof { encoded_nodes: vec![] })).collect() + ) + .encode() + .into(), + }), + validation_data: dummy_pvd(), + })) + .unwrap(); + } + ); + + assert_matches!(import_requests_rx.next().await, Some(incoming_blocks) => { + assert_eq!(incoming_blocks.len(), 3); + assert_eq!(incoming_blocks.iter().map(|b| b.header.clone().unwrap()).collect::>(), headers); + }); + + known_blocks + .lock() + .expect("Poisoned lock") + .insert(header.hash(), BlockStatus::InChainWithState); + + let (unpin_sender, _unpin_receiver) = sc_utils::mpsc::tracing_unbounded("test_unpin", 10); + import_notifications_tx + .unbounded_send(BlockImportNotification::new( + header.hash(), + BlockOrigin::ConsensusBroadcast, + header.clone(), + false, + None, + unpin_sender, + )) + .unwrap(); + + // No more recovery messages received. + assert_matches!(recovery_subsystem_rx.next().timeout(Duration::from_millis(100)).await, None); + + // No more import requests received + assert_matches!(import_requests_rx.next().timeout(Duration::from_millis(100)).await, None); +} diff --git a/cumulus/primitives/core/src/parachain_block_data.rs b/cumulus/primitives/core/src/parachain_block_data.rs index acf3a51b3e57b..00b24875567c1 100644 --- a/cumulus/primitives/core/src/parachain_block_data.rs +++ b/cumulus/primitives/core/src/parachain_block_data.rs @@ -33,6 +33,15 @@ pub mod v0 { /// The data that is required to emulate the storage accesses executed by all extrinsics. 
pub storage_proof: sp_trie::CompactProof, } + + impl From> for super::ParachainBlockData { + fn from(block_data: ParachainBlockData) -> Self { + Self::new(alloc::vec![( + Block::new(block_data.header, block_data.extrinsics), + block_data.storage_proof, + )]) + } + } } /// The parachain block that is created by a collator. diff --git a/cumulus/test/runtime/build.rs b/cumulus/test/runtime/build.rs index 7a7fe8ffaa82e..fa1f9eba2f58d 100644 --- a/cumulus/test/runtime/build.rs +++ b/cumulus/test/runtime/build.rs @@ -25,10 +25,8 @@ fn main() { .set_file_name("wasm_binary_spec_version_incremented.rs") .build(); - WasmBuilder::new() - .with_current_project() + WasmBuilder::init_with_defaults() .enable_feature("elastic-scaling") - .import_memory() .set_file_name("wasm_binary_elastic_scaling.rs") .build(); } From 3c0f51a74963d8b83564841a20b90b3cabe37302 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 18 Oct 2024 23:39:24 +0200 Subject: [PATCH 007/312] Use enum to be future proof --- .../core/src/parachain_block_data.rs | 49 +++++++++++++------ 1 file changed, 34 insertions(+), 15 deletions(-) diff --git a/cumulus/primitives/core/src/parachain_block_data.rs b/cumulus/primitives/core/src/parachain_block_data.rs index 00b24875567c1..482bc48a0cd3a 100644 --- a/cumulus/primitives/core/src/parachain_block_data.rs +++ b/cumulus/primitives/core/src/parachain_block_data.rs @@ -49,39 +49,50 @@ pub mod v0 { /// This is send as PoV (proof of validity block) to the relay-chain validators. There it will be /// passed to the parachain validation Wasm blob to be validated. #[derive(codec::Encode, codec::Decode, Clone)] -pub struct ParachainBlockData { - blocks: Vec<(Block, CompactProof)>, +pub enum ParachainBlockData { + #[codec(index = 1)] + V1(Vec<(Block, CompactProof)>), } impl ParachainBlockData { /// Creates a new instance of `Self`. 
pub fn new(blocks: Vec<(Block, CompactProof)>) -> Self { - Self { blocks } + Self::V1(blocks) } /// Returns an iterator yielding references to the stored blocks. pub fn blocks(&self) -> impl Iterator { - self.blocks.iter().map(|e| &e.0) + match self { + Self::V1(blocks) => blocks.iter().map(|e| &e.0), + } } /// Returns an iterator yielding mutable references to the stored blocks. pub fn blocks_mut(&mut self) -> impl Iterator { - self.blocks.iter_mut().map(|e| &mut e.0) + match self { + Self::V1(blocks) => blocks.iter_mut().map(|e| &mut e.0), + } } /// Returns an iterator yielding the stored blocks. pub fn into_blocks(self) -> impl Iterator { - self.blocks.into_iter().map(|d| d.0) + match self { + Self::V1(blocks) => blocks.into_iter().map(|e| e.0), + } } /// Returns an iterator yielding references to the stored proofs. pub fn proofs(&self) -> impl Iterator { - self.blocks.iter().map(|d| &d.1) + match self { + Self::V1(blocks) => blocks.iter().map(|e| &e.1), + } } /// Deconstruct into the inner parts. pub fn into_inner(self) -> Vec<(Block, CompactProof)> { - self.blocks + match self { + Self::V1(blocks) => blocks, + } } /// Log the size of the individual components (header, extrinsics, storage proof) as info. @@ -99,13 +110,21 @@ impl ParachainBlockData { /// /// Returns `None` if there is not exactly one block. 
pub fn as_v0(&self) -> Option> { - if self.blocks.len() != 1 { - return None + match self { + Self::V1(blocks) => { + if blocks.len() != 1 { + return None + } + + blocks.first().map(|(block, storage_proof)| { + let (header, extrinsics) = block.clone().deconstruct(); + v0::ParachainBlockData { + header, + extrinsics, + storage_proof: storage_proof.clone(), + } + }) + }, } - - self.blocks.first().map(|(block, storage_proof)| { - let (header, extrinsics) = block.clone().deconstruct(); - v0::ParachainBlockData { header, extrinsics, storage_proof: storage_proof.clone() } - }) } } From acb64aeecf05497e43f92bfa92a10783095faa78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 15 Nov 2024 16:12:56 +0100 Subject: [PATCH 008/312] Use scale --- cumulus/pallets/parachain-system/src/lib.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 39fc8321a072e..48d1388d7e59b 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -55,7 +55,6 @@ use frame_system::{ensure_none, ensure_root, pallet_prelude::HeaderFor}; use polkadot_parachain_primitives::primitives::RelayChainBlockNumber; use polkadot_runtime_parachains::FeeTracker; use scale_info::TypeInfo; -use sp_core::U256; use sp_runtime::{ traits::{Block as BlockT, BlockNumberProvider, Hash, One}, BoundedSlice, FixedU128, RuntimeDebug, Saturating, @@ -204,15 +203,16 @@ pub struct DefaultCoreSelector(PhantomData); impl SelectCore for DefaultCoreSelector { fn selected_core() -> (CoreSelector, ClaimQueueOffset) { - let core_selector: U256 = frame_system::Pallet::::block_number().into(); + let core_selector = frame_system::Pallet::::block_number().using_encoded(|b| b[0]); - (CoreSelector(core_selector.byte(0)), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET)) + (CoreSelector(core_selector), 
ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET)) } fn select_next_core() -> (CoreSelector, ClaimQueueOffset) { - let core_selector: U256 = (frame_system::Pallet::::block_number() + One::one()).into(); + let core_selector = + (frame_system::Pallet::::block_number() + One::one()).using_encoded(|b| b[0]); - (CoreSelector(core_selector.byte(0)), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET)) + (CoreSelector(core_selector), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET)) } } @@ -221,15 +221,16 @@ pub struct LookaheadCoreSelector(PhantomData); impl SelectCore for LookaheadCoreSelector { fn selected_core() -> (CoreSelector, ClaimQueueOffset) { - let core_selector: U256 = frame_system::Pallet::::block_number().into(); + let core_selector = frame_system::Pallet::::block_number().using_encoded(|b| b[0]); - (CoreSelector(core_selector.byte(0)), ClaimQueueOffset(1)) + (CoreSelector(core_selector), ClaimQueueOffset(1)) } fn select_next_core() -> (CoreSelector, ClaimQueueOffset) { - let core_selector: U256 = (frame_system::Pallet::::block_number() + One::one()).into(); + let core_selector = + (frame_system::Pallet::::block_number() + One::one()).using_encoded(|b| b[0]); - (CoreSelector(core_selector.byte(0)), ClaimQueueOffset(1)) + (CoreSelector(core_selector), ClaimQueueOffset(1)) } } From 70e7d50aed0a05eb8f36868197b3161b6cf42c00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 15 Nov 2024 23:14:54 +0100 Subject: [PATCH 009/312] Handle UMPSignals --- .../src/validate_block/implementation.rs | 61 +++++++++++++++++-- polkadot/primitives/src/vstaging/mod.rs | 4 +- 2 files changed, 57 insertions(+), 8 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 802a8fc75097c..eedabb44393f6 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ 
-27,8 +27,9 @@ use polkadot_parachain_primitives::primitives::{ }; use alloc::vec::Vec; -use codec::Encode; +use codec::{Decode, Encode}; +use cumulus_primitives_core::relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}; use frame_support::{ traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType}, BoundedVec, @@ -163,6 +164,7 @@ where let mut processed_downward_messages = 0; let mut upward_messages = BoundedVec::default(); + let mut upward_message_signals = Vec::>::new(); let mut horizontal_messages = BoundedVec::default(); let mut hrmp_watermark = Default::default(); let mut head_data = None; @@ -228,11 +230,33 @@ where new_validation_code = new_validation_code.take().or(crate::NewValidationCode::::get()); - upward_messages - .try_extend(crate::UpwardMessages::::get().into_iter()) - .expect( - "Number of upward messages should not be greater than `MAX_UPWARD_MESSAGE_NUM`", - ); + + let mut found_separator = false; + crate::UpwardMessages::::get() + .into_iter() + .filter_map(|m| { + if cfg!(feature = "experimental-ump-signals") { + if m == UMP_SEPARATOR { + found_separator = true; + None + } else if found_separator { + if upward_message_signals.iter().any(|s| *s != m) { + upward_message_signals.push(m); + } + None + } else { + Some(m) + } + } else { + Some(m) + } + }) + .for_each(|m| { + upward_messages.try_push(m) + .expect( + "Number of upward messages should not be greater than `MAX_UPWARD_MESSAGE_NUM`", + ) + }); processed_downward_messages += crate::ProcessedDownwardMessages::::get(); horizontal_messages.try_extend(crate::HrmpOutboundMessages::::get().into_iter()).expect( "Number of horizontal messages should not be greater than `MAX_HORIZONTAL_MESSAGE_NUM`", @@ -248,6 +272,31 @@ where }) } + if !upward_message_signals.is_empty() { + let mut selected_core = None; + + upward_message_signals.iter().for_each(|s| { + if let Ok(UMPSignal::SelectCore(selector, offset)) = UMPSignal::decode(&mut &s[..]) { + match &selected_core { + Some(selected_core) if *selected_core 
!= (selector, offset) => { + panic!("All `SelectCore` signals need to select the same core") + }, + Some(_) => {}, + None => { + selected_core = Some((selector, offset)); + }, + } + } + }); + + upward_messages + .try_push(UMP_SEPARATOR) + .expect("UMPSignals does not fit in UMPMessages"); + upward_messages + .try_extend(upward_message_signals.into_iter()) + .expect("UMPSignals does not fit in UMPMessages"); + } + ValidationResult { head_data: head_data.expect("HeadData not set"), new_validation_code: new_validation_code.map(Into::into), diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 271f78efe0901..91ebae3e8448b 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -416,11 +416,11 @@ impl From> for super::v8::CandidateReceipt { /// A strictly increasing sequence number, typically this would be the least significant byte of the /// block number. -#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug, Copy)] pub struct CoreSelector(pub u8); /// An offset in the relay chain claim queue. -#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug, Copy)] pub struct ClaimQueueOffset(pub u8); /// Signals that a parachain can send to the relay chain via the UMP queue. 
From e5277ffeb6df5d463baf2d4bd398a66b699e38a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 31 Jan 2025 23:18:09 +0100 Subject: [PATCH 010/312] Ensure the blocks match --- .../parachain-system/src/validate_block/implementation.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index eedabb44393f6..86f67e511435d 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -181,6 +181,13 @@ where ¶chain_head, ); + assert_eq!( + parent_header.hash(), + *block.header().parent_hash(), + "Invalid parent header hash: {:?}", + block.header().hash() + ); + // Create the db let db = match storage_proof.to_memory_db(Some(parent_header.state_root())) { Ok((db, _)) => db, From 7f20108d8a9cf2144aff40fc29be9c68addbc0d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 21 Mar 2025 23:39:28 +0100 Subject: [PATCH 011/312] Only one proof --- cumulus/client/collator/src/service.rs | 2 +- .../consensus/aura/src/collators/basic.rs | 2 +- .../consensus/aura/src/collators/lookahead.rs | 2 +- .../collators/slot_based/collation_task.rs | 22 +- cumulus/client/pov-recovery/src/lib.rs | 13 +- .../src/validate_block/implementation.rs | 219 +++++++++--------- .../core/src/parachain_block_data.rs | 56 +++-- 7 files changed, 161 insertions(+), 155 deletions(-) diff --git a/cumulus/client/collator/src/service.rs b/cumulus/client/collator/src/service.rs index 44cdc895f9212..41cfc6b866ec0 100644 --- a/cumulus/client/collator/src/service.rs +++ b/cumulus/client/collator/src/service.rs @@ -247,7 +247,7 @@ where .ok() .flatten()?; - let block_data = ParachainBlockData::::new(vec![(block, compact_proof)]); + let block_data = ParachainBlockData::::new(vec![block], compact_proof); let pov = 
polkadot_node_primitives::maybe_compress_pov(PoV { block_data: BlockData(if api_version >= 3 { diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs index 0ed140147f4e3..72558a93c9f9f 100644 --- a/cumulus/client/consensus/aura/src/collators/basic.rs +++ b/cumulus/client/consensus/aura/src/collators/basic.rs @@ -262,7 +262,7 @@ where ); if let Some((collation, block_data)) = maybe_collation { - let Some(block_hash) = block_data.blocks().nth(0).map(|b| b.hash()) else { + let Some(block_hash) = block_data.blocks().first().map(|b| b.hash()) else { continue }; let result_sender = diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index da52b52941396..5c98689a03d24 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -397,7 +397,7 @@ where { Ok(Some((collation, block_data))) => { let Some(new_block_header) = - block_data.blocks().nth(0).map(|b| b.header().clone()) + block_data.blocks().first().map(|b| b.header().clone()) else { tracing::error!(target: crate::LOG_TARGET, "Produced PoV doesn't contain any blocks"); break diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs index 819d58c9599ca..0414ebf2e1182 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs @@ -147,16 +147,18 @@ async fn handle_collation_message( - pov_path.clone(), - pov.clone(), - block_data.header().hash(), - *block_data.header().number(), - parent_header.clone(), - relay_parent_header.state_root, - relay_parent_header.number, - max_pov_size, - ); + if let Some(header) = block_data.blocks().first().map(|b| b.header()) { + export_pov_to_path::( + 
pov_path.clone(), + pov.clone(), + header.hash(), + *header.number(), + parent_header.clone(), + relay_parent_header.state_root, + relay_parent_header.number, + max_pov_size, + ); + } } else { tracing::error!(target: LOG_TARGET, "Failed to get relay parent header from hash: {relay_parent:?}"); } diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs index 4f708d8e1fddb..e6a0ddb8c5377 100644 --- a/cumulus/client/pov-recovery/src/lib.rs +++ b/cumulus/client/pov-recovery/src/lib.rs @@ -1,6 +1,5 @@ // Copyright (C) Parity Technologies (UK) Ltd. // This file is part of Cumulus. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // Cumulus is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -9,11 +8,11 @@ // Cumulus is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// along with Cumulus. If not, see . //! Parachain PoV recovery //! 
@@ -432,9 +431,9 @@ where return }; - let blocks_and_proofs = block_data.into_inner(); + let blocks = block_data.into_blocks(); - let Some(parent) = blocks_and_proofs.first().map(|(b, _)| *b.header().parent_hash()) else { + let Some(parent) = blocks.first().map(|b| *b.header().parent_hash()) else { tracing::debug!( target: LOG_TARGET, ?block_hash, @@ -461,7 +460,7 @@ where "Waiting for recovery of parent.", ); - blocks_and_proofs.into_iter().for_each(|(b, _)| { + blocks.into_iter().for_each(|b| { self.waiting_for_parent .entry(*b.header().parent_hash()) .or_default() @@ -495,7 +494,7 @@ where _ => (), } - self.import_blocks(blocks_and_proofs.into_iter().map(|d| d.0)); + self.import_blocks(blocks.into_iter()); } /// Import the given `blocks`. diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 3b3f1a1dca8a6..f25de4a69cf8d 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -1,18 +1,18 @@ // Copyright (C) Parity Technologies (UK) Ltd. // This file is part of Cumulus. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! The actual implementation of the validate block functionality. @@ -38,6 +38,7 @@ use sp_core::storage::{ChildInfo, StateVersion}; use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::KillStorageResult; use sp_runtime::traits::{Block as BlockT, ExtrinsicLike, HashingFor, Header as HeaderT}; +use sp_state_machine::OverlayedChanges; use sp_trie::{MemoryDB, ProofSizeProvider}; use trie_recorder::SizeOnlyRecorderProvider; @@ -149,13 +150,12 @@ where let mut parent_header = codec::decode_from_bytes::(parachain_head.clone()).expect("Invalid parent head"); - let blocks_and_proofs = block_data.into_inner(); + let (blocks, proof) = block_data.into_inner(); assert_eq!( - *blocks_and_proofs + *blocks .first() .expect("BlockData should have at least one block") - .0 .header() .parent_hash(), parent_header.hash(), @@ -169,9 +169,35 @@ where let mut hrmp_watermark = Default::default(); let mut head_data = None; let mut new_validation_code = None; - let num_blocks = blocks_and_proofs.len(); - - for (block_index, (block, storage_proof)) in blocks_and_proofs.into_iter().enumerate() { + let num_blocks = blocks.len(); + + // Create the db + let mut db = match proof.to_memory_db(Some(parent_header.state_root())) { + Ok((db, _)) => db, + Err(_) => panic!("Compact proof decoding failure."), + }; + + core::mem::drop(proof); + + // We use the same recorder 
across all blocks. Each node only contributed once to the total size + // of the storage proof. + let mut recorder = SizeOnlyRecorderProvider::new(); + let cache_provider = trie_cache::CacheProvider::new(); + // We use the storage root of the `parent_head` to ensure that it is the correct root. + // This is already being done above while creating the in-memory db, but let's be paranoid!! + let backend = sp_state_machine::TrieBackendBuilder::new_with_cache( + db, + *parent_header.state_root(), + cache_provider, + ) + .with_recorder(recorder.clone()) + .build(); + + // We let all blocks contribute to the same overlay. Data written by a previous block will be + // directly accessible without going to the db. + let mut overlay = OverlayedChanges::default(); + + for (block_index, block) in blocks.into_iter().enumerate() { let inherent_data = extract_parachain_inherent_data(&block); validate_validation_data( @@ -181,102 +207,86 @@ where ¶chain_head, ); - assert_eq!( - parent_header.hash(), - *block.header().parent_hash(), - "Invalid parent header hash: {:?}", - block.header().hash() - ); + run_with_externalities_and_recorder::( + &backend, + &mut recorder, + &mut Default::default(), + || { + let relay_chain_proof = crate::RelayChainStateProof::new( + PSC::SelfParaId::get(), + inherent_data.validation_data.relay_parent_storage_root, + inherent_data.relay_chain_state.clone(), + ) + .expect("Invalid relay chain state proof"); + + #[allow(deprecated)] + let res = CI::check_inherents(&block, &relay_chain_proof); + + if !res.ok() { + if log::log_enabled!(log::Level::Error) { + res.into_errors().for_each(|e| { + log::error!("Checking inherent with identifier `{:?}` failed", e.0) + }); + } - // Create the db - let db = match storage_proof.to_memory_db(Some(parent_header.state_root())) { - Ok((db, _)) => db, - Err(_) => panic!("Compact proof decoding failure."), - }; - - core::mem::drop(storage_proof); - - let mut recorder = SizeOnlyRecorderProvider::new(); - let cache_provider = 
trie_cache::CacheProvider::new(); - // We use the storage root of the `parent_head` to ensure that it is the correct root. - // This is already being done above while creating the in-memory db, but let's be paranoid!! - let backend = sp_state_machine::TrieBackendBuilder::new_with_cache( - db, - *parent_header.state_root(), - cache_provider, - ) - .with_recorder(recorder.clone()) - .build(); - - run_with_externalities_and_recorder::(&backend, &mut recorder, || { - let relay_chain_proof = crate::RelayChainStateProof::new( - PSC::SelfParaId::get(), - inherent_data.validation_data.relay_parent_storage_root, - inherent_data.relay_chain_state.clone(), - ) - .expect("Invalid relay chain state proof"); - - #[allow(deprecated)] - let res = CI::check_inherents(&block, &relay_chain_proof); - - if !res.ok() { - if log::log_enabled!(log::Level::Error) { - res.into_errors().for_each(|e| { - log::error!("Checking inherent with identifier `{:?}` failed", e.0) - }); + panic!("Checking inherents failed"); } + }, + ); - panic!("Checking inherents failed"); - } - }); - - run_with_externalities_and_recorder::(&backend, &mut recorder, || { - parent_header = block.header().clone(); - - E::execute_block(block); - - new_validation_code = - new_validation_code.take().or(crate::NewValidationCode::::get()); - - let mut found_separator = false; - crate::UpwardMessages::::get() - .into_iter() - .filter_map(|m| { - if cfg!(feature = "experimental-ump-signals") { - if m == UMP_SEPARATOR { - found_separator = true; - None - } else if found_separator { - if upward_message_signals.iter().any(|s| *s != m) { - upward_message_signals.push(m); + run_with_externalities_and_recorder::( + &backend, + &mut recorder, + &mut overlay, + || { + parent_header = block.header().clone(); + + E::execute_block(block); + + new_validation_code = + new_validation_code.take().or(crate::NewValidationCode::::get()); + + let mut found_separator = false; + crate::UpwardMessages::::get() + .into_iter() + .filter_map(|m| { + 
if cfg!(feature = "experimental-ump-signals") { + if m == UMP_SEPARATOR { + found_separator = true; + None + } else if found_separator { + if upward_message_signals.iter().any(|s| *s != m) { + upward_message_signals.push(m); + } + None + } else { + Some(m) } - None } else { Some(m) } - } else { - Some(m) - } - }) - .for_each(|m| { - upward_messages.try_push(m) + }) + .for_each(|m| { + upward_messages.try_push(m) .expect( "Number of upward messages should not be greater than `MAX_UPWARD_MESSAGE_NUM`", ) - }); - processed_downward_messages += crate::ProcessedDownwardMessages::::get(); - horizontal_messages.try_extend(crate::HrmpOutboundMessages::::get().into_iter()).expect( + }); + + processed_downward_messages += crate::ProcessedDownwardMessages::::get(); + horizontal_messages.try_extend(crate::HrmpOutboundMessages::::get().into_iter()).expect( "Number of horizontal messages should not be greater than `MAX_HORIZONTAL_MESSAGE_NUM`", ); - hrmp_watermark = crate::HrmpWatermark::::get(); + hrmp_watermark = crate::HrmpWatermark::::get(); - if block_index + 1 == num_blocks { - head_data = Some( - crate::CustomValidationHeadData::::get() - .map_or_else(|| HeadData(parent_header.encode()), HeadData), - ); - } - }) + if block_index + 1 == num_blocks { + head_data = Some( + crate::CustomValidationHeadData::::get() + .map_or_else(|| HeadData(parent_header.encode()), HeadData), + ); + } + }, + ) } if !upward_message_signals.is_empty() { @@ -357,11 +367,10 @@ fn validate_validation_data( fn run_with_externalities_and_recorder R>( backend: &TrieBackend, recorder: &mut SizeOnlyRecorderProvider>, + overlay: &mut OverlayedChanges>, execute: F, ) -> R { - let mut overlay = sp_state_machine::OverlayedChanges::default(); - let mut ext = Ext::::new(&mut overlay, backend); - recorder.reset(); + let mut ext = Ext::::new(overlay, backend); recorder::using(recorder, || set_and_run_with_externalities(&mut ext, || execute())) } diff --git a/cumulus/primitives/core/src/parachain_block_data.rs 
b/cumulus/primitives/core/src/parachain_block_data.rs index 482bc48a0cd3a..ce9901778f40d 100644 --- a/cumulus/primitives/core/src/parachain_block_data.rs +++ b/cumulus/primitives/core/src/parachain_block_data.rs @@ -36,10 +36,10 @@ pub mod v0 { impl From> for super::ParachainBlockData { fn from(block_data: ParachainBlockData) -> Self { - Self::new(alloc::vec![( - Block::new(block_data.header, block_data.extrinsics), + Self::new( + alloc::vec![Block::new(block_data.header, block_data.extrinsics)], block_data.storage_proof, - )]) + ) } } } @@ -51,47 +51,47 @@ pub mod v0 { #[derive(codec::Encode, codec::Decode, Clone)] pub enum ParachainBlockData { #[codec(index = 1)] - V1(Vec<(Block, CompactProof)>), + V1 { blocks: Vec, proof: CompactProof }, } impl ParachainBlockData { /// Creates a new instance of `Self`. - pub fn new(blocks: Vec<(Block, CompactProof)>) -> Self { - Self::V1(blocks) + pub fn new(blocks: Vec, proof: CompactProof) -> Self { + Self::V1 { blocks, proof } } - /// Returns an iterator yielding references to the stored blocks. - pub fn blocks(&self) -> impl Iterator { + /// Returns references to the stored blocks. + pub fn blocks(&self) -> &[Block] { match self { - Self::V1(blocks) => blocks.iter().map(|e| &e.0), + Self::V1 { blocks, .. } => &blocks, } } - /// Returns an iterator yielding mutable references to the stored blocks. - pub fn blocks_mut(&mut self) -> impl Iterator { + /// Returns mutable references to the stored blocks. + pub fn blocks_mut(&mut self) -> &mut [Block] { match self { - Self::V1(blocks) => blocks.iter_mut().map(|e| &mut e.0), + Self::V1 { ref mut blocks, .. } => blocks, } } - /// Returns an iterator yielding the stored blocks. - pub fn into_blocks(self) -> impl Iterator { + /// Returns the stored blocks. + pub fn into_blocks(self) -> Vec { match self { - Self::V1(blocks) => blocks.into_iter().map(|e| e.0), + Self::V1 { blocks, .. } => blocks, } } - /// Returns an iterator yielding references to the stored proofs. 
- pub fn proofs(&self) -> impl Iterator { + /// Returns a reference to the stored proof. + pub fn proof(&self) -> &CompactProof { match self { - Self::V1(blocks) => blocks.iter().map(|e| &e.1), + Self::V1 { proof, .. } => proof, } } /// Deconstruct into the inner parts. - pub fn into_inner(self) -> Vec<(Block, CompactProof)> { + pub fn into_inner(self) -> (Vec, CompactProof) { match self { - Self::V1(blocks) => blocks, + Self::V1 { blocks, proof } => (blocks, proof), } } @@ -100,9 +100,9 @@ impl ParachainBlockData { tracing::info!( target: "cumulus", "PoV size {{ header: {}kb, extrinsics: {}kb, storage_proof: {}kb }}", - self.blocks().map(|b| b.header().encoded_size()).sum::() as f64 / 1024f64, - self.blocks().map(|b| b.extrinsics().encoded_size()).sum::() as f64 / 1024f64, - self.proofs().map(|p| p.encoded_size()).sum::() as f64 / 1024f64, + self.blocks().iter().map(|b| b.header().encoded_size()).sum::() as f64 / 1024f64, + self.blocks().iter().map(|b| b.extrinsics().encoded_size()).sum::() as f64 / 1024f64, + self.proof().encoded_size() as f64 / 1024f64, ); } @@ -111,18 +111,14 @@ impl ParachainBlockData { /// Returns `None` if there is not exactly one block. 
pub fn as_v0(&self) -> Option> { match self { - Self::V1(blocks) => { + Self::V1 { blocks, proof } => { if blocks.len() != 1 { return None } - blocks.first().map(|(block, storage_proof)| { + blocks.first().map(|block| { let (header, extrinsics) = block.clone().deconstruct(); - v0::ParachainBlockData { - header, - extrinsics, - storage_proof: storage_proof.clone(), - } + v0::ParachainBlockData { header, extrinsics, storage_proof: proof.clone() } }) }, } From ce343a6062f803d5d097e402777eb91926d894f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sun, 23 Mar 2025 22:59:47 +0100 Subject: [PATCH 012/312] Fix warnings --- .../parachain-system/src/validate_block/implementation.rs | 2 +- .../parachain-system/src/validate_block/trie_recorder.rs | 7 ------- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index f25de4a69cf8d..5fc086587e592 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -172,7 +172,7 @@ where let num_blocks = blocks.len(); // Create the db - let mut db = match proof.to_memory_db(Some(parent_header.state_root())) { + let db = match proof.to_memory_db(Some(parent_header.state_root())) { Ok((db, _)) => db, Err(_) => panic!("Compact proof decoding failure."), }; diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 09acedf1d983f..c03c8c272cefc 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -105,13 +105,6 @@ impl SizeOnlyRecorderProvider { recorded_keys: Default::default(), } } - - /// Reset the internal state. 
- pub fn reset(&self) { - self.seen_nodes.borrow_mut().clear(); - *self.encoded_size.borrow_mut() = 0; - self.recorded_keys.borrow_mut().clear(); - } } impl sp_trie::TrieRecorderProvider for SizeOnlyRecorderProvider { From 44ab2887f8130d47432538dfc3605cf54d4f7a16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 24 Mar 2025 22:07:51 +0100 Subject: [PATCH 013/312] Fix compilation errors --- cumulus/client/collator/src/lib.rs | 4 +- cumulus/client/pov-recovery/src/tests.rs | 27 +++++----- .../src/validate_block/tests.rs | 54 ++++++++----------- .../src/validate_block/trie_recorder.rs | 3 -- cumulus/test/client/src/block_builder.rs | 2 +- cumulus/test/client/src/lib.rs | 10 ++-- 6 files changed, 45 insertions(+), 55 deletions(-) diff --git a/cumulus/client/collator/src/lib.rs b/cumulus/client/collator/src/lib.rs index e45e7d3cb1d70..9cd08bb06c3ae 100644 --- a/cumulus/client/collator/src/lib.rs +++ b/cumulus/client/collator/src/lib.rs @@ -454,10 +454,10 @@ mod tests { let block = ParachainBlockData::::decode(&mut &decompressed[..]).expect("Is a valid block"); - assert_eq!(1, *block.blocks().nth(0).unwrap().header().number()); + assert_eq!(1, *block.blocks()[0].header().number()); // Ensure that we did not include `:code` in the proof. 
- let proof = block.proofs().nth(0).unwrap().clone(); + let proof = block.proof().clone(); let backend = sp_state_machine::create_proof_check_backend::( *header.state_root(), diff --git a/cumulus/client/pov-recovery/src/tests.rs b/cumulus/client/pov-recovery/src/tests.rs index 53362a312130c..0a3d13dc0e417 100644 --- a/cumulus/client/pov-recovery/src/tests.rs +++ b/cumulus/client/pov-recovery/src/tests.rs @@ -697,7 +697,7 @@ async fn single_pending_candidate_recovery_success( AvailableData { pov: Arc::new(PoV { block_data: if latest_block_data { ParachainBlockData::::new( - vec![(Block::new(header.clone(), vec![]), CompactProof { encoded_nodes: vec![] })] + vec![Block::new(header.clone(), vec![])], CompactProof { encoded_nodes: vec![] } ).encode()} else { cumulus_primitives_core::parachain_block_data::v0::ParachainBlockData:: { header: header.clone(), @@ -805,7 +805,7 @@ async fn single_pending_candidate_recovery_retry_succeeds() { AvailableData { pov: Arc::new(PoV { block_data: ParachainBlockData::::new( - vec![(Block::new(header.clone(), Vec::new()), CompactProof { encoded_nodes: vec![] })] + vec![Block::new(header.clone(), Vec::new())], CompactProof { encoded_nodes: vec![] } ).encode().into() }), validation_data: dummy_pvd(), @@ -1109,10 +1109,10 @@ async fn candidate_is_imported_while_awaiting_recovery() { recovery_response_tx .send(Ok(AvailableData { pov: Arc::new(PoV { - block_data: ParachainBlockData::::new(vec![( - Block::new(header.clone(), vec![]), + block_data: ParachainBlockData::::new( + vec![Block::new(header.clone(), vec![])], CompactProof { encoded_nodes: vec![] }, - )]) + ) .encode() .into(), }), @@ -1206,10 +1206,10 @@ async fn candidate_is_finalized_while_awaiting_recovery() { recovery_response_tx .send(Ok(AvailableData { pov: Arc::new(PoV { - block_data: ParachainBlockData::::new(vec![( - Block::new(header.clone(), vec![]), + block_data: ParachainBlockData::::new( + vec![Block::new(header.clone(), vec![])], CompactProof { encoded_nodes: vec![] }, 
- )]) + ) .encode() .into(), }), @@ -1294,7 +1294,7 @@ async fn chained_recovery_success() { .send(Ok(AvailableData { pov: Arc::new(PoV { block_data: ParachainBlockData::::new( - vec![(Block::new(header.clone(), vec![]), CompactProof { encoded_nodes: vec![] })] + vec![Block::new(header.clone(), vec![])], CompactProof { encoded_nodes: vec![] } ) .encode() .into(), @@ -1408,10 +1408,10 @@ async fn chained_recovery_child_succeeds_before_parent() { recovery_response_sender .send(Ok(AvailableData { pov: Arc::new(PoV { - block_data: ParachainBlockData::::new(vec![( - Block::new(header.clone(), vec![]), + block_data: ParachainBlockData::::new( + vec![Block::new(header.clone(), vec![])], CompactProof { encoded_nodes: vec![] }, - )]) + ) .encode() .into(), }), @@ -1497,7 +1497,8 @@ async fn recovery_multiple_blocks_per_candidate() { .send(Ok(AvailableData { pov: Arc::new(PoV { block_data: ParachainBlockData::::new( - headers.iter().map(|h| (Block::new(h.clone(), vec![]), CompactProof { encoded_nodes: vec![] })).collect() + headers.iter().map(|h| Block::new(h.clone(), vec![])).collect(), + CompactProof { encoded_nodes: vec![] }, ) .encode() .into(), diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index dc5d56a26d3e4..16a087eddcdea 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -167,6 +167,8 @@ fn build_multiple_blocks_with_witness( let mut persisted_validation_data = None; let mut blocks = Vec::new(); + //TODO: Fix this, not correct. 
+ let mut proof = None; for _ in 0..num_blocks { let cumulus_test_client::BlockBuilderAndSupportData { @@ -176,24 +178,21 @@ fn build_multiple_blocks_with_witness( persisted_validation_data = Some(p_v_data); - blocks.extend( - block_builder - .build_parachain_block(*parent_head.state_root()) - .into_inner() - .into_iter() - .inspect(|d| { - futures::executor::block_on( - client.import_as_best(BlockOrigin::Own, d.0.clone()), - ) - .unwrap(); - - parent_head = d.0.header.clone(); - }), - ); + let (build_blocks, build_proof) = + block_builder.build_parachain_block(*parent_head.state_root()).into_inner(); + + proof.get_or_insert_with(|| build_proof); + + blocks.extend(build_blocks.into_iter().inspect(|b| { + futures::executor::block_on(client.import_as_best(BlockOrigin::Own, b.clone())) + .unwrap(); + + parent_head = b.header.clone(); + })); } TestBlockData { - block: ParachainBlockData::new(blocks), + block: ParachainBlockData::new(blocks, proof.unwrap()), validation_data: persisted_validation_data.unwrap(), } } @@ -207,7 +206,7 @@ fn validate_block_works() { build_block_with_witness(&client, Vec::new(), parent_head.clone(), Default::default()); let block = seal_block(block, &client); - let header = block.blocks().nth(0).unwrap().header().clone(); + let header = block.blocks()[0].header().clone(); let res_header = call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) .expect("Calls `validate_block`"); @@ -215,6 +214,7 @@ fn validate_block_works() { } #[test] +#[ignore = "Needs another pr to work"] fn validate_multiple_blocks_work() { sp_tracing::try_init_simple(); @@ -251,7 +251,7 @@ fn validate_block_with_extra_extrinsics() { Default::default(), ); let block = seal_block(block, &client); - let header = block.blocks().nth(0).unwrap().header().clone(); + let header = block.blocks()[0].header().clone(); let res_header = call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) @@ -284,7 +284,7 @@ fn 
validate_block_returns_custom_head_data() { parent_head.clone(), Default::default(), ); - let header = block.blocks().nth(0).unwrap().header().clone(); + let header = block.blocks()[0].header().clone(); assert_ne!(expected_header, header.encode()); let block = seal_block(block, &client); @@ -306,12 +306,7 @@ fn validate_block_invalid_parent_hash() { let (client, parent_head) = create_test_client(); let TestBlockData { mut block, validation_data, .. } = build_block_with_witness(&client, Vec::new(), parent_head.clone(), Default::default()); - block - .blocks_mut() - .nth(0) - .unwrap() - .header - .set_parent_hash(Hash::from_low_u64_be(1)); + block.blocks_mut()[0].header.set_parent_hash(Hash::from_low_u64_be(1)); call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) .unwrap_err(); @@ -361,12 +356,7 @@ fn check_inherents_are_unsigned_and_before_all_other_extrinsics() { let TestBlockData { mut block, validation_data, .. } = build_block_with_witness(&client, Vec::new(), parent_head.clone(), Default::default()); - block - .blocks_mut() - .nth(0) - .unwrap() - .extrinsics - .insert(0, transfer(&client, Alice, Bob, 69)); + block.blocks_mut()[0].extrinsics.insert(0, transfer(&client, Alice, Bob, 69)); call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) .unwrap_err(); @@ -435,7 +425,7 @@ fn validate_block_works_with_child_tries() { Default::default(), ); - let block = block.blocks().nth(0).unwrap().clone(); + let block = block.blocks()[0].clone(); futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); @@ -449,7 +439,7 @@ fn validate_block_works_with_child_tries() { ); let block = seal_block(block, &client); - let header = block.blocks().nth(0).unwrap().header().clone(); + let header = block.blocks()[0].header().clone(); let res_header = call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) .expect("Calls `validate_block`"); diff --git 
a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index c03c8c272cefc..ec748d90e8263 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -285,9 +285,6 @@ mod tests { reference_recorder.estimate_encoded_size(), recorder_for_test.estimate_encoded_size() ); - - recorder_for_test.reset(); - assert_eq!(recorder_for_test.estimate_encoded_size(), 0) } } } diff --git a/cumulus/test/client/src/block_builder.rs b/cumulus/test/client/src/block_builder.rs index 75b8d6932cdf6..63796a665c7de 100644 --- a/cumulus/test/client/src/block_builder.rs +++ b/cumulus/test/client/src/block_builder.rs @@ -198,6 +198,6 @@ impl<'a> BuildParachainBlockData for sc_block_builder::BlockBuilder<'a, Block, C .into_compact_proof::<
::Hashing>(parent_state_root) .expect("Creates the compact proof"); - ParachainBlockData::new(vec![(built_block.block, storage_proof)]) + ParachainBlockData::new(vec![built_block.block], storage_proof) } } diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index 054b63de3c739..580f7d21f0f63 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -240,11 +240,12 @@ fn get_keystore() -> sp_keystore::KeystorePtr { /// Given parachain block data and a slot, seal the block with an aura seal. Assumes that the /// authorities of the test runtime are present in the keyring. pub fn seal_block(block: ParachainBlockData, client: &Client) -> ParachainBlockData { + let (blocks, proof) = block.into_inner(); + ParachainBlockData::new( - block - .into_inner() + blocks .into_iter() - .map(|(mut block, proof)| { + .map(|mut block| { let parachain_slot = find_pre_digest::::Signature>(&block.header) .unwrap(); @@ -263,8 +264,9 @@ pub fn seal_block(block: ParachainBlockData, client: &Client) -> ParachainBlockD .expect("Should be able to create seal"); block.header.digest_mut().push(seal_digest); - (block, proof) + block }) .collect::>(), + proof, ) } From 8e46176367b24f55ca65db2e26d37f5c89fc3642 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 24 Mar 2025 22:44:01 +0100 Subject: [PATCH 014/312] More fixes --- cumulus/test/service/benches/validate_block.rs | 12 ++++-------- .../test/service/benches/validate_block_glutton.rs | 10 +++++----- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/cumulus/test/service/benches/validate_block.rs b/cumulus/test/service/benches/validate_block.rs index 0e5156653d244..ecfc824b571fa 100644 --- a/cumulus/test/service/benches/validate_block.rs +++ b/cumulus/test/service/benches/validate_block.rs @@ -91,7 +91,8 @@ fn benchmark_block_validation(c: &mut Criterion) { let para_id = ParaId::from(cumulus_test_runtime::PARACHAIN_ID); let mut test_client_builder = 
TestClientBuilder::with_default_backend(); let genesis_init = test_client_builder.genesis_init_mut(); - *genesis_init = cumulus_test_client::GenesisParameters { endowed_accounts: account_ids }; + *genesis_init = + cumulus_test_client::GenesisParameters { endowed_accounts: account_ids, wasm: None }; let client = test_client_builder.build_with_native_executor(None).0; let (max_transfer_count, extrinsics) = create_extrinsics(&client, &src_accounts, &dst_accounts); @@ -119,8 +120,7 @@ fn benchmark_block_validation(c: &mut Criterion) { let parachain_block = block_builder.build_parachain_block(*parent_header.state_root()); - let proof_size_in_kb = - parachain_block.proofs().map(|p| p.encoded_size()).sum::() as f64 / 1024f64; + let proof_size_in_kb = parachain_block.proof().encoded_size() as f64 / 1024f64; let runtime = utils::get_wasm_module(); let (relay_parent_storage_root, _) = sproof_builder.into_state_root_and_proof(); @@ -135,11 +135,7 @@ fn benchmark_block_validation(c: &mut Criterion) { // This is not strictly necessary for this benchmark, but // let us make sure that the result of `validate_block` is what // we expect. 
- verify_expected_result( - &runtime, - &encoded_params, - parachain_block.blocks().nth(0).unwrap().clone(), - ); + verify_expected_result(&runtime, &encoded_params, parachain_block.blocks()[0].clone()); let mut group = c.benchmark_group("Block validation"); group.sample_size(20); diff --git a/cumulus/test/service/benches/validate_block_glutton.rs b/cumulus/test/service/benches/validate_block_glutton.rs index 05ed11e3d672d..05c422f84dec2 100644 --- a/cumulus/test/service/benches/validate_block_glutton.rs +++ b/cumulus/test/service/benches/validate_block_glutton.rs @@ -63,7 +63,7 @@ fn benchmark_block_validation(c: &mut Criterion) { let endowed_accounts = vec![AccountId::from(Alice.public())]; let mut test_client_builder = TestClientBuilder::with_default_backend(); let genesis_init = test_client_builder.genesis_init_mut(); - *genesis_init = cumulus_test_client::GenesisParameters { endowed_accounts }; + *genesis_init = cumulus_test_client::GenesisParameters { endowed_accounts, wasm: None }; let client = test_client_builder.build_with_native_executor(None).0; @@ -80,7 +80,7 @@ fn benchmark_block_validation(c: &mut Criterion) { runtime.block_on(import_block( &client, - parachain_block.blocks().nth(0).unwrap().clone(), + parachain_block.blocks()[0].clone(), false, )); @@ -97,10 +97,10 @@ fn benchmark_block_validation(c: &mut Criterion) { let parachain_block = block_builder.build_parachain_block(*parent_header.state_root()); let proof_size_in_kb = - parachain_block.proofs().map(|p| p.encoded_size()).sum::() as f64 / 1024f64; + parachain_block.proof().encoded_size() as f64 / 1024f64; runtime.block_on(import_block( &client, - parachain_block.blocks().nth(0).unwrap().clone(), + parachain_block.blocks()[0].clone(), false, )); let runtime = utils::get_wasm_module(); @@ -121,7 +121,7 @@ fn benchmark_block_validation(c: &mut Criterion) { verify_expected_result( &runtime, &encoded_params, - parachain_block.blocks().nth(0).unwrap().clone(), + parachain_block.blocks()[0].clone(), 
); group.bench_function( From b0b041dbf15b0a38228889b567cd1ed3e5070577 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 24 Mar 2025 22:50:25 +0100 Subject: [PATCH 015/312] FMT.. --- .../service/benches/validate_block_glutton.rs | 21 ++++--------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/cumulus/test/service/benches/validate_block_glutton.rs b/cumulus/test/service/benches/validate_block_glutton.rs index 05c422f84dec2..06ad739965146 100644 --- a/cumulus/test/service/benches/validate_block_glutton.rs +++ b/cumulus/test/service/benches/validate_block_glutton.rs @@ -78,11 +78,7 @@ fn benchmark_block_validation(c: &mut Criterion) { set_glutton_parameters(&client, is_first, compute_ratio, storage_ratio); is_first = false; - runtime.block_on(import_block( - &client, - parachain_block.blocks()[0].clone(), - false, - )); + runtime.block_on(import_block(&client, parachain_block.blocks()[0].clone(), false)); // Build benchmark block let parent_hash = client.usage_info().chain.best_hash; @@ -96,13 +92,8 @@ fn benchmark_block_validation(c: &mut Criterion) { client.init_block_builder(Some(validation_data), Default::default()); let parachain_block = block_builder.build_parachain_block(*parent_header.state_root()); - let proof_size_in_kb = - parachain_block.proof().encoded_size() as f64 / 1024f64; - runtime.block_on(import_block( - &client, - parachain_block.blocks()[0].clone(), - false, - )); + let proof_size_in_kb = parachain_block.proof().encoded_size() as f64 / 1024f64; + runtime.block_on(import_block(&client, parachain_block.blocks()[0].clone(), false)); let runtime = utils::get_wasm_module(); let sproof_builder: RelayStateSproofBuilder = Default::default(); @@ -118,11 +109,7 @@ fn benchmark_block_validation(c: &mut Criterion) { // This is not strictly necessary for this benchmark, but // let us make sure that the result of `validate_block` is what // we expect. 
- verify_expected_result( - &runtime, - &encoded_params, - parachain_block.blocks()[0].clone(), - ); + verify_expected_result(&runtime, &encoded_params, parachain_block.blocks()[0].clone()); group.bench_function( format!( From b6cfcda9821b9021627e7257b6b7b0fd2f5adbe9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 24 Mar 2025 22:50:47 +0100 Subject: [PATCH 016/312] More --- cumulus/bin/pov-validator/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/bin/pov-validator/Cargo.toml b/cumulus/bin/pov-validator/Cargo.toml index d7af29a6bcb25..a919e3f68eace 100644 --- a/cumulus/bin/pov-validator/Cargo.toml +++ b/cumulus/bin/pov-validator/Cargo.toml @@ -19,8 +19,8 @@ sc-executor.workspace = true sp-core.workspace = true sp-io.workspace = true sp-maybe-compressed-blob.workspace = true -tracing-subscriber.workspace = true tracing.workspace = true +tracing-subscriber.workspace = true [lints] workspace = true From 05096afaf6c2fe0196fbcd8396e8c8196b5740fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 25 Mar 2025 09:33:38 +0100 Subject: [PATCH 017/312] MIGHTY CLIPPY ACCEPT MY SACRIFICE --- cumulus/client/pov-recovery/src/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/client/pov-recovery/src/tests.rs b/cumulus/client/pov-recovery/src/tests.rs index 0a3d13dc0e417..78884691cc384 100644 --- a/cumulus/client/pov-recovery/src/tests.rs +++ b/cumulus/client/pov-recovery/src/tests.rs @@ -1444,7 +1444,7 @@ async fn recovery_multiple_blocks_per_candidate() { RecoveryDelayRange { min: Duration::from_millis(0), max: Duration::from_millis(0) }; let (_explicit_recovery_chan_tx, explicit_recovery_chan_rx) = mpsc::channel(10); let candidates = make_candidate_chain(1..4); - let candidate = candidates.last().clone().unwrap(); + let candidate = candidates.last().unwrap(); let headers = candidates .iter() .map(|c| Header::decode(&mut 
&c.commitments.head_data.0[..]).unwrap()) From fa0889883f424856cc2fb7f8acd54285678932a7 Mon Sep 17 00:00:00 2001 From: "cmd[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 25 Mar 2025 08:38:46 +0000 Subject: [PATCH 018/312] Update from github-actions[bot] running command 'prdoc --bump major --audience node_dev' --- prdoc/pr_6137.prdoc | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 prdoc/pr_6137.prdoc diff --git a/prdoc/pr_6137.prdoc b/prdoc/pr_6137.prdoc new file mode 100644 index 0000000000000..5f7a40f13766d --- /dev/null +++ b/prdoc/pr_6137.prdoc @@ -0,0 +1,22 @@ +title: 'cumulus: `ParachainBlockData` support multiple blocks' +doc: +- audience: Node Dev + description: |- + This pull request adds support to `ParachainBlockData` to support multiple blocks at once. This basically means that cumulus based Parachains could start packaging multiple blocks into one `PoV`. From the relay chain POV nothing changes and these `PoV`s appear like any other `PoV`. Internally this `PoV` then executes the blocks sequentially. However, all these blocks together can use the same amount of resources like a single `PoV`. This pull request is basically a preparation to support running parachains with a faster block time than the relay chain. + + This breaks the encoding of `ParachainBlockData`. It requires that the collators upgrade first before the runtime requiring the new `ParachainBlockData` is enacted. The collators will decide based on the api version of `CollectCollationInfo`, which `ParachainBlockData` format they will send to the relay chain so that the validation code can interpret it correctly. 
+crates: +- name: cumulus-client-collator + bump: major +- name: cumulus-client-consensus-aura + bump: major +- name: cumulus-client-pov-recovery + bump: major +- name: cumulus-pallet-parachain-system + bump: major +- name: cumulus-primitives-core + bump: major +- name: polkadot-primitives + bump: major +- name: cumulus-pov-validator + bump: major From 4f5ffb35fe545fe4d1f745d30ddb6ed618d25c86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 25 Mar 2025 10:51:05 +0100 Subject: [PATCH 019/312] Fix doc issue --- cumulus/client/collator/src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/client/collator/src/service.rs b/cumulus/client/collator/src/service.rs index 41cfc6b866ec0..921f1890f783e 100644 --- a/cumulus/client/collator/src/service.rs +++ b/cumulus/client/collator/src/service.rs @@ -178,7 +178,7 @@ where /// /// Returns `Ok(Some((CollationInfo, ApiVersion)))` on success, `Err(_)` on error or `Ok(None)` /// if the runtime api isn't implemented by the runtime. `ApiVersion` being the version of the - /// [`CollectCollectionInfo`] runtime api. + /// [`CollectCollationInfo`] runtime api. 
pub fn fetch_collation_info( &self, block_hash: Block::Hash, From 99f814c7588b75de50bd3d25af4ba0bdab97c485 Mon Sep 17 00:00:00 2001 From: "cmd[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 25 Mar 2025 10:06:12 +0000 Subject: [PATCH 020/312] Update from github-actions[bot] running command 'fmt' --- cumulus/bin/pov-validator/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/bin/pov-validator/Cargo.toml b/cumulus/bin/pov-validator/Cargo.toml index a919e3f68eace..d7af29a6bcb25 100644 --- a/cumulus/bin/pov-validator/Cargo.toml +++ b/cumulus/bin/pov-validator/Cargo.toml @@ -19,8 +19,8 @@ sc-executor.workspace = true sp-core.workspace = true sp-io.workspace = true sp-maybe-compressed-blob.workspace = true -tracing.workspace = true tracing-subscriber.workspace = true +tracing.workspace = true [lints] workspace = true From 146f29de32b725017463c92fb0a6b3efda498824 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 25 Mar 2025 11:07:32 +0100 Subject: [PATCH 021/312] Fix some issues --- cumulus/client/pov-recovery/src/lib.rs | 5 ++-- .../src/validate_block/implementation.rs | 26 +++++++++---------- .../core/src/parachain_block_data.rs | 26 +++++++++---------- prdoc/pr_6137.prdoc | 8 ++++-- 4 files changed, 35 insertions(+), 30 deletions(-) diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs index e6a0ddb8c5377..b9ad5b3f91dc5 100644 --- a/cumulus/client/pov-recovery/src/lib.rs +++ b/cumulus/client/pov-recovery/src/lib.rs @@ -1,5 +1,6 @@ // Copyright (C) Parity Technologies (UK) Ltd. // This file is part of Cumulus. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // Cumulus is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -8,11 +9,11 @@ // Cumulus is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// along with Cumulus. If not, see . //! Parachain PoV recovery //! diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 5fc086587e592..74a6f23809be2 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -1,18 +1,18 @@ // Copyright (C) Parity Technologies (UK) Ltd. // This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! The actual implementation of the validate block functionality. diff --git a/cumulus/primitives/core/src/parachain_block_data.rs b/cumulus/primitives/core/src/parachain_block_data.rs index ce9901778f40d..7e56124922da9 100644 --- a/cumulus/primitives/core/src/parachain_block_data.rs +++ b/cumulus/primitives/core/src/parachain_block_data.rs @@ -1,18 +1,18 @@ // Copyright (C) Parity Technologies (UK) Ltd. // This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. //! Provides [`ParachainBlockData`] and its historical versions. diff --git a/prdoc/pr_6137.prdoc b/prdoc/pr_6137.prdoc index 5f7a40f13766d..60f88966042c1 100644 --- a/prdoc/pr_6137.prdoc +++ b/prdoc/pr_6137.prdoc @@ -2,9 +2,13 @@ title: 'cumulus: `ParachainBlockData` support multiple blocks' doc: - audience: Node Dev description: |- - This pull request adds support to `ParachainBlockData` to support multiple blocks at once. This basically means that cumulus based Parachains could start packaging multiple blocks into one `PoV`. From the relay chain POV nothing changes and these `PoV`s appear like any other `PoV`. Internally this `PoV` then executes the blocks sequentially. However, all these blocks together can use the same amount of resources like a single `PoV`. This pull request is basically a preparation to support running parachains with a faster block time than the relay chain. + This pull request adds support to `ParachainBlockData` to support multiple blocks at once. This basically means that cumulus based Parachains could start packaging multiple blocks into one `PoV`. + From the relay chain PoV nothing changes and these `PoV`s appear like any other `PoV`. Internally this `PoV` then executes the blocks sequentially. However, all these blocks together can use the same amount of resources like a single `PoV`. + This pull request is basically a preparation to support running parachains with a faster block time than the relay chain. + + This breaks the encoding of `ParachainBlockData`. It requires that the collators upgrade first before the runtime requiring the new `ParachainBlockData` is enacted. + The collators will decide based on the api version of `CollectCollationInfo`, which `ParachainBlockData` format they will send to the relay chain so that the validation code can interpret it correctly. - This breaks the encoding of `ParachainBlockData`. 
It requires that the collators upgrade first before the runtime requiring the new `ParachainBlockData` is enacted. The collators will decide based on the api version of `CollectCollationInfo`, which `ParachainBlockData` format they will send to the relay chain so that the validation code can interpret it correctly. crates: - name: cumulus-client-collator bump: major From 06dba39529727e21977287b371f946cc78b24868 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 25 Mar 2025 22:37:48 +0100 Subject: [PATCH 022/312] Fix bug --- .../src/validate_block/implementation.rs | 12 ++-- .../src/validate_block/tests.rs | 60 ++++++++++++++++--- polkadot/node/collation-generation/src/lib.rs | 11 +++- .../node/core/candidate-validation/src/lib.rs | 16 ++++- 4 files changed, 80 insertions(+), 19 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 74a6f23809be2..81e60a891b03f 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -255,7 +255,7 @@ where found_separator = true; None } else if found_separator { - if upward_message_signals.iter().any(|s| *s != m) { + if upward_message_signals.iter().all(|s| *s != m) { upward_message_signals.push(m); } None @@ -268,15 +268,15 @@ where }) .for_each(|m| { upward_messages.try_push(m) - .expect( - "Number of upward messages should not be greater than `MAX_UPWARD_MESSAGE_NUM`", - ) + .expect( + "Number of upward messages should not be greater than `MAX_UPWARD_MESSAGE_NUM`", + ) }); processed_downward_messages += crate::ProcessedDownwardMessages::::get(); horizontal_messages.try_extend(crate::HrmpOutboundMessages::::get().into_iter()).expect( - "Number of horizontal messages should not be greater than `MAX_HORIZONTAL_MESSAGE_NUM`", - ); + "Number of horizontal messages should not be greater than 
`MAX_HORIZONTAL_MESSAGE_NUM`", + ); hrmp_watermark = crate::HrmpWatermark::::get(); if block_index + 1 == num_blocks { diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 16a087eddcdea..4d9abcc2b39f1 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -14,6 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::*; use codec::{Decode, DecodeAll, Encode}; use cumulus_primitives_core::{ParachainBlockData, PersistedValidationData}; use cumulus_test_client::{ @@ -27,18 +28,21 @@ use cumulus_test_client::{ TestClientBuilder, TestClientBuilderExt, ValidationParams, }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; +use polkadot_parachain_primitives::primitives::ValidationResult; +#[cfg(feature = "experimental-ump-signals")] +use relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::{env, process::Command}; use crate::validate_block::MemoryOptimizedValidationParams; -fn call_validate_block_encoded_header( +fn call_validate_block_validation_result( validation_code: &[u8], parent_head: Header, block_data: ParachainBlockData, relay_parent_storage_root: Hash, -) -> cumulus_test_client::ExecutorResult> { +) -> cumulus_test_client::ExecutorResult { cumulus_test_client::validate_block( ValidationParams { block_data: BlockData(block_data.encode()), @@ -48,7 +52,6 @@ fn call_validate_block_encoded_header( }, validation_code, ) - .map(|v| v.head_data.0) } fn call_validate_block( @@ -56,13 +59,13 @@ fn call_validate_block( block_data: ParachainBlockData, relay_parent_storage_root: Hash, ) -> cumulus_test_client::ExecutorResult
{ - call_validate_block_encoded_header( + call_validate_block_validation_result( WASM_BINARY.expect("You need to build the WASM binaries to run the tests!"), parent_head, block_data, relay_parent_storage_root, ) - .map(|v| Header::decode(&mut &v[..]).expect("Decodes `Header`.")) + .map(|v| Header::decode(&mut &v.head_data.0[..]).expect("Decodes `Header`.")) } /// Call `validate_block` in the runtime with `elastic-scaling` activated. @@ -71,14 +74,14 @@ fn call_validate_block_elastic_scaling( block_data: ParachainBlockData, relay_parent_storage_root: Hash, ) -> cumulus_test_client::ExecutorResult
{ - call_validate_block_encoded_header( + call_validate_block_validation_result( test_runtime::elastic_scaling::WASM_BINARY .expect("You need to build the WASM binaries to run the tests!"), parent_head, block_data, relay_parent_storage_root, ) - .map(|v| Header::decode(&mut &v[..]).expect("Decodes `Header`.")) + .map(|v| Header::decode(&mut &v.head_data.0[..]).expect("Decodes `Header`.")) } fn create_test_client() -> (Client, Header) { @@ -288,13 +291,15 @@ fn validate_block_returns_custom_head_data() { assert_ne!(expected_header, header.encode()); let block = seal_block(block, &client); - let res_header = call_validate_block_encoded_header( + let res_header = call_validate_block_validation_result( WASM_BINARY.expect("You need to build the WASM binaries to run the tests!"), parent_head, block, validation_data.relay_parent_storage_root, ) - .expect("Calls `validate_block`"); + .expect("Calls `validate_block`") + .head_data + .0; assert_eq!(expected_header, res_header); } @@ -445,3 +450,40 @@ fn validate_block_works_with_child_tries() { .expect("Calls `validate_block`"); assert_eq!(header, res_header); } + +#[test] +#[cfg(feature = "experimental-ump-signals")] +fn validate_block_handles_ump_signal() { + sp_tracing::try_init_simple(); + + let (client, parent_head) = create_elastic_scaling_test_client(); + let extra_extrinsics = + vec![transfer(&client, Alice, Bob, 69), transfer(&client, Bob, Charlie, 100)]; + + let TestBlockData { block, validation_data } = build_block_with_witness( + &client, + extra_extrinsics, + parent_head.clone(), + Default::default(), + ); + + let block = seal_block(block, &client); + let upward_messages = call_validate_block_validation_result( + test_runtime::elastic_scaling::WASM_BINARY + .expect("You need to build the WASM binaries to run the tests!"), + parent_head, + block, + validation_data.relay_parent_storage_root, + ) + .expect("Calls `validate_block`") + .upward_messages; + + assert_eq!( + upward_messages, + vec![ + UMP_SEPARATOR, + 
UMPSignal::SelectCore(CoreSelector(1), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET)) + .encode() + ] + ); +} diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs index b4b3e009a6bd7..fd672e7160b14 100644 --- a/polkadot/node/collation-generation/src/lib.rs +++ b/polkadot/node/collation-generation/src/lib.rs @@ -612,7 +612,7 @@ async fn construct_and_distribute_receipt( commitments.head_data.hash(), validation_code_hash, ), - commitments, + commitments: commitments.clone(), }; ccr.check_core_index(&transposed_claim_queue) @@ -653,8 +653,15 @@ async fn construct_and_distribute_receipt( ?relay_parent, para_id = %para_id, ?core_index, - "candidate is generated", + "Candidate generated", ); + gum::trace!( + target: LOG_TARGET, + ?commitments, + candidate_hash = ?receipt.hash(), + "Candidate commitments", + ); + metrics.on_collation_generated(); sender diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index a40db8439f3ad..dec7cded89f98 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -865,10 +865,12 @@ async fn validate_candidate_exhaustive( let validation_code_hash = validation_code.hash(); let relay_parent = candidate_receipt.descriptor.relay_parent(); let para_id = candidate_receipt.descriptor.para_id(); + let candidate_hash = candidate_receipt.hash(); gum::debug!( target: LOG_TARGET, ?validation_code_hash, + ?candidate_hash, ?para_id, "About to validate a candidate.", ); @@ -888,7 +890,7 @@ async fn validate_candidate_exhaustive( &pov, &validation_code_hash, ) { - gum::info!(target: LOG_TARGET, ?para_id, "Invalid candidate (basic checks)"); + gum::debug!(target: LOG_TARGET, ?para_id, ?candidate_hash, "Invalid candidate (basic checks)"); return Ok(ValidationResult::Invalid(e)) } @@ -935,7 +937,7 @@ async fn validate_candidate_exhaustive( }; if let Err(ref error) = result { - 
gum::info!(target: LOG_TARGET, ?para_id, ?error, "Failed to validate candidate"); + gum::info!(target: LOG_TARGET, ?para_id, ?candidate_hash, ?error, "Failed to validate candidate"); } match result { @@ -943,6 +945,7 @@ async fn validate_candidate_exhaustive( gum::warn!( target: LOG_TARGET, ?para_id, + ?candidate_hash, ?e, "An internal error occurred during validation, will abstain from voting", ); @@ -1008,9 +1011,18 @@ async fn validate_candidate_exhaustive( gum::info!( target: LOG_TARGET, ?para_id, + ?candidate_hash, "Invalid candidate (commitments hash)" ); + gum::trace!( + target: LOG_TARGET, + ?para_id, + ?candidate_hash, + produced_commitments = ?committed_candidate_receipt.commitments, + "Invalid candidate commitments" + ); + // If validation produced a new set of commitments, we treat the candidate as // invalid. Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch)) From cedb8e268426aa4f2dad2a270d581e9a4903ce61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 26 Mar 2025 15:47:39 +0100 Subject: [PATCH 023/312] Fix more zombienet tests --- cumulus/client/pov-recovery/src/lib.rs | 6 +++++- cumulus/zombienet/tests/0002-pov_recovery.zndsl | 12 ++++++------ .../zombienet/tests/0009-elastic_pov_recovery.zndsl | 2 +- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs index b9ad5b3f91dc5..2ffff3a9d0d4a 100644 --- a/cumulus/client/pov-recovery/src/lib.rs +++ b/cumulus/client/pov-recovery/src/lib.rs @@ -504,7 +504,11 @@ where fn import_blocks(&mut self, blocks: impl Iterator) { let mut blocks = VecDeque::from_iter(blocks); - tracing::trace!(target: LOG_TARGET, blocks = ?blocks.iter().map(|b| b.hash()), "Importing blocks retrieved using pov_recovery"); + tracing::trace!( + target: LOG_TARGET, + blocks = ?blocks.iter().map(|b| b.hash()), + "Importing blocks retrieved using pov_recovery", + ); let mut incoming_blocks = Vec::new(); 
diff --git a/cumulus/zombienet/tests/0002-pov_recovery.zndsl b/cumulus/zombienet/tests/0002-pov_recovery.zndsl index dc7095ced252d..5cc615e70d209 100644 --- a/cumulus/zombienet/tests/0002-pov_recovery.zndsl +++ b/cumulus/zombienet/tests/0002-pov_recovery.zndsl @@ -19,9 +19,9 @@ two: reports block height is at least 20 within 800 seconds # three: reports block height is at least 20 within 800 seconds eve: reports block height is at least 20 within 800 seconds -one: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds -two: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds -three: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds -eve: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds -charlie: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds -alice: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds +one: count of log lines containing "Importing blocks retrieved using pov_recovery" is greater than 19 within 10 seconds +two: count of log lines containing "Importing blocks retrieved using pov_recovery" is greater than 19 within 10 seconds +three: count of log lines containing "Importing blocks retrieved using pov_recovery" is greater than 19 within 10 seconds +eve: count of log lines containing "Importing blocks retrieved using pov_recovery" is greater than 19 within 10 seconds +charlie: count of log lines containing "Importing blocks retrieved using pov_recovery" is greater than 19 within 10 seconds +alice: count of log lines containing "Importing blocks retrieved using pov_recovery" is greater than 19 within 10 seconds diff --git a/cumulus/zombienet/tests/0009-elastic_pov_recovery.zndsl 
b/cumulus/zombienet/tests/0009-elastic_pov_recovery.zndsl index 5cca6120ff3a3..2614084a5a465 100644 --- a/cumulus/zombienet/tests/0009-elastic_pov_recovery.zndsl +++ b/cumulus/zombienet/tests/0009-elastic_pov_recovery.zndsl @@ -21,4 +21,4 @@ alice: parachain 2100 is registered within 300 seconds collator-elastic: reports block height is at least 40 within 225 seconds collator-elastic: count of log lines containing "set_validation_data inherent needs to be present in every block" is 0 within 10 seconds -recovery-target: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 35 within 10 seconds +recovery-target: count of log lines containing "Importing blocks retrieved using pov_recovery" is greater than 35 within 10 seconds From d79ebcf70ec88a0d6b33305e325db6af30846eda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 26 Mar 2025 22:42:16 +0100 Subject: [PATCH 024/312] Let's sleep longer --- .github/workflows/zombienet-reusable-preflight.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/zombienet-reusable-preflight.yml b/.github/workflows/zombienet-reusable-preflight.yml index 68c39cdebd95a..2158fef4e4e52 100644 --- a/.github/workflows/zombienet-reusable-preflight.yml +++ b/.github/workflows/zombienet-reusable-preflight.yml @@ -245,7 +245,7 @@ jobs: echo "::warning::No CI workflow runs found for this commit" exit 1 fi - sleep 10 + sleep 60 done #check if the build succeeded From aae6db9c56c5947ac4f1da9b55736bc0d6d87cc2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 27 Mar 2025 12:27:59 +0100 Subject: [PATCH 025/312] Use debug for logging --- cumulus/client/pov-recovery/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs index 2ffff3a9d0d4a..e8f2e636249ff 100644 --- a/cumulus/client/pov-recovery/src/lib.rs +++ 
b/cumulus/client/pov-recovery/src/lib.rs @@ -504,7 +504,7 @@ where fn import_blocks(&mut self, blocks: impl Iterator) { let mut blocks = VecDeque::from_iter(blocks); - tracing::trace!( + tracing::debug!( target: LOG_TARGET, blocks = ?blocks.iter().map(|b| b.hash()), "Importing blocks retrieved using pov_recovery", From f10615b884ed3c77d716aee24b54f3519060b1ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 27 Mar 2025 22:31:04 +0100 Subject: [PATCH 026/312] Initial support for ignoring trie nodes --- Cargo.lock | 5 + cumulus/client/consensus/proposer/Cargo.toml | 5 + cumulus/client/consensus/proposer/src/lib.rs | 46 ++-- .../basic-authorship/src/basic_authorship.rs | 102 +++++--- substrate/client/basic-authorship/src/lib.rs | 4 +- substrate/client/block-builder/src/lib.rs | 31 ++- .../api/proc-macro/src/impl_runtime_apis.rs | 4 + .../proc-macro/src/mock_impl_runtime_apis.rs | 4 + substrate/primitives/api/src/lib.rs | 9 +- substrate/primitives/trie/src/recorder.rs | 82 +++++- substrate/primitives/trie/src/trie_codec.rs | 241 +++++++++++++++++- 11 files changed, 455 insertions(+), 78 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 91eadbe796947..f85ac5809d30c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4297,6 +4297,11 @@ dependencies = [ "anyhow", "async-trait", "cumulus-primitives-parachain-inherent", + "sc-basic-authorship", + "sc-block-builder", + "sc-transaction-pool-api", + "sp-api 26.0.0", + "sp-blockchain", "sp-consensus", "sp-inherents", "sp-runtime 31.0.1", diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml index e391481bc4452..b98c77b3f891b 100644 --- a/cumulus/client/consensus/proposer/Cargo.toml +++ b/cumulus/client/consensus/proposer/Cargo.toml @@ -17,6 +17,11 @@ async-trait = { workspace = true } thiserror = { workspace = true } # Substrate +sc-basic-authorship = { workspace = true } +sc-block-builder = { workspace = true } +sc-transaction-pool-api = { workspace = 
true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/proposer/src/lib.rs b/cumulus/client/consensus/proposer/src/lib.rs index 4a5a071991e3a..ceba852cad753 100644 --- a/cumulus/client/consensus/proposer/src/lib.rs +++ b/cumulus/client/consensus/proposer/src/lib.rs @@ -21,13 +21,16 @@ //! This utility is designed to be composed within any collator consensus algorithm. use async_trait::async_trait; - use cumulus_primitives_parachain_inherent::ParachainInherentData; -use sp_consensus::{EnableProofRecording, Environment, Proposal, Proposer as SubstrateProposer}; +use sc_basic_authorship::{ProposeArgs, ProposerFactory}; +use sc_block_builder::BlockBuilderApi; +use sc_transaction_pool_api::TransactionPool; +use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi}; +use sp_blockchain::HeaderBackend; +use sp_consensus::{EnableProofRecording, Environment, Proposal}; use sp_inherents::InherentData; use sp_runtime::{traits::Block as BlockT, Digest}; use sp_state_machine::StorageProof; - use std::{fmt::Debug, time::Duration}; /// Errors that can occur when proposing a parachain block. @@ -80,39 +83,24 @@ pub trait ProposerInterface { ) -> Result>, Error>; } -/// A simple wrapper around a Substrate proposer for creating collations. -pub struct Proposer { - inner: T, - _marker: std::marker::PhantomData, -} - -impl Proposer { - /// Create a new Cumulus [`Proposer`]. 
- pub fn new(inner: T) -> Self { - Proposer { inner, _marker: std::marker::PhantomData } - } -} - #[async_trait] -impl ProposerInterface for Proposer +impl ProposerInterface for ProposerFactory where - B: sp_runtime::traits::Block, - T: Environment + Send, - T::Error: Send + Sync + 'static, - T::Proposer: SubstrateProposer, - >::Error: Send + Sync + 'static, + A: TransactionPool + 'static, + C: HeaderBackend + ProvideRuntimeApi + CallApiAt + Send + Sync + 'static, + C::Api: ApiExt + BlockBuilderApi, + Block: sp_runtime::traits::Block, { async fn propose( &mut self, - parent_header: &B::Header, + parent_header: &Block::Header, paras_inherent_data: &ParachainInherentData, other_inherent_data: InherentData, inherent_digests: Digest, max_duration: Duration, block_size_limit: Option, - ) -> Result>, Error> { + ) -> Result>, Error> { let proposer = self - .inner .init(parent_header) .await .map_err(|e| Error::proposer_creation(anyhow::Error::new(e)))?; @@ -126,7 +114,13 @@ where .map_err(|e| Error::proposing(anyhow::Error::new(e)))?; proposer - .propose(inherent_data, inherent_digests, max_duration, block_size_limit) + .propose(ProposeArgs { + inherent_data, + inherent_digests, + max_duration, + block_size_limit, + ignored_nodes_by_proof_recording: None, + }) .await .map(Some) .map_err(|e| Error::proposing(anyhow::Error::new(e)).into()) diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index b3519f47a158c..372c1a7e738bd 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -30,7 +30,7 @@ use log::{debug, error, info, trace, warn}; use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool, TxInvalidityReportMap}; -use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi}; +use 
sp_api::{ApiExt, CallApiAt, ProofRecorder, ProvideRuntimeApi}; use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend}; use sp_consensus::{DisableProofRecording, EnableProofRecording, ProofRecording, Proposal}; use sp_core::traits::SpawnNamed; @@ -39,7 +39,7 @@ use sp_runtime::{ traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT}, Digest, ExtrinsicInclusionMode, Percent, SaturatedConversion, }; -use std::{marker::PhantomData, pin::Pin, sync::Arc, time}; +use std::{collections::HashSet, marker::PhantomData, pin::Pin, sync::Arc, time}; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_proposer_metrics::{EndProposingReason, MetricsLink as PrometheusMetrics}; @@ -283,56 +283,100 @@ where max_duration: time::Duration, block_size_limit: Option, ) -> Self::Proposal { + Self::propose( + self, + ProposeArgs { + inherent_data, + inherent_digests, + max_duration, + block_size_limit, + ignored_nodes_by_proof_recording: None, + }, + ) + .boxed() + } +} + +/// Arguments for [`Proposer::propose`]. +pub struct ProposeArgs { + /// The inherent data to pass to the block production. + pub inherent_data: InherentData, + /// The inherent digests to include in the produced block. + pub inherent_digests: Digest, + /// Max duration for building the block. + pub max_duration: time::Duration, + /// Optional size limit for the produced block. + /// + /// When set, block production ends before hitting this limit. The limit includes the storage + /// proof, when proof recording is activated. + pub block_size_limit: Option, + /// Hashes of trie nodes that should not be recorded. + /// + /// Only applies when proof recording is enabled. + pub ignored_nodes_by_proof_recording: Option>, +} + +/// If the block is full we will attempt to push at most +/// this number of transactions before quitting for real. +/// It allows us to increase block utilization. 
+const MAX_SKIPPED_TRANSACTIONS: usize = 8; + +impl Proposer +where + A: TransactionPool + 'static, + Block: BlockT, + C: HeaderBackend + ProvideRuntimeApi + CallApiAt + Send + Sync + 'static, + C::Api: ApiExt + BlockBuilderApi, + PR: ProofRecording, +{ + /// Propose a new block. + pub async fn propose( + self, + args: ProposeArgs, + ) -> Result, sp_blockchain::Error> { let (tx, rx) = oneshot::channel(); let spawn_handle = self.spawn_handle.clone(); + // Spawn on a new thread, because block production is a blocking operation. spawn_handle.spawn_blocking( "basic-authorship-proposer", None, - Box::pin(async move { - // leave some time for evaluation and block finalization (33%) - let deadline = (self.now)() + max_duration - max_duration / 3; - let res = self - .propose_with(inherent_data, inherent_digests, deadline, block_size_limit) - .await; + async move { + let res = self.propose_with(args).await; if tx.send(res).is_err() { trace!( target: LOG_TARGET, "Could not send block production result to proposer!" ); } - }), + } + .boxed(), ); - async move { rx.await? }.boxed() + rx.await?.map_err(Into::into) } -} -/// If the block is full we will attempt to push at most -/// this number of transactions before quitting for real. -/// It allows us to increase block utilization. 
-const MAX_SKIPPED_TRANSACTIONS: usize = 8; - -impl Proposer -where - A: TransactionPool, - Block: BlockT, - C: HeaderBackend + ProvideRuntimeApi + CallApiAt + Send + Sync + 'static, - C::Api: ApiExt + BlockBuilderApi, - PR: ProofRecording, -{ async fn propose_with( self, - inherent_data: InherentData, - inherent_digests: Digest, - deadline: time::Instant, - block_size_limit: Option, + ProposeArgs { + inherent_data, + inherent_digests, + max_duration, + block_size_limit, + ignored_nodes_by_proof_recording, + }: ProposeArgs, ) -> Result, sp_blockchain::Error> { + // leave some time for evaluation and block finalization (33%) + let deadline = (self.now)() + max_duration - max_duration / 3; let block_timer = time::Instant::now(); let mut block_builder = BlockBuilderBuilder::new(&*self.client) .on_parent_block(self.parent_hash) .with_parent_block_number(self.parent_number) - .with_proof_recording(PR::ENABLED) + .with_proof_recorder(PR::ENABLED.then(|| { + ProofRecorder::::with_ignored_nodes( + ignored_nodes_by_proof_recording.unwrap_or_default(), + ) + })) .with_inherent_digests(inherent_digests) .build()?; diff --git a/substrate/client/basic-authorship/src/lib.rs b/substrate/client/basic-authorship/src/lib.rs index 13c75fd08c3c8..a5996793f6839 100644 --- a/substrate/client/basic-authorship/src/lib.rs +++ b/substrate/client/basic-authorship/src/lib.rs @@ -72,4 +72,6 @@ mod basic_authorship; -pub use crate::basic_authorship::{Proposer, ProposerFactory, DEFAULT_BLOCK_SIZE_LIMIT}; +pub use crate::basic_authorship::{ + ProposeArgs, Proposer, ProposerFactory, DEFAULT_BLOCK_SIZE_LIMIT, +}; diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index d02d0e3218051..0d578c118567c 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -29,8 +29,8 @@ use codec::Encode; use sp_api::{ - ApiExt, ApiRef, CallApiAt, Core, ProvideRuntimeApi, StorageChanges, StorageProof, - 
TransactionOutcome, + ApiExt, ApiRef, CallApiAt, Core, ProofRecorder, ProvideRuntimeApi, StorageChanges, + StorageProof, TransactionOutcome, }; use sp_blockchain::{ApplyExtrinsicFailed, Error, HeaderBackend}; use sp_core::traits::CallContext; @@ -99,7 +99,7 @@ where Ok(BlockBuilderBuilderStage2 { call_api_at: self.call_api_at, - enable_proof_recording: false, + proof_recorder: None, inherent_digests: Default::default(), parent_block: self.parent_block, parent_number, @@ -116,7 +116,7 @@ where ) -> BlockBuilderBuilderStage2<'a, B, C> { BlockBuilderBuilderStage2 { call_api_at: self.call_api_at, - enable_proof_recording: false, + proof_recorder: None, inherent_digests: Default::default(), parent_block: self.parent_block, parent_number, @@ -130,7 +130,7 @@ where /// [`BlockBuilderBuilder::new`] needs to be used. pub struct BlockBuilderBuilderStage2<'a, B: BlockT, C> { call_api_at: &'a C, - enable_proof_recording: bool, + proof_recorder: Option>, inherent_digests: Digest, parent_block: B::Hash, parent_number: NumberFor, @@ -139,13 +139,19 @@ pub struct BlockBuilderBuilderStage2<'a, B: BlockT, C> { impl<'a, B: BlockT, C> BlockBuilderBuilderStage2<'a, B, C> { /// Enable proof recording for the block builder. pub fn enable_proof_recording(mut self) -> Self { - self.enable_proof_recording = true; + self.proof_recorder = Some(Default::default()); self } /// Enable/disable proof recording for the block builder. pub fn with_proof_recording(mut self, enable: bool) -> Self { - self.enable_proof_recording = enable; + self.proof_recorder = enable.then(|| Default::default()); + self + } + + /// Enable/disable proof recording for the block builder using the given proof recorder. 
+ pub fn with_proof_recorder(mut self, proof_recorder: Option>) -> Self { + self.proof_recorder = proof_recorder; self } @@ -165,7 +171,7 @@ impl<'a, B: BlockT, C> BlockBuilderBuilderStage2<'a, B, C> { self.call_api_at, self.parent_block, self.parent_number, - self.enable_proof_recording, + self.proof_recorder, self.inherent_digests, ) } @@ -221,7 +227,7 @@ where call_api_at: &'a C, parent_hash: Block::Hash, parent_number: NumberFor, - record_proof: bool, + proof_recorder: Option>, inherent_digests: Digest, ) -> Result { let header = <::Header as HeaderT>::new( @@ -236,11 +242,8 @@ where let mut api = call_api_at.runtime_api(); - if record_proof { - api.record_proof(); - let recorder = api - .proof_recorder() - .expect("Proof recording is enabled in the line above; qed."); + if let Some(recorder) = proof_recorder { + api.set_proof_recorder(recorder.clone()); api.register_extension(ProofSizeExt::new(recorder)); } diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index 5c9448da2bc7e..01caff7629e4f 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -325,6 +325,10 @@ fn generate_runtime_api_base_structures() -> Result { self.recorder = std::option::Option::Some(std::default::Default::default()); } + fn set_proof_recorder(&mut self, recorder: #crate_::ProofRecorder) { + self.recorder = std::option::Option::Some(recorder); + } + fn proof_recorder(&self) -> std::option::Option<#crate_::ProofRecorder> { std::clone::Clone::clone(&self.recorder) } diff --git a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 1761e0ac9dbf4..503fcf23cc130 100644 --- a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -99,6 +99,10 @@ fn 
implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result) { + unimplemented!("`set_proof_recorder` not implemented for runtime api mocks") + } + fn extract_proof( &mut self, ) -> Option<#crate_::StorageProof> { diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index 8909d2b2e4861..d7ab924eee946 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -615,9 +615,16 @@ pub trait ApiExt { where Self: Sized; - /// Start recording all accessed trie nodes for generating proofs. + /// Start recording all accessed trie nodes. + /// + /// The recorded trie nodes can be converted into a proof using [`Self::extract_proof`]. fn record_proof(&mut self); + /// Start recording all accessed trie nodes using the given proof recorder. + /// + /// The recorded trie nodes can be converted into a proof using [`Self::extract_proof`]. + fn set_proof_recorder(&mut self, recorder: ProofRecorder); + /// Extract the recorded proof. /// /// This stops the proof recording. diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index 4ec13066ded7f..f8e1ab0769fa5 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -66,6 +66,9 @@ struct RecorderInner { /// /// Mapping: `Hash(Node) -> Node`. accessed_nodes: HashMap>, + + /// Nodes that should be ignored and not recorded. + ignored_nodes: HashSet, } impl Default for RecorderInner { @@ -74,6 +77,7 @@ impl Default for RecorderInner { recorded_keys: Default::default(), accessed_nodes: Default::default(), transactions: Vec::new(), + ignored_nodes: Default::default(), } } } @@ -107,10 +111,20 @@ impl Clone for Recorder { } impl Recorder { + /// Create a new instance with the given `ingored_nodes`. + /// + /// These ignored nodes are not recorded when accessed. 
+ pub fn with_ignored_nodes(ignored_nodes: HashSet) -> Self { + Self { + inner: Arc::new(Mutex::new(RecorderInner { ignored_nodes, ..Default::default() })), + ..Default::default() + } + } + /// Returns [`RecordedForKey`] per recorded key per trie. /// /// There are multiple tries when working with e.g. child tries. - pub fn recorded_keys(&self) -> HashMap<::Out, HashMap, RecordedForKey>> { + pub fn recorded_keys(&self) -> HashMap, RecordedForKey>> { self.inner.lock().recorded_keys.clone() } @@ -318,12 +332,21 @@ impl<'a, H: Hasher> trie_db::TrieRecorder for TrieRecorder<'a, H> { TrieAccess::NodeOwned { hash, node_owned } => { tracing::trace!( target: LOG_TARGET, - hash = ?hash, + ?hash, "Recording node", ); let inner = self.inner.deref_mut(); + if inner.ignored_nodes.contains(&hash) { + tracing::trace!( + target: LOG_TARGET, + ?hash, + "Ignoring node", + ); + return + } + inner.accessed_nodes.entry(hash).or_insert_with(|| { let node = node_owned.to_encoded::>(); @@ -345,6 +368,15 @@ impl<'a, H: Hasher> trie_db::TrieRecorder for TrieRecorder<'a, H> { let inner = self.inner.deref_mut(); + if inner.ignored_nodes.contains(&hash) { + tracing::trace!( + target: LOG_TARGET, + ?hash, + "Ignoring node", + ); + return + } + inner.accessed_nodes.entry(hash).or_insert_with(|| { let node = encoded_node.into_owned(); @@ -367,6 +399,15 @@ impl<'a, H: Hasher> trie_db::TrieRecorder for TrieRecorder<'a, H> { let inner = self.inner.deref_mut(); + if inner.ignored_nodes.contains(&hash) { + tracing::trace!( + target: LOG_TARGET, + ?hash, + "Ignoring value", + ); + return + } + inner.accessed_nodes.entry(hash).or_insert_with(|| { let value = value.into_owned(); @@ -730,4 +771,41 @@ mod tests { assert!(matches!(trie_recorder.trie_nodes_recorded_for_key(key), RecordedForKey::None)); } } + + #[test] + fn recorder_ignoring_nodes_works() { + let (db, root) = create_trie::(TEST_DATA); + + let recorder = Recorder::default(); + + { + let mut trie_recorder = recorder.as_trie_recorder(root); + 
let trie = TrieDBBuilder::::new(&db, &root) + .with_recorder(&mut trie_recorder) + .build(); + + for (key, data) in TEST_DATA { + assert_eq!(data.to_vec(), trie.get(&key).unwrap().unwrap()); + } + } + + assert!(recorder.estimate_encoded_size() > 10); + let memory_db: MemoryDB = recorder.drain_storage_proof().into_memory_db(); + + let recorder = + Recorder::with_ignored_nodes(memory_db.keys().into_keys().collect::>()); + + { + let mut trie_recorder = recorder.as_trie_recorder(root); + let trie = TrieDBBuilder::::new(&db, &root) + .with_recorder(&mut trie_recorder) + .build(); + + for (key, data) in TEST_DATA { + assert_eq!(data.to_vec(), trie.get(&key).unwrap().unwrap()); + } + } + + assert_eq!(0, recorder.estimate_encoded_size()); + } } diff --git a/substrate/primitives/trie/src/trie_codec.rs b/substrate/primitives/trie/src/trie_codec.rs index 65b4f50535990..4599bf7a98c90 100644 --- a/substrate/primitives/trie/src/trie_codec.rs +++ b/substrate/primitives/trie/src/trie_codec.rs @@ -20,7 +20,9 @@ //! This uses compact proof from trie crate and extends //! it to substrate specific layout and child trie system. -use crate::{CompactProof, HashDBT, TrieConfiguration, TrieHash, EMPTY_PREFIX}; +use crate::{ + CompactProof, HashDBT, TrieConfiguration, TrieHash, EMPTY_PREFIX, +}; use alloc::{boxed::Box, vec::Vec}; use trie_db::{CError, Trie}; @@ -69,10 +71,8 @@ where let (top_root, _nb_used) = trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; // Only check root if expected root is passed as argument. 
- if let Some(expected_root) = expected_root { - if expected_root != &top_root { - return Err(Error::RootMismatch(top_root, *expected_root)) - } + if let Some(expected_root) = expected_root.filter(|expected| *expected != &top_root) { + return Err(Error::RootMismatch(top_root, *expected_root)) } let mut child_tries = Vec::new(); @@ -205,3 +205,234 @@ where Ok(CompactProof { encoded_nodes: compact_proof }) } + +#[cfg(test)] +mod tests { + use crate::{delta_trie_root, HashDB, StorageProof}; + + use super::*; + use codec::Encode; + use hash_db::{AsHashDB, Hasher}; + use sp_core::{Blake2Hasher, H256}; + use std::collections::HashSet; + use trie_db::{Bytes, DBValue, Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut}; + + type MemoryDB = crate::MemoryDB; + type Layout = crate::LayoutV1; + type Recorder = crate::recorder::Recorder; + + fn create_trie(num_keys: u32) -> (MemoryDB, TrieHash) { + let mut db = MemoryDB::default(); + let mut root = Default::default(); + + { + let mut trie = TrieDBMutBuilder::::new(&mut db, &mut root).build(); + for i in 0..num_keys { + trie.insert( + &i.encode(), + &vec![1u8; 64].into_iter().chain(i.encode()).collect::>(), + ) + .expect("Inserts data"); + } + } + + (db, root) + } + + struct Overlay<'a> { + db: &'a MemoryDB, + write: MemoryDB, + } + + impl hash_db::HashDB for Overlay<'_> { + fn get( + &self, + key: &::Out, + prefix: hash_db::Prefix, + ) -> Option { + HashDB::get(self.db, key, prefix) + } + + fn contains( + &self, + key: &::Out, + prefix: hash_db::Prefix, + ) -> bool { + HashDB::contains(self.db, key, prefix) + } + + fn insert( + &mut self, + prefix: hash_db::Prefix, + value: &[u8], + ) -> ::Out { + self.write.insert(prefix, value) + } + + fn emplace( + &mut self, + key: ::Out, + prefix: hash_db::Prefix, + value: DBValue, + ) { + self.write.emplace(key, prefix, value); + } + + fn remove( + &mut self, + key: &::Out, + prefix: hash_db::Prefix, + ) { + self.write.remove(key, prefix); + } + } + + impl AsHashDB for Overlay<'_> { 
+ fn as_hash_db(&self) -> &dyn HashDBT { + self + } + + fn as_hash_db_mut<'a>(&'a mut self) -> &'a mut (dyn HashDBT + 'a) { + self + } + } + + fn emulate_block_building( + state: &MemoryDB, + root: H256, + read_keys: &[u32], + write_keys: &[u32], + nodes_to_ignore: HashSet, + ) -> (Recorder, MemoryDB, H256) { + let recorder = Recorder::with_ignored_nodes(nodes_to_ignore); + + { + let mut trie_recorder = recorder.as_trie_recorder(root); + let trie = TrieDBBuilder::::new(state, &root) + .with_recorder(&mut trie_recorder) + .build(); + + for key in read_keys { + trie.get(&key.encode()).unwrap().unwrap(); + } + } + + let mut overlay = Overlay { db: state, write: Default::default() }; + + let new_root = { + let mut trie_recorder = recorder.as_trie_recorder(root); + delta_trie_root::( + &mut overlay, + root, + write_keys.iter().map(|k| { + ( + k.encode(), + Some(vec![2u8; 64].into_iter().chain(k.encode()).collect::>()), + ) + }), + Some(&mut trie_recorder), + None, + ) + .unwrap() + }; + + (recorder, overlay.write, new_root) + } + + fn build_known_nodes_list(recorder: &Recorder, transaction: &MemoryDB) -> HashSet { + recorder + .to_storage_proof() + .into_iter_nodes() + .map(|n| Blake2Hasher::hash(&n)) + .chain(transaction.clone().drain().into_iter().map(|d| Blake2Hasher::hash(&(d.1).0))) + .collect() + } + + #[test] + fn ensure_multiple_tries_encode_compact_works() { + let (mut db, root) = create_trie(100); + + let mut nodes_to_ignore = HashSet::new(); + let (recorder, transaction, root1) = emulate_block_building( + &db, + root, + &[2, 4, 5, 6, 7, 8], + &[9, 10, 11, 12, 13, 14], + nodes_to_ignore.clone(), + ); + + db.consolidate(transaction.clone()); + nodes_to_ignore.extend(build_known_nodes_list(&recorder, &transaction)); + + let (recorder2, transaction, root2) = emulate_block_building( + &db, + root1, + &[9, 10, 11, 12, 13, 14], + &[15, 16, 17, 18, 19, 20], + nodes_to_ignore.clone(), + ); + + db.consolidate(transaction.clone()); + 
nodes_to_ignore.extend(build_known_nodes_list(&recorder2, &transaction)); + + let (recorder3, _, root3) = emulate_block_building( + &db, + root2, + &[20, 30, 40, 41, 42], + &[80, 90, 91, 92, 93], + nodes_to_ignore, + ); + + let proof = StorageProof::merge([ + recorder.to_storage_proof(), + recorder2.to_storage_proof(), + recorder3.to_storage_proof(), + ]); + + let compact_proof = encode_compact::(&proof.to_memory_db(), &root).unwrap(); + + assert!(proof.encoded_size() > compact_proof.encoded_size()); + + let mut res_db = crate::MemoryDB::::new(&[]); + decode_compact::( + &mut res_db, + compact_proof.iter_compact_encoded_nodes(), + Some(&root), + ) + .unwrap(); + + let (_, transaction, root1_proof) = emulate_block_building( + &res_db, + root, + &[2, 4, 5, 6, 7, 8], + &[9, 10, 11, 12, 13, 14], + Default::default(), + ); + + assert_eq!(root1, root1_proof); + + res_db.consolidate(transaction); + + let (_, transaction2, root2_proof) = emulate_block_building( + &res_db, + root1, + &[9, 10, 11, 12, 13, 14], + &[15, 16, 17, 18, 19, 20], + Default::default(), + ); + + assert_eq!(root2, root2_proof); + + res_db.consolidate(transaction2); + + let (_, _, root3_proof) = emulate_block_building( + &res_db, + root2, + &[20, 30, 40, 41, 42], + &[80, 90, 91, 92, 93], + Default::default(), + ); + + assert_eq!(root3, root3_proof); + } +} From c64ac051a289666256c2f4d2c95c1eee315df2d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 31 Mar 2025 16:29:26 +0200 Subject: [PATCH 027/312] Introduce `IgnoredNodes` type to hold the ignored nodes --- Cargo.lock | 1 + substrate/client/basic-authorship/Cargo.toml | 1 + .../basic-authorship/src/basic_authorship.rs | 102 ++++++++++-------- substrate/client/basic-authorship/src/lib.rs | 3 +- substrate/primitives/trie/src/recorder.rs | 71 ++++++++++-- substrate/primitives/trie/src/trie_codec.rs | 33 +++--- 6 files changed, 140 insertions(+), 71 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 
f85ac5809d30c..fb3f9cfe00682 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18863,6 +18863,7 @@ dependencies = [ "sp-core 28.0.0", "sp-inherents", "sp-runtime 31.0.1", + "sp-trie 29.0.0", "substrate-prometheus-endpoint", "substrate-test-runtime-client", ] diff --git a/substrate/client/basic-authorship/Cargo.toml b/substrate/client/basic-authorship/Cargo.toml index cc2e0d8d04dfe..07bf33cb82cb9 100644 --- a/substrate/client/basic-authorship/Cargo.toml +++ b/substrate/client/basic-authorship/Cargo.toml @@ -31,6 +31,7 @@ sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } [dev-dependencies] parking_lot = { workspace = true, default-features = true } diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index 372c1a7e738bd..16b06a984c313 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -27,7 +27,9 @@ use futures::{ future::{Future, FutureExt}, }; use log::{debug, error, info, trace, warn}; +use prometheus_endpoint::Registry as PrometheusRegistry; use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder}; +use sc_proposer_metrics::{EndProposingReason, MetricsLink as PrometheusMetrics}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool, TxInvalidityReportMap}; use sp_api::{ApiExt, CallApiAt, ProofRecorder, ProvideRuntimeApi}; @@ -39,10 +41,8 @@ use sp_runtime::{ traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT}, Digest, ExtrinsicInclusionMode, Percent, SaturatedConversion, }; -use std::{collections::HashSet, marker::PhantomData, pin::Pin, sync::Arc, time}; - -use 
prometheus_endpoint::Registry as PrometheusRegistry; -use sc_proposer_metrics::{EndProposingReason, MetricsLink as PrometheusMetrics}; +use sp_trie::recorder::IgnoredNodes; +use std::{marker::PhantomData, pin::Pin, sync::Arc, time}; /// Default block size limit in bytes used by [`Proposer`]. /// @@ -310,10 +310,22 @@ pub struct ProposeArgs { /// When set, block production ends before hitting this limit. The limit includes the storage /// proof, when proof recording is activated. pub block_size_limit: Option, - /// Hashes of trie nodes that should not be recorded. + /// Trie nodes that should not be recorded. /// /// Only applies when proof recording is enabled. - pub ignored_nodes_by_proof_recording: Option>, + pub ignored_nodes_by_proof_recording: Option>, +} + +impl Default for ProposeArgs { + fn default() -> Self { + Self { + inherent_data: Default::default(), + inherent_digests: Default::default(), + max_duration: Default::default(), + block_size_limit: None, + ignored_nodes_by_proof_recording: None, + } + } } /// If the block is full we will attempt to push at most @@ -655,7 +667,7 @@ mod tests { use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionSource}; use sp_api::Core; use sp_blockchain::HeaderBackend; - use sp_consensus::{BlockOrigin, Environment, Proposer}; + use sp_consensus::{BlockOrigin, Environment}; use sp_runtime::{generic::BlockId, traits::NumberFor, Perbill}; use substrate_test_runtime_client::{ prelude::*, @@ -731,10 +743,11 @@ mod tests { // when let deadline = time::Duration::from_secs(3); - let block = - block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) - .map(|r| r.block) - .unwrap(); + let block = block_on( + proposer.propose(ProposeArgs { max_duration: deadline, ..Default::default() }), + ) + .map(|r| r.block) + .unwrap(); // then // block should have some extrinsics although we have some more in the pool. 
@@ -773,7 +786,7 @@ mod tests { ); let deadline = time::Duration::from_secs(1); - block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) + block_on(proposer.propose(ProposeArgs { max_duration: deadline, ..Default::default() })) .map(|r| r.block) .unwrap(); } @@ -812,9 +825,10 @@ mod tests { ); let deadline = time::Duration::from_secs(9); - let proposal = - block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) - .unwrap(); + let proposal = block_on( + proposer.propose(ProposeArgs { max_duration: deadline, ..Default::default() }), + ) + .unwrap(); assert_eq!(proposal.block.extrinsics().len(), 1); @@ -877,10 +891,11 @@ mod tests { // when let deadline = time::Duration::from_secs(900); - let block = - block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) - .map(|r| r.block) - .unwrap(); + let block = block_on( + proposer.propose(ProposeArgs { max_duration: deadline, ..Default::default() }), + ) + .map(|r| r.block) + .unwrap(); // then // block should have some extrinsics although we have some more in the pool. 
@@ -989,12 +1004,11 @@ mod tests { // Give it enough time let deadline = time::Duration::from_secs(300); - let block = block_on(proposer.propose( - Default::default(), - Default::default(), - deadline, - Some(block_limit), - )) + let block = block_on(proposer.propose(ProposeArgs { + max_duration: deadline, + block_size_limit: Some(block_limit), + ..Default::default() + })) .map(|r| r.block) .unwrap(); @@ -1003,10 +1017,11 @@ mod tests { let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap(); - let block = - block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) - .map(|r| r.block) - .unwrap(); + let block = block_on( + proposer.propose(ProposeArgs { max_duration: deadline, ..Default::default() }), + ) + .map(|r| r.block) + .unwrap(); // Without a block limit we should include all of them assert_eq!(block.extrinsics().len(), extrinsics_num); @@ -1032,12 +1047,11 @@ mod tests { .unwrap(); builder.estimate_block_size(true) + extrinsics[0].encoded_size() }; - let block = block_on(proposer.propose( - Default::default(), - Default::default(), - deadline, - Some(block_limit), - )) + let block = block_on(proposer.propose(ProposeArgs { + max_duration: deadline, + block_size_limit: Some(block_limit), + ..Default::default() + })) .map(|r| r.block) .unwrap(); @@ -1107,10 +1121,11 @@ mod tests { // when // give it enough time so that deadline is never triggered. let deadline = time::Duration::from_secs(900); - let block = - block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) - .map(|r| r.block) - .unwrap(); + let block = block_on( + proposer.propose(ProposeArgs { max_duration: deadline, ..Default::default() }), + ) + .map(|r| r.block) + .unwrap(); // then block should have all non-exhaust resources extrinsics (+ the first one). 
assert_eq!(block.extrinsics().len(), MAX_SKIPPED_TRANSACTIONS + 1); @@ -1185,10 +1200,11 @@ mod tests { }), ); - let block = - block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) - .map(|r| r.block) - .unwrap(); + let block = block_on( + proposer.propose(ProposeArgs { max_duration: deadline, ..Default::default() }), + ) + .map(|r| r.block) + .unwrap(); // then the block should have one or two transactions. This maybe random as they are // processed in parallel. The same signer and consecutive nonces for huge and tiny diff --git a/substrate/client/basic-authorship/src/lib.rs b/substrate/client/basic-authorship/src/lib.rs index a5996793f6839..b08b66e23aa13 100644 --- a/substrate/client/basic-authorship/src/lib.rs +++ b/substrate/client/basic-authorship/src/lib.rs @@ -58,7 +58,8 @@ //! //! // This `Proposer` allows us to create a block proposition. //! // The proposer will grab transactions from the transaction pool, and put them into the block. -//! let future = proposer.propose( +//! let future = Proposer::propose( +//! proposer, //! Default::default(), //! Default::default(), //! Duration::from_secs(2), diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index f8e1ab0769fa5..4981a0ce40313 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -20,9 +20,10 @@ //! Provides an implementation of the [`TrieRecorder`](trie_db::TrieRecorder) trait. It can be used //! to record storage accesses to the state to generate a [`StorageProof`]. -use crate::{NodeCodec, StorageProof}; +use crate::{GenericMemoryDB, NodeCodec, StorageProof}; use codec::Encode; use hash_db::Hasher; +use memory_db::KeyFunction; use parking_lot::{Mutex, MutexGuard}; use std::{ collections::{HashMap, HashSet}, @@ -38,6 +39,56 @@ use trie_db::{RecordedForKey, TrieAccess}; const LOG_TARGET: &str = "trie-recorder"; +/// A list of ignored nodes for [`Recorder`]. 
+/// +/// These nodes when passed to a recorder will be ignored and not recorded by the recorder. +#[derive(Clone)] +pub struct IgnoredNodes { + nodes: HashSet, +} + +impl Default for IgnoredNodes { + fn default() -> Self { + Self { nodes: HashSet::default() } + } +} + +impl IgnoredNodes { + /// Initialize from the given storage proof. + /// + /// So, all recorded nodes of the proof will be the ignored nodes. + pub fn from_storage_proof>(proof: &StorageProof) -> Self { + Self { nodes: proof.iter_nodes().map(|n| Hasher::hash(&n)).collect() } + } + + /// Initialize from the given memory db. + /// + /// All nodes that have a reference count > 0 will be used as ignored nodes. + pub fn from_memory_db, KF: KeyFunction>( + mut memory_db: GenericMemoryDB, + ) -> Self { + Self { + nodes: memory_db + .drain() + .into_iter() + // We do not want to add removed nodes. + .filter(|(_, (_, counter))| *counter > 0) + .map(|(_, (data, _))| Hasher::hash(&data)) + .collect(), + } + } + + /// Extend `self` with the other instance of ignored nodes. + pub fn extend(&mut self, other: &Self) { + self.nodes.extend(other.nodes.iter().cloned()); + } + + /// Returns `true` if the node is ignored. + pub fn is_ignored(&self, node: &H) -> bool { + self.nodes.contains(node) + } +} + /// Stores all the information per transaction. #[derive(Default)] struct Transaction { @@ -68,7 +119,7 @@ struct RecorderInner { accessed_nodes: HashMap>, /// Nodes that should be ignored and not recorded. - ignored_nodes: HashSet, + ignored_nodes: IgnoredNodes, } impl Default for RecorderInner { @@ -114,7 +165,7 @@ impl Recorder { /// Create a new instance with the given `ingored_nodes`. /// /// These ignored nodes are not recorded when accessed. 
- pub fn with_ignored_nodes(ignored_nodes: HashSet) -> Self { + pub fn with_ignored_nodes(ignored_nodes: IgnoredNodes) -> Self { Self { inner: Arc::new(Mutex::new(RecorderInner { ignored_nodes, ..Default::default() })), ..Default::default() @@ -338,7 +389,7 @@ impl<'a, H: Hasher> trie_db::TrieRecorder for TrieRecorder<'a, H> { let inner = self.inner.deref_mut(); - if inner.ignored_nodes.contains(&hash) { + if inner.ignored_nodes.is_ignored(&hash) { tracing::trace!( target: LOG_TARGET, ?hash, @@ -368,7 +419,7 @@ impl<'a, H: Hasher> trie_db::TrieRecorder for TrieRecorder<'a, H> { let inner = self.inner.deref_mut(); - if inner.ignored_nodes.contains(&hash) { + if inner.ignored_nodes.is_ignored(&hash) { tracing::trace!( target: LOG_TARGET, ?hash, @@ -399,7 +450,8 @@ impl<'a, H: Hasher> trie_db::TrieRecorder for TrieRecorder<'a, H> { let inner = self.inner.deref_mut(); - if inner.ignored_nodes.contains(&hash) { + // A value is also just a node. + if inner.ignored_nodes.is_ignored(&hash) { tracing::trace!( target: LOG_TARGET, ?hash, @@ -790,10 +842,11 @@ mod tests { } assert!(recorder.estimate_encoded_size() > 10); - let memory_db: MemoryDB = recorder.drain_storage_proof().into_memory_db(); + let ignored_nodes = IgnoredNodes::from_storage_proof::( + &recorder.drain_storage_proof(), + ); - let recorder = - Recorder::with_ignored_nodes(memory_db.keys().into_keys().collect::>()); + let recorder = Recorder::with_ignored_nodes(ignored_nodes); { let mut trie_recorder = recorder.as_trie_recorder(root); diff --git a/substrate/primitives/trie/src/trie_codec.rs b/substrate/primitives/trie/src/trie_codec.rs index 4599bf7a98c90..143c7e61b18ee 100644 --- a/substrate/primitives/trie/src/trie_codec.rs +++ b/substrate/primitives/trie/src/trie_codec.rs @@ -20,9 +20,7 @@ //! This uses compact proof from trie crate and extends //! it to substrate specific layout and child trie system. 
-use crate::{ - CompactProof, HashDBT, TrieConfiguration, TrieHash, EMPTY_PREFIX, -}; +use crate::{CompactProof, HashDBT, TrieConfiguration, TrieHash, EMPTY_PREFIX}; use alloc::{boxed::Box, vec::Vec}; use trie_db::{CError, Trie}; @@ -208,14 +206,13 @@ where #[cfg(test)] mod tests { - use crate::{delta_trie_root, HashDB, StorageProof}; + use crate::{delta_trie_root, recorder::IgnoredNodes, HashDB, StorageProof}; use super::*; use codec::Encode; - use hash_db::{AsHashDB, Hasher}; + use hash_db::AsHashDB; use sp_core::{Blake2Hasher, H256}; - use std::collections::HashSet; - use trie_db::{Bytes, DBValue, Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut}; + use trie_db::{DBValue, Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut}; type MemoryDB = crate::MemoryDB; type Layout = crate::LayoutV1; @@ -302,7 +299,7 @@ mod tests { root: H256, read_keys: &[u32], write_keys: &[u32], - nodes_to_ignore: HashSet, + nodes_to_ignore: IgnoredNodes, ) -> (Recorder, MemoryDB, H256) { let recorder = Recorder::with_ignored_nodes(nodes_to_ignore); @@ -339,20 +336,20 @@ mod tests { (recorder, overlay.write, new_root) } - fn build_known_nodes_list(recorder: &Recorder, transaction: &MemoryDB) -> HashSet { - recorder - .to_storage_proof() - .into_iter_nodes() - .map(|n| Blake2Hasher::hash(&n)) - .chain(transaction.clone().drain().into_iter().map(|d| Blake2Hasher::hash(&(d.1).0))) - .collect() + fn build_known_nodes_list(recorder: &Recorder, transaction: &MemoryDB) -> IgnoredNodes { + let mut ignored_nodes = + IgnoredNodes::from_storage_proof::(&recorder.to_storage_proof()); + + ignored_nodes.extend(&IgnoredNodes::from_memory_db::(transaction.clone())); + + ignored_nodes } #[test] fn ensure_multiple_tries_encode_compact_works() { let (mut db, root) = create_trie(100); - let mut nodes_to_ignore = HashSet::new(); + let mut nodes_to_ignore = IgnoredNodes::default(); let (recorder, transaction, root1) = emulate_block_building( &db, root, @@ -362,7 +359,7 @@ mod tests { ); 
db.consolidate(transaction.clone()); - nodes_to_ignore.extend(build_known_nodes_list(&recorder, &transaction)); + nodes_to_ignore.extend(&build_known_nodes_list(&recorder, &transaction)); let (recorder2, transaction, root2) = emulate_block_building( &db, @@ -373,7 +370,7 @@ mod tests { ); db.consolidate(transaction.clone()); - nodes_to_ignore.extend(build_known_nodes_list(&recorder2, &transaction)); + nodes_to_ignore.extend(&build_known_nodes_list(&recorder2, &transaction)); let (recorder3, _, root3) = emulate_block_building( &db, From 9f70dac4a6d85005b7085e7dafd0901dbe56d6af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 31 Mar 2025 21:33:23 +0200 Subject: [PATCH 028/312] Update cumulus/primitives/core/Cargo.toml Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> --- cumulus/primitives/core/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index 4ebf8517281ef..592026b824128 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } -tracing.workspace = true +tracing = { workspace = true } # Substrate sp-api = { workspace = true } From 55753f4a681952df7ab7bf3bc1439c27e1cb9405 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 31 Mar 2025 22:16:17 +0200 Subject: [PATCH 029/312] Try to get the tests working --- .../src/validate_block/tests.rs | 62 +++++++++++-------- cumulus/test/client/src/block_builder.rs | 42 +++++++++++-- cumulus/test/service/src/lib.rs | 4 +- substrate/primitives/api/src/lib.rs | 3 + 4 files changed, 79 insertions(+), 32 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 
4d9abcc2b39f1..ea8ab540b2a1a 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -14,7 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::*; +use crate::{validate_block::MemoryOptimizedValidationParams, *}; use codec::{Decode, DecodeAll, Encode}; use cumulus_primitives_core::{ParachainBlockData, PersistedValidationData}; use cumulus_test_client::{ @@ -31,12 +31,11 @@ use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use polkadot_parachain_primitives::primitives::ValidationResult; #[cfg(feature = "experimental-ump-signals")] use relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; - +use sp_core::H256; +use sp_runtime::traits::{BlakeTwo256, Block as BlockT, Header as HeaderT}; +use sp_trie::{recorder::IgnoredNodes, StorageProof}; use std::{env, process::Command}; -use crate::validate_block::MemoryOptimizedValidationParams; - fn call_validate_block_validation_result( validation_code: &[u8], parent_head: Header, @@ -100,7 +99,7 @@ fn create_test_client() -> (Client, Header) { fn create_elastic_scaling_test_client() -> (Client, Header) { let mut builder = TestClientBuilder::new(); builder.genesis_init_mut().wasm = Some( - test_runtime::elastic_scaling::WASM_BINARY + test_runtime::elastic_scaling_multi_block_slot::WASM_BINARY .expect("You need to build the WASM binaries to run the tests!") .to_vec(), ); @@ -153,14 +152,14 @@ fn build_multiple_blocks_with_witness( mut sproof_builder: RelayStateSproofBuilder, num_blocks: usize, ) -> TestBlockData { - sproof_builder.para_id = test_runtime::PARACHAIN_ID.into(); - sproof_builder.included_para_head = Some(HeadData(parent_head.encode())); - sproof_builder.current_slot = (std::time::SystemTime::now() + let timestamp = std::time::SystemTime::now() 
.duration_since(std::time::SystemTime::UNIX_EPOCH) .expect("Time is always after UNIX_EPOCH; qed") - .as_millis() as u64 / - 6000) - .into(); + .as_millis() as u64; + let parent_head_root = *parent_head.state_root(); + sproof_builder.para_id = test_runtime::PARACHAIN_ID.into(); + sproof_builder.included_para_head = Some(HeadData(parent_head.encode())); + sproof_builder.current_slot = (timestamp / 6000).into(); let validation_data = PersistedValidationData { relay_parent_number: 1, @@ -170,32 +169,46 @@ fn build_multiple_blocks_with_witness( let mut persisted_validation_data = None; let mut blocks = Vec::new(); - //TODO: Fix this, not correct. - let mut proof = None; + let mut proof = StorageProof::empty(); + let mut ignored_nodes = IgnoredNodes::::default(); for _ in 0..num_blocks { let cumulus_test_client::BlockBuilderAndSupportData { block_builder, persisted_validation_data: p_v_data, - } = client.init_block_builder(Some(validation_data.clone()), sproof_builder.clone()); + } = client.init_block_builder_with_ignored_nodes( + parent_head.hash(), + Some(validation_data.clone()), + sproof_builder.clone(), + timestamp, + ignored_nodes.clone(), + ); persisted_validation_data = Some(p_v_data); - let (build_blocks, build_proof) = - block_builder.build_parachain_block(*parent_head.state_root()).into_inner(); + let built_block = block_builder.build().unwrap(); - proof.get_or_insert_with(|| build_proof); + ignored_nodes.extend(&IgnoredNodes::from_storage_proof::( + &built_block.proof.clone().unwrap(), + )); + ignored_nodes + .extend(&IgnoredNodes::from_memory_db(built_block.storage_changes.transaction)); + proof = StorageProof::merge([proof, built_block.proof.unwrap()]); - blocks.extend(build_blocks.into_iter().inspect(|b| { - futures::executor::block_on(client.import_as_best(BlockOrigin::Own, b.clone())) - .unwrap(); + futures::executor::block_on( + client.import_as_best(BlockOrigin::Own, built_block.block.clone()), + ) + .unwrap(); - parent_head = b.header.clone(); - })); 
+ parent_head = built_block.block.header.clone(); + + blocks.push(built_block.block); } + let proof = proof.into_compact_proof::(parent_head_root).unwrap(); + TestBlockData { - block: ParachainBlockData::new(blocks, proof.unwrap()), + block: ParachainBlockData::new(blocks, proof), validation_data: persisted_validation_data.unwrap(), } } @@ -217,7 +230,6 @@ fn validate_block_works() { } #[test] -#[ignore = "Needs another pr to work"] fn validate_multiple_blocks_work() { sp_tracing::try_init_simple(); diff --git a/cumulus/test/client/src/block_builder.rs b/cumulus/test/client/src/block_builder.rs index 63796a665c7de..54ef442d9ce91 100644 --- a/cumulus/test/client/src/block_builder.rs +++ b/cumulus/test/client/src/block_builder.rs @@ -22,7 +22,7 @@ use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use cumulus_test_runtime::{Block, GetLastTimestamp, Hash, Header}; use polkadot_primitives::{BlockNumber as PBlockNumber, Hash as PHash}; use sc_block_builder::BlockBuilderBuilder; -use sp_api::ProvideRuntimeApi; +use sp_api::{ProofRecorder, ProofRecorderIgnoredNodes, ProvideRuntimeApi}; use sp_consensus_aura::{AuraApi, Slot}; use sp_runtime::{traits::Header as HeaderT, Digest, DigestItem}; @@ -61,6 +61,19 @@ pub trait InitBlockBuilder { relay_sproof_builder: RelayStateSproofBuilder, ) -> BlockBuilderAndSupportData; + /// Init a specific block builder at a specific block that works for the test runtime. + /// + /// Same as [`InitBlockBuilder::init_block_builder_with_timestamp`] besides that it takes + /// `ignored_nodes` that instruct the proof recorder to not record these nodes. + fn init_block_builder_with_ignored_nodes( + &self, + at: Hash, + validation_data: Option>, + relay_sproof_builder: RelayStateSproofBuilder, + timestamp: u64, + ignored_nodes: ProofRecorderIgnoredNodes, + ) -> BlockBuilderAndSupportData; + /// Init a specific block builder that works for the test runtime. 
/// /// Same as [`InitBlockBuilder::init_block_builder`] besides that it takes a @@ -81,6 +94,7 @@ fn init_block_builder( validation_data: Option>, mut relay_sproof_builder: RelayStateSproofBuilder, timestamp: u64, + ignored_nodes: Option>, ) -> BlockBuilderAndSupportData<'_> { let slot: Slot = (timestamp / client.runtime_api().slot_duration(at).unwrap().as_millis()).into(); @@ -97,7 +111,9 @@ fn init_block_builder( .on_parent_block(at) .fetch_parent_block_number(client) .unwrap() - .enable_proof_recording() + .with_proof_recorder(Some(ProofRecorder::::with_ignored_nodes( + ignored_nodes.unwrap_or_default(), + ))) .with_inherent_digests(aura_pre_digest) .build() .expect("Creates new block builder for test runtime"); @@ -166,7 +182,25 @@ impl InitBlockBuilder for Client { last_timestamp + self.runtime_api().slot_duration(at).unwrap().as_millis() }; - init_block_builder(self, at, validation_data, relay_sproof_builder, timestamp) + init_block_builder(self, at, validation_data, relay_sproof_builder, timestamp, None) + } + + fn init_block_builder_with_ignored_nodes( + &self, + at: Hash, + validation_data: Option>, + relay_sproof_builder: RelayStateSproofBuilder, + timestamp: u64, + ignored_nodes: ProofRecorderIgnoredNodes, + ) -> BlockBuilderAndSupportData { + init_block_builder( + self, + at, + validation_data, + relay_sproof_builder, + timestamp, + Some(ignored_nodes), + ) } fn init_block_builder_with_timestamp( @@ -176,7 +210,7 @@ impl InitBlockBuilder for Client { relay_sproof_builder: RelayStateSproofBuilder, timestamp: u64, ) -> BlockBuilderAndSupportData { - init_block_builder(self, at, validation_data, relay_sproof_builder, timestamp) + init_block_builder(self, at, validation_data, relay_sproof_builder, timestamp, None) } } diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index e8870422a6de5..0dd0dfc50a2ea 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -34,7 +34,6 @@ use 
cumulus_client_consensus_aura::{ }, ImportQueueParams, }; -use cumulus_client_consensus_proposer::Proposer; use prometheus::Registry; use runtime::AccountId; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; @@ -464,14 +463,13 @@ where }) .await; } else { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + let proposer = sc_basic_authorship::ProposerFactory::with_proof_recording( task_manager.spawn_handle(), client.clone(), transaction_pool.clone(), prometheus_registry.as_ref(), None, ); - let proposer = Proposer::new(proposer_factory); let collator_service = CollatorService::new( client.clone(), diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index d7ab924eee946..f115a254e19d5 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -527,6 +527,9 @@ pub use sp_api_proc_macro::mock_impl_runtime_apis; #[cfg(feature = "std")] pub type ProofRecorder = sp_trie::recorder::Recorder>; +#[cfg(feature = "std")] +pub type ProofRecorderIgnoredNodes = sp_trie::recorder::IgnoredNodes<::Hash>; + #[cfg(feature = "std")] pub type StorageChanges = sp_state_machine::StorageChanges>; From c8ec8e247b787fb221dd505fb287fb5ac9273b7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 1 Apr 2025 22:34:25 +0200 Subject: [PATCH 030/312] Work on the test --- .../src/validate_block/implementation.rs | 17 +++++++----- cumulus/test/client/src/lib.rs | 1 + cumulus/test/runtime/src/lib.rs | 26 ++++++++++++++++++- cumulus/test/runtime/src/test_pallet.rs | 5 ++++ polkadot/primitives/src/vstaging/mod.rs | 6 ++--- 5 files changed, 44 insertions(+), 11 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 81e60a891b03f..7453ee0a61709 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ 
b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -22,18 +22,17 @@ use cumulus_primitives_core::{ }; use cumulus_primitives_parachain_inherent::ParachainInherentData; -use polkadot_parachain_primitives::primitives::{ - HeadData, RelayChainBlockNumber, ValidationResult, -}; - +use crate::{ClaimQueueOffset, CoreSelector}; use alloc::vec::Vec; use codec::{Decode, Encode}; - use cumulus_primitives_core::relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}; use frame_support::{ traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType}, BoundedVec, }; +use polkadot_parachain_primitives::primitives::{ + HeadData, RelayChainBlockNumber, ValidationResult, +}; use sp_core::storage::{ChildInfo, StateVersion}; use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::KillStorageResult; @@ -290,13 +289,17 @@ where } if !upward_message_signals.is_empty() { - let mut selected_core = None; + let mut selected_core: Option<(CoreSelector, ClaimQueueOffset)> = None; upward_message_signals.iter().for_each(|s| { if let Ok(UMPSignal::SelectCore(selector, offset)) = UMPSignal::decode(&mut &s[..]) { match &selected_core { Some(selected_core) if *selected_core != (selector, offset) => { - panic!("All `SelectCore` signals need to select the same core") + panic!( + "All `SelectCore` signals need to select the same core {:?} vs {:?}", + selected_core, + (selector, offset) + ) }, Some(_) => {}, None => { diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index 580f7d21f0f63..de54c1759757d 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -76,6 +76,7 @@ pub type Client = client::Client; pub struct GenesisParameters { pub endowed_accounts: Vec, pub wasm: Option>, + pub blocks_per_pov: Option, } impl substrate_test_client::GenesisInit for GenesisParameters { diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 5575fbbd84056..c55f1ad55d45e 100644 --- 
a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -52,6 +52,8 @@ mod test_pallet; extern crate alloc; use alloc::{vec, vec::Vec}; +use codec::Encode; +use cumulus_pallet_parachain_system::SelectCore; use frame_support::{derive_impl, traits::OnRuntimeUpgrade, PalletId}; use sp_api::{decl_runtime_apis, impl_runtime_apis}; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -324,6 +326,28 @@ impl pallet_glutton::Config for Runtime { type WeightInfo = pallet_glutton::weights::SubstrateWeight; } +parameter_types! { + pub storage BlocksPerPoV: u32 = 1; +} + +pub struct MultipleBlocksPerPoVCoreSelector; + +impl SelectCore for MultipleBlocksPerPoVCoreSelector { + fn selected_core() -> (CoreSelector, ClaimQueueOffset) { + let core_selector = (System::block_number().saturating_sub(1) / BlocksPerPoV::get()) + .using_encoded(|b| b[0]); + + (CoreSelector(core_selector), ClaimQueueOffset(0)) + } + + fn select_next_core() -> (CoreSelector, ClaimQueueOffset) { + let core_selector = + ((System::block_number()) / BlocksPerPoV::get()).using_encoded(|b| b[0]); + + (CoreSelector(core_selector), ClaimQueueOffset(0)) + } +} + type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< Runtime, RELAY_CHAIN_SLOT_DURATION_MILLIS, @@ -344,7 +368,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; - type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; + type SelectCore = MultipleBlocksPerPoVCoreSelector; } impl parachain_info::Config for Runtime {} diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index 61195386ae79d..5679ee1dccd24 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -80,12 +80,17 @@ pub mod pallet { pub struct GenesisConfig { #[serde(skip)] pub _config: 
core::marker::PhantomData, + pub blocks_per_pov: Option, } #[pallet::genesis_build] impl BuildGenesisConfig for GenesisConfig { fn build(&self) { sp_io::storage::set(TEST_RUNTIME_UPGRADE_KEY, &[1, 2, 3, 4]); + + if let Some(blocks_per_pov) = self.blocks_per_pov { + crate::BlocksPerPoV::set(&blocks_per_pov); + } } } } diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index be0438fb8ab7c..d03be3d2ee615 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -419,15 +419,15 @@ impl From> for super::v8::CandidateReceipt { /// A strictly increasing sequence number, typically this would be the least significant byte of the /// block number. -#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug, Copy)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Debug, Copy)] pub struct CoreSelector(pub u8); /// An offset in the relay chain claim queue. -#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug, Copy)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Debug, Copy)] pub struct ClaimQueueOffset(pub u8); /// Signals that a parachain can send to the relay chain via the UMP queue. -#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Debug)] pub enum UMPSignal { /// A message sent by a parachain to select the core the candidate is committed to. 
/// Relay chain validators, in particular backers, use the `CoreSelector` and From 80e8b3d1b8af96359bcee14bb7bd3ade9c6d404d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 2 Apr 2025 12:52:48 +0200 Subject: [PATCH 031/312] Fix the test properly --- .../parachain-system/src/validate_block/tests.rs | 16 +++++++++++----- cumulus/polkadot-omni-node/lib/src/nodes/aura.rs | 7 +++---- cumulus/test/client/src/lib.rs | 1 + cumulus/test/service/src/chain_spec.rs | 10 ++++++++++ cumulus/test/service/src/lib.rs | 1 + substrate/bin/node/bench/src/construct.rs | 3 ++- substrate/bin/node/cli/src/service.rs | 14 +++++++++----- templates/parachain/node/src/service.rs | 5 +---- 8 files changed, 38 insertions(+), 19 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index ea8ab540b2a1a..eaef45dfc20fe 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -96,13 +96,14 @@ fn create_test_client() -> (Client, Header) { } /// Create test client using the runtime with `elastic-scaling` feature enabled. 
-fn create_elastic_scaling_test_client() -> (Client, Header) { +fn create_elastic_scaling_test_client(blocks_per_pov: u32) -> (Client, Header) { let mut builder = TestClientBuilder::new(); builder.genesis_init_mut().wasm = Some( test_runtime::elastic_scaling_multi_block_slot::WASM_BINARY .expect("You need to build the WASM binaries to run the tests!") .to_vec(), ); + builder.genesis_init_mut().blocks_per_pov = Some(blocks_per_pov); let client = builder.enable_import_proof_recording().build(); let genesis_header = client @@ -150,7 +151,7 @@ fn build_multiple_blocks_with_witness( client: &Client, mut parent_head: Header, mut sproof_builder: RelayStateSproofBuilder, - num_blocks: usize, + num_blocks: u32, ) -> TestBlockData { let timestamp = std::time::SystemTime::now() .duration_since(std::time::SystemTime::UNIX_EPOCH) @@ -233,9 +234,14 @@ fn validate_block_works() { fn validate_multiple_blocks_work() { sp_tracing::try_init_simple(); - let (client, parent_head) = create_elastic_scaling_test_client(); - let TestBlockData { block, validation_data } = - build_multiple_blocks_with_witness(&client, parent_head.clone(), Default::default(), 4); + let blocks_per_pov = 4; + let (client, parent_head) = create_elastic_scaling_test_client(blocks_per_pov); + let TestBlockData { block, validation_data } = build_multiple_blocks_with_witness( + &client, + parent_head.clone(), + Default::default(), + blocks_per_pov, + ); let block = seal_block(block, &client); let header = block.blocks().last().unwrap().header().clone(); diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs index 3444b21c8aa08..2bb7455fb0563 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs @@ -45,7 +45,7 @@ use cumulus_client_consensus_aura::{ }, equivocation_import_queue::Verifier as EquivocationVerifier, }; -use cumulus_client_consensus_proposer::{Proposer, ProposerInterface}; +use 
cumulus_client_consensus_proposer::ProposerInterface; use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; #[allow(deprecated)] use cumulus_client_service::CollatorSybilResistance; @@ -325,7 +325,7 @@ where node_extra_args: NodeExtraArgs, block_import_handle: SlotBasedBlockImportHandle, ) -> Result<(), Error> { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + let proposer = sc_basic_authorship::ProposerFactory::with_proof_recording( task_manager.spawn_handle(), client.clone(), transaction_pool, @@ -333,7 +333,6 @@ where telemetry.clone(), ); - let proposer = Proposer::new(proposer_factory); let collator_service = CollatorService::new( client.clone(), Arc::new(task_manager.spawn_handle()), @@ -483,7 +482,7 @@ where para_id, overseer_handle, relay_chain_slot_duration, - proposer: Proposer::new(proposer_factory), + proposer: proposer_factory, collator_service, authoring_duration: Duration::from_millis(2000), reinitialize: false, diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index de54c1759757d..b58638cf004f8 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -87,6 +87,7 @@ impl substrate_test_client::GenesisInit for GenesisParameters { self.wasm.as_deref().unwrap_or_else(|| { cumulus_test_runtime::WASM_BINARY.expect("WASM binary not compiled!") }), + self.blocks_per_pov, ) .build_storage() .expect("Builds test runtime genesis storage") diff --git a/cumulus/test/service/src/chain_spec.rs b/cumulus/test/service/src/chain_spec.rs index ecac18f2ed9e8..cfd4efc55be22 100644 --- a/cumulus/test/service/src/chain_spec.rs +++ b/cumulus/test/service/src/chain_spec.rs @@ -49,6 +49,7 @@ pub fn get_chain_spec_with_extra_endowed( id: Option, extra_endowed_accounts: Vec, code: &[u8], + blocks_per_pov: Option, ) -> ChainSpec { let runtime_caller = GenesisConfigBuilderRuntimeCaller::::new(code); let mut development_preset = runtime_caller @@ -70,6 +71,9 @@ pub fn 
get_chain_spec_with_extra_endowed( let mut patch_json = json!({ "balances": { "balances": all_balances, + }, + "testPallet": { + "blocksPerPov": blocks_per_pov, } }); @@ -81,6 +85,7 @@ pub fn get_chain_spec_with_extra_endowed( "parachainInfo": { "parachainId": id, }, + }), ); }; @@ -104,6 +109,7 @@ pub fn get_chain_spec(id: Option) -> ChainSpec { id, Default::default(), cumulus_test_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + None, ) } @@ -114,6 +120,7 @@ pub fn get_elastic_scaling_chain_spec(id: Option) -> ChainSpec { Default::default(), cumulus_test_runtime::elastic_scaling::WASM_BINARY .expect("WASM binary was not built, please build it!"), + None, ) } @@ -124,6 +131,7 @@ pub fn get_elastic_scaling_500ms_chain_spec(id: Option) -> ChainSpec { Default::default(), cumulus_test_runtime::elastic_scaling_500ms::WASM_BINARY .expect("WASM binary was not built, please build it!"), + None, ) } @@ -134,6 +142,7 @@ pub fn get_elastic_scaling_mvp_chain_spec(id: Option) -> ChainSpec { Default::default(), cumulus_test_runtime::elastic_scaling_mvp::WASM_BINARY .expect("WASM binary was not built, please build it!"), + None, ) } @@ -143,5 +152,6 @@ pub fn get_elastic_scaling_multi_block_slot_chain_spec(id: Option) -> Ch Default::default(), cumulus_test_runtime::elastic_scaling_multi_block_slot::WASM_BINARY .expect("WASM binary was not built, please build it!"), + None, ) } diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 0dd0dfc50a2ea..d064e4d9b7a11 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -827,6 +827,7 @@ pub fn node_config( Some(para_id), endowed_accounts, cumulus_test_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + None, )); let mut storage = spec.as_storage_builder().build_storage().expect("could not build storage"); diff --git a/substrate/bin/node/bench/src/construct.rs b/substrate/bin/node/bench/src/construct.rs index 
9049732d6d376..49e2641ff8a13 100644 --- a/substrate/bin/node/bench/src/construct.rs +++ b/substrate/bin/node/bench/src/construct.rs @@ -144,7 +144,8 @@ impl core::Benchmark for ConstructionBenchmark { let inherent_data = futures::executor::block_on(timestamp_provider.create_inherent_data()) .expect("Create inherent data failed"); - let _block = futures::executor::block_on(proposer.propose( + let _block = futures::executor::block_on(Proposer::propose( + proposer, inherent_data, Default::default(), std::time::Duration::from_secs(20), diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 53f7c0d3d1752..7028445fb5247 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -1002,11 +1002,15 @@ mod tests { digest.push(::babe_pre_digest(babe_pre_digest)); let new_block = futures::executor::block_on(async move { - let proposer = proposer_factory.init(&parent_header).await; - proposer - .unwrap() - .propose(inherent_data, digest, std::time::Duration::from_secs(1), None) - .await + let proposer = proposer_factory.init(&parent_header).await.unwrap(); + Proposer::propose( + proposer, + inherent_data, + digest, + std::time::Duration::from_secs(1), + None, + ) + .await }) .expect("Error making test block") .block; diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index 8c526317283ea..a3e2431fe8469 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -17,7 +17,6 @@ use cumulus_client_collator::service::CollatorService; #[docify::export(lookahead_collator)] use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params as AuraParams}; use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport; -use cumulus_client_consensus_proposer::Proposer; use cumulus_client_service::{ build_network, build_relay_chain_interface, prepare_node_config, start_relay_chain_tasks, 
BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, ParachainHostFunctions, @@ -184,7 +183,7 @@ fn start_consensus( overseer_handle: OverseerHandle, announce_block: Arc>) + Send + Sync>, ) -> Result<(), sc_service::Error> { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + let proposer = sc_basic_authorship::ProposerFactory::with_proof_recording( task_manager.spawn_handle(), client.clone(), transaction_pool, @@ -192,8 +191,6 @@ fn start_consensus( telemetry.clone(), ); - let proposer = Proposer::new(proposer_factory); - let collator_service = CollatorService::new( client.clone(), Arc::new(task_manager.spawn_handle()), From d375bde92e7e201ba5be3628399798cd430f2785 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 2 Apr 2025 20:45:12 +0200 Subject: [PATCH 032/312] Extend the test --- Cargo.lock | 2 + cumulus/pallets/parachain-system/Cargo.toml | 2 + .../src/validate_block/tests.rs | 48 +++++++++++++++---- cumulus/test/runtime/src/test_pallet.rs | 12 +++++ .../consensus/common/src/block_import.rs | 14 ++++++ 5 files changed, 70 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb3f9cfe00682..0b2e9aeeeb271 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4525,7 +4525,9 @@ dependencies = [ "polkadot-runtime-parachains", "rand 0.8.5", "sc-client-api", + "sc-consensus", "scale-info", + "sp-api 26.0.0", "sp-consensus-slots", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 6b6bc4fbcefe5..02bf1634fceef 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -55,7 +55,9 @@ rand = { workspace = true, default-features = true } trie-standardmap = { workspace = true } # Substrate +sc-consensus = { workspace = true } sc-client-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true 
} sp-consensus-slots = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index eaef45dfc20fe..e488ac94d3ebe 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -18,7 +18,7 @@ use crate::{validate_block::MemoryOptimizedValidationParams, *}; use codec::{Decode, DecodeAll, Encode}; use cumulus_primitives_core::{ParachainBlockData, PersistedValidationData}; use cumulus_test_client::{ - generate_extrinsic, + generate_extrinsic, generate_extrinsic_with_pair, runtime::{ self as test_runtime, Block, Hash, Header, TestPalletCall, UncheckedExtrinsic, WASM_BINARY, }, @@ -31,6 +31,8 @@ use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use polkadot_parachain_primitives::primitives::ValidationResult; #[cfg(feature = "experimental-ump-signals")] use relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}; +use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; +use sp_api::{ApiExt, Core, ProofRecorder, ProvideRuntimeApi}; use sp_core::H256; use sp_runtime::traits::{BlakeTwo256, Block as BlockT, Header as HeaderT}; use sp_trie::{recorder::IgnoredNodes, StorageProof}; @@ -152,6 +154,7 @@ fn build_multiple_blocks_with_witness( mut parent_head: Header, mut sproof_builder: RelayStateSproofBuilder, num_blocks: u32, + extra_extrinsics: impl Fn(u32) -> Vec, ) -> TestBlockData { let timestamp = std::time::SystemTime::now() .duration_since(std::time::SystemTime::UNIX_EPOCH) @@ -173,9 +176,9 @@ fn build_multiple_blocks_with_witness( let mut proof = StorageProof::empty(); let mut ignored_nodes = IgnoredNodes::::default(); - for _ in 0..num_blocks { + for i in 0..num_blocks { let 
cumulus_test_client::BlockBuilderAndSupportData { - block_builder, + mut block_builder, persisted_validation_data: p_v_data, } = client.init_block_builder_with_ignored_nodes( parent_head.hash(), @@ -187,8 +190,32 @@ fn build_multiple_blocks_with_witness( persisted_validation_data = Some(p_v_data); + for ext in (extra_extrinsics)(i) { + block_builder.push(ext).unwrap(); + } + let built_block = block_builder.build().unwrap(); + futures::executor::block_on({ + dbg!(i); + let parent_hash = *built_block.block.header.parent_hash(); + let state = client.state_at(parent_hash).unwrap(); + + let mut api = client.runtime_api(); + api.record_proof(); + api.execute_block(parent_hash, built_block.block.clone()).unwrap(); + + let (header, extrinsics) = built_block.block.clone().deconstruct(); + + let mut import = BlockImportParams::new(BlockOrigin::Own, header); + import.body = Some(extrinsics); + import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); + import.state_action = api.into_storage_changes(&state, parent_hash).unwrap().into(); + + BlockImport::import_block(&client, import) + }) + .unwrap(); + ignored_nodes.extend(&IgnoredNodes::from_storage_proof::( &built_block.proof.clone().unwrap(), )); @@ -196,11 +223,6 @@ fn build_multiple_blocks_with_witness( .extend(&IgnoredNodes::from_memory_db(built_block.storage_changes.transaction)); proof = StorageProof::merge([proof, built_block.proof.unwrap()]); - futures::executor::block_on( - client.import_as_best(BlockOrigin::Own, built_block.block.clone()), - ) - .unwrap(); - parent_head = built_block.block.header.clone(); blocks.push(built_block.block); @@ -241,8 +263,18 @@ fn validate_multiple_blocks_work() { parent_head.clone(), Default::default(), blocks_per_pov, + |i| { + vec![generate_extrinsic_with_pair( + &client, + Charlie.into(), + TestPalletCall::read_and_write_big_value {}, + Some(i), + )] + }, ); + assert!(block.proof().encoded_size() < 3 * 1024 * 1024); + let block = seal_block(block, &client); let header = 
block.blocks().last().unwrap().header().clone(); let res_header = call_validate_block_elastic_scaling( diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index 5679ee1dccd24..cd866ac2881f7 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -24,6 +24,7 @@ pub const TEST_RUNTIME_UPGRADE_KEY: &[u8] = b"+test_runtime_upgrade_key+"; #[frame_support::pallet(dev_mode)] pub mod pallet { use crate::test_pallet::TEST_RUNTIME_UPGRADE_KEY; + use alloc::vec; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; @@ -73,6 +74,17 @@ pub mod pallet { Ok(()) } + + /// Reads a key and writes a big value under this key. + /// + /// At genesis this `key` is empty and thus, will only be set in consequent blocks. + pub fn read_and_write_big_value(_: OriginFor) -> DispatchResult { + let key = &b"really_huge_value"[..]; + sp_io::storage::get(key); + sp_io::storage::set(key, &vec![0u8; 1024 * 1024 * 30]); + + Ok(()) + } } #[derive(frame_support::DefaultNoBound)] diff --git a/substrate/client/consensus/common/src/block_import.rs b/substrate/client/consensus/common/src/block_import.rs index 0fcf96a963682..f90412d677d22 100644 --- a/substrate/client/consensus/common/src/block_import.rs +++ b/substrate/client/consensus/common/src/block_import.rs @@ -165,6 +165,20 @@ impl StateAction { } } +impl From> for StateAction { + fn from(value: StorageChanges) -> Self { + Self::ApplyChanges(value) + } +} + +impl From>> + for StateAction +{ + fn from(value: sp_state_machine::StorageChanges>) -> Self { + Self::ApplyChanges(StorageChanges::Changes(value)) + } +} + /// Data required to import a Block. 
#[non_exhaustive] pub struct BlockImportParams { From 1d74d6b5244001c8f102628db1236ab18e5bbcf5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 2 Apr 2025 23:46:43 +0200 Subject: [PATCH 033/312] Fix test --- .../pallets/parachain-system/src/validate_block/tests.rs | 6 ++++-- cumulus/test/client/src/lib.rs | 2 +- cumulus/test/runtime/src/test_pallet.rs | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index e488ac94d3ebe..30b92ccdcc97e 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -35,7 +35,7 @@ use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sp_api::{ApiExt, Core, ProofRecorder, ProvideRuntimeApi}; use sp_core::H256; use sp_runtime::traits::{BlakeTwo256, Block as BlockT, Header as HeaderT}; -use sp_trie::{recorder::IgnoredNodes, StorageProof}; +use sp_trie::{proof_size_extension::ProofSizeExt, recorder::IgnoredNodes, StorageProof}; use std::{env, process::Command}; fn call_validate_block_validation_result( @@ -202,7 +202,9 @@ fn build_multiple_blocks_with_witness( let state = client.state_at(parent_hash).unwrap(); let mut api = client.runtime_api(); - api.record_proof(); + let proof_recorder = ProofRecorder::::with_ignored_nodes(ignored_nodes.clone()); + api.set_proof_recorder(proof_recorder.clone()); + api.register_extension(ProofSizeExt::new(proof_recorder)); api.execute_block(parent_hash, built_block.block.clone()).unwrap(); let (header, extrinsics) = built_block.block.clone().deconstruct(); diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index b58638cf004f8..389e21b6c8795 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -203,7 +203,7 @@ pub fn validate_block( let mut ext = TestExternalities::default(); let 
mut ext_ext = ext.ext(); - let heap_pages = HeapAllocStrategy::Static { extra_pages: 1024 }; + let heap_pages = HeapAllocStrategy::Static { extra_pages: 2048 }; let executor = WasmExecutor::<( sp_io::SubstrateHostFunctions, cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions, diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index cd866ac2881f7..67feaaf3102e0 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -81,7 +81,7 @@ pub mod pallet { pub fn read_and_write_big_value(_: OriginFor) -> DispatchResult { let key = &b"really_huge_value"[..]; sp_io::storage::get(key); - sp_io::storage::set(key, &vec![0u8; 1024 * 1024 * 30]); + sp_io::storage::set(key, &vec![0u8; 1024 * 1024 * 5]); Ok(()) } From d4904ecfaeb46baa1606bf888e8c6dd6916a9290 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 3 Apr 2025 22:11:29 +0200 Subject: [PATCH 034/312] Review comments --- .../src/validate_block/implementation.rs | 13 +++++++++---- cumulus/primitives/core/src/parachain_block_data.rs | 8 ++++---- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 81e60a891b03f..225c0d7b5f55d 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -250,6 +250,7 @@ where crate::UpwardMessages::::get() .into_iter() .filter_map(|m| { + // Filter out the `UMP_SEPARATOR` and the `UMPSignals`. 
if cfg!(feature = "experimental-ump-signals") { if m == UMP_SEPARATOR { found_separator = true; @@ -260,6 +261,7 @@ where } None } else { + // No signal or separator Some(m) } } else { @@ -293,16 +295,19 @@ where let mut selected_core = None; upward_message_signals.iter().for_each(|s| { - if let Ok(UMPSignal::SelectCore(selector, offset)) = UMPSignal::decode(&mut &s[..]) { - match &selected_core { + match UMPSignal::decode(&mut &s[..]).expect("Failed to decode `UMPSignal`") { + UMPSignal::SelectCore(selector, offset) => match &selected_core { Some(selected_core) if *selected_core != (selector, offset) => { - panic!("All `SelectCore` signals need to select the same core") + panic!( + "All `SelectCore` signals need to select the same core: {selected_core:?} vs {:?}", + (selector, offset), + ) }, Some(_) => {}, None => { selected_core = Some((selector, offset)); }, - } + }, } }); diff --git a/cumulus/primitives/core/src/parachain_block_data.rs b/cumulus/primitives/core/src/parachain_block_data.rs index 7e56124922da9..e5357125e080f 100644 --- a/cumulus/primitives/core/src/parachain_block_data.rs +++ b/cumulus/primitives/core/src/parachain_block_data.rs @@ -99,10 +99,10 @@ impl ParachainBlockData { pub fn log_size_info(&self) { tracing::info!( target: "cumulus", - "PoV size {{ header: {}kb, extrinsics: {}kb, storage_proof: {}kb }}", - self.blocks().iter().map(|b| b.header().encoded_size()).sum::() as f64 / 1024f64, - self.blocks().iter().map(|b| b.extrinsics().encoded_size()).sum::() as f64 / 1024f64, - self.proof().encoded_size() as f64 / 1024f64, + header_kb = %self.blocks().iter().map(|b| b.header().encoded_size()).sum::() as f64 / 1024f64, + extrinsics_kb = %self.blocks().iter().map(|b| b.extrinsics().encoded_size()).sum::() as f64 / 1024f64, + storage_proof_kb = %self.proof().encoded_size() as f64 / 1024f64, + "PoV size", ); } From 583efc253a50e8833de56771fb34f20ad0c3fead Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 4 Apr 2025 
18:27:17 +0200 Subject: [PATCH 035/312] Make it backwards and forwards compatible --- cumulus/client/pov-recovery/src/lib.rs | 43 ++--- cumulus/client/pov-recovery/src/tests.rs | 19 +- .../core/src/parachain_block_data.rs | 182 +++++++++++++++--- prdoc/pr_6137.prdoc | 5 +- 4 files changed, 188 insertions(+), 61 deletions(-) diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs index e8f2e636249ff..089ad08367a75 100644 --- a/cumulus/client/pov-recovery/src/lib.rs +++ b/cumulus/client/pov-recovery/src/lib.rs @@ -361,33 +361,28 @@ where data: &[u8], expected_block_hash: Block::Hash, ) -> Option> { - if let Ok(block_data) = ParachainBlockData::::decode_all(&mut &data[..]) { - if block_data.blocks().last().map_or(false, |b| b.hash() == expected_block_hash) { - return Some(block_data) - } - - tracing::debug!( - target: LOG_TARGET, - ?expected_block_hash, - "Could not find the expected block hash as latest block in `ParachainBlockData`" - ); - } + match ParachainBlockData::::decode_all(&mut &data[..]) { + Ok(block_data) => { + if block_data.blocks().last().map_or(false, |b| b.hash() == expected_block_hash) { + return Some(block_data) + } - if let Ok(block_data) = - cumulus_primitives_core::parachain_block_data::v0::ParachainBlockData::::decode_all( - &mut &data[..], - ) { - if block_data.header.hash() == expected_block_hash { - return Some(block_data.into()) - } + tracing::debug!( + target: LOG_TARGET, + ?expected_block_hash, + "Could not find the expected block hash as latest block in `ParachainBlockData`" + ); + }, + Err(error) => { + tracing::debug!( + target: LOG_TARGET, + ?expected_block_hash, + ?error, + "Could not decode `ParachainBlockData` from recovered PoV", + ); + }, } - tracing::warn!( - target: LOG_TARGET, - ?expected_block_hash, - "Could not decode `ParachainBlockData` from recovered PoV", - ); - None } diff --git a/cumulus/client/pov-recovery/src/tests.rs b/cumulus/client/pov-recovery/src/tests.rs index 
78884691cc384..e6f2e2e2e34e6 100644 --- a/cumulus/client/pov-recovery/src/tests.rs +++ b/cumulus/client/pov-recovery/src/tests.rs @@ -692,19 +692,20 @@ async fn single_pending_candidate_recovery_success( )) => { assert_eq!(receipt.hash(), candidate_hash); assert_eq!(session_index, TEST_SESSION_INDEX); + let block_data = + ParachainBlockData::::new( + vec![Block::new(header.clone(), vec![])], CompactProof { encoded_nodes: vec![] } + ); + response_tx.send( Ok( AvailableData { pov: Arc::new(PoV { - block_data: if latest_block_data { ParachainBlockData::::new( - vec![Block::new(header.clone(), vec![])], CompactProof { encoded_nodes: vec![] } - ).encode()} else { - cumulus_primitives_core::parachain_block_data::v0::ParachainBlockData:: { - header: header.clone(), - extrinsics: Vec::new(), - storage_proof: CompactProof { encoded_nodes: Vec::new() } - }.encode() - }.into() + block_data: if latest_block_data { + block_data + } else { + block_data.as_v0().unwrap() + }.encode().into() }), validation_data: dummy_pvd(), } diff --git a/cumulus/primitives/core/src/parachain_block_data.rs b/cumulus/primitives/core/src/parachain_block_data.rs index e5357125e080f..0b48a571acfc4 100644 --- a/cumulus/primitives/core/src/parachain_block_data.rs +++ b/cumulus/primitives/core/src/parachain_block_data.rs @@ -17,29 +17,48 @@ //! Provides [`ParachainBlockData`] and its historical versions. use alloc::vec::Vec; -use codec::Encode; +use codec::{Decode, Encode}; use sp_runtime::traits::Block as BlockT; use sp_trie::CompactProof; -pub mod v0 { - use super::*; +/// Special prefix used by [`ParachainBlockData`] from version 1 and upwards to distinguish from the +/// unversioned legacy/v0 version. +const VERSIONED_PARACHAIN_BLOCK_DATA_PREFIX: &[u8] = b"VERSIONEDPBD"; - #[derive(codec::Encode, codec::Decode, Clone)] - pub struct ParachainBlockData { - /// The header of the parachain block. - pub header: B::Header, - /// The extrinsics of the parachain block. 
- pub extrinsics: alloc::vec::Vec, - /// The data that is required to emulate the storage accesses executed by all extrinsics. - pub storage_proof: sp_trie::CompactProof, +// Struct which allows prepending bytes after reading from an input. +pub(crate) struct PrependBytesInput<'a, I> { + prepend: &'a [u8], + read: usize, + inner: &'a mut I, +} + +impl<'a, I: codec::Input> codec::Input for PrependBytesInput<'a, I> { + fn remaining_len(&mut self) -> Result, codec::Error> { + let remaining_compact = self.prepend.len().saturating_sub(self.read); + Ok(self.inner.remaining_len()?.map(|len| len.saturating_add(remaining_compact))) } - impl From> for super::ParachainBlockData { - fn from(block_data: ParachainBlockData) -> Self { - Self::new( - alloc::vec![Block::new(block_data.header, block_data.extrinsics)], - block_data.storage_proof, - ) + fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> { + if into.is_empty() { + return Ok(()); + } + + let remaining_compact = self.prepend.len().saturating_sub(self.read); + if remaining_compact > 0 { + let to_read = into.len().min(remaining_compact); + into[..to_read].copy_from_slice(&self.prepend[self.read..][..to_read]); + self.read += to_read; + + if to_read < into.len() { + // Buffer not full, keep reading the inner. + self.inner.read(&mut into[to_read..]) + } else { + // Buffer was filled by the bytes. + Ok(()) + } + } else { + // Prepended bytes has been read, just read from inner. + self.inner.read(into) } } } @@ -48,12 +67,54 @@ pub mod v0 { /// /// This is send as PoV (proof of validity block) to the relay-chain validators. There it will be /// passed to the parachain validation Wasm blob to be validated. 
-#[derive(codec::Encode, codec::Decode, Clone)] +#[derive(Clone)] pub enum ParachainBlockData { - #[codec(index = 1)] + V0 { block: [Block; 1], proof: CompactProof }, V1 { blocks: Vec, proof: CompactProof }, } +impl Encode for ParachainBlockData { + fn encode(&self) -> Vec { + match self { + Self::V0 { block, proof } => + (block[0].header(), block[0].extrinsics(), &proof).encode(), + Self::V1 { blocks, proof } => { + let mut res = VERSIONED_PARACHAIN_BLOCK_DATA_PREFIX.to_vec(); + 1u8.encode_to(&mut res); + blocks.encode_to(&mut res); + proof.encode_to(&mut res); + res + }, + } + } +} + +impl Decode for ParachainBlockData { + fn decode(input: &mut I) -> Result { + let mut prefix = [0u8; VERSIONED_PARACHAIN_BLOCK_DATA_PREFIX.len()]; + input.read(&mut prefix)?; + + if prefix == VERSIONED_PARACHAIN_BLOCK_DATA_PREFIX { + match input.read_byte()? { + 1 => { + let blocks = Vec::::decode(input)?; + let proof = CompactProof::decode(input)?; + + Ok(Self::V1 { blocks, proof }) + }, + _ => Err("Unknown `ParachainBlockData` version".into()), + } + } else { + let mut input = PrependBytesInput { prepend: &prefix, read: 0, inner: input }; + let header = Block::Header::decode(&mut input)?; + let extrinsics = Vec::::decode(&mut input)?; + let proof = CompactProof::decode(&mut input)?; + + Ok(Self::V0 { block: [Block::new(header, extrinsics)], proof }) + } + } +} + impl ParachainBlockData { /// Creates a new instance of `Self`. pub fn new(blocks: Vec, proof: CompactProof) -> Self { @@ -63,6 +124,7 @@ impl ParachainBlockData { /// Returns references to the stored blocks. pub fn blocks(&self) -> &[Block] { match self { + Self::V0 { block, .. } => &block[..], Self::V1 { blocks, .. } => &blocks, } } @@ -70,6 +132,7 @@ impl ParachainBlockData { /// Returns mutable references to the stored blocks. pub fn blocks_mut(&mut self) -> &mut [Block] { match self { + Self::V0 { ref mut block, .. } => block, Self::V1 { ref mut blocks, .. 
} => blocks, } } @@ -77,6 +140,7 @@ impl ParachainBlockData { /// Returns the stored blocks. pub fn into_blocks(self) -> Vec { match self { + Self::V0 { block, .. } => block.into_iter().collect(), Self::V1 { blocks, .. } => blocks, } } @@ -84,6 +148,7 @@ impl ParachainBlockData { /// Returns a reference to the stored proof. pub fn proof(&self) -> &CompactProof { match self { + Self::V0 { proof, .. } => &proof, Self::V1 { proof, .. } => proof, } } @@ -91,6 +156,7 @@ impl ParachainBlockData { /// Deconstruct into the inner parts. pub fn into_inner(self) -> (Vec, CompactProof) { match self { + Self::V0 { block, proof } => (block.into_iter().collect(), proof), Self::V1 { blocks, proof } => (blocks, proof), } } @@ -109,18 +175,86 @@ impl ParachainBlockData { /// Converts into [`v0::ParachainBlockData`]. /// /// Returns `None` if there is not exactly one block. - pub fn as_v0(&self) -> Option> { + pub fn as_v0(&self) -> Option { match self { + Self::V0 { .. } => Some(self.clone()), Self::V1 { blocks, proof } => { if blocks.len() != 1 { return None } - blocks.first().map(|block| { - let (header, extrinsics) = block.clone().deconstruct(); - v0::ParachainBlockData { header, extrinsics, storage_proof: proof.clone() } - }) + blocks + .first() + .map(|block| Self::V0 { block: [block.clone()], proof: proof.clone() }) }, } } } + +#[cfg(test)] +mod tests { + use super::*; + use sp_runtime::testing::*; + + #[derive(codec::Encode, codec::Decode, Clone, PartialEq, Debug)] + struct ParachainBlockDataV0 { + /// The header of the parachain block. + pub header: B::Header, + /// The extrinsics of the parachain block. + pub extrinsics: alloc::vec::Vec, + /// The data that is required to emulate the storage accesses executed by all extrinsics. 
+ pub storage_proof: sp_trie::CompactProof, + } + + type TestExtrinsic = TestXt; + type TestBlock = Block; + + #[test] + fn decoding_encoding_v0_works() { + let v0 = ParachainBlockDataV0:: { + header: Header::new_from_number(10), + extrinsics: vec![ + TestExtrinsic::new_bare(MockCallU64(10)), + TestExtrinsic::new_bare(MockCallU64(100)), + ], + storage_proof: CompactProof { encoded_nodes: vec![vec![10u8; 200], vec![20u8; 30]] }, + }; + + let encoded = v0.encode(); + let decoded = ParachainBlockData::::decode(&mut &encoded[..]).unwrap(); + + match &decoded { + ParachainBlockData::V0 { block, proof } => { + assert_eq!(v0.header, block[0].header); + assert_eq!(v0.extrinsics, block[0].extrinsics); + assert_eq!(&v0.storage_proof, proof); + }, + _ => panic!("Invalid decoding"), + } + + let encoded = decoded.as_v0().unwrap().encode(); + + let decoded = ParachainBlockDataV0::::decode(&mut &encoded[..]).unwrap(); + assert_eq!(decoded, v0); + } + + #[test] + fn decoding_encoding_v1_works() { + let v1 = ParachainBlockData::::V1 { + blocks: vec![TestBlock::new( + Header::new_from_number(10), + vec![ + TestExtrinsic::new_bare(MockCallU64(10)), + TestExtrinsic::new_bare(MockCallU64(100)), + ], + )], + proof: CompactProof { encoded_nodes: vec![vec![10u8; 200], vec![20u8; 30]] }, + }; + + let encoded = v1.encode(); + let decoded = ParachainBlockData::::decode(&mut &encoded[..]).unwrap(); + + assert_eq!(v1.blocks(), decoded.blocks()); + assert_eq!(v1.proof(), decoded.proof()); + } +} diff --git a/prdoc/pr_6137.prdoc b/prdoc/pr_6137.prdoc index 60f88966042c1..6a5930ba12fed 100644 --- a/prdoc/pr_6137.prdoc +++ b/prdoc/pr_6137.prdoc @@ -5,10 +5,7 @@ doc: This pull request adds support to `ParachainBlockData` to support multiple blocks at once. This basically means that cumulus based Parachains could start packaging multiple blocks into one `PoV`. From the relay chain PoV nothing changes and these `PoV`s appear like any other `PoV`. 
Internally this `PoV` then executes the blocks sequentially. However, all these blocks together can use the same amount of resources like a single `PoV`. This pull request is basically a preparation to support running parachains with a faster block time than the relay chain. - - This breaks the encoding of `ParachainBlockData`. It requires that the collators upgrade first before the runtime requiring the new `ParachainBlockData` is enacted. - The collators will decide based on the api version of `CollectCollationInfo`, which `ParachainBlockData` format they will send to the relay chain so that the validation code can interpret it correctly. - + crates: - name: cumulus-client-collator bump: major From 16d95c919fbab261c7d01f0a32e6890a4df2157b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 4 Apr 2025 18:52:32 +0200 Subject: [PATCH 036/312] Update pr_6137.prdoc --- prdoc/pr_6137.prdoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/prdoc/pr_6137.prdoc b/prdoc/pr_6137.prdoc index 6a5930ba12fed..13076fed7d1c9 100644 --- a/prdoc/pr_6137.prdoc +++ b/prdoc/pr_6137.prdoc @@ -5,6 +5,8 @@ doc: This pull request adds support to `ParachainBlockData` to support multiple blocks at once. This basically means that cumulus based Parachains could start packaging multiple blocks into one `PoV`. From the relay chain PoV nothing changes and these `PoV`s appear like any other `PoV`. Internally this `PoV` then executes the blocks sequentially. However, all these blocks together can use the same amount of resources like a single `PoV`. This pull request is basically a preparation to support running parachains with a faster block time than the relay chain. + + This changes the encoding of ParachainBlockData. However, encoding and decoding is made in a backwards and forwards compatible way. This means that there is no dependency between the collator and runtime upgrade. 
crates: - name: cumulus-client-collator From a303b8c359b7cd1f47bd80b435454fb1820fe3ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 7 Apr 2025 16:30:46 +0200 Subject: [PATCH 037/312] Update cumulus/primitives/core/src/parachain_block_data.rs --- cumulus/primitives/core/src/parachain_block_data.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/primitives/core/src/parachain_block_data.rs b/cumulus/primitives/core/src/parachain_block_data.rs index 0b48a571acfc4..2e83732077716 100644 --- a/cumulus/primitives/core/src/parachain_block_data.rs +++ b/cumulus/primitives/core/src/parachain_block_data.rs @@ -172,7 +172,7 @@ impl ParachainBlockData { ); } - /// Converts into [`v0::ParachainBlockData`]. + /// Converts into [`ParachainBlockData::V0`]. /// /// Returns `None` if there is not exactly one block. pub fn as_v0(&self) -> Option { From 8becf8cd8aade9543d940a1e633e0ec82769ce8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 7 Apr 2025 21:27:17 +0200 Subject: [PATCH 038/312] Fix compile errors --- cumulus/test/service/benches/validate_block.rs | 7 +++++-- cumulus/test/service/benches/validate_block_glutton.rs | 6 +++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/cumulus/test/service/benches/validate_block.rs b/cumulus/test/service/benches/validate_block.rs index ecfc824b571fa..e2ae6aa9dbc0c 100644 --- a/cumulus/test/service/benches/validate_block.rs +++ b/cumulus/test/service/benches/validate_block.rs @@ -91,8 +91,11 @@ fn benchmark_block_validation(c: &mut Criterion) { let para_id = ParaId::from(cumulus_test_runtime::PARACHAIN_ID); let mut test_client_builder = TestClientBuilder::with_default_backend(); let genesis_init = test_client_builder.genesis_init_mut(); - *genesis_init = - cumulus_test_client::GenesisParameters { endowed_accounts: account_ids, wasm: None }; + *genesis_init = cumulus_test_client::GenesisParameters { + endowed_accounts: account_ids, + wasm: 
None, + blocks_per_pov: None, + }; let client = test_client_builder.build_with_native_executor(None).0; let (max_transfer_count, extrinsics) = create_extrinsics(&client, &src_accounts, &dst_accounts); diff --git a/cumulus/test/service/benches/validate_block_glutton.rs b/cumulus/test/service/benches/validate_block_glutton.rs index 06ad739965146..36bcea760b84e 100644 --- a/cumulus/test/service/benches/validate_block_glutton.rs +++ b/cumulus/test/service/benches/validate_block_glutton.rs @@ -63,7 +63,11 @@ fn benchmark_block_validation(c: &mut Criterion) { let endowed_accounts = vec![AccountId::from(Alice.public())]; let mut test_client_builder = TestClientBuilder::with_default_backend(); let genesis_init = test_client_builder.genesis_init_mut(); - *genesis_init = cumulus_test_client::GenesisParameters { endowed_accounts, wasm: None }; + *genesis_init = cumulus_test_client::GenesisParameters { + endowed_accounts, + wasm: None, + blocks_per_pov: None, + }; let client = test_client_builder.build_with_native_executor(None).0; From 0ac06c58f8fc6de0e884d92b4405c9e26b0f4487 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 10 Apr 2025 10:52:38 +0200 Subject: [PATCH 039/312] Ensure no nodes are shared --- substrate/primitives/trie/src/trie_codec.rs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/substrate/primitives/trie/src/trie_codec.rs b/substrate/primitives/trie/src/trie_codec.rs index 143c7e61b18ee..84b79418ecb20 100644 --- a/substrate/primitives/trie/src/trie_codec.rs +++ b/substrate/primitives/trie/src/trie_codec.rs @@ -206,12 +206,12 @@ where #[cfg(test)] mod tests { - use crate::{delta_trie_root, recorder::IgnoredNodes, HashDB, StorageProof}; - use super::*; + use crate::{delta_trie_root, recorder::IgnoredNodes, HashDB, StorageProof}; use codec::Encode; use hash_db::AsHashDB; use sp_core::{Blake2Hasher, H256}; + use std::collections::HashSet; use trie_db::{DBValue, Trie, TrieDBBuilder, TrieDBMutBuilder, 
TrieHash, TrieMut}; type MemoryDB = crate::MemoryDB; @@ -380,11 +380,15 @@ mod tests { nodes_to_ignore, ); - let proof = StorageProof::merge([ - recorder.to_storage_proof(), - recorder2.to_storage_proof(), - recorder3.to_storage_proof(), - ]); + let proof = recorder.to_storage_proof(); + let proof2 = recorder2.to_storage_proof(); + let proof3 = recorder3.to_storage_proof(); + + let mut combined = HashSet::>::from_iter(proof.into_iter_nodes()); + proof2.iter_nodes().for_each(|n| assert!(combined.insert(n.clone()))); + proof3.iter_nodes().for_each(|n| assert!(combined.insert(n.clone()))); + + let proof = StorageProof::new(combined.into_iter()); let compact_proof = encode_compact::(&proof.to_memory_db(), &root).unwrap(); From e4c66ee0fde1f5c4f5bd650aa7e1caa23827f5fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 12 Apr 2025 00:55:08 +0200 Subject: [PATCH 040/312] Take by value --- substrate/primitives/trie/src/recorder.rs | 4 ++-- substrate/primitives/trie/src/trie_codec.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index 4981a0ce40313..7a8a29d4be8ac 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -79,8 +79,8 @@ impl IgnoredNodes { } /// Extend `self` with the other instance of ignored nodes. - pub fn extend(&mut self, other: &Self) { - self.nodes.extend(other.nodes.iter().cloned()); + pub fn extend(&mut self, other: Self) { + self.nodes.extend(other.nodes.into_iter()); } /// Returns `true` if the node is ignored. 
diff --git a/substrate/primitives/trie/src/trie_codec.rs b/substrate/primitives/trie/src/trie_codec.rs index 84b79418ecb20..521f0edce70d0 100644 --- a/substrate/primitives/trie/src/trie_codec.rs +++ b/substrate/primitives/trie/src/trie_codec.rs @@ -340,7 +340,7 @@ mod tests { let mut ignored_nodes = IgnoredNodes::from_storage_proof::(&recorder.to_storage_proof()); - ignored_nodes.extend(&IgnoredNodes::from_memory_db::(transaction.clone())); + ignored_nodes.extend(IgnoredNodes::from_memory_db::(transaction.clone())); ignored_nodes } @@ -359,7 +359,7 @@ mod tests { ); db.consolidate(transaction.clone()); - nodes_to_ignore.extend(&build_known_nodes_list(&recorder, &transaction)); + nodes_to_ignore.extend(build_known_nodes_list(&recorder, &transaction)); let (recorder2, transaction, root2) = emulate_block_building( &db, @@ -370,7 +370,7 @@ mod tests { ); db.consolidate(transaction.clone()); - nodes_to_ignore.extend(&build_known_nodes_list(&recorder2, &transaction)); + nodes_to_ignore.extend(build_known_nodes_list(&recorder2, &transaction)); let (recorder3, _, root3) = emulate_block_building( &db, From b201e25831cccbc837da262cba2c719ab8bca305 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 12 Apr 2025 00:56:12 +0200 Subject: [PATCH 041/312] Start --- Cargo.lock | 2 + cumulus/client/consensus/aura/src/collator.rs | 42 ++++++++--- .../slot_based/block_builder_task.rs | 71 ++++++++++++++----- .../aura/src/collators/slot_based/mod.rs | 8 ++- .../consensus/common/src/block_import.rs | 10 +++ substrate/primitives/block-builder/Cargo.toml | 2 + substrate/primitives/block-builder/src/lib.rs | 12 ++++ 7 files changed, 117 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a943ba6706d23..68c6d1707b7ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21998,6 +21998,8 @@ dependencies = [ name = "sp-block-builder" version = "26.0.0" dependencies = [ + "parity-scale-codec", + "scale-info", "sp-api 26.0.0", "sp-inherents", "sp-runtime 
31.0.1", diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index d6e083ef0a763..ad5919ece79d1 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -41,9 +41,10 @@ use polkadot_node_primitives::{Collation, MaybeCompressedPoV}; use polkadot_primitives::{Header as PHeader, Id as ParaId}; use futures::prelude::*; +use sc_client_api::BackendTransaction; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction}; use sc_consensus_aura::standalone as aura_internal; -use sp_api::ProvideRuntimeApi; +use sp_api::{ProvideRuntimeApi, StorageProof}; use sp_application_crypto::AppPublic; use sp_consensus::BlockOrigin; use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; @@ -77,6 +78,24 @@ pub struct Params { pub collator_service: CS, } +/// Result of [`Collator::build_block_and_import`]. +pub struct BuiltBlock { + /// The block that was built. + pub block: Block, + /// The proof that was recorded while building the block. + pub proof: StorageProof, + /// The transaction resulting from building the block. + /// + /// This contains all the state changes. + pub backend_transaction: BackendTransaction>, +} + +impl From> for ParachainCandidate { + fn from(built: BuiltBlock) -> Self { + Self { block: built.block, proof: built.proof } + } +} + /// A utility struct for writing collation logic that makes use of Aura entirely /// or in part. See module docs for more details. 
pub struct Collator { @@ -166,7 +185,7 @@ where inherent_data: (ParachainInherentData, InherentData), proposal_duration: Duration, max_pov_size: usize, - ) -> Result>, Box> { + ) -> Result>, Box> { let mut digest = additional_pre_digest.into().unwrap_or_default(); digest.push(slot_claim.pre_digest.clone()); @@ -183,10 +202,7 @@ where .await .map_err(|e| Box::new(e) as Box)?; - let proposal = match maybe_proposal { - None => return Ok(None), - Some(p) => p, - }; + let Some(proposal) = maybe_proposal else { return Ok(None) }; let sealed_importable = seal::<_, P>( proposal.block, @@ -205,12 +221,22 @@ where .clone(), ); + let Some(backend_transaction) = sealed_importable + .state_action + .as_storage_changes() + .map(|c| c.transaction.clone()) + else { + tracing::error!(target: crate::LOG_TARGET, "Building a block should return storage changes!"); + + return Ok(None) + }; + self.block_import .import_block(sealed_importable) .map_err(|e| Box::new(e) as Box) .await?; - Ok(Some(ParachainCandidate { block, proof: proposal.proof })) + Ok(Some(BuiltBlock { block, proof: proposal.proof, backend_transaction })) } /// Propose, seal, import a block and packaging it into a collation. 
@@ -245,7 +271,7 @@ where let hash = candidate.block.header().hash(); if let Some((collation, block_data)) = - self.collator_service.build_collation(parent_header, hash, candidate) + self.collator_service.build_collation(parent_header, hash, candidate.into()) { block_data.log_size_info(); diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 5740ff2eb861e..12318d10f963e 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -25,6 +25,8 @@ use cumulus_primitives_core::{GetCoreSelectorApi, PersistedValidationData}; use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_primitives::{Block as RelayBlock, Id as ParaId}; +use sp_block_builder::BlockBuilder; +use sp_trie::recorder::IgnoredNodes; use super::CollatorMessage; use crate::{ @@ -120,8 +122,10 @@ where + Send + Sync + 'static, - Client::Api: - AuraApi + GetCoreSelectorApi + AuraUnincludedSegmentApi, + Client::Api: AuraApi + + GetCoreSelectorApi + + AuraUnincludedSegmentApi + + BlockBuilder, Backend: sc_client_api::Backend + 'static, RelayClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, @@ -353,30 +357,59 @@ where validation_data.max_pov_size * 85 / 100 } as usize; - let Ok(Some(candidate)) = collator - .build_block_and_import( - &parent_header, - &slot_claim, - None, - (parachain_inherent_data, other_inherent_data), - authoring_duration, - allowed_pov_size, - ) - .await - else { - tracing::error!(target: crate::LOG_TARGET, "Unable to build block at slot."); - continue; + let Ok(block_rate) = para_client.runtime_api().block_rate(parent_hash) else { + tracing::error!( + target: crate::LOG_TARGET, + "Failed to fetch block rate." 
+ ); + continue + }; + + // TODO: Do not use relay chain slot duration, should also be `block_time`. + let blocks_per_core = if block_rate.block_time < relay_chain_slot_duration { + relay_chain_slot_duration.as_millis() / block_rate.block_time.as_millis() + } else { + 1 }; - let new_block_hash = candidate.block.header().hash(); + let mut blocks = Vec::new(); + let mut proofs = Vec::new(); + let mut ignored_nodes = IgnoredNodes::default(); + + for _ in 0..blocks_per_core { + let Ok(Some(res)) = collator + .build_block_and_import( + &parent_header, + &slot_claim, + None, + (parachain_inherent_data, other_inherent_data), + authoring_duration, + allowed_pov_size, + ) + .await + else { + tracing::error!(target: crate::LOG_TARGET, "Unable to build block at slot."); + continue; + }; + + let new_block_hash = res.block.header().hash(); + + // Announce the newly built block to our peers. + collator.collator_service().announce_block(new_block_hash, None); + + ignored_nodes.extend(IgnoredNodes::from_storage_proof(&res.proof)); + ignored_nodes.extend(IgnoredNodes::from_memory_db(res.backend_transaction)); + + blocks.push(res.block); + proofs.push(res.proof); + } - // Announce the newly built block to our peers. 
- collator.collator_service().announce_block(new_block_hash, None); + let proof = StorageProof::merge(proofs); if let Err(err) = collator_sender.unbounded_send(CollatorMessage { relay_parent, parent_header, - parachain_candidate: candidate, + parachain_candidate: Parachain, validation_code_hash, core_index: *core_index, max_pov_size: validation_data.max_pov_size, diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index bf4f292e238f0..f98359b303a4d 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -83,7 +83,7 @@ use polkadot_primitives::{ use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; use sc_consensus::BlockImport; use sc_utils::mpsc::tracing_unbounded; -use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_api::{ApiExt, ProvideRuntimeApi, StorageProof}; use sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; use sp_consensus_aura::AuraApi; @@ -254,8 +254,10 @@ struct CollatorMessage { pub relay_parent: RelayHash, /// The header of the parent block. pub parent_header: Block::Header, - /// The parachain block candidate. - pub parachain_candidate: ParachainCandidate, + /// The built blocks. + pub blocks: Vec, + /// The storage proof that was collected while building all the blocks. + pub proof: StorageProof, /// The validation code hash at the parent block. pub validation_code_hash: ValidationCodeHash, /// Core index that this block should be submitted on diff --git a/substrate/client/consensus/common/src/block_import.rs b/substrate/client/consensus/common/src/block_import.rs index f90412d677d22..ec0ee98115a8a 100644 --- a/substrate/client/consensus/common/src/block_import.rs +++ b/substrate/client/consensus/common/src/block_import.rs @@ -163,6 +163,16 @@ impl StateAction { StateAction::Skip => true, } } + + /// Returns as storage changes. 
+ pub fn as_storage_changes( + &self, + ) -> Option<&sp_state_machine::StorageChanges>> { + match self { + StateAction::ApplyChanges(StorageChanges::Changes(changes)) => Some(&changes), + _ => None, + } + } } impl From> for StateAction { diff --git a/substrate/primitives/block-builder/Cargo.toml b/substrate/primitives/block-builder/Cargo.toml index dcd6ba8a91d71..95c5e2fc80726 100644 --- a/substrate/primitives/block-builder/Cargo.toml +++ b/substrate/primitives/block-builder/Cargo.toml @@ -16,6 +16,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-api = { workspace = true } sp-inherents = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/primitives/block-builder/src/lib.rs b/substrate/primitives/block-builder/src/lib.rs index 9d03aa4d7a013..a7eadfeac0f47 100644 --- a/substrate/primitives/block-builder/src/lib.rs +++ b/substrate/primitives/block-builder/src/lib.rs @@ -21,9 +21,19 @@ extern crate alloc; +use codec::{Decode, Encode}; +use core::time::Duration; use sp_inherents::{CheckInherentsResult, InherentData}; use sp_runtime::{traits::Block as BlockT, ApplyExtrinsicResult}; +#[derive(Encode, Decode, scale_info::TypeInfo)] +pub struct BlockRate { + /// Time between individual blocks. + pub block_time: Duration, + /// Maximum time to spend building per block. + pub block_building_time: Duration, +} + sp_api::decl_runtime_apis! { /// The `BlockBuilder` api trait that provides the required functionality for building a block. #[api_version(6)] @@ -50,5 +60,7 @@ sp_api::decl_runtime_apis! { /// Check that the inherents are valid. The inherent data will vary from chain to chain. 
fn check_inherents(block: Block, data: InherentData) -> CheckInherentsResult; + + fn block_rate() -> BlockRate; } } From 5a7abe615efa0e996a80fa2e1a29a5dab2268fcb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 14 Apr 2025 17:18:01 +0200 Subject: [PATCH 042/312] slot-based-collator: Do not skip slots Ensure that we do not skip a slot. This could for example happen if the work being done in the previous slot was taking until the end of the last slot or a little bit longer. --- .../src/collators/slot_based/slot_timer.rs | 43 +++++++++++++------ 1 file changed, 31 insertions(+), 12 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs index fb76089cdb0f2..c3ffb883e5077 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs @@ -55,6 +55,8 @@ pub(crate) struct SlotTimer { /// Slot duration of the relay chain. This is used to compute how man block-production /// attempts we should trigger per relay chain block. relay_slot_duration: Duration, + /// Stores the latest slot that was reported by [`Self::wait_until_next_slot`]. + last_reported_slot: Option, _marker: std::marker::PhantomData<(Block, Box)>, } @@ -147,6 +149,7 @@ where time_offset, last_reported_core_num: None, relay_slot_duration, + last_reported_slot: None, _marker: Default::default(), } } @@ -157,30 +160,46 @@ where } /// Returns a future that resolves when the next block production should be attempted. 
- pub async fn wait_until_next_slot(&self) -> Option { + pub async fn wait_until_next_slot(&mut self) -> Option { let Ok(slot_duration) = crate::slot_duration(&*self.client) else { tracing::error!(target: LOG_TARGET, "Failed to fetch slot duration from runtime."); return None }; - let (time_until_next_attempt, timestamp, aura_slot) = compute_next_wake_up_time( - slot_duration, - self.relay_slot_duration, - self.last_reported_core_num, - duration_now(), - self.time_offset, - ); + let (time_until_next_attempt, mut next_timestamp, mut next_aura_slot) = + compute_next_wake_up_time( + slot_duration, + self.relay_slot_duration, + self.last_reported_core_num, + duration_now(), + self.time_offset, + ); - tokio::time::sleep(time_until_next_attempt).await; + match self.last_reported_slot { + // If we already reported a slot, we don't want to skip a slot. But we also don't want + // to go through all the slots if a node was halted for some reason. + Some(ls) if ls + 1 < next_aura_slot && next_aura_slot <= ls + 3 => { + next_aura_slot = ls + 1u64; + next_timestamp = next_aura_slot + .timestamp(slot_duration) + .expect("Timestamp does not overflow; qed"); + }, + None | Some(_) => { + tokio::time::sleep(time_until_next_attempt).await; + }, + } tracing::debug!( target: LOG_TARGET, ?slot_duration, - ?timestamp, - ?aura_slot, + timestamp = ?next_timestamp, + aura_slot = ?next_aura_slot, "New block production opportunity." 
); - Some(SlotInfo { slot: aura_slot, timestamp }) + + self.last_reported_slot = Some(next_aura_slot); + + Some(SlotInfo { slot: next_aura_slot, timestamp: next_timestamp }) } } From 5df69e57e51c99265f40857a68cce89f131d56b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 15 Apr 2025 20:48:55 +0200 Subject: [PATCH 043/312] Make it work --- cumulus/client/collator/src/lib.rs | 1 + cumulus/client/collator/src/service.rs | 116 ++++++++++++------ .../slot_based/block_builder_task.rs | 94 +++++++------- .../collators/slot_based/collation_task.rs | 11 +- .../aura/src/collators/slot_based/mod.rs | 8 +- .../assets/asset-hub-rococo/src/lib.rs | 7 ++ .../assets/asset-hub-westend/src/lib.rs | 7 ++ .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 7 ++ .../bridge-hubs/bridge-hub-westend/src/lib.rs | 7 ++ .../collectives-westend/src/lib.rs | 7 ++ .../runtimes/constants/src/rococo.rs | 4 +- .../coretime/coretime-rococo/src/lib.rs | 7 ++ .../coretime/coretime-westend/src/lib.rs | 7 ++ .../glutton/glutton-westend/src/lib.rs | 7 ++ .../runtimes/people/people-rococo/src/lib.rs | 7 ++ .../runtimes/people/people-westend/src/lib.rs | 7 ++ .../runtimes/testing/penpal/src/lib.rs | 7 ++ .../testing/rococo-parachain/src/lib.rs | 7 ++ .../lib/src/fake_runtime_api/utils.rs | 4 + cumulus/zombienet/examples/small_network.toml | 2 +- polkadot/node/service/src/fake_runtime_api.rs | 4 + polkadot/runtime/rococo/src/lib.rs | 7 ++ polkadot/runtime/westend/src/lib.rs | 7 ++ .../primitives/consensus/slots/src/lib.rs | 15 +-- .../src/overhead/fake_runtime_api.rs | 4 + 25 files changed, 262 insertions(+), 99 deletions(-) diff --git a/cumulus/client/collator/src/lib.rs b/cumulus/client/collator/src/lib.rs index 9cd08bb06c3ae..bbd6521d10c39 100644 --- a/cumulus/client/collator/src/lib.rs +++ b/cumulus/client/collator/src/lib.rs @@ -22,6 +22,7 @@ use cumulus_primitives_core::{ }; use sc_client_api::BlockBackend; +use service::ServiceInterface; use sp_api::ProvideRuntimeApi; use 
sp_core::traits::SpawnNamed; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; diff --git a/cumulus/client/collator/src/service.rs b/cumulus/client/collator/src/service.rs index 921f1890f783e..ec22cb38edd79 100644 --- a/cumulus/client/collator/src/service.rs +++ b/cumulus/client/collator/src/service.rs @@ -22,7 +22,7 @@ use cumulus_client_network::WaitToAnnounce; use cumulus_primitives_core::{CollationInfo, CollectCollationInfo, ParachainBlockData}; use sc_client_api::BlockBackend; -use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_api::{ApiExt, ProvideRuntimeApi, StorageProof}; use sp_consensus::BlockStatus; use sp_core::traits::SpawnNamed; use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT, Zero}; @@ -59,6 +59,17 @@ pub trait ServiceInterface { candidate: ParachainCandidate, ) -> Option<(Collation, ParachainBlockData)>; + /// Build a multi-block collation. + /// + /// Does the same as [`Self::build_collation`], but includes multiple blocks into one collation. + /// The given `parent_header` should be the header from the parent of the first block. + fn build_multi_block_collation( + &self, + parent_header: &Block::Header, + blocks: Vec, + proof: StorageProof, + ) -> Option<(Collation, ParachainBlockData)>; + /// Inform networking systems that the block should be announced after a signal has /// been received to indicate the block has been seconded by a relay-chain validator. /// @@ -215,39 +226,53 @@ where /// as it fetches underlying runtime API data. /// /// This also returns the unencoded parachain block data, in case that is desired. 
- pub fn build_collation( + fn build_multi_block_collation( &self, parent_header: &Block::Header, - block_hash: Block::Hash, - candidate: ParachainCandidate, + blocks: Vec, + proof: StorageProof, ) -> Option<(Collation, ParachainBlockData)> { - let block = candidate.block; - - let compact_proof = match candidate - .proof - .into_compact_proof::>(*parent_header.state_root()) - { - Ok(proof) => proof, - Err(e) => { - tracing::error!(target: "cumulus-collator", "Failed to compact proof: {:?}", e); - return None - }, - }; + let compact_proof = + match proof.into_compact_proof::>(*parent_header.state_root()) { + Ok(proof) => proof, + Err(e) => { + tracing::error!(target: "cumulus-collator", "Failed to compact proof: {:?}", e); + return None + }, + }; - // Create the parachain block data for the validators. - let (collation_info, api_version) = self - .fetch_collation_info(block_hash, block.header()) - .map_err(|e| { - tracing::error!( - target: LOG_TARGET, - error = ?e, - "Failed to collect collation info.", - ) - }) - .ok() - .flatten()?; + let mut api_version = 0; + let mut upward_messages = Vec::new(); + let mut horizontal_messages = Vec::new(); + let mut new_validation_code = None; + let mut processed_downward_messages = 0; + let mut hrmp_watermark = None; + let mut head_data = None; + + for block in &blocks { + // Create the parachain block data for the validators. 
+ let (collation_info, version) = self + .fetch_collation_info(block.hash(), block.header()) + .map_err(|e| { + tracing::error!( + target: LOG_TARGET, + error = ?e, + "Failed to collect collation info.", + ) + }) + .ok() + .flatten()?; + + upward_messages.extend(collation_info.upward_messages); + horizontal_messages.extend(collation_info.horizontal_messages); + api_version = version; + new_validation_code = new_validation_code.take().or(collation_info.new_validation_code); + processed_downward_messages += collation_info.processed_downward_messages; + hrmp_watermark = Some(collation_info.hrmp_watermark); + head_data = Some(collation_info.head_data); + } - let block_data = ParachainBlockData::::new(vec![block], compact_proof); + let block_data = ParachainBlockData::::new(blocks, compact_proof); let pov = polkadot_node_primitives::maybe_compress_pov(PoV { block_data: BlockData(if api_version >= 3 { @@ -266,8 +291,7 @@ where }), }); - let upward_messages = collation_info - .upward_messages + let upward_messages = upward_messages .try_into() .map_err(|e| { tracing::error!( @@ -277,8 +301,7 @@ where ) }) .ok()?; - let horizontal_messages = collation_info - .horizontal_messages + let horizontal_messages = horizontal_messages .try_into() .map_err(|e| { tracing::error!( @@ -291,11 +314,12 @@ where let collation = Collation { upward_messages, - new_validation_code: collation_info.new_validation_code, - processed_downward_messages: collation_info.processed_downward_messages, + new_validation_code, + processed_downward_messages, horizontal_messages, - hrmp_watermark: collation_info.hrmp_watermark, - head_data: collation_info.head_data, + // If these are `None`, there was no block. 
+ hrmp_watermark: hrmp_watermark?, + head_data: head_data?, proof_of_validity: MaybeCompressedPoV::Compressed(pov), }; @@ -328,10 +352,15 @@ where fn build_collation( &self, parent_header: &Block::Header, - block_hash: Block::Hash, + _: Block::Hash, candidate: ParachainCandidate, ) -> Option<(Collation, ParachainBlockData)> { - CollatorService::build_collation(self, parent_header, block_hash, candidate) + CollatorService::build_multi_block_collation( + self, + parent_header, + vec![candidate.block], + candidate.proof, + ) } fn announce_with_barrier( @@ -344,4 +373,13 @@ where fn announce_block(&self, block_hash: Block::Hash, data: Option>) { (self.announce_block)(block_hash, data) } + + fn build_multi_block_collation( + &self, + parent_header: &::Header, + blocks: Vec, + proof: StorageProof, + ) -> Option<(Collation, ParachainBlockData)> { + CollatorService::build_multi_block_collation(self, parent_header, blocks, proof) + } } diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 12318d10f963e..712a64b180b60 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -15,19 +15,6 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . 
-use codec::{Codec, Encode}; - -use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; -use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; -use cumulus_client_consensus_proposer::ProposerInterface; -use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::{GetCoreSelectorApi, PersistedValidationData}; -use cumulus_relay_chain_interface::RelayChainInterface; - -use polkadot_primitives::{Block as RelayBlock, Id as ParaId}; -use sp_block_builder::BlockBuilder; -use sp_trie::recorder::IgnoredNodes; - use super::CollatorMessage; use crate::{ collator::{self as collator_util}, @@ -41,18 +28,31 @@ use crate::{ }, LOG_TARGET, }; +use codec::{Codec, Encode}; +use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; +use cumulus_client_consensus_proposer::ProposerInterface; +use cumulus_primitives_aura::AuraUnincludedSegmentApi; +use cumulus_primitives_core::{GetCoreSelectorApi, PersistedValidationData}; +use cumulus_relay_chain_interface::RelayChainInterface; use futures::prelude::*; +use polkadot_primitives::{Block as RelayBlock, Id as ParaId}; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; use sc_consensus::BlockImport; -use sp_api::ProvideRuntimeApi; +use sp_api::{ProvideRuntimeApi, StorageProof}; use sp_application_crypto::AppPublic; +use sp_block_builder::BlockBuilder; use sp_blockchain::HeaderBackend; use sp_consensus_aura::AuraApi; use sp_core::crypto::Pair; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; -use std::{sync::Arc, time::Duration}; +use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT, Member}; +use sp_trie::recorder::IgnoredNodes; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; /// 
Parameters for [`run_block_builder`]. pub struct BuilderTaskParams< @@ -256,11 +256,11 @@ where continue } - let parent_header = parent.header; + let pov_parent_header = parent.header; // We mainly call this to inform users at genesis if there is a mismatch with the // on-chain data. - collator.collator_service().check_block_status(parent_hash, &parent_header); + collator.collator_service().check_block_status(parent_hash, &pov_parent_header); let Ok(relay_slot) = sc_consensus_babe::find_pre_digest::(relay_parent_header) @@ -309,28 +309,12 @@ where ); let validation_data = PersistedValidationData { - parent_head: parent_header.encode().into(), + parent_head: pov_parent_header.encode().into(), relay_parent_number: *relay_parent_header.number(), relay_parent_storage_root: *relay_parent_header.state_root(), max_pov_size: *max_pov_size, }; - let (parachain_inherent_data, other_inherent_data) = match collator - .create_inherent_data( - relay_parent, - &validation_data, - parent_hash, - slot_claim.timestamp(), - ) - .await - { - Err(err) => { - tracing::error!(target: crate::LOG_TARGET, ?err); - break - }, - Ok(x) => x, - }; - let validation_code_hash = match code_hash_provider.code_hash_at(parent_hash) { None => { tracing::error!(target: crate::LOG_TARGET, ?parent_hash, "Could not fetch validation code hash"); @@ -366,7 +350,7 @@ where }; // TODO: Do not use relay chain slot duration, should also be `block_time`. - let blocks_per_core = if block_rate.block_time < relay_chain_slot_duration { + let blocks_per_core = if dbg!(block_rate.block_time) < dbg!(relay_chain_slot_duration) { relay_chain_slot_duration.as_millis() / block_rate.block_time.as_millis() } else { 1 @@ -375,8 +359,29 @@ where let mut blocks = Vec::new(); let mut proofs = Vec::new(); let mut ignored_nodes = IgnoredNodes::default(); + // We redefine it as mutable here, because above this value should not change. 
+ let mut parent_hash = parent_hash; + let mut parent_header = pov_parent_header.clone(); for _ in 0..blocks_per_core { + let expected_block_end = Instant::now() + block_rate.block_time; + + let (parachain_inherent_data, other_inherent_data) = match collator + .create_inherent_data( + relay_parent, + &validation_data, + parent_hash, + slot_claim.timestamp(), + ) + .await + { + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to create inherent data."); + return + }, + Ok(x) => x, + }; + let Ok(Some(res)) = collator .build_block_and_import( &parent_header, @@ -392,24 +397,31 @@ where continue; }; - let new_block_hash = res.block.header().hash(); + parent_hash = res.block.header().hash(); + parent_header = res.block.header().clone(); // Announce the newly built block to our peers. - collator.collator_service().announce_block(new_block_hash, None); + collator.collator_service().announce_block(parent_hash, None); - ignored_nodes.extend(IgnoredNodes::from_storage_proof(&res.proof)); + ignored_nodes + .extend(IgnoredNodes::from_storage_proof::>(&res.proof)); ignored_nodes.extend(IgnoredNodes::from_memory_db(res.backend_transaction)); blocks.push(res.block); proofs.push(res.proof); + + if let Some(sleep) = expected_block_end.checked_duration_since(Instant::now()) { + tokio::time::sleep(sleep).await; + } } let proof = StorageProof::merge(proofs); if let Err(err) = collator_sender.unbounded_send(CollatorMessage { relay_parent, - parent_header, - parachain_candidate: Parachain, + parent_header: pov_parent_header, + blocks, + proof, validation_code_hash, core_index: *core_index, max_pov_size: validation_data.max_pov_size, diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs index 0414ebf2e1182..fa063d771ffda 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs +++ 
b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs @@ -122,20 +122,19 @@ async fn handle_collation_message collation, None => { - tracing::warn!(target: LOG_TARGET, %hash, ?number, ?core_index, "Unable to build collation."); + tracing::warn!(target: LOG_TARGET, ?core_index, "Unable to build collation."); return; }, }; @@ -171,7 +170,7 @@ async fn handle_collation_message + GetCoreSelectorApi + AuraUnincludedSegmentApi, + Client::Api: AuraApi + + GetCoreSelectorApi + + AuraUnincludedSegmentApi + + BlockBuilder, Backend: sc_client_api::Backend + 'static, RClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 52952d47dd22e..8a7acda611458 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1404,6 +1404,13 @@ impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: core::time::Duration::from_millis(500), + block_building_time: core::time::Duration::from_millis(500), + } + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 4cf540a2f8b19..fbac4e5a95e6f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1558,6 +1558,13 @@ impl_runtime_apis! 
{ ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: core::time::Duration::from_secs(6), + block_building_time: core::time::Duration::from_secs(2), + } + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 31dd66706aede..0ff7ea8d7b3aa 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -775,6 +775,13 @@ impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: core::time::Duration::from_secs(6), + block_building_time: core::time::Duration::from_secs(2), + } + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index e9b3a16d7be5c..f8dae607f2a72 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -726,6 +726,13 @@ impl_runtime_apis! 
{ ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: core::time::Duration::from_secs(6), + block_building_time: core::time::Duration::from_secs(2), + } + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 721a513b607d4..accc75af538bc 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -897,6 +897,13 @@ impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: core::time::Duration::from_secs(6), + block_building_time: core::time::Duration::from_secs(2), + } + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/constants/src/rococo.rs b/cumulus/parachains/runtimes/constants/src/rococo.rs index be4b5c9711ccb..478c851763315 100644 --- a/cumulus/parachains/runtimes/constants/src/rococo.rs +++ b/cumulus/parachains/runtimes/constants/src/rococo.rs @@ -112,10 +112,10 @@ pub mod consensus { /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included /// into the relay chain. - pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; + pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 36; /// How many parachain blocks are processed by the relay chain per parent. Limits the /// number of blocks authored per slot. - pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; + pub const BLOCK_PROCESSING_VELOCITY: u32 = 12; /// Relay chain slot duration, in milliseconds. 
pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 89345846acc5d..55a0650ef2da8 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -758,6 +758,13 @@ impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: core::time::Duration::from_secs(6), + block_building_time: core::time::Duration::from_secs(2), + } + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index c2c49067d877a..91d6375b0f432 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -759,6 +759,13 @@ impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: core::time::Duration::from_secs(6), + block_building_time: core::time::Duration::from_secs(2), + } + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index ff1d30eac7e07..11b794fb499a6 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -392,6 +392,13 @@ impl_runtime_apis! 
{ fn check_inherents(block: Block, data: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: core::time::Duration::from_secs(6), + block_building_time: core::time::Duration::from_secs(2), + } + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 0e3189a1dc4d9..e2071ee6ed62f 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -712,6 +712,13 @@ impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: core::time::Duration::from_secs(6), + block_building_time: core::time::Duration::from_secs(2), + } + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 3997707ed2a96..e1da3641b0a41 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -710,6 +710,13 @@ impl_runtime_apis! 
{ ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: core::time::Duration::from_secs(6), + block_building_time: core::time::Duration::from_secs(2), + } + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 78b59562772ee..4ae1e75ab7b8d 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -989,6 +989,13 @@ impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: core::time::Duration::from_secs(6), + block_building_time: core::time::Duration::from_secs(2), + } + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 78a3df8349bc0..4326d64a8e508 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -752,6 +752,13 @@ impl_runtime_apis! 
{ fn check_inherents(block: Block, data: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: core::time::Duration::from_secs(6), + block_building_time: core::time::Duration::from_secs(2), + } + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs index 858275d189a67..ac8c2be3ac448 100644 --- a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs +++ b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs @@ -99,6 +99,10 @@ macro_rules! impl_node_runtime_apis { ) -> sp_inherents::CheckInherentsResult { unimplemented!() } + + fn block_rate() -> sp_block_builder::BlockRate { + unimplemented!() + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<$block> for $runtime { diff --git a/cumulus/zombienet/examples/small_network.toml b/cumulus/zombienet/examples/small_network.toml index 64765566471a0..3dfcbd935580b 100644 --- a/cumulus/zombienet/examples/small_network.toml +++ b/cumulus/zombienet/examples/small_network.toml @@ -22,4 +22,4 @@ name = "charlie" validator = true image = "parity/polkadot-parachain:latest" command = "polkadot-parachain" -args = ["--force-authoring"] +args = ["--force-authoring", "--authoring=slot-based"] diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index 4e31c72d334f7..e3e4973c86bfd 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -106,6 +106,10 @@ sp_api::impl_runtime_apis! 
{ ) -> sp_inherents::CheckInherentsResult { unimplemented!() } + + fn block_rate() -> sp_block_builder::BlockRate { + unimplemented!() + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index ba33cc9ecfa0d..8e30f23799d46 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1974,6 +1974,13 @@ sp_api::impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: core::time::Duration::from_secs(6), + block_building_time: core::time::Duration::from_secs(2), + } + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 80d3fa139bbe8..43ee5eef4d1f5 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2046,6 +2046,13 @@ sp_api::impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: core::time::Duration::from_secs(6), + block_building_time: core::time::Duration::from_secs(2), + } + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/substrate/primitives/consensus/slots/src/lib.rs b/substrate/primitives/consensus/slots/src/lib.rs index 21f6f2e95b1bd..4279184ac2116 100644 --- a/substrate/primitives/consensus/slots/src/lib.rs +++ b/substrate/primitives/consensus/slots/src/lib.rs @@ -164,20 +164,21 @@ impl SlotDuration { pub const fn from_millis(millis: u64) -> Self { Self(millis) } -} -impl SlotDuration { + /// Returns `self` as [`core::time::Duration`]. 
+ pub const fn as_duration(&self) -> core::time::Duration { + core::time::Duration::from_millis(self.0) + } + /// Returns `self` as a `u64` representing the duration in milliseconds. pub const fn as_millis(&self) -> u64 { self.0 } } -#[cfg(feature = "std")] -impl SlotDuration { - /// Returns `self` as [`core::time::Duration`]. - pub const fn as_duration(&self) -> core::time::Duration { - core::time::Duration::from_millis(self.0) +impl From for SlotDuration { + fn from(duration: core::time::Duration) -> Self { + Self::from_millis(duration.as_millis() as u64) } } diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/fake_runtime_api.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/fake_runtime_api.rs index 653908a5a205f..9c3f9b3d61e5f 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/fake_runtime_api.rs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/fake_runtime_api.rs @@ -81,6 +81,10 @@ sp_api::impl_runtime_apis! { fn check_inherents(_: Block, _: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { unimplemented!() } + + fn block_rate() -> sp_block_builder::BlockRate { + unimplemented!() + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { From ce219798fc9e19cf3086b91b0dcc2758df8a9c6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 16 Apr 2025 21:57:19 +0200 Subject: [PATCH 044/312] Start writing a test --- .../zombienet-sdk-helpers/src/lib.rs | 10 +- ...lastic_scaling_multiple_blocks_per_slot.rs | 14 +++ .../tests/elastic_scaling/mod.rs | 14 +++ cumulus/zombienet/zombienet-sdk/tests/lib.rs | 17 ++++ .../tests/multiple_blocks_per_pov.rs | 97 +++++++++++++++++++ 5 files changed, 149 insertions(+), 3 deletions(-) create mode 100644 cumulus/zombienet/zombienet-sdk/tests/multiple_blocks_per_pov.rs diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 35525ba477ab1..d0cabb6abbcc6 100644 
--- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -128,9 +128,13 @@ pub async fn assert_finalized_para_throughput( Ok(()) } -// Helper function for asserting the throughput of parachains (total number of backed candidates in -// a window of relay chain blocks), after the first session change. -// Blocks with session changes are generally ignores. + +// Helper function for asserting the throughput of parachains. +// +// The throughput is measured as total number of backed candidates in a window of relay chain blocks, +// after the first session change. Blocks with session changes are generally ignored. +// +// `stop_after`: Number of relay chain blocks after which the recording should be stopped. pub async fn assert_para_throughput( relay_client: &OnlineClient, stop_after: u32, diff --git a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/elastic_scaling_multiple_blocks_per_slot.rs b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/elastic_scaling_multiple_blocks_per_slot.rs index 660440fb7998c..d18128da4067c 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/elastic_scaling_multiple_blocks_per_slot.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/elastic_scaling_multiple_blocks_per_slot.rs @@ -1,6 +1,20 @@ +// This file is part of Cumulus. + // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + use anyhow::anyhow; use cumulus_zombienet_sdk_helpers::{ diff --git a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/mod.rs index 658c4af684eba..5cabd5c88839c 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/mod.rs @@ -1,4 +1,18 @@ +// This file is part of Cumulus. + // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + mod elastic_scaling_multiple_blocks_per_slot; diff --git a/cumulus/zombienet/zombienet-sdk/tests/lib.rs b/cumulus/zombienet/zombienet-sdk/tests/lib.rs index 55df3e6c0bd86..a0b5290cb3183 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/lib.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/lib.rs @@ -1,5 +1,22 @@ +// This file is part of Cumulus. + // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #[cfg(feature = "zombie-ci")] mod elastic_scaling; + +#[cfg(feature = "zombie-ci")] +mod multiple_blocks_per_pov; diff --git a/cumulus/zombienet/zombienet-sdk/tests/multiple_blocks_per_pov.rs b/cumulus/zombienet/zombienet-sdk/tests/multiple_blocks_per_pov.rs new file mode 100644 index 0000000000000..4da361c8f19ed --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/multiple_blocks_per_pov.rs @@ -0,0 +1,97 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::anyhow; + +use cumulus_zombienet_sdk_helpers::{ + assert_finality_lag, assert_para_throughput, create_assign_core_call, +}; +use polkadot_primitives::Id as ParaId; +use serde_json::json; +use subxt::{OnlineClient, PolkadotConfig}; +use subxt_signer::sr25519::dev; +use zombienet_sdk::{NetworkConfig, NetworkConfigBuilder}; + +const PARA_ID: u32 = 2400; + +/// This test spawns a parachain network. 
+#[tokio::test(flavor = "multi_thread")] +async fn multiple_blocks_per_pov() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let config = build_network_config().await?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; + + let relay_node = network.get_node("validator-0")?; + let para_node_elastic = network.get_node("collator-1")?; + + let relay_client: OnlineClient = relay_node.wait_client().await?; + let alice = dev::alice(); + assert_para_throughput( + &relay_client, + 10, + [(ParaId::from(PARA_ID), 8..11)].into_iter().collect(), + ) + .await?; + assert_finality_lag(¶_node_elastic.wait_client().await?, 5).await?; + + log::info!("Test finished successfully"); + Ok(()) +} + +async fn build_network_config() -> Result { + let images = zombienet_sdk::environment::get_images_from_env(); + log::info!("Using images: {images:?}"); + NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=trace").into()]) + .with_default_resources(|resources| { + resources.with_request_cpu(2).with_request_memory("2G") + }) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "num_cores": 7, + "max_validators_per_core": 1 + } + } + } + })) + // Have to set a `with_node` outside of the loop below, so that `r` has the right + // type. 
+ .with_node(|node| node.with_name("validator-0")); + (1..9).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) + }) + .with_parachain(|p| { + p.with_id(PARA_ID) + .with_default_command("polkadot-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("asset-hub-rococo") + .with_default_args(vec![ + ("--authoring").into(), + ("slot-based").into(), + ("-lparachain=trace,aura=debug").into(), + ]) + .with_collator(|n| n.with_name("collator-0")) + .with_collator(|n| n.with_name("collator-1")) + .with_collator(|n| n.with_name("collator-2")) + }) + .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { + Ok(val) => global_settings.with_base_dir(val), + _ => global_settings, + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + }) +} From 2701420992c2db25e86b47dcc862ae58d992d43a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 2 May 2025 23:24:53 +0200 Subject: [PATCH 045/312] Adds failing test --- .../src/validate_block/tests.rs | 61 +++++++++++++++++-- cumulus/test/runtime/src/lib.rs | 7 +++ cumulus/test/runtime/src/test_pallet.rs | 20 ++++++ polkadot/runtime/test-runtime/src/lib.rs | 7 +++ 4 files changed, 91 insertions(+), 4 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 30b92ccdcc97e..fc354307382f9 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -197,7 +197,6 @@ fn build_multiple_blocks_with_witness( let built_block = block_builder.build().unwrap(); futures::executor::block_on({ - dbg!(i); let parent_hash = *built_block.block.header.parent_hash(); let state = client.state_at(parent_hash).unwrap(); @@ -218,11 +217,10 @@ fn build_multiple_blocks_with_witness( }) .unwrap(); - 
ignored_nodes.extend(&IgnoredNodes::from_storage_proof::( + ignored_nodes.extend(IgnoredNodes::from_storage_proof::( &built_block.proof.clone().unwrap(), )); - ignored_nodes - .extend(&IgnoredNodes::from_memory_db(built_block.storage_changes.transaction)); + ignored_nodes.extend(IgnoredNodes::from_memory_db(built_block.storage_changes.transaction)); proof = StorageProof::merge([proof, built_block.proof.unwrap()]); parent_head = built_block.block.header.clone(); @@ -503,6 +501,61 @@ fn validate_block_works_with_child_tries() { assert_eq!(header, res_header); } +#[test] +fn state_changes_in_multiple_blocks_are_applied_in_exact_order() { + sp_tracing::try_init_simple(); + + let blocks_per_pov = 12; + let (client, genesis_head) = create_elastic_scaling_test_client(blocks_per_pov); + + // 1. Build the initial block that stores values in the map. + let TestBlockData { block: initial_block_data, .. } = build_block_with_witness( + &client, + vec![generate_extrinsic_with_pair( + &client, + Alice.into(), + TestPalletCall::store_values_in_map { max_key: 4095 }, + Some(0), // Nonce 0 for Alice + )], + genesis_head.clone(), + RelayStateSproofBuilder { current_slot: 1.into(), ..Default::default() }, + ); + + let initial_block = initial_block_data.blocks()[0].clone(); + futures::executor::block_on(client.import(BlockOrigin::Own, initial_block.clone())).unwrap(); + let initial_block_header = initial_block.header().clone(); + + // 2. Build the PoV block that removes values from the map. + let TestBlockData { block: pov_block_data, validation_data: pov_validation_data } = + build_multiple_blocks_with_witness( + &client, + initial_block_header.clone(), // Start building PoV from the initial block's header + RelayStateSproofBuilder { current_slot: 2.into(), ..Default::default() }, + blocks_per_pov, + |i| { + // Each block `i` (0-11) removes key `116 + i`. 
+ let key_to_remove = 116 + i; + vec![generate_extrinsic_with_pair( + &client, + Bob.into(), // Use Bob to avoid nonce conflicts with Alice + TestPalletCall::remove_value_from_map { key: key_to_remove }, + Some(i), // Nonce `i` for Bob + )] + }, + ); + + // 3. Validate the PoV. + let sealed_pov_block = seal_block(pov_block_data, &client); + let final_pov_header = sealed_pov_block.blocks().last().unwrap().header().clone(); + let res_header = call_validate_block_elastic_scaling( + initial_block_header, // The parent is the head of the initial block before the PoV + sealed_pov_block, + pov_validation_data.relay_parent_storage_root, + ) + .expect("Calls `validate_block` after building the PoV"); + assert_eq!(final_pov_header, res_header); +} + #[test] #[cfg(feature = "experimental-ump-signals")] fn validate_block_handles_ump_signal() { diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index c55f1ad55d45e..cf084b3ca6def 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -547,6 +547,13 @@ impl_runtime_apis! { fn check_inherents(block: Block, data: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: core::time::Duration::from_secs(6), + block_building_time: core::time::Duration::from_secs(2), + } + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index 67feaaf3102e0..1d96621655f6c 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -34,6 +34,10 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + cumulus_pallet_parachain_system::Config {} + /// A simple storage map for testing purposes. 
+ #[pallet::storage] + pub type TestMap = StorageMap<_, Twox64Concat, u32, (), ValueQuery>; + #[pallet::hooks] impl Hooks> for Pallet {} @@ -85,6 +89,22 @@ pub mod pallet { Ok(()) } + + /// Stores `()` in `TestMap` for keys from 0 up to `max_key`. + #[pallet::weight(0)] + pub fn store_values_in_map(_: OriginFor, max_key: u32) -> DispatchResult { + for i in 0..=max_key { + TestMap::::insert(i, ()); + } + Ok(()) + } + + /// Removes the value associated with `key` from `TestMap`. + #[pallet::weight(0)] + pub fn remove_value_from_map(_: OriginFor, key: u32) -> DispatchResult { + TestMap::::remove(key); + Ok(()) + } } #[derive(frame_support::DefaultNoBound)] diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index efa8bdd141300..6bad902f3bdd0 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -910,6 +910,13 @@ sp_api::impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: core::time::Duration::from_secs(6), + block_building_time: core::time::Duration::from_secs(2), + } + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { From 45b336c4541ad641299fdfca074628bc19810a92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 16 May 2025 17:13:40 +0200 Subject: [PATCH 046/312] Fix the test --- .../src/validate_block/implementation.rs | 91 ++++++++++++------- .../src/validate_block/tests.rs | 28 ++++-- .../src/validate_block/trie_cache.rs | 20 ++++ .../src/validate_block/trie_recorder.rs | 12 ++- cumulus/test/runtime/src/lib.rs | 13 +-- 5 files changed, 116 insertions(+), 48 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index bbd34d5b18d42..f92878b59a18c 100644 --- 
a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -17,15 +17,17 @@ //! The actual implementation of the validate block functionality. use super::{trie_cache, trie_recorder, MemoryOptimizedValidationParams}; -use cumulus_primitives_core::{ - relay_chain::Hash as RHash, ParachainBlockData, PersistedValidationData, -}; -use cumulus_primitives_parachain_inherent::ParachainInherentData; - use crate::{ClaimQueueOffset, CoreSelector}; use alloc::vec::Vec; use codec::{Decode, Encode}; -use cumulus_primitives_core::relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}; +use cumulus_primitives_core::{ + relay_chain::{ + vstaging::{UMPSignal, UMP_SEPARATOR}, + Hash as RHash, + }, + ParachainBlockData, PersistedValidationData, +}; +use cumulus_primitives_parachain_inherent::ParachainInherentData; use frame_support::{ traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType}, BoundedVec, @@ -36,10 +38,12 @@ use polkadot_parachain_primitives::primitives::{ use sp_core::storage::{ChildInfo, StateVersion}; use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::KillStorageResult; -use sp_runtime::traits::{Block as BlockT, ExtrinsicLike, HashingFor, Header as HeaderT}; +use sp_runtime::traits::{ + Block as BlockT, ExtrinsicLike, Hash as HashT, HashingFor, Header as HeaderT, +}; use sp_state_machine::OverlayedChanges; -use sp_trie::ProofSizeProvider; -use trie_recorder::SizeOnlyRecorderProvider; +use sp_trie::{HashDBT, ProofSizeProvider, EMPTY_PREFIX}; +use trie_recorder::{SeenNodes, SizeOnlyRecorderProvider}; type Ext<'a, Block, Backend> = sp_state_machine::Ext<'a, HashingFor, Backend>; @@ -95,6 +99,8 @@ where B::Extrinsic: ExtrinsicCall, ::Call: IsSubType>, { + // sp_runtime::runtime_logger::RuntimeLogger::init(); + let _guard = ( // Replace storage calls with our own implementations sp_io::storage::host_read.replace_implementation(host_storage_read), @@ -164,7 
+170,7 @@ where let num_blocks = blocks.len(); // Create the db - let db = match proof.to_memory_db(Some(parent_header.state_root())) { + let mut db = match proof.to_memory_db(Some(parent_header.state_root())) { Ok((db, _)) => db, Err(_) => panic!("Compact proof decoding failure."), }; @@ -172,29 +178,32 @@ where core::mem::drop(proof); let cache_provider = trie_cache::CacheProvider::new(); - // We use the storage root of the `parent_head` to ensure that it is the correct root. - // This is already being done above while creating the in-memory db, but let's be paranoid!! - let backend = sp_state_machine::TrieBackendBuilder::new_with_cache( - db, - *parent_header.state_root(), - cache_provider, - ) - .build(); - - // We use the same recorder when executing all blocks. So, each node only contributes once to - // the total size of the storage proof. This recorder should only be used for `execute_block`. - let mut execute_recorder = SizeOnlyRecorderProvider::default(); - // `backend` with the `execute_recorder`. As the `execute_recorder`, this should only be used - // for `execute_block`. - let execute_backend = sp_state_machine::TrieBackendBuilder::wrap(&backend) - .with_recorder(execute_recorder.clone()) + let seen_nodes = SeenNodes::>::default(); + + for (block_index, block) in blocks.into_iter().enumerate() { + // We use the storage root of the `parent_head` to ensure that it is the correct root. + // This is already being done above while creating the in-memory db, but let's be paranoid!! + let backend = sp_state_machine::TrieBackendBuilder::new_with_cache( + &db, + *parent_header.state_root(), + &cache_provider, + ) .build(); - // We let all blocks contribute to the same overlay. Data written by a previous block will be - // directly accessible without going to the db. - let mut overlay = OverlayedChanges::default(); + // We use the same recorder when executing all blocks. So, each node only contributes once + // to the total size of the storage proof. 
This recorder should only be used for + // `execute_block`. + let mut execute_recorder = SizeOnlyRecorderProvider::with_seen_nodes(seen_nodes.clone()); + // `backend` with the `execute_recorder`. As the `execute_recorder`, this should only be + // used for `execute_block`. + let execute_backend = sp_state_machine::TrieBackendBuilder::wrap(&backend) + .with_recorder(execute_recorder.clone()) + .build(); + + // We let all blocks contribute to the same overlay. Data written by a previous block will + // be directly accessible without going to the db. + let mut overlay = OverlayedChanges::default(); - for (block_index, block) in blocks.into_iter().enumerate() { parent_header = block.header().clone(); let inherent_data = extract_parachain_inherent_data(&block); @@ -298,7 +307,27 @@ where ); } }, - ) + ); + + if block_index + 1 != num_blocks { + let mut changes = overlay + .drain_storage_changes( + &backend, + ::Version::get().state_version(), + ) + .unwrap(); + + drop(backend); + + changes.transaction.drain().into_iter().for_each(|(_, (v, c))| { + if c > 0 { + db.insert(EMPTY_PREFIX, &v); + + let hash = HashingFor::::hash(&v); + seen_nodes.borrow_mut().insert(hash); + } + }); + } } if !upward_message_signals.is_empty() { diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index fc354307382f9..8270b80c3c136 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -33,6 +33,7 @@ use polkadot_parachain_primitives::primitives::ValidationResult; use relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sp_api::{ApiExt, Core, ProofRecorder, ProvideRuntimeApi}; +use sp_consensus_slots::SlotDuration; use sp_core::H256; use sp_runtime::traits::{BlakeTwo256, Block as BlockT, Header as HeaderT}; use 
sp_trie::{proof_size_extension::ProofSizeExt, recorder::IgnoredNodes, StorageProof}; @@ -101,7 +102,7 @@ fn create_test_client() -> (Client, Header) { fn create_elastic_scaling_test_client(blocks_per_pov: u32) -> (Client, Header) { let mut builder = TestClientBuilder::new(); builder.genesis_init_mut().wasm = Some( - test_runtime::elastic_scaling_multi_block_slot::WASM_BINARY + test_runtime::elastic_scaling_500ms::WASM_BINARY .expect("You need to build the WASM binaries to run the tests!") .to_vec(), ); @@ -156,14 +157,25 @@ fn build_multiple_blocks_with_witness( num_blocks: u32, extra_extrinsics: impl Fn(u32) -> Vec, ) -> TestBlockData { - let timestamp = std::time::SystemTime::now() - .duration_since(std::time::SystemTime::UNIX_EPOCH) - .expect("Time is always after UNIX_EPOCH; qed") - .as_millis() as u64; let parent_head_root = *parent_head.state_root(); sproof_builder.para_id = test_runtime::PARACHAIN_ID.into(); sproof_builder.included_para_head = Some(HeadData(parent_head.encode())); - sproof_builder.current_slot = (timestamp / 6000).into(); + + let timestamp = if sproof_builder.current_slot == 0u64 { + let timestamp = std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .expect("Time is always after UNIX_EPOCH; qed") + .as_millis() as u64; + sproof_builder.current_slot = (timestamp / 6000).into(); + + timestamp + } else { + sproof_builder + .current_slot + .timestamp(SlotDuration::from_millis(6000)) + .unwrap() + .as_millis() + }; let validation_data = PersistedValidationData { relay_parent_number: 1, @@ -515,7 +527,7 @@ fn state_changes_in_multiple_blocks_are_applied_in_exact_order() { &client, Alice.into(), TestPalletCall::store_values_in_map { max_key: 4095 }, - Some(0), // Nonce 0 for Alice + Some(0), )], genesis_head.clone(), RelayStateSproofBuilder { current_slot: 1.into(), ..Default::default() }, @@ -539,7 +551,7 @@ fn state_changes_in_multiple_blocks_are_applied_in_exact_order() { &client, Bob.into(), // Use Bob to avoid 
nonce conflicts with Alice TestPalletCall::remove_value_from_map { key: key_to_remove }, - Some(i), // Nonce `i` for Bob + Some(i), )] }, ); diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs index 9590af993e9f9..e7ed215dd64e5 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs @@ -84,6 +84,26 @@ impl CacheProvider { } } +impl TrieCacheProvider for &&CacheProvider { + type Cache<'a> + = TrieCache<'a, H> + where + Self: 'a, + H: 'a; + + fn as_trie_db_cache(&self, storage_root: ::Out) -> Self::Cache<'_> { + TrieCacheProvider::::as_trie_db_cache(**self, storage_root) + } + + fn as_trie_db_mut_cache(&self) -> Self::Cache<'_> { + TrieCacheProvider::::as_trie_db_mut_cache(**self) + } + + fn merge<'a>(&'a self, other: Self::Cache<'a>, new_root: ::Out) { + TrieCacheProvider::merge(**self, other, new_root) + } +} + impl TrieCacheProvider for &CacheProvider { type Cache<'a> = TrieCache<'a, H> diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 9f84843c56265..40319f3ecac2f 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -30,6 +30,8 @@ use core::cell::{RefCell, RefMut}; use sp_trie::{NodeCodec, ProofSizeProvider, StorageProof}; use trie_db::{Hasher, RecordedForKey, TrieAccess}; +pub(crate) type SeenNodes = Rc::Out>>>; + /// A trie recorder that only keeps track of the proof size. 
/// /// The internal size counting logic should align @@ -91,7 +93,7 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeOnlyRecorder< #[derive(Clone)] pub(crate) struct SizeOnlyRecorderProvider { - seen_nodes: Rc>>, + seen_nodes: SeenNodes, encoded_size: Rc>, recorded_keys: Rc, RecordedForKey>>>, } @@ -106,6 +108,14 @@ impl Default for SizeOnlyRecorderProvider { } } +impl SizeOnlyRecorderProvider { + /// Use the given `seen_nodes` to populate the internal state. + #[cfg(not(feature = "std"))] + pub(crate) fn with_seen_nodes(seen_nodes: SeenNodes) -> Self { + Self { seen_nodes, ..Default::default() } + } +} + impl sp_trie::TrieRecorderProvider for SizeOnlyRecorderProvider { type Recorder<'a> = SizeOnlyRecorder<'a, H> diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index cf084b3ca6def..739a6984f0a8f 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -206,7 +206,7 @@ const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( ); parameter_types! 
{ - pub const BlockHashCount: BlockNumber = 250; + pub const BlockHashCount: BlockNumber = 4096; pub const Version: RuntimeVersion = VERSION; pub RuntimeBlockLength: BlockLength = BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); @@ -334,25 +334,22 @@ pub struct MultipleBlocksPerPoVCoreSelector; impl SelectCore for MultipleBlocksPerPoVCoreSelector { fn selected_core() -> (CoreSelector, ClaimQueueOffset) { - let core_selector = (System::block_number().saturating_sub(1) / BlocksPerPoV::get()) - .using_encoded(|b| b[0]); - - (CoreSelector(core_selector), ClaimQueueOffset(0)) + (CoreSelector(0), ClaimQueueOffset(0)) } fn select_next_core() -> (CoreSelector, ClaimQueueOffset) { let core_selector = ((System::block_number()) / BlocksPerPoV::get()).using_encoded(|b| b[0]); - (CoreSelector(core_selector), ClaimQueueOffset(0)) + (CoreSelector(0), ClaimQueueOffset(0)) } } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< Runtime, RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, + 24, + 32, >; impl cumulus_pallet_parachain_system::Config for Runtime { type WeightInfo = (); From 0b2941f9fcc5d1d436f3cca19538239a3f28806b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 19 May 2025 20:26:39 +0200 Subject: [PATCH 047/312] Some small code cleanups --- .../src/validate_block/implementation.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index f92878b59a18c..ee175075aea43 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -315,15 +315,17 @@ where &backend, ::Version::get().state_version(), ) - .unwrap(); + .expect("Failed to get drain storage changes from the overlay."); 
drop(backend); - changes.transaction.drain().into_iter().for_each(|(_, (v, c))| { - if c > 0 { - db.insert(EMPTY_PREFIX, &v); + // We just forward the changes directly to our db. + changes.transaction.drain().into_iter().for_each(|(_, (value, count))| { + // We only care about inserts and not deletes. + if count > 0 { + db.insert(EMPTY_PREFIX, &value); - let hash = HashingFor::::hash(&v); + let hash = HashingFor::::hash(&value); seen_nodes.borrow_mut().insert(hash); } }); From f35762eca32d90ac9acbd649762660e12555abd7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 3 Jun 2025 11:21:35 +0200 Subject: [PATCH 048/312] Improve some stuff --- .../slot_based/block_builder_task.rs | 12 +++++----- substrate/primitives/block-builder/src/lib.rs | 22 ++++++++++++++++++- 2 files changed, 28 insertions(+), 6 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 712a64b180b60..1d0e998561755 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -349,11 +349,13 @@ where continue }; + let block_time = block_rate.block_time.as_regular(); + // TODO: Do not use relay chain slot duration, should also be `block_time`. 
- let blocks_per_core = if dbg!(block_rate.block_time) < dbg!(relay_chain_slot_duration) { - relay_chain_slot_duration.as_millis() / block_rate.block_time.as_millis() - } else { - 1 + let blocks_per_core = match block_time { + Some(bt) if bt < relay_chain_slot_duration => + relay_chain_slot_duration.as_millis() / bt.as_millis(), + _ => 1, }; let mut blocks = Vec::new(); @@ -364,7 +366,7 @@ where let mut parent_header = pov_parent_header.clone(); for _ in 0..blocks_per_core { - let expected_block_end = Instant::now() + block_rate.block_time; + let expected_block_end = Instant::now() + block_time.unwrap_or_default(); let (parachain_inherent_data, other_inherent_data) = match collator .create_inherent_data( diff --git a/substrate/primitives/block-builder/src/lib.rs b/substrate/primitives/block-builder/src/lib.rs index a7eadfeac0f47..aa0da399aea30 100644 --- a/substrate/primitives/block-builder/src/lib.rs +++ b/substrate/primitives/block-builder/src/lib.rs @@ -29,11 +29,31 @@ use sp_runtime::{traits::Block as BlockT, ApplyExtrinsicResult}; #[derive(Encode, Decode, scale_info::TypeInfo)] pub struct BlockRate { /// Time between individual blocks. - pub block_time: Duration, + pub block_time: BlockTime, /// Maximum time to spend building per block. pub block_building_time: Duration, } +#[derive(Encode, Decode, scale_info::TypeInfo)] +pub enum BlockTime { + /// Blocks are expected every X. + Regularly { + /// Time between blocks. + every: Duration, + }, + /// Blocks are coming at unexpected times. + Irregular, +} + +impl BlockTime { + pub fn as_regular(&self) -> Option { + match self { + Self::Regularly { every } => Some(*every), + Self::Irregular => None, + } + } +} + sp_api::decl_runtime_apis! { /// The `BlockBuilder` api trait that provides the required functionality for building a block. 
#[api_version(6)] From 36cda25a6ac8eb62b19b54f0172ee017916160a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 20 Jun 2025 17:01:09 +0200 Subject: [PATCH 049/312] Let's start --- .../consensus/aura/src/collators/lookahead.rs | 20 ++++++--------- .../consensus/aura/src/collators/mod.rs | 22 +++++----------- .../slot_based/block_builder_task.rs | 6 ++--- .../aura/src/collators/slot_based/mod.rs | 22 ---------------- .../slot_based/relay_chain_data_cache.rs | 25 ++++++------------- 5 files changed, 23 insertions(+), 72 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 5f1a9eaa579db..0c31a08a66390 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -47,7 +47,7 @@ use polkadot_primitives::{ vstaging::DEFAULT_CLAIM_QUEUE_OFFSET, CollatorPair, Id as ParaId, OccupiedCoreAssumption, }; -use crate::{collator as collator_util, export_pov_to_path}; +use crate::{collator as collator_util, collators::claim_queue_at, export_pov_to_path}; use futures::prelude::*; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; use sc_consensus::BlockImport; @@ -220,17 +220,13 @@ where while let Some(relay_parent_header) = import_notifications.next().await { let relay_parent = relay_parent_header.hash(); - let core_index = if let Some(core_index) = super::cores_scheduled_for_para( - relay_parent, - params.para_id, - &mut params.relay_client, - ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET), - ) - .await - .get(0) - { - *core_index - } else { + let Some(core_index) = claim_queue_at(relay_parent, &mut params.relay_client) + .await + .iter_claims_at_depth(0) + .find_map( + |(core, para_id)| if para_id == params.para_id { Some(core) } else { None }, + ) + else { tracing::trace!( target: crate::LOG_TARGET, ?relay_parent, diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs 
b/cumulus/client/consensus/aura/src/collators/mod.rs index 72fa7ad3dcdb8..eb555bdd1cccc 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -145,18 +145,13 @@ async fn scheduling_lookahead( } } -// Return all the cores assigned to the para at the provided relay parent, using the claim queue -// offset. -// Will return an empty vec if the provided offset is higher than the claim queue length (which -// corresponds to the scheduling_lookahead on the relay chain). -async fn cores_scheduled_for_para( +// Returns the claim queue at the given relay parent. +async fn claim_queue_at( relay_parent: RelayHash, - para_id: ParaId, relay_client: &impl RelayChainInterface, - claim_queue_offset: ClaimQueueOffset, -) -> Vec { +) -> ClaimQueueSnapshot { // Get `ClaimQueue` from runtime - let claim_queue: ClaimQueueSnapshot = match relay_client.claim_queue(relay_parent).await { + match relay_client.claim_queue(relay_parent).await { Ok(claim_queue) => claim_queue.into(), Err(error) => { tracing::error!( @@ -165,14 +160,9 @@ async fn cores_scheduled_for_para( ?relay_parent, "Failed to query claim queue runtime API", ); - return Vec::new() + Default::default() }, - }; - - claim_queue - .iter_claims_at_depth(claim_queue_offset.0 as usize) - .filter_map(|(core_index, core_para_id)| (core_para_id == para_id).then_some(core_index)) - .collect() + } } // Checks if we own the slot at the given block and whether there diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 1c9fe5ca04fae..17b35f741b03f 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -250,11 +250,9 @@ where let Ok(RelayChainData { relay_parent_header, max_pov_size, - scheduled_cores, + claim_queue, claimed_cores, - 
}) = relay_chain_data_cache - .get_mut_relay_chain_data(relay_parent, claim_queue_offset) - .await + }) = relay_chain_data_cache.get_mut_relay_chain_data(relay_parent).await else { continue; }; diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index a518cde1e96a5..a5ea4a4680a08 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -267,25 +267,3 @@ struct CollatorMessage { pub max_pov_size: u32, } -/// Fetch the `CoreSelector` and `ClaimQueueOffset` for `parent_hash`. -fn core_selector( - para_client: &Client, - parent_hash: Block::Hash, - parent_number: NumberFor, -) -> Result<(CoreSelector, ClaimQueueOffset), sp_api::ApiError> -where - Client: ProvideRuntimeApi + Send + Sync, - Client::Api: GetCoreSelectorApi, -{ - let runtime_api = para_client.runtime_api(); - - if runtime_api.has_api::>(parent_hash)? { - Ok(runtime_api.core_selector(parent_hash)?) - } else { - let next_block_number: U256 = (parent_number + One::one()).into(); - - // If the runtime API does not support the core selector API, fallback to some default - // values. - Ok((CoreSelector(next_block_number.byte(0)), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET))) - } -} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs b/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs index b3251cd886efe..c7d110b0c0530 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs @@ -17,7 +17,7 @@ //! Utility for caching [`RelayChainData`] for different relay blocks. 
-use crate::collators::cores_scheduled_for_para; +use crate::collators::{claim_queue_at, cores_scheduled_for_para}; use cumulus_primitives_core::ClaimQueueOffset; use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_primitives::{ @@ -31,8 +31,8 @@ use std::collections::BTreeSet; pub struct RelayChainData { /// Current relay chain parent header. pub relay_parent_header: RelayHeader, - /// The cores on which the para is scheduled at the configured claim queue offset. - pub scheduled_cores: Vec, + /// The claim queue at the relay parent. + pub claim_queue: ClaimQueueSnapshot, /// Maximum configured PoV size on the relay chain. pub max_pov_size: u32, /// The claimed cores at a relay parent. @@ -66,14 +66,13 @@ where pub async fn get_mut_relay_chain_data( &mut self, relay_parent: RelayHash, - claim_queue_offset: ClaimQueueOffset, ) -> Result<&mut RelayChainData, ()> { let insert_data = if self.cached_data.peek(&relay_parent).is_some() { tracing::trace!(target: crate::LOG_TARGET, %relay_parent, "Using cached data for relay parent."); None } else { tracing::trace!(target: crate::LOG_TARGET, %relay_parent, "Relay chain best block changed, fetching new data from relay chain."); - Some(self.update_for_relay_parent(relay_parent, claim_queue_offset).await?) + Some(self.update_for_relay_parent(relay_parent).await?) }; Ok(self @@ -85,18 +84,8 @@ where } /// Fetch fresh data from the relay chain for the given relay parent hash. 
- async fn update_for_relay_parent( - &self, - relay_parent: RelayHash, - claim_queue_offset: ClaimQueueOffset, - ) -> Result { - let scheduled_cores = cores_scheduled_for_para( - relay_parent, - self.para_id, - &self.relay_client, - claim_queue_offset, - ) - .await; + async fn update_for_relay_parent(&self, relay_parent: RelayHash) -> Result { + let claim_queue = claim_queue_at(relay_parent, &self.relay_client).await; let Ok(Some(relay_parent_header)) = self.relay_client.header(BlockId::Hash(relay_parent)).await @@ -120,7 +109,7 @@ where Ok(RelayChainData { relay_parent_header, - scheduled_cores, + claim_queue, max_pov_size, claimed_cores: BTreeSet::new(), }) From c6866b1803561e410b1f626421bd4bcdd671eb8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 23 Jun 2025 22:18:40 +0200 Subject: [PATCH 050/312] More work --- .../slot_based/block_builder_task.rs | 57 +++++++-- .../slot_based/relay_chain_data_cache.rs | 14 +-- .../node/subsystem-util/src/runtime/mod.rs | 118 +++++++++++++++++- polkadot/primitives/src/vstaging/mod.rs | 4 +- 4 files changed, 171 insertions(+), 22 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 17b35f741b03f..e1444cb6078bc 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -21,9 +21,13 @@ use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterfa use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; -use cumulus_primitives_core::{GetCoreSelectorApi, PersistedValidationData}; +use cumulus_primitives_core::{ + extract_relay_parent, rpsr_digest, ClaimQueueOffset, CoreSelector, 
CumulusDigestItem, + GetCoreSelectorApi, PersistedValidationData, +}; use cumulus_relay_chain_interface::RelayChainInterface; +use polkadot_node_subsystem_util::runtime::ClaimQueueSnapshot; use polkadot_primitives::{ Block as RelayBlock, BlockId, Hash as RelayHash, Header as RelayHeader, Id as ParaId, }; @@ -34,7 +38,6 @@ use crate::{ collators::{ check_validation_code_or_log, slot_based::{ - core_selector, relay_chain_data_cache::{RelayChainData, RelayChainDataCache}, slot_timer::{SlotInfo, SlotTimer}, }, @@ -247,12 +250,8 @@ where }, }; - let Ok(RelayChainData { - relay_parent_header, - max_pov_size, - claim_queue, - claimed_cores, - }) = relay_chain_data_cache.get_mut_relay_chain_data(relay_parent).await + let Ok(RelayChainData { relay_parent_header, max_pov_size, claim_queue }) = + relay_chain_data_cache.get_mut_relay_chain_data(relay_parent).await else { continue; }; @@ -263,6 +262,7 @@ where ?claimed_cores, "Claimed cores.", ); + if scheduled_cores.is_empty() { tracing::debug!(target: LOG_TARGET, "Parachain not scheduled, skipping slot."); continue; @@ -515,6 +515,47 @@ where Ok(RelayParentData::new_with_descendants(relay_parent, required_ancestors.into())) } +/// Determine the core for the given `para_id`. +/// +/// Takes into account the `parent` core to find the next available core. +async fn determine_core( + relay_chain_data_cache: &mut RelayChainDataCache, + relay_parent: &RelayHeader, + para_id: ParaId, + parent: &Block::Header, +) -> Result<(CoreSelector, ClaimQueueOffset), ()> { + // The digest should be always there and if not, we can just assume `(0, 0)` as offset and + // selector. + let (last_selector, last_offset) = CumulusDigestItem::find_select_core(parent.digest()) + .map_or_else(|| (None, None), |(selector, offset)| (Some(selector), Some(offset))); + + let last_relay_parent = match extract_relay_parent(parent.digest()) { + Some(last_relay_parent) => *relay_chain_data_cache + .get_mut_relay_chain_data(last_relay_parent) + .await? 
+ .relay_parent_header + .number(), + None => rpsr_digest::extract_relay_parent_storage_root(parent.digest()).ok_or(())?.1, + }; + + let relay_parent_offset = relay_parent.number().saturating_sub(last_relay_parent); + let claim_queue = &relay_chain_data_cache + .get_mut_relay_chain_data(relay_parent.hash()) + .await? + .claim_queue; + + if relay_parent_offset > last_offset.unwrap_or_default().0 as u32 { + claim_queue.find_core(para_id, 0, 0) + } else { + claim_queue.find_core( + para_id, + last_selector.map_or(0, |s| s.0 as u32 + 1), + last_offset.map_or(0, |o| o.0 as u32 - relay_parent_offset), + ) + } + .ok_or(()) +} + #[cfg(test)] mod tests { use super::*; diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs b/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs index c7d110b0c0530..ea56a70d4fafd 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs @@ -17,9 +17,10 @@ //! Utility for caching [`RelayChainData`] for different relay blocks. -use crate::collators::{claim_queue_at, cores_scheduled_for_para}; +use crate::collators::claim_queue_at; use cumulus_primitives_core::ClaimQueueOffset; use cumulus_relay_chain_interface::RelayChainInterface; +use polkadot_node_subsystem_util::runtime::ClaimQueueSnapshot; use polkadot_primitives::{ CoreIndex, Hash as RelayHash, Header as RelayHeader, Id as ParaId, OccupiedCoreAssumption, }; @@ -35,8 +36,6 @@ pub struct RelayChainData { pub claim_queue: ClaimQueueSnapshot, /// Maximum configured PoV size on the relay chain. pub max_pov_size: u32, - /// The claimed cores at a relay parent. 
- pub claimed_cores: BTreeSet, } /// Simple helper to fetch relay chain data and cache it based on the current relay chain best block @@ -49,7 +48,7 @@ pub struct RelayChainDataCache { impl RelayChainDataCache where - RI: RelayChainInterface + Clone + 'static, + RI: RelayChainInterface + 'static, { pub fn new(relay_client: RI, para_id: ParaId) -> Self { Self { @@ -107,11 +106,6 @@ where }, }; - Ok(RelayChainData { - relay_parent_header, - claim_queue, - max_pov_size, - claimed_cores: BTreeSet::new(), - }) + Ok(RelayChainData { relay_parent_header, claim_queue, max_pov_size }) } } diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 317d25bef8e9a..a523a52627a73 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -33,7 +33,10 @@ use polkadot_node_subsystem_types::UnpinHandle; use polkadot_primitives::{ node_features::FeatureIndex, slashing, - vstaging::{CandidateEvent, CoreState, OccupiedCore, ScrapedOnChainVotes}, + vstaging::{ + CandidateEvent, ClaimQueueOffset, CoreSelector, CoreState, OccupiedCore, + ScrapedOnChainVotes, + }, CandidateHash, CoreIndex, EncodeAs, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, IndexedVec, NodeFeatures, SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, @@ -474,7 +477,7 @@ where } /// A snapshot of the runtime claim queue at an arbitrary relay chain block. -#[derive(Default)] +#[derive(Default, Clone)] pub struct ClaimQueueSnapshot(pub BTreeMap>); impl From>> for ClaimQueueSnapshot { @@ -513,6 +516,45 @@ impl ClaimQueueSnapshot { pub fn iter_all_claims(&self) -> impl Iterator)> + '_ { self.0.iter() } + + /// Find a core for the given `para_id`. + /// + /// `cores_claimed` is the number of cores already claimed from this snapshot for `para_id` at + /// the given `claim_queue_offset`. 
+ pub fn find_core( + &self, + para_id: ParaId, + mut cores_claimed: u32, + claim_queue_offset: u32, + ) -> Option<(CoreSelector, ClaimQueueOffset)> { + let mut offset_to_core_count = BTreeMap::::new(); + + self.0.iter().for_each(|(_, ids)| { + ids.iter() + .enumerate() + .filter_map(|(i, id)| (*id == para_id).then(|| i)) + .for_each(|offset| { + *offset_to_core_count.entry(offset).or_default() += 1; + }); + }); + + for (claim_queue_pos, count) in offset_to_core_count { + if (claim_queue_pos as u32) < claim_queue_offset { + continue + } + + if cores_claimed < count { + return Some(( + CoreSelector(cores_claimed as u8), + ClaimQueueOffset(claim_queue_pos as u8), + )) + } + + cores_claimed -= count; + } + + None + } } /// Fetch the claim queue and wrap it into a helpful `ClaimQueueSnapshot` @@ -585,3 +627,75 @@ pub async fn fetch_validation_code_bomb_limit( res } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn find_core_works() { + let claim_queue = ClaimQueueSnapshot(BTreeMap::from_iter( + [ + ( + CoreIndex(0), + VecDeque::from_iter([ParaId::from(1), ParaId::from(2), ParaId::from(1)]), + ), + ( + CoreIndex(1), + VecDeque::from_iter([ParaId::from(1), ParaId::from(1), ParaId::from(2)]), + ), + ( + CoreIndex(2), + VecDeque::from_iter([ParaId::from(1), ParaId::from(2), ParaId::from(3)]), + ), + ( + CoreIndex(3), + VecDeque::from_iter([ParaId::from(2), ParaId::from(1), ParaId::from(3)]), + ), + ] + .into_iter(), + )); + + assert_eq!( + claim_queue.find_core(1u32.into(), 0, 0).unwrap(), + (CoreSelector(0), ClaimQueueOffset(0)) + ); + + assert_eq!( + claim_queue.find_core(1u32.into(), 1, 0).unwrap(), + (CoreSelector(1), ClaimQueueOffset(0)) + ); + + assert_eq!( + claim_queue.find_core(1u32.into(), 2, 0).unwrap(), + (CoreSelector(2), ClaimQueueOffset(0)) + ); + + assert_eq!( + claim_queue.find_core(1u32.into(), 3, 0).unwrap(), + (CoreSelector(0), ClaimQueueOffset(1)) + ); + + assert_eq!( + claim_queue.find_core(1u32.into(), 4, 0).unwrap(), + (CoreSelector(1), 
ClaimQueueOffset(1)) + ); + + assert_eq!( + claim_queue.find_core(1u32.into(), 5, 0).unwrap(), + (CoreSelector(0), ClaimQueueOffset(2)) + ); + + assert_eq!(claim_queue.find_core(1u32.into(), 6, 0), None); + + assert_eq!( + claim_queue.find_core(1u32.into(), 0, 1).unwrap(), + (CoreSelector(0), ClaimQueueOffset(1)) + ); + + assert_eq!( + claim_queue.find_core(1u32.into(), 2, 1).unwrap(), + (CoreSelector(0), ClaimQueueOffset(2)) + ); + } +} diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index db34fbeab6694..3c8cc3bf63cd0 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -421,11 +421,11 @@ impl From> for super::v8::CandidateReceipt { /// A strictly increasing sequence number, typically this would be the least significant byte of the /// block number. -#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Debug, Copy)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Debug, Copy, Default)] pub struct CoreSelector(pub u8); /// An offset in the relay chain claim queue. -#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Debug, Copy)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Debug, Copy, Default)] pub struct ClaimQueueOffset(pub u8); /// Approved PeerId type. 
PeerIds in polkadot should typically be 32 bytes long but for identity From f8ea621481653e7b25e93c706484b59a733e0bc3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 24 Jun 2025 22:20:19 +0200 Subject: [PATCH 051/312] More work --- .../consensus/aura/src/collators/lookahead.rs | 6 +- .../consensus/aura/src/collators/mod.rs | 4 +- .../slot_based/block_builder_task.rs | 171 ++++++++++-------- .../aura/src/collators/slot_based/mod.rs | 14 +- .../slot_based/relay_chain_data_cache.rs | 4 +- cumulus/zombienet/zombienet-sdk/run.sh | 4 +- .../node/subsystem-util/src/runtime/mod.rs | 45 +++-- 7 files changed, 131 insertions(+), 117 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 0c31a08a66390..8b7738ce230d5 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -37,15 +37,13 @@ use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterfa use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::{ClaimQueueOffset, CollectCollationInfo, PersistedValidationData}; +use cumulus_primitives_core::{CollectCollationInfo, PersistedValidationData}; use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_node_primitives::SubmitCollationParams; use polkadot_node_subsystem::messages::CollationGenerationMessage; use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{ - vstaging::DEFAULT_CLAIM_QUEUE_OFFSET, CollatorPair, Id as ParaId, OccupiedCoreAssumption, -}; +use polkadot_primitives::{CollatorPair, Id as ParaId, OccupiedCoreAssumption}; use crate::{collator as collator_util, collators::claim_queue_at, export_pov_to_path}; use futures::prelude::*; diff 
--git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index eb555bdd1cccc..2ce38d0c07b68 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -25,12 +25,12 @@ use crate::collator::SlotClaim; use codec::Codec; use cumulus_client_consensus_common::{self as consensus_common, ParentSearchParams}; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; -use cumulus_primitives_core::{relay_chain::Header as RelayHeader, BlockT, ClaimQueueOffset}; +use cumulus_primitives_core::{relay_chain::Header as RelayHeader, BlockT}; use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_node_subsystem::messages::RuntimeApiRequest; use polkadot_node_subsystem_util::runtime::ClaimQueueSnapshot; use polkadot_primitives::{ - CoreIndex, Hash as RelayHash, Id as ParaId, OccupiedCoreAssumption, ValidationCodeHash, + Hash as RelayHash, Id as ParaId, OccupiedCoreAssumption, ValidationCodeHash, DEFAULT_SCHEDULING_LOOKAHEAD, }; use sc_consensus_aura::{standalone as aura_internal, AuraApi}; diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index e1444cb6078bc..b8dac70d336b1 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -27,9 +27,8 @@ use cumulus_primitives_core::{ }; use cumulus_relay_chain_interface::RelayChainInterface; -use polkadot_node_subsystem_util::runtime::ClaimQueueSnapshot; use polkadot_primitives::{ - Block as RelayBlock, BlockId, Hash as RelayHash, Header as RelayHeader, Id as ParaId, + Block as RelayBlock, CoreIndex, Hash as RelayHash, Header as RelayHeader, Id as ParaId, }; use super::CollatorMessage; @@ -208,7 +207,7 @@ where }; let Ok(rp_data) = offset_relay_parent_find_descendants( - 
&relay_client, + &mut relay_chain_data_cache, relay_best_hash, relay_parent_offset, ) @@ -226,6 +225,7 @@ where }; let relay_parent = rp_data.relay_parent().hash(); + let relay_parent_header = rp_data.relay_parent().clone(); let Some((included_header, parent)) = crate::collators::find_parent(relay_parent, para_id, &*para_backend, &relay_client) @@ -235,63 +235,67 @@ where }; let parent_hash = parent.hash; + let parent_header = parent.header; // Retrieve the core selector. - let (core_selector, claim_queue_offset) = - match core_selector(&*para_client, parent.hash, *parent.header.number()) { - Ok(core_selector) => core_selector, - Err(err) => { - tracing::trace!( - target: crate::LOG_TARGET, - "Unable to retrieve the core selector from the runtime API: {}", - err - ); - continue - }, - }; - - let Ok(RelayChainData { relay_parent_header, max_pov_size, claim_queue }) = - relay_chain_data_cache.get_mut_relay_chain_data(relay_parent).await - else { - continue; - }; + let (core_selector, claim_queue_offset, core_index) = match determine_core( + &mut relay_chain_data_cache, + &relay_parent_header, + para_id, + &parent_header, + ) + .await + { + Err(()) => { + tracing::debug!( + target: LOG_TARGET, + ?relay_parent, + "Failed to determine core" + ); - tracing::debug!( - target: LOG_TARGET, - ?relay_parent, - ?claimed_cores, - "Claimed cores.", - ); + continue + }, + Ok(Some(res)) => { + tracing::debug!( + target: LOG_TARGET, + ?relay_parent, + core_selector = ?res.0, + claim_queue_offset = ?res.1, + "Going to claim core", + ); - if scheduled_cores.is_empty() { - tracing::debug!(target: LOG_TARGET, "Parachain not scheduled, skipping slot."); - continue; - } else { - tracing::debug!( - target: LOG_TARGET, - ?relay_parent, - "Parachain is scheduled on cores: {:?}", - scheduled_cores - ); - } + res + }, + Ok(None) => { + tracing::debug!( + target: LOG_TARGET, + ?relay_parent, + "No available core" + ); - slot_timer.update_scheduling(scheduled_cores.len() as u32); + continue + 
}, + }; - let core_selector = core_selector.0 as usize % scheduled_cores.len(); - let Some(core_index) = scheduled_cores.get(core_selector) else { - // This cannot really happen, as we modulo the core selector with the - // scheduled_cores length and we check that the scheduled_cores is not empty. + let Ok(RelayChainData { max_pov_size, claim_queue, .. }) = + relay_chain_data_cache.get_mut_relay_chain_data(relay_parent).await + else { continue; }; - let parent_header = parent.header; + slot_timer.update_scheduling( + claim_queue + .iter_claims_at_depth(claim_queue_offset.0 as usize) + .filter(|(_, id)| para_id == *id) + .count() as u32, + ); // We mainly call this to inform users at genesis if there is a mismatch with the // on-chain data. collator.collator_service().check_block_status(parent_hash, &parent_header); let Ok(relay_slot) = - sc_consensus_babe::find_pre_digest::(relay_parent_header) + sc_consensus_babe::find_pre_digest::(&relay_parent_header) .map(|babe_pre_digest| babe_pre_digest.slot()) else { tracing::error!(target: crate::LOG_TARGET, "Relay chain does not contain babe slot. This should never happen."); @@ -315,7 +319,6 @@ where None => { tracing::debug!( target: crate::LOG_TARGET, - ?core_index, unincluded_segment_len = parent.depth, relay_parent = %relay_parent, relay_parent_num = %relay_parent_header.number(), @@ -329,15 +332,6 @@ where }, }; - if !claimed_cores.insert(*core_index) { - tracing::debug!( - target: LOG_TARGET, - "Core {:?} was already claimed at this relay chain slot", - core_index - ); - continue - } - tracing::debug!( target: crate::LOG_TARGET, unincluded_segment_len = parent.depth, @@ -348,7 +342,6 @@ where included_num = %included_header.number(), parent = %parent_hash, slot = ?para_slot.slot, - ?core_index, "Building block." 
); @@ -427,7 +420,7 @@ where parent_header, parachain_candidate: candidate, validation_code_hash, - core_index: *core_index, + core_index, max_pov_size: validation_data.max_pov_size, }) { tracing::error!(target: crate::LOG_TARGET, ?err, "Unable to send block to collation task."); @@ -471,14 +464,17 @@ fn adjust_para_to_relay_parent_slot( /// The function traverses backwards from the best block until it finds the block at the specified /// offset, collecting all blocks in between to maintain the chain of ancestry. async fn offset_relay_parent_find_descendants( - relay_client: &RelayClient, + relay_chain_data_cache: &mut RelayChainDataCache, relay_best_block: RelayHash, relay_parent_offset: u32, ) -> Result where RelayClient: RelayChainInterface + Clone + 'static, { - let Ok(Some(mut relay_header)) = relay_client.header(BlockId::Hash(relay_best_block)).await + let Ok(mut relay_header) = relay_chain_data_cache + .get_mut_relay_chain_data(relay_best_block) + .await + .map(|d| d.relay_parent_header.clone()) else { tracing::error!(target: LOG_TARGET, ?relay_best_block, "Unable to fetch best relay chain block header."); return Err(()) @@ -491,20 +487,21 @@ where let mut required_ancestors: VecDeque = Default::default(); required_ancestors.push_front(relay_header.clone()); while required_ancestors.len() < relay_parent_offset as usize { - let Ok(Some(next_header)) = - relay_client.header(BlockId::Hash(*relay_header.parent_hash())).await - else { - return Err(()) - }; + let next_header = relay_chain_data_cache + .get_mut_relay_chain_data(*relay_header.parent_hash()) + .await? + .relay_parent_header + .clone(); required_ancestors.push_front(next_header.clone()); relay_header = next_header; } - let Ok(Some(relay_parent)) = - relay_client.header(BlockId::Hash(*relay_header.parent_hash())).await - else { - return Err(()) - }; + let relay_parent = relay_chain_data_cache + .get_mut_relay_chain_data(*relay_header.parent_hash()) + .await? 
+ .relay_parent_header + .clone(); + tracing::debug!( target: LOG_TARGET, relay_parent_hash = %relay_parent.hash(), @@ -512,18 +509,19 @@ where num_descendants = required_ancestors.len(), "Relay parent descendants." ); + Ok(RelayParentData::new_with_descendants(relay_parent, required_ancestors.into())) } /// Determine the core for the given `para_id`. /// /// Takes into account the `parent` core to find the next available core. -async fn determine_core( +async fn determine_core( relay_chain_data_cache: &mut RelayChainDataCache, relay_parent: &RelayHeader, para_id: ParaId, - parent: &Block::Header, -) -> Result<(CoreSelector, ClaimQueueOffset), ()> { + parent: &Header, +) -> Result, ()> { // The digest should be always there and if not, we can just assume `(0, 0)` as offset and // selector. let (last_selector, last_offset) = CumulusDigestItem::find_select_core(parent.digest()) @@ -544,7 +542,10 @@ async fn determine_core( .await? .claim_queue; - if relay_parent_offset > last_offset.unwrap_or_default().0 as u32 { + // If the offset between the last relay parent and the current one is bigger than the last + // claim queue offset, we can start from the beginning of the claim queue. Because there was no + // core yet claimed from this claim queue. 
+ let res = if relay_parent_offset > last_offset.unwrap_or_default().0 as u32 { claim_queue.find_core(para_id, 0, 0) } else { claim_queue.find_core( @@ -552,8 +553,9 @@ async fn determine_core( last_selector.map_or(0, |s| s.0 as u32 + 1), last_offset.map_or(0, |o| o.0 as u32 - relay_parent_offset), ) - } - .ok_or(()) + }; + + Ok(res) } #[cfg(test)] @@ -563,6 +565,7 @@ mod tests { use cumulus_relay_chain_interface::*; use futures::Stream; use polkadot_primitives::vstaging::{CandidateEvent, CommittedCandidateReceiptV2}; + use sp_runtime::generic::BlockId; use sp_version::RuntimeVersion; use std::{ collections::{BTreeMap, HashMap, VecDeque}, @@ -576,7 +579,9 @@ mod tests { let client = TestRelayClient::new(headers); - let result = offset_relay_parent_find_descendants(&client, best_hash, 0).await; + let mut cache = RelayChainDataCache::new(client, 1.into()); + + let result = offset_relay_parent_find_descendants(&mut cache, best_hash, 0).await; assert!(result.is_ok()); let data = result.unwrap(); assert_eq!(data.descendants_len(), 0); @@ -591,7 +596,9 @@ mod tests { let client = TestRelayClient::new(headers); - let result = offset_relay_parent_find_descendants(&client, best_hash, 2).await; + let mut cache = RelayChainDataCache::new(client, 1.into()); + + let result = offset_relay_parent_find_descendants(&mut cache, best_hash, 2).await; assert!(result.is_ok()); let data = result.unwrap(); assert_eq!(data.descendants_len(), 2); @@ -609,7 +616,9 @@ mod tests { let client = TestRelayClient::new(headers); - let result = offset_relay_parent_find_descendants(&client, best_hash, 5).await; + let mut cache = RelayChainDataCache::new(client, 1.into()); + + let result = offset_relay_parent_find_descendants(&mut cache, best_hash, 5).await; assert!(result.is_ok()); let data = result.unwrap(); assert_eq!(data.descendants_len(), 5); @@ -627,10 +636,12 @@ mod tests { let client = TestRelayClient::new(headers); - let result = offset_relay_parent_find_descendants(&client, best_hash, 
200).await; + let mut cache = RelayChainDataCache::new(client, 1.into()); + + let result = offset_relay_parent_find_descendants(&mut cache, best_hash, 200).await; assert!(result.is_err()); - let result = offset_relay_parent_find_descendants(&client, best_hash, 101).await; + let result = offset_relay_parent_find_descendants(&mut cache, best_hash, 101).await; assert!(result.is_err()); } diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index a5ea4a4680a08..8ab159125a07b 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -74,26 +74,23 @@ use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterfa use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::{ - ClaimQueueOffset, CoreSelector, GetCoreSelectorApi, RelayParentOffsetApi, -}; +use cumulus_primitives_core::{GetCoreSelectorApi, RelayParentOffsetApi}; use cumulus_relay_chain_interface::RelayChainInterface; use futures::FutureExt; use polkadot_primitives::{ - vstaging::DEFAULT_CLAIM_QUEUE_OFFSET, CollatorPair, CoreIndex, Hash as RelayHash, Id as ParaId, - ValidationCodeHash, + CollatorPair, CoreIndex, Hash as RelayHash, Id as ParaId, ValidationCodeHash, }; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; use sc_consensus::BlockImport; use sc_utils::mpsc::tracing_unbounded; -use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_api::ProvideRuntimeApi; use sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; use sp_consensus_aura::AuraApi; -use sp_core::{crypto::Pair, traits::SpawnNamed, U256}; +use sp_core::{crypto::Pair, traits::SpawnNamed}; use sp_inherents::CreateInherentDataProviders; use 
sp_keystore::KeystorePtr; -use sp_runtime::traits::{Block as BlockT, Member, NumberFor, One}; +use sp_runtime::traits::{Block as BlockT, Member}; use std::{path::PathBuf, sync::Arc, time::Duration}; mod block_builder_task; @@ -266,4 +263,3 @@ struct CollatorMessage { /// Maximum pov size. Currently needed only for exporting PoV. pub max_pov_size: u32, } - diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs b/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs index ea56a70d4fafd..0c46dac71d839 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs @@ -18,14 +18,12 @@ //! Utility for caching [`RelayChainData`] for different relay blocks. use crate::collators::claim_queue_at; -use cumulus_primitives_core::ClaimQueueOffset; use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_node_subsystem_util::runtime::ClaimQueueSnapshot; use polkadot_primitives::{ - CoreIndex, Hash as RelayHash, Header as RelayHeader, Id as ParaId, OccupiedCoreAssumption, + Hash as RelayHash, Header as RelayHeader, Id as ParaId, OccupiedCoreAssumption, }; use sp_runtime::generic::BlockId; -use std::collections::BTreeSet; /// Contains relay chain data necessary for parachain block building. 
#[derive(Clone)] diff --git a/cumulus/zombienet/zombienet-sdk/run.sh b/cumulus/zombienet/zombienet-sdk/run.sh index 377efdc5cb3f6..40d5bafc6c248 100755 --- a/cumulus/zombienet/zombienet-sdk/run.sh +++ b/cumulus/zombienet/zombienet-sdk/run.sh @@ -1,9 +1,9 @@ #!/usr/bin/env bash set -e -cargo build --release -p cumulus-test-service --bin test-parachain -p polkadot --bin polkadot-prepare-worker --bin polkadot-execute-worker --bin polkadot +cargo build --release -p cumulus-test-service --bin test-parachain -p polkadot --bin polkadot-prepare-worker --bin polkadot-execute-worker --bin polkadot -p polkadot-parachain-bin --bin polkadot-parachain RELEASE_DIR=$(dirname "$(cargo locate-project --workspace --message-format plain)")/target/release export PATH=$RELEASE_DIR:$PATH -ZOMBIE_PROVIDER=native cargo test --release -p cumulus-zombienet-sdk-tests --features zombie-ci +ZOMBIE_PROVIDER=native cargo test --release -p cumulus-zombienet-sdk-tests --features zombie-ci "$@" diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index a523a52627a73..b78a7e7b104be 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -526,31 +526,32 @@ impl ClaimQueueSnapshot { para_id: ParaId, mut cores_claimed: u32, claim_queue_offset: u32, - ) -> Option<(CoreSelector, ClaimQueueOffset)> { - let mut offset_to_core_count = BTreeMap::::new(); + ) -> Option<(CoreSelector, ClaimQueueOffset, CoreIndex)> { + let mut offset_to_core_count = BTreeMap::>::new(); - self.0.iter().for_each(|(_, ids)| { + self.0.iter().for_each(|(core_index, ids)| { ids.iter() .enumerate() .filter_map(|(i, id)| (*id == para_id).then(|| i)) .for_each(|offset| { - *offset_to_core_count.entry(offset).or_default() += 1; + offset_to_core_count.entry(offset).or_default().push(*core_index); }); }); - for (claim_queue_pos, count) in offset_to_core_count { - if (claim_queue_pos as u32) < claim_queue_offset { + for 
(offset, cores) in offset_to_core_count { + if (offset as u32) < claim_queue_offset { continue } - if cores_claimed < count { + if let Some(core_index) = cores.get(cores_claimed as usize) { return Some(( CoreSelector(cores_claimed as u8), - ClaimQueueOffset(claim_queue_pos as u8), + ClaimQueueOffset(offset as u8), + *core_index, )) } - cores_claimed -= count; + cores_claimed -= cores.len() as u32; } None @@ -658,44 +659,54 @@ mod test { assert_eq!( claim_queue.find_core(1u32.into(), 0, 0).unwrap(), - (CoreSelector(0), ClaimQueueOffset(0)) + (CoreSelector(0), ClaimQueueOffset(0), CoreIndex(0)) ); assert_eq!( claim_queue.find_core(1u32.into(), 1, 0).unwrap(), - (CoreSelector(1), ClaimQueueOffset(0)) + (CoreSelector(1), ClaimQueueOffset(0), CoreIndex(1)) ); assert_eq!( claim_queue.find_core(1u32.into(), 2, 0).unwrap(), - (CoreSelector(2), ClaimQueueOffset(0)) + (CoreSelector(2), ClaimQueueOffset(0), CoreIndex(2)) ); assert_eq!( claim_queue.find_core(1u32.into(), 3, 0).unwrap(), - (CoreSelector(0), ClaimQueueOffset(1)) + (CoreSelector(0), ClaimQueueOffset(1), CoreIndex(1)) ); assert_eq!( claim_queue.find_core(1u32.into(), 4, 0).unwrap(), - (CoreSelector(1), ClaimQueueOffset(1)) + (CoreSelector(1), ClaimQueueOffset(1), CoreIndex(3)) ); assert_eq!( claim_queue.find_core(1u32.into(), 5, 0).unwrap(), - (CoreSelector(0), ClaimQueueOffset(2)) + (CoreSelector(0), ClaimQueueOffset(2), CoreIndex(0)) ); assert_eq!(claim_queue.find_core(1u32.into(), 6, 0), None); assert_eq!( claim_queue.find_core(1u32.into(), 0, 1).unwrap(), - (CoreSelector(0), ClaimQueueOffset(1)) + (CoreSelector(0), ClaimQueueOffset(1), CoreIndex(1)) ); assert_eq!( claim_queue.find_core(1u32.into(), 2, 1).unwrap(), - (CoreSelector(0), ClaimQueueOffset(2)) + (CoreSelector(0), ClaimQueueOffset(2), CoreIndex(0)) + ); + + assert_eq!( + claim_queue.find_core(3u32.into(), 0, 0).unwrap(), + (CoreSelector(0), ClaimQueueOffset(2), CoreIndex(2)) + ); + + assert_eq!( + claim_queue.find_core(3u32.into(), 1, 0).unwrap(), + 
(CoreSelector(1), ClaimQueueOffset(2), CoreIndex(3)) ); } } From f8ac8e44939163b077eb7a5b6bf44c9ab777cc98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 25 Jun 2025 23:07:07 +0200 Subject: [PATCH 052/312] Some fixes --- .../slot_based/block_builder_task.rs | 61 ++++++++++--------- .../collators/slot_based/collation_task.rs | 3 +- .../slot_based/relay_chain_data_cache.rs | 2 +- cumulus/pallets/parachain-system/src/lib.rs | 24 +++----- .../zombienet-sdk-helpers/src/lib.rs | 7 +-- .../node/subsystem-util/src/runtime/mod.rs | 2 +- polkadot/primitives/src/vstaging/mod.rs | 14 +++-- 7 files changed, 60 insertions(+), 53 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index b8dac70d336b1..db3414e37a437 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -17,23 +17,9 @@ use codec::{Codec, Encode}; -use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; -use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; -use cumulus_client_consensus_proposer::ProposerInterface; -use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; -use cumulus_primitives_core::{ - extract_relay_parent, rpsr_digest, ClaimQueueOffset, CoreSelector, CumulusDigestItem, - GetCoreSelectorApi, PersistedValidationData, -}; -use cumulus_relay_chain_interface::RelayChainInterface; - -use polkadot_primitives::{ - Block as RelayBlock, CoreIndex, Hash as RelayHash, Header as RelayHeader, Id as ParaId, -}; - use super::CollatorMessage; use crate::{ - collator::{self as collator_util}, + collator as collator_util, collators::{ check_validation_code_or_log, slot_based::{ @@ -44,8 +30,19 @@ use crate::{ }, LOG_TARGET, }; -use 
cumulus_primitives_core::RelayParentOffsetApi; +use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; +use cumulus_client_consensus_proposer::ProposerInterface; +use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; +use cumulus_primitives_core::{ + extract_relay_parent, rpsr_digest, ClaimQueueOffset, CoreSelector, CumulusDigestItem, + GetCoreSelectorApi, PersistedValidationData, RelayParentOffsetApi, +}; +use cumulus_relay_chain_interface::RelayChainInterface; use futures::prelude::*; +use polkadot_primitives::{ + Block as RelayBlock, CoreIndex, Hash as RelayHash, Header as RelayHeader, Id as ParaId, +}; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; use sc_consensus::BlockImport; use sc_consensus_aura::SlotDuration; @@ -56,7 +53,7 @@ use sp_consensus_aura::AuraApi; use sp_core::crypto::Pair; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member, Zero}; use std::{collections::VecDeque, sync::Arc, time::Duration}; /// Parameters for [`run_block_builder`]. @@ -320,11 +317,11 @@ where tracing::debug!( target: crate::LOG_TARGET, unincluded_segment_len = parent.depth, - relay_parent = %relay_parent, + relay_parent = ?relay_parent, relay_parent_num = %relay_parent_header.number(), - included_hash = %included_header_hash, + included_hash = ?included_header_hash, included_num = %included_header.number(), - parent = %parent_hash, + parent = ?parent_hash, slot = ?para_slot.slot, "Not building block." 
); @@ -399,7 +396,11 @@ where .build_block_and_import( &parent_header, &slot_claim, - None, + Some(vec![dbg!(CumulusDigestItem::SelectCore { + selector: core_selector, + claim_queue_offset, + }) + .to_digest_item()]), (parachain_inherent_data, other_inherent_data), authoring_duration, allowed_pov_size, @@ -527,13 +528,17 @@ async fn determine_core( let (last_selector, last_offset) = CumulusDigestItem::find_select_core(parent.digest()) .map_or_else(|| (None, None), |(selector, offset)| (Some(selector), Some(offset))); - let last_relay_parent = match extract_relay_parent(parent.digest()) { - Some(last_relay_parent) => *relay_chain_data_cache - .get_mut_relay_chain_data(last_relay_parent) - .await? - .relay_parent_header - .number(), - None => rpsr_digest::extract_relay_parent_storage_root(parent.digest()).ok_or(())?.1, + let last_relay_parent = if parent.number().is_zero() { + 0 + } else { + match extract_relay_parent(parent.digest()) { + Some(last_relay_parent) => *relay_chain_data_cache + .get_mut_relay_chain_data(last_relay_parent) + .await? 
+ .relay_parent_header + .number(), + None => rpsr_digest::extract_relay_parent_storage_root(parent.digest()).ok_or(())?.1, + } }; let relay_parent_offset = relay_parent.number().saturating_sub(last_relay_parent); diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs index 0414ebf2e1182..0464aaf4d19a6 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs @@ -171,7 +171,8 @@ async fn handle_collation_message::deposit_log( - cumulus_primitives_core::CumulusDigestItem::SelectCore { - selector: selected_core.0, - claim_queue_offset: selected_core.1, - } - .to_digest_item(), - ); - weight } } @@ -1515,14 +1505,20 @@ impl Pallet { /// Send the ump signals #[cfg(feature = "experimental-ump-signals")] fn send_ump_signal() { - use cumulus_primitives_core::relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}; + use cumulus_primitives_core::{ + relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}, + CumulusDigestItem, + }; UpwardMessages::::mutate(|up| { up.push(UMP_SEPARATOR); - // Send the core selector signal. - let core_selector = T::SelectCore::selected_core(); - up.push(UMPSignal::SelectCore(core_selector.0, core_selector.1).encode()); + if let Some((selector, offset)) = + CumulusDigestItem::find_select_core(&frame_system::Pallet::::digest()) + { + // Send the core selector signal. 
+ up.push(UMPSignal::SelectCore(selector, offset).encode()); + } }); } diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 867f0d79e9b47..75bd45975a6c3 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -114,8 +114,7 @@ pub async fn assert_finalized_para_throughput( } log::info!( - "Reached {} finalized relay chain blocks that contain backed candidates. The per-parachain distribution is: {:#?}", - stop_after, + "Reached {stop_after} finalized relay chain blocks that contain backed candidates. The per-parachain distribution is: {:#?}", candidate_count ); @@ -218,8 +217,7 @@ pub async fn assert_para_throughput( } log::info!( - "Reached {} relay chain blocks that contain backed candidates. The per-parachain distribution is: {:#?}", - stop_after, + "Reached {stop_after} relay chain blocks that contain backed candidates. The per-parachain distribution is: {:#?}", candidate_count ); @@ -227,6 +225,7 @@ pub async fn assert_para_throughput( let actual = candidate_count .get(¶_id) .expect("ParaId did not have any backed candidates"); + assert!( expected_candidate_range.contains(&actual.0), "Candidate count {} not within range {expected_candidate_range:?}", diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index b78a7e7b104be..3247fc2fa3a27 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -477,7 +477,7 @@ where } /// A snapshot of the runtime claim queue at an arbitrary relay chain block. 
-#[derive(Default, Clone)] +#[derive(Default, Clone, Debug)] pub struct ClaimQueueSnapshot(pub BTreeMap>); impl From>> for ClaimQueueSnapshot { diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 3c8cc3bf63cd0..ae7d22e32a1f3 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -541,9 +541,9 @@ pub enum CommittedCandidateReceiptError { /// The core index in commitments doesn't match the one in descriptor #[cfg_attr( feature = "std", - error("The core index in commitments doesn't match the one in descriptor") + error("The core index in commitments (:commitments?) doesn't match the one in descriptor (:descriptor?)") )] - CoreIndexMismatch, + CoreIndexMismatch { descriptor: CoreIndex, commitments: CoreIndex }, /// The core selector or claim queue offset is invalid. #[cfg_attr(feature = "std", error("The core selector or claim queue offset is invalid"))] InvalidSelectedCore, @@ -748,7 +748,10 @@ impl CommittedCandidateReceiptV2 { .copied()?; if core_index != descriptor_core_index { - return Err(CommittedCandidateReceiptError::CoreIndexMismatch) + return Err(CommittedCandidateReceiptError::CoreIndexMismatch { + descriptor: descriptor_core_index, + commitments: core_index, + }) } Ok(()) @@ -1329,7 +1332,10 @@ mod candidate_receipt_tests { new_ccr.descriptor.set_core_index(CoreIndex(1)); assert_eq!( new_ccr.parse_ump_signals(&cq), - Err(CommittedCandidateReceiptError::CoreIndexMismatch) + Err(CommittedCandidateReceiptError::CoreIndexMismatch { + descriptor: CoreIndex(1), + commitments: CoreIndex(0), + }) ); } From 79f1aabe3f11c816e7545319fd52194c75ce3842 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 26 Jun 2025 11:51:50 +0200 Subject: [PATCH 053/312] More fixes --- Cargo.lock | 1 + cumulus/pallets/parachain-system/src/lib.rs | 4 ++-- cumulus/primitives/core/src/lib.rs | 4 ++-- cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs | 6 +++--- 
polkadot/parachain/Cargo.toml | 1 + polkadot/parachain/src/primitives.rs | 14 ++++++++++++-- 6 files changed, 21 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 02130dfae9b43..30ebaf851be21 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15868,6 +15868,7 @@ dependencies = [ name = "polkadot-parachain-primitives" version = "6.0.0" dependencies = [ + "array-bytes 6.2.2", "bounded-collections 0.2.3", "derive_more 0.99.17", "parity-scale-codec", diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 868867a722553..10bf6fec9fc69 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -1511,11 +1511,11 @@ impl Pallet { }; UpwardMessages::::mutate(|up| { - up.push(UMP_SEPARATOR); - if let Some((selector, offset)) = CumulusDigestItem::find_select_core(&frame_system::Pallet::::digest()) { + up.push(UMP_SEPARATOR); + // Send the core selector signal. up.push(UMPSignal::SelectCore(selector, offset).encode()); } diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 5a0c60bc18daa..26d584a1431b6 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -233,7 +233,7 @@ pub enum CumulusDigestItem { impl CumulusDigestItem { /// Encode this as a Substrate [`DigestItem`]. pub fn to_digest_item(&self) -> DigestItem { - DigestItem::Consensus(CUMULUS_CONSENSUS_ID, self.encode()) + DigestItem::PreRuntime(CUMULUS_CONSENSUS_ID, self.encode()) } /// Find [`CumulusDigestItem::SelectCore`] in the given `digest`. @@ -242,7 +242,7 @@ impl CumulusDigestItem { /// well-behaving runtimes should not produce headers with more than one. 
pub fn find_select_core(digest: &Digest) -> Option<(CoreSelector, ClaimQueueOffset)> { digest.convert_first(|d| match d { - DigestItem::Consensus(id, val) if id == &CUMULUS_CONSENSUS_ID => { + DigestItem::PreRuntime(id, val) if id == &CUMULUS_CONSENSUS_ID => { let Ok(CumulusDigestItem::SelectCore { selector, claim_queue_offset }) = CumulusDigestItem::decode_all(&mut &val[..]) else { diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 75bd45975a6c3..0d626f812fd00 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -115,7 +115,7 @@ pub async fn assert_finalized_para_throughput( log::info!( "Reached {stop_after} finalized relay chain blocks that contain backed candidates. The per-parachain distribution is: {:#?}", - candidate_count + candidate_count.iter().map(|(para_id, count)| format!("{para_id} has {count} backed candidates")).collect::>() ); for (para_id, expected_candidate_range) in expected_candidate_ranges { @@ -217,8 +217,8 @@ pub async fn assert_para_throughput( } log::info!( - "Reached {stop_after} relay chain blocks that contain backed candidates. The per-parachain distribution is: {:#?}", - candidate_count + "Reached {stop_after} relay chain blocks that contain backed candidates: {:#?}", + candidate_count.iter().map(|(para_id, (count, _))| format!("Parachain {para_id} has {count} backed candidates")).collect::>() ); for (para_id, expected_candidate_range) in expected_candidate_ranges { diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index 0dd103d58b25e..03b6218671aa4 100644 --- a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # note: special care is taken to avoid inclusion of `sp-io` externals when compiling # this crate for WASM. 
This is critical to avoid forcing all parachain WASM into implementing # various unnecessary Substrate-specific endpoints. +array-bytes = { workspace = true } bounded-collections = { features = ["serde"], workspace = true } codec = { features = ["derive"], workspace = true } derive_more = { workspace = true, default-features = true } diff --git a/polkadot/parachain/src/primitives.rs b/polkadot/parachain/src/primitives.rs index 73d060c1adaca..117383968b402 100644 --- a/polkadot/parachain/src/primitives.rs +++ b/polkadot/parachain/src/primitives.rs @@ -42,7 +42,6 @@ pub use polkadot_core_primitives::BlockNumber as RelayChainBlockNumber; Encode, Decode, DecodeWithMemTracking, - RuntimeDebug, derive_more::From, TypeInfo, Serialize, @@ -51,6 +50,12 @@ pub use polkadot_core_primitives::BlockNumber as RelayChainBlockNumber; #[cfg_attr(feature = "std", derive(Hash, Default))] pub struct HeadData(#[serde(with = "bytes")] pub Vec); +impl core::fmt::Debug for HeadData { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "HeadData({})", array_bytes::bytes2hex("0x", &self.0)) + } +} + impl HeadData { /// Returns the hash of this head data. pub fn hash(&self) -> Hash { @@ -68,7 +73,6 @@ impl codec::EncodeLike for alloc::vec::Vec {} Encode, Decode, DecodeWithMemTracking, - RuntimeDebug, derive_more::From, TypeInfo, Serialize, @@ -77,6 +81,12 @@ impl codec::EncodeLike for alloc::vec::Vec {} #[cfg_attr(feature = "std", derive(Hash))] pub struct ValidationCode(#[serde(with = "bytes")] pub Vec); +impl core::fmt::Debug for ValidationCode { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "ValidationCode({})", array_bytes::bytes2hex("0x", &self.0)) + } +} + impl ValidationCode { /// Get the blake2-256 hash of the validation code bytes. 
pub fn hash(&self) -> ValidationCodeHash { From 8c56db771c25a89a41fc94a150de8e4b6c28a9c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 26 Jun 2025 14:21:27 +0200 Subject: [PATCH 054/312] Some docs --- .../aura/src/collators/slot_based/block_builder_task.rs | 4 ++-- polkadot/primitives/src/vstaging/mod.rs | 7 ++++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index db3414e37a437..944917faac40a 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -396,10 +396,10 @@ where .build_block_and_import( &parent_header, &slot_claim, - Some(vec![dbg!(CumulusDigestItem::SelectCore { + Some(vec![CumulusDigestItem::SelectCore { selector: core_selector, claim_queue_offset, - }) + } .to_digest_item()]), (parachain_inherent_data, other_inherent_data), authoring_duration, diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index ae7d22e32a1f3..0c9cf9d8e6c00 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -543,7 +543,12 @@ pub enum CommittedCandidateReceiptError { feature = "std", error("The core index in commitments (:commitments?) doesn't match the one in descriptor (:descriptor?)") )] - CoreIndexMismatch { descriptor: CoreIndex, commitments: CoreIndex }, + CoreIndexMismatch { + /// The core index as found in the descriptor. + descriptor: CoreIndex, + /// The core index as found in the commitments. + commitments: CoreIndex + }, /// The core selector or claim queue offset is invalid. 
#[cfg_attr(feature = "std", error("The core selector or claim queue offset is invalid"))] InvalidSelectedCore, From 3d738ba389066ffb351153612d90d1cd95fb5d90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 26 Jun 2025 15:58:28 +0200 Subject: [PATCH 055/312] Rename `CoreSelector` to `CoreInfo` --- .../slot_based/block_builder_task.rs | 101 +++++++++--------- cumulus/pallets/parachain-system/src/lib.rs | 23 ++-- cumulus/primitives/core/src/lib.rs | 50 ++++++--- .../node/subsystem-util/src/runtime/mod.rs | 26 +++-- 4 files changed, 116 insertions(+), 84 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 944917faac40a..a7d3c8f7d16e5 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -35,7 +35,7 @@ use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockIm use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; use cumulus_primitives_core::{ - extract_relay_parent, rpsr_digest, ClaimQueueOffset, CoreSelector, CumulusDigestItem, + extract_relay_parent, rpsr_digest, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, GetCoreSelectorApi, PersistedValidationData, RelayParentOffsetApi, }; use cumulus_relay_chain_interface::RelayChainInterface; @@ -235,44 +235,45 @@ where let parent_header = parent.header; // Retrieve the core selector. 
- let (core_selector, claim_queue_offset, core_index) = match determine_core( - &mut relay_chain_data_cache, - &relay_parent_header, - para_id, - &parent_header, - ) - .await - { - Err(()) => { - tracing::debug!( - target: LOG_TARGET, - ?relay_parent, - "Failed to determine core" - ); - - continue - }, - Ok(Some(res)) => { - tracing::debug!( - target: LOG_TARGET, - ?relay_parent, - core_selector = ?res.0, - claim_queue_offset = ?res.1, - "Going to claim core", - ); - - res - }, - Ok(None) => { - tracing::debug!( - target: LOG_TARGET, - ?relay_parent, - "No available core" - ); - - continue - }, - }; + let (core_selector, claim_queue_offset, core_index, number_of_cores) = + match determine_core( + &mut relay_chain_data_cache, + &relay_parent_header, + para_id, + &parent_header, + ) + .await + { + Err(()) => { + tracing::debug!( + target: LOG_TARGET, + ?relay_parent, + "Failed to determine core" + ); + + continue + }, + Ok(Some(res)) => { + tracing::debug!( + target: LOG_TARGET, + ?relay_parent, + core_selector = ?res.0, + claim_queue_offset = ?res.1, + "Going to claim core", + ); + + res + }, + Ok(None) => { + tracing::debug!( + target: LOG_TARGET, + ?relay_parent, + "No available core" + ); + + continue + }, + }; let Ok(RelayChainData { max_pov_size, claim_queue, .. }) = relay_chain_data_cache.get_mut_relay_chain_data(relay_parent).await @@ -396,10 +397,11 @@ where .build_block_and_import( &parent_header, &slot_claim, - Some(vec![CumulusDigestItem::SelectCore { + Some(vec![CumulusDigestItem::CoreInfo(CoreInfo { selector: core_selector, claim_queue_offset, - } + number_of_cores: number_of_cores.into(), + }) .to_digest_item()]), (parachain_inherent_data, other_inherent_data), authoring_duration, @@ -522,11 +524,8 @@ async fn determine_core( relay_parent: &RelayHeader, para_id: ParaId, parent: &Header, -) -> Result, ()> { - // The digest should be always there and if not, we can just assume `(0, 0)` as offset and - // selector. 
- let (last_selector, last_offset) = CumulusDigestItem::find_select_core(parent.digest()) - .map_or_else(|| (None, None), |(selector, offset)| (Some(selector), Some(offset))); +) -> Result, ()> { + let core_info = CumulusDigestItem::find_core_info(parent.digest()); let last_relay_parent = if parent.number().is_zero() { 0 @@ -550,13 +549,17 @@ async fn determine_core( // If the offset between the last relay parent and the current one is bigger than the last // claim queue offset, we can start from the beginning of the claim queue. Because there was no // core yet claimed from this claim queue. - let res = if relay_parent_offset > last_offset.unwrap_or_default().0 as u32 { + let res = if relay_parent_offset > + core_info.as_ref().map(|ci| ci.claim_queue_offset).unwrap_or_default().0 as u32 + { claim_queue.find_core(para_id, 0, 0) } else { claim_queue.find_core( para_id, - last_selector.map_or(0, |s| s.0 as u32 + 1), - last_offset.map_or(0, |o| o.0 as u32 - relay_parent_offset), + core_info.as_ref().map_or(0, |ci| ci.selector.0 as u32 + 1), + core_info + .as_ref() + .map_or(0, |ci| ci.claim_queue_offset.0 as u32 - relay_parent_offset), ) }; diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 10bf6fec9fc69..3aa8d449b796b 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -37,8 +37,8 @@ use cumulus_primitives_core::{ self, vstaging::{ClaimQueueOffset, CoreSelector, DEFAULT_CLAIM_QUEUE_OFFSET}, }, - AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, GetChannelInfo, - InboundDownwardMessage, InboundHrmpMessage, ListChannelInfos, MessageSendError, + AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, CumulusDigestItem, + GetChannelInfo, InboundDownwardMessage, InboundHrmpMessage, ListChannelInfos, MessageSendError, OutboundHrmpMessage, ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender, XcmpMessageHandler, 
XcmpMessageSource, }; @@ -585,6 +585,11 @@ pub mod pallet { // Always try to read `UpgradeGoAhead` in `on_finalize`. weight += T::DbWeight::get().reads(1); + if !CumulusDigestItem::core_info_exists_at_max_once(&frame_system::Pallet::::digest()) + { + panic!("`CumulusDigestItem::CoreInfo` must exist at max once."); + } + weight } } @@ -1505,19 +1510,19 @@ impl Pallet { /// Send the ump signals #[cfg(feature = "experimental-ump-signals")] fn send_ump_signal() { - use cumulus_primitives_core::{ - relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}, - CumulusDigestItem, - }; + use cumulus_primitives_core::relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}; UpwardMessages::::mutate(|up| { - if let Some((selector, offset)) = - CumulusDigestItem::find_select_core(&frame_system::Pallet::::digest()) + if let Some(core_info) = + CumulusDigestItem::find_core_info(&frame_system::Pallet::::digest()) { up.push(UMP_SEPARATOR); // Send the core selector signal. - up.push(UMPSignal::SelectCore(selector, offset).encode()); + up.push( + UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset) + .encode(), + ); } }); } diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 26d584a1431b6..1bcf4e52da4f8 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -21,7 +21,7 @@ extern crate alloc; use alloc::vec::Vec; -use codec::{Decode, DecodeAll, DecodeWithMemTracking, Encode, MaxEncodedLen}; +use codec::{Compact, Decode, DecodeAll, DecodeWithMemTracking, Encode, MaxEncodedLen}; use polkadot_parachain_primitives::primitives::HeadData; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; @@ -214,20 +214,27 @@ pub enum ServiceQuality { /// A consensus engine ID indicating that this is a Cumulus Parachain. pub const CUMULUS_CONSENSUS_ID: ConsensusEngineId = *b"CMLS"; +/// Information about the core on the relay chain this block will be validated on. 
+#[derive(Clone, Debug, Decode, Encode, PartialEq)] +pub struct CoreInfo { + /// The selector that determines the actual core at `claim_queue_offset`. + pub selector: CoreSelector, + /// The claim queue offset that determines how far "into the future" the core is selected. + pub claim_queue_offset: ClaimQueueOffset, + /// The number of cores assigned to the parachain at `claim_queue_offset`. + pub number_of_cores: Compact, +} + /// Consensus header digests for Cumulus parachains. -#[derive(Clone, RuntimeDebug, Decode, Encode, PartialEq)] +#[derive(Clone, Debug, Decode, Encode, PartialEq)] pub enum CumulusDigestItem { /// A digest item indicating the relay-parent a parachain block was built against. #[codec(index = 0)] RelayParent(relay_chain::Hash), - /// A digest item indicating which core to select on the relay chain for this block. + /// A digest item providing information about the core selected on the relay chain for this + /// block. #[codec(index = 1)] - SelectCore { - /// The selector that determines the actual core. - selector: CoreSelector, - /// The claim queue offset that determines how far "into the future" the core is selected. - claim_queue_offset: ClaimQueueOffset, - }, + CoreInfo(CoreInfo), } impl CumulusDigestItem { @@ -236,24 +243,37 @@ impl CumulusDigestItem { DigestItem::PreRuntime(CUMULUS_CONSENSUS_ID, self.encode()) } - /// Find [`CumulusDigestItem::SelectCore`] in the given `digest`. + /// Find [`CumulusDigestItem::CoreInfo`] in the given `digest`. /// - /// If there are multiple valid digests, this returns the value of the first one, although - /// well-behaving runtimes should not produce headers with more than one. - pub fn find_select_core(digest: &Digest) -> Option<(CoreSelector, ClaimQueueOffset)> { + /// If there are multiple valid digests, this returns the value of the first one. 
+ pub fn find_core_info(digest: &Digest) -> Option { digest.convert_first(|d| match d { DigestItem::PreRuntime(id, val) if id == &CUMULUS_CONSENSUS_ID => { - let Ok(CumulusDigestItem::SelectCore { selector, claim_queue_offset }) = + let Ok(CumulusDigestItem::CoreInfo(core_info)) = CumulusDigestItem::decode_all(&mut &val[..]) else { return None }; - Some((selector, claim_queue_offset)) + Some(core_info) }, _ => None, }) } + + /// Returns `true` if `Self::CoreInfo` only exists at max once in the given `digest`. + pub fn core_info_exists_at_max_once(digest: &Digest) -> bool { + digest + .logs() + .iter() + .filter(|l| match l { + DigestItem::PreRuntime(CUMULUS_CONSENSUS_ID, d) => { + matches!(Self::decode_all(&mut &d[..]), Ok(CumulusDigestItem::CoreInfo(_))) + }, + _ => false, + }) + .count() <= 1 + } } /// Extract the relay-parent from the provided header digest. Returns `None` if none were found. diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 3247fc2fa3a27..452e1510273bd 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -521,12 +521,15 @@ impl ClaimQueueSnapshot { /// /// `cores_claimed` is the number of cores already claimed from this snapshot for `para_id` at /// the given `claim_queue_offset`. + /// + /// Returns the core selector, claim queue offset, core index and the number of cores at claim + /// queue offset. 
pub fn find_core( &self, para_id: ParaId, mut cores_claimed: u32, claim_queue_offset: u32, - ) -> Option<(CoreSelector, ClaimQueueOffset, CoreIndex)> { + ) -> Option<(CoreSelector, ClaimQueueOffset, CoreIndex, u16)> { let mut offset_to_core_count = BTreeMap::>::new(); self.0.iter().for_each(|(core_index, ids)| { @@ -548,6 +551,7 @@ impl ClaimQueueSnapshot { CoreSelector(cores_claimed as u8), ClaimQueueOffset(offset as u8), *core_index, + cores.len() as u16, )) } @@ -659,54 +663,54 @@ mod test { assert_eq!( claim_queue.find_core(1u32.into(), 0, 0).unwrap(), - (CoreSelector(0), ClaimQueueOffset(0), CoreIndex(0)) + (CoreSelector(0), ClaimQueueOffset(0), CoreIndex(0), 3) ); assert_eq!( claim_queue.find_core(1u32.into(), 1, 0).unwrap(), - (CoreSelector(1), ClaimQueueOffset(0), CoreIndex(1)) + (CoreSelector(1), ClaimQueueOffset(0), CoreIndex(1), 3) ); assert_eq!( claim_queue.find_core(1u32.into(), 2, 0).unwrap(), - (CoreSelector(2), ClaimQueueOffset(0), CoreIndex(2)) + (CoreSelector(2), ClaimQueueOffset(0), CoreIndex(2), 3) ); assert_eq!( claim_queue.find_core(1u32.into(), 3, 0).unwrap(), - (CoreSelector(0), ClaimQueueOffset(1), CoreIndex(1)) + (CoreSelector(0), ClaimQueueOffset(1), CoreIndex(1), 2) ); assert_eq!( claim_queue.find_core(1u32.into(), 4, 0).unwrap(), - (CoreSelector(1), ClaimQueueOffset(1), CoreIndex(3)) + (CoreSelector(1), ClaimQueueOffset(1), CoreIndex(3), 2) ); assert_eq!( claim_queue.find_core(1u32.into(), 5, 0).unwrap(), - (CoreSelector(0), ClaimQueueOffset(2), CoreIndex(0)) + (CoreSelector(0), ClaimQueueOffset(2), CoreIndex(0), 1) ); assert_eq!(claim_queue.find_core(1u32.into(), 6, 0), None); assert_eq!( claim_queue.find_core(1u32.into(), 0, 1).unwrap(), - (CoreSelector(0), ClaimQueueOffset(1), CoreIndex(1)) + (CoreSelector(0), ClaimQueueOffset(1), CoreIndex(1), 2) ); assert_eq!( claim_queue.find_core(1u32.into(), 2, 1).unwrap(), - (CoreSelector(0), ClaimQueueOffset(2), CoreIndex(0)) + (CoreSelector(0), ClaimQueueOffset(2), CoreIndex(0), 1) ); 
assert_eq!( claim_queue.find_core(3u32.into(), 0, 0).unwrap(), - (CoreSelector(0), ClaimQueueOffset(2), CoreIndex(2)) + (CoreSelector(0), ClaimQueueOffset(2), CoreIndex(2), 2) ); assert_eq!( claim_queue.find_core(3u32.into(), 1, 0).unwrap(), - (CoreSelector(1), ClaimQueueOffset(2), CoreIndex(3)) + (CoreSelector(1), ClaimQueueOffset(2), CoreIndex(3), 2) ); } } From e9bc7c77e5429f2530d01ceb93e676287deaa77d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 26 Jun 2025 16:45:55 +0200 Subject: [PATCH 056/312] Remove `SelectCore` runtime logic --- cumulus/pallets/aura-ext/src/test.rs | 1 - cumulus/pallets/parachain-system/src/lib.rs | 53 +------------------ cumulus/pallets/parachain-system/src/mock.rs | 1 - cumulus/pallets/xcmp-queue/src/mock.rs | 1 - .../assets/asset-hub-rococo/src/lib.rs | 1 - .../assets/asset-hub-westend/src/lib.rs | 1 - .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 1 - .../bridge-hubs/bridge-hub-westend/src/lib.rs | 1 - .../collectives-westend/src/lib.rs | 1 - .../coretime/coretime-rococo/src/lib.rs | 1 - .../coretime/coretime-westend/src/lib.rs | 1 - .../glutton/glutton-westend/src/lib.rs | 1 - .../runtimes/people/people-rococo/src/lib.rs | 1 - .../runtimes/people/people-westend/src/lib.rs | 1 - .../runtimes/testing/penpal/src/lib.rs | 1 - .../testing/rococo-parachain/src/lib.rs | 1 - .../testing/yet-another-parachain/src/lib.rs | 1 - cumulus/test/runtime/src/lib.rs | 1 - .../src/guides/handling_parachain_forks.rs | 1 - docs/sdk/src/polkadot_sdk/cumulus.rs | 1 - .../runtimes/parachain/src/lib.rs | 1 - .../parachain/runtime/src/configs/mod.rs | 1 - 22 files changed, 1 insertion(+), 73 deletions(-) diff --git a/cumulus/pallets/aura-ext/src/test.rs b/cumulus/pallets/aura-ext/src/test.rs index cfb3845fdbf0c..baf49048567de 100644 --- a/cumulus/pallets/aura-ext/src/test.rs +++ b/cumulus/pallets/aura-ext/src/test.rs @@ -114,7 +114,6 @@ impl cumulus_pallet_parachain_system::Config for Test { type ReservedXcmpWeight = (); type 
CheckAssociatedRelayNumber = AnyRelayNumber; type ConsensusHook = ExpectParentIncluded; - type SelectCore = DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 3aa8d449b796b..84571d3c83b10 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -185,50 +185,6 @@ pub mod ump_constants { pub const THRESHOLD_FACTOR: u32 = 2; } -/// Trait for selecting the next core to build the candidate for. -pub trait SelectCore { - /// Core selector information for the current block. - fn selected_core() -> (CoreSelector, ClaimQueueOffset); - /// Core selector information for the next block. - fn select_next_core() -> (CoreSelector, ClaimQueueOffset); -} - -/// The default core selection policy. -pub struct DefaultCoreSelector(PhantomData); - -impl SelectCore for DefaultCoreSelector { - fn selected_core() -> (CoreSelector, ClaimQueueOffset) { - let core_selector = frame_system::Pallet::::block_number().using_encoded(|b| b[0]); - - (CoreSelector(core_selector), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET)) - } - - fn select_next_core() -> (CoreSelector, ClaimQueueOffset) { - let core_selector = - (frame_system::Pallet::::block_number() + One::one()).using_encoded(|b| b[0]); - - (CoreSelector(core_selector), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET)) - } -} - -/// Core selection policy that builds on claim queue offset 1. 
-pub struct LookaheadCoreSelector(PhantomData); - -impl SelectCore for LookaheadCoreSelector { - fn selected_core() -> (CoreSelector, ClaimQueueOffset) { - let core_selector = frame_system::Pallet::::block_number().using_encoded(|b| b[0]); - - (CoreSelector(core_selector), ClaimQueueOffset(1)) - } - - fn select_next_core() -> (CoreSelector, ClaimQueueOffset) { - let core_selector = - (frame_system::Pallet::::block_number() + One::one()).using_encoded(|b| b[0]); - - (CoreSelector(core_selector), ClaimQueueOffset(1)) - } -} - #[frame_support::pallet] pub mod pallet { use super::*; @@ -291,9 +247,6 @@ pub mod pallet { /// in the relay-chain state proof. type ConsensusHook: ConsensusHook; - /// Select core. - type SelectCore: SelectCore; - /// The offset between the tip of the relay chain and the parent relay block used as parent /// when authoring a parachain block. /// @@ -585,6 +538,7 @@ pub mod pallet { // Always try to read `UpgradeGoAhead` in `on_finalize`. weight += T::DbWeight::get().reads(1); + // We need to ensure that `CoreInfo` digest exists only once. if !CumulusDigestItem::core_info_exists_at_max_once(&frame_system::Pallet::::digest()) { panic!("`CumulusDigestItem::CoreInfo` must exist at max once."); @@ -1486,11 +1440,6 @@ impl Pallet { } } - /// Returns the core selector for the next block. - pub fn core_selector() -> (CoreSelector, ClaimQueueOffset) { - T::SelectCore::select_next_core() - } - /// Set a custom head data that should be returned as result of `validate_block`. 
/// /// This will overwrite the head data that is returned as result of `validate_block` while diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index 76f8ebc602139..d3f2e80e7ea4f 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -95,7 +95,6 @@ impl Config for Test { type CheckAssociatedRelayNumber = AnyRelayNumber; type ConsensusHook = TestConsensusHook; type WeightInfo = (); - type SelectCore = DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index 2dcb4e9b607a7..873784e3bcc5b 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -104,7 +104,6 @@ impl cumulus_pallet_parachain_system::Config for Test { type ReservedXcmpWeight = (); type CheckAssociatedRelayNumber = AnyRelayNumber; type ConsensusHook = cumulus_pallet_parachain_system::consensus_hook::ExpectParentIncluded; - type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 800607f5512f6..dcdbefba4baa5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -738,7 +738,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; - type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 
98e1f1518f309..9a779beb84bb1 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -790,7 +790,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; - type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 25e0d3f51b925..c721f88166137 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -399,7 +399,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; - type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 2ee35714b3107..75306e22e22a5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -389,7 +389,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; - type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs 
b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 9bf926a6f4933..6f7be3ce76e58 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -408,7 +408,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; - type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 3c9c368a5fbdf..1d76df5ddddb2 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -302,7 +302,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; - type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 7b7e80f6cdbb0..ac1ed69512b71 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -303,7 +303,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; - type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } diff --git 
a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 071f3c24afebf..5aa0053197132 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -189,7 +189,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; - type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 01f5f61e4c092..d51b8df06fd51 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -277,7 +277,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; - type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index a2d6e942421f4..9e82aec204bc0 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -276,7 +276,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; - type SelectCore = 
cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 06c09a196c1ee..6820848656a2b 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -686,7 +686,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { BLOCK_PROCESSING_VELOCITY, UNINCLUDED_SEGMENT_CAPACITY, >; - type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 697bf1e011a6f..7ca9da1a587aa 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -307,7 +307,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; - type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } diff --git a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs index a5c94fca779e8..ebf45f69d818a 100644 --- a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs @@ -308,7 +308,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; - type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32; } diff --git 
a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index f722cf7b21647..7767638c09d7e 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -376,7 +376,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; - type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32; } diff --git a/docs/sdk/src/guides/handling_parachain_forks.rs b/docs/sdk/src/guides/handling_parachain_forks.rs index 6bc4bedbf1c0a..3b3d2022a1863 100644 --- a/docs/sdk/src/guides/handling_parachain_forks.rs +++ b/docs/sdk/src/guides/handling_parachain_forks.rs @@ -74,7 +74,6 @@ //! impl cumulus_pallet_parachain_system::Config for Runtime { //! // Other config items here //! ... -//! type SelectCore = DefaultCoreSelector; //! type RelayParentOffset = ConstU32; //! } //! ``` diff --git a/docs/sdk/src/polkadot_sdk/cumulus.rs b/docs/sdk/src/polkadot_sdk/cumulus.rs index cdab5e8db4377..010f175784800 100644 --- a/docs/sdk/src/polkadot_sdk/cumulus.rs +++ b/docs/sdk/src/polkadot_sdk/cumulus.rs @@ -96,7 +96,6 @@ mod tests { >; type WeightInfo = (); type DmpQueue = frame::traits::EnqueueWithOrigin<(), sp_core::ConstU8<0>>; - type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } diff --git a/substrate/frame/staking-async/runtimes/parachain/src/lib.rs b/substrate/frame/staking-async/runtimes/parachain/src/lib.rs index b595910c52dfa..e5b4069d0fafd 100644 --- a/substrate/frame/staking-async/runtimes/parachain/src/lib.rs +++ b/substrate/frame/staking-async/runtimes/parachain/src/lib.rs @@ -808,7 +808,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; - type 
SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } diff --git a/templates/parachain/runtime/src/configs/mod.rs b/templates/parachain/runtime/src/configs/mod.rs index a15a2c6d8bc4b..582677543a710 100644 --- a/templates/parachain/runtime/src/configs/mod.rs +++ b/templates/parachain/runtime/src/configs/mod.rs @@ -209,7 +209,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; - type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; type RelayParentOffset = ConstU32<0>; } From c6be8a420edf1c04de44762716a1f90f958fb00f Mon Sep 17 00:00:00 2001 From: "cmd[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 08:28:37 +0000 Subject: [PATCH 057/312] Update from github-actions[bot] running command 'fmt' --- cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs | 7 ++++++- polkadot/primitives/src/vstaging/mod.rs | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 6b8c8a0f1dbb0..161d5d80044d5 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -218,7 +218,12 @@ pub async fn assert_para_throughput( log::info!( "Reached {stop_after} relay chain blocks that contain backed candidates: {:#?}", - candidate_count.iter().map(|(para_id, (count, _))| format!("Parachain {para_id} has {count} backed candidates")).collect::>() + candidate_count + .iter() + .map(|(para_id, (count, _))| format!( + "Parachain {para_id} has {count} backed candidates" + )) + .collect::>() ); for (para_id, expected_candidate_range) in expected_candidate_ranges { diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs 
index 0c9cf9d8e6c00..4c62e583e4c83 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -547,7 +547,7 @@ pub enum CommittedCandidateReceiptError { /// The core index as found in the descriptor. descriptor: CoreIndex, /// The core index as found in the commitments. - commitments: CoreIndex + commitments: CoreIndex, }, /// The core selector or claim queue offset is invalid. #[cfg_attr(feature = "std", error("The core selector or claim queue offset is invalid"))] From 2a03e75b8c7b446b466bd7d099016f7376e28917 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 27 Jun 2025 15:33:07 +0200 Subject: [PATCH 058/312] Fixes --- .../slot_based/block_builder_task.rs | 8 +- .../aura/src/collators/slot_based/mod.rs | 8 +- cumulus/pallets/parachain-system/src/lib.rs | 16 +-- cumulus/pallets/parachain-system/src/tests.rs | 125 ++---------------- .../src/validate_block/tests.rs | 67 +++++++--- .../assets/asset-hub-rococo/src/lib.rs | 6 - .../assets/asset-hub-westend/src/lib.rs | 6 - .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 6 - .../bridge-hubs/bridge-hub-westend/src/lib.rs | 6 - .../collectives-westend/src/lib.rs | 6 - .../coretime/coretime-rococo/src/lib.rs | 6 - .../coretime/coretime-westend/src/lib.rs | 6 - .../glutton/glutton-westend/src/lib.rs | 6 - .../runtimes/people/people-rococo/src/lib.rs | 6 - .../runtimes/people/people-westend/src/lib.rs | 6 - .../runtimes/testing/penpal/src/lib.rs | 6 - .../testing/rococo-parachain/src/lib.rs | 6 - .../polkadot-omni-node/lib/src/common/mod.rs | 6 +- .../lib/src/fake_runtime_api/utils.rs | 5 - cumulus/primitives/core/src/lib.rs | 6 - cumulus/test/client/src/block_builder.rs | 77 ++++++++--- cumulus/test/runtime/src/lib.rs | 8 +- .../runtimes/parachain/src/lib.rs | 6 - 23 files changed, 131 insertions(+), 273 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs 
b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index a7d3c8f7d16e5..c8926bea12151 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -36,7 +36,7 @@ use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; use cumulus_primitives_core::{ extract_relay_parent, rpsr_digest, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, - GetCoreSelectorApi, PersistedValidationData, RelayParentOffsetApi, + PersistedValidationData, RelayParentOffsetApi, }; use cumulus_relay_chain_interface::RelayChainInterface; use futures::prelude::*; @@ -124,10 +124,8 @@ where + Send + Sync + 'static, - Client::Api: AuraApi - + GetCoreSelectorApi - + RelayParentOffsetApi - + AuraUnincludedSegmentApi, + Client::Api: + AuraApi + RelayParentOffsetApi + AuraUnincludedSegmentApi, Backend: sc_client_api::Backend + 'static, RelayClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index 8ab159125a07b..1f56480365055 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -74,7 +74,7 @@ use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterfa use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::{GetCoreSelectorApi, RelayParentOffsetApi}; +use cumulus_primitives_core::RelayParentOffsetApi; use cumulus_relay_chain_interface::RelayChainInterface; use futures::FutureExt; use polkadot_primitives::{ @@ -160,10 +160,8 @@ 
pub fn run - + GetCoreSelectorApi - + AuraUnincludedSegmentApi - + RelayParentOffsetApi, + Client::Api: + AuraApi + AuraUnincludedSegmentApi + RelayParentOffsetApi, Backend: sc_client_api::Backend + 'static, RClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 84571d3c83b10..561fc60bf232b 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -31,16 +31,12 @@ extern crate alloc; use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec}; use codec::{Decode, DecodeLimit, Encode}; -use core::{cmp, marker::PhantomData}; +use core::cmp; use cumulus_primitives_core::{ - relay_chain::{ - self, - vstaging::{ClaimQueueOffset, CoreSelector, DEFAULT_CLAIM_QUEUE_OFFSET}, - }, - AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, CumulusDigestItem, - GetChannelInfo, InboundDownwardMessage, InboundHrmpMessage, ListChannelInfos, MessageSendError, - OutboundHrmpMessage, ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender, - XcmpMessageHandler, XcmpMessageSource, + relay_chain, AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, + CumulusDigestItem, GetChannelInfo, InboundDownwardMessage, InboundHrmpMessage, + ListChannelInfos, MessageSendError, OutboundHrmpMessage, ParaId, PersistedValidationData, + UpwardMessage, UpwardMessageSender, XcmpMessageHandler, XcmpMessageSource, }; use cumulus_primitives_parachain_inherent::{v0, MessageQueueChain, ParachainInherentData}; use frame_support::{ @@ -56,7 +52,7 @@ use polkadot_parachain_primitives::primitives::RelayChainBlockNumber; use polkadot_runtime_parachains::{FeeTracker, GetMinFeeFactor}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{Block as BlockT, BlockNumberProvider, Hash, One}, + traits::{Block as BlockT, BlockNumberProvider, Hash}, BoundedSlice, FixedU128, 
RuntimeDebug, }; use xcm::{latest::XcmHash, VersionedLocation, VersionedXcm, MAX_XCM_DECODE_DEPTH}; diff --git a/cumulus/pallets/parachain-system/src/tests.rs b/cumulus/pallets/parachain-system/src/tests.rs index 160f9d253e311..5fee780d8ed78 100755 --- a/cumulus/pallets/parachain-system/src/tests.rs +++ b/cumulus/pallets/parachain-system/src/tests.rs @@ -653,25 +653,7 @@ fn send_upward_message_num_per_candidate() { }, || { let v = UpwardMessages::::get(); - #[cfg(feature = "experimental-ump-signals")] - { - assert_eq!( - v, - vec![ - b"Mr F was here".to_vec(), - UMP_SEPARATOR, - UMPSignal::SelectCore( - CoreSelector(1), - ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET) - ) - .encode() - ] - ); - } - #[cfg(not(feature = "experimental-ump-signals"))] - { - assert_eq!(v, vec![b"Mr F was here".to_vec()]); - } + assert_eq!(v, vec![b"Mr F was here".to_vec()]); }, ) .add_with_post_test( @@ -682,25 +664,7 @@ fn send_upward_message_num_per_candidate() { }, || { let v = UpwardMessages::::get(); - #[cfg(feature = "experimental-ump-signals")] - { - assert_eq!( - v, - vec![ - b"message 2".to_vec(), - UMP_SEPARATOR, - UMPSignal::SelectCore( - CoreSelector(2), - ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET) - ) - .encode() - ] - ); - } - #[cfg(not(feature = "experimental-ump-signals"))] - { - assert_eq!(v, vec![b"message 2".to_vec()]); - } + assert_eq!(v, vec![b"message 2".to_vec()]); }, ); } @@ -726,24 +690,7 @@ fn send_upward_message_relay_bottleneck() { || { // The message won't be sent because there is already one message in queue. 
let v = UpwardMessages::::get(); - #[cfg(feature = "experimental-ump-signals")] - { - assert_eq!( - v, - vec![ - UMP_SEPARATOR, - UMPSignal::SelectCore( - CoreSelector(1), - ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET) - ) - .encode() - ] - ); - } - #[cfg(not(feature = "experimental-ump-signals"))] - { - assert!(v.is_empty()); - } + assert!(v.is_empty()); }, ) .add_with_post_test( @@ -751,25 +698,7 @@ fn send_upward_message_relay_bottleneck() { || { /* do nothing within block */ }, || { let v = UpwardMessages::::get(); - #[cfg(feature = "experimental-ump-signals")] - { - assert_eq!( - v, - vec![ - vec![0u8; 8], - UMP_SEPARATOR, - UMPSignal::SelectCore( - CoreSelector(2), - ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET) - ) - .encode() - ] - ); - } - #[cfg(not(feature = "experimental-ump-signals"))] - { - assert_eq!(v, vec![vec![0u8; 8]]); - } + assert_eq!(v, vec![vec![0u8; 8]]); }, ); } @@ -1348,25 +1277,7 @@ fn ump_fee_factor_increases_and_decreases() { || { // Factor decreases in `on_finalize`, but only if we are below the threshold let messages = UpwardMessages::::get(); - #[cfg(feature = "experimental-ump-signals")] - { - assert_eq!( - messages, - vec![ - b"Test".to_vec(), - UMP_SEPARATOR, - UMPSignal::SelectCore( - CoreSelector(1), - ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET) - ) - .encode() - ] - ); - } - #[cfg(not(feature = "experimental-ump-signals"))] - { - assert_eq!(messages, vec![b"Test".to_vec()]); - } + assert_eq!(messages, vec![b"Test".to_vec()]); assert_eq!( UpwardDeliveryFeeFactor::::get(), FixedU128::from_rational(105, 100) @@ -1380,28 +1291,10 @@ fn ump_fee_factor_increases_and_decreases() { }, || { let messages = UpwardMessages::::get(); - #[cfg(feature = "experimental-ump-signals")] - { - assert_eq!( - messages, - vec![ - b"This message will be enough to increase the fee factor".to_vec(), - UMP_SEPARATOR, - UMPSignal::SelectCore( - CoreSelector(2), - ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET) - ) - .encode() - ] - ); - } - #[cfg(not(feature = 
"experimental-ump-signals"))] - { - assert_eq!( - messages, - vec![b"This message will be enough to increase the fee factor".to_vec()] - ); - } + assert_eq!( + messages, + vec![b"This message will be enough to increase the fee factor".to_vec()] + ); // Now the delivery fee factor is decreased, since we are below the threshold assert_eq!(UpwardDeliveryFeeFactor::::get(), FixedU128::from_u32(1)); }, diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 028bd64566d25..9ae5ea7fa84da 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -29,9 +29,10 @@ use cumulus_test_client::{ }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use polkadot_parachain_primitives::primitives::ValidationResult; -#[cfg(feature = "experimental-ump-signals")] -use relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use sp_runtime::{ + traits::{Block as BlockT, Header as HeaderT}, + DigestItem, +}; use std::{env, process::Command}; @@ -125,6 +126,7 @@ fn build_block_with_witness( extra_extrinsics: Vec, parent_head: Header, mut sproof_builder: RelayStateSproofBuilder, + pre_digests: Vec, ) -> TestBlockData { sproof_builder.para_id = test_runtime::PARACHAIN_ID.into(); sproof_builder.included_para_head = Some(HeadData(parent_head.encode())); @@ -138,7 +140,7 @@ fn build_block_with_witness( let cumulus_test_client::BlockBuilderAndSupportData { mut block_builder, persisted_validation_data, - } = client.init_block_builder(Some(validation_data), sproof_builder); + } = client.init_block_builder_with_pre_digests(Some(validation_data), sproof_builder, pre_digests); extra_extrinsics.into_iter().for_each(|e| block_builder.push(e).unwrap()); @@ -205,8 +207,13 @@ fn validate_block_works() { sp_tracing::try_init_simple(); let (client, parent_head) = 
create_test_client(); - let TestBlockData { block, validation_data } = - build_block_with_witness(&client, Vec::new(), parent_head.clone(), Default::default()); + let TestBlockData { block, validation_data } = build_block_with_witness( + &client, + Vec::new(), + parent_head.clone(), + Default::default(), + Default::default(), + ); let block = seal_parachain_block_data(block, &client); let header = block.blocks()[0].header().clone(); @@ -252,6 +259,7 @@ fn validate_block_with_extra_extrinsics() { extra_extrinsics, parent_head.clone(), Default::default(), + Default::default(), ); let block = seal_parachain_block_data(block, &client); let header = block.blocks()[0].header().clone(); @@ -286,6 +294,7 @@ fn validate_block_returns_custom_head_data() { extra_extrinsics, parent_head.clone(), Default::default(), + Default::default(), ); let header = block.blocks()[0].header().clone(); assert_ne!(expected_header, header.encode()); @@ -309,8 +318,13 @@ fn validate_block_invalid_parent_hash() { if env::var("RUN_TEST").is_ok() { let (client, parent_head) = create_test_client(); - let TestBlockData { mut block, validation_data, .. } = - build_block_with_witness(&client, Vec::new(), parent_head.clone(), Default::default()); + let TestBlockData { mut block, validation_data, .. } = build_block_with_witness( + &client, + Vec::new(), + parent_head.clone(), + Default::default(), + Default::default(), + ); block.blocks_mut()[0].header.set_parent_hash(Hash::from_low_u64_be(1)); call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) @@ -334,8 +348,13 @@ fn validate_block_fails_on_invalid_validation_data() { if env::var("RUN_TEST").is_ok() { let (client, parent_head) = create_test_client(); - let TestBlockData { block, .. } = - build_block_with_witness(&client, Vec::new(), parent_head.clone(), Default::default()); + let TestBlockData { block, .. 
} = build_block_with_witness( + &client, + Vec::new(), + parent_head.clone(), + Default::default(), + Default::default(), + ); call_validate_block(parent_head, block, Hash::random()).unwrap_err(); } else { @@ -358,8 +377,13 @@ fn check_inherents_are_unsigned_and_before_all_other_extrinsics() { if env::var("RUN_TEST").is_ok() { let (client, parent_head) = create_test_client(); - let TestBlockData { mut block, validation_data, .. } = - build_block_with_witness(&client, Vec::new(), parent_head.clone(), Default::default()); + let TestBlockData { mut block, validation_data, .. } = build_block_with_witness( + &client, + Vec::new(), + parent_head.clone(), + Default::default(), + Default::default(), + ); block.blocks_mut()[0].extrinsics.insert(0, transfer(&client, Alice, Bob, 69)); @@ -428,6 +452,7 @@ fn validate_block_works_with_child_tries() { vec![generate_extrinsic(&client, Charlie, TestPalletCall::read_and_write_child_tries {})], parent_head.clone(), Default::default(), + Default::default(), ); let block = block.blocks()[0].clone(); @@ -441,6 +466,7 @@ fn validate_block_works_with_child_tries() { vec![generate_extrinsic(&client, Alice, TestPalletCall::read_and_write_child_tries {})], parent_head.clone(), Default::default(), + Default::default(), ); let block = seal_parachain_block_data(block, &client); @@ -454,6 +480,11 @@ fn validate_block_works_with_child_tries() { #[test] #[cfg(feature = "experimental-ump-signals")] fn validate_block_handles_ump_signal() { + use cumulus_primitives_core::{ + relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}, + ClaimQueueOffset, CoreInfo, CoreSelector, + }; + sp_tracing::try_init_simple(); let (client, parent_head) = create_elastic_scaling_test_client(); @@ -465,6 +496,12 @@ fn validate_block_handles_ump_signal() { extra_extrinsics, parent_head.clone(), Default::default(), + vec![CumulusDigestItem::CoreInfo(CoreInfo { + selector: CoreSelector(0), + claim_queue_offset: ClaimQueueOffset(0), + number_of_cores: 1.into(), + }) + 
.to_digest_item()], ); let block = seal_parachain_block_data(block, &client); @@ -480,10 +517,6 @@ fn validate_block_handles_ump_signal() { assert_eq!( upward_messages, - vec![ - UMP_SEPARATOR, - UMPSignal::SelectCore(CoreSelector(1), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET)) - .encode() - ] + vec![UMP_SEPARATOR, UMPSignal::SelectCore(CoreSelector(0), ClaimQueueOffset(0)).encode()] ); } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 1c0edba5174b8..874d7599e42e0 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1614,12 +1614,6 @@ impl_runtime_apis! { } } - impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { - fn core_selector() -> (CoreSelector, ClaimQueueOffset) { - ParachainSystem::core_selector() - } - } - #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 9a779beb84bb1..f407339aef02f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1833,12 +1833,6 @@ pallet_revive::impl_runtime_apis_plus_revive!( } } - impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { - fn core_selector() -> (CoreSelector, ClaimQueueOffset) { - ParachainSystem::core_selector() - } - } - #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs 
index 37f6467dab037..cc2bce15acc07 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -918,12 +918,6 @@ impl_runtime_apis! { } } - impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { - fn core_selector() -> (CoreSelector, ClaimQueueOffset) { - ParachainSystem::core_selector() - } - } - impl bp_westend::WestendFinalityApi for Runtime { fn best_finalized() -> Option> { BridgeWestendGrandpa::best_finalized() diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 4446c9319b454..d47cf24ae49ac 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -893,12 +893,6 @@ impl_runtime_apis! { } } - impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { - fn core_selector() -> (CoreSelector, ClaimQueueOffset) { - ParachainSystem::core_selector() - } - } - impl bp_rococo::RococoFinalityApi for Runtime { fn best_finalized() -> Option> { BridgeRococoGrandpa::best_finalized() diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 62d877532a6d9..f9ced17542f9e 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -1061,12 +1061,6 @@ impl_runtime_apis! 
{ } } - impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { - fn core_selector() -> (CoreSelector, ClaimQueueOffset) { - ParachainSystem::core_selector() - } - } - #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 045e18d108485..b72aab8d9619e 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -904,12 +904,6 @@ impl_runtime_apis! { } } - impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { - fn core_selector() -> (CoreSelector, ClaimQueueOffset) { - ParachainSystem::core_selector() - } - } - #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 678326b0c8d43..77ccf93134dbf 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -929,12 +929,6 @@ impl_runtime_apis! 
{ } } - impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { - fn core_selector() -> (CoreSelector, ClaimQueueOffset) { - ParachainSystem::core_selector() - } - } - #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 6f4f690c67043..2a3fde588ce94 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -435,12 +435,6 @@ impl_runtime_apis! { } } - impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { - fn core_selector() -> (CoreSelector, ClaimQueueOffset) { - ParachainSystem::core_selector() - } - } - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { fn account_nonce(account: AccountId) -> Nonce { System::account_nonce(account) diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 428875588fd36..64bbd438d7e18 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -852,12 +852,6 @@ impl_runtime_apis! 
{ } } - impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { - fn core_selector() -> (CoreSelector, ClaimQueueOffset) { - ParachainSystem::core_selector() - } - } - #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 366dd9878a862..58db70a5b9c3d 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -874,12 +874,6 @@ impl_runtime_apis! { } } - impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { - fn core_selector() -> (CoreSelector, ClaimQueueOffset) { - ParachainSystem::core_selector() - } - } - #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 1f3905481746b..4d064991cbb7a 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -1079,12 +1079,6 @@ impl_runtime_apis! 
{ } } - impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { - fn core_selector() -> (CoreSelector, ClaimQueueOffset) { - ParachainSystem::core_selector() - } - } - impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetLocationId(xcm_config::RelayLocation::get())]; diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index e62492756e800..b629bbb4d4ee4 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -880,12 +880,6 @@ impl_runtime_apis! { } } - impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { - fn core_selector() -> (CoreSelector, ClaimQueueOffset) { - ParachainSystem::core_selector() - } - } - impl cumulus_primitives_core::GetParachainInfo for Runtime { fn parachain_id() -> ParaId { ParachainInfo::parachain_id() diff --git a/cumulus/polkadot-omni-node/lib/src/common/mod.rs b/cumulus/polkadot-omni-node/lib/src/common/mod.rs index a1020f9f64f4a..70847263c0117 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/mod.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/mod.rs @@ -28,9 +28,7 @@ pub mod types; use crate::cli::AuthoringPolicy; -use cumulus_primitives_core::{ - CollectCollationInfo, GetCoreSelectorApi, GetParachainInfo, RelayParentOffsetApi, -}; +use cumulus_primitives_core::{CollectCollationInfo, GetParachainInfo, RelayParentOffsetApi}; use sc_client_db::DbHash; use sc_offchain::OffchainWorkerApi; use serde::de::DeserializeOwned; @@ -72,7 +70,6 @@ pub trait NodeRuntimeApi: + TaggedTransactionQueue + OffchainWorkerApi + CollectCollationInfo - + GetCoreSelectorApi + GetParachainInfo + RelayParentOffsetApi + Sized @@ -86,7 +83,6 @@ impl NodeRuntimeApi for T where + BlockBuilder + TaggedTransactionQueue + 
OffchainWorkerApi - + GetCoreSelectorApi + RelayParentOffsetApi + CollectCollationInfo + GetParachainInfo diff --git a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs index f22cbb59318d0..6e6591f3190ca 100644 --- a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs +++ b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs @@ -169,11 +169,6 @@ macro_rules! impl_node_runtime_apis { } } - impl cumulus_primitives_core::GetCoreSelectorApi<$block> for $runtime { - fn core_selector() -> (CoreSelector, ClaimQueueOffset) { - unimplemented!() - } - } impl cumulus_primitives_core::GetParachainInfo<$block> for $runtime { fn parachain_id() -> ParaId { unimplemented!() diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index aa53abc0c3f3c..6df4232ba17ce 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -408,12 +408,6 @@ sp_api::decl_runtime_apis! { fn collect_collation_info(header: &Block::Header) -> CollationInfo; } - /// Runtime api used to select the core for which the next block will be built. - pub trait GetCoreSelectorApi { - /// Retrieve core selector and claim queue offset for the next block. - fn core_selector() -> (CoreSelector, ClaimQueueOffset); - } - /// Runtime api used to access general info about a parachain runtime. pub trait GetParachainInfo { /// Retrieve the parachain id used for runtime. diff --git a/cumulus/test/client/src/block_builder.rs b/cumulus/test/client/src/block_builder.rs index ee400859f75c8..0764d860d9626 100644 --- a/cumulus/test/client/src/block_builder.rs +++ b/cumulus/test/client/src/block_builder.rs @@ -61,6 +61,17 @@ pub trait InitBlockBuilder { relay_sproof_builder: RelayStateSproofBuilder, ) -> BlockBuilderAndSupportData; + /// Init a specific block builder using the given pre-digests. 
+ /// + /// Same as [`InitBlockBuilder::init_block_builder`] besides that it takes vector of + /// [`DigestItem`]'s that are passed as pre-digest to the block builder. + fn init_block_builder_with_pre_digests( + &self, + validation_data: Option>, + relay_sproof_builder: RelayStateSproofBuilder, + pre_digests: Vec, + ) -> BlockBuilderAndSupportData; + /// Init a specific block builder that works for the test runtime. /// /// Same as [`InitBlockBuilder::init_block_builder`] besides that it takes a @@ -80,8 +91,27 @@ fn init_block_builder( at: Hash, validation_data: Option>, mut relay_sproof_builder: RelayStateSproofBuilder, - timestamp: u64, + timestamp: Option, + extra_pre_digests: Option>, ) -> BlockBuilderAndSupportData<'_> { + let timestamp = timestamp.unwrap_or_else(|| { + let last_timestamp = + client.runtime_api().get_last_timestamp(at).expect("Get last timestamp"); + + if last_timestamp == 0 { + if relay_sproof_builder.current_slot != 0u64 { + *relay_sproof_builder.current_slot * 6_000 + } else { + std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .expect("Time is always after UNIX_EPOCH; qed") + .as_millis() as u64 + } + } else { + last_timestamp + client.runtime_api().slot_duration(at).unwrap().as_millis() + } + }); + let slot: Slot = (timestamp / client.runtime_api().slot_duration(at).unwrap().as_millis()).into(); @@ -90,7 +120,14 @@ fn init_block_builder( } let aura_pre_digest = Digest { - logs: vec![DigestItem::PreRuntime(sp_consensus_aura::AURA_ENGINE_ID, slot.encode())], + logs: extra_pre_digests + .unwrap_or_default() + .into_iter() + .chain(std::iter::once(DigestItem::PreRuntime( + sp_consensus_aura::AURA_ENGINE_ID, + slot.encode(), + ))) + .collect::>(), }; let mut block_builder = BlockBuilderBuilder::new(client) @@ -147,28 +184,30 @@ impl InitBlockBuilder for Client { self.init_block_builder_at(chain_info.best_hash, validation_data, relay_sproof_builder) } + fn init_block_builder_with_pre_digests( + &self, + 
validation_data: Option>, + relay_sproof_builder: RelayStateSproofBuilder, + pre_digests: Vec, + ) -> BlockBuilderAndSupportData { + let chain_info = self.chain_info(); + init_block_builder( + self, + chain_info.best_hash, + validation_data, + relay_sproof_builder, + None, + Some(pre_digests), + ) + } + fn init_block_builder_at( &self, at: Hash, validation_data: Option>, relay_sproof_builder: RelayStateSproofBuilder, ) -> BlockBuilderAndSupportData { - let last_timestamp = self.runtime_api().get_last_timestamp(at).expect("Get last timestamp"); - - let timestamp = if last_timestamp == 0 { - if relay_sproof_builder.current_slot != 0u64 { - *relay_sproof_builder.current_slot * 6_000 - } else { - std::time::SystemTime::now() - .duration_since(std::time::SystemTime::UNIX_EPOCH) - .expect("Time is always after UNIX_EPOCH; qed") - .as_millis() as u64 - } - } else { - last_timestamp + self.runtime_api().slot_duration(at).unwrap().as_millis() - }; - - init_block_builder(self, at, validation_data, relay_sproof_builder, timestamp) + init_block_builder(self, at, validation_data, relay_sproof_builder, None, None) } fn init_block_builder_with_timestamp( @@ -178,7 +217,7 @@ impl InitBlockBuilder for Client { relay_sproof_builder: RelayStateSproofBuilder, timestamp: u64, ) -> BlockBuilderAndSupportData { - init_block_builder(self, at, validation_data, relay_sproof_builder, timestamp) + init_block_builder(self, at, validation_data, relay_sproof_builder, Some(timestamp), None) } } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 58927c0e75bd3..40226df701903 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -77,7 +77,7 @@ use sp_runtime::{ use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use cumulus_primitives_core::{ClaimQueueOffset, CoreSelector, ParaId}; +use cumulus_primitives_core::ParaId; // A few exports that help ease life for downstream crates. 
pub use frame_support::{ @@ -608,12 +608,6 @@ impl_runtime_apis! { } } - impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { - fn core_selector() -> (CoreSelector, ClaimQueueOffset) { - ParachainSystem::core_selector() - } - } - impl sp_genesis_builder::GenesisBuilder for Runtime { fn build_state(config: Vec) -> sp_genesis_builder::Result { build_state::(config) diff --git a/substrate/frame/staking-async/runtimes/parachain/src/lib.rs b/substrate/frame/staking-async/runtimes/parachain/src/lib.rs index 13fb8dbf4b3db..de01d182c14a6 100644 --- a/substrate/frame/staking-async/runtimes/parachain/src/lib.rs +++ b/substrate/frame/staking-async/runtimes/parachain/src/lib.rs @@ -1709,12 +1709,6 @@ impl_runtime_apis! { } } - impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { - fn core_selector() -> (CoreSelector, ClaimQueueOffset) { - ParachainSystem::core_selector() - } - } - #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { From b8dd79fea1175abfe29afa39e4cd7c9e40125d59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 1 Jul 2025 11:00:29 +0200 Subject: [PATCH 059/312] Get the test working --- Cargo.lock | 1 + .../slot_based/block_builder_task.rs | 4 +- .../assets/asset-hub-rococo/src/lib.rs | 2 +- .../assets/asset-hub-westend/src/lib.rs | 2 +- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 2 +- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- .../collectives-westend/src/lib.rs | 2 +- .../coretime/coretime-rococo/src/lib.rs | 2 +- .../coretime/coretime-westend/src/lib.rs | 2 +- .../glutton/glutton-westend/src/lib.rs | 2 +- .../runtimes/people/people-rococo/src/lib.rs | 2 +- .../runtimes/people/people-westend/src/lib.rs | 2 +- .../runtimes/testing/penpal/src/lib.rs | 2 +- .../testing/rococo-parachain/src/lib.rs | 2 +- .../testing/yet-another-parachain/src/lib.rs | 7 + 
.../lib/src/fake_runtime_api/utils.rs | 4 + cumulus/primitives/core/src/lib.rs | 51 +++++- cumulus/test/runtime/Cargo.toml | 4 +- cumulus/test/runtime/build.rs | 4 +- cumulus/test/runtime/src/lib.rs | 38 ++-- cumulus/test/service/src/chain_spec.rs | 4 +- cumulus/test/service/src/cli.rs | 6 +- .../zombienet-sdk-helpers/Cargo.toml | 1 + .../zombienet-sdk-helpers/src/lib.rs | 165 ++++++++++++++---- .../tests/elastic_scaling/mod.rs | 2 +- ...ple_blocks_per_slot.rs => pov_bundling.rs} | 58 +++--- cumulus/zombienet/zombienet-sdk/tests/lib.rs | 3 - .../tests/multiple_blocks_per_pov.rs | 97 ---------- ...cks_from_tip_without_connected_collator.rs | 7 +- polkadot/runtime/rococo/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 2 +- substrate/primitives/block-builder/src/lib.rs | 4 +- 32 files changed, 266 insertions(+), 222 deletions(-) rename cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/{elastic_scaling_multiple_blocks_per_slot.rs => pov_bundling.rs} (77%) delete mode 100644 cumulus/zombienet/zombienet-sdk/tests/multiple_blocks_per_pov.rs diff --git a/Cargo.lock b/Cargo.lock index f38ed084ee80d..d022cefd584c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5141,6 +5141,7 @@ dependencies = [ "log", "parity-scale-codec", "polkadot-primitives", + "sp-runtime 31.0.1", "tokio", "zombienet-sdk", ] diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index b0bbf2006dd1e..b1a539f662133 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -280,7 +280,7 @@ where }, }; - let Ok(RelayChainData { max_pov_size, claim_queue, .. }) = + let Ok(RelayChainData { max_pov_size, .. 
}) = relay_chain_data_cache.get_mut_relay_chain_data(relay_parent).await else { continue; @@ -391,6 +391,8 @@ where _ => 1, }; + tracing::trace!(target: LOG_TARGET, %blocks_per_core, ?block_rate, "Block rate configuration"); + let mut blocks = Vec::new(); let mut proofs = Vec::new(); let mut ignored_nodes = IgnoredNodes::default(); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 64129999ab414..74054653347a4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1416,7 +1416,7 @@ impl_runtime_apis! { fn block_rate() -> sp_block_builder::BlockRate { sp_block_builder::BlockRate { - block_time: core::time::Duration::from_millis(500), + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , block_building_time: core::time::Duration::from_millis(500), } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 5742be817aae3..c66fa690fd5bf 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1565,7 +1565,7 @@ pallet_revive::impl_runtime_apis_plus_revive!( fn block_rate() -> sp_block_builder::BlockRate { sp_block_builder::BlockRate { - block_time: core::time::Duration::from_secs(6), + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , block_building_time: core::time::Duration::from_secs(2), } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 89de6e7dc00e9..4aedb60956fe0 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ 
b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -790,7 +790,7 @@ impl_runtime_apis! { fn block_rate() -> sp_block_builder::BlockRate { sp_block_builder::BlockRate { - block_time: core::time::Duration::from_secs(6), + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , block_building_time: core::time::Duration::from_secs(2), } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index e388b16a57f7f..04cf8cd684429 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -741,7 +741,7 @@ impl_runtime_apis! { fn block_rate() -> sp_block_builder::BlockRate { sp_block_builder::BlockRate { - block_time: core::time::Duration::from_secs(6), + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , block_building_time: core::time::Duration::from_secs(2), } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 168c2d87a63a6..2cbf0e45c8d20 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -909,7 +909,7 @@ impl_runtime_apis! 
{ fn block_rate() -> sp_block_builder::BlockRate { sp_block_builder::BlockRate { - block_time: core::time::Duration::from_secs(6), + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , block_building_time: core::time::Duration::from_secs(2), } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index b7c2cc33be6fe..4cc5de240381d 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -770,7 +770,7 @@ impl_runtime_apis! { fn block_rate() -> sp_block_builder::BlockRate { sp_block_builder::BlockRate { - block_time: core::time::Duration::from_secs(6), + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , block_building_time: core::time::Duration::from_secs(2), } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 46dbcd648b2a1..301300c016eea 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -771,7 +771,7 @@ impl_runtime_apis! { fn block_rate() -> sp_block_builder::BlockRate { sp_block_builder::BlockRate { - block_time: core::time::Duration::from_secs(6), + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , block_building_time: core::time::Duration::from_secs(2), } } diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index a06a5952b3dce..112a0e0ec5748 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -402,7 +402,7 @@ impl_runtime_apis! 
{ fn block_rate() -> sp_block_builder::BlockRate { sp_block_builder::BlockRate { - block_time: core::time::Duration::from_secs(6), + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , block_building_time: core::time::Duration::from_secs(2), } } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index cfba0222d5832..d2cd91dadf7d4 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -724,7 +724,7 @@ impl_runtime_apis! { fn block_rate() -> sp_block_builder::BlockRate { sp_block_builder::BlockRate { - block_time: core::time::Duration::from_secs(6), + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , block_building_time: core::time::Duration::from_secs(2), } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 7d13fd0e72634..26cece0cb76ce 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -722,7 +722,7 @@ impl_runtime_apis! { fn block_rate() -> sp_block_builder::BlockRate { sp_block_builder::BlockRate { - block_time: core::time::Duration::from_secs(6), + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , block_building_time: core::time::Duration::from_secs(2), } } diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index c59a1352a47bb..c365980f0f6f7 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -996,7 +996,7 @@ impl_runtime_apis! 
{ fn block_rate() -> sp_block_builder::BlockRate { sp_block_builder::BlockRate { - block_time: core::time::Duration::from_secs(6), + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , block_building_time: core::time::Duration::from_secs(2), } } diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 597c41c9b0ee6..cc8612b457ea1 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -758,7 +758,7 @@ impl_runtime_apis! { fn block_rate() -> sp_block_builder::BlockRate { sp_block_builder::BlockRate { - block_time: core::time::Duration::from_secs(6), + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , block_building_time: core::time::Duration::from_secs(2), } } diff --git a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs index ebf45f69d818a..01b6c32f3a811 100644 --- a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs @@ -549,6 +549,13 @@ impl_runtime_apis! 
{ fn check_inherents(block: Block, data: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , + block_building_time: core::time::Duration::from_secs(2), + } + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs index 6e6591f3190ca..301771caaf074 100644 --- a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs +++ b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs @@ -105,6 +105,10 @@ macro_rules! impl_node_runtime_apis { ) -> sp_inherents::CheckInherentsResult { unimplemented!() } + + fn block_rate() -> sp_block_builder::BlockRate { + unimplemented!() + } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<$block> for $runtime { diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 6df4232ba17ce..15e234264f70f 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -225,6 +225,14 @@ pub struct CoreInfo { pub number_of_cores: Compact, } +/// Identifier for a relay chain block used by [`CumulusDigestItem`]. +pub enum RelayBlockIdentifier { + /// The block is identified using its block hash. + ByHash(relay_chain::Hash), + /// The block is identified using its storage root and block number. + ByStorageRoot { storage_root: relay_chain::Hash, block_number: relay_chain::BlockNumber }, +} + /// Consensus header digests for Cumulus parachains. #[derive(Clone, Debug, Decode, Encode, PartialEq)] pub enum CumulusDigestItem { @@ -274,6 +282,36 @@ impl CumulusDigestItem { }) .count() <= 1 } + + /// Returns the [`RelayBlockIdentifier`] from the given `digest`. 
+ /// + /// The identifier corresponds to the relay parent used to build the parachain block. + pub fn find_relay_block_identifier(digest: &Digest) -> Option { + digest.convert_first(|d| match d { + DigestItem::Consensus(id, val) if id == &CUMULUS_CONSENSUS_ID => { + let Ok(CumulusDigestItem::RelayParent(hash)) = + CumulusDigestItem::decode_all(&mut &val[..]) + else { + return None + }; + + Some(RelayBlockIdentifier::ByHash(hash)) + }, + DigestItem::Consensus(id, val) if id == &rpsr_digest::RPSR_CONSENSUS_ID => { + let Ok((storage_root, block_number)) = + rpsr_digest::RpsrType::decode_all(&mut &val[..]) + else { + return None + }; + + Some(RelayBlockIdentifier::ByStorageRoot { + storage_root, + block_number: block_number.into(), + }) + }, + _ => None, + }) + } } /// Extract the relay-parent from the provided header digest. Returns `None` if none were found. @@ -309,9 +347,12 @@ pub fn extract_relay_parent(digest: &Digest) -> Option { /// blocks in low-value scenarios such as performance optimizations. #[doc(hidden)] pub mod rpsr_digest { - use super::{relay_chain, ConsensusEngineId, Decode, Digest, DigestItem, Encode}; + use super::{relay_chain, ConsensusEngineId, DecodeAll, Digest, DigestItem, Encode}; use codec::Compact; + /// The type used to store the relay-parent storage root and number. + pub type RpsrType = (relay_chain::Hash, Compact); + /// A consensus engine ID for relay-parent storage root digests. pub const RPSR_CONSENSUS_ID: ConsensusEngineId = *b"RPSR"; @@ -320,7 +361,10 @@ pub mod rpsr_digest { storage_root: relay_chain::Hash, number: impl Into>, ) -> DigestItem { - DigestItem::Consensus(RPSR_CONSENSUS_ID, (storage_root, number.into()).encode()) + DigestItem::Consensus( + RPSR_CONSENSUS_ID, + RpsrType::from((storage_root, number.into())).encode(), + ) } /// Extract the relay-parent storage root and number from the provided header digest. 
Returns @@ -330,8 +374,7 @@ pub mod rpsr_digest { ) -> Option<(relay_chain::Hash, relay_chain::BlockNumber)> { digest.convert_first(|d| match d { DigestItem::Consensus(id, val) if id == &RPSR_CONSENSUS_ID => { - let (h, n): (relay_chain::Hash, Compact) = - Decode::decode(&mut &val[..]).ok()?; + let (h, n) = RpsrType::decode_all(&mut &val[..]).ok()?; Some((h, n.0)) }, diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index 896e30bf68b65..f766ad2076301 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -99,8 +99,8 @@ relay-parent-offset = [] elastic-scaling = [] # A runtime with low slot duration of 500ms for low-latency testing with 12 cores. elastic-scaling-500ms = [] -# A runtime with a slot duration of 6s but parameters that allow multiple blocks per slot. -elastic-scaling-multi-block-slot = [] +# A runtime with PoV bundling. +pov-bundling = [] # A runtime with 6s slot duration which sends RFC-103 compatible UMP signals. experimental-ump-signals = [ "cumulus-pallet-parachain-system/experimental-ump-signals", diff --git a/cumulus/test/runtime/build.rs b/cumulus/test/runtime/build.rs index f0b44eb3ecc15..ddde5c2f2a8b7 100644 --- a/cumulus/test/runtime/build.rs +++ b/cumulus/test/runtime/build.rs @@ -49,10 +49,10 @@ fn main() { WasmBuilder::new() .with_current_project() - .enable_feature("elastic-scaling-multi-block-slot") + .enable_feature("pov-bundling") .enable_feature("experimental-ump-signals") .import_memory() - .set_file_name("wasm_binary_elastic_scaling_multi_block_slot.rs") + .set_file_name("wasm_binary_pov_bundling.rs") .build(); WasmBuilder::new() diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index b2c9e70cb7e6d..4a1925c6a6479 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -46,9 +46,9 @@ pub mod elastic_scaling { include!(concat!(env!("OUT_DIR"), "/wasm_binary_elastic_scaling.rs")); } -pub mod elastic_scaling_multi_block_slot
{ +pub mod pov_bundling { #[cfg(feature = "std")] - include!(concat!(env!("OUT_DIR"), "/wasm_binary_elastic_scaling_multi_block_slot.rs")); + include!(concat!(env!("OUT_DIR"), "/wasm_binary_pov_bundling.rs")); } pub mod sync_backing { @@ -62,7 +62,6 @@ mod test_pallet; extern crate alloc; use alloc::{vec, vec::Vec}; -use codec::Encode; use frame_support::{derive_impl, traits::OnRuntimeUpgrade, PalletId}; use sp_api::{decl_runtime_apis, impl_runtime_apis}; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -120,22 +119,8 @@ impl_opaque_keys! { pub const PARACHAIN_ID: u32 = 100; #[cfg(all( - feature = "elastic-scaling-multi-block-slot", - not(any( - feature = "elastic-scaling", - feature = "elastic-scaling-500ms", - feature = "relay-parent-offset" - )) -))] -pub const BLOCK_PROCESSING_VELOCITY: u32 = 6; - -#[cfg(all( - feature = "elastic-scaling-500ms", - not(any( - feature = "elastic-scaling", - feature = "elastic-scaling-multi-block-slot", - feature = "relay-parent-offset" - )) + any(feature = "elastic-scaling-500ms", feature = "pov-bundling"), + not(any(feature = "elastic-scaling", feature = "relay-parent-offset")) ))] pub const BLOCK_PROCESSING_VELOCITY: u32 = 12; @@ -145,7 +130,7 @@ pub const BLOCK_PROCESSING_VELOCITY: u32 = 3; #[cfg(not(any( feature = "elastic-scaling", feature = "elastic-scaling-500ms", - feature = "elastic-scaling-multi-block-slot", + feature = "pov-bundling", feature = "relay-parent-offset" )))] pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; @@ -569,9 +554,16 @@ impl_runtime_apis! 
{ } fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) }, - block_building_time: core::time::Duration::from_secs(2), + if cfg!(feature = "pov-bundling") { + sp_block_builder::BlockRate { + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_millis(500) }, + block_building_time: core::time::Duration::from_millis(500), + } + } else { + sp_block_builder::BlockRate { + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) }, + block_building_time: core::time::Duration::from_secs(2), + } } } } diff --git a/cumulus/test/service/src/chain_spec.rs b/cumulus/test/service/src/chain_spec.rs index 953959f3a03cd..5a10b3d2ccad5 100644 --- a/cumulus/test/service/src/chain_spec.rs +++ b/cumulus/test/service/src/chain_spec.rs @@ -125,11 +125,11 @@ pub fn get_elastic_scaling_mvp_chain_spec(id: Option) -> GenericChainSpe ) } -pub fn get_elastic_scaling_multi_block_slot_chain_spec(id: Option) -> GenericChainSpec { +pub fn get_pov_bundling_chain_spec(id: Option) -> GenericChainSpec { get_chain_spec_with_extra_endowed( id, Default::default(), - cumulus_test_runtime::elastic_scaling_multi_block_slot::WASM_BINARY + cumulus_test_runtime::pov_bundling::WASM_BINARY .expect("WASM binary was not built, please build it!"), ) } diff --git a/cumulus/test/service/src/cli.rs b/cumulus/test/service/src/cli.rs index aa719c0593bbc..2f4980e30102a 100644 --- a/cumulus/test/service/src/cli.rs +++ b/cumulus/test/service/src/cli.rs @@ -309,9 +309,9 @@ impl SubstrateCli for TestCollatorCli { ParaId::from(2300), ))) as Box<_> }, - "elastic-scaling-multi-block-slot" => { - tracing::info!("Using elastic-scaling multi-block-slot chain spec."); - Box::new(cumulus_test_service::get_elastic_scaling_multi_block_slot_chain_spec( + "pov-bundling" => { + tracing::info!("Using pov-bundling chain spec."); + 
Box::new(cumulus_test_service::get_pov_bundling_chain_spec( Some(ParaId::from(2400)), )) as Box<_> }, diff --git a/cumulus/zombienet/zombienet-sdk-helpers/Cargo.toml b/cumulus/zombienet/zombienet-sdk-helpers/Cargo.toml index a7630d48b8875..1ce94e4acecd0 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/Cargo.toml +++ b/cumulus/zombienet/zombienet-sdk-helpers/Cargo.toml @@ -16,4 +16,5 @@ cumulus-primitives-core = { workspace = true, default-features = true } tokio = { workspace = true, features = ["rt-multi-thread", "macros", "time"] } zombienet-sdk = { workspace = true } futures = { workspace = true } +sp-runtime = { workspace = true } diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 945edab6b90b0..6e425603f2aac 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -2,10 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::anyhow; -use codec::{Compact, Decode}; -use cumulus_primitives_core::{relay_chain, rpsr_digest::RPSR_CONSENSUS_ID}; -use futures::stream::StreamExt; +use codec::{Compact, Decode, Encode}; +use cumulus_primitives_core::{relay_chain, rpsr_digest::RPSR_CONSENSUS_ID, CumulusDigestItem}; +use futures::{stream::StreamExt, TryStreamExt}; use polkadot_primitives::{vstaging::CandidateReceiptV2, Id as ParaId}; +use sp_runtime::traits::Zero; use std::{ cmp::max, collections::{HashMap, HashSet}, @@ -16,8 +17,14 @@ use tokio::{ time::{sleep, Duration}, }; use zombienet_sdk::subxt::{ - blocks::Block, config::substrate::DigestItem, events::Events, ext::scale_value::value, - tx::DynamicPayload, utils::H256, OnlineClient, PolkadotConfig, + backend::{legacy::LegacyRpcMethods, Backend}, + blocks::Block, + config::{substrate::DigestItem, Header}, + events::Events, + ext::scale_value::value, + tx::DynamicPayload, + utils::H256, + Config, OnlineClient, PolkadotConfig, }; // Maximum number of blocks to wait for a 
session change. @@ -80,14 +87,9 @@ pub async fn assert_finalized_para_throughput( let block = block?; log::debug!("Finalized relay chain block {}", block.number()); let events = block.events().await?; - let is_session_change = events.iter().any(|event| { - event.as_ref().is_ok_and(|event| { - event.pallet_name() == "Session" && event.variant_name() == "NewSession" - }) - }); // Do not count blocks with session changes, no backed blocks there. - if is_session_change { + if is_session_change(&block).await? { continue } @@ -131,16 +133,28 @@ pub async fn assert_finalized_para_throughput( Ok(()) } -// Helper function for asserting the throughput of parachains. +/// Returns `true` if the `block` is a session change. +async fn is_session_change( + block: &Block>, +) -> Result { + let events = block.events().await?; + Ok(events.iter().any(|event| { + event.as_ref().is_ok_and(|event| { + event.pallet_name() == "Session" && event.variant_name() == "NewSession" + }) + })) +} + +// Helper function for asserting the throughput of parachain candidates on the relay chain. // -// The troughput is measured as total number of backed candidates in a window of relay chain blocks, -// after the first session change. Blocks with session changes are generally ignored. +// The throughput is measured as total number of backed candidates in a window of relay chain +// blocks, after the first session change. Blocks with session changes are generally ignored. // // `stop_after`: Number of relay chain blocks after which the recording should be stopped. pub async fn assert_para_throughput( relay_client: &OnlineClient, stop_after: u32, - expected_candidate_ranges: HashMap>, + expected_candidate_ranges: impl Into>>, ) -> Result<(), anyhow::Error> { // Check on backed blocks in all imported relay chain blocks. The slot-based collator // builds on the best fork currently. 
It can happen that it builds on a fork which is not @@ -150,6 +164,7 @@ pub async fn assert_para_throughput( let mut blocks_sub = relay_client.blocks().subscribe_all().await?; let mut candidate_count: HashMap = HashMap::new(); let mut start_height: Option = None; + let expected_candidate_ranges = expected_candidate_ranges.into(); let valid_para_ids: Vec = expected_candidate_ranges.keys().cloned().collect(); @@ -159,18 +174,13 @@ pub async fn assert_para_throughput( let mut session_change_seen_at = 0u32; while let Some(block) = blocks_sub.next().await { let block = block?; - let block_number = Into::::into(block.number()); + let block_number = u32::from(block.number()); let events = block.events().await?; let mut para_ids_to_increment: HashSet = Default::default(); - let is_session_change = events.iter().any(|event| { - event.as_ref().is_ok_and(|event| { - event.pallet_name() == "Session" && event.variant_name() == "NewSession" - }) - }); // Do not count blocks with session changes, no backed blocks there. - if is_session_change { + if is_session_change(&block).await? { if block_number == session_change_seen_at { continue; } @@ -245,6 +255,103 @@ pub async fn assert_para_throughput( Ok(()) } +/// Returns the header of the relay parent used by the given parachain `block`. +async fn relay_parent_for( + block: &Block>, + relay_rpc_client: &LegacyRpcMethods, +) -> Result<::Header, anyhow::Error> { + let substrate_digest = + sp_runtime::generic::Digest::decode(&mut &block.header().digest.encode()[..]) + .expect("`subxt::Digest` and `substrate::Digest` should encode and decode; qed"); + + match CumulusDigestItem::find_relay_block_identifier(&substrate_digest).unwrap() { + cumulus_primitives_core::RelayBlockIdentifier::ByHash(hash) => relay_rpc_client + .chain_get_header(Some(hash)) + .await? 
+ .ok_or_else(|| anyhow!("Could not fetch relay chain header: {hash:?}")), + cumulus_primitives_core::RelayBlockIdentifier::ByStorageRoot { + storage_root, + block_number, + } => { + let block_hash = relay_rpc_client + .chain_get_block_hash(Some(block_number.into())) + .await? + .ok_or_else(|| anyhow!("Could not fetch block hash for block: {}", block_number))?; + + let header = relay_rpc_client + .chain_get_header(Some(block_hash)) + .await? + .ok_or_else(|| anyhow!("Could not fetch relay chain header: {block_hash:?}"))?; + + assert_eq!(storage_root, header.state_root, "Storage roots should match"); + Ok(header) + }, + } +} + +/// Assert that `stop_after` parachain blocks are included via `expected_relay_blocks`. +/// +/// It waits for `stop_after` parachain blocks to be finalized. Then it ensures that these parachain +/// blocks are included on the relay chain using the given number of `expected_relay_blocks`. +pub async fn assert_para_blocks_throughput( + para_client: &OnlineClient, + stop_after: usize, + relay_rpc_client: &LegacyRpcMethods, + relay_client: &OnlineClient, + expected_relay_blocks: Range, +) -> Result<(), anyhow::Error> { + // Wait for the first session, block production on the parachain will start after that.
+ wait_for_first_session_change(&mut relay_client.blocks().subscribe_best().await?).await?; + + let finalized_stream = para_client.blocks().subscribe_finalized().await?; + + let finalized_blocks = finalized_stream + .try_filter(|b| futures::future::ready(!b.number().is_zero())) + .take(stop_after) + .try_collect::>() + .await?; + + let first_relay_header = relay_parent_for(&finalized_blocks[0], relay_rpc_client).await?; + let last_relay_header = + relay_parent_for(finalized_blocks.last().unwrap(), relay_rpc_client).await?; + + let mut relay_blocks_without_session_change = 0; + let mut current_relay_header = last_relay_header.clone(); + while current_relay_header.number() >= first_relay_header.number() { + let block = relay_rpc_client + .chain_get_block(Some(current_relay_header.hash())) + .await? + .ok_or_else(|| { + anyhow!("Could not fetch relay block: {:?}", current_relay_header.hash()) + })? + .block; + + let block = relay_client.blocks().at(block.header.hash()).await?; + + if !is_session_change(&block).await? { + relay_blocks_without_session_change += 1; + } + + current_relay_header = relay_rpc_client + .chain_get_header(Some(current_relay_header.parent_hash)) + .await? + .ok_or_else(|| { + anyhow!( + "Could not fetch relay chain header: {:?}", + current_relay_header.parent_hash + ) + })?; + } + + assert!( + expected_relay_blocks.contains(&relay_blocks_without_session_change), + "{relay_blocks_without_session_change} relay chain blocks is not in the \ + expected range of {expected_relay_blocks:?} relay chain blocks.", + ); + + Ok(()) +} + +/// Wait for the first block with a session change. +/// +/// The session change is detected by inspecting the events in the block.
@@ -269,14 +376,8 @@ pub async fn wait_for_nth_session_change( while let Some(block) = blocks_sub.next().await { let block = block?; log::debug!("Finalized relay chain block {}", block.number()); - let events = block.events().await?; - let is_session_change = events.iter().any(|event| { - event.as_ref().is_ok_and(|event| { - event.pallet_name() == "Session" && event.variant_name() == "NewSession" - }) - }); - if is_session_change { + if is_session_change(&block).await? { sessions_to_wait -= 1; if sessions_to_wait == 0 { return Ok(()) @@ -376,7 +477,11 @@ pub async fn assert_relay_parent_offset( return Err(anyhow!("No RPSR digest found in header #{}", para_block.number())); }; log::debug!("Parachain block #{} was built on relay parent #{relay_parent_number}, highest seen was {highest_relay_block_seen}", para_block.number()); - assert!(highest_relay_block_seen < offset || relay_parent_number <= highest_relay_block_seen.saturating_sub(offset), "Relay parent is not at the correct offset! relay_parent: #{relay_parent_number} highest_seen_relay_block: #{highest_relay_block_seen}"); + assert!( + highest_relay_block_seen < offset || + relay_parent_number <= highest_relay_block_seen.saturating_sub(offset), + "Relay parent is not at the correct offset! relay_parent: #{relay_parent_number} highest_seen_relay_block: #{highest_relay_block_seen}", + ); num_para_blocks_seen += 1; if num_para_blocks_seen >= block_limit { log::info!("Successfully verified relay parent offset of {offset} for {num_para_blocks_seen} parachain blocks."); diff --git a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/mod.rs index b575a2fe2077e..e322abcc93b86 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/mod.rs @@ -15,5 +15,5 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-mod elastic_scaling_multiple_blocks_per_slot; +mod pov_bundling; mod slot_based_rp_offset; diff --git a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/elastic_scaling_multiple_blocks_per_slot.rs b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs similarity index 77% rename from cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/elastic_scaling_multiple_blocks_per_slot.rs rename to cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs index 3496ce7f2aa71..ad2827e0d0bc7 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/elastic_scaling_multiple_blocks_per_slot.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs @@ -18,25 +18,29 @@ use anyhow::anyhow; use cumulus_zombienet_sdk_helpers::{ - assert_finality_lag, assert_para_throughput, create_assign_core_call, + assert_finality_lag, assert_para_blocks_throughput, assert_para_throughput, + create_assign_core_call, }; use polkadot_primitives::Id as ParaId; use serde_json::json; use zombienet_sdk::{ - subxt::{OnlineClient, PolkadotConfig}, + subxt::{ + backend::{legacy::LegacyRpcMethods, rpc::RpcClient}, + OnlineClient, PolkadotConfig, + }, subxt_signer::sr25519::dev, NetworkConfig, NetworkConfigBuilder, }; const PARA_ID: u32 = 2400; -/// This test spawns a parachain network. -/// Initially, one core is assigned. We expect the parachain to produce 1 block per relay. -/// As we increase the number of cores via `assign_core`, we expect the block pace to increase too. -/// **Note:** The runtime in use here has 6s slot duration, so multiple blocks will be produced per -/// slot. +/// A test that ensures that PoV bundling works. +/// +/// Initially, one core is assigned. We expect the parachain to produce 12 blocks per relay core. +/// As we increase the number of cores via `assign_core`, we expect the blocks to spread over the +/// relay cores.
#[tokio::test(flavor = "multi_thread")] -async fn elastic_scaling_multiple_block_per_slot() -> Result<(), anyhow::Error> { +async fn pov_bundling() -> Result<(), anyhow::Error> { let _ = env_logger::try_init_from_env( env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), ); @@ -47,17 +51,17 @@ async fn elastic_scaling_multiple_block_per_slot() -> Result<(), anyhow::Error> let network = spawn_fn(config).await?; let relay_node = network.get_node("validator-0")?; - let para_node_elastic = network.get_node("collator-1")?; + let para_node = network.get_node("collator-1")?; + let para_client = para_node.wait_client().await?; let relay_client: OnlineClient = relay_node.wait_client().await?; + let relay_rpc_client = + LegacyRpcMethods::new(RpcClient::from_url(relay_node.ws_uri()).await.unwrap()); let alice = dev::alice(); - assert_para_throughput( - &relay_client, - 10, - [(ParaId::from(PARA_ID), 8..11)].into_iter().collect(), - ) - .await?; - assert_finality_lag(¶_node_elastic.wait_client().await?, 5).await?; + + assert_para_blocks_throughput(¶_client, 72, &relay_rpc_client, &relay_client, 6..9).await?; + // 3 relay chain blocks + assert_finality_lag(¶_client, 36).await?; let assign_cores_call = create_assign_core_call(&[(2, PARA_ID), (3, PARA_ID)]); @@ -70,13 +74,8 @@ async fn elastic_scaling_multiple_block_per_slot() -> Result<(), anyhow::Error> .await?; log::info!("2 more cores assigned to each parachain"); - assert_para_throughput( - &relay_client, - 15, - [(ParaId::from(PARA_ID), 39..46)].into_iter().collect(), - ) - .await?; - assert_finality_lag(¶_node_elastic.wait_client().await?, 20).await?; + assert_para_throughput(&relay_client, 15, [(ParaId::from(PARA_ID), 39..46)]).await?; + assert_finality_lag(¶_client, 20).await?; let assign_cores_call = create_assign_core_call(&[(4, PARA_ID), (5, PARA_ID), (6, PARA_ID)]); // Assign two extra cores to each parachain. 
@@ -88,13 +87,8 @@ async fn elastic_scaling_multiple_block_per_slot() -> Result<(), anyhow::Error> .await?; log::info!("3 more cores assigned to each parachain"); - assert_para_throughput( - &relay_client, - 10, - [(ParaId::from(PARA_ID), 52..61)].into_iter().collect(), - ) - .await?; - assert_finality_lag(¶_node_elastic.wait_client().await?, 30).await?; + assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 52..61)]).await?; + assert_finality_lag(¶_client, 30).await?; log::info!("Test finished successfully"); Ok(()) } @@ -131,11 +125,11 @@ async fn build_network_config() -> Result { p.with_id(PARA_ID) .with_default_command("test-parachain") .with_default_image(images.cumulus.as_str()) - .with_chain("elastic-scaling-multi-block-slot") + .with_chain("pov-bundling") .with_default_args(vec![ ("--authoring").into(), ("slot-based").into(), - ("-lparachain=trace,aura=debug").into(), + ("-laura=trace").into(), ]) .with_collator(|n| n.with_name("collator-0")) .with_collator(|n| n.with_name("collator-1")) diff --git a/cumulus/zombienet/zombienet-sdk/tests/lib.rs b/cumulus/zombienet/zombienet-sdk/tests/lib.rs index 4d356859b53d3..c1bade1f57ea4 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/lib.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/lib.rs @@ -18,9 +18,6 @@ #[cfg(feature = "zombie-ci")] mod elastic_scaling; -#[cfg(feature = "zombie-ci")] -mod multiple_blocks_per_pov; - #[cfg(feature = "zombie-ci")] mod sync_blocks; diff --git a/cumulus/zombienet/zombienet-sdk/tests/multiple_blocks_per_pov.rs b/cumulus/zombienet/zombienet-sdk/tests/multiple_blocks_per_pov.rs deleted file mode 100644 index 4da361c8f19ed..0000000000000 --- a/cumulus/zombienet/zombienet-sdk/tests/multiple_blocks_per_pov.rs +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -use anyhow::anyhow; - -use cumulus_zombienet_sdk_helpers::{ - assert_finality_lag, assert_para_throughput, create_assign_core_call, -}; -use polkadot_primitives::Id as ParaId; -use serde_json::json; -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; -use zombienet_sdk::{NetworkConfig, NetworkConfigBuilder}; - -const PARA_ID: u32 = 2400; - -/// This test spawns a parachain network. -#[tokio::test(flavor = "multi_thread")] -async fn multiple_blocks_per_pov() -> Result<(), anyhow::Error> { - let _ = env_logger::try_init_from_env( - env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), - ); - - let config = build_network_config().await?; - - let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); - let network = spawn_fn(config).await?; - - let relay_node = network.get_node("validator-0")?; - let para_node_elastic = network.get_node("collator-1")?; - - let relay_client: OnlineClient = relay_node.wait_client().await?; - let alice = dev::alice(); - assert_para_throughput( - &relay_client, - 10, - [(ParaId::from(PARA_ID), 8..11)].into_iter().collect(), - ) - .await?; - assert_finality_lag(¶_node_elastic.wait_client().await?, 5).await?; - - log::info!("Test finished successfully"); - Ok(()) -} - -async fn build_network_config() -> Result { - let images = zombienet_sdk::environment::get_images_from_env(); - log::info!("Using images: {images:?}"); - NetworkConfigBuilder::new() - .with_relaychain(|r| { - let r = r - .with_chain("rococo-local") - .with_default_command("polkadot") - .with_default_image(images.polkadot.as_str()) - .with_default_args(vec![("-lparachain=trace").into()]) - .with_default_resources(|resources| { - resources.with_request_cpu(2).with_request_memory("2G") - }) - .with_genesis_overrides(json!({ - "configuration": { - "config": { - "scheduler_params": { - "num_cores": 7, - "max_validators_per_core": 1 - } - } - } - })) - // Have to set a `with_node` outside of 
the loop below, so that `r` has the right - // type. - .with_node(|node| node.with_name("validator-0")); - (1..9).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) - }) - .with_parachain(|p| { - p.with_id(PARA_ID) - .with_default_command("polkadot-parachain") - .with_default_image(images.cumulus.as_str()) - .with_chain("asset-hub-rococo") - .with_default_args(vec![ - ("--authoring").into(), - ("slot-based").into(), - ("-lparachain=trace,aura=debug").into(), - ]) - .with_collator(|n| n.with_name("collator-0")) - .with_collator(|n| n.with_name("collator-1")) - .with_collator(|n| n.with_name("collator-2")) - }) - .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { - Ok(val) => global_settings.with_base_dir(val), - _ => global_settings, - }) - .build() - .map_err(|e| { - let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); - anyhow!("config errs: {errs}") - }) -} diff --git a/cumulus/zombienet/zombienet-sdk/tests/sync_blocks/sync_blocks_from_tip_without_connected_collator.rs b/cumulus/zombienet/zombienet-sdk/tests/sync_blocks/sync_blocks_from_tip_without_connected_collator.rs index e9f1690380bb1..962a55dc65d3e 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/sync_blocks/sync_blocks_from_tip_without_connected_collator.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/sync_blocks/sync_blocks_from_tip_without_connected_collator.rs @@ -22,12 +22,7 @@ async fn sync_blocks_from_tip_without_connected_collator() -> Result<(), anyhow: let relay_client: OnlineClient = relay_alice.wait_client().await?; log::info!("Ensuring parachain making progress"); - assert_para_throughput( - &relay_client, - 10, - [(ParaId::from(PARA_ID), 9..11)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 9..11)]).await?; let para_ferdie = network.get_node("ferdie")?; let para_eve = network.get_node("eve")?; diff --git a/polkadot/runtime/rococo/src/lib.rs 
b/polkadot/runtime/rococo/src/lib.rs index 765e4172207a7..fddf8d8fee970 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1999,7 +1999,7 @@ sp_api::impl_runtime_apis! { fn block_rate() -> sp_block_builder::BlockRate { sp_block_builder::BlockRate { - block_time: core::time::Duration::from_secs(6), + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , block_building_time: core::time::Duration::from_secs(2), } } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 84285bf88cffa..e932048068e15 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2210,7 +2210,7 @@ sp_api::impl_runtime_apis! { fn block_rate() -> sp_block_builder::BlockRate { sp_block_builder::BlockRate { - block_time: core::time::Duration::from_secs(6), + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , block_building_time: core::time::Duration::from_secs(2), } } diff --git a/substrate/primitives/block-builder/src/lib.rs b/substrate/primitives/block-builder/src/lib.rs index aa0da399aea30..5bf282d51c44f 100644 --- a/substrate/primitives/block-builder/src/lib.rs +++ b/substrate/primitives/block-builder/src/lib.rs @@ -26,7 +26,7 @@ use core::time::Duration; use sp_inherents::{CheckInherentsResult, InherentData}; use sp_runtime::{traits::Block as BlockT, ApplyExtrinsicResult}; -#[derive(Encode, Decode, scale_info::TypeInfo)] +#[derive(Encode, Decode, scale_info::TypeInfo, Debug)] pub struct BlockRate { /// Time between individual blocks. pub block_time: BlockTime, @@ -34,7 +34,7 @@ pub struct BlockRate { pub block_building_time: Duration, } -#[derive(Encode, Decode, scale_info::TypeInfo)] +#[derive(Encode, Decode, scale_info::TypeInfo, Debug)] pub enum BlockTime { /// Blocks are expected every X. 
Regularly { From b901193517854099bf23104b58572e806d4d9254 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 3 Jul 2025 00:15:57 +0200 Subject: [PATCH 060/312] More stuff --- cumulus/client/collator/src/service.rs | 23 +- .../slot_based/block_builder_task.rs | 433 +++++++++++------- .../aura/src/collators/slot_based/mod.rs | 2 +- cumulus/pallets/parachain-system/src/lib.rs | 12 +- .../src/validate_block/implementation.rs | 1 + .../polkadot-omni-node/lib/src/nodes/aura.rs | 2 +- .../zombienet-sdk-helpers/src/lib.rs | 38 +- .../tests/elastic_scaling/pov_bundling.rs | 8 +- 8 files changed, 328 insertions(+), 191 deletions(-) diff --git a/cumulus/client/collator/src/service.rs b/cumulus/client/collator/src/service.rs index ec22cb38edd79..92dfcc1058e87 100644 --- a/cumulus/client/collator/src/service.rs +++ b/cumulus/client/collator/src/service.rs @@ -21,6 +21,7 @@ use cumulus_client_network::WaitToAnnounce; use cumulus_primitives_core::{CollationInfo, CollectCollationInfo, ParachainBlockData}; +use polkadot_primitives::vstaging::UMP_SEPARATOR; use sc_client_api::BlockBackend; use sp_api::{ApiExt, ProvideRuntimeApi, StorageProof}; use sp_consensus::BlockStatus; @@ -35,7 +36,7 @@ use polkadot_node_primitives::{ use codec::Encode; use futures::channel::oneshot; use parking_lot::Mutex; -use std::sync::Arc; +use std::{collections::HashSet, sync::Arc}; /// The logging target. 
const LOG_TARGET: &str = "cumulus-collator"; @@ -243,6 +244,7 @@ where let mut api_version = 0; let mut upward_messages = Vec::new(); + let mut upward_message_signals = HashSet::>::with_capacity(4); let mut horizontal_messages = Vec::new(); let mut new_validation_code = None; let mut processed_downward_messages = 0; @@ -263,7 +265,18 @@ where .ok() .flatten()?; - upward_messages.extend(collation_info.upward_messages); + collation_info + .upward_messages + .iter() + .rev() + .take_while(|m| **m != UMP_SEPARATOR) + .for_each(|s| { + upward_message_signals.insert(s.clone()); + }); + + upward_messages.extend( + collation_info.upward_messages.into_iter().take_while(|m| *m != UMP_SEPARATOR), + ); horizontal_messages.extend(collation_info.horizontal_messages); api_version = version; new_validation_code = new_validation_code.take().or(collation_info.new_validation_code); @@ -291,6 +304,12 @@ where }), }); + // If we got some signals, push them now. + if !upward_message_signals.is_empty() { + upward_messages.push(UMP_SEPARATOR); + upward_messages.extend(upward_message_signals.into_iter()); + } + let upward_messages = upward_messages .try_into() .map_err(|e| { diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index b1a539f662133..63c49515ff996 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -17,7 +17,7 @@ use super::CollatorMessage; use crate::{ - collator as collator_util, + collator::{self as collator_util, Collator, SlotClaim}, collators::{ check_validation_code_or_log, slot_based::{ @@ -140,7 +140,7 @@ where BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, - CHP: 
consensus_common::ValidationCodeHashProvider + Send + 'static, + CHP: consensus_common::ValidationCodeHashProvider + Send + Sync + 'static, P: Pair, P::Public: AppPublic + Member + Codec, P::Signature: TryFrom> + Member + Codec, @@ -229,66 +229,36 @@ where let relay_parent = rp_data.relay_parent().hash(); let relay_parent_header = rp_data.relay_parent().clone(); - let Some((included_header, parent)) = + let Some((included_header, initial_parent)) = crate::collators::find_parent(relay_parent, para_id, &*para_backend, &relay_client) .await else { continue }; - let parent_hash = parent.hash; - let pov_parent_header = parent.header; - - // Retrieve the core selector. - let (core_selector, claim_queue_offset, core_index, number_of_cores) = - match determine_core( - &mut relay_chain_data_cache, - &relay_parent_header, - para_id, - &pov_parent_header, - ) + let Ok(max_pov_size) = relay_chain_data_cache + .get_mut_relay_chain_data(relay_parent) .await - { - Err(()) => { - tracing::debug!( - target: LOG_TARGET, - ?relay_parent, - "Failed to determine core" - ); - - continue - }, - Ok(Some(res)) => { - tracing::debug!( - target: LOG_TARGET, - ?relay_parent, - core_selector = ?res.0, - claim_queue_offset = ?res.1, - "Going to claim core", - ); - - res - }, - Ok(None) => { - tracing::debug!( - target: LOG_TARGET, - ?relay_parent, - "No available core" - ); - - continue - }, - }; - - let Ok(RelayChainData { max_pov_size, .. }) = - relay_chain_data_cache.get_mut_relay_chain_data(relay_parent).await + .map(|d| d.max_pov_size) else { continue; }; + let allowed_pov_size = if let Some(max_pov_percentage) = max_pov_percentage { + max_pov_size * max_pov_percentage / 100 + } else { + // Set the block limit to 85% of the maximum PoV size. + // + // Once https://github.com/paritytech/polkadot-sdk/issues/6020 issue is + // fixed, this should be removed. 
+ max_pov_size * 85 / 100 + } as usize; + // We mainly call this to inform users at genesis if there is a mismatch with the // on-chain data. - collator.collator_service().check_block_status(parent_hash, &pov_parent_header); + collator + .collator_service() + .check_block_status(initial_parent.hash, &initial_parent.header); let Ok(relay_slot) = sc_consensus_babe::find_pre_digest::(&relay_parent_header) @@ -304,7 +274,7 @@ where para_slot.slot, relay_slot, para_slot.timestamp, - parent_hash, + initial_parent.hash, included_header_hash, &*para_client, &keystore, @@ -315,14 +285,14 @@ where None => { tracing::debug!( target: crate::LOG_TARGET, - unincluded_segment_len = parent.depth, + unincluded_segment_len = initial_parent.depth, relay_parent = ?relay_parent, relay_parent_num = %relay_parent_header.number(), included_hash = ?included_header_hash, included_num = %included_header.number(), - parent = ?parent_hash, + initial_parent = ?initial_parent.hash, slot = ?para_slot.slot, - "Not building block." + "Not eligible to claim slot." ); continue }, @@ -330,144 +300,267 @@ where tracing::debug!( target: crate::LOG_TARGET, - unincluded_segment_len = parent.depth, - relay_parent = %relay_parent, + unincluded_segment_len = initial_parent.depth, + relay_parent = ?relay_parent, relay_parent_num = %relay_parent_header.number(), relay_parent_offset, - included_hash = %included_header_hash, + included_hash = ?included_header_hash, included_num = %included_header.number(), - parent = %parent_hash, + initial_parent = ?initial_parent.hash, slot = ?para_slot.slot, - "Building block." + "Claiming slot." 
); - let validation_data = PersistedValidationData { - parent_head: pov_parent_header.encode().into(), - relay_parent_number: *relay_parent_header.number(), - relay_parent_storage_root: *relay_parent_header.state_root(), - max_pov_size: *max_pov_size, - }; + let mut pov_parent_header = initial_parent.header; + let mut pov_parent_hash = initial_parent.hash; - let validation_code_hash = match code_hash_provider.code_hash_at(parent_hash) { - None => { - tracing::error!(target: crate::LOG_TARGET, ?parent_hash, "Could not fetch validation code hash"); - break - }, - Some(v) => v, - }; + loop { + match build_collation_for_core( + pov_parent_header, + pov_parent_hash, + &relay_parent_header, + relay_parent, + max_pov_size, + para_id, + &relay_client, + &*para_client, + &mut relay_chain_data_cache, + &code_hash_provider, + relay_chain_slot_duration, + &slot_claim, + &collator_sender, + authoring_duration, + &mut collator, + allowed_pov_size, + ) + .await + { + NextBlockProductionStep::NextCore { last_block_header } => { + pov_parent_header = last_block_header; + pov_parent_hash = pov_parent_header.hash(); + }, + NextBlockProductionStep::NextSlot => break, + NextBlockProductionStep::Stop => return, + } + } + } + } +} - check_validation_code_or_log( - &validation_code_hash, - para_id, - &relay_client, - relay_parent, - ) - .await; +enum NextBlockProductionStep { + NextCore { last_block_header: Block::Header }, + NextSlot, + Stop, +} - let allowed_pov_size = if let Some(max_pov_percentage) = max_pov_percentage { - validation_data.max_pov_size * max_pov_percentage / 100 - } else { - // Set the block limit to 85% of the maximum PoV size. - // - // Once https://github.com/paritytech/polkadot-sdk/issues/6020 issue is - // fixed, this should be removed. 
- validation_data.max_pov_size * 85 / 100 - } as usize; +async fn build_collation_for_core( + pov_parent_header: Block::Header, + pov_parent_hash: Block::Hash, + relay_parent_header: &RelayHeader, + relay_parent_hash: RelayHash, + max_pov_size: u32, + para_id: ParaId, + relay_client: &impl RelayChainInterface, + para_client: &Client, + relay_chain_data_cache: &mut RelayChainDataCache, + code_hash_provider: &impl consensus_common::ValidationCodeHashProvider, + relay_chain_slot_duration: Duration, + slot_claim: &SlotClaim, + collator_sender: &sc_utils::mpsc::TracingUnboundedSender>, + authoring_duration: Duration, + collator: &mut Collator, + allowed_pov_size: usize, +) -> NextBlockProductionStep +where + Client: ProvideRuntimeApi + + UsageProvider + + BlockOf + + AuxStore + + HeaderBackend + + BlockBackend + + Send + + Sync + + 'static, + Client::Api: AuraApi + + RelayParentOffsetApi + + AuraUnincludedSegmentApi + + BlockBuilder, + RelayClient: RelayChainInterface + 'static, + P: Pair, + P::Public: AppPublic + Member + Codec, + P::Signature: TryFrom> + Member + Codec, + CIDP: CreateInherentDataProviders + 'static, + CIDP::InherentDataProviders: Send, + BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, + Proposer: ProposerInterface + Send + Sync + 'static, + CS: CollatorServiceInterface + Send + Sync + 'static, +{ + let validation_data = PersistedValidationData { + parent_head: pov_parent_header.encode().into(), + relay_parent_number: *relay_parent_header.number(), + relay_parent_storage_root: *relay_parent_header.state_root(), + max_pov_size, + }; - let Ok(block_rate) = para_client.runtime_api().block_rate(parent_hash) else { - tracing::error!( - target: crate::LOG_TARGET, - "Failed to fetch block rate." 
- ); - continue - }; + let Some(validation_code_hash) = code_hash_provider.code_hash_at(pov_parent_hash) else { + tracing::error!( + target: crate::LOG_TARGET, + ?pov_parent_hash, + "Could not fetch validation code hash", + ); + return NextBlockProductionStep::Stop + }; - let block_time = block_rate.block_time.as_regular(); + check_validation_code_or_log(&validation_code_hash, para_id, relay_client, relay_parent_hash) + .await; - // TODO: Do not use relay chain slot duration, should also be `block_time`. - let blocks_per_core = match block_time { - Some(bt) if bt < relay_chain_slot_duration => - relay_chain_slot_duration.as_millis() / bt.as_millis(), - _ => 1, - }; + let Ok(block_rate) = para_client.runtime_api().block_rate(pov_parent_hash) else { + tracing::error!( + target: crate::LOG_TARGET, + "Failed to fetch block rate." + ); + return NextBlockProductionStep::Stop + }; - tracing::trace!(target: LOG_TARGET, %blocks_per_core, ?block_rate, "Block rate configuration"); - - let mut blocks = Vec::new(); - let mut proofs = Vec::new(); - let mut ignored_nodes = IgnoredNodes::default(); - // We redefine it as mutable here, because above this value should not change. 
- let mut parent_hash = parent_hash; - let mut parent_header = pov_parent_header.clone(); - - for _ in 0..blocks_per_core { - let expected_block_end = Instant::now() + block_time.unwrap_or_default(); - - let (parachain_inherent_data, other_inherent_data) = match collator - .create_inherent_data( - relay_parent, - &validation_data, - parent_hash, - slot_claim.timestamp(), - ) - .await - { - Err(err) => { - tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to create inherent data."); - return - }, - Ok(x) => x, - }; - - let Ok(Some(res)) = collator - .build_block_and_import( - &parent_header, - &slot_claim, - None, - (parachain_inherent_data, other_inherent_data), - authoring_duration, - allowed_pov_size, - ) - .await - else { - tracing::error!(target: crate::LOG_TARGET, "Unable to build block at slot."); - continue; - }; + // Retrieve the core selector. + let (core_selector, claim_queue_offset, core_index, number_of_cores) = match determine_core( + relay_chain_data_cache, + relay_parent_header, + para_id, + &pov_parent_header, + ) + .await + { + Err(()) => { + tracing::debug!( + target: LOG_TARGET, + relay_parent = ?relay_parent_hash, + "Failed to determine core" + ); + + return NextBlockProductionStep::Stop + }, + Ok(Some(res)) => { + tracing::debug!( + target: LOG_TARGET, + relay_parent = ?relay_parent_hash, + core_selector = ?res.0, + claim_queue_offset = ?res.1, + number_of_cores = %res.3, + "Going to claim core", + ); - parent_hash = res.block.header().hash(); - parent_header = res.block.header().clone(); + res + }, + Ok(None) => { + tracing::debug!( + target: LOG_TARGET, + relay_parent = ?relay_parent_hash, + "No available core" + ); - // Announce the newly built block to our peers. 
- collator.collator_service().announce_block(parent_hash, None); + return NextBlockProductionStep::NextSlot + }, + }; - ignored_nodes - .extend(IgnoredNodes::from_storage_proof::>(&res.proof)); - ignored_nodes.extend(IgnoredNodes::from_memory_db(res.backend_transaction)); + let block_time = block_rate.block_time.as_regular(); - blocks.push(res.block); - proofs.push(res.proof); + let (blocks_per_core, max_blocks_per_relay_slot) = match block_time { + Some(bt) if bt < relay_chain_slot_duration => ( + (relay_chain_slot_duration.as_millis() / number_of_cores as u128) / bt.as_millis(), + relay_chain_slot_duration.as_millis() / bt.as_millis(), + ), + _ => (1, 1), + }; - if let Some(sleep) = expected_block_end.checked_duration_since(Instant::now()) { - tokio::time::sleep(sleep).await; - } - } + tracing::trace!( + target: LOG_TARGET, + %blocks_per_core, + ?block_rate, + "Block rate configuration", + ); - let proof = StorageProof::merge(proofs); - - if let Err(err) = collator_sender.unbounded_send(CollatorMessage { - relay_parent, - parent_header: pov_parent_header, - blocks, - proof, - validation_code_hash, - core_index, - max_pov_size: validation_data.max_pov_size, - }) { - tracing::error!(target: crate::LOG_TARGET, ?err, "Unable to send block to collation task."); - return - } + let mut blocks = Vec::new(); + let mut proofs = Vec::new(); + let mut ignored_nodes = IgnoredNodes::default(); + + let mut parent_hash = pov_parent_hash; + let mut parent_header = pov_parent_header.clone(); + + for _ in 0..blocks_per_core { + let expected_block_end = Instant::now() + block_time.unwrap_or_default(); + + let (parachain_inherent_data, other_inherent_data) = match collator + .create_inherent_data( + relay_parent_hash, + &validation_data, + parent_hash, + slot_claim.timestamp(), + ) + .await + { + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to create inherent data."); + return NextBlockProductionStep::NextSlot + }, + Ok(x) => x, + }; + + let Ok(Some(res)) 
= collator + .build_block_and_import( + &parent_header, + slot_claim, + Some(vec![CumulusDigestItem::CoreInfo(CoreInfo { + selector: core_selector, + claim_queue_offset, + number_of_cores: number_of_cores.into(), + }) + .to_digest_item()]), + (parachain_inherent_data, other_inherent_data), + authoring_duration, + allowed_pov_size, + ) + .await + else { + tracing::error!(target: crate::LOG_TARGET, "Unable to build block at slot."); + return NextBlockProductionStep::NextSlot; + }; + + parent_hash = res.block.header().hash(); + parent_header = res.block.header().clone(); + + // Announce the newly built block to our peers. + collator.collator_service().announce_block(parent_hash, None); + + ignored_nodes.extend(IgnoredNodes::from_storage_proof::>(&res.proof)); + ignored_nodes.extend(IgnoredNodes::from_memory_db(res.backend_transaction)); + + blocks.push(res.block); + proofs.push(res.proof); + + if let Some(sleep) = expected_block_end.checked_duration_since(Instant::now()) { + tokio::time::sleep(sleep).await; } } + + let proof = StorageProof::merge(proofs); + + if let Err(err) = collator_sender.unbounded_send(CollatorMessage { + relay_parent: relay_parent_hash, + parent_header: pov_parent_header.clone(), + blocks, + proof, + validation_code_hash, + core_index, + max_pov_size: validation_data.max_pov_size, + }) { + tracing::error!(target: crate::LOG_TARGET, ?err, "Unable to send block to collation task."); + NextBlockProductionStep::Stop + } else if max_blocks_per_relay_slot > 1 && core_selector.0 as u16 + 1 < number_of_cores { + NextBlockProductionStep::NextCore { last_block_header: parent_header } + } else { + NextBlockProductionStep::NextSlot + } } /// Translate the slot of the relay parent to the slot of the parachain. 
diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index 1bb749eb8bfb2..5829a46fc6dd4 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -171,7 +171,7 @@ pub fn run + ParachainBlockImportMarker + Send + Sync + 'static, Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + Clone + 'static, - CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, + CHP: consensus_common::ValidationCodeHashProvider + Send + Sync + 'static, P: Pair + 'static, P::Public: AppPublic + Member + Codec, P::Signature: TryFrom> + Member + Codec, diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 561fc60bf232b..c18ffe643683f 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -1457,10 +1457,10 @@ impl Pallet { fn send_ump_signal() { use cumulus_primitives_core::relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}; - UpwardMessages::::mutate(|up| { - if let Some(core_info) = - CumulusDigestItem::find_core_info(&frame_system::Pallet::::digest()) - { + if let Some(core_info) = + CumulusDigestItem::find_core_info(&frame_system::Pallet::::digest()) + { + UpwardMessages::::mutate(|up| { up.push(UMP_SEPARATOR); // Send the core selector signal. @@ -1468,8 +1468,8 @@ impl Pallet { UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset) .encode(), ); - } - }); + }); + } } /// Open HRMP channel for using it in benchmarks or tests. 
diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 2de239b196762..2cdab7176f14a 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -366,6 +366,7 @@ where upward_messages .try_push(UMP_SEPARATOR) .expect("UMPSignals does not fit in UMPMessages"); + upward_messages .try_extend(upward_message_signals.into_iter()) .expect("UMPSignals does not fit in UMPMessages"); diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs index 2dde70b7b3315..76e571aff2d03 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs @@ -274,7 +274,7 @@ where ) where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, - CHP: cumulus_client_consensus_common::ValidationCodeHashProvider + Send + 'static, + CHP: cumulus_client_consensus_common::ValidationCodeHashProvider + Send + Sync + 'static, Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + Clone + 'static, Spawner: SpawnNamed, diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 6e425603f2aac..74d80c4bed9ae 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -4,7 +4,7 @@ use anyhow::anyhow; use codec::{Compact, Decode, Encode}; use cumulus_primitives_core::{relay_chain, rpsr_digest::RPSR_CONSENSUS_ID, CumulusDigestItem}; -use futures::{stream::StreamExt, TryStreamExt}; +use futures::{pin_mut, select, stream::StreamExt, TryStreamExt}; use polkadot_primitives::{vstaging::CandidateReceiptV2, Id as ParaId}; use sp_runtime::traits::Zero; use std::{ @@ -303,13 +303,37 @@ pub async fn 
assert_para_blocks_throughput( // Wait for the first session, block production on the parachain will start after that. wait_for_first_session_change(&mut relay_client.blocks().subscribe_best().await?).await?; - let finalized_stream = para_client.blocks().subscribe_finalized().await?; + let finalized_stream = para_client.blocks().subscribe_finalized().await?.fuse(); + let finalized_relay_blocks = relay_client.blocks().subscribe_finalized().await?.fuse(); + let start_relay_block = relay_client.blocks().at_latest().await?.number(); - let finalized_blocks = finalized_stream - .try_filter(|b| futures::future::ready(!b.number().is_zero())) - .take(stop_after) - .try_collect::>() - .await?; + let mut finalized_blocks = Vec::new(); + + pin_mut!(finalized_stream); + pin_mut!(finalized_relay_blocks); + + loop { + select! { + finalized = finalized_stream.select_next_some() => { + let finalized = finalized?; + if !finalized.number().is_zero() { + finalized_blocks.push(finalized); + + if finalized_blocks.len() >= stop_after { + break + } + } + }, + finalized = finalized_relay_blocks.select_next_some() => { + // `start_relay_block` maybe not being finalized at the beginning, but we just + // need some good estimation to ensure the tests ends at some point if there is some issue. 
+ if finalized?.number().saturating_sub(start_relay_block) >= expected_relay_blocks.end { + panic!("Already processed more relay chain blocks than allowed in the range.") + } + }, + complete => { panic!("Both streams should not finish"); } + } + } let first_relay_header = relay_parent_for(&finalized_blocks[0], relay_rpc_client).await?; let last_relay_header = diff --git a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs index ad2827e0d0bc7..e3e338866738d 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs @@ -74,8 +74,8 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { .await?; log::info!("2 more cores assigned to each parachain"); - assert_para_throughput(&relay_client, 15, [(ParaId::from(PARA_ID), 39..46)]).await?; - assert_finality_lag(¶_client, 20).await?; + assert_para_blocks_throughput(¶_client, 72, &relay_rpc_client, &relay_client, 6..9).await?; + assert_finality_lag(¶_client, 36).await?; let assign_cores_call = create_assign_core_call(&[(4, PARA_ID), (5, PARA_ID), (6, PARA_ID)]); // Assign two extra cores to each parachain. 
@@ -87,8 +87,8 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { .await?; log::info!("3 more cores assigned to each parachain"); - assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 52..61)]).await?; - assert_finality_lag(¶_client, 30).await?; + assert_para_blocks_throughput(¶_client, 72, &relay_rpc_client, &relay_client, 6..9).await?; + assert_finality_lag(¶_client, 36).await?; log::info!("Test finished successfully"); Ok(()) } From c1ef7d0d13af3c8da414e4dd013e12c627894b6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 4 Jul 2025 00:45:28 +0200 Subject: [PATCH 061/312] Some stuff --- cumulus/pallets/parachain-system/src/lib.rs | 6 +++- cumulus/test/runtime/src/lib.rs | 2 +- .../zombienet-sdk-helpers/src/lib.rs | 31 ++++++++++++++-- .../tests/elastic_scaling/pov_bundling.rs | 35 ++++++++++++++++--- 4 files changed, 66 insertions(+), 8 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index c18ffe643683f..e1ab811c6b25a 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -1329,7 +1329,11 @@ impl Pallet { // // If this fails, the parachain needs to wait for ancestors to be included before // a new block is allowed. 
- assert!(new_len < capacity.get(), "no space left for the block in the unincluded segment"); + assert!( + new_len < capacity.get(), + "No space left for the block in the unincluded segment: {new_len} < {}", + capacity.get() + ); weight_used } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 4a1925c6a6479..489973ba0d9bb 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -347,7 +347,7 @@ type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< Runtime, RELAY_CHAIN_SLOT_DURATION_MILLIS, 24, - 32, + 36, >; impl cumulus_pallet_parachain_system::Config for Runtime { type WeightInfo = (); diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 74d80c4bed9ae..be123255d806e 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -289,16 +289,33 @@ async fn relay_parent_for( } } +/// Count the number of `CandidateBacked` events for the given `para_id`. +async fn count_candidate_backed_events( + para_id: ParaId, + block: &Block>, +) -> Result { + let events = block.events().await?; + + find_event_and_decode_fields::>( + &events, + "ParaInclusion", + "CandidateBacked", + ) + .map(|events| events.iter().filter(|e| e.descriptor.para_id() == para_id).count()) +} + /// Assert that `stop_after` parachain blocks are included via `expected_relay_blocks`. /// /// It waits for `stop_after` parachain blocks to be finalized. Then it ensures that these parachain /// blocks are included on the relay chain using the given number of `expected_relay_blocks`. 
pub async fn assert_para_blocks_throughput( + para_id: ParaId, para_client: &OnlineClient, stop_after: usize, relay_rpc_client: &LegacyRpcMethods, relay_client: &OnlineClient, expected_relay_blocks: Range, + expected_candidates_per_relay_block: Range, ) -> Result<(), anyhow::Error> { // Wait for the first session, block production on the parachain will start after that. wait_for_first_session_change(&mut relay_client.blocks().subscribe_best().await?).await?; @@ -325,10 +342,13 @@ pub async fn assert_para_blocks_throughput( } }, finalized = finalized_relay_blocks.select_next_some() => { + let num_relay_chain_blocks = finalized?.number().saturating_sub(start_relay_block); + // `start_relay_block` maybe not being finalized at the beginning, but we just // need some good estimation to ensure the tests ends at some point if there is some issue. - if finalized?.number().saturating_sub(start_relay_block) >= expected_relay_blocks.end { - panic!("Already processed more relay chain blocks than allowed in the range.") + if num_relay_chain_blocks >= expected_relay_blocks.end { + panic!("Already processed more relay chain blocks ({num_relay_chain_blocks}) \ + than allowed in the range ({expected_relay_blocks:?}).") } }, complete => { panic!("Both streams should not finish"); } @@ -356,6 +376,13 @@ pub async fn assert_para_blocks_throughput( relay_blocks_without_session_change += 1; } + let candidate_count = count_candidate_backed_events(para_id, &block).await?; + + assert!( + expected_candidates_per_relay_block.contains(&candidate_count), + "Expected `CandidateBacked` count of {expected_candidates_per_relay_block:?} but only got {candidate_count}", + ); + current_relay_header = relay_rpc_client .chain_get_header(Some(current_relay_header.parent_hash)) .await? 
diff --git a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs index e3e338866738d..c5671dfb8e66c 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs @@ -59,7 +59,16 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { LegacyRpcMethods::new(RpcClient::from_url(relay_node.ws_uri()).await.unwrap()); let alice = dev::alice(); - assert_para_blocks_throughput(¶_client, 72, &relay_rpc_client, &relay_client, 6..9).await?; + assert_para_blocks_throughput( + PARA_ID.into(), + ¶_client, + 72, + &relay_rpc_client, + &relay_client, + 6..9, + 1..2, + ) + .await?; // 3 relay chain blocks assert_finality_lag(¶_client, 36).await?; @@ -74,7 +83,16 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { .await?; log::info!("2 more cores assigned to each parachain"); - assert_para_blocks_throughput(¶_client, 72, &relay_rpc_client, &relay_client, 6..9).await?; + assert_para_blocks_throughput( + PARA_ID.into(), + ¶_client, + 72, + &relay_rpc_client, + &relay_client, + 6..9, + 2..4, + ) + .await?; assert_finality_lag(¶_client, 36).await?; let assign_cores_call = create_assign_core_call(&[(4, PARA_ID), (5, PARA_ID), (6, PARA_ID)]); @@ -87,7 +105,16 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { .await?; log::info!("3 more cores assigned to each parachain"); - assert_para_blocks_throughput(¶_client, 72, &relay_rpc_client, &relay_client, 6..9).await?; + assert_para_blocks_throughput( + PARA_ID.into(), + ¶_client, + 72, + &relay_rpc_client, + &relay_client, + 6..9, + 5..7, + ) + .await?; assert_finality_lag(¶_client, 36).await?; log::info!("Test finished successfully"); Ok(()) @@ -129,7 +156,7 @@ async fn build_network_config() -> Result { .with_default_args(vec![ ("--authoring").into(), ("slot-based").into(), - ("-laura=trace").into(), + 
("-lparachain=debug,aura=trace").into(), ]) .with_collator(|n| n.with_name("collator-0")) .with_collator(|n| n.with_name("collator-1")) From 031a9224d95273eb19a6b9cbe567b657819ed755 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 5 Jul 2025 01:49:18 +0200 Subject: [PATCH 062/312] Try to make it more robust --- cumulus/primitives/core/src/lib.rs | 1 + .../zombienet-sdk-helpers/src/lib.rs | 186 +++++++++++------- 2 files changed, 120 insertions(+), 67 deletions(-) diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 15e234264f70f..0000b2727911e 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -226,6 +226,7 @@ pub struct CoreInfo { } /// Identifier for a relay chain block used by [`CumulusDigestItem`]. +#[derive(Clone, Debug, PartialEq)] pub enum RelayBlockIdentifier { /// The block is identified using its block hash. ByHash(relay_chain::Hash), diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index be123255d806e..de1c0dc055bf9 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -3,9 +3,13 @@ use anyhow::anyhow; use codec::{Compact, Decode, Encode}; -use cumulus_primitives_core::{relay_chain, rpsr_digest::RPSR_CONSENSUS_ID, CumulusDigestItem}; +use cumulus_primitives_core::{ + relay_chain, rpsr_digest::RPSR_CONSENSUS_ID, CoreInfo, CumulusDigestItem, RelayBlockIdentifier, +}; use futures::{pin_mut, select, stream::StreamExt, TryStreamExt}; -use polkadot_primitives::{vstaging::CandidateReceiptV2, Id as ParaId}; +use polkadot_primitives::{ + vstaging::CandidateReceiptV2, BlakeTwo256, HashT, HeadData, Id as ParaId, +}; use sp_runtime::traits::Zero; use std::{ cmp::max, @@ -59,8 +63,7 @@ fn find_event_and_decode_fields( for event in events.iter() { let event = event?; if event.pallet_name() == pallet && event.variant_name() == 
variant { - let field_bytes = event.field_bytes().to_vec(); - result.push(T::decode(&mut &field_bytes[..])?); + result.push(T::decode(&mut &event.field_bytes()[..])?); } } Ok(result) @@ -255,53 +258,43 @@ pub async fn assert_para_throughput( Ok(()) } -/// Returns the header of the relay parent used by the given parachain `block`. -async fn relay_parent_for( +/// Returns [`CoreInfo`] for the given parachain block. +fn find_core_info( block: &Block>, - relay_rpc_client: &LegacyRpcMethods, -) -> Result<::Header, anyhow::Error> { +) -> Result { let substrate_digest = sp_runtime::generic::Digest::decode(&mut &block.header().digest.encode()[..]) .expect("`subxt::Digest` and `substrate::Digest` should encode and decode; qed"); - match CumulusDigestItem::find_relay_block_identifier(&substrate_digest).unwrap() { - cumulus_primitives_core::RelayBlockIdentifier::ByHash(hash) => relay_rpc_client - .chain_get_header(Some(hash)) - .await? - .ok_or_else(|| anyhow!("Could not fetch relay chain header: {hash:?}")), - cumulus_primitives_core::RelayBlockIdentifier::ByStorageRoot { - storage_root, - block_number, - } => { - let block_hash = relay_rpc_client - .chain_get_block_hash(Some(block_number.into())) - .await? - .ok_or_else(|| anyhow!("Could not fetch block hash for block: {}", block_number))?; - - let header = relay_rpc_client - .chain_get_header(Some(block_hash)) - .await? - .ok_or_else(|| anyhow!("Could not fetch real chain header: {block_hash:?}"))?; - - assert_eq!(storage_root, header.state_root, "Storage roots should match"); - Ok(header) - }, - } + CumulusDigestItem::find_core_info(&substrate_digest) + .ok_or_else(|| anyhow!("Failed to find `CoreInfo` digest")) +} + +/// Returns [`RelayBlockIdentifier`] for the given parachain block. 
+fn find_relay_block_identifier( + block: &Block>, +) -> Result { + let substrate_digest = + sp_runtime::generic::Digest::decode(&mut &block.header().digest.encode()[..]) + .expect("`subxt::Digest` and `substrate::Digest` should encode and decode; qed"); + + CumulusDigestItem::find_relay_block_identifier(&substrate_digest) + .ok_or_else(|| anyhow!("Failed to find `RelayBlockIdentifier` digest")) } -/// Count the number of `CandidateBacked` events for the given `para_id`. -async fn count_candidate_backed_events( +/// Find the `CandidateIncluded` events for the given `para_id`. +async fn find_candidate_included_events( para_id: ParaId, block: &Block>, -) -> Result { +) -> Result>, anyhow::Error> { let events = block.events().await?; find_event_and_decode_fields::>( &events, "ParaInclusion", - "CandidateBacked", + "CandidateIncluded", ) - .map(|events| events.iter().filter(|e| e.descriptor.para_id() == para_id).count()) + .map(|events| events.into_iter().filter(|e| e.descriptor.para_id() == para_id).collect()) } /// Assert that `stop_after` parachain blocks are included via `expected_relay_blocks`. @@ -320,48 +313,74 @@ pub async fn assert_para_blocks_throughput( // Wait for the first session, block production on the parachain will start after that. wait_for_first_session_change(&mut relay_client.blocks().subscribe_best().await?).await?; + para_client + .blocks() + .subscribe_finalized() + .await? + .try_filter(|b| { + futures::future::ready(find_core_info(b).map_or(false, |info| { + expected_candidates_per_relay_block.contains(&(info.number_of_cores.0 as usize)) + })) + }) + .next() + .await + .transpose()?; + let finalized_stream = para_client.blocks().subscribe_finalized().await?.fuse(); let finalized_relay_blocks = relay_client.blocks().subscribe_finalized().await?.fuse(); - let start_relay_block = relay_client.blocks().at_latest().await?.number(); + let start_relay_block = relay_client + .blocks() + .subscribe_best() + .await? 
+ .next() + .await + .ok_or_else(|| anyhow!("Could not get a best block from the relay chain"))??; - let mut finalized_blocks = Vec::new(); + let mut finalized_parachain_blocks = Vec::new(); pin_mut!(finalized_stream); pin_mut!(finalized_relay_blocks); - loop { + let last_finalized_relay_block = loop { select! { finalized = finalized_stream.select_next_some() => { let finalized = finalized?; - if !finalized.number().is_zero() { - finalized_blocks.push(finalized); - - if finalized_blocks.len() >= stop_after { - break - } + if !finalized.number().is_zero() && finalized_parachain_blocks.len() < stop_after { + finalized_parachain_blocks.push(finalized); } }, finalized = finalized_relay_blocks.select_next_some() => { - let num_relay_chain_blocks = finalized?.number().saturating_sub(start_relay_block); + let finalized = finalized?; + let num_relay_chain_blocks = finalized.number().saturating_sub(start_relay_block.number()); + + // If we have recorded enough parachain blocks + if finalized_parachain_blocks.len() >= stop_after { + break finalized + } // `start_relay_block` maybe not being finalized at the beginning, but we just // need some good estimation to ensure the tests ends at some point if there is some issue. if num_relay_chain_blocks >= expected_relay_blocks.end { - panic!("Already processed more relay chain blocks ({num_relay_chain_blocks}) \ - than allowed in the range ({expected_relay_blocks:?}).") + return Err(anyhow!("Already processed more relay chain blocks ({num_relay_chain_blocks}) \ + than allowed in the range ({expected_relay_blocks:?}).")) } }, complete => { panic!("Both streams should not finish"); } } - } + }; - let first_relay_header = relay_parent_for(&finalized_blocks[0], relay_rpc_client).await?; - let last_relay_header = - relay_parent_for(finalized_blocks.last().unwrap(), relay_rpc_client).await?; + // The number of cores occupied by the parachain candidates, ignoring session changes. 
+ let mut occupied_relay_chain_blocks = 0; + // Did we found the first candidate matching one of our expected parachain blocks? + let mut found_first_candidate = false; + let mut current_relay_header = last_finalized_relay_block.header().clone(); + loop { + if current_relay_header.number().is_zero() { + return Err(anyhow!( + "Reached relay genesis block without finding all parachain blocks?" + )); + } - let mut relay_blocks_without_session_change = 0; - let mut current_relay_header = last_relay_header.clone(); - while current_relay_header.number() >= first_relay_header.number() { let block = relay_rpc_client .chain_get_block(Some(current_relay_header.hash())) .await? @@ -372,16 +391,51 @@ pub async fn assert_para_blocks_throughput( let block = relay_client.blocks().at(block.header.hash()).await?; + let included_events = find_candidate_included_events(para_id, &block).await?; + + let included_parachain_block_identifiers = included_events + .iter() + .filter_map(|i| { + finalized_parachain_blocks.iter().rev().find_map(|p| { + (BlakeTwo256::hash_of(p.header()) == i.descriptor.para_head()).then(|| { + find_core_info(&p) + .and_then(|c| find_relay_block_identifier(&p).map(|rbi| (c, rbi))) + }) + }) + }) + .collect::, _>>()?; + + finalized_parachain_blocks.retain(|b| { + let core_info = find_core_info(b).unwrap(); + let rbi = find_relay_block_identifier(b).unwrap(); + + !included_parachain_block_identifiers.contains(&(core_info, rbi)) + }); + + dbg!(block.number()); + if !is_session_change(&block).await? 
{ - relay_blocks_without_session_change += 1; - } + found_first_candidate |= !included_parachain_block_identifiers.is_empty(); - let candidate_count = count_candidate_backed_events(para_id, &block).await?; + if found_first_candidate { + occupied_relay_chain_blocks += 1; + } - assert!( - expected_candidates_per_relay_block.contains(&candidate_count), - "Expected `CandidateBacked` count of {expected_candidates_per_relay_block:?} but only got {candidate_count}", - ); + if !included_parachain_block_identifiers.is_empty() && + !expected_candidates_per_relay_block + .contains(&included_parachain_block_identifiers.len()) + { + return Err(anyhow!( + "{} candidates did not match the expected {expected_candidates_per_relay_block:?} \ + candidates per relay chain block", included_parachain_block_identifiers.len() + )) + } + } + + dbg!(finalized_parachain_blocks.len()); + if finalized_parachain_blocks.is_empty() { + break + } current_relay_header = relay_rpc_client .chain_get_header(Some(current_relay_header.parent_hash)) @@ -394,11 +448,9 @@ pub async fn assert_para_blocks_throughput( })?; } - assert!( - expected_relay_blocks.contains(&relay_blocks_without_session_change), - "{relay_blocks_without_session_change} relay chain blocks is not in the \ - expected range of {relay_blocks_without_session_change} relay chain blocks.", - ); + if !expected_relay_blocks.contains(&occupied_relay_chain_blocks) { + return Err(anyhow!("{occupied_relay_chain_blocks} did not match the expected {expected_candidates_per_relay_block:?} relay chain blocks")) + } Ok(()) } From 8a3d99634955d9603c23bae44943cbc34153c2d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sun, 6 Jul 2025 00:21:05 +0200 Subject: [PATCH 063/312] Fix test --- .../zombienet-sdk-helpers/src/lib.rs | 105 ++++++++++++------ .../tests/elastic_scaling/pov_bundling.rs | 48 ++++---- .../tests/disabling/slashing.rs | 7 +- .../tests/elastic_scaling/basic_3cores.rs | 4 +- .../doesnt_break_parachains.rs | 3 +- 
.../approved_peer_mixed_validators.rs | 4 +- .../async_backing_6_seconds_rate.rs | 4 +- .../tests/functional/duplicate_collations.rs | 7 +- .../functional/shared_core_idle_parachain.rs | 7 +- .../tests/functional/sync_backing.rs | 2 +- 10 files changed, 101 insertions(+), 90 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index de1c0dc055bf9..cc6a28cf5008a 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -69,18 +69,21 @@ fn find_event_and_decode_fields( Ok(result) } -// Helper function for asserting the throughput of parachains (total number of backed candidates in -// a window of relay chain blocks), after the first session change. -// Blocks with session changes are generally ignores. +// Helper function for asserting the throughput of parachains, after the first session change. +// +// The throughput is measured as total number of backed candidates in a window of relay chain +// blocks. Relay chain blocks with session changes are generally ignores. pub async fn assert_finalized_para_throughput( relay_client: &OnlineClient, stop_after: u32, - expected_candidate_ranges: HashMap>, + expected_candidate_ranges: impl Into>>, + expected_number_of_blocks: impl Into, Range)>>, ) -> Result<(), anyhow::Error> { let mut blocks_sub = relay_client.blocks().subscribe_finalized().await?; - let mut candidate_count: HashMap = HashMap::new(); + let mut candidate_count: HashMap>> = HashMap::new(); let mut current_block_count = 0; + let expected_candidate_ranges = expected_candidate_ranges.into(); let valid_para_ids: Vec = expected_candidate_ranges.keys().cloned().collect(); // Wait for the first session, block production on the parachain will start after that. 
@@ -107,10 +110,12 @@ pub async fn assert_finalized_para_throughput( for receipt in receipts { let para_id = receipt.descriptor.para_id(); log::debug!("Block backed for para_id {para_id}"); + if !valid_para_ids.contains(¶_id) { return Err(anyhow!("Invalid ParaId detected: {}", para_id)); }; - *(candidate_count.entry(para_id).or_default()) += 1; + + candidate_count.entry(para_id).or_default().push(receipt); } if current_block_count == stop_after { @@ -120,17 +125,66 @@ pub async fn assert_finalized_para_throughput( log::info!( "Reached {stop_after} finalized relay chain blocks that contain backed candidates. The per-parachain distribution is: {:#?}", - candidate_count.iter().map(|(para_id, count)| format!("{para_id} has {count} backed candidates")).collect::>() + candidate_count.iter().map(|(para_id, receipts)| format!("{para_id} has {} backed candidates", receipts.len())).collect::>() ); for (para_id, expected_candidate_range) in expected_candidate_ranges { - let actual = candidate_count + let receipts = candidate_count .get(¶_id) - .expect("ParaId did not have any backed candidates"); - assert!( - expected_candidate_range.contains(actual), - "Candidate count {actual} not within range {expected_candidate_range:?}" - ); + .ok_or_else(|| anyhow!("ParaId did not have any backed candidates"))?; + + if !expected_candidate_range.contains(&(receipts.len() as u32)) { + return Err(anyhow!( + "Candidate count {} not within range {expected_candidate_range:?}", + receipts.len() + )) + } + } + + for (para_id, (para_client, expected_number_of_blocks)) in expected_number_of_blocks.into() { + let receipts = candidate_count + .get(¶_id) + .ok_or_else(|| anyhow!("ParaId did not have any backed candidates"))?; + + let mut num_blocks = 0; + + for receipt in receipts { + // We "abuse" the fact that the parachain is using `BlakeTwo256` as hash and thus, the + // `para_head` hash and the hash of the `header` should be equal. 
+ let mut next_para_block_hash = receipt.descriptor().para_head(); + + let mut relay_identifier = None; + let mut core_info = None; + + loop { + let block = para_client.blocks().at(next_para_block_hash).await?; + + // Genesis block is not part of a candidate :) + if block.number() == 0 { + break + } + + let ri = find_relay_block_identifier(&block)?; + let ci = find_core_info(&block)?; + + // If the core changes or the relay identifier, we found all blocks for the + // candidate. + if *relay_identifier.get_or_insert(ri.clone()) != ri || + *core_info.get_or_insert(ci.clone()) != ci + { + break + } + + num_blocks += 1; + next_para_block_hash = block.header().parent_hash; + } + } + + if !expected_number_of_blocks.contains(&num_blocks) { + return Err(anyhow!( + "Block number count {num_blocks} not within range {expected_number_of_blocks:?}", + )) + } } Ok(()) @@ -574,12 +628,15 @@ pub async fn assert_relay_parent_offset( } }, Some(Ok(para_block)) = para_block_stream.next() => { - let logs = ¶_block.header().digest.logs; + let relay_block_identifier = find_relay_block_identifier(¶_block)?; - let Some((_, relay_parent_number)): Option<(H256, u32)> = logs.iter().find_map(extract_relay_parent_storage_root) else { - return Err(anyhow!("No RPSR digest found in header #{}", para_block.number())); + let relay_parent_number = match relay_block_identifier { + RelayBlockIdentifier::ByHash(block_hash) => relay_client.blocks().at(block_hash).await?.number(), + RelayBlockIdentifier::ByStorageRoot { block_number, .. } => block_number, }; + log::debug!("Parachain block #{} was built on relay parent #{relay_parent_number}, highest seen was {highest_relay_block_seen}", para_block.number()); + assert!( highest_relay_block_seen < offset || relay_parent_number <= highest_relay_block_seen.saturating_sub(offset), @@ -593,20 +650,6 @@ pub async fn assert_relay_parent_offset( } } } - Ok(()) -} -/// Extract relay parent information from the digest logs. 
-fn extract_relay_parent_storage_root( - digest: &DigestItem, -) -> Option<(relay_chain::Hash, relay_chain::BlockNumber)> { - match digest { - DigestItem::Consensus(id, val) if id == &RPSR_CONSENSUS_ID => { - let (h, n): (relay_chain::Hash, Compact) = - Decode::decode(&mut &val[..]).ok()?; - - Some((h, n.0)) - }, - _ => None, - } + Ok(()) } diff --git a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs index c5671dfb8e66c..8355c596a3a5c 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs @@ -18,7 +18,7 @@ use anyhow::anyhow; use cumulus_zombienet_sdk_helpers::{ - assert_finality_lag, assert_para_blocks_throughput, assert_para_throughput, + assert_finality_lag, assert_finalized_para_throughput, assert_para_throughput, create_assign_core_call, }; use polkadot_primitives::Id as ParaId; @@ -55,22 +55,17 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { let para_client = para_node.wait_client().await?; let relay_client: OnlineClient = relay_node.wait_client().await?; - let relay_rpc_client = - LegacyRpcMethods::new(RpcClient::from_url(relay_node.ws_uri()).await.unwrap()); let alice = dev::alice(); - assert_para_blocks_throughput( - PARA_ID.into(), - ¶_client, - 72, - &relay_rpc_client, + assert_finalized_para_throughput( &relay_client, - 6..9, - 1..2, + 6, + [(ParaId::from(PARA_ID), 4..6)], + [(ParaId::from(PARA_ID), (para_client.clone(), 48..72))], ) .await?; // 3 relay chain blocks - assert_finality_lag(¶_client, 36).await?; + assert_finality_lag(¶_client, 72).await?; let assign_cores_call = create_assign_core_call(&[(2, PARA_ID), (3, PARA_ID)]); @@ -81,19 +76,16 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { .inspect(|_| log::info!("Tx send, waiting for finalization"))? 
.wait_for_finalized_success() .await?; - log::info!("2 more cores assigned to each parachain"); + log::info!("2 more cores assigned to the parachain"); - assert_para_blocks_throughput( - PARA_ID.into(), - ¶_client, - 72, - &relay_rpc_client, + assert_finalized_para_throughput( &relay_client, - 6..9, - 2..4, + 6, + [(ParaId::from(PARA_ID), 12..18)], + [(ParaId::from(PARA_ID), (para_client.clone(), 48..72))], ) .await?; - assert_finality_lag(¶_client, 36).await?; + assert_finality_lag(¶_client, 72).await?; let assign_cores_call = create_assign_core_call(&[(4, PARA_ID), (5, PARA_ID), (6, PARA_ID)]); // Assign two extra cores to each parachain. @@ -103,19 +95,17 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { .await? .wait_for_finalized_success() .await?; - log::info!("3 more cores assigned to each parachain"); + log::info!("3 more cores assigned to the parachain"); - assert_para_blocks_throughput( - PARA_ID.into(), - ¶_client, - 72, - &relay_rpc_client, + assert_finalized_para_throughput( &relay_client, - 6..9, - 5..7, + 6, + [(ParaId::from(PARA_ID), 24..36)], + [(ParaId::from(PARA_ID), (para_client.clone(), 48..72))], ) .await?; - assert_finality_lag(¶_client, 36).await?; + + assert_finality_lag(¶_client, 72).await?; log::info!("Test finished successfully"); Ok(()) } diff --git a/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs b/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs index 372b42b35c4d9..131fabcf3d00d 100644 --- a/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs +++ b/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs @@ -92,12 +92,7 @@ async fn dispute_past_session_slashing() -> Result<(), anyhow::Error> { let relay_client: OnlineClient = honest.wait_client().await?; // Wait for some para blocks being produced - assert_finalized_para_throughput( - &relay_client, - 20, - [(ParaId::from(1337), 10..20)].into_iter().collect(), - ) - .await?; + assert_finalized_para_throughput(&relay_client, 20, [(ParaId::from(1337), 
10..20)]).await?; // Let's initiate a dispute malus.resume().await?; diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs index 0a530eb027ea1..75afd5bf990bc 100644 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs @@ -91,9 +91,7 @@ async fn basic_3cores_test() -> Result<(), anyhow::Error> { assert_finalized_para_throughput( &relay_client, 15, - [(ParaId::from(2000), 38..46), (ParaId::from(2001), 12..16)] - .into_iter() - .collect(), + [(ParaId::from(2000), 38..46), (ParaId::from(2001), 12..16)], ) .await?; diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs index 8da48526ef44b..87bb98ba4eca4 100644 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs @@ -84,8 +84,7 @@ async fn doesnt_break_parachains_test() -> Result<(), anyhow::Error> { let para_id = ParaId::from(2000); // Expect the parachain to be making normal progress, 1 candidate backed per relay chain block. // Lowering to 12 to make sure CI passes. 
- assert_finalized_para_throughput(&relay_client, 15, [(para_id, 12..16)].into_iter().collect()) - .await?; + assert_finalized_para_throughput(&relay_client, 15, [(para_id, 12..16)]).await?; let para_client = para_node.wait_client().await?; // Assert the parachain finalized block height is also on par with the number of backed diff --git a/polkadot/zombienet-sdk-tests/tests/functional/approved_peer_mixed_validators.rs b/polkadot/zombienet-sdk-tests/tests/functional/approved_peer_mixed_validators.rs index 67b411f4a819b..eeed6ad264432 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/approved_peer_mixed_validators.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/approved_peer_mixed_validators.rs @@ -112,9 +112,7 @@ async fn approved_peer_mixed_validators_test() -> Result<(), anyhow::Error> { assert_finalized_para_throughput( &relay_client, 15, - [(ParaId::from(2000), 6..15), (ParaId::from(2001), 11..16)] - .into_iter() - .collect(), + [(ParaId::from(2000), 6..15), (ParaId::from(2001), 11..16)], ) .await?; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs b/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs index f03705617f89d..44ca4c9f9b8fa 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs @@ -78,9 +78,7 @@ async fn async_backing_6_seconds_rate_test() -> Result<(), anyhow::Error> { assert_finalized_para_throughput( &relay_client, 15, - [(ParaId::from(2000), 11..16), (ParaId::from(2001), 11..16)] - .into_iter() - .collect(), + [(ParaId::from(2000), 11..16), (ParaId::from(2001), 11..16)], ) .await?; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs b/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs index 39cd1383fb54c..0fcd0fb768879 100644 --- 
a/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs @@ -99,12 +99,7 @@ async fn duplicate_collations_test() -> Result<(), anyhow::Error> { log::info!("2 more cores assigned to parachain-2000"); - assert_finalized_para_throughput( - &relay_client, - 15, - [(ParaId::from(2000), 40..46)].into_iter().collect(), - ) - .await?; + assert_finalized_para_throughput(&relay_client, 15, [(ParaId::from(2000), 40..46)]).await?; let log_line_options = LogLineCountOptions::new( |n| n == 1, diff --git a/polkadot/zombienet-sdk-tests/tests/functional/shared_core_idle_parachain.rs b/polkadot/zombienet-sdk-tests/tests/functional/shared_core_idle_parachain.rs index a5b3da274f65c..90bf26e51fb8f 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/shared_core_idle_parachain.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/shared_core_idle_parachain.rs @@ -91,12 +91,7 @@ async fn shared_core_idle_parachain_test() -> Result<(), anyhow::Error> { // Check that para 2000 is essentially getting 12-second block time, while para 2001 does not // produce anything. 
- assert_finalized_para_throughput( - &relay_client, - 15, - [(ParaId::from(2000), 5..9)].into_iter().collect(), - ) - .await?; + assert_finalized_para_throughput(&relay_client, 15, [(ParaId::from(2000), 5..9)]).await?; assert_finality_lag(¶_node_2000.wait_client().await?, 5).await?; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs b/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs index 717b8fbf87e8e..f3eb0ef4eac1d 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs @@ -66,7 +66,7 @@ async fn sync_backing_test() -> Result<(), anyhow::Error> { assert_finalized_para_throughput( &relay_client, 15, - [(ParaId::from(2500), 5..9)].into_iter().collect(), + [(ParaId::from(2500), 5..9)], ) .await?; From ed945abfecd2435bddaaa341be9114aa4df30118 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 7 Jul 2025 15:13:41 +0200 Subject: [PATCH 064/312] Redo master changes --- .../zombienet-sdk-helpers/src/lib.rs | 136 ++---------------- .../tests/disabling/slashing.rs | 4 +- .../tests/elastic_scaling/basic_3cores.rs | 5 +- .../doesnt_break_parachains.rs | 4 +- .../approved_peer_mixed_validators.rs | 5 +- .../async_backing_6_seconds_rate.rs | 5 +- .../tests/functional/duplicate_collations.rs | 4 +- .../functional/shared_core_idle_parachain.rs | 4 +- .../tests/functional/sync_backing.rs | 5 +- 9 files changed, 30 insertions(+), 142 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index cc6a28cf5008a..9399edd7e2348 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -2,33 +2,25 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::anyhow; -use codec::{Compact, Decode, Encode}; -use cumulus_primitives_core::{ - relay_chain, rpsr_digest::RPSR_CONSENSUS_ID, CoreInfo, CumulusDigestItem, 
RelayBlockIdentifier, -}; +use codec::{Decode, Encode}; +use cumulus_primitives_core::{CoreInfo, CumulusDigestItem, RelayBlockIdentifier}; use futures::{pin_mut, select, stream::StreamExt, TryStreamExt}; -use polkadot_primitives::{ - vstaging::CandidateReceiptV2, BlakeTwo256, HashT, HeadData, Id as ParaId, -}; +use polkadot_primitives::{vstaging::CandidateReceiptV2, BlakeTwo256, HashT, Id as ParaId}; use sp_runtime::traits::Zero; -use std::{ - cmp::max, - collections::{HashMap, HashSet}, - ops::Range, -}; +use std::{cmp::max, collections::HashMap, ops::Range}; use tokio::{ join, time::{sleep, Duration}, }; use zombienet_sdk::subxt::{ - backend::{legacy::LegacyRpcMethods, Backend}, + backend::legacy::LegacyRpcMethods, blocks::Block, - config::{substrate::DigestItem, Header}, + config::Header, events::Events, ext::scale_value::value, tx::DynamicPayload, utils::H256, - Config, OnlineClient, PolkadotConfig, + OnlineClient, PolkadotConfig, }; // Maximum number of blocks to wait for a session change. @@ -73,7 +65,7 @@ fn find_event_and_decode_fields( // // The throughput is measured as total number of backed candidates in a window of relay chain // blocks. Relay chain blocks with session changes are generally ignores. -pub async fn assert_finalized_para_throughput( +pub async fn assert_para_throughput( relay_client: &OnlineClient, stop_after: u32, expected_candidate_ranges: impl Into>>, @@ -84,6 +76,7 @@ pub async fn assert_finalized_para_throughput( let mut current_block_count = 0; let expected_candidate_ranges = expected_candidate_ranges.into(); + let expected_number_of_blocks = expected_number_of_blocks.into(); let valid_para_ids: Vec = expected_candidate_ranges.keys().cloned().collect(); // Wait for the first session, block production on the parachain will start after that. 
@@ -141,7 +134,7 @@ pub async fn assert_finalized_para_throughput( } } - for (para_id, (para_client, expected_number_of_blocks)) in expected_number_of_blocks.into() { + for (para_id, (para_client, expected_number_of_blocks)) in expected_number_of_blocks { let receipts = candidate_count .get(¶_id) .ok_or_else(|| anyhow!("ParaId did not have any backed candidates"))?; @@ -202,115 +195,6 @@ async fn is_session_change( })) } -// Helper function for asserting the throughput of parachain candidates on the relay chain. -// -// The throughput is measured as total number of backed candidates in a window of relay chain -// blocks, after the first session change. Blocks with session changes are generally ignored. -// -// `stop_after`: Number of relay chain blocks after which the recording should be stopped. -pub async fn assert_para_throughput( - relay_client: &OnlineClient, - stop_after: u32, - expected_candidate_ranges: impl Into>>, -) -> Result<(), anyhow::Error> { - // Check on backed blocks in all imported relay chain blocks. The slot-based collator - // builds on the best fork currently. It can happen that it builds on a fork which is not - // getting finalized, in which case we will lose some blocks. This makes it harder to build - // stable asserts. Once we are building on older relay parents, this can be changed to - // finalized blocks again. - let mut blocks_sub = relay_client.blocks().subscribe_all().await?; - let mut candidate_count: HashMap = HashMap::new(); - let mut start_height: Option = None; - let expected_candidate_ranges = expected_candidate_ranges.into(); - - let valid_para_ids: Vec = expected_candidate_ranges.keys().cloned().collect(); - - // Wait for the first session, block production on the parachain will start after that. 
- wait_for_first_session_change(&mut blocks_sub).await?; - - let mut session_change_seen_at = 0u32; - while let Some(block) = blocks_sub.next().await { - let block = block?; - let block_number = u32::from(block.number()); - - let events = block.events().await?; - let mut para_ids_to_increment: HashSet = Default::default(); - - // Do not count blocks with session changes, no backed blocks there. - if is_session_change(&block).await? { - if block_number == session_change_seen_at { - continue; - } - - // Increment the start height to account for a block level that has no - // backed blocks. - start_height = start_height.map(|h| h + 1); - session_change_seen_at = block_number; - continue; - } - - let receipts = find_event_and_decode_fields::>( - &events, - "ParaInclusion", - "CandidateBacked", - )?; - - for receipt in receipts { - let para_id = receipt.descriptor.para_id(); - if !valid_para_ids.contains(¶_id) { - return Err(anyhow!("Invalid ParaId detected: {}", para_id)); - }; - log::debug!( - "Block backed for para_id {para_id} at relay: #{} ({})", - block.number(), - block.hash() - ); - let (counter, accounted_block_height) = candidate_count.entry(para_id).or_default(); - if block_number > *accounted_block_height { - *counter += 1; - // Increment later to count multiple descriptors in the same block. 
- para_ids_to_increment.insert(para_id); - } - } - - for para_id in para_ids_to_increment.iter() { - candidate_count.entry(*para_id).or_default().1 = block_number; - } - - if block_number - *start_height.get_or_insert_with(|| block_number - 1) >= stop_after { - log::info!( - "Finished condition: block_height: {:?}, start_height: {:?}", - block.number(), - start_height - ); - break; - } - } - - log::info!( - "Reached {stop_after} relay chain blocks that contain backed candidates: {:#?}", - candidate_count - .iter() - .map(|(para_id, (count, _))| format!( - "Parachain {para_id} has {count} backed candidates" - )) - .collect::>() - ); - - for (para_id, expected_candidate_range) in expected_candidate_ranges { - let actual = candidate_count - .get(¶_id) - .expect("ParaId did not have any backed candidates"); - - assert!( - expected_candidate_range.contains(&actual.0), - "Candidate count {} not within range {expected_candidate_range:?}", - actual.0 - ); - } - - Ok(()) -} /// Returns [`CoreInfo`] for the given parachain block. 
fn find_core_info( diff --git a/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs b/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs index 131fabcf3d00d..a6e9eb698542c 100644 --- a/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs +++ b/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs @@ -7,7 +7,7 @@ use anyhow::anyhow; use cumulus_zombienet_sdk_helpers::{ - assert_blocks_are_being_finalized, assert_finalized_para_throughput, + assert_blocks_are_being_finalized, assert_para_throughput, wait_for_first_session_change, }; use polkadot_primitives::{BlockNumber, CandidateHash, DisputeState, Id as ParaId, SessionIndex}; @@ -92,7 +92,7 @@ async fn dispute_past_session_slashing() -> Result<(), anyhow::Error> { let relay_client: OnlineClient = honest.wait_client().await?; // Wait for some para blocks being produced - assert_finalized_para_throughput(&relay_client, 20, [(ParaId::from(1337), 10..20)]).await?; + assert_para_throughput(&relay_client, 20, [(ParaId::from(1337), 10..20)], []).await?; // Let's initiate a dispute malus.resume().await?; diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs index 75afd5bf990bc..e520251ceebfd 100644 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs @@ -5,7 +5,7 @@ // can achieve full throughput of 3 candidates per block. 
use anyhow::anyhow; -use cumulus_zombienet_sdk_helpers::{assert_finalized_para_throughput, create_assign_core_call}; +use cumulus_zombienet_sdk_helpers::{assert_para_throughput, create_assign_core_call}; use polkadot_primitives::Id as ParaId; use serde_json::json; use zombienet_sdk::{ @@ -88,10 +88,11 @@ async fn basic_3cores_test() -> Result<(), anyhow::Error> { log::info!("2 more cores assigned to adder-2000"); - assert_finalized_para_throughput( + assert_para_throughput( &relay_client, 15, [(ParaId::from(2000), 38..46), (ParaId::from(2001), 12..16)], + [], ) .await?; diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs index 87bb98ba4eca4..f903c611a3c02 100644 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs @@ -6,7 +6,7 @@ use anyhow::anyhow; use cumulus_zombienet_sdk_helpers::{ - assert_finality_lag, assert_finalized_para_throughput, create_assign_core_call, + assert_finality_lag, assert_para_throughput, create_assign_core_call, }; use polkadot_primitives::{CoreIndex, Id as ParaId}; use serde_json::json; @@ -84,7 +84,7 @@ async fn doesnt_break_parachains_test() -> Result<(), anyhow::Error> { let para_id = ParaId::from(2000); // Expect the parachain to be making normal progress, 1 candidate backed per relay chain block. // Lowering to 12 to make sure CI passes. 
- assert_finalized_para_throughput(&relay_client, 15, [(para_id, 12..16)]).await?; + assert_para_throughput(&relay_client, 15, [(para_id, 12..16)], []).await?; let para_client = para_node.wait_client().await?; // Assert the parachain finalized block height is also on par with the number of backed diff --git a/polkadot/zombienet-sdk-tests/tests/functional/approved_peer_mixed_validators.rs b/polkadot/zombienet-sdk-tests/tests/functional/approved_peer_mixed_validators.rs index eeed6ad264432..5ae650110a9fd 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/approved_peer_mixed_validators.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/approved_peer_mixed_validators.rs @@ -9,7 +9,7 @@ use anyhow::anyhow; use tokio::time::Duration; -use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_finalized_para_throughput}; +use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_para_throughput}; use polkadot_primitives::Id as ParaId; use serde_json::json; use zombienet_orchestrator::network::node::LogLineCountOptions; @@ -109,10 +109,11 @@ async fn approved_peer_mixed_validators_test() -> Result<(), anyhow::Error> { // The min throughput for para 2000 is going to be lower, but it depends on how the old // validators are distributed into backing groups. 
- assert_finalized_para_throughput( + assert_para_throughput( &relay_client, 15, [(ParaId::from(2000), 6..15), (ParaId::from(2001), 11..16)], + [], ) .await?; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs b/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs index 44ca4c9f9b8fa..3ca8dc2c862d3 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs @@ -5,7 +5,7 @@ use anyhow::anyhow; -use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_finalized_para_throughput}; +use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_para_throughput}; use polkadot_primitives::Id as ParaId; use serde_json::json; use zombienet_sdk::{ @@ -75,10 +75,11 @@ async fn async_backing_6_seconds_rate_test() -> Result<(), anyhow::Error> { let relay_client: OnlineClient = relay_node.wait_client().await?; - assert_finalized_para_throughput( + assert_para_throughput( &relay_client, 15, [(ParaId::from(2000), 11..16), (ParaId::from(2001), 11..16)], + [], ) .await?; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs b/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs index 0fcd0fb768879..88973176b7f38 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs @@ -7,7 +7,7 @@ use anyhow::anyhow; use tokio::time::Duration; -use cumulus_zombienet_sdk_helpers::{assert_finalized_para_throughput, create_assign_core_call}; +use cumulus_zombienet_sdk_helpers::{assert_para_throughput, create_assign_core_call}; use polkadot_primitives::Id as ParaId; use serde_json::json; use zombienet_orchestrator::network::node::LogLineCountOptions; @@ -99,7 +99,7 @@ async fn duplicate_collations_test() -> Result<(), anyhow::Error> { log::info!("2 more cores assigned to 
parachain-2000"); - assert_finalized_para_throughput(&relay_client, 15, [(ParaId::from(2000), 40..46)]).await?; + assert_para_throughput(&relay_client, 15, [(ParaId::from(2000), 40..46)], []).await?; let log_line_options = LogLineCountOptions::new( |n| n == 1, diff --git a/polkadot/zombienet-sdk-tests/tests/functional/shared_core_idle_parachain.rs b/polkadot/zombienet-sdk-tests/tests/functional/shared_core_idle_parachain.rs index 90bf26e51fb8f..69c7a085b705d 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/shared_core_idle_parachain.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/shared_core_idle_parachain.rs @@ -6,7 +6,7 @@ use anyhow::anyhow; -use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_finalized_para_throughput}; +use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_para_throughput}; use polkadot_primitives::Id as ParaId; use serde_json::json; use zombienet_sdk::{ @@ -91,7 +91,7 @@ async fn shared_core_idle_parachain_test() -> Result<(), anyhow::Error> { // Check that para 2000 is essentially getting 12-second block time, while para 2001 does not // produce anything. 
- assert_finalized_para_throughput(&relay_client, 15, [(ParaId::from(2000), 5..9)]).await?; + assert_para_throughput(&relay_client, 15, [(ParaId::from(2000), 5..9)], []).await?; assert_finality_lag(¶_node_2000.wait_client().await?, 5).await?; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs b/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs index f3eb0ef4eac1d..8cfa2ed6ece8a 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs @@ -5,7 +5,7 @@ use anyhow::anyhow; -use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_finalized_para_throughput}; +use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_para_throughput}; use polkadot_primitives::Id as ParaId; use serde_json::json; use zombienet_sdk::{ @@ -63,10 +63,11 @@ async fn sync_backing_test() -> Result<(), anyhow::Error> { let relay_client: OnlineClient = relay_node.wait_client().await?; - assert_finalized_para_throughput( + assert_para_throughput( &relay_client, 15, [(ParaId::from(2500), 5..9)], + [], ) .await?; From 04672c8936ad15169666271fa50a403edc0c66f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 7 Jul 2025 15:30:12 +0200 Subject: [PATCH 065/312] Fix --- .../zombienet-sdk/tests/elastic_scaling/pov_bundling.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs index 8355c596a3a5c..293ab18b2a174 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs @@ -18,8 +18,7 @@ use anyhow::anyhow; use cumulus_zombienet_sdk_helpers::{ - assert_finality_lag, assert_finalized_para_throughput, assert_para_throughput, - create_assign_core_call, + assert_finality_lag, 
assert_para_throughput, create_assign_core_call, }; use polkadot_primitives::Id as ParaId; use serde_json::json; @@ -57,7 +56,7 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { let relay_client: OnlineClient = relay_node.wait_client().await?; let alice = dev::alice(); - assert_finalized_para_throughput( + assert_para_throughput( &relay_client, 6, [(ParaId::from(PARA_ID), 4..6)], @@ -78,7 +77,7 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { .await?; log::info!("2 more cores assigned to the parachain"); - assert_finalized_para_throughput( + assert_para_throughput( &relay_client, 6, [(ParaId::from(PARA_ID), 12..18)], @@ -97,7 +96,7 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { .await?; log::info!("3 more cores assigned to the parachain"); - assert_finalized_para_throughput( + assert_para_throughput( &relay_client, 6, [(ParaId::from(PARA_ID), 24..36)], From cb912d9261a858fbfda19e30c458191071b697f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 8 Jul 2025 23:45:08 +0200 Subject: [PATCH 066/312] Introduce `SlotSchedule` runtime api --- .../slot_based/block_builder_task.rs | 248 +++++++++--------- .../aura/src/collators/slot_based/mod.rs | 3 +- .../lib/src/fake_runtime_api/utils.rs | 6 + .../polkadot-omni-node/lib/src/nodes/aura.rs | 14 +- cumulus/primitives/core/src/lib.rs | 19 ++ cumulus/test/runtime/src/lib.rs | 11 + ...cks_from_tip_without_connected_collator.rs | 2 +- .../node/subsystem-util/src/runtime/mod.rs | 161 ++++++------ 8 files changed, 253 insertions(+), 211 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 63c49515ff996..63da1731e9d62 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -35,10 +35,11 @@ use 
cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; use cumulus_primitives_core::{ extract_relay_parent, rpsr_digest, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, - PersistedValidationData, RelayParentOffsetApi, + PersistedValidationData, RelayParentOffsetApi, SlotSchedule, }; use cumulus_relay_chain_interface::RelayChainInterface; use futures::prelude::*; +use polkadot_node_subsystem_util::runtime::ClaimQueueSnapshot; use polkadot_primitives::{ Block as RelayBlock, CoreIndex, Hash as RelayHash, Header as RelayHeader, Id as ParaId, }; @@ -132,6 +133,7 @@ where Client::Api: AuraApi + RelayParentOffsetApi + AuraUnincludedSegmentApi + + SlotSchedule + BlockBuilder, Backend: sc_client_api::Backend + 'static, RelayClient: RelayChainInterface + Clone + 'static, @@ -311,8 +313,55 @@ where "Claiming slot." ); + let mut cores = match determine_cores( + &mut relay_chain_data_cache, + &relay_parent_header, + para_id, + &initial_parent.header, + ) + .await + { + Ok(Some(core)) => core, + Ok(None) => { + tracing::debug!( + target: crate::LOG_TARGET, + relay_parent = ?relay_parent, + "Not cores scheduled." + ); + continue; + }, + Err(()) => { + tracing::error!( + target: crate::LOG_TARGET, + relay_parent = ?relay_parent, + "Failed to determine cores." 
+ ); + + break; + }, + }; + + let slot_schedule = match para_client + .runtime_api() + .next_slot_schedule(initial_parent.hash, cores.total_cores()) + { + Ok(schedule) => schedule, + Err(error) => { + tracing::debug!( + target: crate::LOG_TARGET, + block = ?initial_parent.hash, + ?error, + "Failed to fetch `slot_schedule`, assuming one block with 2s" + ); + vec![Duration::from_secs(2)] + }, + }; + + let blocks_per_core = (slot_schedule.len() as u32 / cores.total_cores()).max(1); + let mut pov_parent_header = initial_parent.header; let mut pov_parent_hash = initial_parent.hash; + let mut slot_schedule = slot_schedule.into_iter(); loop { match build_collation_for_core( @@ -323,37 +372,36 @@ where max_pov_size, para_id, &relay_client, - &*para_client, - &mut relay_chain_data_cache, &code_hash_provider, - relay_chain_slot_duration, &slot_claim, &collator_sender, authoring_duration, &mut collator, allowed_pov_size, + cores.core_info(), + cores.core_index(), + (&mut slot_schedule).take(blocks_per_core as usize), ) .await { - NextBlockProductionStep::NextCore { last_block_header } => { - pov_parent_header = last_block_header; + Ok(Some(header)) => { + pov_parent_header = header; pov_parent_hash = pov_parent_header.hash(); }, - NextBlockProductionStep::NextSlot => break, - NextBlockProductionStep::Stop => return, + // Let's wait for the next slot + Ok(None) => break, + Err(()) => return, + } + + if !cores.advance() { + break } } } } } -enum NextBlockProductionStep { - NextCore { last_block_header: Block::Header }, - NextSlot, - Stop, -} - -async fn build_collation_for_core( +async fn build_collation_for_core( pov_parent_header: Block::Header, pov_parent_hash: Block::Hash, relay_parent_header: &RelayHeader, @@ -361,30 +409,17 @@ async fn build_collation_for_core, code_hash_provider: &impl consensus_common::ValidationCodeHashProvider, - relay_chain_slot_duration: Duration, slot_claim: &SlotClaim, collator_sender: &sc_utils::mpsc::TracingUnboundedSender>, authoring_duration: 
Duration, collator: &mut Collator, allowed_pov_size: usize, -) -> NextBlockProductionStep + core_info: CoreInfo, + core_index: CoreIndex, + block_schedule: impl Iterator, +) -> Result, ()> where - Client: ProvideRuntimeApi - + UsageProvider - + BlockOf - + AuxStore - + HeaderBackend - + BlockBackend - + Send - + Sync - + 'static, - Client::Api: AuraApi - + RelayParentOffsetApi - + AuraUnincludedSegmentApi - + BlockBuilder, RelayClient: RelayChainInterface + 'static, P: Pair, P::Public: AppPublic + Member + Codec, @@ -408,78 +443,13 @@ where ?pov_parent_hash, "Could not fetch validation code hash", ); - return NextBlockProductionStep::Stop + + return Err(()) }; check_validation_code_or_log(&validation_code_hash, para_id, relay_client, relay_parent_hash) .await; - let Ok(block_rate) = para_client.runtime_api().block_rate(pov_parent_hash) else { - tracing::error!( - target: crate::LOG_TARGET, - "Failed to fetch block rate." - ); - return NextBlockProductionStep::Stop - }; - - // Retrieve the core selector. 
- let (core_selector, claim_queue_offset, core_index, number_of_cores) = match determine_core( - relay_chain_data_cache, - relay_parent_header, - para_id, - &pov_parent_header, - ) - .await - { - Err(()) => { - tracing::debug!( - target: LOG_TARGET, - relay_parent = ?relay_parent_hash, - "Failed to determine core" - ); - - return NextBlockProductionStep::Stop - }, - Ok(Some(res)) => { - tracing::debug!( - target: LOG_TARGET, - relay_parent = ?relay_parent_hash, - core_selector = ?res.0, - claim_queue_offset = ?res.1, - number_of_cores = %res.3, - "Going to claim core", - ); - - res - }, - Ok(None) => { - tracing::debug!( - target: LOG_TARGET, - relay_parent = ?relay_parent_hash, - "No available core" - ); - - return NextBlockProductionStep::NextSlot - }, - }; - - let block_time = block_rate.block_time.as_regular(); - - let (blocks_per_core, max_blocks_per_relay_slot) = match block_time { - Some(bt) if bt < relay_chain_slot_duration => ( - (relay_chain_slot_duration.as_millis() / number_of_cores as u128) / bt.as_millis(), - relay_chain_slot_duration.as_millis() / bt.as_millis(), - ), - _ => (1, 1), - }; - - tracing::trace!( - target: LOG_TARGET, - %blocks_per_core, - ?block_rate, - "Block rate configuration", - ); - let mut blocks = Vec::new(); let mut proofs = Vec::new(); let mut ignored_nodes = IgnoredNodes::default(); @@ -487,8 +457,8 @@ where let mut parent_hash = pov_parent_hash; let mut parent_header = pov_parent_header.clone(); - for _ in 0..blocks_per_core { - let expected_block_end = Instant::now() + block_time.unwrap_or_default(); + for block_time in block_schedule { + let expected_block_end = Instant::now() + block_time; let (parachain_inherent_data, other_inherent_data) = match collator .create_inherent_data( @@ -501,7 +471,7 @@ where { Err(err) => { tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to create inherent data."); - return NextBlockProductionStep::NextSlot + return Ok(None) }, Ok(x) => x, }; @@ -510,12 +480,7 @@ where 
.build_block_and_import( &parent_header, slot_claim, - Some(vec![CumulusDigestItem::CoreInfo(CoreInfo { - selector: core_selector, - claim_queue_offset, - number_of_cores: number_of_cores.into(), - }) - .to_digest_item()]), + Some(vec![CumulusDigestItem::CoreInfo(core_info.clone()).to_digest_item()]), (parachain_inherent_data, other_inherent_data), authoring_duration, allowed_pov_size, @@ -523,7 +488,7 @@ where .await else { tracing::error!(target: crate::LOG_TARGET, "Unable to build block at slot."); - return NextBlockProductionStep::NextSlot; + return Ok(None); }; parent_hash = res.block.header().hash(); @@ -555,11 +520,9 @@ where max_pov_size: validation_data.max_pov_size, }) { tracing::error!(target: crate::LOG_TARGET, ?err, "Unable to send block to collation task."); - NextBlockProductionStep::Stop - } else if max_blocks_per_relay_slot > 1 && core_selector.0 as u16 + 1 < number_of_cores { - NextBlockProductionStep::NextCore { last_block_header: parent_header } + Err(()) } else { - NextBlockProductionStep::NextSlot + Ok(Some(parent_header)) } } @@ -646,15 +609,55 @@ where Ok(RelayParentData::new_with_descendants(relay_parent, required_ancestors.into())) } -/// Determine the core for the given `para_id`. +/// Return value of [`determine_cores`]. +struct Cores { + selector: CoreSelector, + claim_queue_offset: ClaimQueueOffset, + core_indices: Vec, +} + +impl Cores { + /// Returns the current [`CoreInfo`]. + fn core_info(&self) -> CoreInfo { + CoreInfo { + selector: self.selector, + claim_queue_offset: self.claim_queue_offset, + number_of_cores: (self.core_indices.len() as u16).into(), + } + } + + /// Returns the current [`CoreIndex`]. + fn core_index(&self) -> CoreIndex { + self.core_indices[self.selector.0 as usize] + } + + /// Advance to the next available core. + /// + /// Returns `false` if there is no core left. 
+ fn advance(&mut self) -> bool { + if self.selector.0 as usize + 1 < self.core_indices.len() { + self.selector.0 += 1; + true + } else { + false + } + } + + /// Returns the total number of cores. + fn total_cores(&self) -> u32 { + self.core_indices.len() as u32 + } +} + +/// Determine the cores for the given `para_id`. /// -/// Takes into account the `parent` core to find the next available core. -async fn determine_core( +/// Takes into account the `parent` core to find the next available cores. +async fn determine_cores( relay_chain_data_cache: &mut RelayChainDataCache, relay_parent: &RelayHeader, para_id: ParaId, parent: &Header, -) -> Result, ()> { +) -> Result, ()> { let core_info = CumulusDigestItem::find_core_info(parent.digest()); let last_relay_parent = if parent.number().is_zero() { @@ -682,18 +685,21 @@ async fn determine_core( let res = if relay_parent_offset > core_info.as_ref().map(|ci| ci.claim_queue_offset).unwrap_or_default().0 as u32 { - claim_queue.find_core(para_id, 0, 0) + claim_queue.find_cores(para_id, 0) } else { - claim_queue.find_core( + claim_queue.find_cores( para_id, - core_info.as_ref().map_or(0, |ci| ci.selector.0 as u32 + 1), core_info .as_ref() .map_or(0, |ci| ci.claim_queue_offset.0 as u32 - relay_parent_offset), ) }; - Ok(res) + Ok(res.map(|(cores, claim_queue_offset)| Cores { + selector: CoreSelector(0), + claim_queue_offset, + core_indices: cores, + })) } #[cfg(test)] diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index 5829a46fc6dd4..3bfda6a36379d 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -73,7 +73,7 @@ use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterfa use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; use cumulus_client_consensus_proposer::ProposerInterface; use 
cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::RelayParentOffsetApi; +use cumulus_primitives_core::{RelayParentOffsetApi, SlotSchedule}; use cumulus_relay_chain_interface::RelayChainInterface; use futures::FutureExt; use polkadot_primitives::{ @@ -163,6 +163,7 @@ pub fn run + AuraUnincludedSegmentApi + RelayParentOffsetApi + + SlotSchedule + BlockBuilder, Backend: sc_client_api::Backend + 'static, RClient: RelayChainInterface + Clone + 'static, diff --git a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs index 301771caaf074..6bcdbdafd213c 100644 --- a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs +++ b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs @@ -236,6 +236,12 @@ macro_rules! impl_node_runtime_apis { unimplemented!() } } + + impl cumulus_primitives_core::SlotSchedule<$block> for $runtime { + fn next_slot_schedule(_: u32) -> Vec { + unimplemented!() + } + } } }; } diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs index 76e571aff2d03..3bf113947d4f0 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs @@ -49,7 +49,9 @@ use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; #[allow(deprecated)] use cumulus_client_service::CollatorSybilResistance; -use cumulus_primitives_core::{relay_chain::ValidationCode, GetParachainInfo, ParaId}; +use cumulus_primitives_core::{ + relay_chain::ValidationCode, GetParachainInfo, ParaId, SlotSchedule, +}; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; use futures::prelude::*; use polkadot_primitives::CollatorPair; @@ -216,6 +218,7 @@ where RuntimeApi::RuntimeApi: AuraRuntimeApi + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + 
substrate_frame_rpc_system::AccountNonceApi + + SlotSchedule + GetParachainInfo, AuraId: AuraIdT + Sync, { @@ -247,7 +250,7 @@ impl, RuntimeApi, AuraId> StartSlotBasedAuraConsensus where RuntimeApi: ConstructNodeRuntimeApi>, - RuntimeApi::RuntimeApi: AuraRuntimeApi, + RuntimeApi::RuntimeApi: AuraRuntimeApi + SlotSchedule, AuraId: AuraIdT + Sync, { #[docify::export_content] @@ -274,7 +277,10 @@ where ) where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, - CHP: cumulus_client_consensus_common::ValidationCodeHashProvider + Send + Sync + 'static, + CHP: cumulus_client_consensus_common::ValidationCodeHashProvider + + Send + + Sync + + 'static, Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + Clone + 'static, Spawner: SpawnNamed, @@ -298,7 +304,7 @@ impl, RuntimeApi, AuraId> > for StartSlotBasedAuraConsensus where RuntimeApi: ConstructNodeRuntimeApi>, - RuntimeApi::RuntimeApi: AuraRuntimeApi, + RuntimeApi::RuntimeApi: AuraRuntimeApi + SlotSchedule, AuraId: AuraIdT + Sync, { fn start_consensus( diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 0000b2727911e..986b9db6a2fea 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -20,6 +20,8 @@ extern crate alloc; +use core::time::Duration; + use alloc::vec::Vec; use codec::{Compact, Decode, DecodeAll, DecodeWithMemTracking, Encode, MaxEncodedLen}; use polkadot_parachain_primitives::primitives::HeadData; @@ -466,4 +468,21 @@ sp_api::decl_runtime_apis! { /// Fetch the slot offset that is expected from the relay chain. fn relay_parent_offset() -> u32; } + + /// API for parachain slot scheduling. + /// + /// This runtime API allows the parachain runtime to communicate the number of scheduled blocks + /// to the node side. The node will call this API every relay chain slot (~6 seconds) + /// to get the scheduled parachain blocks. 
The block interval is calculated by dividing the + /// relay chain slot duration by the number of scheduled blocks. + pub trait SlotSchedule { + /// Get the block production schedule for the next relay chain slot. + /// + /// - `num_cores`: The number of cores assigned to this parachain + /// + /// Returns a vector of [`Duration`] values each representing the block time on standard + /// hardware in wall clock time. This should be used as the upper wall clock time when + /// building a block. + fn next_slot_schedule(num_cores: u32) -> Vec; + } } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 489973ba0d9bb..5b61b1e3343f0 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -62,6 +62,7 @@ mod test_pallet; extern crate alloc; use alloc::{vec, vec::Vec}; +use core::time::Duration; use frame_support::{derive_impl, traits::OnRuntimeUpgrade, PalletId}; use sp_api::{decl_runtime_apis, impl_runtime_apis}; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -628,6 +629,16 @@ impl_runtime_apis! { } } + + impl cumulus_primitives_core::SlotSchedule for Runtime { + fn next_slot_schedule(num_cores: u32) -> Vec { + const TARGET_BLOCK_INTERVAL: u32 = 12; + + let block_time = Duration::from_secs(2) * num_cores / TARGET_BLOCK_INTERVAL; + + vec![block_time.min(Duration::from_millis(500)); TARGET_BLOCK_INTERVAL as usize] + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/zombienet/zombienet-sdk/tests/sync_blocks/sync_blocks_from_tip_without_connected_collator.rs b/cumulus/zombienet/zombienet-sdk/tests/sync_blocks/sync_blocks_from_tip_without_connected_collator.rs index 962a55dc65d3e..c459f5c334754 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/sync_blocks/sync_blocks_from_tip_without_connected_collator.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/sync_blocks/sync_blocks_from_tip_without_connected_collator.rs @@ -22,7 +22,7 @@ async fn sync_blocks_from_tip_without_connected_collator() -> Result<(), anyhow: let relay_client: OnlineClient = relay_alice.wait_client().await?; log::info!("Ensuring parachain making progress"); - assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 9..11)]).await?; + assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 9..11)], []).await?; let para_ferdie = network.get_node("ferdie")?; let para_eve = network.get_node("eve")?; diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 452e1510273bd..527e645dfba37 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -33,10 +33,7 @@ use polkadot_node_subsystem_types::UnpinHandle; use polkadot_primitives::{ node_features::FeatureIndex, slashing, - vstaging::{ - CandidateEvent, ClaimQueueOffset, CoreSelector, CoreState, OccupiedCore, - ScrapedOnChainVotes, - }, + vstaging::{CandidateEvent, ClaimQueueOffset, CoreState, OccupiedCore, ScrapedOnChainVotes}, CandidateHash, CoreIndex, EncodeAs, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, IndexedVec, NodeFeatures, SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, @@ -517,48 +514,33 @@ impl ClaimQueueSnapshot { self.0.iter() } - /// Find a core for the given `para_id`. 
+ /// Find cores for the given `para_id` at the given `claim_queue_offset`. /// - /// `cores_claimed` is the number of cores already claimed from this snapshot for `para_id` at - /// the given `claim_queue_offset`. - /// - /// Returns the core selector, claim queue offset, core index and the number of cores at claim - /// queue offset. - pub fn find_core( + /// It is not guaranteed that at the given `claim_queue_offset` cores are available for + /// the `para_id`. Thus, the claim queue offset for the core indices is returned as well. + pub fn find_cores( &self, para_id: ParaId, - mut cores_claimed: u32, claim_queue_offset: u32, - ) -> Option<(CoreSelector, ClaimQueueOffset, CoreIndex, u16)> { - let mut offset_to_core_count = BTreeMap::>::new(); + ) -> Option<(Vec, ClaimQueueOffset)> { + let mut offset_to_cores = BTreeMap::>::new(); self.0.iter().for_each(|(core_index, ids)| { ids.iter() .enumerate() .filter_map(|(i, id)| (*id == para_id).then(|| i)) .for_each(|offset| { - offset_to_core_count.entry(offset).or_default().push(*core_index); + offset_to_cores.entry(offset).or_default().push(*core_index); }); }); - for (offset, cores) in offset_to_core_count { - if (offset as u32) < claim_queue_offset { - continue - } - - if let Some(core_index) = cores.get(cores_claimed as usize) { - return Some(( - CoreSelector(cores_claimed as u8), - ClaimQueueOffset(offset as u8), - *core_index, - cores.len() as u16, - )) + offset_to_cores.into_iter().find_map(|(offset, cores)| { + if (offset as u32) >= claim_queue_offset { + Some((cores, ClaimQueueOffset(offset as u8))) + } else { + None } - - cores_claimed -= cores.len() as u32; - } - - None + }) } } @@ -638,7 +620,7 @@ mod test { use super::*; #[test] - fn find_core_works() { + fn find_cores_works() { let claim_queue = ClaimQueueSnapshot(BTreeMap::from_iter( [ ( @@ -661,56 +643,67 @@ mod test { .into_iter(), )); - assert_eq!( - claim_queue.find_core(1u32.into(), 0, 0).unwrap(), - (CoreSelector(0), ClaimQueueOffset(0), 
CoreIndex(0), 3) - ); - - assert_eq!( - claim_queue.find_core(1u32.into(), 1, 0).unwrap(), - (CoreSelector(1), ClaimQueueOffset(0), CoreIndex(1), 3) - ); - - assert_eq!( - claim_queue.find_core(1u32.into(), 2, 0).unwrap(), - (CoreSelector(2), ClaimQueueOffset(0), CoreIndex(2), 3) - ); - - assert_eq!( - claim_queue.find_core(1u32.into(), 3, 0).unwrap(), - (CoreSelector(0), ClaimQueueOffset(1), CoreIndex(1), 2) - ); - - assert_eq!( - claim_queue.find_core(1u32.into(), 4, 0).unwrap(), - (CoreSelector(1), ClaimQueueOffset(1), CoreIndex(3), 2) - ); - - assert_eq!( - claim_queue.find_core(1u32.into(), 5, 0).unwrap(), - (CoreSelector(0), ClaimQueueOffset(2), CoreIndex(0), 1) - ); - - assert_eq!(claim_queue.find_core(1u32.into(), 6, 0), None); - - assert_eq!( - claim_queue.find_core(1u32.into(), 0, 1).unwrap(), - (CoreSelector(0), ClaimQueueOffset(1), CoreIndex(1), 2) - ); - - assert_eq!( - claim_queue.find_core(1u32.into(), 2, 1).unwrap(), - (CoreSelector(0), ClaimQueueOffset(2), CoreIndex(0), 1) - ); - - assert_eq!( - claim_queue.find_core(3u32.into(), 0, 0).unwrap(), - (CoreSelector(0), ClaimQueueOffset(2), CoreIndex(2), 2) - ); - - assert_eq!( - claim_queue.find_core(3u32.into(), 1, 0).unwrap(), - (CoreSelector(1), ClaimQueueOffset(2), CoreIndex(3), 2) - ); + // Test finding cores for para_id 1 at offset 0 + let (cores, actual_offset) = claim_queue.find_cores(1u32.into(), 0).unwrap(); + assert_eq!(cores.len(), 3); + assert!(cores.contains(&CoreIndex(0))); + assert!(cores.contains(&CoreIndex(1))); + assert!(cores.contains(&CoreIndex(2))); + assert_eq!(actual_offset, ClaimQueueOffset(0)); + + // Test finding cores for para_id 1 at offset 1 + let (cores, actual_offset) = claim_queue.find_cores(1u32.into(), 1).unwrap(); + assert_eq!(cores.len(), 2); + assert!(cores.contains(&CoreIndex(1))); + assert!(cores.contains(&CoreIndex(3))); + assert_eq!(actual_offset, ClaimQueueOffset(1)); + + // Test finding cores for para_id 1 at offset 2 + let (cores, actual_offset) = 
claim_queue.find_cores(1u32.into(), 2).unwrap(); + assert_eq!(cores.len(), 1); + assert!(cores.contains(&CoreIndex(0))); + assert_eq!(actual_offset, ClaimQueueOffset(2)); + + // Test finding cores for para_id 1 at offset 3 (no cores at this offset) + assert_eq!(claim_queue.find_cores(1u32.into(), 3), None); + + // Test finding cores for para_id 2 at offset 0 + let (cores, actual_offset) = claim_queue.find_cores(2u32.into(), 0).unwrap(); + assert_eq!(cores.len(), 1); + assert!(cores.contains(&CoreIndex(3))); + assert_eq!(actual_offset, ClaimQueueOffset(0)); + + // Test finding cores for para_id 2 at offset 1 + let (cores, actual_offset) = claim_queue.find_cores(2u32.into(), 1).unwrap(); + assert_eq!(cores.len(), 2); + assert!(cores.contains(&CoreIndex(0))); + assert!(cores.contains(&CoreIndex(2))); + assert_eq!(actual_offset, ClaimQueueOffset(1)); + + // Test finding cores for para_id 2 at offset 2 + let (cores, actual_offset) = claim_queue.find_cores(2u32.into(), 2).unwrap(); + assert_eq!(cores.len(), 1); + assert!(cores.contains(&CoreIndex(1))); + assert_eq!(actual_offset, ClaimQueueOffset(2)); + + // Test finding cores for para_id 3 at offset 0 (should find at offset 2) + let (cores, actual_offset) = claim_queue.find_cores(3u32.into(), 0).unwrap(); + assert_eq!(cores.len(), 2); + assert!(cores.contains(&CoreIndex(2))); + assert!(cores.contains(&CoreIndex(3))); + assert_eq!(actual_offset, ClaimQueueOffset(2)); + + // Test finding cores for para_id 3 at offset 2 + let (cores, actual_offset) = claim_queue.find_cores(3u32.into(), 2).unwrap(); + assert_eq!(cores.len(), 2); + assert!(cores.contains(&CoreIndex(2))); + assert!(cores.contains(&CoreIndex(3))); + assert_eq!(actual_offset, ClaimQueueOffset(2)); + + // Test finding cores for para_id 3 at offset 3 (no cores at this offset) + assert_eq!(claim_queue.find_cores(3u32.into(), 3), None); + + // Test finding cores for non-existent para_id + assert_eq!(claim_queue.find_cores(99u32.into(), 0), None); } } From 
88794ad083ef2211d1cf123091cbbbd51a90deaa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 9 Jul 2025 16:27:30 +0200 Subject: [PATCH 067/312] Fix the ranges --- .../tests/elastic_scaling/pov_bundling.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs index 293ab18b2a174..614d329641962 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs @@ -59,8 +59,8 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { assert_para_throughput( &relay_client, 6, - [(ParaId::from(PARA_ID), 4..6)], - [(ParaId::from(PARA_ID), (para_client.clone(), 48..72))], + [(ParaId::from(PARA_ID), 4..7)], + [(ParaId::from(PARA_ID), (para_client.clone(), 48..73))], ) .await?; // 3 relay chain blocks @@ -80,8 +80,8 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { assert_para_throughput( &relay_client, 6, - [(ParaId::from(PARA_ID), 12..18)], - [(ParaId::from(PARA_ID), (para_client.clone(), 48..72))], + [(ParaId::from(PARA_ID), 12..19)], + [(ParaId::from(PARA_ID), (para_client.clone(), 48..73))], ) .await?; assert_finality_lag(¶_client, 72).await?; @@ -99,8 +99,8 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { assert_para_throughput( &relay_client, 6, - [(ParaId::from(PARA_ID), 24..36)], - [(ParaId::from(PARA_ID), (para_client.clone(), 48..72))], + [(ParaId::from(PARA_ID), 24..37)], + [(ParaId::from(PARA_ID), (para_client.clone(), 48..73))], ) .await?; From 679d281f54eecf23cc5e3e7ec03d2d11abdd16f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 10 Jul 2025 13:04:32 +0200 Subject: [PATCH 068/312] Make `SlotTimer` use `relay_slot_duration` --- .../slot_based/block_builder_task.rs | 20 +- .../src/collators/slot_based/slot_timer.rs | 221 ++++-------------- 
2 files changed, 57 insertions(+), 184 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 63da1731e9d62..2cc18dd40925d 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -21,7 +21,7 @@ use crate::{ collators::{ check_validation_code_or_log, slot_based::{ - relay_chain_data_cache::{RelayChainData, RelayChainDataCache}, + relay_chain_data_cache::RelayChainDataCache, slot_timer::{SlotInfo, SlotTimer}, }, RelayParentData, @@ -39,7 +39,6 @@ use cumulus_primitives_core::{ }; use cumulus_relay_chain_interface::RelayChainInterface; use futures::prelude::*; -use polkadot_node_subsystem_util::runtime::ClaimQueueSnapshot; use polkadot_primitives::{ Block as RelayBlock, CoreIndex, Hash as RelayHash, Header as RelayHeader, Id as ParaId, }; @@ -167,11 +166,7 @@ where max_pov_percentage, } = params; - let mut slot_timer = SlotTimer::<_, _, P>::new_with_offset( - para_client.clone(), - slot_offset, - relay_chain_slot_duration, - ); + let mut slot_timer = SlotTimer::new_with_offset(slot_offset, relay_chain_slot_duration); let mut collator = { let params = collator_util::Params { @@ -228,6 +223,9 @@ where continue; }; + // Use the slot calculated from relay parent + let slot_info = para_slot; + let relay_parent = rp_data.relay_parent().hash(); let relay_parent_header = rp_data.relay_parent().clone(); @@ -273,9 +271,9 @@ where let included_header_hash = included_header.hash(); let slot_claim = match crate::collators::can_build_upon::<_, _, P>( - para_slot.slot, + slot_info.slot, relay_slot, - para_slot.timestamp, + slot_info.timestamp, initial_parent.hash, included_header_hash, &*para_client, @@ -293,7 +291,7 @@ where included_hash = ?included_header_hash, included_num = %included_header.number(), initial_parent = 
?initial_parent.hash, - slot = ?para_slot.slot, + slot = ?slot_info.slot, "Not eligible to claim slot." ); continue @@ -309,7 +307,7 @@ where included_hash = ?included_header_hash, included_num = %included_header.number(), initial_parent = ?initial_parent.hash, - slot = ?para_slot.slot, + slot = ?slot_info.slot, "Claiming slot." ); diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs index 53ef8eb3300af..e606bf579f101 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs @@ -16,26 +16,10 @@ // along with Cumulus. If not, see . use crate::LOG_TARGET; -use codec::Codec; use cumulus_primitives_aura::Slot; -use cumulus_primitives_core::BlockT; -use sc_client_api::UsageProvider; use sc_consensus_aura::SlotDuration; -use sp_api::ProvideRuntimeApi; -use sp_application_crypto::AppPublic; -use sp_consensus_aura::AuraApi; -use sp_core::Pair; -use sp_runtime::traits::Member; use sp_timestamp::Timestamp; -use std::{ - cmp::{max, min}, - sync::Arc, - time::Duration, -}; - -/// Lower limits of allowed block production interval. -/// Defensive mechanism, corresponds to 12 cores at 6 second block time. -const BLOCK_PRODUCTION_MINIMUM_INTERVAL_MS: Duration = Duration::from_millis(500); +use std::time::Duration; #[derive(Debug)] pub(crate) struct SlotInfo { @@ -43,62 +27,16 @@ pub(crate) struct SlotInfo { pub slot: Slot, } -/// Manages block-production timings based on chain parameters and assigned cores. +/// Manages block-production timings based on chain parameters. #[derive(Debug)] -pub(crate) struct SlotTimer { - /// Client that is used for runtime calls - client: Arc, +pub(crate) struct SlotTimer { /// Offset the current time by this duration. time_offset: Duration, - /// Last reported core count. - last_reported_core_num: Option, - /// Slot duration of the relay chain. 
This is used to compute how man block-production - /// attempts we should trigger per relay chain block. + /// Slot duration of the relay chain. This is used to compute when to wake up for + /// block production attempts. relay_slot_duration: Duration, /// Stores the latest slot that was reported by [`Self::wait_until_next_slot`]. last_reported_slot: Option, - _marker: std::marker::PhantomData<(Block, Box)>, -} - -/// Compute when to try block-authoring next. -/// The exact time point is determined by the slot duration of relay- and parachain as -/// well as the last observed core count. If more cores are available, we attempt to author blocks -/// for them. -/// -/// Returns a tuple with: -/// - `Duration`: How long to wait until the next slot. -/// - `Slot`: The AURA slot used for authoring -fn compute_next_wake_up_time( - para_slot_duration: SlotDuration, - relay_slot_duration: Duration, - core_count: Option, - time_now: Duration, - time_offset: Duration, -) -> (Duration, Slot) { - let para_slots_per_relay_block = - (relay_slot_duration.as_millis() / para_slot_duration.as_millis() as u128) as u32; - let assigned_core_num = core_count.unwrap_or(1); - - // Trigger at least once per relay block, if we have for example 12 second slot duration, - // we should still produce two blocks if we are scheduled on every relay block. - let mut block_production_interval = min(para_slot_duration.as_duration(), relay_slot_duration); - - if assigned_core_num > para_slots_per_relay_block && - para_slot_duration.as_duration() >= relay_slot_duration - { - block_production_interval = - max(relay_slot_duration / assigned_core_num, BLOCK_PRODUCTION_MINIMUM_INTERVAL_MS); - tracing::debug!( - target: LOG_TARGET, - ?block_production_interval, - "Expected to produce for {assigned_core_num} cores but only have {para_slots_per_relay_block} slots. Attempting to produce multiple blocks per slot." 
- ); - } - - let (duration, timestamp) = - time_until_next_attempt(time_now, block_production_interval, time_offset); - let aura_slot = Slot::from_timestamp(timestamp, para_slot_duration); - (duration, aura_slot) } /// Returns current duration since Unix epoch. @@ -110,10 +48,8 @@ fn duration_now() -> Duration { }) } -/// Returns the duration until the next block production should be attempted. -/// Returns: -/// - Duration: The duration until the next attempt. -fn time_until_next_attempt( +/// Returns the duration until the next block production slot and the timestamp at this slot. +fn time_until_next_slot( now: Duration, block_production_interval: Duration, offset: Duration, @@ -127,70 +63,55 @@ fn time_until_next_attempt( (Duration::from_millis(remaining_millis as u64), Timestamp::from(next_slot_time as u64)) } -impl SlotTimer -where - Block: BlockT, - Client: ProvideRuntimeApi + Send + Sync + 'static + UsageProvider, - Client::Api: AuraApi, - P: Pair, - P::Public: AppPublic + Member + Codec, - P::Signature: TryFrom> + Member + Codec, -{ +impl SlotTimer { /// Create a new slot timer. - pub fn new_with_offset( - client: Arc, - time_offset: Duration, - relay_slot_duration: Duration, - ) -> Self { - Self { - client, - time_offset, - last_reported_core_num: None, - relay_slot_duration, - last_reported_slot: None, - _marker: Default::default(), - } - } - - /// Inform the slot timer about the last seen number of cores. - pub fn update_scheduling(&mut self, num_cores_next_block: u32) { - self.last_reported_core_num = Some(num_cores_next_block); + pub fn new_with_offset(time_offset: Duration, relay_slot_duration: Duration) -> Self { + Self { time_offset, relay_slot_duration, last_reported_slot: None } } /// Returns a future that resolves when the next block production should be attempted. 
pub async fn wait_until_next_slot(&mut self) -> Result<(), ()> { - let Ok(slot_duration) = crate::slot_duration(&*self.client) else { - tracing::error!(target: LOG_TARGET, "Failed to fetch slot duration from runtime."); - return Err(()) - }; + let (time_until_next_attempt, timestamp) = + time_until_next_slot(duration_now(), self.relay_slot_duration, self.time_offset); - let (time_until_next_attempt, mut next_aura_slot) = compute_next_wake_up_time( - slot_duration, - self.relay_slot_duration, - self.last_reported_core_num, - duration_now(), - self.time_offset, - ); + // Calculate the current slot using the relay chain slot duration + let relay_slot_duration_for_slot = SlotDuration::from(self.relay_slot_duration); + let mut current_slot = Slot::from_timestamp(timestamp, relay_slot_duration_for_slot); match self.last_reported_slot { // If we already reported a slot, we don't want to skip a slot. But we also don't want // to go through all the slots if a node was halted for some reason. - Some(ls) if ls + 1 < next_aura_slot && next_aura_slot <= ls + 3 => { - next_aura_slot = ls + 1u64; + Some(ls) if ls + 1 < current_slot && current_slot <= ls + 3 => { + current_slot = ls + 1u64; + // Don't sleep since we're catching up + tracing::debug!( + target: LOG_TARGET, + last_slot = ?ls, + current_slot = ?current_slot, + "Catching up on skipped slot." + ); }, None | Some(_) => { + tracing::trace!( + target: LOG_TARGET, + time_to_sleep = ?time_until_next_attempt, + "Feeling sleepy 😴" + ); + + // Sleep based on relay chain timing tokio::time::sleep(time_until_next_attempt).await; }, } tracing::debug!( target: LOG_TARGET, - ?slot_duration, - aura_slot = ?next_aura_slot, - "New block production opportunity." + relay_slot_duration = ?self.relay_slot_duration, + current_slot = ?current_slot, + "New block production slot." 
); - self.last_reported_slot = Some(next_aura_slot); + // Update internal slot tracking + self.last_reported_slot = Some(current_slot); Ok(()) } } @@ -199,77 +120,31 @@ where mod tests { use super::*; use rstest::rstest; - use sc_consensus_aura::SlotDuration; const RELAY_CHAIN_SLOT_DURATION: u64 = 6000; #[rstest] // Test that different now timestamps have correct impact - // |||| - #[case(6000, Some(1), 1000, 0, 5000)] - #[case(6000, Some(1), 0, 0, 6000)] - #[case(6000, Some(1), 6000, 0, 6000)] - #[case(6000, Some(0), 6000, 0, 6000)] - // Test that `None` core defaults to 1 - // |||| - #[case(6000, None, 1000, 0, 5000)] - #[case(6000, None, 0, 0, 6000)] - #[case(6000, None, 6000, 0, 6000)] + #[case(1000, 0, 5000)] + #[case(0, 0, 6000)] + #[case(6000, 0, 6000)] // Test that offset affects the current time correctly - // |||| - #[case(6000, Some(1), 1000, 1000, 6000)] - #[case(6000, Some(1), 12000, 2000, 2000)] - #[case(6000, Some(1), 12000, 6000, 6000)] - #[case(6000, Some(1), 12000, 7000, 1000)] - // Test that number of cores affects the block production interval - // ||||||| - #[case(6000, Some(3), 12000, 0, 2000)] - #[case(6000, Some(2), 12000, 0, 3000)] - #[case(6000, Some(3), 11999, 0, 1)] - // High core count - // |||||||| - #[case(6000, Some(12), 0, 0, 500)] - /// Test that the minimum block interval is respected - /// at high core counts. - /// ||||||||| - #[case(6000, Some(100), 0, 0, 500)] - // Test that slot_duration works correctly - // |||| - #[case(2000, Some(1), 1000, 0, 1000)] - #[case(2000, Some(1), 3000, 0, 1000)] - #[case(2000, Some(1), 10000, 0, 2000)] - #[case(2000, Some(2), 1000, 0, 1000)] - // Cores are ignored if relay_slot_duration != para_slot_duration - // ||||||| - #[case(2000, Some(3), 3000, 0, 1000)] - // For long slot durations, we should still check - // every relay chain block for the slot. 
- // ||||| - #[case(12000, None, 0, 0, 6000)] - #[case(12000, None, 6100, 0, 5900)] - #[case(12000, None, 6000, 2000, 2000)] - #[case(12000, Some(2), 6000, 0, 3000)] - #[case(12000, Some(3), 6000, 0, 2000)] - #[case(12000, Some(3), 8100, 0, 1900)] + #[case(1000, 1000, 6000)] + #[case(12000, 2000, 2000)] + #[case(12000, 6000, 6000)] + #[case(12000, 7000, 1000)] + // Test basic timing with relay slot duration + #[case(11999, 0, 1)] fn test_get_next_slot( - #[case] para_slot_millis: u64, - #[case] core_count: Option, #[case] time_now: u64, #[case] offset_millis: u64, #[case] expected_wait_duration: u128, ) { - let para_slot_duration = SlotDuration::from_millis(para_slot_millis); // 6 second slots let relay_slot_duration = Duration::from_millis(RELAY_CHAIN_SLOT_DURATION); - let time_now = Duration::from_millis(time_now); // 1 second passed + let time_now = Duration::from_millis(time_now); let offset = Duration::from_millis(offset_millis); - let (wait_duration, _) = compute_next_wake_up_time( - para_slot_duration, - relay_slot_duration, - core_count, - time_now, - offset, - ); + let (wait_duration, _) = time_until_next_slot(time_now, relay_slot_duration, offset); - assert_eq!(wait_duration.as_millis(), expected_wait_duration, "Wait time mismatch."); // Should wait 5 seconds + assert_eq!(wait_duration.as_millis(), expected_wait_duration, "Wait time mismatch."); } } From 943ab42af84c807c2891a62767aac9cbf667f214 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 10 Jul 2025 22:18:09 +0200 Subject: [PATCH 069/312] Handle authoring time and block interval better --- .../slot_based/block_builder_task.rs | 53 +++++++++++++++---- .../aura/src/collators/slot_based/mod.rs | 4 -- .../src/collators/slot_based/slot_timer.rs | 42 +++++++++++++-- .../polkadot-omni-node/lib/src/nodes/aura.rs | 1 - cumulus/test/service/src/lib.rs | 1 - 5 files changed, 80 insertions(+), 21 deletions(-) diff --git 
a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 2cc18dd40925d..1f8549f0b8e38 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -95,8 +95,6 @@ pub struct BuilderTaskParams< pub proposer: Proposer, /// The generic collator service used to plug into this consensus engine. pub collator_service: CS, - /// The amount of time to spend authoring each block. - pub authoring_duration: Duration, /// Channel to send built blocks to the collation task. pub collator_sender: sc_utils::mpsc::TracingUnboundedSender>, /// Slot duration of the relay chain. @@ -159,7 +157,6 @@ where collator_service, collator_sender, code_hash_provider, - authoring_duration, relay_chain_slot_duration, para_backend, slot_offset, @@ -186,7 +183,7 @@ where loop { // We wait here until the next slot arrives. - if slot_timer.wait_until_next_slot().await.is_err() { + let Ok(slot_time) = slot_timer.wait_until_next_slot().await else { tracing::error!(target: LOG_TARGET, "Unable to wait for next slot."); return; }; @@ -324,7 +321,7 @@ where tracing::debug!( target: crate::LOG_TARGET, relay_parent = ?relay_parent, - "Not cores scheduled." + "No cores scheduled." ); continue; }, @@ -362,6 +359,8 @@ where let mut slot_schedule = slot_schedule.into_iter(); loop { + let time_for_core = slot_time.time_left() / cores.cores_left(); + match build_collation_for_core( pov_parent_header, pov_parent_hash, @@ -373,12 +372,12 @@ where &code_hash_provider, &slot_claim, &collator_sender, - authoring_duration, &mut collator, allowed_pov_size, cores.core_info(), cores.core_index(), (&mut slot_schedule).take(blocks_per_core as usize), + time_for_core, ) .await { @@ -399,6 +398,9 @@ where } } +/// Build a collation for one core. +/// +/// One collation can be composed of multiple blocks. 
async fn build_collation_for_core( pov_parent_header: Block::Header, pov_parent_hash: Block::Hash, @@ -410,12 +412,12 @@ async fn build_collation_for_core, slot_claim: &SlotClaim, collator_sender: &sc_utils::mpsc::TracingUnboundedSender>, - authoring_duration: Duration, collator: &mut Collator, allowed_pov_size: usize, core_info: CoreInfo, core_index: CoreIndex, - block_schedule: impl Iterator, + block_schedule: impl ExactSizeIterator, + slot_time_for_core: Duration, ) -> Result, ()> where RelayClient: RelayChainInterface + 'static, @@ -428,6 +430,8 @@ where Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, { + let core_start = Instant::now(); + let validation_data = PersistedValidationData { parent_head: pov_parent_header.encode().into(), relay_parent_number: *relay_parent_header.number(), @@ -451,12 +455,32 @@ where let mut blocks = Vec::new(); let mut proofs = Vec::new(); let mut ignored_nodes = IgnoredNodes::default(); + let num_blocks = block_schedule.len(); let mut parent_hash = pov_parent_hash; let mut parent_header = pov_parent_header.clone(); - for block_time in block_schedule { - let expected_block_end = Instant::now() + block_time; + for (block_index, block_time) in block_schedule.enumerate() { + let block_start = Instant::now(); + let slot_time_for_block = + slot_time_for_core.saturating_sub(core_start.elapsed()) / num_blocks as u32; + + if slot_time_for_block <= Duration::from_millis(20) { + tracing::error!( + target: LOG_TARGET, + slot_time_for_block_ms = %slot_time_for_block.as_millis(), + blocks_left = %(num_blocks - block_index), + ?core_index, + "Less than 20ms slot time left to produce blocks, stopping block production for core", + ); + + break + } + + // The authoring duration is either the block time returned by the runtime or the 90% of the + // rest of the slot time for the block. 
We take here 90% because we still need to create the + // inherents and need to import the block afterwards. + let authoring_duration = block_time.min(slot_time_for_block); let (parachain_inherent_data, other_inherent_data) = match collator .create_inherent_data( @@ -501,7 +525,9 @@ where blocks.push(res.block); proofs.push(res.proof); - if let Some(sleep) = expected_block_end.checked_duration_since(Instant::now()) { + // If there is still time left for the block in the slot, we sleep the rest of the time. + // This ensures that we have some steady block rate. + if let Some(sleep) = slot_time_for_block.checked_sub(block_start.elapsed()) { tokio::time::sleep(sleep).await; } } @@ -645,6 +671,11 @@ impl Cores { fn total_cores(&self) -> u32 { self.core_indices.len() as u32 } + + /// Returns the number of cores left. + fn cores_left(&self) -> u32 { + self.total_cores() - self.selector.0 as u32 + } } /// Determine the cores for the given `para_id`. diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index 3bfda6a36379d..0c14d995f0988 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -126,8 +126,6 @@ pub struct Params Self { + Self { slot_duration, slot_start_timestamp } + } + + /// Get the time remaining in this slot + pub fn time_left(&self) -> Duration { + let now = duration_now(); + let slot_end_time_millis = + self.slot_start_timestamp.as_millis() + self.slot_duration.as_millis() as u64; + let slot_end_time = Duration::from_millis(slot_end_time_millis); + + slot_end_time.saturating_sub(now) + } +} + +/// Manages block-production slots based on the relay chain slot duration. #[derive(Debug)] pub(crate) struct SlotTimer { /// Offset the current time by this duration. @@ -70,7 +96,7 @@ impl SlotTimer { } /// Returns a future that resolves when the next block production should be attempted. 
- pub async fn wait_until_next_slot(&mut self) -> Result<(), ()> { + pub async fn wait_until_next_slot(&mut self) -> Result { let (time_until_next_attempt, timestamp) = time_until_next_slot(duration_now(), self.relay_slot_duration, self.time_offset); @@ -78,11 +104,17 @@ impl SlotTimer { let relay_slot_duration_for_slot = SlotDuration::from(self.relay_slot_duration); let mut current_slot = Slot::from_timestamp(timestamp, relay_slot_duration_for_slot); + // Calculate the actual slot start timestamp (may be different if we're catching up) + let mut slot_start_timestamp = timestamp; + match self.last_reported_slot { // If we already reported a slot, we don't want to skip a slot. But we also don't want // to go through all the slots if a node was halted for some reason. Some(ls) if ls + 1 < current_slot && current_slot <= ls + 3 => { current_slot = ls + 1u64; + // Calculate the timestamp for the adjusted slot + slot_start_timestamp = + current_slot.timestamp(relay_slot_duration_for_slot).ok_or(())?; // Don't sleep since we're catching up tracing::debug!( target: LOG_TARGET, @@ -106,13 +138,15 @@ impl SlotTimer { tracing::debug!( target: LOG_TARGET, relay_slot_duration = ?self.relay_slot_duration, - current_slot = ?current_slot, + ?current_slot, + ?slot_start_timestamp, "New block production slot." 
); // Update internal slot tracking self.last_reported_slot = Some(current_slot); - Ok(()) + + Ok(SlotTime::new(self.relay_slot_duration, slot_start_timestamp)) } } diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs index 3bf113947d4f0..c15efa2506c59 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs @@ -363,7 +363,6 @@ where para_id, proposer, collator_service, - authoring_duration: Duration::from_millis(2000), reinitialize: false, slot_offset: Duration::from_secs(1), block_import_handle, diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 37ee28cde2f77..53e7141bd5a6c 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -507,7 +507,6 @@ where para_id, proposer, collator_service, - authoring_duration: Duration::from_millis(2000), reinitialize: false, slot_offset: Duration::from_secs(1), block_import_handle: slot_based_handle, From 291ce7a4f19f0756895039da598e724bf3209738 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 10 Jul 2025 23:30:45 +0200 Subject: [PATCH 070/312] Adds some test using glutton --- .../tests/elastic_scaling/mod.rs | 1 + .../pov_bundling_3cores_glutton.rs | 146 ++++++++++++++++++ 2 files changed, 147 insertions(+) create mode 100644 cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs diff --git a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/mod.rs index e322abcc93b86..f1e06221f18ea 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/mod.rs @@ -16,4 +16,5 @@ // limitations under the License. 
mod pov_bundling; +mod pov_bundling_3cores_glutton; mod slot_based_rp_offset; diff --git a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs new file mode 100644 index 0000000000000..66a7ec9f74c86 --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs @@ -0,0 +1,146 @@ +// This file is part of Cumulus. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::anyhow; + +use cumulus_zombienet_sdk_helpers::{ + assert_finality_lag, assert_para_throughput, create_assign_core_call, +}; +use polkadot_primitives::Id as ParaId; +use serde_json::json; +use zombienet_sdk::{ + subxt::{ + backend::{legacy::LegacyRpcMethods, rpc::RpcClient}, + OnlineClient, PolkadotConfig, + }, + subxt_signer::sr25519::dev, + NetworkConfig, NetworkConfigBuilder, +}; + +const PARA_ID: u32 = 2400; + +/// A test that ensures that PoV bundling works with 3 cores and glutton consuming 80% ref time. +/// +/// This test starts with 3 cores assigned and configures glutton to use 80% of ref time, +/// then validates that the parachain produces 72 blocks. 
+#[tokio::test(flavor = "multi_thread")] +async fn pov_bundling_3cores_glutton() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let config = build_network_config().await?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; + + let relay_node = network.get_node("validator-0")?; + let para_node = network.get_node("collator-1")?; + + let para_client = para_node.wait_client().await?; + let relay_client: OnlineClient = relay_node.wait_client().await?; + let alice = dev::alice(); + + // Assign cores 2 and 3 to start with 3 cores total (core 1 is auto-assigned) + let assign_cores_call = create_assign_core_call(&[(2, PARA_ID), (3, PARA_ID)]); + + relay_client + .tx() + .sign_and_submit_then_watch_default(&assign_cores_call, &alice) + .await + .inspect(|_| log::info!("Tx send, waiting for finalization"))? + .wait_for_finalized_success() + .await?; + log::info!("3 cores total assigned to the parachain"); + + // Glutton is already configured at genesis to use 80% ref time + log::info!("Glutton configured at genesis to use 80% ref time"); + + // Wait for the parachain to produce 72 blocks with 3 cores and glutton active + // With 3 cores, we expect roughly 3x throughput compared to single core + // Adjusting expectations based on glutton consuming 80% of ref time + assert_para_throughput( + &relay_client, + 6, + [(ParaId::from(PARA_ID), 18..27)], // Expected 3-core throughput with glutton overhead + [(ParaId::from(PARA_ID), (para_client.clone(), 72..97))], // Target 72+ blocks + ) + .await?; + + assert_finality_lag(¶_client, 72).await?; + log::info!("Test finished successfully - 72 blocks produced with 3 cores and glutton"); + Ok(()) +} + +async fn build_network_config() -> Result { + let images = zombienet_sdk::environment::get_images_from_env(); + log::info!("Using images: {images:?}"); + 
NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=trace").into()]) + .with_default_resources(|resources| { + resources.with_request_cpu(4).with_request_memory("4G") + }) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "num_cores": 3, + "max_validators_per_core": 1 + } + } + } + })) + .with_node(|node| node.with_name("validator-0")); + (1..9).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) + }) + .with_parachain(|p| { + p.with_id(PARA_ID) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("pov-bundling") + .with_default_args(vec![ + ("--authoring").into(), + ("slot-based").into(), + ("-lparachain=debug,aura=trace").into(), + ]) + .with_genesis_overrides(json!({ + "glutton": { + "compute": "10000000", // 80% ref time consumption + "storage": "0", // No storage consumption + "trashDataCount": 5000, // Initialize with some trash data + "blockLength": "0" // No block length consumption + } + })) + .with_collator(|n| n.with_name("collator-0")) + .with_collator(|n| n.with_name("collator-1")) + .with_collator(|n| n.with_name("collator-2")) + }) + .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { + Ok(val) => global_settings.with_base_dir(val), + _ => global_settings, + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + }) +} From 1afbb0e507caa2496346e3e4e43fb7292a06627f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 11 Jul 2025 13:09:12 +0200 Subject: [PATCH 071/312] Log the core configuration --- .../src/collators/slot_based/block_builder_task.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git 
a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 1f8549f0b8e38..86f74bbeb1067 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -354,6 +354,13 @@ where let blocks_per_core = (slot_schedule.len() as u32 / cores.total_cores()).max(1); + tracing::debug!( + target: crate::LOG_TARGET, + %blocks_per_core, + core_indices = ?cores.core_indices(), + "Core configuration", + ); + let mut pov_parent_header = initial_parent.header; let mut pov_parent_hash = initial_parent.hash; let mut slot_schedule = slot_schedule.into_iter(); @@ -650,6 +657,11 @@ impl Cores { } } + /// Returns the core indices. + fn core_indices(&self) -> &[CoreIndex] { + &self.core_indices + } + /// Returns the current [`CoreIndex`]. fn core_index(&self) -> CoreIndex { self.core_indices[self.selector.0 as usize] From ff386601edc803122de245d46f887277c1dff031 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 11 Jul 2025 13:24:45 +0200 Subject: [PATCH 072/312] Fix the core assignments --- .../zombienet-sdk-helpers/src/lib.rs | 37 ++++++++++++++----- .../tests/elastic_scaling/pov_bundling.rs | 4 +- .../pov_bundling_3cores_glutton.rs | 4 +- 3 files changed, 32 insertions(+), 13 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 9399edd7e2348..d1504d38e873a 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -13,14 +13,8 @@ use tokio::{ time::{sleep, Duration}, }; use zombienet_sdk::subxt::{ - backend::legacy::LegacyRpcMethods, - blocks::Block, - config::Header, - events::Events, - ext::scale_value::value, - tx::DynamicPayload, - utils::H256, - OnlineClient, PolkadotConfig, + 
backend::legacy::LegacyRpcMethods, blocks::Block, config::Header, events::Events, + ext::scale_value::value, tx::DynamicPayload, utils::H256, OnlineClient, PolkadotConfig, }; // Maximum number of blocks to wait for a session change. @@ -28,6 +22,32 @@ use zombienet_sdk::subxt::{ const WAIT_MAX_BLOCKS_FOR_SESSION: u32 = 50; /// Create a batch call to assign cores to a parachain. +/// +/// Zombienet by default adds extra core for each registered parachain additionally to the one +/// requested by `num_cores`. It then assigns the parachains to the extra cores allocated at the +/// end. So, the passed core indices should be counted from zero. +/// +/// # Example +/// +/// Genesis patch: +/// ```json +/// "configuration": { +/// "config": { +/// "scheduler_params": { +/// "num_cores": 2, +/// } +/// } +/// } +/// ``` +/// +/// Runs the relay chain with `2` cores and we also add two parachains. +/// To assign these extra `2` cores, the call would look like this: +/// +/// ```rust +/// create_assign_core_call(&[(0, 2400), (1, 2400)]) +/// ``` +/// +/// The cores `2` and `3` are assigned to the parachains by zombienet. pub fn create_assign_core_call(core_and_para: &[(u32, u32)]) -> DynamicPayload { let mut assign_cores = vec![]; for (core, para_id) in core_and_para.iter() { @@ -195,7 +215,6 @@ async fn is_session_change( })) } - /// Returns [`CoreInfo`] for the given parachain block. 
fn find_core_info( block: &Block>, diff --git a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs index 614d329641962..9162116b0084a 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling.rs @@ -66,7 +66,7 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { // 3 relay chain blocks assert_finality_lag(¶_client, 72).await?; - let assign_cores_call = create_assign_core_call(&[(2, PARA_ID), (3, PARA_ID)]); + let assign_cores_call = create_assign_core_call(&[(0, PARA_ID), (1, PARA_ID)]); relay_client .tx() @@ -86,7 +86,7 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { .await?; assert_finality_lag(¶_client, 72).await?; - let assign_cores_call = create_assign_core_call(&[(4, PARA_ID), (5, PARA_ID), (6, PARA_ID)]); + let assign_cores_call = create_assign_core_call(&[(2, PARA_ID), (3, PARA_ID), (4, PARA_ID)]); // Assign two extra cores to each parachain. 
relay_client .tx() diff --git a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs index 66a7ec9f74c86..68af4b328ce65 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs @@ -55,8 +55,8 @@ async fn pov_bundling_3cores_glutton() -> Result<(), anyhow::Error> { let relay_client: OnlineClient = relay_node.wait_client().await?; let alice = dev::alice(); - // Assign cores 2 and 3 to start with 3 cores total (core 1 is auto-assigned) - let assign_cores_call = create_assign_core_call(&[(2, PARA_ID), (3, PARA_ID)]); + // Assign cores 0 and 1 to start with 3 cores total (core 1 is auto-assigned) + let assign_cores_call = create_assign_core_call(&[(0, PARA_ID), (1, PARA_ID)]); relay_client .tx() From 8296840ee683eeede2c3a3e641d3ca1c4b7a3b9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 14 Jul 2025 13:29:42 +0200 Subject: [PATCH 073/312] Fix test --- .../tests/elastic_scaling/pov_bundling_3cores_glutton.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs index 68af4b328ce65..6884d4c5a1661 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs @@ -76,8 +76,8 @@ async fn pov_bundling_3cores_glutton() -> Result<(), anyhow::Error> { assert_para_throughput( &relay_client, 6, - [(ParaId::from(PARA_ID), 18..27)], // Expected 3-core throughput with glutton overhead - [(ParaId::from(PARA_ID), (para_client.clone(), 72..97))], // Target 72+ blocks + [(ParaId::from(PARA_ID), 12..19)], + 
[(ParaId::from(PARA_ID), (para_client.clone(), 48..73))], ) .await?; From a2b4aec348b737851de654a5cbf2b1669bebb088 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 14 Jul 2025 19:55:25 +0200 Subject: [PATCH 074/312] Fix block building interval --- .../collators/slot_based/block_builder_task.rs | 12 ++++++++++-- .../aura/src/collators/slot_based/slot_timer.rs | 16 +++++++++++----- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 86f74bbeb1067..42e1a146a0660 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -469,8 +469,8 @@ where for (block_index, block_time) in block_schedule.enumerate() { let block_start = Instant::now(); - let slot_time_for_block = - slot_time_for_core.saturating_sub(core_start.elapsed()) / num_blocks as u32; + let slot_time_for_block = slot_time_for_core.saturating_sub(core_start.elapsed()) / + (num_blocks - block_index) as u32; if slot_time_for_block <= Duration::from_millis(20) { tracing::error!( @@ -484,6 +484,14 @@ where break } + tracing::trace!( + target: LOG_TARGET, + slot_time_for_block_ms = %slot_time_for_block.as_millis(), + %block_index, + core_index = %core_index.0, + "Going to build block" + ); + // The authoring duration is either the block time returned by the runtime or the 90% of the // rest of the slot time for the block. We take here 90% because we still need to create the // inherents and need to import the block afterwards. 
diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs index cc8c967a14dbc..8b9a6d0aedbc5 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs @@ -34,17 +34,23 @@ pub(crate) struct SlotTime { slot_duration: Duration, /// The exact timestamp when this slot started slot_start_timestamp: Timestamp, + /// Time offset to apply when calculating time remaining + time_offset: Duration, } impl SlotTime { /// Create a new SlotTime - pub fn new(slot_duration: Duration, slot_start_timestamp: Timestamp) -> Self { - Self { slot_duration, slot_start_timestamp } + pub fn new( + slot_duration: Duration, + slot_start_timestamp: Timestamp, + time_offset: Duration, + ) -> Self { + Self { slot_duration, slot_start_timestamp, time_offset } } /// Get the time remaining in this slot pub fn time_left(&self) -> Duration { - let now = duration_now(); + let now = duration_now().saturating_sub(self.time_offset); let slot_end_time_millis = self.slot_start_timestamp.as_millis() + self.slot_duration.as_millis() as u64; let slot_end_time = Duration::from_millis(slot_end_time_millis); @@ -80,7 +86,7 @@ fn time_until_next_slot( block_production_interval: Duration, offset: Duration, ) -> (Duration, Timestamp) { - let now = now.as_millis().saturating_sub(offset.as_millis()); + let now = now.saturating_sub(offset).as_millis(); let next_slot_time = ((now + block_production_interval.as_millis()) / block_production_interval.as_millis()) * @@ -146,7 +152,7 @@ impl SlotTimer { // Update internal slot tracking self.last_reported_slot = Some(current_slot); - Ok(SlotTime::new(self.relay_slot_duration, slot_start_timestamp)) + Ok(SlotTime::new(self.relay_slot_duration, slot_start_timestamp, self.time_offset)) } } From a1741fc9b1a39eed953c69a116f638026036b4bd Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 14 Jul 2025 22:38:46 +0200 Subject: [PATCH 075/312] Introduce `MaxParachainBlockWeight` --- Cargo.lock | 1 + cumulus/pallets/parachain-system/Cargo.toml | 2 + cumulus/pallets/parachain-system/src/lib.rs | 2 + .../src/max_parachain_block_weight.rs | 203 ++++++++++++++++++ .../pov_bundling_3cores_glutton.rs | 2 +- substrate/test-utils/runtime/src/lib.rs | 7 + 6 files changed, 216 insertions(+), 1 deletion(-) create mode 100644 cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs diff --git a/Cargo.lock b/Cargo.lock index d022cefd584c6..b5b1c372cf058 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4578,6 +4578,7 @@ dependencies = [ "pallet-message-queue", "parity-scale-codec", "polkadot-parachain-primitives", + "polkadot-primitives", "polkadot-runtime-parachains", "rand 0.8.5", "rstest", diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 6f84162dcb010..43d226b47aa3e 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -39,6 +39,7 @@ sp-version = { workspace = true } # Polkadot polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } +polkadot-primitives = { workspace = true } polkadot-runtime-parachains = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } @@ -86,6 +87,7 @@ std = [ "log/std", "pallet-message-queue/std", "polkadot-parachain-primitives/std", + "polkadot-primitives/std", "polkadot-runtime-parachains/std", "scale-info/std", "sp-consensus-babe/std", diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index e1ab811c6b25a..287c32e81a274 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -59,6 +59,7 @@ use xcm::{latest::XcmHash, VersionedLocation, VersionedXcm, MAX_XCM_DECODE_DEPTH use xcm_builder::InspectMessageQueues; mod 
benchmarking; +pub mod max_parachain_block_weight; pub mod migration; mod mock; #[cfg(test)] @@ -66,6 +67,7 @@ mod tests; pub mod weights; pub use weights::WeightInfo; +pub use max_parachain_block_weight::MaxParachainBlockWeight; mod unincluded_segment; diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs new file mode 100644 index 0000000000000..b38afd684c31a --- /dev/null +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs @@ -0,0 +1,203 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Utilities for calculating maximum parachain block weight based on core assignments. + +use cumulus_primitives_core::CumulusDigestItem; +use frame_support::weights::Weight; +use polkadot_primitives::MAX_POV_SIZE; + +/// A utility type for calculating the maximum block weight for a parachain based on +/// the number of relay chain cores assigned and the target number of blocks. +pub struct MaxParachainBlockWeight; + +impl MaxParachainBlockWeight { + /// Calculate the maximum block weight based on target blocks and core assignments. 
+ /// + /// This function examines the current block's digest from `frame_system::Digests` storage + /// to find `CumulusDigestItem::CoreInfo` entries, which contain information about the + /// number of relay chain cores assigned to the parachain. Each core has a maximum + /// reference time of 2 seconds and the total maximum PoV size of `MAX_POV_SIZE` is + /// shared across all target blocks. + /// + /// # Parameters + /// - `target_blocks`: The target number of blocks to be produced + /// + /// # Returns + /// Returns the calculated maximum weight, or a conservative default if no core info is found + /// or if an error occurs during calculation. + pub fn get(target_blocks: u32) -> Weight { + // Maximum ref time per core (2 seconds in nanoseconds) + const MAX_REF_TIME_PER_CORE_NS: u64 = 2_000_000_000; + + // Get the current block's digest from frame-system storage + let digest = frame_system::Pallet::::digest(); + + // Search for CoreInfo in the block digest + let core_info = match CumulusDigestItem::find_core_info(&digest) { + Some(info) => info, + None => { + // If no core info is found, return a conservative default + return Weight::from_parts(MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); + }, + }; + + // Extract number of cores from the CoreInfo + let number_of_cores = u32::from(core_info.number_of_cores.0); + + // Ensure we have at least one core and valid target blocks + if number_of_cores == 0 || target_blocks == 0 { + return Weight::from_parts(MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); + } + + // Calculate total available ref time across all cores + let total_ref_time = MAX_REF_TIME_PER_CORE_NS.saturating_mul(number_of_cores as u64); + + // Distribute the total ref time across target blocks + let ref_time_per_block = total_ref_time.saturating_div(target_blocks as u64); + + // PoV size is also shared across target blocks + let proof_size_per_block = (MAX_POV_SIZE as u64).saturating_div(target_blocks as u64); + + 
Weight::from_parts(ref_time_per_block, proof_size_per_block) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use codec::Compact; + use cumulus_primitives_core::{ClaimQueueOffset, CoreInfo, CoreSelector}; + use frame_support::{construct_runtime, derive_impl}; + use sp_io; + use sp_runtime::{traits::IdentityLookup, BuildStorage}; + + type Block = frame_system::mocking::MockBlock; + + // Configure a mock runtime to test the functionality + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for Test { + type Block = Block; + type AccountId = u64; + type AccountData = (); + type Lookup = IdentityLookup; + } + + construct_runtime!( + pub enum Test { + System: frame_system, + } + ); + + fn new_test_ext_with_digest(num_cores: Option) -> sp_io::TestExternalities { + let storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + let mut ext = sp_io::TestExternalities::from(storage); + + ext.execute_with(|| { + if let Some(num_cores) = num_cores { + let core_info = CoreInfo { + selector: CoreSelector(0), + claim_queue_offset: ClaimQueueOffset(0), + number_of_cores: Compact(num_cores), + }; + + let digest = CumulusDigestItem::CoreInfo(core_info).to_digest_item(); + + frame_system::Pallet::::deposit_log(digest); + } + }); + + ext + } + + #[test] + fn test_single_core_single_block() { + new_test_ext_with_digest(Some(1)).execute_with(|| { + let weight = MaxParachainBlockWeight::get::(1); + + // With 1 core and 1 target block, should get full 2s ref time and full PoV size + assert_eq!(weight.ref_time(), 2_000_000_000); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); + }); + } + + #[test] + fn test_single_core_multiple_blocks() { + new_test_ext_with_digest(Some(1)).execute_with(|| { + let weight = MaxParachainBlockWeight::get::(4); + + // With 1 core and 4 target blocks, should get 0.5s ref time and 1/4 PoV size per block + assert_eq!(weight.ref_time(), 500_000_000); + assert_eq!(weight.proof_size(), 
(MAX_POV_SIZE as u64) / 4); + }); + } + + #[test] + fn test_multiple_cores_single_block() { + new_test_ext_with_digest(Some(3)).execute_with(|| { + let weight = MaxParachainBlockWeight::get::(1); + + // With 3 cores and 1 target block, should get 6s ref time total and full PoV size + assert_eq!(weight.ref_time(), 6_000_000_000); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); + }); + } + + #[test] + fn test_multiple_cores_multiple_blocks() { + new_test_ext_with_digest(Some(2)).execute_with(|| { + let weight = MaxParachainBlockWeight::get::(4); + + // With 2 cores and 4 target blocks, should get 1s ref time and 1/4 PoV size per block + assert_eq!(weight.ref_time(), 1_000_000_000); + assert_eq!(weight.proof_size(), (MAX_POV_SIZE as u64) / 4); + }); + } + + #[test] + fn test_no_core_info() { + new_test_ext_with_digest(None).execute_with(|| { + let weight = MaxParachainBlockWeight::get::(1); + + // Without core info, should return conservative default + assert_eq!(weight.ref_time(), 2_000_000_000); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); + }); + } + + #[test] + fn test_zero_cores() { + new_test_ext_with_digest(Some(0)).execute_with(|| { + let weight = MaxParachainBlockWeight::get::(1); + + // With 0 cores, should return conservative default + assert_eq!(weight.ref_time(), 2_000_000_000); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); + }); + } + + #[test] + fn test_zero_target_blocks() { + new_test_ext_with_digest(Some(2)).execute_with(|| { + let weight = MaxParachainBlockWeight::get::(0); + + // With 0 target blocks, should return conservative default + assert_eq!(weight.ref_time(), 2_000_000_000); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); + }); + } +} diff --git a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs index 6884d4c5a1661..8915ff6d0d8b8 100644 --- 
a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs @@ -124,7 +124,7 @@ async fn build_network_config() -> Result { ]) .with_genesis_overrides(json!({ "glutton": { - "compute": "10000000", // 80% ref time consumption + "compute": "1000000000", // 80% ref time consumption "storage": "0", // No storage consumption "trashDataCount": 5000, // Initialize with some trash data "blockLength": "0" // No block length consumption diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index f440028c74c7c..20e6a48053e04 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -568,6 +568,13 @@ impl_runtime_apis! { fn check_inherents(_block: Block, _data: InherentData) -> CheckInherentsResult { CheckInherentsResult::new() } + + fn block_rate() -> sp_block_builder::BlockRate { + sp_block_builder::BlockRate { + block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , + block_building_time: core::time::Duration::from_secs(2), + } + } } impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { From a95d05b2849eca586df5d9dc640e332392083444 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 17 Jul 2025 21:04:32 +0200 Subject: [PATCH 076/312] Some test improvements --- .../tests/elastic_scaling/pov_bundling_3cores_glutton.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs index 8915ff6d0d8b8..89b583c37c720 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/pov_bundling_3cores_glutton.rs @@ -67,9 +67,6 @@ async 
fn pov_bundling_3cores_glutton() -> Result<(), anyhow::Error> { .await?; log::info!("3 cores total assigned to the parachain"); - // Glutton is already configured at genesis to use 80% ref time - log::info!("Glutton configured at genesis to use 80% ref time"); - // Wait for the parachain to produce 72 blocks with 3 cores and glutton active // With 3 cores, we expect roughly 3x throughput compared to single core // Adjusting expectations based on glutton consuming 80% of ref time @@ -124,7 +121,7 @@ async fn build_network_config() -> Result { ]) .with_genesis_overrides(json!({ "glutton": { - "compute": "1000000000", // 80% ref time consumption + "compute": "2000000000", // 200% ref time consumption "storage": "0", // No storage consumption "trashDataCount": 5000, // Initialize with some trash data "blockLength": "0" // No block length consumption From 9cffb1432181dbfd1558ad013d3dcd5b64aed367 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 17 Jul 2025 21:05:02 +0200 Subject: [PATCH 077/312] Fix and use `MaxParachainBlockWeight` --- .../src/max_parachain_block_weight.rs | 26 +++++-------------- cumulus/test/runtime/src/lib.rs | 23 +++++++--------- substrate/frame/system/src/limits.rs | 9 +++---- 3 files changed, 21 insertions(+), 37 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs index b38afd684c31a..19bef8b60629d 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs @@ -17,7 +17,7 @@ //! Utilities for calculating maximum parachain block weight based on core assignments. 
use cumulus_primitives_core::CumulusDigestItem; -use frame_support::weights::Weight; +use frame_support::weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}; use polkadot_primitives::MAX_POV_SIZE; /// A utility type for calculating the maximum block weight for a parachain based on @@ -40,36 +40,24 @@ impl MaxParachainBlockWeight { /// Returns the calculated maximum weight, or a conservative default if no core info is found /// or if an error occurs during calculation. pub fn get(target_blocks: u32) -> Weight { - // Maximum ref time per core (2 seconds in nanoseconds) - const MAX_REF_TIME_PER_CORE_NS: u64 = 2_000_000_000; + // Maximum ref time per core + const MAX_REF_TIME_PER_CORE_NS: u64 = 2 * WEIGHT_REF_TIME_PER_SECOND; - // Get the current block's digest from frame-system storage let digest = frame_system::Pallet::::digest(); - // Search for CoreInfo in the block digest - let core_info = match CumulusDigestItem::find_core_info(&digest) { - Some(info) => info, - None => { - // If no core info is found, return a conservative default - return Weight::from_parts(MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); - }, + let Some(core_info) = CumulusDigestItem::find_core_info(&digest) else { + return Weight::from_parts(MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); }; - // Extract number of cores from the CoreInfo - let number_of_cores = u32::from(core_info.number_of_cores.0); + let number_of_cores = core_info.number_of_cores.0 as u32; // Ensure we have at least one core and valid target blocks if number_of_cores == 0 || target_blocks == 0 { return Weight::from_parts(MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); } - // Calculate total available ref time across all cores - let total_ref_time = MAX_REF_TIME_PER_CORE_NS.saturating_mul(number_of_cores as u64); - - // Distribute the total ref time across target blocks - let ref_time_per_block = total_ref_time.saturating_div(target_blocks as u64); + let ref_time_per_block = 
MAX_REF_TIME_PER_CORE_NS.saturating_div(target_blocks as u64); - // PoV size is also shared across target blocks let proof_size_per_block = (MAX_POV_SIZE as u64).saturating_div(target_blocks as u64); Weight::from_parts(ref_time_per_block, proof_size_per_block) diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 5b61b1e3343f0..4e72f449ce4b2 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -211,15 +211,14 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used /// by Operational extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// We allow for 1 second of compute with a 6 second average block time. -const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( - WEIGHT_REF_TIME_PER_SECOND, - cumulus_primitives_core::relay_chain::MAX_POV_SIZE as u64, -); +/// Target number of blocks per relay chain slot. +const NUMBER_OF_BLOCKS_PER_RELAY_SLOT: u32 = 12; parameter_types! { pub const BlockHashCount: BlockNumber = 4096; pub const Version: RuntimeVersion = VERSION; + /// We allow for 1 second of compute with a 6 second average block time. + pub MaximumBlockWeight: Weight = cumulus_pallet_parachain_system::MaxParachainBlockWeight::get::(NUMBER_OF_BLOCKS_PER_RELAY_SLOT); pub RuntimeBlockLength: BlockLength = BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() @@ -228,14 +227,14 @@ parameter_types! 
{ weights.base_extrinsic = ExtrinsicBaseWeight::get(); }) .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MaximumBlockWeight::get()); }) .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + weights.max_total = Some(MaximumBlockWeight::get()); // Operational transactions have some extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + // are included even if block reached `MaximumBlockWeight`. weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT + MaximumBlockWeight::get() - NORMAL_DISPATCH_RATIO * MaximumBlockWeight::get() ); }) .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) @@ -632,11 +631,9 @@ impl_runtime_apis! { impl cumulus_primitives_core::SlotSchedule for Runtime { fn next_slot_schedule(num_cores: u32) -> Vec { - const TARGET_BLOCK_INTERVAL: u32 = 12; + let block_time = Duration::from_secs(2) * num_cores / NUMBER_OF_BLOCKS_PER_RELAY_SLOT; - let block_time = Duration::from_secs(2) * num_cores / TARGET_BLOCK_INTERVAL; - - vec![block_time.min(Duration::from_millis(500)); TARGET_BLOCK_INTERVAL as usize] + vec![block_time.min(Duration::from_millis(500)); NUMBER_OF_BLOCKS_PER_RELAY_SLOT as usize] } } } diff --git a/substrate/frame/system/src/limits.rs b/substrate/frame/system/src/limits.rs index ab5a98a6b9745..1057593d2d032 100644 --- a/substrate/frame/system/src/limits.rs +++ b/substrate/frame/system/src/limits.rs @@ -25,6 +25,7 @@ //! `DispatchClass`. This module contains configuration object for both resources, //! which should be passed to `frame_system` configuration when runtime is being set up. 
+use alloc::{string::String, vec::Vec, format}; use frame_support::{ dispatch::{DispatchClass, OneOrMany, PerDispatchClass}, weights::{constants, Weight}, @@ -33,7 +34,7 @@ use scale_info::TypeInfo; use sp_runtime::{traits::Bounded, Perbill, RuntimeDebug}; /// Block length limit configuration. -#[derive(RuntimeDebug, Clone, codec::Encode, codec::Decode, TypeInfo)] +#[derive(Debug, Clone, codec::Encode, codec::Decode, TypeInfo)] pub struct BlockLength { /// Maximal total length in bytes for each extrinsic class. /// @@ -69,10 +70,9 @@ impl BlockLength { } } -#[derive(Default, RuntimeDebug)] +#[derive(Default, Debug)] pub struct ValidationErrors { pub has_errors: bool, - #[cfg(feature = "std")] pub errors: Vec, } @@ -80,7 +80,6 @@ macro_rules! error_assert { ($cond : expr, $err : expr, $format : expr $(, $params: expr )*$(,)*) => { if !$cond { $err.has_errors = true; - #[cfg(feature = "std")] { $err.errors.push(format!($format $(, &$params )*)); } } } @@ -195,7 +194,7 @@ pub struct WeightsPerClass { /// /// As a consequence of `reserved` space, total consumed block weight might exceed `max_block` /// value, so this parameter should rather be thought of as "target block weight" than a hard limit. -#[derive(RuntimeDebug, Clone, codec::Encode, codec::Decode, TypeInfo)] +#[derive(Debug, Clone, codec::Encode, codec::Decode, TypeInfo)] pub struct BlockWeights { /// Base weight of block execution. 
pub base_block: Weight, From 4e82e5daf6a4cd73b756d81ccce9c725f7c2b470 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 17 Jul 2025 21:05:24 +0200 Subject: [PATCH 078/312] Do not sleep directly on the last block --- .../src/collators/slot_based/block_builder_task.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 42e1a146a0660..08b287e05f949 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -542,7 +542,12 @@ where // If there is still time left for the block in the slot, we sleep the rest of the time. // This ensures that we have some steady block rate. - if let Some(sleep) = slot_time_for_block.checked_sub(block_start.elapsed()) { + if let Some(sleep) = slot_time_for_block + .checked_sub(block_start.elapsed()) + // Let's not sleep for the last block here, to send out the collation as early as + // possible. + .filter(|_| block_index + 1 < num_blocks) + { tokio::time::sleep(sleep).await; } } @@ -561,6 +566,11 @@ where tracing::error!(target: crate::LOG_TARGET, ?err, "Unable to send block to collation task."); Err(()) } else { + // Now let's sleep for the rest of the core. 
+ if let Some(sleep) = slot_time_for_core.checked_sub(core_start.elapsed()) { + tokio::time::sleep(sleep).await; + } + Ok(Some(parent_header)) } } From 8024d0808c1b7ee2ad0208be91d68987ede6f413 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 18 Jul 2025 14:15:31 +0200 Subject: [PATCH 079/312] Support skipping the last block in a slot --- .../slot_based/block_builder_task.rs | 51 ++++++++++++------- .../src/collators/slot_based/slot_timer.rs | 16 ++++++ 2 files changed, 50 insertions(+), 17 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 08b287e05f949..eb46248b7bff4 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -267,7 +267,7 @@ where let included_header_hash = included_header.hash(); - let slot_claim = match crate::collators::can_build_upon::<_, _, P>( + let Some(slot_claim) = crate::collators::can_build_upon::<_, _, P>( slot_info.slot, relay_slot, slot_info.timestamp, @@ -277,22 +277,19 @@ where &keystore, ) .await - { - Some(slot) => slot, - None => { - tracing::debug!( - target: crate::LOG_TARGET, - unincluded_segment_len = initial_parent.depth, - relay_parent = ?relay_parent, - relay_parent_num = %relay_parent_header.number(), - included_hash = ?included_header_hash, - included_num = %included_header.number(), - initial_parent = ?initial_parent.hash, - slot = ?slot_info.slot, - "Not eligible to claim slot." 
- ); - continue - }, + else { + tracing::debug!( + target: crate::LOG_TARGET, + unincluded_segment_len = initial_parent.depth, + relay_parent = ?relay_parent, + relay_parent_num = %relay_parent_header.number(), + included_hash = ?included_header_hash, + included_num = %included_header.number(), + initial_parent = ?initial_parent.hash, + slot = ?slot_info.slot, + "Not eligible to claim slot." + ); + continue }; tracing::debug!( @@ -385,6 +382,8 @@ where cores.core_index(), (&mut slot_schedule).take(blocks_per_core as usize), time_for_core, + cores.is_last_core() && + slot_time.is_parachain_slot_ending(para_slot_duration.as_duration()), ) .await { @@ -425,6 +424,7 @@ async fn build_collation_for_core, slot_time_for_core: Duration, + is_last_core_in_parachain_slot: bool, ) -> Result, ()> where RelayClient: RelayChainInterface + 'static, @@ -468,6 +468,18 @@ where let mut parent_header = pov_parent_header.clone(); for (block_index, block_time) in block_schedule.enumerate() { + //TODO: Remove when transaction streaming is implemented + // We require that the next node has imported our last block before it can start building + // the next block. To ensure that the next node is able to do so, we are skipping the last + // block in the parachain slot. In the future this can be removed again. + if block_index + 1 == num_blocks && num_blocks > 1 && is_last_core_in_parachain_slot { + tracing::debug!( + target: LOG_TARGET, + "Skipping block production so that the next node is able to import all blocks before its slot." + ); + break; + } + let block_start = Instant::now(); let slot_time_for_block = slot_time_for_core.saturating_sub(core_start.elapsed()) / (num_blocks - block_index) as u32; @@ -706,6 +718,11 @@ impl Cores { fn cores_left(&self) -> u32 { self.total_cores() - self.selector.0 as u32 } + + /// Returns if the current core is the last core. + fn is_last_core(&self) -> bool { + self.cores_left() == 1 + } } /// Determine the cores for the given `para_id`. 
diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs index 8b9a6d0aedbc5..9ec109bb488d3 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs @@ -57,6 +57,22 @@ impl SlotTime { slot_end_time.saturating_sub(now) } + + /// Check if the next relay chain slot would be in a different parachain slot. + pub fn is_parachain_slot_ending(&self, parachain_slot_duration: Duration) -> bool { + let now = duration_now().saturating_sub(self.time_offset); + let next_relay_slot_start_time = + self.slot_start_timestamp.as_duration() + self.slot_duration; + + // Calculate current parachain slot + let current_parachain_slot = now.as_millis() / parachain_slot_duration.as_millis(); + + // Calculate parachain slot for next relay slot + let next_parachain_slot = + next_relay_slot_start_time.as_millis() / parachain_slot_duration.as_millis() as u128; + + current_parachain_slot != next_parachain_slot + } } /// Manages block-production slots based on the relay chain slot duration. 
From aeb8166b280363d5c5c37754d139586bc9f04af6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 21 Jul 2025 23:21:07 +0200 Subject: [PATCH 080/312] Fixes --- cumulus/pallets/parachain-system/src/lib.rs | 4 +-- .../src/max_parachain_block_weight.rs | 34 +++++++++++-------- cumulus/pallets/parachain-system/src/tests.rs | 2 +- .../src/validate_block/tests.rs | 2 +- cumulus/test/client/src/block_builder.rs | 10 +----- .../test/service/benches/validate_block.rs | 11 +++--- cumulus/test/service/src/cli.rs | 6 ++-- .../tests/disabling/slashing.rs | 3 +- .../tests/functional/sync_backing.rs | 8 +---- substrate/frame/system/src/limits.rs | 2 +- 10 files changed, 36 insertions(+), 46 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 287c32e81a274..c26f198305c7f 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -66,8 +66,8 @@ mod mock; mod tests; pub mod weights; -pub use weights::WeightInfo; pub use max_parachain_block_weight::MaxParachainBlockWeight; +pub use weights::WeightInfo; mod unincluded_segment; @@ -1333,7 +1333,7 @@ impl Pallet { // a new block is allowed. 
assert!( new_len < capacity.get(), - "No space left for the block in the unincluded segment: {new_len} < {}", + "No space left for the block in the unincluded segment: new_len({new_len}) < capacity({})", capacity.get() ); weight_used diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs index 19bef8b60629d..3816e56ddfd58 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs @@ -56,9 +56,13 @@ impl MaxParachainBlockWeight { return Weight::from_parts(MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); } - let ref_time_per_block = MAX_REF_TIME_PER_CORE_NS.saturating_div(target_blocks as u64); + let total_ref_time = (number_of_cores as u64).saturating_mul(MAX_REF_TIME_PER_CORE_NS); + let ref_time_per_block = total_ref_time + .saturating_div(target_blocks as u64) + .min(MAX_REF_TIME_PER_CORE_NS); - let proof_size_per_block = (MAX_POV_SIZE as u64).saturating_div(target_blocks as u64); + let total_pov_size = (number_of_cores as u64).saturating_mul(MAX_POV_SIZE as u64); + let proof_size_per_block = total_pov_size.saturating_div(target_blocks as u64); Weight::from_parts(ref_time_per_block, proof_size_per_block) } @@ -118,7 +122,7 @@ mod tests { let weight = MaxParachainBlockWeight::get::(1); // With 1 core and 1 target block, should get full 2s ref time and full PoV size - assert_eq!(weight.ref_time(), 2_000_000_000); + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); }); } @@ -129,8 +133,8 @@ mod tests { let weight = MaxParachainBlockWeight::get::(4); // With 1 core and 4 target blocks, should get 0.5s ref time and 1/4 PoV size per block - assert_eq!(weight.ref_time(), 500_000_000); - assert_eq!(weight.proof_size(), (MAX_POV_SIZE as u64) / 4); + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND / 4); + 
assert_eq!(weight.proof_size(), (1 * MAX_POV_SIZE as u64) / 4); }); } @@ -139,9 +143,10 @@ mod tests { new_test_ext_with_digest(Some(3)).execute_with(|| { let weight = MaxParachainBlockWeight::get::(1); - // With 3 cores and 1 target block, should get 6s ref time total and full PoV size - assert_eq!(weight.ref_time(), 6_000_000_000); - assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); + // With 3 cores and 1 target block, should get max 2s ref time (capped per core) and 3x + // PoV size + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + assert_eq!(weight.proof_size(), 3 * MAX_POV_SIZE as u64); }); } @@ -150,9 +155,10 @@ mod tests { new_test_ext_with_digest(Some(2)).execute_with(|| { let weight = MaxParachainBlockWeight::get::(4); - // With 2 cores and 4 target blocks, should get 1s ref time and 1/4 PoV size per block - assert_eq!(weight.ref_time(), 1_000_000_000); - assert_eq!(weight.proof_size(), (MAX_POV_SIZE as u64) / 4); + // With 2 cores and 4 target blocks, should get 1s ref time and 2x PoV size / 4 per + // block + assert_eq!(weight.ref_time(), 2 * 2 * WEIGHT_REF_TIME_PER_SECOND / 4); + assert_eq!(weight.proof_size(), (2 * MAX_POV_SIZE as u64) / 4); }); } @@ -162,7 +168,7 @@ mod tests { let weight = MaxParachainBlockWeight::get::(1); // Without core info, should return conservative default - assert_eq!(weight.ref_time(), 2_000_000_000); + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); }); } @@ -173,7 +179,7 @@ mod tests { let weight = MaxParachainBlockWeight::get::(1); // With 0 cores, should return conservative default - assert_eq!(weight.ref_time(), 2_000_000_000); + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); }); } @@ -184,7 +190,7 @@ mod tests { let weight = MaxParachainBlockWeight::get::(0); // With 0 target blocks, should return conservative default - assert_eq!(weight.ref_time(), 
2_000_000_000); + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); }); } diff --git a/cumulus/pallets/parachain-system/src/tests.rs b/cumulus/pallets/parachain-system/src/tests.rs index 5fee780d8ed78..86189d098c312 100755 --- a/cumulus/pallets/parachain-system/src/tests.rs +++ b/cumulus/pallets/parachain-system/src/tests.rs @@ -182,7 +182,7 @@ fn unincluded_segment_works() { } #[test] -#[should_panic = "no space left for the block in the unincluded segment"] +#[should_panic = "No space left for the block in the unincluded segment: new_len(1) < capacity(1)"] fn unincluded_segment_is_limited() { CONSENSUS_HOOK.with(|c| { *c.borrow_mut() = Box::new(|_| (Weight::zero(), NonZeroU32::new(1).unwrap().into())) diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 4e35d279a4887..3b7206d6a0a16 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -32,7 +32,7 @@ use polkadot_parachain_primitives::primitives::ValidationResult; #[cfg(feature = "experimental-ump-signals")] use relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; -use sp_api::{ProofRecorder, ProvideRuntimeApi, StorageProof, ApiExt, Core}; +use sp_api::{ApiExt, Core, ProofRecorder, ProvideRuntimeApi, StorageProof}; use sp_consensus_babe::SlotDuration; use sp_core::H256; use sp_runtime::{ diff --git a/cumulus/test/client/src/block_builder.rs b/cumulus/test/client/src/block_builder.rs index 1c464085d8e98..dd81a3bd4ef25 100644 --- a/cumulus/test/client/src/block_builder.rs +++ b/cumulus/test/client/src/block_builder.rs @@ -224,15 +224,7 @@ impl InitBlockBuilder for Client { validation_data: Option>, relay_sproof_builder: RelayStateSproofBuilder, ) -> BlockBuilderAndSupportData { - init_block_builder( - 
self, - at, - validation_data, - relay_sproof_builder, - None, - None, - None, - ) + init_block_builder(self, at, validation_data, relay_sproof_builder, None, None, None) } fn init_block_builder_with_ignored_nodes( diff --git a/cumulus/test/service/benches/validate_block.rs b/cumulus/test/service/benches/validate_block.rs index 178cf1459f176..e2ae6aa9dbc0c 100644 --- a/cumulus/test/service/benches/validate_block.rs +++ b/cumulus/test/service/benches/validate_block.rs @@ -91,12 +91,11 @@ fn benchmark_block_validation(c: &mut Criterion) { let para_id = ParaId::from(cumulus_test_runtime::PARACHAIN_ID); let mut test_client_builder = TestClientBuilder::with_default_backend(); let genesis_init = test_client_builder.genesis_init_mut(); - *genesis_init = - cumulus_test_client::GenesisParameters { - endowed_accounts: account_ids, - wasm: None, - blocks_per_pov: None, - }; + *genesis_init = cumulus_test_client::GenesisParameters { + endowed_accounts: account_ids, + wasm: None, + blocks_per_pov: None, + }; let client = test_client_builder.build_with_native_executor(None).0; let (max_transfer_count, extrinsics) = create_extrinsics(&client, &src_accounts, &dst_accounts); diff --git a/cumulus/test/service/src/cli.rs b/cumulus/test/service/src/cli.rs index 2f4980e30102a..94644d3161818 100644 --- a/cumulus/test/service/src/cli.rs +++ b/cumulus/test/service/src/cli.rs @@ -311,9 +311,9 @@ impl SubstrateCli for TestCollatorCli { }, "pov-bundling" => { tracing::info!("Using pov-bundling chain spec."); - Box::new(cumulus_test_service::get_pov_bundling_chain_spec( - Some(ParaId::from(2400)), - )) as Box<_> + Box::new(cumulus_test_service::get_pov_bundling_chain_spec(Some(ParaId::from( + 2400, + )))) as Box<_> }, "sync-backing" => { tracing::info!("Using sync backing chain spec."); diff --git a/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs b/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs index a6e9eb698542c..7cb750b5a07dc 100644 --- 
a/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs +++ b/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs @@ -7,8 +7,7 @@ use anyhow::anyhow; use cumulus_zombienet_sdk_helpers::{ - assert_blocks_are_being_finalized, assert_para_throughput, - wait_for_first_session_change, + assert_blocks_are_being_finalized, assert_para_throughput, wait_for_first_session_change, }; use polkadot_primitives::{BlockNumber, CandidateHash, DisputeState, Id as ParaId, SessionIndex}; use serde_json::json; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs b/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs index 8cfa2ed6ece8a..1e8b0278891ce 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs @@ -63,13 +63,7 @@ async fn sync_backing_test() -> Result<(), anyhow::Error> { let relay_client: OnlineClient = relay_node.wait_client().await?; - assert_para_throughput( - &relay_client, - 15, - [(ParaId::from(2500), 5..9)], - [], - ) - .await?; + assert_para_throughput(&relay_client, 15, [(ParaId::from(2500), 5..9)], []).await?; // Assert the parachain finalized block height is also on par with the number of backed // candidates. diff --git a/substrate/frame/system/src/limits.rs b/substrate/frame/system/src/limits.rs index 1057593d2d032..91239f7fbebb2 100644 --- a/substrate/frame/system/src/limits.rs +++ b/substrate/frame/system/src/limits.rs @@ -25,7 +25,7 @@ //! `DispatchClass`. This module contains configuration object for both resources, //! which should be passed to `frame_system` configuration when runtime is being set up. 
-use alloc::{string::String, vec::Vec, format}; +use alloc::{format, string::String, vec::Vec}; use frame_support::{ dispatch::{DispatchClass, OneOrMany, PerDispatchClass}, weights::{constants, Weight}, From 98446bec0121b677003e2b7bc2f8dce5bbc9069d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 23 Jul 2025 17:50:02 +0200 Subject: [PATCH 081/312] Introduce `BundleInfo` --- cumulus/primitives/core/src/lib.rs | 31 ++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 986b9db6a2fea..cfcb6fe4d877c 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -227,6 +227,18 @@ pub struct CoreInfo { pub number_of_cores: Compact, } +/// Information about a block that is part of a PoV bundle. +#[derive(Clone, Debug, Decode, Encode, PartialEq)] +pub struct BundleInfo { + /// The index of the block in the bundle. + pub index: u8, + /// Is this the last block in the bundle from the point of view of the node? + /// + /// It is possible that at `index` zero the runtime outputs the [`CumulusDigestItem::Special`] + /// that informs the node to use an entire for one block only. + pub maybe_last: bool, +} + /// Identifier for a relay chain block used by [`CumulusDigestItem`]. #[derive(Clone, Debug, PartialEq)] pub enum RelayBlockIdentifier { @@ -246,6 +258,9 @@ pub enum CumulusDigestItem { /// block. #[codec(index = 1)] CoreInfo(CoreInfo), + /// A digest item providing information about the position of the block in the bundle. + #[codec(index = 2)] + BundleInfo(BundleInfo), } impl CumulusDigestItem { @@ -315,6 +330,22 @@ impl CumulusDigestItem { _ => None, }) } + + /// Returns the [`BundleInfo`] from the given `digest`. 
+ pub fn find_bundle_info(digest: &Digest) -> Option { + digest.convert_first(|d| match d { + DigestItem::PreRuntime(id, val) if id == &CUMULUS_CONSENSUS_ID => { + let Ok(CumulusDigestItem::BundleInfo(bundle_info)) = + CumulusDigestItem::decode_all(&mut &val[..]) + else { + return None + }; + + Some(bundle_info) + }, + _ => None, + }) + } } /// Extract the relay-parent from the provided header digest. Returns `None` if none were found. From 7fc391e5ed0c5e16c078aa9676d56bbee027a6fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 24 Jul 2025 23:09:21 +0200 Subject: [PATCH 082/312] Start the transaction extension --- Cargo.lock | 1 + cumulus/pallets/parachain-system/Cargo.toml | 1 + .../src/max_parachain_block_weight.rs | 137 +++++++++++++++++- 3 files changed, 138 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index b5b1c372cf058..9af828f359d96 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4566,6 +4566,7 @@ dependencies = [ "cumulus-primitives-proof-size-hostfunction", "cumulus-test-client", "cumulus-test-relay-sproof-builder", + "derive-where", "environmental", "frame-benchmarking", "frame-support", diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 43d226b47aa3e..370db8f35dc77 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -14,6 +14,7 @@ workspace = true [dependencies] bytes = { workspace = true } codec = { features = ["derive"], workspace = true } +derive-where = { workspace = true } environmental = { workspace = true } hashbrown = { workspace = true } impl-trait-for-tuples = { workspace = true } diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs index 3816e56ddfd58..0298f916d6bd2 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs +++ 
b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs @@ -16,9 +16,20 @@ //! Utilities for calculating maximum parachain block weight based on core assignments. +use crate::Config; +use codec::{Decode, DecodeWithMemTracking, Encode}; use cumulus_primitives_core::CumulusDigestItem; -use frame_support::weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}; +use frame_support::{ + dispatch::{DispatchInfo, PostDispatchInfo}, + pallet_prelude::{TransactionSource, TransactionValidityError, ValidTransaction}, + weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, +}; use polkadot_primitives::MAX_POV_SIZE; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{DispatchInfoOf, Dispatchable, Implication, PostDispatchInfoOf, TransactionExtension}, + DispatchResult, +}; /// A utility type for calculating the maximum block weight for a parachain based on /// the number of relay chain cores assigned and the target number of blocks. @@ -68,6 +79,130 @@ impl MaxParachainBlockWeight { } } +#[derive(Encode, Decode, DecodeWithMemTracking, TypeInfo)] +#[derive_where::derive_where(Clone, Eq, PartialEq, Default; S)] +#[scale_info(skip_type_params(T))] +pub struct DynamicMaxBlockWeight(pub S, core::marker::PhantomData); + +impl DynamicMaxBlockWeight { + /// Create a new `StorageWeightReclaim` instance. 
+ pub fn new(s: S) -> Self { + Self(s, Default::default()) + } +} + +impl From for DynamicMaxBlockWeight { + fn from(s: S) -> Self { + Self::new(s) + } +} + +impl core::fmt::Debug for DynamicMaxBlockWeight { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "DynamicMaxBlockWeight<{:?}>", self.0) + } +} + +impl> + TransactionExtension for DynamicMaxBlockWeight +where + T::RuntimeCall: Dispatchable, +{ + const IDENTIFIER: &'static str = "DynamicMaxBlockWeight"; + + type Implicit = S::Implicit; + + type Val = S::Val; + + type Pre = S::Pre; + + fn implicit(&self) -> Result { + self.0.implicit() + } + + fn metadata() -> Vec { + let mut inner = S::metadata(); + inner.push(sp_runtime::traits::TransactionExtensionMetadata { + identifier: "DynamicMaxBlockWeight", + ty: scale_info::meta_type::<()>(), + implicit: scale_info::meta_type::<()>(), + }); + inner + } + + fn weight(&self, _: &T::RuntimeCall) -> Weight { + Weight::zero() + } + + fn validate( + &self, + origin: T::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + self_implicit: Self::Implicit, + inherited_implication: &impl Implication, + source: TransactionSource, + ) -> Result<(ValidTransaction, Self::Val, T::RuntimeOrigin), TransactionValidityError> { + self.0 + .validate(origin, call, info, len, self_implicit, inherited_implication, source) + } + + fn prepare( + self, + val: Self::Val, + origin: &T::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + ) -> Result { + // TODO: Check the weight of the call + // Store in some storage item the current block number + the mode that we allow + // There should be the default mode of not allowing to overshoot, then the mode we allow to + // overshoot if the weight of the call is below the weight of one core but above one of the + // axis of the actual block weight. So, if we are above the max storage proof size or the + // ref time, we allow it to above. 
Use the digest to check if we are in the first block. + self.0.prepare(val, origin, call, info, len) + } + + fn post_dispatch( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &mut PostDispatchInfo, + len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + S::post_dispatch(pre, info, post_info, len, result) + } + + fn bare_validate( + call: &T::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + ) -> frame_support::pallet_prelude::TransactionValidity { + S::bare_validate(call, info, len) + } + + fn bare_validate_and_prepare( + call: &T::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(), TransactionValidityError> { + S::bare_validate_and_prepare(call, info, len) + } + + fn bare_post_dispatch( + info: &DispatchInfoOf, + post_info: &mut PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + S::bare_post_dispatch(info, post_info, len, result)?; + + frame_system::Pallet::::reclaim_weight(info, post_info) + } +} + #[cfg(test)] mod tests { use super::*; From 91563822b6af2fc7497d8fcb8389aaa36cf42417 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 25 Jul 2025 14:06:08 +0200 Subject: [PATCH 083/312] Adds new test for testing too big transactions --- Cargo.lock | 1 + cumulus/test/runtime/Cargo.toml | 2 + cumulus/test/runtime/src/lib.rs | 8 + .../tests/zombie_ci/elastic_scaling/mod.rs | 1 + .../pov_bundling_utility_weight.rs | 179 ++++++++++++++++++ 5 files changed, 191 insertions(+) create mode 100644 cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs diff --git a/Cargo.lock b/Cargo.lock index 33332063f5b77..2406cc030a9b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4934,6 +4934,7 @@ dependencies = [ "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", + "pallet-utility", "parity-scale-codec", "scale-info", "serde_json", diff --git a/cumulus/test/runtime/Cargo.toml 
b/cumulus/test/runtime/Cargo.toml index f766ad2076301..38a38ef09657e 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -27,6 +27,7 @@ pallet-session = { workspace = true } pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } +pallet-utility = { workspace = true } sp-api = { workspace = true } sp-block-builder = { workspace = true } sp-consensus-aura = { workspace = true } @@ -74,6 +75,7 @@ std = [ "pallet-sudo/std", "pallet-timestamp/std", "pallet-transaction-payment/std", + "pallet-utility/std", "parachain-info/std", "scale-info/std", "serde_json/std", diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 4e72f449ce4b2..9e57a5e37fc42 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -331,6 +331,13 @@ impl pallet_sudo::Config for Runtime { type WeightInfo = pallet_sudo::weights::SubstrateWeight; } +impl pallet_utility::Config for Runtime { + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type PalletsOrigin = OriginCaller; + type WeightInfo = pallet_utility::weights::SubstrateWeight; +} + impl pallet_glutton::Config for Runtime { type RuntimeEvent = RuntimeEvent; type AdminOrigin = EnsureRoot; @@ -390,6 +397,7 @@ construct_runtime! 
{ ParachainInfo: parachain_info, Balances: pallet_balances, Sudo: pallet_sudo, + Utility: pallet_utility, TransactionPayment: pallet_transaction_payment, TestPallet: test_pallet, Glutton: pallet_glutton, diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/mod.rs index 9a75132375659..612ccd5d3378e 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/mod.rs @@ -17,6 +17,7 @@ mod pov_bundling; mod pov_bundling_3cores_glutton; +mod pov_bundling_utility_weight; mod pov_recovery; mod slot_based_authoring; mod slot_based_rp_offset; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs new file mode 100644 index 0000000000000..3b9462fecac18 --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs @@ -0,0 +1,179 @@ +// This file is part of Cumulus. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use anyhow::anyhow; + +use cumulus_zombienet_sdk_helpers::{ + assert_finality_lag, assert_para_throughput, create_assign_core_call, + submit_extrinsic_and_wait_for_finalization_success, +}; +use polkadot_primitives::Id as ParaId; +use serde_json::json; +use zombienet_sdk::{ + subxt::{ + backend::{legacy::LegacyRpcMethods, rpc::RpcClient}, + ext::scale_value::value, + tx::DynamicPayload, + OnlineClient, PolkadotConfig, + }, + subxt_signer::sr25519::dev, + NetworkConfig, NetworkConfigBuilder, +}; + +const PARA_ID: u32 = 2400; + +/// A test that sends transactions using `pallet-utility` `with_weight` through `pallet-sudo`. +/// +/// This test starts with 3 cores assigned and sends two transactions: +/// 1. One with 1s ref_time +/// 2. One with half of the max PoV size +/// Each transaction is sent after the other and waits for finalization. +#[tokio::test(flavor = "multi_thread")] +async fn pov_bundling_utility_weight() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let config = build_network_config().await?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; + + let relay_node = network.get_node("validator-0")?; + let para_node = network.get_node("collator-1")?; + + let para_client: OnlineClient = para_node.wait_client().await?; + let relay_client: OnlineClient = relay_node.wait_client().await?; + let alice = dev::alice(); + + // Assign cores 0 and 1 to start with 3 cores total (core 2 is assigned by Zombienet) + let assign_cores_call = create_assign_core_call(&[(0, PARA_ID), (1, PARA_ID)]); + + relay_client + .tx() + .sign_and_submit_then_watch_default(&assign_cores_call, &alice) + .await + .inspect(|_| log::info!("Tx send, waiting for finalization"))? 
+ .wait_for_finalized_success() + .await?; + log::info!("3 cores total assigned to the parachain"); + + // Create and send first transaction: 1s ref_time using utility.with_weight + let ref_time_1s = 1_000_000_000_000u64; // 1 second in ref_time units + let first_call = create_utility_with_weight_call(ref_time_1s, 0); + let sudo_first_call = create_sudo_call(first_call); + + log::info!("Sending first transaction with 1s ref_time"); + submit_extrinsic_and_wait_for_finalization_success(¶_client, &sudo_first_call, &alice) + .await?; + log::info!("First transaction finalized"); + + // Create and send second transaction: half max PoV size using utility.with_weight + let max_pov_size = 10 * 1024 * 1024u64; // 10MB max PoV size + let half_max_pov = max_pov_size / 2; + let second_call = create_utility_with_weight_call(0, half_max_pov); + let sudo_second_call = create_sudo_call(second_call); + + log::info!("Sending second transaction with half max PoV size"); + submit_extrinsic_and_wait_for_finalization_success(¶_client, &sudo_second_call, &alice) + .await?; + log::info!("Second transaction finalized"); + + Ok(()) +} + +/// Creates a `pallet-utility` `with_weight` call +fn create_utility_with_weight_call(ref_time: u64, proof_size: u64) -> DynamicPayload { + // Create a simple remark call as the inner call + let remark_data = vec![0u8; proof_size as usize]; // Fill with dummy data for PoV size + let inner_call = + zombienet_sdk::subxt::tx::dynamic("System", "remark", vec![value!(remark_data)]); + + // Create the weight struct + let weight = value!({ + ref_time: ref_time, + proof_size: proof_size + }); + + // Create the utility.with_weight call + zombienet_sdk::subxt::tx::dynamic( + "Utility", + "with_weight", + vec![ + inner_call.into_value(), + value! 
{ + weight + }, + ], + ) +} + +/// Creates a `pallet-sudo` `sudo` call wrapping the inner call +fn create_sudo_call(inner_call: DynamicPayload) -> DynamicPayload { + zombienet_sdk::subxt::tx::dynamic("Sudo", "sudo", vec![inner_call.into_value()]) +} + +async fn build_network_config() -> Result { + let images = zombienet_sdk::environment::get_images_from_env(); + log::info!("Using images: {images:?}"); + NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=trace").into()]) + .with_default_resources(|resources| { + resources.with_request_cpu(4).with_request_memory("4G") + }) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "num_cores": 3, + "max_validators_per_core": 1 + } + } + } + })) + .with_node(|node| node.with_name("validator-0")); + (1..9).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) + }) + .with_parachain(|p| { + p.with_id(PARA_ID) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("pov-bundling") + .with_default_args(vec![ + ("--authoring").into(), + ("slot-based").into(), + ("-lparachain=debug,aura=trace").into(), + ]) + .with_collator(|n| n.with_name("collator-0")) + .with_collator(|n| n.with_name("collator-1")) + .with_collator(|n| n.with_name("collator-2")) + }) + .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { + Ok(val) => global_settings.with_base_dir(val), + _ => global_settings, + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + }) +} From 975a4b773510cbd48170cbe4c623913fa4c98fdc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 25 Jul 2025 21:50:57 +0200 Subject: [PATCH 084/312] Implement the 
transaction extension --- cumulus/pallets/parachain-system/src/lib.rs | 11 +- .../src/max_parachain_block_weight.rs | 143 +++++++++++++++--- cumulus/test/runtime/src/lib.rs | 37 +++-- 3 files changed, 149 insertions(+), 42 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 80523b37f07cc..1d1090c62f2d0 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -69,7 +69,7 @@ mod mock; mod tests; pub mod weights; -pub use max_parachain_block_weight::MaxParachainBlockWeight; +pub use max_parachain_block_weight::{DynamicMaxBlockWeight, MaxParachainBlockWeight}; pub use weights::WeightInfo; mod unincluded_segment; @@ -191,7 +191,7 @@ pub mod ump_constants { #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::pallet_prelude::*; + use frame_support::pallet_prelude::{ValueQuery, *}; use frame_system::pallet_prelude::*; #[pallet::pallet] @@ -477,6 +477,8 @@ pub mod pallet { weight += T::DbWeight::get().reads_writes(3, 2); } + BlockWeightMode::::kill(); + // Remove the validation from the old block. ValidationData::::kill(); // NOTE: Killing here is required to at least include the trie nodes down to the keys @@ -753,6 +755,11 @@ pub mod pallet { NotScheduled, } + #[pallet::storage] + #[pallet::whitelist_storage] + pub type BlockWeightMode = + StorageValue<_, max_parachain_block_weight::BlockWeightMode, OptionQuery>; + /// Latest included block descendants the runtime accepted. In other words, these are /// ancestors of the currently executing block which have not been included in the observed /// relay-chain state. 
diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs index d5ea3bceea5c0..894ce32d41598 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs @@ -22,21 +22,35 @@ use codec::{Decode, DecodeWithMemTracking, Encode}; use cumulus_primitives_core::CumulusDigestItem; use frame_support::{ dispatch::{DispatchInfo, PostDispatchInfo}, - pallet_prelude::{TransactionSource, TransactionValidityError, ValidTransaction}, + pallet_prelude::{ + InvalidTransaction, TransactionSource, TransactionValidityError, ValidTransaction, + }, weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, }; use polkadot_primitives::MAX_POV_SIZE; use scale_info::TypeInfo; +use sp_core::Get; use sp_runtime::{ traits::{DispatchInfoOf, Dispatchable, Implication, PostDispatchInfoOf, TransactionExtension}, DispatchResult, }; +#[derive(Debug, Encode, Decode, Clone, Copy, TypeInfo)] +pub enum BlockWeightMode { + FullCore, + FractionOfCore { first_transaction_index: u32 }, +} + /// A utility type for calculating the maximum block weight for a parachain based on /// the number of relay chain cores assigned and the target number of blocks. pub struct MaxParachainBlockWeight; impl MaxParachainBlockWeight { + // Maximum ref time per core + const MAX_REF_TIME_PER_CORE_NS: u64 = 2 * WEIGHT_REF_TIME_PER_SECOND; + const FULL_CORE_WEIGHT: Weight = + Weight::from_parts(Self::MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); + /// Calculate the maximum block weight based on target blocks and core assignments. /// /// This function examines the current block's digest from `frame_system::Digests` storage @@ -51,27 +65,46 @@ impl MaxParachainBlockWeight { /// # Returns /// Returns the calculated maximum weight, or a conservative default if no core info is found /// or if an error occurs during calculation. 
- pub fn get(target_blocks: u32) -> Weight { - // Maximum ref time per core - const MAX_REF_TIME_PER_CORE_NS: u64 = 2 * WEIGHT_REF_TIME_PER_SECOND; + pub fn get(target_blocks: u32) -> Weight { + // If we are in `on_initialize` or at applying the inherents, we should + // allow the full core weight. + if !frame_system::Pallet::::inherents_applied() { + return Self::FULL_CORE_WEIGHT + } + match crate::BlockWeightMode::::get() { + // We allow the full core. + Some(BlockWeightMode::FullCore) => return Self::FULL_CORE_WEIGHT, + // Either the runtime is not using the `DynamicMaxBlockWeight` extension or there is + // some bug. Because after the inherents are applied, this value should be set by the + // extension. To be on the safe side, we allow the full core weight. + None => return Self::FULL_CORE_WEIGHT, + // Let's calculate below how much weight we can use. + Some(BlockWeightMode::FractionOfCore { .. }) => (), + } + + Self::target_block_weight::(target_blocks) + } + + fn target_block_weight(target_blocks: u32) -> Weight { let digest = frame_system::Pallet::::digest(); let Some(core_info) = CumulusDigestItem::find_core_info(&digest) else { - return Weight::from_parts(MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); + return Self::FULL_CORE_WEIGHT; }; let number_of_cores = core_info.number_of_cores.0 as u32; // Ensure we have at least one core and valid target blocks if number_of_cores == 0 || target_blocks == 0 { - return Weight::from_parts(MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); + return Self::FULL_CORE_WEIGHT; } - let total_ref_time = (number_of_cores as u64).saturating_mul(MAX_REF_TIME_PER_CORE_NS); + let total_ref_time = + (number_of_cores as u64).saturating_mul(Self::MAX_REF_TIME_PER_CORE_NS); let ref_time_per_block = total_ref_time .saturating_div(target_blocks as u64) - .min(MAX_REF_TIME_PER_CORE_NS); + .min(Self::MAX_REF_TIME_PER_CORE_NS); let total_pov_size = (number_of_cores as u64).saturating_mul(MAX_POV_SIZE as u64); let proof_size_per_block = 
total_pov_size.saturating_div(target_blocks as u64); @@ -82,30 +115,38 @@ impl MaxParachainBlockWeight { #[derive(Encode, Decode, DecodeWithMemTracking, TypeInfo)] #[derive_where::derive_where(Clone, Eq, PartialEq, Default; S)] -#[scale_info(skip_type_params(T))] -pub struct DynamicMaxBlockWeight(pub S, core::marker::PhantomData); +#[scale_info(skip_type_params(T, TargetBlockRate))] +pub struct DynamicMaxBlockWeight( + pub S, + core::marker::PhantomData<(T, TargetBlockRate)>, +); -impl DynamicMaxBlockWeight { +impl DynamicMaxBlockWeight { /// Create a new `StorageWeightReclaim` instance. pub fn new(s: S) -> Self { Self(s, Default::default()) } } -impl From for DynamicMaxBlockWeight { +impl From for DynamicMaxBlockWeight { fn from(s: S) -> Self { Self::new(s) } } -impl core::fmt::Debug for DynamicMaxBlockWeight { +impl core::fmt::Debug + for DynamicMaxBlockWeight +{ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { write!(f, "DynamicMaxBlockWeight<{:?}>", self.0) } } -impl> - TransactionExtension for DynamicMaxBlockWeight +impl< + T: Config + Send + Sync, + S: TransactionExtension, + TargetBlockRate: Get + Send + Sync + 'static, + > TransactionExtension for DynamicMaxBlockWeight where T::RuntimeCall: Dispatchable, { @@ -115,7 +156,7 @@ where type Val = S::Val; - type Pre = S::Pre; + type Pre = (Option<(BlockWeightMode, BlockWeightMode)>, S::Pre); fn implicit(&self) -> Result { self.0.implicit() @@ -157,13 +198,48 @@ where info: &DispatchInfoOf, len: usize, ) -> Result { - // TODO: Check the weight of the call - // Store in some storage item the current block number + the mode that we allow - // There should be the default mode of not allowing to overshoot, then the mode we allow to - // overshoot if the weight of the call is below the weight of one core but above one of the - // axis of the actual block weight. So, if we are above the max storage proof size or the - // ref time, we allow it to above. 
Use the digest to check if we are in the first block. - self.0.prepare(val, origin, call, info, len) + let digest = frame_system::Pallet::::digest(); + + let bundle_info = CumulusDigestItem::find_bundle_info(&digest); + + let mode = if frame_system::Pallet::::inherents_applied() && + bundle_info.map_or(false, |bi| bi.index == 0) + { + let extrinsic_index = frame_system::Pallet::::extrinsic_index().unwrap_or_default(); + + crate::BlockWeightMode::::mutate(|mode| { + let current_mode = *mode.get_or_insert_with(|| BlockWeightMode::FractionOfCore { + first_transaction_index: extrinsic_index, + }); + + let mut new_mode = current_mode; + + match current_mode { + // We are already allowing the full core, not that much more to do here. + BlockWeightMode::FullCore => {}, + BlockWeightMode::FractionOfCore { first_transaction_index } => { + if info.total_weight().any_gt( + MaxParachainBlockWeight::target_block_weight::( + TargetBlockRate::get(), + ), + ) { + if extrinsic_index.saturating_sub(first_transaction_index) < 10 { + new_mode = BlockWeightMode::FullCore; + *mode = Some(new_mode); + } else { + return Err(InvalidTransaction::ExhaustsResources) + } + } + }, + }; + + Ok(Some((current_mode, new_mode))) + })? + } else { + None + }; + + self.0.prepare(val, origin, call, info, len).map(|r| (mode, r)) } fn post_dispatch( @@ -173,7 +249,26 @@ where len: usize, result: &DispatchResult, ) -> Result<(), TransactionValidityError> { - S::post_dispatch(pre, info, post_info, len, result) + let (mode, pre) = pre; + S::post_dispatch(pre, info, post_info, len, result)?; + + let Some(mode) = mode else { return Ok(()) }; + + match mode { + // If the previous one was already `FullCore`, we don't need to change anything. + (BlockWeightMode::FullCore, _) => {}, + // If the previous one was a fraction and we gave the transaction a `FullCore` we need + // to check if it used it. + (prev @ BlockWeightMode::FractionOfCore { .. 
}, BlockWeightMode::FullCore) => + if post_info.calc_actual_weight(info).all_lt( + MaxParachainBlockWeight::target_block_weight::(TargetBlockRate::get()), + ) { + crate::BlockWeightMode::::put(prev); + }, + (BlockWeightMode::FractionOfCore { .. }, BlockWeightMode::FractionOfCore { .. }) => (), + } + + Ok(()) } fn bare_validate( diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 9e57a5e37fc42..48f7585791e0b 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -211,14 +211,14 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used /// by Operational extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// Target number of blocks per relay chain slot. -const NUMBER_OF_BLOCKS_PER_RELAY_SLOT: u32 = 12; parameter_types! { + /// Target number of blocks per relay chain slot. + pub const NumberOfBlocksPerRelaySlot: u32 = 12; pub const BlockHashCount: BlockNumber = 4096; pub const Version: RuntimeVersion = VERSION; /// We allow for 1 second of compute with a 6 second average block time. - pub MaximumBlockWeight: Weight = cumulus_pallet_parachain_system::MaxParachainBlockWeight::get::(NUMBER_OF_BLOCKS_PER_RELAY_SLOT); + pub MaximumBlockWeight: Weight = cumulus_pallet_parachain_system::MaxParachainBlockWeight::get::(NumberOfBlocksPerRelaySlot::get()); pub RuntimeBlockLength: BlockLength = BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() @@ -434,19 +434,24 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; /// The extension to the basic transaction logic. 
-pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim< +pub type TxExtension = cumulus_pallet_parachain_system::DynamicMaxBlockWeight< Runtime, - ( - frame_system::AuthorizeCall, - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, - ), + cumulus_pallet_weight_reclaim::StorageWeightReclaim< + Runtime, + ( + frame_system::AuthorizeCall, + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, + ) + >, + NumberOfBlocksPerRelaySlot >; + /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; @@ -639,9 +644,9 @@ impl_runtime_apis! { impl cumulus_primitives_core::SlotSchedule for Runtime { fn next_slot_schedule(num_cores: u32) -> Vec { - let block_time = Duration::from_secs(2) * num_cores / NUMBER_OF_BLOCKS_PER_RELAY_SLOT; + let block_time = Duration::from_secs(2) * num_cores / NumberOfBlocksPerRelaySlot::get(); - vec![block_time.min(Duration::from_millis(500)); NUMBER_OF_BLOCKS_PER_RELAY_SLOT as usize] + vec![block_time.min(Duration::from_millis(500)); NumberOfBlocksPerRelaySlot::get() as usize] } } } From a0408574056248c7fc8672b7bcebbef3081649cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sun, 27 Jul 2025 14:50:33 +0200 Subject: [PATCH 085/312] Forward the `BundleInfo` inherent digest --- .../collators/slot_based/block_builder_task.rs | 15 ++++++++++++--- cumulus/test/runtime/src/lib.rs | 4 ++-- cumulus/test/service/src/lib.rs | 6 +++--- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs 
b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index eb46248b7bff4..820693bc00a26 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -34,8 +34,8 @@ use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockIm use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; use cumulus_primitives_core::{ - extract_relay_parent, rpsr_digest, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, - PersistedValidationData, RelayParentOffsetApi, SlotSchedule, + extract_relay_parent, rpsr_digest, BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, + CumulusDigestItem, PersistedValidationData, RelayParentOffsetApi, SlotSchedule, }; use cumulus_relay_chain_interface::RelayChainInterface; use futures::prelude::*; @@ -472,6 +472,8 @@ where // We require that the next node has imported our last block before it can start building // the next block. To ensure that the next node is able to do so, we are skipping the last // block in the parachain slot. In the future this can be removed again. 
+ let is_last = block_index + 1 == num_blocks || + (block_index + 2 == num_blocks && num_blocks > 1 && is_last_core_in_parachain_slot); if block_index + 1 == num_blocks && num_blocks > 1 && is_last_core_in_parachain_slot { tracing::debug!( target: LOG_TARGET, @@ -529,7 +531,14 @@ where .build_block_and_import( &parent_header, slot_claim, - Some(vec![CumulusDigestItem::CoreInfo(core_info.clone()).to_digest_item()]), + Some(vec![ + CumulusDigestItem::CoreInfo(core_info.clone()).to_digest_item(), + CumulusDigestItem::BundleInfo(BundleInfo { + index: block_index as u8, + maybe_last: is_last, + }) + .to_digest_item(), + ]), (parachain_inherent_data, other_inherent_data), authoring_duration, allowed_pov_size, diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 48f7585791e0b..2020f5576b892 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -447,9 +447,9 @@ pub type TxExtension = cumulus_pallet_parachain_system::DynamicMaxBlockWeight< frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, - ) + ), >, - NumberOfBlocksPerRelaySlot + NumberOfBlocksPerRelaySlot, >; /// Unchecked extrinsic type as expected by this runtime. 
diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index a1d88e8bd5b57..b1781482f3f37 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -981,7 +981,7 @@ pub fn construct_extrinsic( .map(|c| c / 2) .unwrap_or(2) as u64; let tip = 0; - let tx_ext: runtime::TxExtension = ( + let tx_ext: runtime::TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim::from(( frame_system::AuthorizeCall::::new(), frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), @@ -993,8 +993,8 @@ pub fn construct_extrinsic( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - ) - .into(); + )) + .into(); let raw_payload = runtime::SignedPayload::from_raw( function.clone(), tx_ext.clone(), From 7075f4fff1f89d7543a53ab27f08164febc3f641 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 28 Jul 2025 23:26:29 +0200 Subject: [PATCH 086/312] Forward the ignored nodes --- cumulus/client/consensus/aura/src/collator.rs | 5 ++++- .../collators/slot_based/block_builder_task.rs | 1 + cumulus/client/consensus/proposer/src/lib.rs | 6 ++++-- .../zombienet/zombienet-sdk-helpers/src/lib.rs | 15 --------------- 4 files changed, 9 insertions(+), 18 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index ebac7f3e94577..9843ebd11ab50 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -45,7 +45,7 @@ use futures::prelude::*; use sc_client_api::BackendTransaction; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction}; use sc_consensus_aura::standalone as aura_internal; -use sp_api::{ProvideRuntimeApi, StorageProof}; +use sp_api::{ProofRecorderIgnoredNodes, ProvideRuntimeApi, StorageProof}; use sp_application_crypto::AppPublic; use 
sp_consensus::BlockOrigin; use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; @@ -212,6 +212,7 @@ where inherent_data: (ParachainInherentData, InherentData), proposal_duration: Duration, max_pov_size: usize, + ignored_nodes_by_proof_recording: Option>, ) -> Result>, Box> { let mut digest = additional_pre_digest.into().unwrap_or_default(); digest.push(slot_claim.pre_digest.clone()); @@ -225,6 +226,7 @@ where Digest { logs: digest }, proposal_duration, Some(max_pov_size), + ignored_nodes_by_proof_recording, ) .await .map_err(|e| Box::new(e) as Box)?; @@ -291,6 +293,7 @@ where inherent_data, proposal_duration, max_pov_size, + None, ) .await?; diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 820693bc00a26..4df8950536b69 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -542,6 +542,7 @@ where (parachain_inherent_data, other_inherent_data), authoring_duration, allowed_pov_size, + Some(ignored_nodes.clone()), ) .await else { diff --git a/cumulus/client/consensus/proposer/src/lib.rs b/cumulus/client/consensus/proposer/src/lib.rs index b392cc2073d94..3a58e9515eb29 100644 --- a/cumulus/client/consensus/proposer/src/lib.rs +++ b/cumulus/client/consensus/proposer/src/lib.rs @@ -25,7 +25,7 @@ use cumulus_primitives_parachain_inherent::ParachainInherentData; use sc_basic_authorship::{ProposeArgs, ProposerFactory}; use sc_block_builder::BlockBuilderApi; use sc_transaction_pool_api::TransactionPool; -use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi}; +use sp_api::{ApiExt, CallApiAt, ProofRecorderIgnoredNodes, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_consensus::{EnableProofRecording, Environment, Proposal}; use sp_inherents::{InherentData, InherentDataProvider}; @@ -80,6 +80,7 @@ pub trait ProposerInterface { 
inherent_digests: Digest, max_duration: Duration, block_size_limit: Option, + ignored_nodes_by_proof_recording: Option>, ) -> Result>, Error>; } @@ -99,6 +100,7 @@ where inherent_digests: Digest, max_duration: Duration, block_size_limit: Option, + ignored_nodes_by_proof_recording: Option>, ) -> Result>, Error> { let proposer = self .init(parent_header) @@ -117,7 +119,7 @@ where inherent_digests, max_duration, block_size_limit, - ignored_nodes_by_proof_recording: None, + ignored_nodes_by_proof_recording, }) .await .map(Some) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 2d5211ab9fb5b..a4340a36f7abf 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -567,21 +567,6 @@ pub async fn assert_relay_parent_offset( Ok(()) } -/// Extract relay parent information from the digest logs. -fn extract_relay_parent_storage_root( - digest: &DigestItem, -) -> Option<(relay_chain::Hash, relay_chain::BlockNumber)> { - match digest { - DigestItem::Consensus(id, val) if id == &RPSR_CONSENSUS_ID => { - let (h, n): (relay_chain::Hash, Compact) = - Decode::decode(&mut &val[..]).ok()?; - - Some((h, n.0)) - }, - _ => None, - } -} - pub async fn submit_extrinsic_and_wait_for_finalization_success>( client: &OnlineClient, call: &DynamicPayload, From 49f5fdb2cbd7f1c85cdc11b85b601775bbf744ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 28 Jul 2025 23:27:17 +0200 Subject: [PATCH 087/312] Handle block import --- .../src/collators/slot_based/block_import.rs | 97 ++++++++++++++----- .../polkadot-omni-node/lib/src/common/aura.rs | 8 +- .../polkadot-omni-node/lib/src/nodes/aura.rs | 14 ++- cumulus/primitives/core/src/lib.rs | 12 ++- cumulus/test/service/src/lib.rs | 9 +- 5 files changed, 105 insertions(+), 35 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs 
b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs index 27588c661d533..4e85b84406263 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -15,13 +15,21 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . +use codec::Codec; +use cumulus_primitives_core::{CoreInfo, CumulusDigestItem, RelayBlockIdentifier}; use futures::{stream::FusedStream, StreamExt}; +use parking_lot::Mutex; use sc_consensus::{BlockImport, StateAction}; +use sc_consensus_aura::{find_pre_digest, standalone::fetch_authorities}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use sp_api::{ApiExt, CallApiAt, CallContext, Core, ProvideRuntimeApi, StorageProof}; -use sp_runtime::traits::{Block as BlockT, Header as _}; -use sp_trie::proof_size_extension::ProofSizeExt; -use std::sync::Arc; +use sp_api::{ + ApiExt, CallApiAt, CallContext, Core, ProofRecorder, ProofRecorderIgnoredNodes, + ProvideRuntimeApi, StorageProof, +}; +use sp_consensus_aura::AuraApi; +use sp_runtime::traits::{Block as BlockT, HashingFor, Header as _}; +use sp_trie::{proof_size_extension::ProofSizeExt, recorder::IgnoredNodes}; +use std::{collections::HashMap, marker::PhantomData, sync::Arc}; /// Handle for receiving the block and the storage proof from the [`SlotBasedBlockImport`]. /// @@ -46,14 +54,23 @@ impl SlotBasedBlockImportHandle { } } +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +struct PoVBundle { + relay_block_identifier: RelayBlockIdentifier, + core_info: CoreInfo, + author_index: usize, +} + /// Special block import for the slot based collator. 
-pub struct SlotBasedBlockImport { +pub struct SlotBasedBlockImport { inner: BI, client: Arc, sender: TracingUnboundedSender<(Block, StorageProof)>, + nodes_to_ignore: Arc>>>, + _phantom: PhantomData, } -impl SlotBasedBlockImport { +impl SlotBasedBlockImport { /// Create a new instance. /// /// The returned [`SlotBasedBlockImportHandle`] needs to be passed to the @@ -62,25 +79,44 @@ impl SlotBasedBlockImport { pub fn new(inner: BI, client: Arc) -> (Self, SlotBasedBlockImportHandle) { let (sender, receiver) = tracing_unbounded("SlotBasedBlockImportChannel", 1000); - (Self { sender, client, inner }, SlotBasedBlockImportHandle { receiver }) + ( + Self { + sender, + client, + inner, + nodes_to_ignore: Default::default(), + _phantom: PhantomData, + }, + SlotBasedBlockImportHandle { receiver }, + ) } } -impl Clone for SlotBasedBlockImport { +impl Clone + for SlotBasedBlockImport +{ fn clone(&self) -> Self { - Self { inner: self.inner.clone(), client: self.client.clone(), sender: self.sender.clone() } + Self { + inner: self.inner.clone(), + client: self.client.clone(), + sender: self.sender.clone(), + nodes_to_ignore: self.nodes_to_ignore.clone(), + _phantom: PhantomData, + } } } #[async_trait::async_trait] -impl BlockImport for SlotBasedBlockImport +impl BlockImport + for SlotBasedBlockImport where Block: BlockT, BI: BlockImport + Send + Sync, BI::Error: Into, Client: ProvideRuntimeApi + CallApiAt + Send + Sync, Client::StateBackend: Send, - Client::Api: Core, + Client::Api: Core + AuraApi, + AuthorityId: Codec + Send + Sync + std::fmt::Debug, { type Error = sp_consensus::Error; @@ -95,20 +131,32 @@ where &self, mut params: sc_consensus::BlockImportParams, ) -> Result { - // If the channel exists and it is required to execute the block, we will execute the block - // here. This is done to collect the storage proof and to prevent re-execution, we push - // downwards the state changes. 
`StateAction::ApplyChanges` is ignored, because it either - // means that the node produced the block itself or the block was imported via state sync. - if !self.sender.is_closed() && !matches!(params.state_action, StateAction::ApplyChanges(_)) + let core_info = CumulusDigestItem::find_core_info(params.header.digest()); + let relay_block_identifier = + CumulusDigestItem::find_relay_block_identifier(params.header.digest()); + + if let (Some(core_info), Some(relay_block_identifier)) = (core_info, relay_block_identifier) { + let slot = find_pre_digest::(¶ms.header) + .map_err(|error| sp_consensus::Error::Other(Box::new(error)))?; + let authorities = fetch_authorities(&*self.client, *params.header.parent_hash())?; + + let pov_bundle = PoVBundle { + author_index: *slot as usize % authorities.len(), + core_info, + relay_block_identifier, + }; + + let mut nodes_to_ignore = self.nodes_to_ignore.lock(); + let nodes_to_ignore = nodes_to_ignore.entry(pov_bundle).or_default(); + + let recorder = ProofRecorder::::with_ignored_nodes(nodes_to_ignore.clone()); + let mut runtime_api = self.client.runtime_api(); runtime_api.set_call_context(CallContext::Onchain); - runtime_api.record_proof(); - let recorder = runtime_api - .proof_recorder() - .expect("Proof recording is enabled in the line above; qed."); + runtime_api.set_proof_recorder(recorder.clone()); runtime_api.register_extension(ProofSizeExt::new(recorder)); let parent_hash = *params.header.parent_hash(); @@ -116,7 +164,7 @@ where let block = Block::new(params.header.clone(), params.body.clone().unwrap_or_default()); runtime_api - .execute_block(parent_hash, block.clone()) + .execute_block(parent_hash, block) .map_err(|e| Box::new(e) as Box<_>)?; let storage_proof = @@ -133,11 +181,14 @@ where ))) } + nodes_to_ignore + .extend(IgnoredNodes::from_storage_proof::>(&storage_proof)); + nodes_to_ignore + .extend(IgnoredNodes::from_memory_db(gen_storage_changes.transaction.clone())); + params.state_action = 
StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes( gen_storage_changes, )); - - let _ = self.sender.unbounded_send((block, storage_proof)); } self.inner.import_block(params).await.map_err(Into::into) diff --git a/cumulus/polkadot-omni-node/lib/src/common/aura.rs b/cumulus/polkadot-omni-node/lib/src/common/aura.rs index 9ca725ff3279a..497b9a6f0bfe4 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/aura.rs @@ -54,6 +54,8 @@ pub trait AuraRuntimeApi: + AuraApi::Public> + AuraUnincludedSegmentApi + Sized +where + ::Public: std::fmt::Debug, { /// Check if the runtime has the Aura API. fn has_aura_api(&self, at: Block::Hash) -> bool { @@ -62,9 +64,11 @@ pub trait AuraRuntimeApi: } } -impl AuraRuntimeApi for T where +impl AuraRuntimeApi for T +where T: sp_api::ApiExt + AuraApi::Public> - + AuraUnincludedSegmentApi + + AuraUnincludedSegmentApi, + ::Public: std::fmt::Debug, { } diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs index 3d743f4c37225..872f0e2427122 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs @@ -66,14 +66,14 @@ use sc_service::{Configuration, Error, TaskManager}; use sc_telemetry::TelemetryHandle; use sc_transaction_pool::TransactionPoolHandle; use sp_api::ProvideRuntimeApi; -use sp_core::traits::SpawnNamed; +use sp_core::{traits::SpawnNamed, Pair}; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::{ app_crypto::AppCrypto, traits::{Block as BlockT, Header as HeaderT}, }; -use std::{marker::PhantomData, sync::Arc, time::Duration}; +use std::{fmt::Debug, marker::PhantomData, sync::Arc, time::Duration}; struct Verifier { client: Arc, @@ -220,7 +220,7 @@ where + substrate_frame_rpc_system::AccountNonceApi + SlotSchedule + GetParachainInfo, - AuraId: AuraIdT + Sync, + AuraId: AuraIdT + Sync + Debug, { if 
extra_args.authoring_policy == AuthoringPolicy::SlotBased { Box::new(AuraNode::< @@ -251,7 +251,7 @@ impl, RuntimeApi, AuraId> where RuntimeApi: ConstructNodeRuntimeApi>, RuntimeApi::RuntimeApi: AuraRuntimeApi + SlotSchedule, - AuraId: AuraIdT + Sync, + AuraId: AuraIdT + Sync + Debug, { #[docify::export_content] fn launch_slot_based_collator( @@ -263,6 +263,7 @@ where Block, Arc>, ParachainClient, + ::Public, >, >, CIDP, @@ -299,13 +300,14 @@ impl, RuntimeApi, AuraId> Block, Arc>, ParachainClient, + ::Public, >, SlotBasedBlockImportHandle, > for StartSlotBasedAuraConsensus where RuntimeApi: ConstructNodeRuntimeApi>, RuntimeApi::RuntimeApi: AuraRuntimeApi + SlotSchedule, - AuraId: AuraIdT + Sync, + AuraId: AuraIdT + Sync + Debug, { fn start_consensus( client: Arc>, @@ -315,6 +317,7 @@ where Block, Arc>, ParachainClient, + ::Public, >, >, prometheus_registry: Option<&Registry>, @@ -391,6 +394,7 @@ where Block, Arc>, ParachainClient, + ::Public, >; type BlockImportAuxiliaryData = SlotBasedBlockImportHandle; diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index cfcb6fe4d877c..d69ff4600f675 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -217,7 +217,7 @@ pub enum ServiceQuality { pub const CUMULUS_CONSENSUS_ID: ConsensusEngineId = *b"CMLS"; /// Information about the core on the relay chain this block will be validated on. -#[derive(Clone, Debug, Decode, Encode, PartialEq)] +#[derive(Clone, Debug, Decode, Encode, PartialEq, Eq)] pub struct CoreInfo { /// The selector that determines the actual core at `claim_queue_offset`. pub selector: CoreSelector, @@ -227,6 +227,14 @@ pub struct CoreInfo { pub number_of_cores: Compact, } +impl core::hash::Hash for CoreInfo { + fn hash(&self, state: &mut H) { + state.write_u8(self.selector.0); + state.write_u8(self.claim_queue_offset.0); + state.write_u16(self.number_of_cores.0); + } +} + /// Information about a block that is part of a PoV bundle. 
#[derive(Clone, Debug, Decode, Encode, PartialEq)] pub struct BundleInfo { @@ -240,7 +248,7 @@ pub struct BundleInfo { } /// Identifier for a relay chain block used by [`CumulusDigestItem`]. -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Hash, Eq)] pub enum RelayBlockIdentifier { /// The block is identified using its block hash. ByHash(relay_chain::Hash), diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index b1781482f3f37..e931aae702151 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -37,7 +37,7 @@ use cumulus_client_consensus_aura::{ use prometheus::Registry; use runtime::AccountId; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; -use sp_consensus_aura::sr25519::AuthorityPair; +use sp_consensus_aura::sr25519::{AuthorityId, AuthorityPair}; use std::{ collections::HashSet, future::Future, @@ -133,8 +133,11 @@ pub type Client = TFullClient; /// The block-import type being used by the test service. -pub type ParachainBlockImport = - TParachainBlockImport, Client>, Backend>; +pub type ParachainBlockImport = TParachainBlockImport< + Block, + SlotBasedBlockImport, Client, AuthorityId>, + Backend, +>; /// Transaction pool type used by the test service pub type TransactionPool = Arc>; From d34efa1ed6850a70f95f75273e38e54a8ce0b93e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 29 Jul 2025 16:52:33 +0200 Subject: [PATCH 088/312] cumulus zombienet: Send transactions as immortal Deep inside subxt the default period for a transaction is set to 32 blocks. When you have some chain that is building blocks every 500ms, this may lead to issues that manifest as invalid transaction signatures. To protect the poor developers from endless debugging sessions we now send transactions as immortal.
--- .../zombienet-sdk-helpers/src/lib.rs | 54 +++++++++++-------- .../parachain_extrinsic_get_finalized.rs | 4 +- 2 files changed, 34 insertions(+), 24 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 92058c65be1cd..aaf3d70c44264 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -14,7 +14,7 @@ use tokio::{ use zombienet_sdk::subxt::{ self, blocks::Block, - config::{substrate::DigestItem, ExtrinsicParams}, + config::{signed_extensions::CheckMortalityParams, substrate::DigestItem, ExtrinsicParams}, dynamic::Value, events::Events, ext::scale_value::value, @@ -295,15 +295,26 @@ fn extract_relay_parent_storage_root( } } -pub async fn submit_extrinsic_and_wait_for_finalization_success>( - client: &OnlineClient, +/// Submits the given `call` as transaction and waits for it successful finalization. +/// +/// The transaction is send as immortal transaction. +pub async fn submit_extrinsic_and_wait_for_finalization_success>( + client: &OnlineClient, call: &DynamicPayload, signer: &S, -) -> Result<(), anyhow::Error> -where - >::Params: Default, -{ - let mut tx = client.tx().sign_and_submit_then_watch_default(call, signer).await?; +) -> Result<(), anyhow::Error> { + let mut extensions: <::ExtrinsicParams as ExtrinsicParams< + PolkadotConfig, + >>::Params = Default::default(); + + extensions.4 = CheckMortalityParams::::immortal(); + + let mut tx = client + .tx() + .create_signed(call, signer, extensions) + .await? + .submit_and_watch() + .await?; // Below we use the low level API to replicate the `wait_for_in_block` behaviour // which was removed in subxt 0.33.0. See https://github.com/paritytech/subxt/pull/1237. @@ -327,18 +338,18 @@ where Ok(()) } +/// Submits the given `call` as transaction and waits `timeout_secs` for it successful finalization. 
+/// +/// If the transaction does not reach the finalized state in `timeout_secs` an error is returned. +/// The transaction is sent as an immortal transaction. pub async fn submit_extrinsic_and_wait_for_finalization_success_with_timeout < - C: Config, - S: Signer, + S: Signer, >( - client: &OnlineClient, + client: &OnlineClient, call: &DynamicPayload, signer: &S, timeout_secs: impl Into, -) -> Result<(), anyhow::Error> -where - >::Params: Default, -{ +) -> Result<(), anyhow::Error> { let secs = timeout_secs.into(); let res = tokio::time::timeout( Duration::from_secs(secs), @@ -346,17 +357,15 @@ where ) .await; - if let Ok(inner_res) = res { - match inner_res { - Ok(_) => Ok(()), - Err(e) => Err(anyhow!("Error waiting for metric: {}", e)), - } - } else { + match res { + Ok(Ok(_)) => Ok(()), + Ok(Err(e)) => Err(anyhow!("Error waiting for metric: {}", e)), // timeout - Err(anyhow!("Timeout ({secs}), waiting for extrinsic finalization")) + Err(_) => Err(anyhow!("Timeout ({secs}), waiting for extrinsic finalization")), } } +/// Asserts that the given `para_id` is registered at the relay chain.
pub async fn assert_para_is_registered( relay_client: &OnlineClient, para_id: ParaId, @@ -392,5 +401,6 @@ pub async fn assert_para_is_registered( } blocks_cnt += 1; } + Err(anyhow!("No more blocks to check")) } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/parachain_extrinsic_get_finalized.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/parachain_extrinsic_get_finalized.rs index e362f25a6c9d9..f3a7a261aead9 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/parachain_extrinsic_get_finalized.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/parachain_extrinsic_get_finalized.rs @@ -8,7 +8,7 @@ use crate::utils::{initialize_network, BEST_BLOCK_METRIC}; use cumulus_zombienet_sdk_helpers::submit_extrinsic_and_wait_for_finalization_success_with_timeout; use zombienet_orchestrator::network::node::{LogLineCount, LogLineCountOptions}; use zombienet_sdk::{ - subxt::{self, dynamic::Value, OnlineClient, SubstrateConfig}, + subxt::{self, dynamic::Value, OnlineClient, PolkadotConfig}, subxt_signer::sr25519::dev, NetworkConfig, NetworkConfigBuilder, }; @@ -105,7 +105,7 @@ async fn parachain_extrinsic_gets_finalized() -> Result<(), anyhow::Error> { log::info!("Ensuring parachain extrinsic gets finalized"); let call = subxt::dynamic::tx("System", "remark", vec![Value::from_bytes("xxx".as_bytes())]); - let charlie_client: OnlineClient = charlie.wait_client().await?; + let charlie_client: OnlineClient = charlie.wait_client().await?; let res = submit_extrinsic_and_wait_for_finalization_success_with_timeout( &charlie_client, From 55f057f81e00bda0fcd10563dd8b17d2abff165e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 29 Jul 2025 17:46:11 +0200 Subject: [PATCH 089/312] Some improvements --- Cargo.lock | 2 ++ cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs | 1 - cumulus/zombienet/zombienet-sdk/Cargo.toml | 2 ++ .../elastic_scaling/pov_bundling_utility_weight.rs | 13 ++++++++----- 4 files changed, 12 insertions(+), 6 
deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2406cc030a9b7..96ad0004a3864 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5051,8 +5051,10 @@ name = "cumulus-zombienet-sdk-tests" version = "0.1.0" dependencies = [ "anyhow", + "cumulus-primitives-core", "cumulus-zombienet-sdk-helpers", "env_logger 0.11.3", + "frame-support", "futures", "log", "polkadot-primitives", diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index a4340a36f7abf..04a0304019a91 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -399,7 +399,6 @@ pub async fn assert_para_blocks_throughput( } } - dbg!(finalized_parachain_blocks.len()); if finalized_parachain_blocks.is_empty() { break } diff --git a/cumulus/zombienet/zombienet-sdk/Cargo.toml b/cumulus/zombienet/zombienet-sdk/Cargo.toml index bc88bbcaf037b..ac9cc1a5e0856 100644 --- a/cumulus/zombienet/zombienet-sdk/Cargo.toml +++ b/cumulus/zombienet/zombienet-sdk/Cargo.toml @@ -23,6 +23,8 @@ cumulus-zombienet-sdk-helpers = { workspace = true } sp-statement-store = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +frame-support = { workspace = true } +cumulus-primitives-core = { workspace = true } [features] zombie-ci = [] diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs index 3b9462fecac18..82c7ad1b3732a 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs @@ -17,10 +17,12 @@ use anyhow::anyhow; +use cumulus_primitives_core::relay_chain::MAX_POV_SIZE; use cumulus_zombienet_sdk_helpers::{ 
assert_finality_lag, assert_para_throughput, create_assign_core_call, submit_extrinsic_and_wait_for_finalization_success, }; +use frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND; use polkadot_primitives::Id as ParaId; use serde_json::json; use zombienet_sdk::{ @@ -73,7 +75,9 @@ async fn pov_bundling_utility_weight() -> Result<(), anyhow::Error> { log::info!("3 cores total assigned to the parachain"); // Create and send first transaction: 1s ref_time using utility.with_weight - let ref_time_1s = 1_000_000_000_000u64; // 1 second in ref_time units + // + // While we only should have 500ms available. + let ref_time_1s = WEIGHT_REF_TIME_PER_SECOND; let first_call = create_utility_with_weight_call(ref_time_1s, 0); let sudo_first_call = create_sudo_call(first_call); @@ -82,10 +86,9 @@ async fn pov_bundling_utility_weight() -> Result<(), anyhow::Error> { .await?; log::info!("First transaction finalized"); - // Create and send second transaction: half max PoV size using utility.with_weight - let max_pov_size = 10 * 1024 * 1024u64; // 10MB max PoV size - let half_max_pov = max_pov_size / 2; - let second_call = create_utility_with_weight_call(0, half_max_pov); + // Create a transaction that uses more than the allowed POV size per block. 
+ let pov_size = MAX_POV_SIZE / 4 + 512 * 1024; + let second_call = create_utility_with_weight_call(0, pov_size as u64); let sudo_second_call = create_sudo_call(second_call); log::info!("Sending second transaction with half max PoV size"); From 3f7f4ed4884000969e4aa5affeb3551b3ba6021b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 29 Jul 2025 23:53:16 +0200 Subject: [PATCH 090/312] Ensure the block is the first in a core --- .../zombienet-sdk-helpers/src/lib.rs | 32 +++++++------ .../pov_bundling_utility_weight.rs | 45 +++++++++++++++---- 2 files changed, 55 insertions(+), 22 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 6b329e5f26136..9c52ce4e17580 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -18,7 +18,9 @@ use zombienet_sdk::subxt::{ self, backend::legacy::LegacyRpcMethods, blocks::Block, - config::{signed_extensions::CheckMortalityParams, substrate::DigestItem, ExtrinsicParams, Header}, + config::{ + signed_extensions::CheckMortalityParams, substrate::DigestItem, ExtrinsicParams, Header, + }, dynamic::Value, events::Events, ext::scale_value::value, @@ -226,7 +228,7 @@ async fn is_session_change( } /// Returns [`CoreInfo`] for the given parachain block. 
-fn find_core_info( +pub fn find_core_info( block: &Block>, ) -> Result { let substrate_digest = @@ -573,7 +575,7 @@ pub async fn submit_extrinsic_and_wait_for_finalization_success, call: &DynamicPayload, signer: &S, -) -> Result<(), anyhow::Error> { +) -> Result { let mut extensions: <::ExtrinsicParams as ExtrinsicParams< PolkadotConfig, >>::Params = Default::default(); @@ -589,24 +591,28 @@ pub async fn submit_extrinsic_and_wait_for_finalization_success { - let _result = tx_in_block.wait_for_success().await?; - let block_status = - if status.as_finalized().is_some() { "Finalized" } else { "Best" }; - log::info!("[{}] In block: {:#?}", block_status, tx_in_block.block_hash()); + while let Some(status) = tx.next().await.transpose()? { + match status { + TxStatus::InBestBlock(tx_in_block) => { + tx_in_block.wait_for_success().await?; + + log::info!("[Best] In block: {:#?}", tx_in_block.block_hash()); + }, + TxStatus::InFinalizedBlock(ref tx_in_block) => { + tx_in_block.wait_for_success().await?; + log::info!("[Finalized] In block: {:#?}", tx_in_block.block_hash()); + return Ok(tx_in_block.block_hash()) }, TxStatus::Error { message } | TxStatus::Invalid { message } | TxStatus::Dropped { message } => { - return Err(anyhow::format_err!("Error submitting tx: {message}")); + return Err(anyhow::anyhow!("Error submitting tx: {message}")); }, _ => continue, } } - Ok(()) + + Err(anyhow::anyhow!("Transaction event stream ended without reaching the finalized state")) } /// Submits the given `call` as transaction and waits `timeout_secs` for it successful finalization. 
diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs index 82c7ad1b3732a..ff750e12af317 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs @@ -19,7 +19,7 @@ use anyhow::anyhow; use cumulus_primitives_core::relay_chain::MAX_POV_SIZE; use cumulus_zombienet_sdk_helpers::{ - assert_finality_lag, assert_para_throughput, create_assign_core_call, + assert_finality_lag, assert_para_throughput, create_assign_core_call, find_core_info, submit_extrinsic_and_wait_for_finalization_success, }; use frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND; @@ -27,10 +27,7 @@ use polkadot_primitives::Id as ParaId; use serde_json::json; use zombienet_sdk::{ subxt::{ - backend::{legacy::LegacyRpcMethods, rpc::RpcClient}, - ext::scale_value::value, - tx::DynamicPayload, - OnlineClient, PolkadotConfig, + ext::scale_value::value, tx::DynamicPayload, utils::H256, OnlineClient, PolkadotConfig, }, subxt_signer::sr25519::dev, NetworkConfig, NetworkConfigBuilder, @@ -82,20 +79,26 @@ async fn pov_bundling_utility_weight() -> Result<(), anyhow::Error> { let sudo_first_call = create_sudo_call(first_call); log::info!("Sending first transaction with 1s ref_time"); - submit_extrinsic_and_wait_for_finalization_success(¶_client, &sudo_first_call, &alice) - .await?; + let block_hash = + submit_extrinsic_and_wait_for_finalization_success(¶_client, &sudo_first_call, &alice) + .await?; log::info!("First transaction finalized"); + ensure_is_first_block_in_core(¶_client, block_hash).await?; + // Create a transaction that uses more than the allowed POV size per block. 
let pov_size = MAX_POV_SIZE / 4 + 512 * 1024; let second_call = create_utility_with_weight_call(0, pov_size as u64); let sudo_second_call = create_sudo_call(second_call); log::info!("Sending second transaction with half max PoV size"); - submit_extrinsic_and_wait_for_finalization_success(¶_client, &sudo_second_call, &alice) - .await?; + let block_hash = + submit_extrinsic_and_wait_for_finalization_success(¶_client, &sudo_second_call, &alice) + .await?; log::info!("Second transaction finalized"); + ensure_is_first_block_in_core(¶_client, block_hash).await?; + Ok(()) } @@ -130,6 +133,30 @@ fn create_sudo_call(inner_call: DynamicPayload) -> DynamicPayload { zombienet_sdk::subxt::tx::dynamic("Sudo", "sudo", vec![inner_call.into_value()]) } +/// Checks if the given `block_hash` is the first block in a core. +async fn ensure_is_first_block_in_core( + para_client: &OnlineClient, + block_hash: H256, +) -> Result<(), anyhow::Error> { + let block = para_client.blocks().at(block_hash).await?; + let core_info = find_core_info(&block)?; + + let parent = para_client.blocks().at(block.header().parent_hash).await?; + + // Genesis is for sure on a different core :) + if parent.number() == 0 { + return Ok(()) + } + + let parent_core_info = find_core_info(&parent)?; + + if core_info == parent_core_info { + Err(anyhow::anyhow!("Not first block in core")) + } else { + Ok(()) + } +} + async fn build_network_config() -> Result { let images = zombienet_sdk::environment::get_images_from_env(); log::info!("Using images: {images:?}"); From e4081658c11b4c5ad0048b52d6236b6fd7df8919 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 30 Jul 2025 22:21:11 +0200 Subject: [PATCH 091/312] Introduce `UseFullCore` digest item --- .../src/max_parachain_block_weight.rs | 6 ++++++ cumulus/primitives/core/src/lib.rs | 13 ++++++++++++- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs 
b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs index 894ce32d41598..04a1619f2e5d2 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs @@ -260,10 +260,16 @@ where // If the previous one was a fraction and we gave the transaction a `FullCore` we need // to check if it used it. (prev @ BlockWeightMode::FractionOfCore { .. }, BlockWeightMode::FullCore) => + //TODO: Use `BlockWeight` so we actually take reclaim into account if post_info.calc_actual_weight(info).all_lt( MaxParachainBlockWeight::target_block_weight::(TargetBlockRate::get()), ) { crate::BlockWeightMode::::put(prev); + } else { + // Inform the node that this block uses the entire core alone. + frame_system::Pallet::::deposit_log( + CumulusDigestItem::UseFullCore.to_digest_item(), + ); }, (BlockWeightMode::FractionOfCore { .. }, BlockWeightMode::FractionOfCore { .. }) => (), } diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index d69ff4600f675..ef7d79ea0c99e 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -269,12 +269,23 @@ pub enum CumulusDigestItem { /// A digest item providing information about the position of the block in the bundle. #[codec(index = 2)] BundleInfo(BundleInfo), + /// A digest item informing the node that this block should be put alone onto a core. + /// + /// In other words, the core should not be shared with other blocks. + #[codec(index = 3)] + UseFullCore, } impl CumulusDigestItem { /// Encode this as a Substrate [`DigestItem`]. 
pub fn to_digest_item(&self) -> DigestItem { - DigestItem::PreRuntime(CUMULUS_CONSENSUS_ID, self.encode()) + let encoded = self.encode(); + + match self { + Self::RelayParent(_) | Self::UseFullCore => + DigestItem::Consensus(CUMULUS_CONSENSUS_ID, encoded), + _ => DigestItem::PreRuntime(CUMULUS_CONSENSUS_ID, encoded), + } } /// Find [`CumulusDigestItem::CoreInfo`] in the given `digest`. From 5ed2fe0ded1a1d703160a8dd035acad46e575fcb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 31 Jul 2025 23:30:47 +0200 Subject: [PATCH 092/312] Make one block per PoV working --- .../slot_based/block_builder_task.rs | 18 +++++-- .../src/max_parachain_block_weight.rs | 33 +++++++------ cumulus/primitives/core/src/lib.rs | 18 +++++++ .../pov_bundling_utility_weight.rs | 48 ++++++++++++++----- 4 files changed, 88 insertions(+), 29 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 4df8950536b69..fe391cfd20f75 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -556,12 +556,24 @@ where // Announce the newly built block to our peers. 
collator.collator_service().announce_block(parent_hash, None); - ignored_nodes.extend(IgnoredNodes::from_storage_proof::>(&res.proof)); - ignored_nodes.extend(IgnoredNodes::from_memory_db(res.backend_transaction)); - blocks.push(res.block); proofs.push(res.proof); + if CumulusDigestItem::contains_use_full_core(parent_header.digest()) { + tracing::trace!( + target: crate::LOG_TARGET, + block_hash = ?parent_hash, + time_used_by_block_in_secs = %block_start.elapsed().as_secs_f32(), + "Found `UseFullCore` digest, stopping block production for core", + ); + break + } + + ignored_nodes.extend(IgnoredNodes::from_storage_proof::>( + proofs.last().expect("We just pushed the proof into the vector; qed"), + )); + ignored_nodes.extend(IgnoredNodes::from_memory_db(res.backend_transaction)); + // If there is still time left for the block in the slot, we sleep the rest of the time. // This ensures that we have some steady block rate. if let Some(sleep) = slot_time_for_block diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs index 04a1619f2e5d2..af45020de53a5 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs @@ -200,11 +200,10 @@ where ) -> Result { let digest = frame_system::Pallet::::digest(); - let bundle_info = CumulusDigestItem::find_bundle_info(&digest); + let is_first_block_on_core = + CumulusDigestItem::find_bundle_info(&digest).map_or(false, |bi| bi.index == 0); - let mode = if frame_system::Pallet::::inherents_applied() && - bundle_info.map_or(false, |bi| bi.index == 0) - { + let mode = if frame_system::Pallet::::inherents_applied() { let extrinsic_index = frame_system::Pallet::::extrinsic_index().unwrap_or_default(); crate::BlockWeightMode::::mutate(|mode| { @@ -218,11 +217,14 @@ where // We are already allowing the full core, not that much more to do here. 
BlockWeightMode::FullCore => {}, BlockWeightMode::FractionOfCore { first_transaction_index } => { - if info.total_weight().any_gt( - MaxParachainBlockWeight::target_block_weight::( + if info + .total_weight() + // The extrinsic lengths counts towards the POV size + .saturating_add(Weight::from_parts(0, len as u64)) + .any_gt(MaxParachainBlockWeight::target_block_weight::( TargetBlockRate::get(), - ), - ) { + )) && is_first_block_on_core + { if extrinsic_index.saturating_sub(first_transaction_index) < 10 { new_mode = BlockWeightMode::FullCore; *mode = Some(new_mode); @@ -260,10 +262,13 @@ where // If the previous one was a fraction and we gave the transaction a `FullCore` we need // to check if it used it. (prev @ BlockWeightMode::FractionOfCore { .. }, BlockWeightMode::FullCore) => - //TODO: Use `BlockWeight` so we actually take reclaim into account - if post_info.calc_actual_weight(info).all_lt( - MaxParachainBlockWeight::target_block_weight::(TargetBlockRate::get()), - ) { + if post_info + .calc_actual_weight(info) + // The extrinsic lengths counts towards the POV size + .saturating_add(Weight::from_parts(0, len as u64)) + .all_lt(MaxParachainBlockWeight::target_block_weight::( + TargetBlockRate::get(), + )) { crate::BlockWeightMode::::put(prev); } else { // Inform the node that this block uses the entire core alone. @@ -299,9 +304,7 @@ where len: usize, result: &DispatchResult, ) -> Result<(), TransactionValidityError> { - S::bare_post_dispatch(info, post_info, len, result)?; - - frame_system::Pallet::::reclaim_weight(info, post_info) + S::bare_post_dispatch(info, post_info, len, result) } } diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index ef7d79ea0c99e..1746bf36a7f7d 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -365,6 +365,24 @@ impl CumulusDigestItem { _ => None, }) } + + /// Returns `true` if the given `digest` contains the [`Self::UseFullCore`] item. 
+ pub fn contains_use_full_core(digest: &Digest) -> bool { + digest + .convert_first(|d| match d { + DigestItem::Consensus(id, val) if id == &CUMULUS_CONSENSUS_ID => { + let Ok(CumulusDigestItem::UseFullCore) = + CumulusDigestItem::decode_all(&mut &val[..]) + else { + return None + }; + + Some(true) + }, + _ => None, + }) + .unwrap_or_default() + } } /// Extract the relay-parent from the provided header digest. Returns `None` if none were found. diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs index ff750e12af317..5e88f803dc165 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs @@ -16,7 +16,6 @@ // limitations under the License. use anyhow::anyhow; - use cumulus_primitives_core::relay_chain::MAX_POV_SIZE; use cumulus_zombienet_sdk_helpers::{ assert_finality_lag, assert_para_throughput, create_assign_core_call, find_core_info, @@ -25,6 +24,7 @@ use cumulus_zombienet_sdk_helpers::{ use frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND; use polkadot_primitives::Id as ParaId; use serde_json::json; +use std::sync::Arc; use zombienet_sdk::{ subxt::{ ext::scale_value::value, tx::DynamicPayload, utils::H256, OnlineClient, PolkadotConfig, @@ -84,7 +84,7 @@ async fn pov_bundling_utility_weight() -> Result<(), anyhow::Error> { .await?; log::info!("First transaction finalized"); - ensure_is_first_block_in_core(¶_client, block_hash).await?; + ensure_is_only_block_in_core(¶_client, block_hash).await?; // Create a transaction that uses more than the allowed POV size per block. 
let pov_size = MAX_POV_SIZE / 4 + 512 * 1024; @@ -97,7 +97,7 @@ async fn pov_bundling_utility_weight() -> Result<(), anyhow::Error> { .await?; log::info!("Second transaction finalized"); - ensure_is_first_block_in_core(¶_client, block_hash).await?; + ensure_is_only_block_in_core(¶_client, block_hash).await?; Ok(()) } @@ -133,25 +133,51 @@ fn create_sudo_call(inner_call: DynamicPayload) -> DynamicPayload { zombienet_sdk::subxt::tx::dynamic("Sudo", "sudo", vec![inner_call.into_value()]) } -/// Checks if the given `block_hash` is the first block in a core. -async fn ensure_is_first_block_in_core( +/// Checks if the given `block_hash` is the only block in a core. +/// +/// Assumes that the given block +async fn ensure_is_only_block_in_core( para_client: &OnlineClient, block_hash: H256, ) -> Result<(), anyhow::Error> { - let block = para_client.blocks().at(block_hash).await?; + let blocks = para_client.blocks(); + let block = blocks.at(block_hash).await?; let core_info = find_core_info(&block)?; let parent = para_client.blocks().at(block.header().parent_hash).await?; // Genesis is for sure on a different core :) - if parent.number() == 0 { - return Ok(()) + if parent.number() != 0 { + let parent_core_info = find_core_info(&parent)?; + + if core_info == parent_core_info { + return Err(anyhow::anyhow!( + "Not first block in core, found in block {}", + block.number() + )) + } } - let parent_core_info = find_core_info(&parent)?; + // Start with the latest best block. 
+ let mut current_block = Arc::new(blocks.subscribe_best().await?.next().await.unwrap()?); + + let mut chain_of_blocks = vec![]; + + while current_block.hash() != block.hash() { + chain_of_blocks.push(current_block.clone()); + current_block = Arc::new(blocks.at(current_block.header().parent_hash).await?); + + if current_block.number() == 0 { + return Err(anyhow::anyhow!( + "Did not found block while going backwards from the best block" + )) + } + } - if core_info == parent_core_info { - Err(anyhow::anyhow!("Not first block in core")) + // The last block `CoreInfo` must be different or it shares the core with the block we are + // interested in. + if core_info == find_core_info(chain_of_blocks.last().unwrap())? { + Err(anyhow::anyhow!("Found more blocks on the same core")) } else { Ok(()) } From c3eaaffe3f3f97e38966bf256047038294403979 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 1 Aug 2025 21:32:09 +0200 Subject: [PATCH 093/312] Move the `pov_bundling` tests to their own module --- .../tests/zombie_ci/elastic_scaling/mod.rs | 3 -- .../zombienet-sdk/tests/zombie_ci/mod.rs | 1 + .../pov_bundling.rs => pov_bundling/basic.rs} | 0 .../tests/zombie_ci/pov_bundling/mod.rs | 20 +++++++++++++ .../three_cores_glutton.rs} | 0 .../utility_weight.rs} | 30 ++++++++++++------- 6 files changed, 40 insertions(+), 14 deletions(-) rename cumulus/zombienet/zombienet-sdk/tests/zombie_ci/{elastic_scaling/pov_bundling.rs => pov_bundling/basic.rs} (100%) create mode 100644 cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs rename cumulus/zombienet/zombienet-sdk/tests/zombie_ci/{elastic_scaling/pov_bundling_3cores_glutton.rs => pov_bundling/three_cores_glutton.rs} (100%) rename cumulus/zombienet/zombienet-sdk/tests/zombie_ci/{elastic_scaling/pov_bundling_utility_weight.rs => pov_bundling/utility_weight.rs} (90%) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/mod.rs 
b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/mod.rs index 612ccd5d3378e..b78c633361476 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/mod.rs @@ -15,9 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod pov_bundling; -mod pov_bundling_3cores_glutton; -mod pov_bundling_utility_weight; mod pov_recovery; mod slot_based_authoring; mod slot_based_rp_offset; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/mod.rs index fc36563f18cd4..53228b83ad07f 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/mod.rs @@ -7,6 +7,7 @@ mod full_node_catching_up; mod full_node_warp_sync; mod migrate_solo; mod parachain_extrinsic_get_finalized; +mod pov_bundling; mod pov_recovery; mod rpc_collator_build_blocks; mod runtime_upgrade; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/basic.rs similarity index 100% rename from cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling.rs rename to cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/basic.rs diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs new file mode 100644 index 0000000000000..bbc30f7333192 --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs @@ -0,0 +1,20 @@ +// This file is part of Cumulus. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod basic; +mod three_cores_glutton; +mod utility_weight; \ No newline at end of file diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_3cores_glutton.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/three_cores_glutton.rs similarity index 100% rename from cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_3cores_glutton.rs rename to cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/three_cores_glutton.rs diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs similarity index 90% rename from cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs rename to cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs index 5e88f803dc165..b05a2f9573cf3 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_bundling_utility_weight.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs @@ -158,21 +158,29 @@ async fn ensure_is_only_block_in_core( } } - // Start with the latest best block. - let mut current_block = Arc::new(blocks.subscribe_best().await?.next().await.unwrap()?); + let chain_of_blocks = loop { + // Start with the latest best block. 
+ let mut current_block = Arc::new(blocks.subscribe_best().await?.next().await.unwrap()?); - let mut chain_of_blocks = vec![]; + let mut chain_of_blocks = vec![]; - while current_block.hash() != block.hash() { - chain_of_blocks.push(current_block.clone()); - current_block = Arc::new(blocks.at(current_block.header().parent_hash).await?); + while current_block.hash() != block_hash { + chain_of_blocks.push(current_block.clone()); + current_block = Arc::new(blocks.at(current_block.header().parent_hash).await?); - if current_block.number() == 0 { - return Err(anyhow::anyhow!( - "Did not found block while going backwards from the best block" - )) + if current_block.number() == 0 { + return Err(anyhow::anyhow!( + "Did not found block while going backwards from the best block" + )) + } } - } + + // It possible that the first block we got is the same as the transaction got finalized. + // So, we just retry again until we found some more blocks. + if !chain_of_blocks.is_empty() { + break chain_of_blocks + } + }; // The last block `CoreInfo` must be different or it shares the core with the block we are // interested in. 
From d42d947e98448e38abd8bc2b72745bfe4e348cbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 1 Aug 2025 22:07:57 +0200 Subject: [PATCH 094/312] Some small changes --- .../zombienet-sdk-helpers/src/lib.rs | 58 +++++++++++++++++ .../tests/zombie_ci/pov_bundling/basic.rs | 8 +-- .../pov_bundling/three_cores_glutton.rs | 2 +- .../zombie_ci/pov_bundling/utility_weight.rs | 63 +------------------ 4 files changed, 66 insertions(+), 65 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 9c52ce4e17580..e0b58f04a251f 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -681,3 +681,61 @@ pub async fn assert_para_is_registered( Err(anyhow!("No more blocks to check")) } + +/// Checks if the given `block_hash` is the only block in a core. +/// +/// Assumes that the given block +pub async fn ensure_is_only_block_in_core( + para_client: &OnlineClient, + block_hash: H256, +) -> Result<(), anyhow::Error> { + let blocks = para_client.blocks(); + let block = blocks.at(block_hash).await?; + let core_info = find_core_info(&block)?; + + let parent = para_client.blocks().at(block.header().parent_hash).await?; + + // Genesis is for sure on a different core :) + if parent.number() != 0 { + let parent_core_info = find_core_info(&parent)?; + + if core_info == parent_core_info { + return Err(anyhow::anyhow!( + "Not first block in core, found in block {}", + block.number() + )) + } + } + + let chain_of_blocks = loop { + // Start with the latest best block. 
+ let mut current_block = std::sync::Arc::new(blocks.subscribe_best().await?.next().await.unwrap()?); + + let mut chain_of_blocks = vec![]; + + while current_block.hash() != block_hash { + chain_of_blocks.push(current_block.clone()); + current_block = std::sync::Arc::new(blocks.at(current_block.header().parent_hash).await?); + + if current_block.number() == 0 { + return Err(anyhow::anyhow!( + "Did not found block while going backwards from the best block" + )) + } + } + + // It possible that the first block we got is the same as the transaction got finalized. + // So, we just retry again until we found some more blocks. + if !chain_of_blocks.is_empty() { + break chain_of_blocks + } + }; + + // The last block `CoreInfo` must be different or it shares the core with the block we are + // interested in. + if core_info == find_core_info(chain_of_blocks.last().unwrap())? { + Err(anyhow::anyhow!("Found more blocks on the same core")) + } else { + Ok(()) + } +} diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/basic.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/basic.rs index d7bafedabe487..88f91b1aa24a9 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/basic.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/basic.rs @@ -42,7 +42,7 @@ const PARA_ID: u32 = 2400; /// As we increase the number of cores via `assign_core`, we expect the blocks to spread over the /// relay cores. 
#[tokio::test(flavor = "multi_thread")] -async fn pov_bundling() -> Result<(), anyhow::Error> { +async fn pov_bundling_basic() -> Result<(), anyhow::Error> { let _ = env_logger::try_init_from_env( env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), ); @@ -65,8 +65,8 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { [(ParaId::from(PARA_ID), (para_client.clone(), 48..73))], ) .await?; - // 3 relay chain blocks - assert_finality_lag(¶_client, 72).await?; + // 5 relay chain blocks + assert_finality_lag(¶_client, 60).await?; let assign_cores_call = create_assign_core_call(&[(0, PARA_ID), (1, PARA_ID)]); @@ -96,7 +96,7 @@ async fn pov_bundling() -> Result<(), anyhow::Error> { [(ParaId::from(PARA_ID), (para_client.clone(), 48..73))], ) .await?; - assert_finality_lag(¶_client, 72).await?; + assert_finality_lag(¶_client, 60).await?; let assign_cores_call = create_assign_core_call(&[(2, PARA_ID), (3, PARA_ID), (4, PARA_ID)]); // Assign two extra cores to each parachain. diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/three_cores_glutton.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/three_cores_glutton.rs index 594a4985ed7d3..fd28ceeb8180d 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/three_cores_glutton.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/three_cores_glutton.rs @@ -38,7 +38,7 @@ const PARA_ID: u32 = 2400; /// This test starts with 3 cores assigned and configures glutton to use 80% of ref time, /// then validates that the parachain produces 72 blocks. 
#[tokio::test(flavor = "multi_thread")] -async fn pov_bundling_3cores_glutton() -> Result<(), anyhow::Error> { +async fn pov_bundling_three_cores_glutton() -> Result<(), anyhow::Error> { let _ = env_logger::try_init_from_env( env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), ); diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs index b05a2f9573cf3..8dba384afcd84 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs @@ -18,7 +18,8 @@ use anyhow::anyhow; use cumulus_primitives_core::relay_chain::MAX_POV_SIZE; use cumulus_zombienet_sdk_helpers::{ - assert_finality_lag, assert_para_throughput, create_assign_core_call, find_core_info, + assert_finality_lag, assert_para_throughput, create_assign_core_call, + ensure_is_only_block_in_core, find_core_info, submit_extrinsic_and_wait_for_finalization_success, }; use frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND; @@ -39,7 +40,7 @@ const PARA_ID: u32 = 2400; /// /// This test starts with 3 cores assigned and sends two transactions: /// 1. One with 1s ref_time -/// 2. One with half of the max PoV size +/// 2. One with a PoV size bigger than what one block alone is allowed to process. /// Each transaction is sent after the other and waits for finalization. #[tokio::test(flavor = "multi_thread")] async fn pov_bundling_utility_weight() -> Result<(), anyhow::Error> { @@ -133,64 +134,6 @@ fn create_sudo_call(inner_call: DynamicPayload) -> DynamicPayload { zombienet_sdk::subxt::tx::dynamic("Sudo", "sudo", vec![inner_call.into_value()]) } -/// Checks if the given `block_hash` is the only block in a core. 
-/// -/// Assumes that the given block -async fn ensure_is_only_block_in_core( - para_client: &OnlineClient, - block_hash: H256, -) -> Result<(), anyhow::Error> { - let blocks = para_client.blocks(); - let block = blocks.at(block_hash).await?; - let core_info = find_core_info(&block)?; - - let parent = para_client.blocks().at(block.header().parent_hash).await?; - - // Genesis is for sure on a different core :) - if parent.number() != 0 { - let parent_core_info = find_core_info(&parent)?; - - if core_info == parent_core_info { - return Err(anyhow::anyhow!( - "Not first block in core, found in block {}", - block.number() - )) - } - } - - let chain_of_blocks = loop { - // Start with the latest best block. - let mut current_block = Arc::new(blocks.subscribe_best().await?.next().await.unwrap()?); - - let mut chain_of_blocks = vec![]; - - while current_block.hash() != block_hash { - chain_of_blocks.push(current_block.clone()); - current_block = Arc::new(blocks.at(current_block.header().parent_hash).await?); - - if current_block.number() == 0 { - return Err(anyhow::anyhow!( - "Did not found block while going backwards from the best block" - )) - } - } - - // It possible that the first block we got is the same as the transaction got finalized. - // So, we just retry again until we found some more blocks. - if !chain_of_blocks.is_empty() { - break chain_of_blocks - } - }; - - // The last block `CoreInfo` must be different or it shares the core with the block we are - // interested in. - if core_info == find_core_info(chain_of_blocks.last().unwrap())? 
{ - Err(anyhow::anyhow!("Found more blocks on the same core")) - } else { - Ok(()) - } -} - async fn build_network_config() -> Result { let images = zombienet_sdk::environment::get_images_from_env(); log::info!("Using images: {images:?}"); From 93ce4621395dfa40a70b77f5178f2131bae7bf10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 1 Aug 2025 22:10:14 +0200 Subject: [PATCH 095/312] Okay --- .../zombienet-sdk/tests/zombie_ci/pov_bundling/basic.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/basic.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/basic.rs index 88f91b1aa24a9..82a015fb2cb41 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/basic.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/basic.rs @@ -65,8 +65,8 @@ async fn pov_bundling_basic() -> Result<(), anyhow::Error> { [(ParaId::from(PARA_ID), (para_client.clone(), 48..73))], ) .await?; - // 5 relay chain blocks - assert_finality_lag(¶_client, 60).await?; + // 6 relay chain blocks + assert_finality_lag(¶_client, 72).await?; let assign_cores_call = create_assign_core_call(&[(0, PARA_ID), (1, PARA_ID)]); @@ -96,7 +96,7 @@ async fn pov_bundling_basic() -> Result<(), anyhow::Error> { [(ParaId::from(PARA_ID), (para_client.clone(), 48..73))], ) .await?; - assert_finality_lag(¶_client, 60).await?; + assert_finality_lag(¶_client, 72).await?; let assign_cores_call = create_assign_core_call(&[(2, PARA_ID), (3, PARA_ID), (4, PARA_ID)]); // Assign two extra cores to each parachain. 
From c74a2db3e2c894e57bfa9dcc035c2656091c6896 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 2 Aug 2025 00:21:47 +0200 Subject: [PATCH 096/312] Runtime upgrade test --- .../zombie_ci/pov_bundling/runtime_upgrade.rs | 194 ++++++++++++++++++ 1 file changed, 194 insertions(+) create mode 100644 cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/runtime_upgrade.rs diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/runtime_upgrade.rs new file mode 100644 index 0000000000000..5bfa745fd3560 --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/runtime_upgrade.rs @@ -0,0 +1,194 @@ +// This file is part of Cumulus. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use anyhow::anyhow; +use cumulus_test_runtime::wasm_spec_version_incremented::WASM_BINARY_BLOATY as WASM_RUNTIME_UPGRADE; +use cumulus_zombienet_sdk_helpers::{ + assert_finality_lag, assert_para_throughput, create_assign_core_call, + ensure_is_only_block_in_core, find_core_info, + submit_extrinsic_and_wait_for_finalization_success, +}; +use polkadot_primitives::Id as ParaId; +use serde_json::json; +use sp_core::blake2_256; +use std::sync::Arc; +use zombienet_sdk::{ + subxt::{ + ext::scale_value::{value, Value}, + tx::DynamicPayload, + utils::H256, + OnlineClient, PolkadotConfig, + }, + subxt_signer::sr25519::dev, + NetworkConfig, NetworkConfigBuilder, +}; + +const PARA_ID: u32 = 2400; +const MIN_RUNTIME_SIZE_BYTES: usize = 2_621_440; // 2.5 MiB + +/// A test that performs runtime upgrade using the `authorize_upgrade` and +/// `apply_authorized_upgrade` logic. +/// +/// This test starts with 3 cores assigned and performs two transactions: +/// 1. First calls `authorize_upgrade` to authorize the new runtime code hash +/// 2. Then calls `apply_authorized_upgrade` with the actual runtime code +/// The runtime code is validated to be at least 2.5MiB in size, and both transactions +/// are validated to be the only block in their respective cores. 
+#[tokio::test(flavor = "multi_thread")] +async fn pov_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + // Validate runtime size requirement + let runtime_wasm = + WASM_RUNTIME_UPGRADE.ok_or_else(|| anyhow!("WASM runtime upgrade binary not available"))?; + + if runtime_wasm.len() < MIN_RUNTIME_SIZE_BYTES { + return Err(anyhow!( + "Runtime size {} bytes is below minimum required {} bytes (2.5MiB)", + runtime_wasm.len(), + MIN_RUNTIME_SIZE_BYTES + )); + } + + log::info!("Runtime size validation passed: {} bytes", runtime_wasm.len()); + + let config = build_network_config().await?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; + + let relay_node = network.get_node("validator-0")?; + let para_node = network.get_node("collator-1")?; + + let para_client: OnlineClient = para_node.wait_client().await?; + let relay_client: OnlineClient = relay_node.wait_client().await?; + let alice = dev::alice(); + + // Assign cores 0 and 1 to start with 3 cores total (core 2 is assigned by Zombienet) + let assign_cores_call = create_assign_core_call(&[(0, PARA_ID), (1, PARA_ID)]); + + relay_client + .tx() + .sign_and_submit_then_watch_default(&assign_cores_call, &alice) + .await + .inspect(|_| log::info!("Tx send, waiting for finalization"))? 
+ .wait_for_finalized_success() + .await?; + log::info!("3 cores total assigned to the parachain"); + + // Step 1: Authorize the runtime upgrade + let code_hash = blake2_256(runtime_wasm); + let authorize_call = create_authorize_upgrade_call(code_hash.into()); + let sudo_authorize_call = create_sudo_call(authorize_call); + + log::info!("Sending authorize_upgrade transaction"); + let block_hash = submit_extrinsic_and_wait_for_finalization_success( + ¶_client, + &sudo_authorize_call, + &alice, + ) + .await?; + log::info!("Authorize upgrade transaction finalized"); + + // Step 2: Apply the authorized upgrade with the actual runtime code + let apply_call = create_apply_authorized_upgrade_call(runtime_wasm.to_vec()); + + log::info!( + "Sending apply_authorized_upgrade transaction with runtime size: {} bytes", + runtime_wasm.len() + ); + let block_hash = + submit_extrinsic_and_wait_for_finalization_success(¶_client, &apply_call, &alice) + .await?; + log::info!("Apply authorized upgrade transaction finalized in block: {:?}", block_hash); + + ensure_is_only_block_in_core(¶_client, block_hash).await?; + + Ok(()) +} + +/// Creates a `System::authorize_upgrade` call +fn create_authorize_upgrade_call(code_hash: H256) -> DynamicPayload { + zombienet_sdk::subxt::tx::dynamic( + "System", + "authorize_upgrade", + vec![Value::from_bytes(code_hash)], + ) +} + +/// Creates a `System::apply_authorized_upgrade` call +fn create_apply_authorized_upgrade_call(code: Vec) -> DynamicPayload { + zombienet_sdk::subxt::tx::dynamic("System", "apply_authorized_upgrade", vec![value!(code)]) +} + +/// Creates a `pallet-sudo` `sudo` call wrapping the inner call +fn create_sudo_call(inner_call: DynamicPayload) -> DynamicPayload { + zombienet_sdk::subxt::tx::dynamic("Sudo", "sudo", vec![inner_call.into_value()]) +} + +async fn build_network_config() -> Result { + let images = zombienet_sdk::environment::get_images_from_env(); + log::info!("Using images: {images:?}"); + NetworkConfigBuilder::new() + 
.with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=trace").into()]) + .with_default_resources(|resources| { + resources.with_request_cpu(4).with_request_memory("4G") + }) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "num_cores": 3, + "max_validators_per_core": 1 + } + } + } + })) + .with_node(|node| node.with_name("validator-0")); + (1..9).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) + }) + .with_parachain(|p| { + p.with_id(PARA_ID) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("pov-bundling") + .with_default_args(vec![ + ("--authoring").into(), + ("slot-based").into(), + ("-lparachain=debug,aura=trace").into(), + ]) + .with_collator(|n| n.with_name("collator-0")) + .with_collator(|n| n.with_name("collator-1")) + .with_collator(|n| n.with_name("collator-2")) + }) + .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { + Ok(val) => global_settings.with_base_dir(val), + _ => global_settings, + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + }) +} From ffa37b99513b86c26ab2bcfff65ddbae7172df73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 2 Aug 2025 00:22:08 +0200 Subject: [PATCH 097/312] Leftover changes --- Cargo.lock | 1 + cumulus/zombienet/zombienet-sdk/Cargo.toml | 1 + .../zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs | 1 + .../tests/zombie_ci/pov_bundling/utility_weight.rs | 7 +------ 4 files changed, 4 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4460e8d05eb43..974ed07b05c65 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5052,6 +5052,7 @@ version = "0.1.0" dependencies = [ "anyhow", 
"cumulus-primitives-core", + "cumulus-test-runtime", "cumulus-zombienet-sdk-helpers", "env_logger 0.11.3", "frame-support", diff --git a/cumulus/zombienet/zombienet-sdk/Cargo.toml b/cumulus/zombienet/zombienet-sdk/Cargo.toml index ac9cc1a5e0856..43eb40834f305 100644 --- a/cumulus/zombienet/zombienet-sdk/Cargo.toml +++ b/cumulus/zombienet/zombienet-sdk/Cargo.toml @@ -25,6 +25,7 @@ sp-keyring = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } frame-support = { workspace = true } cumulus-primitives-core = { workspace = true } +cumulus-test-runtime = { workspace = true } [features] zombie-ci = [] diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs index bbc30f7333192..d640a1651eba4 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs @@ -16,5 +16,6 @@ // limitations under the License. mod basic; +mod runtime_upgrade; mod three_cores_glutton; mod utility_weight; \ No newline at end of file diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs index 8dba384afcd84..21705578c9f0b 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs @@ -120,12 +120,7 @@ fn create_utility_with_weight_call(ref_time: u64, proof_size: u64) -> DynamicPay zombienet_sdk::subxt::tx::dynamic( "Utility", "with_weight", - vec![ - inner_call.into_value(), - value! 
{
-				weight
-			},
-		],
+		vec![inner_call.into_value(), weight],
 	)
 }

From 0ba90f8b2cea1afe0cb1108967d309753186299e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Sat, 2 Aug 2025 13:34:51 +0200
Subject: [PATCH 098/312] Send as unsigned transaction

---
 .../zombienet-sdk-helpers/src/lib.rs | 48 +++++++++++--------
 .../zombie_ci/pov_bundling/runtime_upgrade.rs | 13 +++--
 2 files changed, 39 insertions(+), 22 deletions(-)

diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs
index e0b58f04a251f..398408230b7e1 100644
--- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs
+++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs
@@ -2,10 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 
 use anyhow::anyhow;
-use codec::{Compact, Decode, Encode};
-use cumulus_primitives_core::{
-	relay_chain, rpsr_digest::RPSR_CONSENSUS_ID, CoreInfo, CumulusDigestItem, RelayBlockIdentifier,
-};
+use codec::{Decode, Encode};
+use cumulus_primitives_core::{CoreInfo, CumulusDigestItem, RelayBlockIdentifier};
 use futures::{pin_mut, select, stream::StreamExt, TryStreamExt};
 use polkadot_primitives::{vstaging::CandidateReceiptV2, BlakeTwo256, HashT, Id as ParaId};
 use sp_runtime::traits::Zero;
@@ -18,13 +16,11 @@ use zombienet_sdk::subxt::{
 	self,
 	backend::legacy::LegacyRpcMethods,
 	blocks::Block,
-	config::{
-		signed_extensions::CheckMortalityParams, substrate::DigestItem, ExtrinsicParams, Header,
-	},
+	config::{signed_extensions::CheckMortalityParams, ExtrinsicParams, Header},
 	dynamic::Value,
 	events::Events,
 	ext::scale_value::value,
-	tx::{signer::Signer, DynamicPayload, TxStatus},
+	tx::{signer::Signer, DynamicPayload, SubmittableExtrinsic, TxStatus},
 	utils::H256,
 	Config, OnlineClient, PolkadotConfig,
 };
@@ -381,8 +377,6 @@ pub async fn assert_para_blocks_throughput(
 		!included_parachain_block_identifiers.contains(&(core_info, rbi))
 	});
 
-	dbg!(block.number());
-
 	if !is_session_change(&block).await?
{ found_first_candidate |= !included_parachain_block_identifiers.is_empty(); @@ -568,7 +562,7 @@ pub async fn assert_relay_parent_offset( Ok(()) } -/// Submits the given `call` as transaction and waits for it successful finalization. +/// Submits the given `call` as signed transaction and waits for it successful finalization. /// /// The transaction is send as immortal transaction. pub async fn submit_extrinsic_and_wait_for_finalization_success>( @@ -582,12 +576,26 @@ pub async fn submit_extrinsic_and_wait_for_finalization_success::immortal(); - let mut tx = client - .tx() - .create_signed(call, signer, extensions) - .await? - .submit_and_watch() - .await?; + let tx = client.tx().create_signed(call, signer, extensions).await?; + + submit_tx_and_wait_for_finalization(tx).await +} + +/// Submits the given `call` as unsigned transaction and waits for it successful finalization. +pub async fn submit_unsigned_extrinsic_and_wait_for_finalization_success( + client: &OnlineClient, + call: &DynamicPayload, +) -> Result { + let tx = client.tx().create_unsigned(call)?; + + submit_tx_and_wait_for_finalization(tx).await +} + +/// Submit the given transaction and wait for its finalization. +async fn submit_tx_and_wait_for_finalization( + tx: SubmittableExtrinsic>, +) -> Result { + let mut tx = tx.submit_and_watch().await?; // Below we use the low level API to replicate the `wait_for_in_block` behaviour // which was removed in subxt 0.33.0. See https://github.com/paritytech/subxt/pull/1237. @@ -709,13 +717,15 @@ pub async fn ensure_is_only_block_in_core( let chain_of_blocks = loop { // Start with the latest best block. 
- let mut current_block = std::sync::Arc::new(blocks.subscribe_best().await?.next().await.unwrap()?); + let mut current_block = + std::sync::Arc::new(blocks.subscribe_best().await?.next().await.unwrap()?); let mut chain_of_blocks = vec![]; while current_block.hash() != block_hash { chain_of_blocks.push(current_block.clone()); - current_block = std::sync::Arc::new(blocks.at(current_block.header().parent_hash).await?); + current_block = + std::sync::Arc::new(blocks.at(current_block.header().parent_hash).await?); if current_block.number() == 0 { return Err(anyhow::anyhow!( diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/runtime_upgrade.rs index 5bfa745fd3560..9e8b94e253aaa 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/runtime_upgrade.rs @@ -16,11 +16,13 @@ // limitations under the License. use anyhow::anyhow; +use cumulus_primitives_core::relay_chain::MAX_POV_SIZE; use cumulus_test_runtime::wasm_spec_version_incremented::WASM_BINARY_BLOATY as WASM_RUNTIME_UPGRADE; use cumulus_zombienet_sdk_helpers::{ assert_finality_lag, assert_para_throughput, create_assign_core_call, ensure_is_only_block_in_core, find_core_info, submit_extrinsic_and_wait_for_finalization_success, + submit_unsigned_extrinsic_and_wait_for_finalization_success, }; use polkadot_primitives::Id as ParaId; use serde_json::json; @@ -38,7 +40,9 @@ use zombienet_sdk::{ }; const PARA_ID: u32 = 2400; -const MIN_RUNTIME_SIZE_BYTES: usize = 2_621_440; // 2.5 MiB +/// 4 blocks per core and each gets 1/4 of the [`MAX_POV_SIZE`], so the runtime needs to be bigger +/// than this to trigger the logic of getting one full core. 
+const MIN_RUNTIME_SIZE_BYTES: usize = MAX_POV_SIZE as usize / 4 + 50 * 1024; /// A test that performs runtime upgrade using the `authorize_upgrade` and /// `apply_authorized_upgrade` logic. @@ -58,7 +62,7 @@ async fn pov_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { let runtime_wasm = WASM_RUNTIME_UPGRADE.ok_or_else(|| anyhow!("WASM runtime upgrade binary not available"))?; - if runtime_wasm.len() < MIN_RUNTIME_SIZE_BYTES { + if runtime_wasm.len() <= MIN_RUNTIME_SIZE_BYTES { return Err(anyhow!( "Runtime size {} bytes is below minimum required {} bytes (2.5MiB)", runtime_wasm.len(), @@ -113,13 +117,16 @@ async fn pov_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { "Sending apply_authorized_upgrade transaction with runtime size: {} bytes", runtime_wasm.len() ); + let block_hash = - submit_extrinsic_and_wait_for_finalization_success(¶_client, &apply_call, &alice) + submit_unsigned_extrinsic_and_wait_for_finalization_success(¶_client, &apply_call) .await?; log::info!("Apply authorized upgrade transaction finalized in block: {:?}", block_hash); ensure_is_only_block_in_core(¶_client, block_hash).await?; + //TODO: Verify that the runtime upgrade block is also using a full core. 
+ Ok(()) } From 46539ea7ded905b17856a5fdd1c69aad72e40c7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 4 Aug 2025 00:35:01 +0200 Subject: [PATCH 099/312] Rewrite `DynamicWeight` to fix the runtime upgrade test --- .../src/max_parachain_block_weight.rs | 189 +++++++++++------- .../tests/zombie_ci/pov_bundling/mod.rs | 2 +- 2 files changed, 116 insertions(+), 75 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs index af45020de53a5..74145d8035dc4 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs @@ -38,6 +38,7 @@ use sp_runtime::{ #[derive(Debug, Encode, Decode, Clone, Copy, TypeInfo)] pub enum BlockWeightMode { FullCore, + PotentialFullCore { first_transaction_index: u32 }, FractionOfCore { first_transaction_index: u32 }, } @@ -74,13 +75,14 @@ impl MaxParachainBlockWeight { match crate::BlockWeightMode::::get() { // We allow the full core. - Some(BlockWeightMode::FullCore) => return Self::FULL_CORE_WEIGHT, + Some(BlockWeightMode::FullCore | BlockWeightMode::PotentialFullCore { .. }) => + return Self::FULL_CORE_WEIGHT, + // Let's calculate below how much weight we can use. + Some(BlockWeightMode::FractionOfCore { .. }) => (), // Either the runtime is not using the `DynamicMaxBlockWeight` extension or there is // some bug. Because after the inherents are applied, this value should be set by the // extension. To be on the safe side, we allow the full core weight. None => return Self::FULL_CORE_WEIGHT, - // Let's calculate below how much weight we can use. - Some(BlockWeightMode::FractionOfCore { .. 
}) => (), } Self::target_block_weight::(target_blocks) @@ -128,6 +130,102 @@ impl DynamicMaxBlockWeight { } } +impl DynamicMaxBlockWeight +where + T: Config, + TargetBlockRate: Get, +{ + fn pre_dispatch_extrinsic( + info: &DispatchInfo, + len: usize, + ) -> Result<(), TransactionValidityError> { + let digest = frame_system::Pallet::::digest(); + + let is_first_block_on_core = + CumulusDigestItem::find_bundle_info(&digest).map_or(false, |bi| bi.index == 0); + + if frame_system::Pallet::::inherents_applied() { + let extrinsic_index = frame_system::Pallet::::extrinsic_index().unwrap_or_default(); + + crate::BlockWeightMode::::mutate(|mode| { + let current_mode = *mode.get_or_insert_with(|| BlockWeightMode::FractionOfCore { + first_transaction_index: extrinsic_index, + }); + + match current_mode { + // We are already allowing the full core, not that much more to do here. + BlockWeightMode::FullCore => {}, + BlockWeightMode::PotentialFullCore { first_transaction_index } | + BlockWeightMode::FractionOfCore { first_transaction_index } => { + let potential = + matches!(current_mode, BlockWeightMode::PotentialFullCore { .. 
}); + debug_assert!( + !potential, + "`PotentialFullCore` should resolve to `FullCore` or `FractionOfCore` after applying a transaction.", + ); + + if info + .total_weight() + // The extrinsic lengths counts towards the POV size + .saturating_add(Weight::from_parts(0, len as u64)) + .any_gt(MaxParachainBlockWeight::target_block_weight::( + TargetBlockRate::get(), + )) && is_first_block_on_core + { + if extrinsic_index.saturating_sub(first_transaction_index) < 10 { + *mode = Some(BlockWeightMode::PotentialFullCore { + first_transaction_index, + }); + } else { + return Err(InvalidTransaction::ExhaustsResources) + } + } else if potential { + *mode = + Some(BlockWeightMode::FractionOfCore { first_transaction_index }); + } + }, + }; + + Ok(()) + }).map_err(Into::into) + } else { + Ok(()) + } + } + + fn post_dispatch_extrinsic(info: &DispatchInfo, post_info: &PostDispatchInfo, len: usize) { + crate::BlockWeightMode::::mutate(|weight_mode| { + let Some(mode) = *weight_mode else { return }; + + match mode { + // If the previous one was already `FullCore` or `FractionOfCore`, we don't need to + // change anything. + BlockWeightMode::FullCore | BlockWeightMode::FractionOfCore { .. } => {}, + // Now we need to check if the transaction required more weight than a fraction of a + // core block. + BlockWeightMode::PotentialFullCore { first_transaction_index } => + if post_info + .calc_actual_weight(info) + // The extrinsic lengths counts towards the POV size + .saturating_add(Weight::from_parts(0, len as u64)) + .all_lt(MaxParachainBlockWeight::target_block_weight::( + TargetBlockRate::get(), + )) { + *weight_mode = + Some(BlockWeightMode::FractionOfCore { first_transaction_index }); + } else { + *weight_mode = Some(BlockWeightMode::FullCore); + + // Inform the node that this block uses the full core. 
+ frame_system::Pallet::::deposit_log( + CumulusDigestItem::UseFullCore.to_digest_item(), + ); + }, + } + }); + } +} + impl From for DynamicMaxBlockWeight { fn from(s: S) -> Self { Self::new(s) @@ -156,7 +254,7 @@ where type Val = S::Val; - type Pre = (Option<(BlockWeightMode, BlockWeightMode)>, S::Pre); + type Pre = S::Pre; fn implicit(&self) -> Result { self.0.implicit() @@ -198,50 +296,9 @@ where info: &DispatchInfoOf, len: usize, ) -> Result { - let digest = frame_system::Pallet::::digest(); - - let is_first_block_on_core = - CumulusDigestItem::find_bundle_info(&digest).map_or(false, |bi| bi.index == 0); - - let mode = if frame_system::Pallet::::inherents_applied() { - let extrinsic_index = frame_system::Pallet::::extrinsic_index().unwrap_or_default(); - - crate::BlockWeightMode::::mutate(|mode| { - let current_mode = *mode.get_or_insert_with(|| BlockWeightMode::FractionOfCore { - first_transaction_index: extrinsic_index, - }); - - let mut new_mode = current_mode; - - match current_mode { - // We are already allowing the full core, not that much more to do here. - BlockWeightMode::FullCore => {}, - BlockWeightMode::FractionOfCore { first_transaction_index } => { - if info - .total_weight() - // The extrinsic lengths counts towards the POV size - .saturating_add(Weight::from_parts(0, len as u64)) - .any_gt(MaxParachainBlockWeight::target_block_weight::( - TargetBlockRate::get(), - )) && is_first_block_on_core - { - if extrinsic_index.saturating_sub(first_transaction_index) < 10 { - new_mode = BlockWeightMode::FullCore; - *mode = Some(new_mode); - } else { - return Err(InvalidTransaction::ExhaustsResources) - } - } - }, - }; - - Ok(Some((current_mode, new_mode))) - })? 
- } else { - None - }; + Self::pre_dispatch_extrinsic(info, len)?; - self.0.prepare(val, origin, call, info, len).map(|r| (mode, r)) + self.0.prepare(val, origin, call, info, len) } fn post_dispatch( @@ -251,33 +308,9 @@ where len: usize, result: &DispatchResult, ) -> Result<(), TransactionValidityError> { - let (mode, pre) = pre; S::post_dispatch(pre, info, post_info, len, result)?; - let Some(mode) = mode else { return Ok(()) }; - - match mode { - // If the previous one was already `FullCore`, we don't need to change anything. - (BlockWeightMode::FullCore, _) => {}, - // If the previous one was a fraction and we gave the transaction a `FullCore` we need - // to check if it used it. - (prev @ BlockWeightMode::FractionOfCore { .. }, BlockWeightMode::FullCore) => - if post_info - .calc_actual_weight(info) - // The extrinsic lengths counts towards the POV size - .saturating_add(Weight::from_parts(0, len as u64)) - .all_lt(MaxParachainBlockWeight::target_block_weight::( - TargetBlockRate::get(), - )) { - crate::BlockWeightMode::::put(prev); - } else { - // Inform the node that this block uses the entire core alone. - frame_system::Pallet::::deposit_log( - CumulusDigestItem::UseFullCore.to_digest_item(), - ); - }, - (BlockWeightMode::FractionOfCore { .. }, BlockWeightMode::FractionOfCore { .. 
}) => (), - } + Self::post_dispatch_extrinsic(info, post_info, len); Ok(()) } @@ -295,7 +328,11 @@ where info: &DispatchInfoOf, len: usize, ) -> Result<(), TransactionValidityError> { - S::bare_validate_and_prepare(call, info, len) + S::bare_validate_and_prepare(call, info, len)?; + + Self::pre_dispatch_extrinsic(info, len)?; + + Ok(()) } fn bare_post_dispatch( @@ -304,7 +341,11 @@ where len: usize, result: &DispatchResult, ) -> Result<(), TransactionValidityError> { - S::bare_post_dispatch(info, post_info, len, result) + S::bare_post_dispatch(info, post_info, len, result)?; + + Self::post_dispatch_extrinsic(info, post_info, len); + + Ok(()) } } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs index d640a1651eba4..3923d1dfdf96f 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs @@ -18,4 +18,4 @@ mod basic; mod runtime_upgrade; mod three_cores_glutton; -mod utility_weight; \ No newline at end of file +mod utility_weight; From d3f0113c9cd620bbed3737e4c28ab0bf9fb89646 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 1 Sep 2025 21:23:56 +0200 Subject: [PATCH 100/312] Fixes --- .../src/max_parachain_block_weight.rs | 18 ++++++++++++++++++ .../src/validate_block/tests.rs | 9 +++------ cumulus/test/client/src/lib.rs | 2 -- cumulus/test/service/src/chain_spec.rs | 8 -------- cumulus/test/service/src/lib.rs | 1 - substrate/frame/support/src/traits/messages.rs | 10 ++++++++++ 6 files changed, 31 insertions(+), 17 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs index 74145d8035dc4..039fc3caf6d0a 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs +++ 
b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs @@ -352,6 +352,7 @@ where #[cfg(test)] mod tests { use super::*; + use crate as parachain_system; use codec::Compact; use cumulus_primitives_core::{ClaimQueueOffset, CoreInfo, CoreSelector}; use frame_support::{construct_runtime, derive_impl}; @@ -367,11 +368,28 @@ mod tests { type AccountId = u64; type AccountData = (); type Lookup = IdentityLookup; + type OnSetCode = crate::ParachainSetCode; + } + + impl crate::Config for Test { + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = (); + type OutboundXcmpMessageSource = (); + type DmpQueue = (); + type ReservedDmpWeight = (); + type XcmpMessageHandler = (); + type ReservedXcmpWeight = (); + type CheckAssociatedRelayNumber = crate::RelayNumberStrictlyIncreases; + type WeightInfo = (); + type ConsensusHook = crate::ExpectParentIncluded; + type RelayParentOffset = (); } construct_runtime!( pub enum Test { System: frame_system, + ParachainSystem: parachain_system, } ); diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 3232cd12a4d30..560179e7797fa 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -29,8 +29,6 @@ use cumulus_test_client::{ }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use polkadot_parachain_primitives::primitives::ValidationResult; -#[cfg(feature = "experimental-ump-signals")] -use relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sp_api::{ApiExt, Core, ProofRecorder, ProvideRuntimeApi, StorageProof}; use sp_consensus_babe::SlotDuration; @@ -102,14 +100,13 @@ fn create_test_client() -> (Client, Header) { } /// Create test client using the runtime with `elastic-scaling` feature enabled. 
-fn create_elastic_scaling_test_client(blocks_per_pov: u32) -> (Client, Header) { +fn create_elastic_scaling_test_client() -> (Client, Header) { let mut builder = TestClientBuilder::new(); builder.genesis_init_mut().wasm = Some( test_runtime::elastic_scaling_500ms::WASM_BINARY .expect("You need to build the WASM binaries to run the tests!") .to_vec(), ); - builder.genesis_init_mut().blocks_per_pov = Some(blocks_per_pov); let client = builder.enable_import_proof_recording().build(); let genesis_header = client @@ -218,7 +215,7 @@ fn build_multiple_blocks_with_witness( let mut api = client.runtime_api(); let proof_recorder = ProofRecorder::::with_ignored_nodes(ignored_nodes.clone()); - api.set_proof_recorder(proof_recorder.clone()); + api.record_proof_with_recorder(proof_recorder.clone()); api.register_extension(ProofSizeExt::new(proof_recorder)); api.execute_block(parent_hash, built_block.block.clone()).unwrap(); @@ -607,7 +604,7 @@ fn validate_block_handles_ump_signal() { sp_tracing::try_init_simple(); - let (client, parent_head) = create_elastic_scaling_test_client(1); + let (client, parent_head) = create_elastic_scaling_test_client(); let extra_extrinsics = vec![transfer(&client, Alice, Bob, 69), transfer(&client, Bob, Charlie, 100)]; diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index dfaa9ffebd969..ae22a1631defa 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -77,7 +77,6 @@ pub type Client = client::Client; pub struct GenesisParameters { pub endowed_accounts: Vec, pub wasm: Option>, - pub blocks_per_pov: Option, } impl substrate_test_client::GenesisInit for GenesisParameters { @@ -88,7 +87,6 @@ impl substrate_test_client::GenesisInit for GenesisParameters { self.wasm.as_deref().unwrap_or_else(|| { cumulus_test_runtime::WASM_BINARY.expect("WASM binary not compiled!") }), - self.blocks_per_pov, ) .build_storage() .expect("Builds test runtime genesis storage") diff --git 
a/cumulus/test/service/src/chain_spec.rs b/cumulus/test/service/src/chain_spec.rs index 2dc839ca89776..5a10b3d2ccad5 100644 --- a/cumulus/test/service/src/chain_spec.rs +++ b/cumulus/test/service/src/chain_spec.rs @@ -30,7 +30,6 @@ pub fn get_chain_spec_with_extra_endowed( id: Option, extra_endowed_accounts: Vec, code: &[u8], - blocks_per_pov: Option, ) -> GenericChainSpec { let runtime_caller = GenesisConfigBuilderRuntimeCaller::::new(code); let mut development_preset = runtime_caller @@ -84,7 +83,6 @@ pub fn get_chain_spec(id: Option) -> GenericChainSpec { id, Default::default(), cumulus_test_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - None, ) } @@ -95,7 +93,6 @@ pub fn get_elastic_scaling_chain_spec(id: Option) -> GenericChainSpec { Default::default(), cumulus_test_runtime::elastic_scaling::WASM_BINARY .expect("WASM binary was not built, please build it!"), - None, ) } @@ -105,7 +102,6 @@ pub fn get_relay_parent_offset_chain_spec(id: Option) -> GenericChainSpe Default::default(), cumulus_test_runtime::relay_parent_offset::WASM_BINARY .expect("WASM binary was not built, please build it!"), - None, ) } @@ -116,7 +112,6 @@ pub fn get_elastic_scaling_500ms_chain_spec(id: Option) -> GenericChainS Default::default(), cumulus_test_runtime::elastic_scaling_500ms::WASM_BINARY .expect("WASM binary was not built, please build it!"), - None, ) } @@ -127,7 +122,6 @@ pub fn get_elastic_scaling_mvp_chain_spec(id: Option) -> GenericChainSpe Default::default(), cumulus_test_runtime::elastic_scaling_mvp::WASM_BINARY .expect("WASM binary was not built, please build it!"), - None, ) } @@ -137,7 +131,6 @@ pub fn get_pov_bundling_chain_spec(id: Option) -> GenericChainSpec { Default::default(), cumulus_test_runtime::pov_bundling::WASM_BINARY .expect("WASM binary was not built, please build it!"), - None, ) } @@ -147,6 +140,5 @@ pub fn get_sync_backing_chain_spec(id: Option) -> GenericChainSpec { Default::default(), 
cumulus_test_runtime::sync_backing::WASM_BINARY .expect("WASM binary was not built, please build it!"), - None, ) } diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 267d9e1f64199..65eec9de06ced 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -837,7 +837,6 @@ pub fn node_config( Some(para_id), endowed_accounts, cumulus_test_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - None, )); let mut storage = spec.as_storage_builder().build_storage().expect("could not build storage"); diff --git a/substrate/frame/support/src/traits/messages.rs b/substrate/frame/support/src/traits/messages.rs index 0a5c70f8f0fa5..eefe47ff53a61 100644 --- a/substrate/frame/support/src/traits/messages.rs +++ b/substrate/frame/support/src/traits/messages.rs @@ -356,6 +356,16 @@ pub trait HandleMessage { fn sweep_queue(); } +impl HandleMessage for () { + type MaxMessageLen = ConstU32<0>; + + fn handle_message(_: BoundedSlice) {} + + fn handle_messages<'a>(_: impl Iterator>) {} + + fn sweep_queue() {} +} + /// Adapter type to transform an [`EnqueueMessage`] with an origin into a [`HandleMessage`] impl. 
pub struct EnqueueWithOrigin(PhantomData<(E, O)>); impl, O: TypedGet> HandleMessage for EnqueueWithOrigin From 7ce112d2369e0c4a9ec9f6c9c64fbc8ed7252018 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 6 Sep 2025 10:59:12 +0200 Subject: [PATCH 101/312] Adds new test --- Cargo.lock | 1 + cumulus/test/runtime/Cargo.toml | 2 + cumulus/test/runtime/src/test_pallet.rs | 32 +++++++++- .../zombienet-sdk-helpers/src/lib.rs | 64 +++++++++++-------- .../zombie_ci/pov_bundling/runtime_upgrade.rs | 4 +- .../zombie_ci/pov_bundling/utility_weight.rs | 28 ++++++-- substrate/frame/system/src/lib.rs | 9 ++- 7 files changed, 106 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f4715aa1c978d..fb10ce3101006 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5109,6 +5109,7 @@ dependencies = [ "sp-version", "staging-parachain-info", "substrate-wasm-builder", + "tracing", ] [[package]] diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index 38a38ef09657e..9bcc4ae01484b 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -12,6 +12,7 @@ workspace = true codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } serde_json = { workspace = true } +tracing = { workspace = true } # Substrate frame-executive = { workspace = true } @@ -93,6 +94,7 @@ std = [ "sp-transaction-pool/std", "sp-version/std", "substrate-wasm-builder", + "tracing/std" ] increment-spec-version = [] # A runtime which expects to build behind the relay chain tip. 
diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index a972198c300d9..076538bc4992f 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -25,7 +25,7 @@ pub const TEST_RUNTIME_UPGRADE_KEY: &[u8] = b"+test_runtime_upgrade_key+"; pub mod pallet { use crate::test_pallet::TEST_RUNTIME_UPGRADE_KEY; use alloc::vec; - use frame_support::pallet_prelude::*; + use frame_support::{pallet_prelude::*, weights::constants::WEIGHT_REF_TIME_PER_SECOND}; use frame_system::pallet_prelude::*; #[pallet::pallet] @@ -38,8 +38,29 @@ pub mod pallet { #[pallet::storage] pub type TestMap = StorageMap<_, Twox64Concat, u32, (), ValueQuery>; + /// Flag to indicate if a 1s weight should be registered in the next `on_initialize`. + #[pallet::storage] + pub type ScheduleWeightRegistration = StorageValue<_, bool, ValueQuery>; + #[pallet::hooks] - impl Hooks> for Pallet {} + impl Hooks> for Pallet { + fn on_initialize(_n: BlockNumberFor) -> Weight { + if ScheduleWeightRegistration::::get() { + let weight_to_register = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 0); + + let left_weight = frame_system::Pallet::::block_weight_left(); + + if left_weight.can_consume(weight_to_register) { + tracing::info!("Consuming 1s of weight :)"); + // We have enough capacity, consume the flag and register the weight + ScheduleWeightRegistration::::kill(); + return weight_to_register + } + } + + Weight::zero() + } + } #[pallet::call] impl Pallet { @@ -105,6 +126,13 @@ pub mod pallet { TestMap::::remove(key); Ok(()) } + + /// Schedule a 1 second weight registration in the next `on_initialize`. 
+ #[pallet::weight(0)] + pub fn schedule_weight_registration(_: OriginFor) -> DispatchResult { + ScheduleWeightRegistration::::set(true); + Ok(()) + } } #[derive(frame_support::DefaultNoBound)] diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 398408230b7e1..4db085f844e2c 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -7,7 +7,7 @@ use cumulus_primitives_core::{CoreInfo, CumulusDigestItem, RelayBlockIdentifier} use futures::{pin_mut, select, stream::StreamExt, TryStreamExt}; use polkadot_primitives::{vstaging::CandidateReceiptV2, BlakeTwo256, HashT, Id as ParaId}; use sp_runtime::traits::Zero; -use std::{cmp::max, collections::HashMap, ops::Range}; +use std::{cmp::max, collections::HashMap, ops::Range, sync::Arc}; use tokio::{ join, time::{sleep, Duration}, @@ -25,6 +25,15 @@ use zombienet_sdk::subxt::{ Config, OnlineClient, PolkadotConfig, }; +/// Specifies which block should occupy a full core. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum BlockToCheck { + /// The exact block hash provided should occupy a full core. + Exact(H256), + /// The parent of the block that should occupy a full core. + Parent(H256), +} + // Maximum number of blocks to wait for a session change. // If it does not arrive for whatever reason, we should not wait forever. const WAIT_MAX_BLOCKS_FOR_SESSION: u32 = 50; @@ -690,42 +699,27 @@ pub async fn assert_para_is_registered( Err(anyhow!("No more blocks to check")) } -/// Checks if the given `block_hash` is the only block in a core. -/// -/// Assumes that the given block +/// Checks if the specified block occupies a full core. 
pub async fn ensure_is_only_block_in_core( para_client: &OnlineClient, - block_hash: H256, + block_to_check: BlockToCheck, ) -> Result<(), anyhow::Error> { let blocks = para_client.blocks(); - let block = blocks.at(block_hash).await?; - let core_info = find_core_info(&block)?; - - let parent = para_client.blocks().at(block.header().parent_hash).await?; - - // Genesis is for sure on a different core :) - if parent.number() != 0 { - let parent_core_info = find_core_info(&parent)?; - if core_info == parent_core_info { - return Err(anyhow::anyhow!( - "Not first block in core, found in block {}", - block.number() - )) - } - } + let (block_hash, is_parent) = match block_to_check { + BlockToCheck::Exact(block_hash) => (block_hash, false), + BlockToCheck::Parent(block_hash) => (block_hash, true), + }; - let chain_of_blocks = loop { + let mut chain_of_blocks = loop { // Start with the latest best block. - let mut current_block = - std::sync::Arc::new(blocks.subscribe_best().await?.next().await.unwrap()?); + let mut current_block = Arc::new(blocks.subscribe_best().await?.next().await.unwrap()?); let mut chain_of_blocks = vec![]; while current_block.hash() != block_hash { chain_of_blocks.push(current_block.clone()); - current_block = - std::sync::Arc::new(blocks.at(current_block.header().parent_hash).await?); + current_block = Arc::new(blocks.at(current_block.header().parent_hash).await?); if current_block.number() == 0 { return Err(anyhow::anyhow!( @@ -741,6 +735,26 @@ pub async fn ensure_is_only_block_in_core( } }; + // If the input was the parent block, we have the actual block we are interested in as last + // member of `chain_of_blocks`. 
+ let block = blocks + .at(if is_parent { chain_of_blocks.pop().unwrap().hash() } else { block_hash }) + .await?; + let core_info = find_core_info(&block)?; + let parent = blocks.at(block.header().parent_hash).await?; + + // Genesis is for sure on a different core :) + if parent.number() != 0 { + let parent_core_info = find_core_info(&parent)?; + + if core_info == parent_core_info { + return Err(anyhow::anyhow!( + "Not first block in core, found in block {}", + block.number() + )) + } + } + // The last block `CoreInfo` must be different or it shares the core with the block we are // interested in. if core_info == find_core_info(chain_of_blocks.last().unwrap())? { diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/runtime_upgrade.rs index 9e8b94e253aaa..127a0db528178 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/runtime_upgrade.rs @@ -22,7 +22,7 @@ use cumulus_zombienet_sdk_helpers::{ assert_finality_lag, assert_para_throughput, create_assign_core_call, ensure_is_only_block_in_core, find_core_info, submit_extrinsic_and_wait_for_finalization_success, - submit_unsigned_extrinsic_and_wait_for_finalization_success, + submit_unsigned_extrinsic_and_wait_for_finalization_success, BlockToCheck, }; use polkadot_primitives::Id as ParaId; use serde_json::json; @@ -123,7 +123,7 @@ async fn pov_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { .await?; log::info!("Apply authorized upgrade transaction finalized in block: {:?}", block_hash); - ensure_is_only_block_in_core(¶_client, block_hash).await?; + ensure_is_only_block_in_core(¶_client, BlockToCheck::Exact(block_hash)).await?; //TODO: Verify that the runtime upgrade block is also using a full core. 
diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs index 21705578c9f0b..8ebb951d19d64 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs @@ -20,7 +20,7 @@ use cumulus_primitives_core::relay_chain::MAX_POV_SIZE; use cumulus_zombienet_sdk_helpers::{ assert_finality_lag, assert_para_throughput, create_assign_core_call, ensure_is_only_block_in_core, find_core_info, - submit_extrinsic_and_wait_for_finalization_success, + submit_extrinsic_and_wait_for_finalization_success, BlockToCheck, }; use frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND; use polkadot_primitives::Id as ParaId; @@ -85,7 +85,7 @@ async fn pov_bundling_utility_weight() -> Result<(), anyhow::Error> { .await?; log::info!("First transaction finalized"); - ensure_is_only_block_in_core(¶_client, block_hash).await?; + ensure_is_only_block_in_core(¶_client, BlockToCheck::Exact(block_hash)).await?; // Create a transaction that uses more than the allowed POV size per block. 
let pov_size = MAX_POV_SIZE / 4 + 512 * 1024; @@ -98,7 +98,18 @@ async fn pov_bundling_utility_weight() -> Result<(), anyhow::Error> { .await?; log::info!("Second transaction finalized"); - ensure_is_only_block_in_core(¶_client, block_hash).await?; + ensure_is_only_block_in_core(¶_client, BlockToCheck::Exact(block_hash)).await?; + + let third_call = create_schedule_weight_registration_call(); + let sudo_third_call = create_sudo_call(third_call); + + log::info!("Sending third transaction to schedule weight registration"); + let block_hash = + submit_extrinsic_and_wait_for_finalization_success(¶_client, &sudo_third_call, &alice) + .await?; + log::info!("Third transaction finalized, weight registration scheduled for next block"); + + ensure_is_only_block_in_core(¶_client, BlockToCheck::Parent(block_hash)).await?; Ok(()) } @@ -129,6 +140,15 @@ fn create_sudo_call(inner_call: DynamicPayload) -> DynamicPayload { zombienet_sdk::subxt::tx::dynamic("Sudo", "sudo", vec![inner_call.into_value()]) } +/// Creates a `test-pallet` `schedule_weight_registration` call +fn create_schedule_weight_registration_call() -> DynamicPayload { + zombienet_sdk::subxt::tx::dynamic( + "TestPallet", + "schedule_weight_registration", + vec![] as Vec, + ) +} + async fn build_network_config() -> Result { let images = zombienet_sdk::environment::get_images_from_env(); log::info!("Using images: {images:?}"); @@ -163,7 +183,7 @@ async fn build_network_config() -> Result { .with_default_args(vec![ ("--authoring").into(), ("slot-based").into(), - ("-lparachain=debug,aura=trace").into(), + ("-lparachain=debug,aura=trace,runtime=trace").into(), ]) .with_collator(|n| n.with_name("collator-0")) .with_collator(|n| n.with_name("collator-1")) diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 27cb0d1c08947..0be77f8058f49 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -148,7 +148,7 @@ use sp_runtime::{ traits::{DispatchInfoOf, 
PostDispatchInfoOf}, transaction_validity::TransactionValidityError, }; -use sp_weights::{RuntimeDbWeight, Weight}; +use sp_weights::{RuntimeDbWeight, Weight, WeightMeter}; #[cfg(any(feature = "std", test))] use sp_io::TestExternalities; @@ -2387,6 +2387,13 @@ impl Pallet { Ok(()) } + + /// Returns the weight left for the block. + pub fn block_weight_left() -> WeightMeter { + let left_weight = + T::BlockWeights::get().max_block.saturating_sub(BlockWeight::::get().total()); + WeightMeter::with_limit(left_weight) + } } /// Returns a 32 byte datum which is guaranteed to be universally unique. `entropy` is provided From 7dde4a91a90d83428c91b451a6b0eb391c0ae7c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 6 Sep 2025 10:59:52 +0200 Subject: [PATCH 102/312] Start fixing the test --- cumulus/pallets/parachain-system/src/lib.rs | 2 +- .../src/max_parachain_block_weight.rs | 93 ++++++++++++++++--- 2 files changed, 83 insertions(+), 12 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 1c29adeac9d9a..5b7ee2a280f1d 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -114,7 +114,7 @@ pub use unincluded_segment::{Ancestor, UsedBandwidth}; pub use pallet::*; -const LOG_TARGET: &str = "parachain-system"; +const LOG_TARGET: &str = "runtime::parachain-system"; /// Something that can check the associated relay block number. 
/// diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs index 039fc3caf6d0a..db80bc541951a 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs @@ -25,6 +25,7 @@ use frame_support::{ pallet_prelude::{ InvalidTransaction, TransactionSource, TransactionValidityError, ValidTransaction, }, + traits::PreInherents, weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, }; use polkadot_primitives::MAX_POV_SIZE; @@ -32,9 +33,11 @@ use scale_info::TypeInfo; use sp_core::Get; use sp_runtime::{ traits::{DispatchInfoOf, Dispatchable, Implication, PostDispatchInfoOf, TransactionExtension}, - DispatchResult, + Digest, DispatchResult, }; +const LOG_TARGET: &str = "runtime::parachain-system::block-weight"; + #[derive(Debug, Encode, Decode, Clone, Copy, TypeInfo)] pub enum BlockWeightMode { FullCore, @@ -67,6 +70,10 @@ impl MaxParachainBlockWeight { /// Returns the calculated maximum weight, or a conservative default if no core info is found /// or if an error occurs during calculation. pub fn get(target_blocks: u32) -> Weight { + let digest = frame_system::Pallet::::digest(); + let target_block_weight = + Self::target_block_weight_with_digest::(target_blocks, &digest); + // If we are in `on_initialize` or at applying the inherents, we should // allow the full core weight. 
if !frame_system::Pallet::::inherents_applied() { @@ -90,7 +97,10 @@ impl MaxParachainBlockWeight { fn target_block_weight(target_blocks: u32) -> Weight { let digest = frame_system::Pallet::::digest(); + Self::target_block_weight_with_digest::(target_blocks, &digest) + } + fn target_block_weight_with_digest(target_blocks: u32, digest: &Digest) -> Weight { let Some(core_info) = CumulusDigestItem::find_core_info(&digest) else { return Self::FULL_CORE_WEIGHT; }; @@ -115,6 +125,55 @@ impl MaxParachainBlockWeight { } } +/// Is this the first block in a core? +fn is_first_block_in_core() -> bool { + let digest = frame_system::Pallet::::digest(); + CumulusDigestItem::find_bundle_info(&digest).map_or(false, |bi| bi.index == 0) +} + +/// Is the `BlockWeight` already above the target block weight? +fn block_weight_over_target_block_weight>() -> bool { + let target_block_weight = + MaxParachainBlockWeight::target_block_weight::(TargetBlockRate::get()); + + frame_system::Pallet::::block_weight_left() + .consumed() + .any_gt(target_block_weight) +} + +pub struct DynamicMaxBlockWeightPreInherent( + core::marker::PhantomData<(T, TargetBlockRate)>, +); + +impl PreInherents for DynamicMaxBlockWeightPreInherent +where + T: Config, + TargetBlockRate: Get, +{ + fn pre_inherents() { + if block_weight_over_target_block_weight::() { + let is_first_block_in_core = is_first_block_in_core::(); + + if !is_first_block_in_core { + log::error!( + target: LOG_TARGET, + "Inherent block logic took longer than the target block weight, THIS IS A BUG!!!", + ); + } else { + log::debug!( + target: LOG_TARGET, + "Inherent block logic took longer than the target block weight, going to use the full core", + ); + } + + crate::BlockWeightMode::::put(BlockWeightMode::FullCore); + + // Inform the node that this block uses the full core. 
+ frame_system::Pallet::::deposit_log(CumulusDigestItem::UseFullCore.to_digest_item()); + } + } +} + #[derive(Encode, Decode, DecodeWithMemTracking, TypeInfo)] #[derive_where::derive_where(Clone, Eq, PartialEq, Default; S)] #[scale_info(skip_type_params(T, TargetBlockRate))] @@ -139,11 +198,6 @@ where info: &DispatchInfo, len: usize, ) -> Result<(), TransactionValidityError> { - let digest = frame_system::Pallet::::digest(); - - let is_first_block_on_core = - CumulusDigestItem::find_bundle_info(&digest).map_or(false, |bi| bi.index == 0); - if frame_system::Pallet::::inherents_applied() { let extrinsic_index = frame_system::Pallet::::extrinsic_index().unwrap_or_default(); @@ -157,20 +211,37 @@ where BlockWeightMode::FullCore => {}, BlockWeightMode::PotentialFullCore { first_transaction_index } | BlockWeightMode::FractionOfCore { first_transaction_index } => { - let potential = + let is_potential = matches!(current_mode, BlockWeightMode::PotentialFullCore { .. }); debug_assert!( - !potential, + !is_potential, "`PotentialFullCore` should resolve to `FullCore` or `FractionOfCore` after applying a transaction.", ); - if info + let block_weight_over_limit = first_transaction_index == extrinsic_index + && block_weight_over_target_block_weight::(); + + // Protection against a misconfiguration as this should be detected by the pre-inherent hook. + if block_weight_over_limit { + *mode = Some(BlockWeightMode::FullCore); + + // Inform the node that this block uses the full core. 
+ frame_system::Pallet::::deposit_log( + CumulusDigestItem::UseFullCore.to_digest_item(), + ); + + log::error!( + target: LOG_TARGET, + "Inherent block logic took longer than the target block weight, \ + `DynamicMaxBlockWeightPreInherent` not registered as `PreInherents` hook!", + ); + } else if info .total_weight() // The extrinsic lengths counts towards the POV size .saturating_add(Weight::from_parts(0, len as u64)) .any_gt(MaxParachainBlockWeight::target_block_weight::( TargetBlockRate::get(), - )) && is_first_block_on_core + )) && is_first_block_in_core::() { if extrinsic_index.saturating_sub(first_transaction_index) < 10 { *mode = Some(BlockWeightMode::PotentialFullCore { @@ -179,7 +250,7 @@ where } else { return Err(InvalidTransaction::ExhaustsResources) } - } else if potential { + } else if is_potential { *mode = Some(BlockWeightMode::FractionOfCore { first_transaction_index }); } From 5741aa8865f05d13462ddcb3816f220f3fb5e5a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 8 Sep 2025 23:08:35 +0200 Subject: [PATCH 103/312] Fixes --- .../src/max_parachain_block_weight.rs | 26 ++++++++++++------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs index db80bc541951a..231297e1b0100 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs @@ -73,26 +73,29 @@ impl MaxParachainBlockWeight { let digest = frame_system::Pallet::::digest(); let target_block_weight = Self::target_block_weight_with_digest::(target_blocks, &digest); + let max_block_weight = if is_first_block_in_core_with_digest(&digest) { + Self::FULL_CORE_WEIGHT + } else { + target_block_weight + }; - // If we are in `on_initialize` or at applying the inherents, we should - // allow the full core weight. 
+ // If we are in `on_initialize` or at applying the inherents, we allow the maximum block + // weight as allowed by the current context. if !frame_system::Pallet::::inherents_applied() { - return Self::FULL_CORE_WEIGHT + return max_block_weight } match crate::BlockWeightMode::::get() { // We allow the full core. Some(BlockWeightMode::FullCore | BlockWeightMode::PotentialFullCore { .. }) => - return Self::FULL_CORE_WEIGHT, + Self::FULL_CORE_WEIGHT, // Let's calculate below how much weight we can use. - Some(BlockWeightMode::FractionOfCore { .. }) => (), + Some(BlockWeightMode::FractionOfCore { .. }) => target_block_weight, // Either the runtime is not using the `DynamicMaxBlockWeight` extension or there is // some bug. Because after the inherents are applied, this value should be set by the // extension. To be on the safe side, we allow the full core weight. - None => return Self::FULL_CORE_WEIGHT, + None => Self::FULL_CORE_WEIGHT, } - - Self::target_block_weight::(target_blocks) } fn target_block_weight(target_blocks: u32) -> Weight { @@ -128,7 +131,12 @@ impl MaxParachainBlockWeight { /// Is this the first block in a core? fn is_first_block_in_core() -> bool { let digest = frame_system::Pallet::::digest(); - CumulusDigestItem::find_bundle_info(&digest).map_or(false, |bi| bi.index == 0) + is_first_block_in_core_with_digest(&digest) +} + +/// Is this the first block in a core? (takes digest as parameter) +fn is_first_block_in_core_with_digest(digest: &Digest) -> bool { + CumulusDigestItem::find_bundle_info(digest).map_or(false, |bi| bi.index == 0) } /// Is the `BlockWeight` already above the target block weight? 
From 24b372504b7f691438de7ab78f9c6792664f7bf6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 8 Sep 2025 23:08:44 +0200 Subject: [PATCH 104/312] Try some things which are not yet perfect --- .../zombienet-sdk-helpers/src/lib.rs | 133 ++++++++++-------- .../zombie_ci/pov_bundling/utility_weight.rs | 2 +- 2 files changed, 78 insertions(+), 57 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 4db085f844e2c..c6441b2e0f274 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -3,11 +3,11 @@ use anyhow::anyhow; use codec::{Decode, Encode}; -use cumulus_primitives_core::{CoreInfo, CumulusDigestItem, RelayBlockIdentifier}; +use cumulus_primitives_core::{BundleInfo, CoreInfo, CumulusDigestItem, RelayBlockIdentifier}; use futures::{pin_mut, select, stream::StreamExt, TryStreamExt}; use polkadot_primitives::{vstaging::CandidateReceiptV2, BlakeTwo256, HashT, Id as ParaId}; use sp_runtime::traits::Zero; -use std::{cmp::max, collections::HashMap, ops::Range, sync::Arc}; +use std::{cmp::max, collections::HashMap, ops::Range}; use tokio::{ join, time::{sleep, Duration}, @@ -30,8 +30,8 @@ use zombienet_sdk::subxt::{ pub enum BlockToCheck { /// The exact block hash provided should occupy a full core. Exact(H256), - /// The parent of the block that should occupy a full core. - Parent(H256), + /// Wait for the next first bundle block. + NextFirstBundleBlock(H256), } // Maximum number of blocks to wait for a session change. @@ -699,6 +699,38 @@ pub async fn assert_para_is_registered( Err(anyhow!("No more blocks to check")) } +/// Returns [`BundleInfo`] for the given parachain block. 
+fn find_bundle_info( + block: &Block>, +) -> Result { + let substrate_digest = + sp_runtime::generic::Digest::decode(&mut &block.header().digest.encode()[..]) + .expect("`subxt::Digest` and `substrate::Digest` should encode and decode; qed"); + + CumulusDigestItem::find_bundle_info(&substrate_digest) + .ok_or_else(|| anyhow!("Failed to find `BundleInfo` digest")) +} + +/// Validates that the given block is the first block on its core (bundle index == 0). +async fn validate_block_is_only_on_core( + para_client: &OnlineClient, + block_hash: H256, +) -> Result<(), anyhow::Error> { + let blocks = para_client.blocks(); + let block = blocks.at(block_hash).await?; + + // Check if this block is the first block in the bundle (index == 0) + let bundle_info = find_bundle_info(&block)?; + if bundle_info.index != 0 { + return Err(anyhow::anyhow!( + "Not first block in core, found block with bundle index {}", + bundle_info.index + )); + } + + Ok(()) +} + /// Checks if the specified block occupies a full core. pub async fn ensure_is_only_block_in_core( para_client: &OnlineClient, @@ -706,60 +738,49 @@ pub async fn ensure_is_only_block_in_core( ) -> Result<(), anyhow::Error> { let blocks = para_client.blocks(); - let (block_hash, is_parent) = match block_to_check { - BlockToCheck::Exact(block_hash) => (block_hash, false), - BlockToCheck::Parent(block_hash) => (block_hash, true), - }; + match block_to_check { + BlockToCheck::Exact(block_hash) => + validate_block_is_only_on_core(para_client, block_hash).await, + BlockToCheck::NextFirstBundleBlock(start_block_hash) => { + // Find the first block after start_block_hash that has bundle index 0 + let mut current_block = blocks.at(start_block_hash).await?; - let mut chain_of_blocks = loop { - // Start with the latest best block. 
- let mut current_block = Arc::new(blocks.subscribe_best().await?.next().await.unwrap()?); - - let mut chain_of_blocks = vec![]; - - while current_block.hash() != block_hash { - chain_of_blocks.push(current_block.clone()); - current_block = Arc::new(blocks.at(current_block.header().parent_hash).await?); + loop { + // Get the next block by subscribing to best blocks and finding blocks after + // current_block + let mut best_block_stream = blocks.subscribe_best().await?; + + // Find a block that comes after our current block + let mut next_block = None; + while let Some(block) = best_block_stream.next().await.transpose()? { + if block.number() > current_block.number() { + // Walk back from this block to find the direct child of current_block + let mut candidate = block; + while candidate.number() > current_block.number() + 1 { + candidate = blocks.at(candidate.header().parent_hash).await?; + } + if candidate.header().parent_hash == current_block.hash() { + next_block = Some(candidate); + break; + } + } + } - if current_block.number() == 0 { - return Err(anyhow::anyhow!( - "Did not found block while going backwards from the best block" - )) + if let Some(next) = next_block { + // Check if this block has bundle index 0 (first block on core) + if let Ok(bundle_info) = find_bundle_info(&next) { + if bundle_info.index == 0 { + // Found the first bundle block, now validate it using the common logic + return validate_block_is_only_on_core(para_client, next.hash()).await; + } + } + + current_block = next; + } else { + // Wait a bit before trying again + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + } } - } - - // It possible that the first block we got is the same as the transaction got finalized. - // So, we just retry again until we found some more blocks. - if !chain_of_blocks.is_empty() { - break chain_of_blocks - } - }; - - // If the input was the parent block, we have the actual block we are interested in as last - // member of `chain_of_blocks`. 
- let block = blocks - .at(if is_parent { chain_of_blocks.pop().unwrap().hash() } else { block_hash }) - .await?; - let core_info = find_core_info(&block)?; - let parent = blocks.at(block.header().parent_hash).await?; - - // Genesis is for sure on a different core :) - if parent.number() != 0 { - let parent_core_info = find_core_info(&parent)?; - - if core_info == parent_core_info { - return Err(anyhow::anyhow!( - "Not first block in core, found in block {}", - block.number() - )) - } - } - - // The last block `CoreInfo` must be different or it shares the core with the block we are - // interested in. - if core_info == find_core_info(chain_of_blocks.last().unwrap())? { - Err(anyhow::anyhow!("Found more blocks on the same core")) - } else { - Ok(()) + }, } } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs index 8ebb951d19d64..8e90d4a1e4b28 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs @@ -109,7 +109,7 @@ async fn pov_bundling_utility_weight() -> Result<(), anyhow::Error> { .await?; log::info!("Third transaction finalized, weight registration scheduled for next block"); - ensure_is_only_block_in_core(¶_client, BlockToCheck::Parent(block_hash)).await?; + ensure_is_only_block_in_core(¶_client, BlockToCheck::NextFirstBundleBlock(block_hash)).await?; Ok(()) } From 5b87420d8f9795c56dfe9c8d53ace1e4260e9713 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 9 Sep 2025 15:10:12 +0200 Subject: [PATCH 105/312] Fix function --- .../zombienet-sdk-helpers/src/lib.rs | 102 +++++++++++------- 1 file changed, 61 insertions(+), 41 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index c6441b2e0f274..1bc8d5ea603d8 100644 --- 
a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -7,7 +7,7 @@ use cumulus_primitives_core::{BundleInfo, CoreInfo, CumulusDigestItem, RelayBloc use futures::{pin_mut, select, stream::StreamExt, TryStreamExt}; use polkadot_primitives::{vstaging::CandidateReceiptV2, BlakeTwo256, HashT, Id as ParaId}; use sp_runtime::traits::Zero; -use std::{cmp::max, collections::HashMap, ops::Range}; +use std::{cmp::max, collections::HashMap, ops::Range, sync::Arc}; use tokio::{ join, time::{sleep, Duration}, @@ -712,19 +712,54 @@ fn find_bundle_info( } /// Validates that the given block is the first block on its core (bundle index == 0). -async fn validate_block_is_only_on_core( +async fn ensure_is_only_block_in_core_impl( para_client: &OnlineClient, block_hash: H256, ) -> Result<(), anyhow::Error> { let blocks = para_client.blocks(); let block = blocks.at(block_hash).await?; + let block_core_info = find_core_info(&block)?; - // Check if this block is the first block in the bundle (index == 0) - let bundle_info = find_bundle_info(&block)?; - if bundle_info.index != 0 { + let parent = blocks.at(block.header().parent_hash).await?; + let parent_core_info = find_core_info(&parent)?; + + if parent_core_info == block_core_info { + return Err(anyhow::anyhow!( + "Not first block ({}) in core, at least the parent block is on the same core.", + block.header().number + )); + } + + let next_block = loop { + // Start with the latest best block. 
+ let mut current_block = Arc::new(blocks.subscribe_best().await?.next().await.unwrap()?); + + let mut next_block = None; + + while current_block.hash() != block_hash { + next_block = Some(current_block.clone()); + current_block = Arc::new(blocks.at(current_block.header().parent_hash).await?); + + if current_block.number() == 0 { + return Err(anyhow::anyhow!( + "Did not found block while going backwards from the best block" + )) + } + } + + // It possible that the first block we got is the same as the transaction got finalized. + // So, we just retry again until we found some more blocks. + if let Some(next_block) = next_block { + break next_block + } + }; + + let next_block_core_info = find_core_info(&next_block)?; + + if next_block_core_info == block_core_info { return Err(anyhow::anyhow!( - "Not first block in core, found block with bundle index {}", - bundle_info.index + "Not first block ({}) in core, at least the following block is on the same core.", + block.header().number )); } @@ -740,47 +775,32 @@ pub async fn ensure_is_only_block_in_core( match block_to_check { BlockToCheck::Exact(block_hash) => - validate_block_is_only_on_core(para_client, block_hash).await, + ensure_is_only_block_in_core_impl(para_client, block_hash).await, BlockToCheck::NextFirstBundleBlock(start_block_hash) => { - // Find the first block after start_block_hash that has bundle index 0 - let mut current_block = blocks.at(start_block_hash).await?; + let start_block = blocks.at(start_block_hash).await?; - loop { - // Get the next block by subscribing to best blocks and finding blocks after - // current_block - let mut best_block_stream = blocks.subscribe_best().await?; - - // Find a block that comes after our current block - let mut next_block = None; - while let Some(block) = best_block_stream.next().await.transpose()? 
{ - if block.number() > current_block.number() { - // Walk back from this block to find the direct child of current_block - let mut candidate = block; - while candidate.number() > current_block.number() + 1 { - candidate = blocks.at(candidate.header().parent_hash).await?; - } - if candidate.header().parent_hash == current_block.hash() { - next_block = Some(candidate); - break; - } - } - } + let mut best_block_stream = blocks.subscribe_best().await?; - if let Some(next) = next_block { - // Check if this block has bundle index 0 (first block on core) - if let Ok(bundle_info) = find_bundle_info(&next) { - if bundle_info.index == 0 { - // Found the first bundle block, now validate it using the common logic - return validate_block_is_only_on_core(para_client, next.hash()).await; - } + let mut next_first_bundle_block = None; + while let Some(mut block) = best_block_stream.next().await.transpose()? { + while block.number() > start_block.number() { + if find_bundle_info(&block)?.index == 0 { + next_first_bundle_block = Some(block.hash()); } - current_block = next; - } else { - // Wait a bit before trying again - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + block = blocks.at(block.header().parent_hash).await?; + } + + if next_first_bundle_block.is_some() { + break; } } + + if let Some(block) = next_first_bundle_block { + ensure_is_only_block_in_core_impl(para_client, block).await + } else { + Err(anyhow!("Could not find the next bundle after {}", start_block.number())) + } }, } } From 894d987a2b4d58480222c5d282013e15350055f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 10 Sep 2025 13:46:19 +0200 Subject: [PATCH 106/312] Fix test --- .../src/max_parachain_block_weight.rs | 2 +- cumulus/test/runtime/src/lib.rs | 1 + cumulus/test/runtime/src/test_pallet.rs | 2 +- .../zombienet/zombienet-sdk-helpers/src/lib.rs | 16 ++++++++++------ substrate/frame/system/src/lib.rs | 11 ++++++----- 
substrate/primitives/weights/src/weight_meter.rs | 5 +++++ 6 files changed, 24 insertions(+), 13 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs index 231297e1b0100..3419e817d042d 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs @@ -144,7 +144,7 @@ fn block_weight_over_target_block_weight>() let target_block_weight = MaxParachainBlockWeight::target_block_weight::(TargetBlockRate::get()); - frame_system::Pallet::::block_weight_left() + frame_system::Pallet::::remaining_block_weight() .consumed() .any_gt(target_block_weight) } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 2020f5576b892..5472bcbba951a 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -262,6 +262,7 @@ impl frame_system::Config for Runtime { type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; + type PreInherents = cumulus_pallet_parachain_system::max_parachain_block_weight::DynamicMaxBlockWeightPreInherent; } impl cumulus_pallet_weight_reclaim::Config for Runtime { diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index 076538bc4992f..8afbd3f1d3b9a 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -48,7 +48,7 @@ pub mod pallet { if ScheduleWeightRegistration::::get() { let weight_to_register = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 0); - let left_weight = frame_system::Pallet::::block_weight_left(); + let left_weight = frame_system::Pallet::::remaining_block_weight(); if left_weight.can_consume(weight_to_register) { tracing::info!("Consuming 1s of weight :)"); diff --git 
a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 1bc8d5ea603d8..452d619a682b0 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -721,13 +721,17 @@ async fn ensure_is_only_block_in_core_impl( let block_core_info = find_core_info(&block)?; let parent = blocks.at(block.header().parent_hash).await?; - let parent_core_info = find_core_info(&parent)?; - if parent_core_info == block_core_info { - return Err(anyhow::anyhow!( - "Not first block ({}) in core, at least the parent block is on the same core.", - block.header().number - )); + // Genesis is for sure on a different core :) + if parent.number() != 0 { + let parent_core_info = find_core_info(&parent)?; + + if parent_core_info == block_core_info { + return Err(anyhow::anyhow!( + "Not first block ({}) in core, at least the parent block is on the same core.", + block.header().number + )); + } } let next_block = loop { diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 0be77f8058f49..483ef74ce3816 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -2388,11 +2388,12 @@ impl Pallet { Ok(()) } - /// Returns the weight left for the block. - pub fn block_weight_left() -> WeightMeter { - let left_weight = - T::BlockWeights::get().max_block.saturating_sub(BlockWeight::::get().total()); - WeightMeter::with_limit(left_weight) + /// Returns the remaining weight of the block. 
+ pub fn remaining_block_weight() -> WeightMeter { + let limit = T::BlockWeights::get().max_block; + let consumed = BlockWeight::::get().total(); + + WeightMeter::with_consumed_and_limit(consumed, limit) } } diff --git a/substrate/primitives/weights/src/weight_meter.rs b/substrate/primitives/weights/src/weight_meter.rs index cfe8396ae6d67..9a8bbb3bccb52 100644 --- a/substrate/primitives/weights/src/weight_meter.rs +++ b/substrate/primitives/weights/src/weight_meter.rs @@ -50,6 +50,11 @@ pub struct WeightMeter { } impl WeightMeter { + /// Creates [`Self`] from `consumed` and `limit`. + pub fn with_consumed_and_limit(consumed: Weight, limit: Weight) -> Self { + Self { consumed, limit } + } + /// Creates [`Self`] from a limit for the maximal consumable weight. pub fn with_limit(limit: Weight) -> Self { Self { consumed: Weight::zero(), limit } From 2725c6fd789f96b1416e62d00027be5a2d24386e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 12 Sep 2025 16:52:04 +0200 Subject: [PATCH 107/312] Introduce new test for inherents --- .../src/max_parachain_block_weight.rs | 149 +++++++++--------- cumulus/test/runtime/src/lib.rs | 3 +- cumulus/test/runtime/src/test_pallet.rs | 60 ++++++- .../zombie_ci/pov_bundling/utility_weight.rs | 29 +++- 4 files changed, 163 insertions(+), 78 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs index 3419e817d042d..61e9f0ba32261 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs @@ -41,8 +41,8 @@ const LOG_TARGET: &str = "runtime::parachain-system::block-weight"; #[derive(Debug, Encode, Decode, Clone, Copy, TypeInfo)] pub enum BlockWeightMode { FullCore, - PotentialFullCore { first_transaction_index: u32 }, - FractionOfCore { first_transaction_index: u32 }, + PotentialFullCore { first_transaction_index: Option 
}, + FractionOfCore { first_transaction_index: Option }, } /// A utility type for calculating the maximum block weight for a parachain based on @@ -73,7 +73,8 @@ impl MaxParachainBlockWeight { let digest = frame_system::Pallet::::digest(); let target_block_weight = Self::target_block_weight_with_digest::(target_blocks, &digest); - let max_block_weight = if is_first_block_in_core_with_digest(&digest) { + + let maybe_full_core_weight = if is_first_block_in_core_with_digest(&digest) { Self::FULL_CORE_WEIGHT } else { target_block_weight @@ -82,7 +83,7 @@ impl MaxParachainBlockWeight { // If we are in `on_initialize` or at applying the inherents, we allow the maximum block // weight as allowed by the current context. if !frame_system::Pallet::::inherents_applied() { - return max_block_weight + return maybe_full_core_weight } match crate::BlockWeightMode::::get() { @@ -91,10 +92,9 @@ impl MaxParachainBlockWeight { Self::FULL_CORE_WEIGHT, // Let's calculate below how much weight we can use. Some(BlockWeightMode::FractionOfCore { .. }) => target_block_weight, - // Either the runtime is not using the `DynamicMaxBlockWeight` extension or there is - // some bug. Because after the inherents are applied, this value should be set by the - // extension. To be on the safe side, we allow the full core weight. - None => Self::FULL_CORE_WEIGHT, + // Either the runtime is not using the `DynamicMaxBlockWeight` extension or there is a + // bug. The value should be set before applying the first extrinsic. 
+ None => maybe_full_core_weight, } } @@ -149,11 +149,9 @@ fn block_weight_over_target_block_weight>() .any_gt(target_block_weight) } -pub struct DynamicMaxBlockWeightPreInherent( - core::marker::PhantomData<(T, TargetBlockRate)>, -); +pub struct MaxBlockWeightHooks(core::marker::PhantomData<(T, TargetBlockRate)>); -impl PreInherents for DynamicMaxBlockWeightPreInherent +impl PreInherents for MaxBlockWeightHooks where T: Config, TargetBlockRate: Get, @@ -202,74 +200,75 @@ where T: Config, TargetBlockRate: Get, { - fn pre_dispatch_extrinsic( + fn pre_validate_extrinsic( info: &DispatchInfo, len: usize, ) -> Result<(), TransactionValidityError> { - if frame_system::Pallet::::inherents_applied() { - let extrinsic_index = frame_system::Pallet::::extrinsic_index().unwrap_or_default(); - - crate::BlockWeightMode::::mutate(|mode| { - let current_mode = *mode.get_or_insert_with(|| BlockWeightMode::FractionOfCore { - first_transaction_index: extrinsic_index, - }); - - match current_mode { - // We are already allowing the full core, not that much more to do here. - BlockWeightMode::FullCore => {}, - BlockWeightMode::PotentialFullCore { first_transaction_index } | - BlockWeightMode::FractionOfCore { first_transaction_index } => { - let is_potential = - matches!(current_mode, BlockWeightMode::PotentialFullCore { .. }); - debug_assert!( - !is_potential, - "`PotentialFullCore` should resolve to `FullCore` or `FractionOfCore` after applying a transaction.", + let is_not_inherent = frame_system::Pallet::::inherents_applied(); + let extrinsic_index = is_not_inherent + .then(|| frame_system::Pallet::::extrinsic_index().unwrap_or_default()); + + crate::BlockWeightMode::::mutate(|mode| { + let current_mode = *mode.get_or_insert_with(|| BlockWeightMode::FractionOfCore { + first_transaction_index: extrinsic_index, + }); + + match current_mode { + // We are already allowing the full core, not that much more to do here. 
+ BlockWeightMode::FullCore => {}, + BlockWeightMode::PotentialFullCore { first_transaction_index } | + BlockWeightMode::FractionOfCore { first_transaction_index } => { + let is_potential = + matches!(current_mode, BlockWeightMode::PotentialFullCore { .. }); + debug_assert!( + !is_potential, + "`PotentialFullCore` should resolve to `FullCore` or `FractionOfCore` after applying a transaction.", + ); + + let block_weight_over_limit = first_transaction_index == extrinsic_index + && block_weight_over_target_block_weight::(); + + // Protection against a misconfiguration as this should be detected by the pre-inherent hook. + if block_weight_over_limit { + *mode = Some(BlockWeightMode::FullCore); + + // Inform the node that this block uses the full core. + frame_system::Pallet::::deposit_log( + CumulusDigestItem::UseFullCore.to_digest_item(), ); - let block_weight_over_limit = first_transaction_index == extrinsic_index - && block_weight_over_target_block_weight::(); - - // Protection against a misconfiguration as this should be detected by the pre-inherent hook. - if block_weight_over_limit { - *mode = Some(BlockWeightMode::FullCore); - - // Inform the node that this block uses the full core. 
- frame_system::Pallet::::deposit_log( - CumulusDigestItem::UseFullCore.to_digest_item(), - ); - - log::error!( - target: LOG_TARGET, - "Inherent block logic took longer than the target block weight, \ - `DynamicMaxBlockWeightPreInherent` not registered as `PreInherents` hook!", - ); - } else if info - .total_weight() - // The extrinsic lengths counts towards the POV size - .saturating_add(Weight::from_parts(0, len as u64)) - .any_gt(MaxParachainBlockWeight::target_block_weight::( - TargetBlockRate::get(), - )) && is_first_block_in_core::() - { - if extrinsic_index.saturating_sub(first_transaction_index) < 10 { - *mode = Some(BlockWeightMode::PotentialFullCore { - first_transaction_index, - }); - } else { - return Err(InvalidTransaction::ExhaustsResources) - } - } else if is_potential { - *mode = - Some(BlockWeightMode::FractionOfCore { first_transaction_index }); + log::error!( + target: LOG_TARGET, + "Inherent block logic took longer than the target block weight, \ + `DynamicMaxBlockWeightPreInherent` not registered as `PreInherents` hook!", + ); + } else if info + .total_weight() + // The extrinsic lengths counts towards the POV size + .saturating_add(Weight::from_parts(0, len as u64)) + .any_gt(MaxParachainBlockWeight::target_block_weight::( + TargetBlockRate::get(), + )) && is_first_block_in_core::() + { + // TODO: make 10 configurable + if extrinsic_index.unwrap_or_default().saturating_sub(first_transaction_index.unwrap_or_default()) < 10 { + *mode = Some(BlockWeightMode::PotentialFullCore { + // While applying inherents `extrinsic_index` and `first_transaction_index` will be `None`. + // When the first transaction is applied, we want to store the index. 
+ first_transaction_index: first_transaction_index.or(extrinsic_index), + }); + } else { + return Err(InvalidTransaction::ExhaustsResources) } - }, - }; + } else if is_potential { + *mode = + Some(BlockWeightMode::FractionOfCore { first_transaction_index }); + } + }, + }; - Ok(()) - }).map_err(Into::into) - } else { Ok(()) - } + }).map_err(Into::into) } fn post_dispatch_extrinsic(info: &DispatchInfo, post_info: &PostDispatchInfo, len: usize) { @@ -363,6 +362,8 @@ where inherited_implication: &impl Implication, source: TransactionSource, ) -> Result<(ValidTransaction, Self::Val, T::RuntimeOrigin), TransactionValidityError> { + Self::pre_validate_extrinsic(info, len)?; + self.0 .validate(origin, call, info, len, self_implicit, inherited_implication, source) } @@ -375,8 +376,6 @@ where info: &DispatchInfoOf, len: usize, ) -> Result { - Self::pre_dispatch_extrinsic(info, len)?; - self.0.prepare(val, origin, call, info, len) } @@ -409,7 +408,7 @@ where ) -> Result<(), TransactionValidityError> { S::bare_validate_and_prepare(call, info, len)?; - Self::pre_dispatch_extrinsic(info, len)?; + Self::pre_validate_extrinsic(info, len)?; Ok(()) } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 5472bcbba951a..f59a9909be966 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -262,7 +262,8 @@ impl frame_system::Config for Runtime { type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; - type PreInherents = cumulus_pallet_parachain_system::max_parachain_block_weight::DynamicMaxBlockWeightPreInherent; + type PreInherents = + cumulus_pallet_parachain_system::max_parachain_block_weight::MaxBlockWeightHooks; } impl cumulus_pallet_weight_reclaim::Config for Runtime { diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index 8afbd3f1d3b9a..e7bd1ab6baf9b 100644 --- 
a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -25,9 +25,16 @@ pub const TEST_RUNTIME_UPGRADE_KEY: &[u8] = b"+test_runtime_upgrade_key+"; pub mod pallet { use crate::test_pallet::TEST_RUNTIME_UPGRADE_KEY; use alloc::vec; - use frame_support::{pallet_prelude::*, weights::constants::WEIGHT_REF_TIME_PER_SECOND}; + use frame_support::{ + inherent::{InherentData, InherentIdentifier, ProvideInherent}, + pallet_prelude::*, + weights::constants::WEIGHT_REF_TIME_PER_SECOND, + }; use frame_system::pallet_prelude::*; + /// The inherent identifier for weight consumption. + pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"consume0"; + #[pallet::pallet] pub struct Pallet(_); @@ -42,6 +49,10 @@ pub mod pallet { #[pallet::storage] pub type ScheduleWeightRegistration = StorageValue<_, bool, ValueQuery>; + /// Weight to be consumed by the inherent call. + #[pallet::storage] + pub type InherentWeightConsume = StorageValue<_, Weight, OptionQuery>; + #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(_n: BlockNumberFor) -> Weight { @@ -133,6 +144,53 @@ pub mod pallet { ScheduleWeightRegistration::::set(true); Ok(()) } + + /// Set the weight to be consumed by the next inherent call. + #[pallet::weight(0)] + pub fn set_inherent_weight_consume(_: OriginFor, weight: Weight) -> DispatchResult { + InherentWeightConsume::::put(weight); + Ok(()) + } + + /// Consume weight via inherent call (clears the storage after consuming). 
+ #[pallet::weight(( + InherentWeightConsume::::get().unwrap_or_default(), + DispatchClass::Mandatory + ))] + pub fn consume_weight_inherent(origin: OriginFor) -> DispatchResult { + ensure_none(origin)?; + + // Clear the storage item to ensure this can only be called once per inherent + InherentWeightConsume::::kill(); + + Ok(()) + } + } + + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + type Error = sp_inherents::MakeFatalError<()>; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(_data: &InherentData) -> Option { + // Check if there's weight to consume from storage + let weight_to_consume = InherentWeightConsume::::get()?; + + // Check if the weight fits in the remaining block capacity + let remaining_weight = frame_system::Pallet::::remaining_block_weight(); + + if remaining_weight.can_consume(weight_to_consume) { + Some(Call::consume_weight_inherent {}) + } else { + // Weight doesn't fit, don't create the inherent + None + } + } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::consume_weight_inherent {}) + } } #[derive(frame_support::DefaultNoBound)] diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs index 8e90d4a1e4b28..12180af3839bd 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs @@ -109,7 +109,24 @@ async fn pov_bundling_utility_weight() -> Result<(), anyhow::Error> { .await?; log::info!("Third transaction finalized, weight registration scheduled for next block"); - ensure_is_only_block_in_core(¶_client, BlockToCheck::NextFirstBundleBlock(block_hash)).await?; + ensure_is_only_block_in_core(¶_client, BlockToCheck::NextFirstBundleBlock(block_hash)) + .await?; + + let inherent_weight_call = 
create_set_inherent_weight_consume_call(ref_time_1s, 0); + let sudo_inherent_weight_call = create_sudo_call(inherent_weight_call); + + log::info!("Sending transaction to set inherent weight consumption (1s ref_time)"); + let block_hash = submit_extrinsic_and_wait_for_finalization_success( + ¶_client, + &sudo_inherent_weight_call, + &alice, + ) + .await?; + log::info!("Weight consumption scheduled for next inherent call"); + + // The next block should contain the consume_weight_inherent and consume the 1s ref_time + ensure_is_only_block_in_core(¶_client, BlockToCheck::NextFirstBundleBlock(block_hash)) + .await?; Ok(()) } @@ -149,6 +166,16 @@ fn create_schedule_weight_registration_call() -> DynamicPayload { ) } +/// Creates a `test-pallet` `set_inherent_weight_consume` call +fn create_set_inherent_weight_consume_call(ref_time: u64, proof_size: u64) -> DynamicPayload { + let weight = value!({ + ref_time: ref_time, + proof_size: proof_size + }); + + zombienet_sdk::subxt::tx::dynamic("TestPallet", "set_inherent_weight_consume", vec![weight]) +} + async fn build_network_config() -> Result { let images = zombienet_sdk::environment::get_images_from_env(); log::info!("Using images: {images:?}"); From 3833dd16ef3aa245bd4d8556587537364ef5f2b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 12 Sep 2025 16:54:37 +0200 Subject: [PATCH 108/312] Fix --- cumulus/test/runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index f59a9909be966..842957324dfa5 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -263,7 +263,7 @@ impl frame_system::Config for Runtime { type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; type PreInherents = - cumulus_pallet_parachain_system::max_parachain_block_weight::MaxBlockWeightHooks; + 
cumulus_pallet_parachain_system::max_parachain_block_weight::MaxBlockWeightHooks; } impl cumulus_pallet_weight_reclaim::Config for Runtime { From e9937f110c8b17aeea46fffff5647fbcf4a47065 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 12 Sep 2025 21:10:29 +0200 Subject: [PATCH 109/312] Renamings and other fixes --- ...lity_weight.rs => full_core_usage_scenarios.rs} | 14 +++++--------- .../tests/zombie_ci/pov_bundling/mod.rs | 2 +- 2 files changed, 6 insertions(+), 10 deletions(-) rename cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/{utility_weight.rs => full_core_usage_scenarios.rs} (92%) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/full_core_usage_scenarios.rs similarity index 92% rename from cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs rename to cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/full_core_usage_scenarios.rs index 12180af3839bd..a9021a6068e5e 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/utility_weight.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/full_core_usage_scenarios.rs @@ -43,7 +43,7 @@ const PARA_ID: u32 = 2400; /// 2. One with a PoV size bigger than what one block alone is allowed to process. /// Each transaction is sent after the other and waits for finalization. 
#[tokio::test(flavor = "multi_thread")] -async fn pov_bundling_utility_weight() -> Result<(), anyhow::Error> { +async fn pov_bundling_full_core_usage_scenarios() -> Result<(), anyhow::Error> { let _ = env_logger::try_init_from_env( env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), ); @@ -79,11 +79,10 @@ async fn pov_bundling_utility_weight() -> Result<(), anyhow::Error> { let first_call = create_utility_with_weight_call(ref_time_1s, 0); let sudo_first_call = create_sudo_call(first_call); - log::info!("Sending first transaction with 1s ref_time"); + log::info!("Testing scenario 1: Sending a transaction with 1s ref time weight usage"); let block_hash = submit_extrinsic_and_wait_for_finalization_success(¶_client, &sudo_first_call, &alice) .await?; - log::info!("First transaction finalized"); ensure_is_only_block_in_core(¶_client, BlockToCheck::Exact(block_hash)).await?; @@ -92,22 +91,20 @@ async fn pov_bundling_utility_weight() -> Result<(), anyhow::Error> { let second_call = create_utility_with_weight_call(0, pov_size as u64); let sudo_second_call = create_sudo_call(second_call); - log::info!("Sending second transaction with half max PoV size"); + log::info!("Testing scenario 2: Sending a transaction with ~2.5MiB storage weight usage"); let block_hash = submit_extrinsic_and_wait_for_finalization_success(¶_client, &sudo_second_call, &alice) .await?; - log::info!("Second transaction finalized"); ensure_is_only_block_in_core(¶_client, BlockToCheck::Exact(block_hash)).await?; let third_call = create_schedule_weight_registration_call(); let sudo_third_call = create_sudo_call(third_call); - log::info!("Sending third transaction to schedule weight registration"); + log::info!("Testing scenario 3: Enabling an inherent that will use 1s ref time"); let block_hash = submit_extrinsic_and_wait_for_finalization_success(¶_client, &sudo_third_call, &alice) .await?; - log::info!("Third transaction finalized, weight registration scheduled for next block"); 
ensure_is_only_block_in_core(¶_client, BlockToCheck::NextFirstBundleBlock(block_hash)) .await?; @@ -115,14 +112,13 @@ async fn pov_bundling_utility_weight() -> Result<(), anyhow::Error> { let inherent_weight_call = create_set_inherent_weight_consume_call(ref_time_1s, 0); let sudo_inherent_weight_call = create_sudo_call(inherent_weight_call); - log::info!("Sending transaction to set inherent weight consumption (1s ref_time)"); + log::info!("Testing scenario 4: Enabling `on_initialize` to use 1s ref time"); let block_hash = submit_extrinsic_and_wait_for_finalization_success( ¶_client, &sudo_inherent_weight_call, &alice, ) .await?; - log::info!("Weight consumption scheduled for next inherent call"); // The next block should contain the consume_weight_inherent and consume the 1s ref_time ensure_is_only_block_in_core(¶_client, BlockToCheck::NextFirstBundleBlock(block_hash)) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs index 3923d1dfdf96f..4f9fe31f04812 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs @@ -18,4 +18,4 @@ mod basic; mod runtime_upgrade; mod three_cores_glutton; -mod utility_weight; +mod full_core_usage_scenarios; From f8dbab6ee1ff1c349823c7230adda4d8552ed1fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 15 Sep 2025 14:15:58 +0200 Subject: [PATCH 110/312] Change `SlotSchedule` runtime api --- .../slot_based/block_builder_task.rs | 38 +++++++++++-------- .../lib/src/fake_runtime_api/utils.rs | 2 +- cumulus/primitives/core/src/lib.rs | 22 +++++++---- cumulus/test/runtime/src/lib.rs | 7 +++- 4 files changed, 44 insertions(+), 25 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 
e35b5699db94d..d15b7395b65fd 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -333,11 +333,11 @@ where }, }; - let slot_schedule = match para_client + let block_interval = match para_client .runtime_api() .next_slot_schedule(initial_parent.hash, cores.total_cores()) { - Ok(schedule) => schedule, + Ok(interval) => interval, Err(error) => { tracing::debug!( target: crate::LOG_TARGET, @@ -345,11 +345,14 @@ where ?error, "Failed to fetch `slot_schedule`, assuming one block with 2s" ); - vec![Duration::from_secs(2)] + cumulus_primitives_core::BlockInterval { + number_of_blocks: 1, + block_time: Duration::from_secs(2), + } }, }; - let blocks_per_core = (slot_schedule.len() as u32 / cores.total_cores()).max(1); + let blocks_per_core = (block_interval.number_of_blocks / cores.total_cores()).max(1); tracing::debug!( target: crate::LOG_TARGET, @@ -360,7 +363,6 @@ where let mut pov_parent_header = initial_parent.header; let mut pov_parent_hash = initial_parent.hash; - let mut slot_schedule = slot_schedule.into_iter(); loop { let time_for_core = slot_time.time_left() / cores.cores_left(); @@ -380,7 +382,8 @@ where allowed_pov_size, cores.core_info(), cores.core_index(), - (&mut slot_schedule).take(blocks_per_core as usize), + block_interval.block_time, + blocks_per_core, time_for_core, cores.is_last_core() && slot_time.is_parachain_slot_ending(para_slot_duration.as_duration()), @@ -422,7 +425,8 @@ async fn build_collation_for_core, + block_time: Duration, + blocks_per_core: u32, slot_time_for_core: Duration, is_last_core_in_parachain_slot: bool, ) -> Result, ()> @@ -462,19 +466,23 @@ where let mut blocks = Vec::new(); let mut proofs = Vec::new(); let mut ignored_nodes = IgnoredNodes::default(); - let num_blocks = block_schedule.len(); let mut parent_hash = pov_parent_hash; let mut parent_header = pov_parent_header.clone(); - for (block_index, 
block_time) in block_schedule.enumerate() { + for block_index in 0..blocks_per_core { //TODO: Remove when transaction streaming is implemented // We require that the next node has imported our last block before it can start building // the next block. To ensure that the next node is able to do so, we are skipping the last // block in the parachain slot. In the future this can be removed again. - let is_last = block_index + 1 == num_blocks || - (block_index + 2 == num_blocks && num_blocks > 1 && is_last_core_in_parachain_slot); - if block_index + 1 == num_blocks && num_blocks > 1 && is_last_core_in_parachain_slot { + let is_last = block_index + 1 == blocks_per_core || + (block_index + 2 == blocks_per_core && + blocks_per_core > 1 && + is_last_core_in_parachain_slot); + if block_index + 1 == blocks_per_core && + blocks_per_core > 1 && + is_last_core_in_parachain_slot + { tracing::debug!( target: LOG_TARGET, "Skipping block production so that the next node is able to import all blocks before its slot." @@ -484,13 +492,13 @@ where let block_start = Instant::now(); let slot_time_for_block = slot_time_for_core.saturating_sub(core_start.elapsed()) / - (num_blocks - block_index) as u32; + (blocks_per_core - block_index) as u32; if slot_time_for_block <= Duration::from_millis(20) { tracing::error!( target: LOG_TARGET, slot_time_for_block_ms = %slot_time_for_block.as_millis(), - blocks_left = %(num_blocks - block_index), + blocks_left = %(blocks_per_core - block_index), ?core_index, "Less than 20ms slot time left to produce blocks, stopping block production for core", ); @@ -580,7 +588,7 @@ where .checked_sub(block_start.elapsed()) // Let's not sleep for the last block here, to send out the collation as early as // possible. 
- .filter(|_| block_index + 1 < num_blocks) + .filter(|_| block_index + 1 < blocks_per_core) { tokio::time::sleep(sleep).await; } diff --git a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs index 2695f2deb4285..958fe803b8e27 100644 --- a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs +++ b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs @@ -248,7 +248,7 @@ macro_rules! impl_node_runtime_apis { } impl cumulus_primitives_core::SlotSchedule<$block> for $runtime { - fn next_slot_schedule(_: u32) -> Vec { + fn next_slot_schedule(_: u32) -> cumulus_primitives_core::BlockInterval { unimplemented!() } } diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 58316beb3dbcc..4f15e9bf28661 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -503,6 +503,15 @@ pub struct CollationInfo { pub head_data: HeadData, } +/// Block interval configuration for parachain block production for one relay chain slot. +#[derive(Clone, Debug, codec::Decode, codec::Encode, PartialEq, TypeInfo)] +pub struct BlockInterval { + /// The number of blocks to produce in the relay chain slot. + pub number_of_blocks: u32, + /// The target block time in wall clock time for each block. + pub block_time: Duration, +} + sp_api::decl_runtime_apis! { /// Runtime api to collect information about a collation. /// @@ -538,18 +547,17 @@ sp_api::decl_runtime_apis! { /// API for parachain slot scheduling. /// - /// This runtime API allows the parachain runtime to communicate the number of scheduled blocks + /// This runtime API allows the parachain runtime to communicate the block interval /// to the node side. The node will call this API every relay chain slot (~6 seconds) - /// to get the scheduled parachain blocks. The block interval is calculated by dividing the - /// relay chain slot duration by the number of scheduled blocks. 
+ /// to get the scheduled parachain block interval. pub trait SlotSchedule { /// Get the block production schedule for the next relay chain slot. /// /// - `num_cores`: The number of cores assigned to this parachain /// - /// Returns a vector of [`Duration`] values each representing the block time on standard - /// hardware in wall clock time. This should be used as the upper wall clock time when - /// building a block. - fn next_slot_schedule(num_cores: u32) -> Vec; + /// Returns a [`BlockInterval`] specifying the number of blocks and target block time + /// on standard hardware in wall clock time. This should be used as the upper wall + /// clock time when building a block. + fn next_slot_schedule(num_cores: u32) -> BlockInterval; } } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 842957324dfa5..2be3f39e2a310 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -645,10 +645,13 @@ impl_runtime_apis! { } impl cumulus_primitives_core::SlotSchedule for Runtime { - fn next_slot_schedule(num_cores: u32) -> Vec { + fn next_slot_schedule(num_cores: u32) -> cumulus_primitives_core::BlockInterval { let block_time = Duration::from_secs(2) * num_cores / NumberOfBlocksPerRelaySlot::get(); - vec![block_time.min(Duration::from_millis(500)); NumberOfBlocksPerRelaySlot::get() as usize] + cumulus_primitives_core::BlockInterval { + number_of_blocks: NumberOfBlocksPerRelaySlot::get(), + block_time: block_time.min(Duration::from_millis(500)), + } } } } From 93884054a77f471eee7108871445cec01bdf09da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 15 Sep 2025 20:58:43 +0200 Subject: [PATCH 111/312] Do not execute a block again, if we just build it --- .../src/collators/slot_based/block_import.rs | 138 ++++++++++-------- 1 file changed, 80 insertions(+), 58 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs 
b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs index 61a4067e35b1c..63ba9f9ec50e1 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -26,6 +26,7 @@ use sp_api::{ ApiExt, CallApiAt, CallContext, Core, ProofRecorder, ProofRecorderIgnoredNodes, ProvideRuntimeApi, StorageProof, }; +use sp_consensus::BlockOrigin; use sp_consensus_aura::AuraApi; use sp_runtime::traits::{Block as BlockT, HashingFor, Header as _}; use sp_trie::{proof_size_extension::ProofSizeExt, recorder::IgnoredNodes}; @@ -90,6 +91,83 @@ impl SlotBasedBlockImport, + ) -> Result<(), sp_consensus::Error> + where + Client: ProvideRuntimeApi + CallApiAt + Send + Sync, + Client::StateBackend: Send, + Client::Api: Core + AuraApi, + AuthorityId: Codec + Send + Sync + std::fmt::Debug, + { + let core_info = CumulusDigestItem::find_core_info(params.header.digest()); + let relay_block_identifier = + CumulusDigestItem::find_relay_block_identifier(params.header.digest()); + + let (Some(core_info), Some(relay_block_identifier)) = (core_info, relay_block_identifier) + else { + return Ok(()) + }; + + let slot = find_pre_digest::(¶ms.header) + .map_err(|error| sp_consensus::Error::Other(Box::new(error)))?; + let authorities = fetch_authorities(&*self.client, *params.header.parent_hash())?; + + let pov_bundle = PoVBundle { + author_index: *slot as usize % authorities.len(), + core_info, + relay_block_identifier, + }; + + let mut nodes_to_ignore = self.nodes_to_ignore.lock(); + let nodes_to_ignore = nodes_to_ignore.entry(pov_bundle).or_default(); + + let recorder = ProofRecorder::::with_ignored_nodes(nodes_to_ignore.clone()); + + let mut runtime_api = self.client.runtime_api(); + + runtime_api.set_call_context(CallContext::Onchain); + + runtime_api.record_proof_with_recorder(recorder.clone()); + runtime_api.register_extension(ProofSizeExt::new(recorder)); + + let parent_hash = 
*params.header.parent_hash(); + + let block = Block::new(params.header.clone(), params.body.clone().unwrap_or_default()); + + runtime_api + .execute_block(parent_hash, block) + .map_err(|e| Box::new(e) as Box<_>)?; + + let storage_proof = + runtime_api.extract_proof().expect("Proof recording was enabled above; qed"); + + let state = self.client.state_at(parent_hash).map_err(|e| Box::new(e) as Box<_>)?; + let gen_storage_changes = runtime_api + .into_storage_changes(&state, parent_hash) + .map_err(sp_consensus::Error::ChainLookup)?; + + if params.header.state_root() != &gen_storage_changes.transaction_storage_root { + return Err(sp_consensus::Error::Other(Box::new(sp_blockchain::Error::InvalidStateRoot))) + } + + nodes_to_ignore + .extend(IgnoredNodes::from_storage_proof::>(&storage_proof)); + nodes_to_ignore + .extend(IgnoredNodes::from_memory_db(gen_storage_changes.transaction.clone())); + + params.state_action = + StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(gen_storage_changes)); + + Ok(()) + } } impl Clone @@ -131,64 +209,8 @@ where &self, mut params: sc_consensus::BlockImportParams, ) -> Result { - let core_info = CumulusDigestItem::find_core_info(params.header.digest()); - let relay_block_identifier = - CumulusDigestItem::find_relay_block_identifier(params.header.digest()); - - if let (Some(core_info), Some(relay_block_identifier)) = (core_info, relay_block_identifier) - { - let slot = find_pre_digest::(¶ms.header) - .map_err(|error| sp_consensus::Error::Other(Box::new(error)))?; - let authorities = fetch_authorities(&*self.client, *params.header.parent_hash())?; - - let pov_bundle = PoVBundle { - author_index: *slot as usize % authorities.len(), - core_info, - relay_block_identifier, - }; - - let mut nodes_to_ignore = self.nodes_to_ignore.lock(); - let nodes_to_ignore = nodes_to_ignore.entry(pov_bundle).or_default(); - - let recorder = ProofRecorder::::with_ignored_nodes(nodes_to_ignore.clone()); - - let mut runtime_api = 
self.client.runtime_api(); - - runtime_api.set_call_context(CallContext::Onchain); - - runtime_api.record_proof_with_recorder(recorder.clone()); - runtime_api.register_extension(ProofSizeExt::new(recorder)); - - let parent_hash = *params.header.parent_hash(); - - let block = Block::new(params.header.clone(), params.body.clone().unwrap_or_default()); - - runtime_api - .execute_block(parent_hash, block) - .map_err(|e| Box::new(e) as Box<_>)?; - - let storage_proof = - runtime_api.extract_proof().expect("Proof recording was enabled above; qed"); - - let state = self.client.state_at(parent_hash).map_err(|e| Box::new(e) as Box<_>)?; - let gen_storage_changes = runtime_api - .into_storage_changes(&state, parent_hash) - .map_err(sp_consensus::Error::ChainLookup)?; - - if params.header.state_root() != &gen_storage_changes.transaction_storage_root { - return Err(sp_consensus::Error::Other(Box::new( - sp_blockchain::Error::InvalidStateRoot, - ))) - } - - nodes_to_ignore - .extend(IgnoredNodes::from_storage_proof::>(&storage_proof)); - nodes_to_ignore - .extend(IgnoredNodes::from_memory_db(gen_storage_changes.transaction.clone())); - - params.state_action = StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes( - gen_storage_changes, - )); + if params.origin != BlockOrigin::Own { + self.execute_block_and_collect_storage_proof(&mut params)?; } self.inner.import_block(params).await.map_err(Into::into) From b0877956e05cf0f254e5c769f318dbafacd493da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 17 Sep 2025 22:22:35 +0200 Subject: [PATCH 112/312] New scenario --- cumulus/test/runtime/src/test_pallet.rs | 25 +++++++++++++++++++ .../pov_bundling/full_core_usage_scenarios.rs | 23 +++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index e7bd1ab6baf9b..e163eec066c82 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ 
b/cumulus/test/runtime/src/test_pallet.rs @@ -165,6 +165,31 @@ pub mod pallet { Ok(()) } + + /// This function registers a high weight usage manually, while it actually only announces + /// to use a weight of `0` :) + /// + /// Uses a custom `authorize` logic to ensure the transaction is only accepted when we can + /// fit the `1s` weight into the block. + #[pallet::weight(0)] + #[pallet::authorize(| + _source: TransactionSource, + | -> TransactionValidityWithRefund { + if frame_system::Pallet::::remaining_block_weight().can_consume(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 0)) { + Ok((ValidTransaction { provides: vec![vec![1, 2, 3, 4, 5]], ..Default::default() }, Weight::zero())) + } else { + Err(TransactionValidityError::Invalid(InvalidTransaction::ExhaustsResources)) + } + })] + pub fn use_more_weight_than_announced(_: OriginFor) -> DispatchResult { + // Register weight manually. + frame_system::Pallet::::register_extra_weight_unchecked( + Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 0), + DispatchClass::Normal, + ); + + Ok(()) + } } #[pallet::inherent] diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/full_core_usage_scenarios.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/full_core_usage_scenarios.rs index a9021a6068e5e..9d62fda0e26e7 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/full_core_usage_scenarios.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/full_core_usage_scenarios.rs @@ -124,6 +124,20 @@ async fn pov_bundling_full_core_usage_scenarios() -> Result<(), anyhow::Error> { ensure_is_only_block_in_core(¶_client, BlockToCheck::NextFirstBundleBlock(block_hash)) .await?; + let use_more_weight_than_announced = create_use_more_weight_than_announced_call(); + + log::info!( + "Testing scenario 5: Sending a transaction which uses more weight than what it registered" + ); + let block_hash = submit_extrinsic_and_wait_for_finalization_success( + ¶_client, + 
&use_more_weight_than_announced, + &alice, + ) + .await?; + + ensure_is_only_block_in_core(¶_client, BlockToCheck::Exact(block_hash)).await?; + Ok(()) } @@ -162,6 +176,15 @@ fn create_schedule_weight_registration_call() -> DynamicPayload { ) } +/// Creates a `test-pallet` `use_more_weight_than_announced` call +fn create_use_more_weight_than_announced_call() -> DynamicPayload { + zombienet_sdk::subxt::tx::dynamic( + "TestPallet", + "use_more_weight_than_announced", + vec![] as Vec, + ) +} + /// Creates a `test-pallet` `set_inherent_weight_consume` call fn create_set_inherent_weight_consume_call(ref_time: u64, proof_size: u64) -> DynamicPayload { let weight = value!({ From a0c4f9e52b47f9754c0873cd575500bfab87953f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 18 Sep 2025 22:55:38 +0200 Subject: [PATCH 113/312] More tests --- .../src/max_parachain_block_weight.rs | 68 ++++++++--- cumulus/test/runtime/src/lib.rs | 3 +- cumulus/test/runtime/src/test_pallet.rs | 109 ++++++++++++++++-- cumulus/test/service/src/lib.rs | 3 +- .../zombienet-sdk-helpers/src/lib.rs | 43 ++++--- .../pov_bundling/full_core_usage_scenarios.rs | 27 ++++- 6 files changed, 203 insertions(+), 50 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs index 61e9f0ba32261..acafa928a9595 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs @@ -240,7 +240,7 @@ where log::error!( target: LOG_TARGET, "Inherent block logic took longer than the target block weight, \ - `DynamicMaxBlockWeightPreInherent` not registered as `PreInherents` hook!", + `MaxBlockWeightHooks` not registered as `PreInherents` hook!", ); } else if info .total_weight() @@ -271,33 +271,67 @@ where }).map_err(Into::into) } - fn post_dispatch_extrinsic(info: &DispatchInfo, post_info: &PostDispatchInfo, len: 
usize) { + fn post_dispatch_extrinsic() { crate::BlockWeightMode::::mutate(|weight_mode| { let Some(mode) = *weight_mode else { return }; + let target_block_weight = + MaxParachainBlockWeight::target_block_weight::(TargetBlockRate::get()); + + let is_above_limit = frame_system::Pallet::::remaining_block_weight() + .consumed() + .any_gt(target_block_weight); + match mode { - // If the previous one was already `FullCore` or `FractionOfCore`, we don't need to - // change anything. - BlockWeightMode::FullCore | BlockWeightMode::FractionOfCore { .. } => {}, + // If the previous mode was already `FullCore`, we are fine. + BlockWeightMode::FullCore => {}, + BlockWeightMode::FractionOfCore { .. } => + // If we are above the limit, it means the transaction used more weight than what it + // had announced, which should not happen. + if is_above_limit { + log::error!( + target: LOG_TARGET, + "Extrinsic ({}) used more weight than what it had announced and pushed the \ + block above the allowed weight limit!", + frame_system::Pallet::::extrinsic_index().unwrap_or_default() + ); + + // If this isn't the first block in a core, we register the full core weight + // to ensure that we don't include any other transactions. Because we don't + // know how many weight of the core was already used by the blocks before. + if !is_first_block_in_core::() { + log::error!( + target: LOG_TARGET, + "Registering `FULL_CORE_WEIGHT` to ensure no other transaction is included \ + in this block, because this isn't the first block in the core!", + ); + + frame_system::Pallet::::register_extra_weight_unchecked( + MaxParachainBlockWeight::FULL_CORE_WEIGHT, + frame_support::dispatch::DispatchClass::Mandatory, + ); + } + + *weight_mode = Some(BlockWeightMode::FullCore); + + // Inform the node that this block uses the full core. 
+ frame_system::Pallet::::deposit_log( + CumulusDigestItem::UseFullCore.to_digest_item(), + ); + }, // Now we need to check if the transaction required more weight than a fraction of a // core block. BlockWeightMode::PotentialFullCore { first_transaction_index } => - if post_info - .calc_actual_weight(info) - // The extrinsic lengths counts towards the POV size - .saturating_add(Weight::from_parts(0, len as u64)) - .all_lt(MaxParachainBlockWeight::target_block_weight::( - TargetBlockRate::get(), - )) { - *weight_mode = - Some(BlockWeightMode::FractionOfCore { first_transaction_index }); - } else { + if is_above_limit { *weight_mode = Some(BlockWeightMode::FullCore); // Inform the node that this block uses the full core. frame_system::Pallet::::deposit_log( CumulusDigestItem::UseFullCore.to_digest_item(), ); + } else { + *weight_mode = + Some(BlockWeightMode::FractionOfCore { first_transaction_index }); }, } }); @@ -388,7 +422,7 @@ where ) -> Result<(), TransactionValidityError> { S::post_dispatch(pre, info, post_info, len, result)?; - Self::post_dispatch_extrinsic(info, post_info, len); + Self::post_dispatch_extrinsic(); Ok(()) } @@ -421,7 +455,7 @@ where ) -> Result<(), TransactionValidityError> { S::bare_post_dispatch(info, post_info, len, result)?; - Self::post_dispatch_extrinsic(info, post_info, len); + Self::post_dispatch_extrinsic(); Ok(()) } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 3681010327438..52dd2c8a03a5a 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -106,7 +106,7 @@ pub use pallet_timestamp::{Call as TimestampCall, Now}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; -pub use test_pallet::Call as TestPalletCall; +pub use test_pallet::{Call as TestPalletCall, TestTransactionExtension}; pub type SessionHandlers = (); @@ -453,6 +453,7 @@ pub type TxExtension = 
cumulus_pallet_parachain_system::DynamicMaxBlockWeight< frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + test_pallet::TestTransactionExtension, ), >, NumberOfBlocksPerRelaySlot, diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index e163eec066c82..2f925931cd6a0 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -25,12 +25,16 @@ pub const TEST_RUNTIME_UPGRADE_KEY: &[u8] = b"+test_runtime_upgrade_key+"; pub mod pallet { use crate::test_pallet::TEST_RUNTIME_UPGRADE_KEY; use alloc::vec; + use cumulus_primitives_core::CumulusDigestItem; use frame_support::{ + dispatch::DispatchInfo, inherent::{InherentData, InherentIdentifier, ProvideInherent}, pallet_prelude::*, + traits::IsSubType, weights::constants::WEIGHT_REF_TIME_PER_SECOND, }; use frame_system::pallet_prelude::*; + use sp_runtime::traits::{Dispatchable, Implication, TransactionExtension}; /// The inherent identifier for weight consumption. pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"consume0"; @@ -169,19 +173,13 @@ pub mod pallet { /// This function registers a high weight usage manually, while it actually only announces /// to use a weight of `0` :) /// - /// Uses a custom `authorize` logic to ensure the transaction is only accepted when we can - /// fit the `1s` weight into the block. + /// Uses the [`TestTransactionExtension`] logic to ensure the transaction is only accepted + /// when we can fit the `1s` weight into the block. 
#[pallet::weight(0)] - #[pallet::authorize(| - _source: TransactionSource, - | -> TransactionValidityWithRefund { - if frame_system::Pallet::::remaining_block_weight().can_consume(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 0)) { - Ok((ValidTransaction { provides: vec![vec![1, 2, 3, 4, 5]], ..Default::default() }, Weight::zero())) - } else { - Err(TransactionValidityError::Invalid(InvalidTransaction::ExhaustsResources)) - } - })] - pub fn use_more_weight_than_announced(_: OriginFor) -> DispatchResult { + pub fn use_more_weight_than_announced( + _: OriginFor, + _must_be_first_block_in_core: bool, + ) -> DispatchResult { // Register weight manually. frame_system::Pallet::::register_extra_weight_unchecked( Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 0), @@ -231,4 +229,91 @@ pub mod pallet { sp_io::storage::set(TEST_RUNTIME_UPGRADE_KEY, &[1, 2, 3, 4]); } } + + #[derive( + Encode, + Decode, + CloneNoBound, + EqNoBound, + PartialEqNoBound, + TypeInfo, + RuntimeDebugNoBound, + DecodeWithMemTracking, + )] + #[scale_info(skip_type_params(T))] + pub struct TestTransactionExtension(core::marker::PhantomData); + + impl Default for TestTransactionExtension { + fn default() -> Self { + Self(core::marker::PhantomData) + } + } + + impl TransactionExtension for TestTransactionExtension + where + T: Config + Send + Sync, + T::RuntimeCall: IsSubType> + Dispatchable, + { + const IDENTIFIER: &'static str = "TestTransactionExtension"; + type Implicit = (); + type Val = (); + type Pre = (); + + fn validate( + &self, + origin: T::RuntimeOrigin, + call: &T::RuntimeCall, + _info: &DispatchInfo, + _len: usize, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Implication, + _: TransactionSource, + ) -> ValidateResult { + if let Some(call) = call.is_sub_type() { + match call { + Call::use_more_weight_than_announced { must_be_first_block_in_core } => + if { + let digest = frame_system::Pallet::::digest(); + + CumulusDigestItem::find_bundle_info(&digest) + // Default being 
`true` to support `validate_transaction` + .map_or(true, |bi| bi.index == 0) || + // If it doesn't need to be the first block in the core, we can just always accept the transaction. + !must_be_first_block_in_core + } { + Ok(( + ValidTransaction { + provides: vec![vec![1, 2, 3, 4, 5]], + ..Default::default() + }, + (), + origin, + )) + } else { + Err(TransactionValidityError::Invalid( + InvalidTransaction::ExhaustsResources, + )) + }, + _ => Ok((Default::default(), (), origin)), + } + } else { + Ok((Default::default(), (), origin)) + } + } + + fn prepare( + self, + val: Self::Val, + _origin: &T::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfo, + _len: usize, + ) -> Result { + Ok(val) + } + + fn weight(&self, _: &T::RuntimeCall) -> Weight { + Weight::zero() + } + } } diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 67f70c038e994..afdd162a8efbd 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -928,12 +928,13 @@ pub fn construct_extrinsic( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + runtime::TestTransactionExtension::::default(), )) .into(); let raw_payload = runtime::SignedPayload::from_raw( function.clone(), tx_ext.clone(), - ((), (), runtime::VERSION.spec_version, genesis_block, current_block_hash, (), (), ()), + ((), (), runtime::VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()), ); let signature = raw_payload.using_encoded(|e| caller.sign(e)); runtime::UncheckedExtrinsic::new_signed( diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 52fb7b5e6e0ef..ee8d3701f2af7 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -710,26 +710,32 @@ fn find_bundle_info( .ok_or_else(|| anyhow!("Failed to find `BundleInfo` digest")) 
} -/// Validates that the given block is the first block on its core (bundle index == 0). -async fn ensure_is_only_block_in_core_impl( +/// Validates that the given block is a "special" block in the core. +/// +/// If `is_only_block_in_core` is true, it checks if the given block is the first block in the core +/// and the only one. If this is `false`, it only checks if the block is the last block in the core. +async fn ensure_is_block_in_core_impl( para_client: &OnlineClient, block_hash: H256, + is_only_block_in_core: bool, ) -> Result<(), anyhow::Error> { let blocks = para_client.blocks(); let block = blocks.at(block_hash).await?; let block_core_info = find_core_info(&block)?; - let parent = blocks.at(block.header().parent_hash).await?; + if is_only_block_in_core { + let parent = blocks.at(block.header().parent_hash).await?; - // Genesis is for sure on a different core :) - if parent.number() != 0 { - let parent_core_info = find_core_info(&parent)?; + // Genesis is for sure on a different core :) + if parent.number() != 0 { + let parent_core_info = find_core_info(&parent)?; - if parent_core_info == block_core_info { - return Err(anyhow::anyhow!( - "Not first block ({}) in core, at least the parent block is on the same core.", - block.header().number - )); + if parent_core_info == block_core_info { + return Err(anyhow::anyhow!( + "Not first block ({}) in core, at least the parent block is on the same core.", + block.header().number + )); + } } } @@ -761,7 +767,8 @@ async fn ensure_is_only_block_in_core_impl( if next_block_core_info == block_core_info { return Err(anyhow::anyhow!( - "Not first block ({}) in core, at least the following block is on the same core.", + "Not {} block ({}) in core, at least the following block is on the same core.", + if is_only_block_in_core { "first" } else { "last" }, block.header().number )); } @@ -778,7 +785,7 @@ pub async fn ensure_is_only_block_in_core( match block_to_check { BlockToCheck::Exact(block_hash) => - 
ensure_is_only_block_in_core_impl(para_client, block_hash).await, + ensure_is_block_in_core_impl(para_client, block_hash, true).await, BlockToCheck::NextFirstBundleBlock(start_block_hash) => { let start_block = blocks.at(start_block_hash).await?; @@ -800,10 +807,18 @@ pub async fn ensure_is_only_block_in_core( } if let Some(block) = next_first_bundle_block { - ensure_is_only_block_in_core_impl(para_client, block).await + ensure_is_block_in_core_impl(para_client, block, true).await } else { Err(anyhow!("Could not find the next bundle after {}", start_block.number())) } }, } } + +/// Checks if the specified block is the last block in a core. +pub async fn ensure_is_last_block_in_core( + para_client: &OnlineClient, + block_to_check: H256, +) -> Result<(), anyhow::Error> { + ensure_is_block_in_core_impl(para_client, block_to_check, false).await +} diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/full_core_usage_scenarios.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/full_core_usage_scenarios.rs index 9d62fda0e26e7..83760ceda9947 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/full_core_usage_scenarios.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/full_core_usage_scenarios.rs @@ -19,7 +19,7 @@ use anyhow::anyhow; use cumulus_primitives_core::relay_chain::MAX_POV_SIZE; use cumulus_zombienet_sdk_helpers::{ assert_finality_lag, assert_para_throughput, create_assign_core_call, - ensure_is_only_block_in_core, find_core_info, + ensure_is_last_block_in_core, ensure_is_only_block_in_core, find_core_info, submit_extrinsic_and_wait_for_finalization_success, BlockToCheck, }; use frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND; @@ -124,10 +124,11 @@ async fn pov_bundling_full_core_usage_scenarios() -> Result<(), anyhow::Error> { ensure_is_only_block_in_core(¶_client, BlockToCheck::NextFirstBundleBlock(block_hash)) .await?; - let use_more_weight_than_announced = 
create_use_more_weight_than_announced_call(); + let use_more_weight_than_announced = create_use_more_weight_than_announced_call(true); log::info!( - "Testing scenario 5: Sending a transaction which uses more weight than what it registered" + "Testing scenario 5: Sending a transaction which uses more weight than what \ + it registered and transactions appears in the first block of a core" ); let block_hash = submit_extrinsic_and_wait_for_finalization_success( ¶_client, @@ -138,6 +139,21 @@ async fn pov_bundling_full_core_usage_scenarios() -> Result<(), anyhow::Error> { ensure_is_only_block_in_core(¶_client, BlockToCheck::Exact(block_hash)).await?; + let use_more_weight_than_announced = create_use_more_weight_than_announced_call(false); + + log::info!( + "Testing scenario 6: Sending a transaction which uses more weight than what \ + it registered and transactions appears in the last block of a core" + ); + let block_hash = submit_extrinsic_and_wait_for_finalization_success( + ¶_client, + &use_more_weight_than_announced, + &alice, + ) + .await?; + + ensure_is_last_block_in_core(¶_client, block_hash).await?; + Ok(()) } @@ -177,11 +193,12 @@ fn create_schedule_weight_registration_call() -> DynamicPayload { } /// Creates a `test-pallet` `use_more_weight_than_announced` call -fn create_use_more_weight_than_announced_call() -> DynamicPayload { +fn create_use_more_weight_than_announced_call(must_be_first_block_in_core: bool) -> DynamicPayload { zombienet_sdk::subxt::tx::dynamic( "TestPallet", "use_more_weight_than_announced", - vec![] as Vec, + vec![value![must_be_first_block_in_core]] + as Vec, ) } From 0970d3fd2da179febf420ae91368be1182268fdb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 19 Sep 2025 13:06:36 +0200 Subject: [PATCH 114/312] Rename to `block_bundling` --- cumulus/test/runtime/build.rs | 2 +- cumulus/test/runtime/src/lib.rs | 4 ++-- cumulus/test/service/src/chain_spec.rs | 4 ++-- cumulus/test/service/src/cli.rs | 6 +++--- 
.../zombie_ci/{pov_bundling => block_bundling}/basic.rs | 2 +- .../full_core_usage_scenarios.rs | 2 +- .../tests/zombie_ci/{pov_bundling => block_bundling}/mod.rs | 0 .../{pov_bundling => block_bundling}/runtime_upgrade.rs | 2 +- .../{pov_bundling => block_bundling}/three_cores_glutton.rs | 2 +- cumulus/zombienet/zombienet-sdk/tests/zombie_ci/mod.rs | 2 +- 10 files changed, 13 insertions(+), 13 deletions(-) rename cumulus/zombienet/zombienet-sdk/tests/zombie_ci/{pov_bundling => block_bundling}/basic.rs (98%) rename cumulus/zombienet/zombienet-sdk/tests/zombie_ci/{pov_bundling => block_bundling}/full_core_usage_scenarios.rs (99%) rename cumulus/zombienet/zombienet-sdk/tests/zombie_ci/{pov_bundling => block_bundling}/mod.rs (100%) rename cumulus/zombienet/zombienet-sdk/tests/zombie_ci/{pov_bundling => block_bundling}/runtime_upgrade.rs (98%) rename cumulus/zombienet/zombienet-sdk/tests/zombie_ci/{pov_bundling => block_bundling}/three_cores_glutton.rs (98%) diff --git a/cumulus/test/runtime/build.rs b/cumulus/test/runtime/build.rs index ddde5c2f2a8b7..e9de9b49ea090 100644 --- a/cumulus/test/runtime/build.rs +++ b/cumulus/test/runtime/build.rs @@ -52,7 +52,7 @@ fn main() { .enable_feature("pov-bundling") .enable_feature("experimental-ump-signals") .import_memory() - .set_file_name("wasm_binary_pov_bundling.rs") + .set_file_name("wasm_binary_block_bundling.rs") .build(); WasmBuilder::new() diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 52dd2c8a03a5a..ed5edeee6e5db 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -46,9 +46,9 @@ pub mod elastic_scaling { include!(concat!(env!("OUT_DIR"), "/wasm_binary_elastic_scaling.rs")); } -pub mod pov_bundling { +pub mod block_bundling { #[cfg(feature = "std")] - include!(concat!(env!("OUT_DIR"), "/wasm_binary_pov_bundling.rs")); + include!(concat!(env!("OUT_DIR"), "/wasm_binary_block_bundling.rs")); } pub mod sync_backing { diff --git 
a/cumulus/test/service/src/chain_spec.rs b/cumulus/test/service/src/chain_spec.rs index 5a10b3d2ccad5..7f3d3888a2274 100644 --- a/cumulus/test/service/src/chain_spec.rs +++ b/cumulus/test/service/src/chain_spec.rs @@ -125,11 +125,11 @@ pub fn get_elastic_scaling_mvp_chain_spec(id: Option) -> GenericChainSpe ) } -pub fn get_pov_bundling_chain_spec(id: Option) -> GenericChainSpec { +pub fn get_block_bundling_chain_spec(id: Option) -> GenericChainSpec { get_chain_spec_with_extra_endowed( id, Default::default(), - cumulus_test_runtime::pov_bundling::WASM_BINARY + cumulus_test_runtime::block_bundling::WASM_BINARY .expect("WASM binary was not built, please build it!"), ) } diff --git a/cumulus/test/service/src/cli.rs b/cumulus/test/service/src/cli.rs index ee860a65eed40..fe15d70f5cb6c 100644 --- a/cumulus/test/service/src/cli.rs +++ b/cumulus/test/service/src/cli.rs @@ -306,9 +306,9 @@ impl SubstrateCli for TestCollatorCli { ParaId::from(2300), ))) as Box<_> }, - "pov-bundling" => { - tracing::info!("Using pov-bundling chain spec."); - Box::new(cumulus_test_service::get_pov_bundling_chain_spec(Some(ParaId::from( + "block-bundling" => { + tracing::info!("Using block-bundling chain spec."); + Box::new(cumulus_test_service::get_block_bundling_chain_spec(Some(ParaId::from( 2400, )))) as Box<_> }, diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/basic.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs similarity index 98% rename from cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/basic.rs rename to cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs index 82a015fb2cb41..40e02fd70444c 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/basic.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs @@ -42,7 +42,7 @@ const PARA_ID: u32 = 2400; /// As we increase the number of cores via `assign_core`, we expect the blocks to spread over the /// relay 
cores. #[tokio::test(flavor = "multi_thread")] -async fn pov_bundling_basic() -> Result<(), anyhow::Error> { +async fn block_bundling_basic() -> Result<(), anyhow::Error> { let _ = env_logger::try_init_from_env( env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), ); diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/full_core_usage_scenarios.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs similarity index 99% rename from cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/full_core_usage_scenarios.rs rename to cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs index 83760ceda9947..1ebc59e431b3b 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/full_core_usage_scenarios.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs @@ -43,7 +43,7 @@ const PARA_ID: u32 = 2400; /// 2. One with a PoV size bigger than what one block alone is allowed to process. /// Each transaction is sent after the other and waits for finalization. 
#[tokio::test(flavor = "multi_thread")] -async fn pov_bundling_full_core_usage_scenarios() -> Result<(), anyhow::Error> { +async fn block_bundling_full_core_usage_scenarios() -> Result<(), anyhow::Error> { let _ = env_logger::try_init_from_env( env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), ); diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs similarity index 100% rename from cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/mod.rs rename to cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs similarity index 98% rename from cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/runtime_upgrade.rs rename to cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs index 127a0db528178..ce48ea2221413 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs @@ -53,7 +53,7 @@ const MIN_RUNTIME_SIZE_BYTES: usize = MAX_POV_SIZE as usize / 4 + 50 * 1024; /// The runtime code is validated to be at least 2.5MiB in size, and both transactions /// are validated to be the only block in their respective cores. 
#[tokio::test(flavor = "multi_thread")] -async fn pov_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { +async fn block_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { let _ = env_logger::try_init_from_env( env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), ); diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/three_cores_glutton.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs similarity index 98% rename from cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/three_cores_glutton.rs rename to cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs index fd28ceeb8180d..9a31c34116deb 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_bundling/three_cores_glutton.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs @@ -38,7 +38,7 @@ const PARA_ID: u32 = 2400; /// This test starts with 3 cores assigned and configures glutton to use 80% of ref time, /// then validates that the parachain produces 72 blocks. 
#[tokio::test(flavor = "multi_thread")] -async fn pov_bundling_three_cores_glutton() -> Result<(), anyhow::Error> { +async fn block_bundling_three_cores_glutton() -> Result<(), anyhow::Error> { let _ = env_logger::try_init_from_env( env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), ); diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/mod.rs index 53228b83ad07f..05902a0c6637b 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/mod.rs @@ -7,7 +7,7 @@ mod full_node_catching_up; mod full_node_warp_sync; mod migrate_solo; mod parachain_extrinsic_get_finalized; -mod pov_bundling; +mod block_bundling; mod pov_recovery; mod rpc_collator_build_blocks; mod runtime_upgrade; From 0f864d3e4fa5f1b42a394428837a835d15fbfa88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 19 Sep 2025 13:32:14 +0200 Subject: [PATCH 115/312] More renamings --- cumulus/test/runtime/Cargo.toml | 4 ++-- cumulus/test/runtime/build.rs | 2 +- cumulus/test/runtime/src/lib.rs | 6 +++--- .../zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs | 2 +- .../zombie_ci/block_bundling/full_core_usage_scenarios.rs | 2 +- .../tests/zombie_ci/block_bundling/runtime_upgrade.rs | 2 +- .../tests/zombie_ci/block_bundling/three_cores_glutton.rs | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index 9bcc4ae01484b..b2dbf7991cd4d 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -103,8 +103,8 @@ relay-parent-offset = [] elastic-scaling = [] # A runtime with low slot duration of 500ms for low-latency testing with 12 cores. elastic-scaling-500ms = [] -# A runtime pov-bundling. -pov-bundling = [] +# A runtime that uses block-bundling. 
+block-bundling = [] # A runtime with 6s slot duration which sends RFC-103 compatible UMP signals. experimental-ump-signals = [ "cumulus-pallet-parachain-system/experimental-ump-signals", diff --git a/cumulus/test/runtime/build.rs b/cumulus/test/runtime/build.rs index e9de9b49ea090..305ef10fd3ec3 100644 --- a/cumulus/test/runtime/build.rs +++ b/cumulus/test/runtime/build.rs @@ -49,7 +49,7 @@ fn main() { WasmBuilder::new() .with_current_project() - .enable_feature("pov-bundling") + .enable_feature("block-bundling") .enable_feature("experimental-ump-signals") .import_memory() .set_file_name("wasm_binary_block_bundling.rs") diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index ed5edeee6e5db..560eca077a829 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -120,7 +120,7 @@ impl_opaque_keys! { pub const PARACHAIN_ID: u32 = 100; #[cfg(all( - any(feature = "elastic-scaling-500ms", feature = "pov-bundling"), + any(feature = "elastic-scaling-500ms", feature = "block-bundling"), not(any(feature = "elastic-scaling", feature = "relay-parent-offset")) ))] pub const BLOCK_PROCESSING_VELOCITY: u32 = 12; @@ -131,7 +131,7 @@ pub const BLOCK_PROCESSING_VELOCITY: u32 = 3; #[cfg(not(any( feature = "elastic-scaling", feature = "elastic-scaling-500ms", - feature = "pov-bundling", + feature = "block-bundling", feature = "relay-parent-offset" )))] pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; @@ -573,7 +573,7 @@ impl_runtime_apis! 
{ } fn block_rate() -> sp_block_builder::BlockRate { - if cfg!(feature = "pov-bundling") { + if cfg!(feature = "block-bundling") { sp_block_builder::BlockRate { block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_millis(500) }, block_building_time: core::time::Duration::from_millis(500), diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs index 40e02fd70444c..eb373e6185d6f 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs @@ -157,7 +157,7 @@ async fn build_network_config() -> Result { p.with_id(PARA_ID) .with_default_command("test-parachain") .with_default_image(images.cumulus.as_str()) - .with_chain("pov-bundling") + .with_chain("block-bundling") .with_default_args(vec![ ("--authoring").into(), ("slot-based").into(), diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs index 1ebc59e431b3b..0cdf01681b21c 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs @@ -242,7 +242,7 @@ async fn build_network_config() -> Result { p.with_id(PARA_ID) .with_default_command("test-parachain") .with_default_image(images.cumulus.as_str()) - .with_chain("pov-bundling") + .with_chain("block-bundling") .with_default_args(vec![ ("--authoring").into(), ("slot-based").into(), diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs index ce48ea2221413..3763439ac0acb 100644 --- 
a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs @@ -179,7 +179,7 @@ async fn build_network_config() -> Result { p.with_id(PARA_ID) .with_default_command("test-parachain") .with_default_image(images.cumulus.as_str()) - .with_chain("pov-bundling") + .with_chain("block-bundling") .with_default_args(vec![ ("--authoring").into(), ("slot-based").into(), diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs index 9a31c34116deb..1fd1bd24ad702 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs @@ -113,7 +113,7 @@ async fn build_network_config() -> Result { p.with_id(PARA_ID) .with_default_command("test-parachain") .with_default_image(images.cumulus.as_str()) - .with_chain("pov-bundling") + .with_chain("block-bundling") .with_default_args(vec![ ("--authoring").into(), ("slot-based").into(), From 386f38a89dd8868cd5c0b34816d14ae1ab79e7bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 19 Sep 2025 15:56:07 +0200 Subject: [PATCH 116/312] Remove outdated code --- .../runtimes/assets/asset-hub-rococo/src/lib.rs | 5 ----- .../runtimes/assets/asset-hub-westend/src/lib.rs | 5 ----- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 6 ------ .../bridge-hubs/bridge-hub-westend/src/lib.rs | 6 ------ .../collectives/collectives-westend/src/lib.rs | 5 ----- .../runtimes/coretime/coretime-rococo/src/lib.rs | 6 ------ .../runtimes/coretime/coretime-westend/src/lib.rs | 6 ------ .../runtimes/glutton/glutton-westend/src/lib.rs | 5 ----- .../runtimes/people/people-rococo/src/lib.rs | 5 ----- .../runtimes/people/people-westend/src/lib.rs | 5 ----- 
.../parachains/runtimes/testing/penpal/src/lib.rs | 5 ----- .../runtimes/testing/rococo-parachain/src/lib.rs | 5 ----- .../testing/yet-another-parachain/src/lib.rs | 5 ----- .../lib/src/fake_runtime_api/utils.rs | 3 --- cumulus/test/runtime/src/lib.rs | 13 ------------- polkadot/node/service/src/fake_runtime_api.rs | 3 --- polkadot/runtime/rococo/src/lib.rs | 5 ----- polkadot/runtime/test-runtime/src/lib.rs | 5 ----- polkadot/runtime/westend/src/lib.rs | 5 ----- substrate/primitives/block-builder/src/lib.rs | 2 -- substrate/test-utils/runtime/src/lib.rs | 5 ----- .../src/overhead/fake_runtime_api.rs | 3 --- 22 files changed, 113 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 4025baa06e58e..abbc34eeb1bda 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1414,11 +1414,6 @@ impl_runtime_apis! 
{ data.check_extrinsics(&block) } - fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , - block_building_time: core::time::Duration::from_millis(500), - } } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index c99827a4bfe9c..8b35992510d09 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1781,11 +1781,6 @@ pallet_revive::impl_runtime_apis_plus_revive!( data.check_extrinsics(&block) } - fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , - block_building_time: core::time::Duration::from_secs(2), - } } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 1ab8976c1f1f4..5ebf59a32403f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -788,12 +788,6 @@ impl_runtime_apis! 
{ data.check_extrinsics(&block) } - fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , - block_building_time: core::time::Duration::from_secs(2), - } - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 7d73fb55874f2..2c69f62a16add 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -739,12 +739,6 @@ impl_runtime_apis! { data.check_extrinsics(&block) } - fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , - block_building_time: core::time::Duration::from_secs(2), - } - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 09e37b31c3543..32fbd05828195 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -927,11 +927,6 @@ impl_runtime_apis! 
{ data.check_extrinsics(&block) } - fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , - block_building_time: core::time::Duration::from_secs(2), - } } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 3d4c68f05c6b6..d0654a022ae5e 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -768,12 +768,6 @@ impl_runtime_apis! { data.check_extrinsics(&block) } - fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , - block_building_time: core::time::Duration::from_secs(2), - } - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 4169c749d4f87..9731047968d7d 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -768,12 +768,6 @@ impl_runtime_apis! 
{ data.check_extrinsics(&block) } - fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , - block_building_time: core::time::Duration::from_secs(2), - } - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index a25ad80dd26fe..24114ab491cf2 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -400,11 +400,6 @@ impl_runtime_apis! { data.check_extrinsics(&block) } - fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , - block_building_time: core::time::Duration::from_secs(2), - } } } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 568e636be1be2..398ab77851773 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -722,11 +722,6 @@ impl_runtime_apis! { data.check_extrinsics(&block) } - fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , - block_building_time: core::time::Duration::from_secs(2), - } } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 6ac77aa56b082..3a4074829b4c1 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -724,11 +724,6 @@ impl_runtime_apis! 
{ data.check_extrinsics(&block) } - fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , - block_building_time: core::time::Duration::from_secs(2), - } } } diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 838791585ba9b..2d36ba159da96 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -964,11 +964,6 @@ impl_runtime_apis! { data.check_extrinsics(&block) } - fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , - block_building_time: core::time::Duration::from_secs(2), - } } } diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index b1f2ca18fd80c..12c7b684faeea 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -756,11 +756,6 @@ impl_runtime_apis! { data.check_extrinsics(&block) } - fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , - block_building_time: core::time::Duration::from_secs(2), - } } } diff --git a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs index cc792e2c7e4e2..ec4ee9d873167 100644 --- a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs @@ -550,11 +550,6 @@ impl_runtime_apis! 
{ data.check_extrinsics(&block) } - fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , - block_building_time: core::time::Duration::from_secs(2), - } } } diff --git a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs index fd80795f980b1..8f7dc2c09bb8d 100644 --- a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs +++ b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs @@ -106,9 +106,6 @@ macro_rules! impl_node_runtime_apis { unimplemented!() } - fn block_rate() -> sp_block_builder::BlockRate { - unimplemented!() - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<$block> for $runtime { diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 560eca077a829..21fd98b7f8689 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -572,19 +572,6 @@ impl_runtime_apis! { data.check_extrinsics(&block) } - fn block_rate() -> sp_block_builder::BlockRate { - if cfg!(feature = "block-bundling") { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_millis(500) }, - block_building_time: core::time::Duration::from_millis(500), - } - } else { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) }, - block_building_time: core::time::Duration::from_secs(2), - } - } - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index e8722258e6a40..f26f734638158 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -104,9 +104,6 @@ sp_api::impl_runtime_apis! 
{ unimplemented!() } - fn block_rate() -> sp_block_builder::BlockRate { - unimplemented!() - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 3c451ac7a5232..85300af2d3833 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1994,11 +1994,6 @@ sp_api::impl_runtime_apis! { data.check_extrinsics(&block) } - fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , - block_building_time: core::time::Duration::from_secs(2), - } } } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index b3078f11ede83..4842bb159b76b 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -939,11 +939,6 @@ sp_api::impl_runtime_apis! { data.check_extrinsics(&block) } - fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6)}, - block_building_time: core::time::Duration::from_secs(2), - } } } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 5c697e05fa555..7192ef67a64d6 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2230,11 +2230,6 @@ sp_api::impl_runtime_apis! 
{ data.check_extrinsics(&block) } - fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , - block_building_time: core::time::Duration::from_secs(2), - } } } diff --git a/substrate/primitives/block-builder/src/lib.rs b/substrate/primitives/block-builder/src/lib.rs index e2eeb924a81f6..8a4de3e6f530a 100644 --- a/substrate/primitives/block-builder/src/lib.rs +++ b/substrate/primitives/block-builder/src/lib.rs @@ -85,7 +85,5 @@ sp_api::decl_runtime_apis! { /// Check that the inherents are valid. The inherent data will vary from chain to chain. fn check_inherents(block: Block, data: InherentData) -> CheckInherentsResult; - - fn block_rate() -> BlockRate; } } diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 3ade76841a8f6..4a5f93f835cd5 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -568,11 +568,6 @@ impl_runtime_apis! { CheckInherentsResult::new() } - fn block_rate() -> sp_block_builder::BlockRate { - sp_block_builder::BlockRate { - block_time: sp_block_builder::BlockTime::Regularly { every: core::time::Duration::from_secs(6) } , - block_building_time: core::time::Duration::from_secs(2), - } } } diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/fake_runtime_api.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/fake_runtime_api.rs index 9c3f9b3d61e5f..6ee7bbffde4a9 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/fake_runtime_api.rs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/fake_runtime_api.rs @@ -82,9 +82,6 @@ sp_api::impl_runtime_apis! 
{ unimplemented!() } - fn block_rate() -> sp_block_builder::BlockRate { - unimplemented!() - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { From 4a924dc0dab262b9cc4d6cb4e837dd80fd18b29a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 19 Sep 2025 15:58:19 +0200 Subject: [PATCH 117/312] Forgot --- substrate/primitives/block-builder/src/lib.rs | 28 ------------------- 1 file changed, 28 deletions(-) diff --git a/substrate/primitives/block-builder/src/lib.rs b/substrate/primitives/block-builder/src/lib.rs index 8a4de3e6f530a..8d94d9dcdf85d 100644 --- a/substrate/primitives/block-builder/src/lib.rs +++ b/substrate/primitives/block-builder/src/lib.rs @@ -31,34 +31,6 @@ mod client_side; #[cfg(feature = "std")] pub use client_side::*; -#[derive(Encode, Decode, scale_info::TypeInfo, Debug)] -pub struct BlockRate { - /// Time between individual blocks. - pub block_time: BlockTime, - /// Maximum time to spend building per block. - pub block_building_time: Duration, -} - -#[derive(Encode, Decode, scale_info::TypeInfo, Debug)] -pub enum BlockTime { - /// Blocks are expected every X. - Regularly { - /// Time between blocks. - every: Duration, - }, - /// Blocks are coming at unexpected times. - Irregular, -} - -impl BlockTime { - pub fn as_regular(&self) -> Option { - match self { - Self::Regularly { every } => Some(*every), - Self::Irregular => None, - } - } -} - sp_api::decl_runtime_apis! { /// The `BlockBuilder` api trait that provides the required functionality for building a block. 
#[api_version(6)] From ca375894954100e8d8d5a4259c1e5961f135065a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 19 Sep 2025 16:09:30 +0200 Subject: [PATCH 118/312] Adds the `SlotSchedule` runtime api --- .../runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs | 9 +++++++++ .../runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs | 9 +++++++++ .../runtimes/coretime/coretime-rococo/src/lib.rs | 9 +++++++++ .../runtimes/coretime/coretime-westend/src/lib.rs | 9 +++++++++ .../parachains/runtimes/people/people-rococo/src/lib.rs | 9 +++++++++ .../parachains/runtimes/people/people-westend/src/lib.rs | 9 +++++++++ 6 files changed, 54 insertions(+) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 5ebf59a32403f..878742ab13de5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -1638,4 +1638,13 @@ mod tests { } }); } + + impl cumulus_primitives_core::SlotSchedule for Runtime { + fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::BlockInterval { + cumulus_primitives_core::BlockInterval { + number_of_blocks: 1, + block_time: core::time::Duration::from_secs(2), + } + } + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 2c69f62a16add..95c8ed75de663 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -1504,4 +1504,13 @@ mod tests { } }); } + + impl cumulus_primitives_core::SlotSchedule for Runtime { + fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::BlockInterval { + cumulus_primitives_core::BlockInterval { + number_of_blocks: 1, + block_time: core::time::Duration::from_secs(2), + } + } + 
} } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index d0654a022ae5e..48dd8fcdceb65 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -1188,6 +1188,15 @@ impl_runtime_apis! { ParachainInfo::parachain_id() } } + + impl cumulus_primitives_core::SlotSchedule for Runtime { + fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::BlockInterval { + cumulus_primitives_core::BlockInterval { + number_of_blocks: 1, + block_time: core::time::Duration::from_secs(2), + } + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 9731047968d7d..75c07704795fc 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -1205,6 +1205,15 @@ impl_runtime_apis! { ParachainInfo::parachain_id() } } + + impl cumulus_primitives_core::SlotSchedule for Runtime { + fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::BlockInterval { + cumulus_primitives_core::BlockInterval { + number_of_blocks: 1, + block_time: core::time::Duration::from_secs(2), + } + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 398ab77851773..1210791cc1beb 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -1121,6 +1121,15 @@ impl_runtime_apis! 
{ ParachainInfo::parachain_id() } } + + impl cumulus_primitives_core::SlotSchedule for Runtime { + fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::BlockInterval { + cumulus_primitives_core::BlockInterval { + number_of_blocks: 1, + block_time: core::time::Duration::from_secs(2), + } + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 3a4074829b4c1..1160454cd7de8 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -1140,6 +1140,15 @@ impl_runtime_apis! { } } + impl cumulus_primitives_core::SlotSchedule for Runtime { + fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::BlockInterval { + cumulus_primitives_core::BlockInterval { + number_of_blocks: 1, + block_time: core::time::Duration::from_secs(2), + } + } + } + impl sp_statement_store::runtime_api::ValidateStatement for Runtime { fn validate_statement( _source: StatementSource, From 4d81c0932f30ee3b16fec2db23f84cfc46aa0de1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 23 Sep 2025 21:37:58 +0200 Subject: [PATCH 119/312] Fix --- substrate/test-utils/runtime/src/lib.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 4a5f93f835cd5..b4838716970a0 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -567,8 +567,6 @@ impl_runtime_apis! 
{ fn check_inherents(_block: Block, _data: InherentData) -> CheckInherentsResult { CheckInherentsResult::new() } - - } } impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { From 2959c032a7320f2956c3ce6da24a52db9777d21d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 23 Sep 2025 21:41:17 +0200 Subject: [PATCH 120/312] Implement `ReplayStorageProofProvider` & `RecordingStorageProofProvider` --- Cargo.lock | 1 + .../api/proc-macro/src/impl_runtime_apis.rs | 15 + substrate/primitives/api/src/lib.rs | 2 +- substrate/primitives/api/test/Cargo.toml | 1 + .../api/test/tests/runtime_calls.rs | 61 ++- .../externalities/src/extensions.rs | 97 +++- substrate/primitives/externalities/src/lib.rs | 2 +- substrate/primitives/state-machine/src/ext.rs | 29 +- .../src/overlayed_changes/mod.rs | 36 +- substrate/primitives/trie/src/lib.rs | 25 + .../trie/src/proof_size_extension.rs | 443 ++++++++++++++++++ 11 files changed, 701 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4f65b4966a6de..37c9f2a78fde0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -22649,6 +22649,7 @@ dependencies = [ "sp-api", "sp-consensus", "sp-core 28.0.0", + "sp-externalities 0.25.0", "sp-metadata-ir", "sp-runtime", "sp-state-machine", diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index 76e26cce2653e..06b780a947077 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -412,6 +412,11 @@ fn generate_runtime_api_base_structures() -> Result { &mut std::cell::RefCell::borrow_mut(&self.changes) ); + #crate_::Extensions::commit_transaction( + &mut std::cell::RefCell::borrow_mut(&self.extensions), + #crate_::TransactionType::Host, + ); + // Will panic on an `Err` below, however we should call commit // on the recorder and the changes together. 
std::result::Result::and(res, std::result::Result::map_err(res2, drop)) @@ -426,6 +431,11 @@ fn generate_runtime_api_base_structures() -> Result { &mut std::cell::RefCell::borrow_mut(&self.changes) ); + #crate_::Extensions::rollback_transaction( + &mut std::cell::RefCell::borrow_mut(&self.extensions), + #crate_::TransactionType::Host, + ); + // Will panic on an `Err` below, however we should call commit // on the recorder and the changes together. std::result::Result::and(res, std::result::Result::map_err(res2, drop)) @@ -441,6 +451,11 @@ fn generate_runtime_api_base_structures() -> Result { if let Some(recorder) = &self.recorder { #crate_::ProofRecorder::::start_transaction(&recorder); } + + #crate_::Extensions::start_transaction( + &mut std::cell::RefCell::borrow_mut(&self.extensions), + #crate_::TransactionType::Host, + ); } } } diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index c8f419770f5f0..61c1d12b80fbe 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -81,7 +81,7 @@ pub mod __private { mod std_imports { pub use hash_db::Hasher; pub use sp_core::traits::CallContext; - pub use sp_externalities::{Extension, Extensions}; + pub use sp_externalities::{Extension, Extensions, TransactionType}; pub use sp_runtime::StateVersion; pub use sp_state_machine::{ Backend as StateBackend, InMemoryBackend, OverlayedChanges, StorageProof, TrieBackend, diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index 23da17c3f55ac..6582612d78ab2 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -25,6 +25,7 @@ sc-block-builder = { workspace = true, default-features = true } scale-info = { features = ["derive"], workspace = true } sp-api = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = 
true } sp-metadata-ir = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } diff --git a/substrate/primitives/api/test/tests/runtime_calls.rs b/substrate/primitives/api/test/tests/runtime_calls.rs index 0470b8b72aa04..33cb72249a826 100644 --- a/substrate/primitives/api/test/tests/runtime_calls.rs +++ b/substrate/primitives/api/test/tests/runtime_calls.rs @@ -15,10 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::panic::UnwindSafe; +use std::{ + panic::UnwindSafe, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, +}; use sc_block_builder::BlockBuilderBuilder; use sp_api::{ApiExt, Core, ProvideRuntimeApi}; +use sp_externalities::{decl_extension, TransactionType}; use sp_runtime::{ traits::{HashingFor, Header as HeaderT}, TransactionOutcome, @@ -182,14 +189,44 @@ fn disable_logging_works() { // Ensure that the type is not unwind safe! static_assertions::assert_not_impl_any!(>::Api: UnwindSafe); +#[derive(Default)] +struct TransactionTesterInner { + started: AtomicUsize, + committed: AtomicUsize, + rolled_back: AtomicUsize, +} + +decl_extension! 
{ + struct TransactionTester(Arc); + + impl TransactionTester { + fn start_transaction(&mut self, ty: TransactionType) { + assert_eq!(ty, TransactionType::Host); + self.0.started.fetch_add(1, Ordering::Relaxed); + } + + fn commit_transaction(&mut self, ty: TransactionType) { + assert_eq!(ty, TransactionType::Host); + self.0.committed.fetch_add(1, Ordering::Relaxed); + } + + fn rollback_transaction(&mut self, ty: TransactionType) { + assert_eq!(ty, TransactionType::Host); + self.0.rolled_back.fetch_add(1, Ordering::Relaxed); + } + } +} + #[test] fn ensure_transactional_works() { const KEY: &[u8] = b"test"; let client = TestClientBuilder::new().build(); let best_hash = client.chain_info().best_hash; + let transaction_tester = Arc::new(TransactionTesterInner::default()); - let runtime_api = client.runtime_api(); + let mut runtime_api = client.runtime_api(); + runtime_api.register_extension(TransactionTester(transaction_tester.clone())); runtime_api.execute_in_transaction(|api| { api.write_key_value(best_hash, KEY.to_vec(), vec![1, 2, 3], false).unwrap(); @@ -207,7 +244,8 @@ fn ensure_transactional_works() { .unwrap(); assert_eq!(changes.main_storage_changes[0].1, Some(vec![1, 2, 3, 4])); - let runtime_api = client.runtime_api(); + let mut runtime_api = client.runtime_api(); + runtime_api.register_extension(TransactionTester(transaction_tester.clone())); runtime_api.execute_in_transaction(|api| { assert!(api.write_key_value(best_hash, KEY.to_vec(), vec![1, 2, 3], true).is_err()); @@ -218,4 +256,21 @@ fn ensure_transactional_works() { .into_storage_changes(&client.state_at(best_hash).unwrap(), best_hash) .unwrap(); assert_eq!(changes.main_storage_changes[0].1, Some(vec![1, 2, 3])); + + let mut runtime_api = client.runtime_api(); + runtime_api.register_extension(TransactionTester(transaction_tester.clone())); + runtime_api.execute_in_transaction(|api| { + assert!(api.write_key_value(best_hash, KEY.to_vec(), vec![1, 2], true).is_err()); + + 
TransactionOutcome::Rollback(()) + }); + + let changes = runtime_api + .into_storage_changes(&client.state_at(best_hash).unwrap(), best_hash) + .unwrap(); + assert!(changes.main_storage_changes.is_empty()); + + assert_eq!(transaction_tester.started.load(Ordering::Relaxed), 4); + assert_eq!(transaction_tester.committed.load(Ordering::Relaxed), 3); + assert_eq!(transaction_tester.rolled_back.load(Ordering::Relaxed), 1); } diff --git a/substrate/primitives/externalities/src/extensions.rs b/substrate/primitives/externalities/src/extensions.rs index 6e7e369a676cf..9e6b64e26a8b1 100644 --- a/substrate/primitives/externalities/src/extensions.rs +++ b/substrate/primitives/externalities/src/extensions.rs @@ -32,6 +32,27 @@ use core::{ ops::DerefMut, }; +/// Informs [`Extension`] about what type of transaction is started, committed or rolled back. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum TransactionType { + /// A transaction started by the host. + Host, + /// A transaction started by the runtime. + Runtime, +} + +impl TransactionType { + /// Is `self` set to [`Self::Host`]. + pub fn is_host(self) -> bool { + matches!(self, Self::Host) + } + + /// Is `self` set to [`Self::Runtime`]. + pub fn is_runtime(self) -> bool { + matches!(self, Self::Runtime) + } +} + /// Marker trait for types that should be registered as [`Externalities`](crate::Externalities) /// extension. /// @@ -40,11 +61,26 @@ use core::{ pub trait Extension: Send + 'static { /// Return the extension as `&mut dyn Any`. /// - /// This is a trick to make the trait type castable into an `Any`. + /// This is a trick to make the trait type castable into an [`Any`]. fn as_mut_any(&mut self) -> &mut dyn Any; /// Get the [`TypeId`] of this `Extension`. fn type_id(&self) -> TypeId; + + /// Start a transaction of type `ty`. + fn start_transaction(&mut self, ty: TransactionType) { + let _ty = ty; + } + + /// Commit a transaction of type `ty`. 
+ fn commit_transaction(&mut self, ty: TransactionType) { + let _ty = ty; + } + + /// Rollback a transaction of type `ty`. + fn rollback_transaction(&mut self, ty: TransactionType) { + let _ty = ty; + } } impl Extension for Box { @@ -55,6 +91,18 @@ impl Extension for Box { fn type_id(&self) -> TypeId { (**self).type_id() } + + fn start_transaction(&mut self, ty: TransactionType) { + (**self).start_transaction(ty); + } + + fn commit_transaction(&mut self, ty: TransactionType) { + (**self).commit_transaction(ty); + } + + fn rollback_transaction(&mut self, ty: TransactionType) { + (**self).rollback_transaction(ty); + } } /// Macro for declaring an extension that usable with [`Extensions`]. @@ -70,11 +118,37 @@ impl Extension for Box { /// struct TestExt(String); /// } /// ``` +/// +/// The [`Extension`] trait provides hooks that are called when starting, committing or rolling back +/// a transaction. These can be implemented with the macro as well: +/// ``` +/// # use sp_externalities::decl_extension; +/// decl_extension! { +/// /// Some test extension +/// struct TestExtWithCallback(String); +/// +/// impl TestExtWithCallback { +/// fn start_transaction(&mut self, ty: TransactionType) { +/// // do something cool +/// } +/// +/// // The other methods `commit_transaction` and `rollback_transaction` can also +/// // be implemented in the same way. +/// } +/// } +/// ``` #[macro_export] macro_rules! decl_extension { ( $( #[ $attr:meta ] )* $vis:vis struct $ext_name:ident ($inner:ty); + $( + impl $ext_name_impl:ident { + $( + $impls:tt + )* + } + )* ) => { $( #[ $attr ] )* $vis struct $ext_name (pub $inner); @@ -87,6 +161,12 @@ macro_rules! decl_extension { fn type_id(&self) -> core::any::TypeId { core::any::Any::type_id(self) } + + $( + $( + $impls + )* + )* } impl core::ops::Deref for $ext_name { @@ -220,6 +300,21 @@ impl Extensions { pub fn merge(&mut self, other: Self) { self.extensions.extend(other.extensions); } + + /// Start a transaction of type `ty`. 
+ pub fn start_transaction(&mut self, ty: TransactionType) { + self.extensions.values_mut().for_each(|e| e.start_transaction(ty)); + } + + /// Commit a transaction of type `ty`. + pub fn commit_transaction(&mut self, ty: TransactionType) { + self.extensions.values_mut().for_each(|e| e.commit_transaction(ty)); + } + + /// Rollback a transaction of type `ty`. + pub fn rollback_transaction(&mut self, ty: TransactionType) { + self.extensions.values_mut().for_each(|e| e.rollback_transaction(ty)); + } } impl Extend for Extensions { diff --git a/substrate/primitives/externalities/src/lib.rs b/substrate/primitives/externalities/src/lib.rs index bcc46ee4f1b29..a543b6758ee4f 100644 --- a/substrate/primitives/externalities/src/lib.rs +++ b/substrate/primitives/externalities/src/lib.rs @@ -32,7 +32,7 @@ use core::any::{Any, TypeId}; use sp_storage::{ChildInfo, StateVersion, TrackedStorageKey}; -pub use extensions::{Extension, ExtensionStore, Extensions}; +pub use extensions::{Extension, ExtensionStore, Extensions, TransactionType}; pub use scope_limited::{set_and_run_with_externalities, with_externalities}; mod extensions; diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index afd0eeb1a55cd..5230bd46449c8 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -29,7 +29,9 @@ use sp_core::hexdisplay::HexDisplay; use sp_core::storage::{ well_known_keys::is_child_storage_key, ChildInfo, StateVersion, TrackedStorageKey, }; -use sp_externalities::{Extension, ExtensionStore, Externalities, MultiRemovalResults}; +use sp_externalities::{ + Extension, ExtensionStore, Externalities, MultiRemovalResults, TransactionType, +}; use crate::{trace, warn}; use alloc::{boxed::Box, vec::Vec}; @@ -579,15 +581,34 @@ where } fn storage_start_transaction(&mut self) { - self.overlay.start_transaction() + self.overlay.start_transaction(); + + #[cfg(feature = "std")] + if let Some(exts) = 
self.extensions.as_mut() { + exts.start_transaction(TransactionType::Runtime); + } } fn storage_rollback_transaction(&mut self) -> Result<(), ()> { - self.overlay.rollback_transaction().map_err(|_| ()) + self.overlay.rollback_transaction().map_err(|_| ())?; + + #[cfg(feature = "std")] + if let Some(exts) = self.extensions.as_mut() { + exts.rollback_transaction(TransactionType::Runtime); + } + + Ok(()) } fn storage_commit_transaction(&mut self) -> Result<(), ()> { - self.overlay.commit_transaction().map_err(|_| ()) + self.overlay.commit_transaction().map_err(|_| ())?; + + #[cfg(feature = "std")] + if let Some(exts) = self.extensions.as_mut() { + exts.commit_transaction(TransactionType::Runtime); + } + + Ok(()) } fn wipe(&mut self) { diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index efc86a2eb3294..e9aa4e122ee42 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -31,11 +31,12 @@ use sp_core::{ storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo, StateVersion}, }; #[cfg(feature = "std")] -use sp_externalities::{Extension, Extensions}; +use sp_externalities::{Extension, Extensions, TransactionType}; use sp_trie::{empty_child_trie_root, LayoutV1}; #[cfg(not(feature = "std"))] use alloc::collections::btree_map::BTreeMap as Map; +use core::ops::DerefMut; #[cfg(feature = "std")] use std::collections::{hash_map::Entry as MapEntry, HashMap as Map}; @@ -817,6 +818,16 @@ pub enum OverlayedExtension<'a> { Owned(Box), } +#[cfg(feature = "std")] +impl OverlayedExtension<'_> { + fn extension(&mut self) -> &mut dyn Extension { + match self { + Self::MutRef(ext) => *ext, + Self::Owned(ext) => &mut *ext, + } + } +} + /// Overlayed extensions which are sourced from [`Extensions`]. 
/// /// The sourced extensions will be stored as mutable references, @@ -870,6 +881,29 @@ impl<'a> OverlayedExtensions<'a> { pub fn deregister(&mut self, type_id: TypeId) -> bool { self.extensions.remove(&type_id).is_some() } + + /// Start a transaction. + /// + /// The `ty` declares the type of transaction. + pub fn start_transaction(&mut self, ty: TransactionType) { + self.extensions.values_mut().for_each(|e| e.extension().start_transaction(ty)); + } + + /// Commit a transaction. + /// + /// The `ty` declares the type of transaction. + pub fn commit_transaction(&mut self, ty: TransactionType) { + self.extensions.values_mut().for_each(|e| e.extension().commit_transaction(ty)); + } + + /// Rollback a transaction. + /// + /// The `ty` declares the type of transaction. + pub fn rollback_transaction(&mut self, ty: TransactionType) { + self.extensions + .values_mut() + .for_each(|e| e.extension().rollback_transaction(ty)); + } } #[cfg(test)] diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index de828c2e10194..46d4f4cde41c4 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -188,6 +188,31 @@ pub trait TrieRecorderProvider { pub trait ProofSizeProvider { /// Returns the storage proof size. fn estimate_encoded_size(&self) -> usize; + + /// Start a transaction. + /// + /// `is_host` is set to `true` when the transaction was started by the host. + fn start_transaction(&mut self, is_host: bool) { + let _ = is_host; + } + + /// Rollback the last transaction. + /// + /// `is_host` is set to `true` when the transaction to rollback was started by the host. + /// + /// If there is no active transaction, the call should be ignored. + fn rollback_transaction(&mut self, is_host: bool) { + let _ = is_host; + } + + /// Commit the last transaction. + /// + /// `is_host` is set to `true` when the transaction to commit was started by the host. 
+ /// + /// If there is no active transaction, the call should be ignored. + fn commit_transaction(&mut self, is_host: bool) { + let _ = is_host; + } } /// TrieDB error over `TrieConfiguration` trait. diff --git a/substrate/primitives/trie/src/proof_size_extension.rs b/substrate/primitives/trie/src/proof_size_extension.rs index c97f334494afd..1de98e6590a1f 100644 --- a/substrate/primitives/trie/src/proof_size_extension.rs +++ b/substrate/primitives/trie/src/proof_size_extension.rs @@ -18,12 +18,29 @@ //! Externalities extension that provides access to the current proof size //! of the underlying recorder. +use parking_lot::Mutex; + use crate::ProofSizeProvider; +use std::{collections::VecDeque, sync::Arc}; sp_externalities::decl_extension! { /// The proof size extension to fetch the current storage proof size /// in externalities. pub struct ProofSizeExt(Box); + + impl ProofSizeExt { + fn start_transaction(&mut self, ty: sp_externalities::TransactionType) { + self.0.start_transaction(ty.is_host()); + } + + fn rollback_transaction(&mut self, ty: sp_externalities::TransactionType) { + self.0.rollback_transaction(ty.is_host()); + } + + fn commit_transaction(&mut self, ty: sp_externalities::TransactionType) { + self.0.commit_transaction(ty.is_host()); + } + } } impl ProofSizeExt { @@ -37,3 +54,429 @@ impl ProofSizeExt { self.0.estimate_encoded_size() as _ } } + +/// Proof size estimations as recorded by [`RecordingProofSizeProvider`]. +/// +/// Each item is the estimated proof size as observed when calling +/// [`ProofSizeProvider::estimate_encoded_size`]. The items are ordered by t +pub struct RecordedProofSizeEstimations(pub VecDeque); + +/// Inner structure of [`RecordingProofSizeProvider`]. +struct RecordingProofSizeProviderInner { + inner: Box, + proof_size_estimations: Vec>, +} + +/// An implementation of [`ProofSizeProvider`] that records the return value of the calls to +/// [`ProofSizeProvider::estimate_encoded_size`]. 
+/// +/// Wraps an inner [`ProofSizeProvider`] that is used to get the actual encoded size estimations. +/// Each estimation is recorded in the order it was observed. +#[derive(Clone)] +pub struct RecordingProofSizeProvider { + inner: Arc>, +} + +impl RecordingProofSizeProvider { + /// Creates a new instance of [`RecordingProofSizeProvider`]. + pub fn new(recorder: T) -> Self { + Self { + inner: Arc::new(Mutex::new(RecordingProofSizeProviderInner { + inner: Box::new(recorder), + // Init the always existing transaction. + proof_size_estimations: vec![Vec::new()], + })), + } + } + + /// Returns the recorded estimations returned by each call to + /// [`Self::estimate_encoded_size`]. + pub fn recorded_estimations(&self) -> Vec { + self.inner.lock().proof_size_estimations.iter().flatten().copied().collect() + } +} + +impl ProofSizeProvider for RecordingProofSizeProvider { + fn estimate_encoded_size(&self) -> usize { + let mut inner = self.inner.lock(); + + let estimation = inner.inner.estimate_encoded_size(); + + inner + .proof_size_estimations + .last_mut() + .expect("There is always at least one transaction open; qed") + .push(estimation); + + estimation + } + + fn start_transaction(&mut self, is_host: bool) { + // We don't care about runtime transactions, because they are part of the consensus critical + // path, that will always deterministically call this code. + if is_host { + self.inner.lock().proof_size_estimations.push(Default::default()); + } + } + + fn rollback_transaction(&mut self, is_host: bool) { + let mut inner = self.inner.lock(); + + // The host side transaction needs to be reverted, because this is only done when an + // entire execution is rolled back. So, the execution will never be part of the consensus + // critical path. 
+ if is_host && inner.proof_size_estimations.len() > 1 { + inner.proof_size_estimations.pop(); + } + } + + fn commit_transaction(&mut self, is_host: bool) { + let mut inner = self.inner.lock(); + + if is_host && inner.proof_size_estimations.len() > 1 { + let last = inner + .proof_size_estimations + .pop() + .expect("There are more than one element in the vector; qed"); + + inner + .proof_size_estimations + .last_mut() + .expect("There are more than one element in the vector; qed") + .extend(last); + } + } +} + +/// An implementation of [`ProofSizeProvider`] that replays estimations recorded by +/// [`RecordingProofSizeProvider`]. +/// +/// The recorded estimations are removed as they are required by calls to +/// [`Self::estimate_encoded_size`]. Will return `0` when all estimations are consumed. +pub struct ReplayProofSizeProvider(Arc>); + +impl ReplayProofSizeProvider { + /// Creates a new instance from the given [`RecordedProofSizeEstimations`]. + pub fn from_recorded(recorded: RecordedProofSizeEstimations) -> Self { + Self(Arc::new(Mutex::new(recorded))) + } +} + +impl From for ReplayProofSizeProvider { + fn from(value: RecordedProofSizeEstimations) -> Self { + Self::from_recorded(value) + } +} + +impl ProofSizeProvider for ReplayProofSizeProvider { + fn estimate_encoded_size(&self) -> usize { + self.0.lock().0.pop_front().unwrap_or_default() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::sync::atomic::{AtomicUsize, Ordering}; + + // Mock ProofSizeProvider for testing + #[derive(Clone)] + struct MockProofSizeProvider { + size: Arc, + } + + impl MockProofSizeProvider { + fn new(initial_size: usize) -> Self { + Self { size: Arc::new(AtomicUsize::new(initial_size)) } + } + + fn set_size(&self, new_size: usize) { + self.size.store(new_size, Ordering::Relaxed); + } + } + + impl ProofSizeProvider for MockProofSizeProvider { + fn estimate_encoded_size(&self) -> usize { + self.size.load(Ordering::Relaxed) + } + + fn start_transaction(&mut self, _is_host: 
bool) {} + fn rollback_transaction(&mut self, _is_host: bool) {} + fn commit_transaction(&mut self, _is_host: bool) {} + } + + #[test] + fn recording_proof_size_provider_basic_functionality() { + let mock = MockProofSizeProvider::new(100); + let tracker = RecordingProofSizeProvider::new(mock.clone()); + + // Initial state - no estimations recorded yet + assert_eq!(tracker.recorded_estimations(), Vec::::new()); + + // Call estimate_encoded_size and verify it's recorded + let size = tracker.estimate_encoded_size(); + assert_eq!(size, 100); + assert_eq!(tracker.recorded_estimations(), vec![100]); + + // Change the mock size and call again + mock.set_size(200); + let size = tracker.estimate_encoded_size(); + assert_eq!(size, 200); + assert_eq!(tracker.recorded_estimations(), vec![100, 200]); + + // Multiple calls with same size + let size = tracker.estimate_encoded_size(); + assert_eq!(size, 200); + assert_eq!(tracker.recorded_estimations(), vec![100, 200, 200]); + } + + #[test] + fn recording_proof_size_provider_host_transactions() { + let mock = MockProofSizeProvider::new(100); + let mut tracker = RecordingProofSizeProvider::new(mock.clone()); + + // Record some estimations in the initial transaction + tracker.estimate_encoded_size(); + tracker.estimate_encoded_size(); + assert_eq!(tracker.recorded_estimations(), vec![100, 100]); + + // Start a host transaction + tracker.start_transaction(true); + mock.set_size(200); + tracker.estimate_encoded_size(); + + // Should have 3 estimations total + assert_eq!(tracker.recorded_estimations(), vec![100, 100, 200]); + + // Commit the host transaction + tracker.commit_transaction(true); + + // All estimations should still be there + assert_eq!(tracker.recorded_estimations(), vec![100, 100, 200]); + + // Add more estimations + mock.set_size(300); + tracker.estimate_encoded_size(); + assert_eq!(tracker.recorded_estimations(), vec![100, 100, 200, 300]); + } + + #[test] + fn recording_proof_size_provider_host_transaction_rollback() 
{ + let mock = MockProofSizeProvider::new(100); + let mut tracker = RecordingProofSizeProvider::new(mock.clone()); + + // Record some estimations in the initial transaction + tracker.estimate_encoded_size(); + assert_eq!(tracker.recorded_estimations(), vec![100]); + + // Start a host transaction + tracker.start_transaction(true); + mock.set_size(200); + tracker.estimate_encoded_size(); + tracker.estimate_encoded_size(); + + // Should have 3 estimations total + assert_eq!(tracker.recorded_estimations(), vec![100, 200, 200]); + + // Rollback the host transaction + tracker.rollback_transaction(true); + + // Should only have the original estimation + assert_eq!(tracker.recorded_estimations(), vec![100]); + } + + #[test] + fn recording_proof_size_provider_runtime_transactions_ignored() { + let mock = MockProofSizeProvider::new(100); + let mut tracker = RecordingProofSizeProvider::new(mock.clone()); + + // Record initial estimation + tracker.estimate_encoded_size(); + assert_eq!(tracker.recorded_estimations(), vec![100]); + + // Start a runtime transaction (is_host = false) + tracker.start_transaction(false); + mock.set_size(200); + tracker.estimate_encoded_size(); + + // Should have both estimations + assert_eq!(tracker.recorded_estimations(), vec![100, 200]); + + // Commit runtime transaction - should not affect recording + tracker.commit_transaction(false); + assert_eq!(tracker.recorded_estimations(), vec![100, 200]); + + // Rollback runtime transaction - should not affect recording + tracker.rollback_transaction(false); + assert_eq!(tracker.recorded_estimations(), vec![100, 200]); + } + + #[test] + fn recording_proof_size_provider_nested_host_transactions() { + let mock = MockProofSizeProvider::new(100); + let mut tracker = RecordingProofSizeProvider::new(mock.clone()); + + // Initial estimation + tracker.estimate_encoded_size(); + assert_eq!(tracker.recorded_estimations(), vec![100]); + + // Start first host transaction + tracker.start_transaction(true); + 
mock.set_size(200); + tracker.estimate_encoded_size(); + + // Start nested host transaction + tracker.start_transaction(true); + mock.set_size(300); + tracker.estimate_encoded_size(); + + assert_eq!(tracker.recorded_estimations(), vec![100, 200, 300]); + + // Commit nested transaction + tracker.commit_transaction(true); + assert_eq!(tracker.recorded_estimations(), vec![100, 200, 300]); + + // Commit outer transaction + tracker.commit_transaction(true); + assert_eq!(tracker.recorded_estimations(), vec![100, 200, 300]); + } + + #[test] + fn recording_proof_size_provider_nested_host_transaction_rollback() { + let mock = MockProofSizeProvider::new(100); + let mut tracker = RecordingProofSizeProvider::new(mock.clone()); + + // Initial estimation + tracker.estimate_encoded_size(); + + // Start first host transaction + tracker.start_transaction(true); + mock.set_size(200); + tracker.estimate_encoded_size(); + + // Start nested host transaction + tracker.start_transaction(true); + mock.set_size(300); + tracker.estimate_encoded_size(); + + assert_eq!(tracker.recorded_estimations(), vec![100, 200, 300]); + + // Rollback nested transaction + tracker.rollback_transaction(true); + assert_eq!(tracker.recorded_estimations(), vec![100, 200]); + + // Rollback outer transaction + tracker.rollback_transaction(true); + assert_eq!(tracker.recorded_estimations(), vec![100]); + } + + #[test] + fn recording_proof_size_provider_rollback_on_base_transaction_does_nothing() { + let mock = MockProofSizeProvider::new(100); + let mut tracker = RecordingProofSizeProvider::new(mock.clone()); + + // Record some estimations + tracker.estimate_encoded_size(); + tracker.estimate_encoded_size(); + assert_eq!(tracker.recorded_estimations(), vec![100, 100]); + + // Try to rollback the base transaction - should do nothing + tracker.rollback_transaction(true); + assert_eq!(tracker.recorded_estimations(), vec![100, 100]); + } + + #[test] + fn recorded_proof_size_estimations_struct() { + let estimations = 
vec![100, 200, 300]; + let recorded = RecordedProofSizeEstimations(estimations.into()); + let expected: VecDeque = vec![100, 200, 300].into(); + assert_eq!(recorded.0, expected); + } + + #[test] + fn replay_proof_size_provider_basic_functionality() { + let estimations = vec![100, 200, 300, 150]; + let recorded = RecordedProofSizeEstimations(estimations.into()); + let replay = ReplayProofSizeProvider::from_recorded(recorded); + + // Should replay estimations in order + assert_eq!(replay.estimate_encoded_size(), 100); + assert_eq!(replay.estimate_encoded_size(), 200); + assert_eq!(replay.estimate_encoded_size(), 300); + assert_eq!(replay.estimate_encoded_size(), 150); + } + + #[test] + fn replay_proof_size_provider_exhausted_returns_zero() { + let estimations = vec![100, 200]; + let recorded = RecordedProofSizeEstimations(estimations.into()); + let replay = ReplayProofSizeProvider::from_recorded(recorded); + + // Consume all estimations + assert_eq!(replay.estimate_encoded_size(), 100); + assert_eq!(replay.estimate_encoded_size(), 200); + + // Should return 0 when exhausted + assert_eq!(replay.estimate_encoded_size(), 0); + assert_eq!(replay.estimate_encoded_size(), 0); + } + + #[test] + fn replay_proof_size_provider_empty_returns_zero() { + let recorded = RecordedProofSizeEstimations(VecDeque::new()); + let replay = ReplayProofSizeProvider::from_recorded(recorded); + + // Should return 0 for empty estimations + assert_eq!(replay.estimate_encoded_size(), 0); + assert_eq!(replay.estimate_encoded_size(), 0); + } + + #[test] + fn replay_proof_size_provider_from_trait() { + let estimations = vec![42, 84]; + let recorded = RecordedProofSizeEstimations(estimations.into()); + let replay: ReplayProofSizeProvider = recorded.into(); + + assert_eq!(replay.estimate_encoded_size(), 42); + assert_eq!(replay.estimate_encoded_size(), 84); + assert_eq!(replay.estimate_encoded_size(), 0); + } + + #[test] + fn record_and_replay_integration() { + let mock = 
MockProofSizeProvider::new(100); + let recorder = RecordingProofSizeProvider::new(mock.clone()); + + // Record some estimations + // recorder.estimate_encoded_size(); + mock.set_size(200); + recorder.estimate_encoded_size(); + mock.set_size(300); + recorder.estimate_encoded_size(); + + // Get recorded estimations + let recorded_estimations = recorder.recorded_estimations(); + assert_eq!(recorded_estimations, vec![100, 200, 300]); + + // Create replay provider from recorded estimations + let recorded = RecordedProofSizeEstimations(recorded_estimations.into()); + let replay = ReplayProofSizeProvider::from_recorded(recorded); + + // Replay should return the same sequence + assert_eq!(replay.estimate_encoded_size(), 100); + assert_eq!(replay.estimate_encoded_size(), 200); + assert_eq!(replay.estimate_encoded_size(), 300); + assert_eq!(replay.estimate_encoded_size(), 0); + } + + #[test] + fn replay_proof_size_provider_single_value() { + let estimations = vec![42]; + let recorded = RecordedProofSizeEstimations(estimations.into()); + let replay = ReplayProofSizeProvider::from_recorded(recorded); + + // Should return the single value then default to 0 + assert_eq!(replay.estimate_encoded_size(), 42); + assert_eq!(replay.estimate_encoded_size(), 0); + } +} From e0f9c457523b9611db466bf53f4a3fe5bc30b233 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 24 Sep 2025 22:26:48 +0200 Subject: [PATCH 121/312] Fixes --- .../assets/asset-hub-rococo/src/lib.rs | 2 -- .../assets/asset-hub-westend/src/lib.rs | 2 -- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 19 +++++++++---------- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 19 +++++++++---------- .../collectives-westend/src/lib.rs | 2 -- .../glutton/glutton-westend/src/lib.rs | 2 -- .../runtimes/people/people-rococo/src/lib.rs | 2 -- .../runtimes/testing/penpal/src/lib.rs | 2 -- .../testing/rococo-parachain/src/lib.rs | 2 -- .../testing/yet-another-parachain/src/lib.rs | 2 -- 
polkadot/runtime/test-runtime/src/lib.rs | 2 -- polkadot/runtime/westend/src/lib.rs | 2 -- 12 files changed, 18 insertions(+), 40 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index abbc34eeb1bda..d5507aebc0353 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1413,8 +1413,6 @@ impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } - - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 8b35992510d09..f707e83be1ac5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1780,8 +1780,6 @@ pallet_revive::impl_runtime_apis_plus_revive!( ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } - - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 878742ab13de5..53d72f9b877ff 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -787,7 +787,6 @@ impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } - } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { @@ -1579,6 +1578,15 @@ impl_runtime_apis! 
{ ParachainInfo::parachain_id() } } + + impl cumulus_primitives_core::SlotSchedule for Runtime { + fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::BlockInterval { + cumulus_primitives_core::BlockInterval { + number_of_blocks: 1, + block_time: core::time::Duration::from_secs(2), + } + } + } } #[cfg(test)] @@ -1638,13 +1646,4 @@ mod tests { } }); } - - impl cumulus_primitives_core::SlotSchedule for Runtime { - fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::BlockInterval { - cumulus_primitives_core::BlockInterval { - number_of_blocks: 1, - block_time: core::time::Duration::from_secs(2), - } - } - } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 95c8ed75de663..1b9fc0b1baa82 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -738,7 +738,6 @@ impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } - } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { @@ -1441,6 +1440,15 @@ impl_runtime_apis! { ParachainInfo::parachain_id() } } + + impl cumulus_primitives_core::SlotSchedule for Runtime { + fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::BlockInterval { + cumulus_primitives_core::BlockInterval { + number_of_blocks: 1, + block_time: core::time::Duration::from_secs(2), + } + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ @@ -1504,13 +1512,4 @@ mod tests { } }); } - - impl cumulus_primitives_core::SlotSchedule for Runtime { - fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::BlockInterval { - cumulus_primitives_core::BlockInterval { - number_of_blocks: 1, - block_time: core::time::Duration::from_secs(2), - } - } - } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 32fbd05828195..a4983f96e55fb 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -926,8 +926,6 @@ impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } - - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 24114ab491cf2..863c7a035aa79 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -399,8 +399,6 @@ impl_runtime_apis! { fn check_inherents(block: Block, data: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } - - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 1210791cc1beb..7a58b17624040 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -721,8 +721,6 @@ impl_runtime_apis! 
{ ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } - - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 2d36ba159da96..5bb8ac8e2792f 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -963,8 +963,6 @@ impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } - - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 12c7b684faeea..4eab1261a0780 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -755,8 +755,6 @@ impl_runtime_apis! { fn check_inherents(block: Block, data: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } - - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs index ec4ee9d873167..40be27f4aee4a 100644 --- a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs @@ -549,8 +549,6 @@ impl_runtime_apis! 
{ fn check_inherents(block: Block, data: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } - - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 4842bb159b76b..3a3b2c7b3e6af 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -938,8 +938,6 @@ sp_api::impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } - - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 7192ef67a64d6..4d3eeab7d0f4f 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2229,8 +2229,6 @@ sp_api::impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } - - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { From b00a95a64817023e0c43159c7c710aed75dca5d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 26 Sep 2025 21:39:08 +0200 Subject: [PATCH 122/312] Fixes --- cumulus/parachains/runtimes/people/people-westend/src/lib.rs | 2 -- polkadot/runtime/rococo/src/lib.rs | 2 -- 2 files changed, 4 deletions(-) diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 1160454cd7de8..f50a406f91f71 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -723,8 +723,6 @@ impl_runtime_apis! 
{ ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } - - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 85300af2d3833..586b989075ce1 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1993,8 +1993,6 @@ sp_api::impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } - - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { From 593226775f5f1338ec5d55fa60caf18b60eb0599 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 26 Sep 2025 21:48:15 +0200 Subject: [PATCH 123/312] Forward extensions to block builder --- Cargo.lock | 33 +--- Cargo.toml | 2 - cumulus/client/consensus/aura/Cargo.toml | 2 +- cumulus/client/consensus/aura/src/collator.rs | 124 +++++++++---- .../consensus/aura/src/collators/basic.rs | 6 +- .../consensus/aura/src/collators/lookahead.rs | 12 +- .../slot_based/block_builder_task.rs | 40 ++-- .../aura/src/collators/slot_based/mod.rs | 6 +- cumulus/client/consensus/aura/src/lib.rs | 24 +-- cumulus/client/consensus/proposer/Cargo.toml | 31 ---- cumulus/client/consensus/proposer/src/lib.rs | 129 ------------- .../client/consensus/relay-chain/src/lib.rs | 48 +++-- cumulus/polkadot-omni-node/lib/Cargo.toml | 1 - .../polkadot-omni-node/lib/src/nodes/aura.rs | 8 +- cumulus/test/service/Cargo.toml | 1 - cumulus/test/service/src/lib.rs | 2 +- substrate/client/basic-authorship/Cargo.toml | 1 + .../basic-authorship/src/basic_authorship.rs | 171 ++++-------------- substrate/client/basic-authorship/src/lib.rs | 3 +- substrate/client/block-builder/Cargo.toml | 2 +- substrate/client/block-builder/src/lib.rs | 86 ++++----- substrate/client/consensus/aura/src/lib.rs | 31 ++-- substrate/client/consensus/babe/src/tests.rs | 23 +-- .../client/consensus/manual-seal/Cargo.toml | 2 + 
.../consensus/manual-seal/src/consensus.rs | 6 +- .../manual-seal/src/consensus/aura.rs | 6 +- .../manual-seal/src/consensus/babe.rs | 6 +- .../client/consensus/manual-seal/src/lib.rs | 34 ++-- .../consensus/manual-seal/src/seal_block.rs | 47 +++-- substrate/client/consensus/pow/src/lib.rs | 49 ++--- substrate/client/consensus/pow/src/worker.rs | 13 +- substrate/client/consensus/slots/Cargo.toml | 1 + substrate/client/consensus/slots/src/lib.rs | 57 +++--- substrate/client/consensus/slots/src/slots.rs | 28 ++- .../api/test/tests/runtime_calls.rs | 18 +- .../primitives/consensus/common/Cargo.toml | 3 + .../primitives/consensus/common/src/lib.rs | 128 ++++--------- .../externalities/src/extensions.rs | 26 +++ .../benchmarking-cli/src/extrinsic/bench.rs | 11 +- umbrella/Cargo.toml | 5 - umbrella/src/lib.rs | 3 - 41 files changed, 498 insertions(+), 731 deletions(-) delete mode 100644 cumulus/client/consensus/proposer/Cargo.toml delete mode 100644 cumulus/client/consensus/proposer/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 37c9f2a78fde0..46fc771b455cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4347,7 +4347,6 @@ dependencies = [ "async-trait", "cumulus-client-collator", "cumulus-client-consensus-common", - "cumulus-client-consensus-proposer", "cumulus-client-parachain-inherent", "cumulus-primitives-aura", "cumulus-primitives-core", @@ -4378,6 +4377,7 @@ dependencies = [ "sp-consensus", "sp-consensus-aura", "sp-core 28.0.0", + "sp-externalities 0.25.0", "sp-inherents", "sp-keyring", "sp-keystore", @@ -4427,25 +4427,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "cumulus-client-consensus-proposer" -version = "0.7.0" -dependencies = [ - "anyhow", - "async-trait", - "cumulus-primitives-parachain-inherent", - "sc-basic-authorship", - "sc-block-builder", - "sc-transaction-pool-api", - "sp-api", - "sp-blockchain", - "sp-consensus", - "sp-inherents", - "sp-runtime", - "sp-state-machine", - "thiserror 1.0.65", -] - [[package]] name = 
"cumulus-client-consensus-relay-chain" version = "0.7.0" @@ -5144,7 +5125,6 @@ dependencies = [ "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", - "cumulus-client-consensus-proposer", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", @@ -15835,7 +15815,6 @@ dependencies = [ "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", - "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", "cumulus-client-parachain-inherent", "cumulus-client-service", @@ -16211,7 +16190,6 @@ dependencies = [ "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", - "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", "cumulus-client-network", "cumulus-client-parachain-inherent", @@ -19526,6 +19504,7 @@ dependencies = [ "sp-core 28.0.0", "sp-inherents", "sp-runtime", + "sp-state-machine", "sp-trie", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -19540,10 +19519,10 @@ dependencies = [ "sp-block-builder", "sp-blockchain", "sp-core 28.0.0", + "sp-externalities 0.25.0", "sp-inherents", "sp-runtime", "sp-state-machine", - "sp-trie", "substrate-test-runtime-client", ] @@ -19995,10 +19974,12 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-slots", "sp-core 28.0.0", + "sp-externalities 0.25.0", "sp-inherents", "sp-keystore", "sp-runtime", "sp-timestamp", + "sp-trie", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", @@ -20050,6 +20031,7 @@ dependencies = [ "sp-inherents", "sp-runtime", "sp-state-machine", + "sp-trie", "substrate-test-runtime-client", ] @@ -22758,9 +22740,12 @@ dependencies = [ "async-trait", "futures", "log", + "sp-api", + "sp-externalities 0.25.0", "sp-inherents", "sp-runtime", "sp-state-machine", + "sp-trie", "thiserror 1.0.65", ] diff --git a/Cargo.toml b/Cargo.toml index 
63fc621a4756b..bc5bad755dc2b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -67,7 +67,6 @@ members = [ "cumulus/client/collator", "cumulus/client/consensus/aura", "cumulus/client/consensus/common", - "cumulus/client/consensus/proposer", "cumulus/client/consensus/relay-chain", "cumulus/client/network", "cumulus/client/parachain-inherent", @@ -740,7 +739,6 @@ cumulus-client-cli = { path = "cumulus/client/cli", default-features = false } cumulus-client-collator = { path = "cumulus/client/collator", default-features = false } cumulus-client-consensus-aura = { path = "cumulus/client/consensus/aura", default-features = false } cumulus-client-consensus-common = { path = "cumulus/client/consensus/common", default-features = false } -cumulus-client-consensus-proposer = { path = "cumulus/client/consensus/proposer", default-features = false } cumulus-client-consensus-relay-chain = { path = "cumulus/client/consensus/relay-chain", default-features = false } cumulus-client-network = { path = "cumulus/client/network", default-features = false } cumulus-client-parachain-inherent = { path = "cumulus/client/parachain-inherent", default-features = false } diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index 8dca303ffebdb..899286c9a97d7 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -36,6 +36,7 @@ sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-consensus-aura = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } @@ -46,7 +47,6 @@ sp-trie = { workspace = true, default-features = true } # Cumulus 
cumulus-client-collator = { workspace = true, default-features = true } cumulus-client-consensus-common = { workspace = true, default-features = true } -cumulus-client-consensus-proposer = { workspace = true, default-features = true } cumulus-client-parachain-inherent = { workspace = true, default-features = true } cumulus-primitives-aura = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index 9843ebd11ab50..e57337dec9c14 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -30,22 +30,24 @@ use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterfa use cumulus_client_consensus_common::{ self as consensus_common, ParachainBlockImportMarker, ParachainCandidate, }; -use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_client_parachain_inherent::{ParachainInherentData, ParachainInherentDataProvider}; use cumulus_primitives_core::{ relay_chain::Hash as PHash, DigestItem, ParachainBlockData, PersistedValidationData, }; use cumulus_relay_chain_interface::RelayChainInterface; +use sp_consensus::{Environment, ProposeArgs, Proposer}; use polkadot_node_primitives::{Collation, MaybeCompressedPoV}; use polkadot_primitives::{Header as PHeader, Id as ParaId}; +use sp_externalities::Extensions; +use sp_trie::proof_size_extension::ProofSizeExt; use crate::collators::RelayParentData; use futures::prelude::*; use sc_client_api::BackendTransaction; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction}; use sc_consensus_aura::standalone as aura_internal; -use sp_api::{ProofRecorderIgnoredNodes, ProvideRuntimeApi, StorageProof}; +use sp_api::{ProofRecorder, ProofRecorderIgnoredNodes, ProvideRuntimeApi, StorageProof}; use sp_application_crypto::AppPublic; use sp_consensus::BlockOrigin; 
use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; @@ -61,7 +63,7 @@ use sp_timestamp::Timestamp; use std::{error::Error, time::Duration}; /// Parameters for instantiating a [`Collator`]. -pub struct Params { +pub struct Params { /// A builder for inherent data builders. pub create_inherent_data_providers: CIDP, /// The block import handle. @@ -72,13 +74,38 @@ pub struct Params { pub keystore: KeystorePtr, /// The identifier of the parachain within the relay-chain. pub para_id: ParaId, - /// The block proposer used for building blocks. - pub proposer: Proposer, + /// The proposer used for building blocks. + pub proposer: PF, /// The collator service used for bundling proposals into collations and announcing /// to the network. pub collator_service: CS, } +/// Parameters for [`Collator::build_block_and_import`]. +pub struct BuildBlockAndImportParams<'a, Block: BlockT, P: Pair> { + /// The parent header to build on top of. + pub parent_header: &'a Block::Header, + /// The slot claim for this block. + pub slot_claim: &'a SlotClaim, + /// Additional pre-digest items to include. + pub additional_pre_digest: Vec, + /// Parachain-specific inherent data. + pub parachain_inherent_data: ParachainInherentData, + /// Other inherent data (timestamp, etc.). + pub extra_inherent_data: InherentData, + /// Maximum duration to spend on block proposal. + pub proposal_duration: Duration, + /// Maximum PoV size in bytes. + pub max_pov_size: usize, + /// Optional [`ProofRecorder`] to use. + /// + /// If not set, one will be initialized internally and [`ProofSizeExt`] will be + /// registered. + pub storage_proof_recorder: Option>, + /// Extra extensions to forward to the block production. + pub extra_extensions: Extensions, +} + /// Result of [`Collator::build_block_and_import`]. pub struct BuiltBlock { /// The block that was built. @@ -99,31 +126,31 @@ impl From> for ParachainCandidate { /// A utility struct for writing collation logic that makes use of Aura entirely /// or in part. 
See module docs for more details. -pub struct Collator { +pub struct Collator { create_inherent_data_providers: CIDP, block_import: BI, relay_client: RClient, keystore: KeystorePtr, para_id: ParaId, - proposer: Proposer, + proposer: PF, collator_service: CS, _marker: std::marker::PhantomData<(Block, Box)>, } -impl Collator +impl Collator where Block: BlockT, RClient: RelayChainInterface, CIDP: CreateInherentDataProviders + 'static, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - Proposer: ProposerInterface, + PF: Environment, CS: CollatorServiceInterface, P: Pair, P::Public: AppPublic + Member, P::Signature: TryFrom> + Member + Codec, { /// Instantiate a new instance of the `Aura` manager. - pub fn new(params: Params) -> Self { + pub fn new(params: Params) -> Self { Collator { create_inherent_data_providers: params.create_inherent_data_providers, block_import: params.block_import, @@ -203,40 +230,57 @@ where .await } - /// Build and import a parachain block on the given parent header, using the given slot claim. + /// Build and import a parachain block using the given parameters. 
pub async fn build_block_and_import( &mut self, - parent_header: &Block::Header, - slot_claim: &SlotClaim, - additional_pre_digest: impl Into>>, - inherent_data: (ParachainInherentData, InherentData), - proposal_duration: Duration, - max_pov_size: usize, - ignored_nodes_by_proof_recording: Option>, + mut params: BuildBlockAndImportParams<'_, Block, P>, ) -> Result>, Box> { - let mut digest = additional_pre_digest.into().unwrap_or_default(); - digest.push(slot_claim.pre_digest.clone()); + let mut digest = params.additional_pre_digest; + digest.push(params.slot_claim.pre_digest.clone()); - let maybe_proposal = self + // Create the proposer using the factory + let proposer = self .proposer - .propose( - &parent_header, - &inherent_data.0, - inherent_data.1, - Digest { logs: digest }, - proposal_duration, - Some(max_pov_size), - ignored_nodes_by_proof_recording, - ) + .init(¶ms.parent_header) .await .map_err(|e| Box::new(e) as Box)?; - let Some(proposal) = maybe_proposal else { return Ok(None) }; + // Prepare inherent data - merge parachain inherent data with other inherent data + let mut inherent_data_combined = params.extra_inherent_data; + params + .parachain_inherent_data + .provide_inherent_data(&mut inherent_data_combined) + .await + .map_err(|e| Box::new(e) as Box)?; + + let storage_proof_recorder = params.storage_proof_recorder.unwrap_or_default(); + + if !params.extra_extensions.is_registered(ProofSizeExt::type_id()) { + params + .extra_extensions + .register(ProofSizeExt::new(storage_proof_recorder.clone())); + } + + // Create proposal arguments + let propose_args = ProposeArgs { + inherent_data: inherent_data_combined, + inherent_digests: Digest { logs: digest }, + max_duration: params.proposal_duration, + block_size_limit: Some(params.max_pov_size), + extra_extensions: params.extra_extensions, + storage_proof_recorder: Some(storage_proof_recorder.clone()), + }; + + // Propose the block + let proposal = proposer + .propose(propose_args) + .await + 
.map_err(|e| Box::new(e) as Box)?; let sealed_importable = seal::<_, P>( proposal.block, proposal.storage_changes, - &slot_claim.author_pub, + ¶ms.slot_claim.author_pub, &self.keystore, ) .map_err(|e| e as Box)?; @@ -265,7 +309,9 @@ where .map_err(|e| Box::new(e) as Box) .await?; - Ok(Some(BuiltBlock { block, proof: proposal.proof, backend_transaction })) + let proof = storage_proof_recorder.drain_storage_proof(); + + Ok(Some(BuiltBlock { block, proof, backend_transaction })) } /// Propose, seal, import a block and packaging it into a collation. @@ -286,15 +332,17 @@ where max_pov_size: usize, ) -> Result)>, Box> { let maybe_candidate = self - .build_block_and_import( + .build_block_and_import(BuildBlockAndImportParams { parent_header, slot_claim, - additional_pre_digest, - inherent_data, + additional_pre_digest: additional_pre_digest.into().unwrap_or_default(), + parachain_inherent_data: inherent_data.0, + extra_inherent_data: inherent_data.1, proposal_duration, max_pov_size, - None, - ) + storage_proof_recorder: None, + extra_extensions: Default::default(), + }) .await?; let Some(candidate) = maybe_candidate else { return Ok(None) }; diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs index a66abf979d683..8613f1efd4cb8 100644 --- a/cumulus/client/consensus/aura/src/collators/basic.rs +++ b/cumulus/client/consensus/aura/src/collators/basic.rs @@ -28,9 +28,9 @@ use cumulus_client_collator::{ relay_chain_driven::CollationRequest, service::ServiceInterface as CollatorServiceInterface, }; use cumulus_client_consensus_common::ParachainBlockImportMarker; -use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_core::{relay_chain::BlockId as RBlockId, CollectCollationInfo}; use cumulus_relay_chain_interface::RelayChainInterface; +use sp_consensus::Environment; use polkadot_node_primitives::CollationResult; use polkadot_overseer::Handle as OverseerHandle; @@ -74,7 +74,7 @@ pub 
struct Params { pub overseer_handle: OverseerHandle, /// The length of slots in the relay chain. pub relay_chain_slot_duration: Duration, - /// The underlying block proposer this should call into. + /// The proposer for building blocks. pub proposer: Proposer, /// The generic collator service used to plug into this consensus engine. pub collator_service: CS, @@ -106,7 +106,7 @@ where CIDP: CreateInherentDataProviders + Send + 'static, CIDP::InherentDataProviders: Send, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - Proposer: ProposerInterface + Send + Sync + 'static, + Proposer: Environment + Clone + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, P: Pair, P::Public: AppPublic + Member + Codec, diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 8b7738ce230d5..6ad88a470c536 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -35,10 +35,10 @@ use codec::{Codec, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; -use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::AuraUnincludedSegmentApi; use cumulus_primitives_core::{CollectCollationInfo, PersistedValidationData}; use cumulus_relay_chain_interface::RelayChainInterface; +use sp_consensus::Environment; use polkadot_node_primitives::SubmitCollationParams; use polkadot_node_subsystem::messages::CollationGenerationMessage; @@ -60,7 +60,7 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; use std::{path::PathBuf, sync::Arc, time::Duration}; /// Parameters for [`run`]. -pub struct Params { +pub struct Params { /// Inherent data providers. Only non-consensus inherent data should be provided, i.e. 
/// the timestamp, slot, and paras inherents should be omitted, as they are set by this /// collator. @@ -85,8 +85,8 @@ pub struct Params { pub overseer_handle: OverseerHandle, /// The length of slots in the relay chain. pub relay_chain_slot_duration: Duration, - /// The underlying block proposer this should call into. - pub proposer: Proposer, + /// The proposer for building blocks. + pub proposer: ProposerFactory, /// The generic collator service used to plug into this consensus engine. pub collator_service: CS, /// The amount of time to spend authoring each block. @@ -119,7 +119,7 @@ where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - Proposer: ProposerInterface + Send + Sync + 'static, + Proposer: Environment + Clone + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, P: Pair, @@ -171,7 +171,7 @@ where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - Proposer: ProposerInterface + Send + Sync + 'static, + Proposer: Environment + Clone + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, P: Pair, diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index a405688101f90..bf31fae62cf9d 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -17,7 +17,7 @@ use super::CollatorMessage; use crate::{ - collator::{self as collator_util, Collator, SlotClaim}, + collator::{self as collator_util, BuildBlockAndImportParams, Collator, SlotClaim}, 
collators::{ check_validation_code_or_log, slot_based::{ @@ -31,7 +31,6 @@ use crate::{ use codec::{Codec, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; -use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; use cumulus_primitives_core::{ extract_relay_parent, rpsr_digest, BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, @@ -45,16 +44,18 @@ use polkadot_primitives::{ use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; use sc_consensus::BlockImport; use sc_consensus_aura::SlotDuration; -use sp_api::{ProvideRuntimeApi, StorageProof}; +use sp_api::{ProofRecorder, ProvideRuntimeApi, StorageProof}; use sp_application_crypto::AppPublic; use sp_block_builder::BlockBuilder; use sp_blockchain::HeaderBackend; +use sp_consensus::Environment; use sp_consensus_aura::AuraApi; use sp_core::crypto::Pair; +use sp_externalities::Extensions; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT, Member, Zero}; -use sp_trie::recorder::IgnoredNodes; +use sp_trie::{proof_size_extension::ProofSizeExt, recorder::IgnoredNodes}; use std::{ collections::VecDeque, sync::Arc, @@ -91,7 +92,7 @@ pub struct BuilderTaskParams< pub keystore: KeystorePtr, /// The para's ID. pub para_id: ParaId, - /// The underlying block proposer this should call into. + /// The proposer for building blocks. pub proposer: Proposer, /// The generic collator service used to plug into this consensus engine. 
pub collator_service: CS, @@ -137,7 +138,7 @@ where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - Proposer: ProposerInterface + Send + Sync + 'static, + Proposer: Environment + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, CHP: consensus_common::ValidationCodeHashProvider + Send + Sync + 'static, P: Pair, @@ -438,7 +439,7 @@ where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - Proposer: ProposerInterface + Send + Sync + 'static, + Proposer: Environment + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, { let core_start = Instant::now(); @@ -535,23 +536,30 @@ where Ok(x) => x, }; + let storage_proof_recorder = + ProofRecorder::::with_ignored_nodes(ignored_nodes.clone()); + let mut extra_extensions = Extensions::default(); + extra_extensions.register(ProofSizeExt::new(storage_proof_recorder.clone())); + let Ok(Some(res)) = collator - .build_block_and_import( - &parent_header, + .build_block_and_import(BuildBlockAndImportParams { + parent_header: &parent_header, slot_claim, - Some(vec![ + additional_pre_digest: vec![ CumulusDigestItem::CoreInfo(core_info.clone()).to_digest_item(), CumulusDigestItem::BundleInfo(BundleInfo { index: block_index as u8, maybe_last: is_last, }) .to_digest_item(), - ]), - (parachain_inherent_data, other_inherent_data), - authoring_duration, - allowed_pov_size, - Some(ignored_nodes.clone()), - ) + ], + parachain_inherent_data, + extra_inherent_data: other_inherent_data, + proposal_duration: authoring_duration, + max_pov_size: allowed_pov_size, + storage_proof_recorder: storage_proof_recorder.into(), + extra_extensions, + }) .await else { tracing::error!(target: crate::LOG_TARGET, "Unable to build block at slot."); diff --git 
a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index 40ddcbe94d060..03ac5188de31c 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -71,7 +71,6 @@ pub use block_import::{SlotBasedBlockImport, SlotBasedBlockImportHandle}; use codec::Codec; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; -use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::AuraUnincludedSegmentApi; use cumulus_primitives_core::{RelayParentOffsetApi, SlotSchedule}; use cumulus_relay_chain_interface::RelayChainInterface; @@ -86,6 +85,7 @@ use sp_api::{ProvideRuntimeApi, StorageProof}; use sp_application_crypto::AppPublic; use sp_block_builder::BlockBuilder; use sp_blockchain::HeaderBackend; +use sp_consensus::Environment; use sp_consensus_aura::AuraApi; use sp_core::{crypto::Pair, traits::SpawnNamed}; use sp_inherents::CreateInherentDataProviders; @@ -124,7 +124,7 @@ pub struct Params + 'static, CIDP::InherentDataProviders: Send, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - Proposer: ProposerInterface + Send + Sync + 'static, + Proposer: Environment + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + Clone + 'static, CHP: consensus_common::ValidationCodeHashProvider + Send + Sync + 'static, P: Pair + 'static, diff --git a/cumulus/client/consensus/aura/src/lib.rs b/cumulus/client/consensus/aura/src/lib.rs index 422a593d91555..345c4ce141a58 100644 --- a/cumulus/client/consensus/aura/src/lib.rs +++ b/cumulus/client/consensus/aura/src/lib.rs @@ -36,10 +36,10 @@ use sc_client_api::{backend::AuxStore, BlockOf}; use sc_consensus::BlockImport; use sc_consensus_slots::{BackoffAuthoringBlocksStrategy, SimpleSlotWorker, SlotInfo}; use 
sc_telemetry::TelemetryHandle; -use sp_api::ProvideRuntimeApi; +use sp_api::{ProofRecorder, ProvideRuntimeApi}; use sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; -use sp_consensus::{EnableProofRecording, Environment, ProofRecording, Proposer, SyncOracle}; +use sp_consensus::{Environment, Proposer, SyncOracle}; use sp_consensus_aura::{AuraApi, SlotDuration}; use sp_core::crypto::Pair; use sp_inherents::CreateInherentDataProviders; @@ -144,12 +144,7 @@ where SO: SyncOracle + Send + Sync + Clone + 'static, BS: BackoffAuthoringBlocksStrategy> + Send + Sync + 'static, PF: Environment + Send + Sync + 'static, - PF::Proposer: Proposer< - B, - Error = Error, - ProofRecording = EnableProofRecording, - Proof = ::Proof, - >, + PF::Proposer: Proposer, Error: std::error::Error + Send + From + 'static, P: Pair + 'static, P::Public: AppPublic + Member + Codec, @@ -218,7 +213,7 @@ where CIDP: CreateInherentDataProviders + Send + Sync + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send, W: SimpleSlotWorker + Send + Sync, - W::Proposer: Proposer::Proof>, + W::Proposer: Proposer, { async fn produce_candidate( &mut self, @@ -229,7 +224,9 @@ where let inherent_data_providers = self.inherent_data(parent.hash(), validation_data, relay_parent).await?; - let info = SlotInfo::new( + let storage_proof_recorder = ProofRecorder::::default(); + + let info = SlotInfo::with_storage_proof_recorder( inherent_data_providers.slot(), Box::new(inherent_data_providers), self.slot_duration.as_duration(), @@ -239,6 +236,7 @@ where // TODO: If we got benchmarking that includes the proof size, // we should be able to use the maximum pov size. Some((validation_data.max_pov_size / 2) as usize), + storage_proof_recorder.clone(), ); // With async backing this function will be called every relay chain block. 
@@ -253,9 +251,11 @@ where return None } - let res = self.aura_worker.lock().await.on_slot(info).await?; + let block = self.aura_worker.lock().await.on_slot(info).await?; + + let proof = storage_proof_recorder.drain_storage_proof(); - Some(ParachainCandidate { block: res.block, proof: res.storage_proof }) + Some(ParachainCandidate { block, proof }) } } diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml deleted file mode 100644 index b98c77b3f891b..0000000000000 --- a/cumulus/client/consensus/proposer/Cargo.toml +++ /dev/null @@ -1,31 +0,0 @@ -[package] -name = "cumulus-client-consensus-proposer" -description = "A Substrate `Proposer` for building parachain blocks" -version = "0.7.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage.workspace = true -repository.workspace = true - -[lints] -workspace = true - -[dependencies] -anyhow = { workspace = true, default-features = true } -async-trait = { workspace = true } -thiserror = { workspace = true } - -# Substrate -sc-basic-authorship = { workspace = true } -sc-block-builder = { workspace = true } -sc-transaction-pool-api = { workspace = true } -sp-api = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } -sp-inherents = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } -sp-state-machine = { workspace = true, default-features = true } - -# Cumulus -cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/proposer/src/lib.rs b/cumulus/client/consensus/proposer/src/lib.rs deleted file mode 100644 index 12f9260cb81d6..0000000000000 --- a/cumulus/client/consensus/proposer/src/lib.rs +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// This file is part of Cumulus. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! The Cumulus [`ProposerInterface`] is an extension of the Substrate [`ProposerFactory`] -//! for creating new parachain blocks. -//! -//! This utility is designed to be composed within any collator consensus algorithm. - -use async_trait::async_trait; -use cumulus_primitives_parachain_inherent::ParachainInherentData; -use sc_basic_authorship::{ProposeArgs, ProposerFactory}; -use sc_block_builder::BlockBuilderApi; -use sc_transaction_pool_api::TransactionPool; -use sp_api::{ApiExt, CallApiAt, ProofRecorderIgnoredNodes, ProvideRuntimeApi}; -use sp_blockchain::HeaderBackend; - -use sp_consensus::{EnableProofRecording, Environment, Proposal}; -use sp_inherents::{InherentData, InherentDataProvider}; -use sp_runtime::{traits::Block as BlockT, Digest}; -use sp_state_machine::StorageProof; -use std::{fmt::Debug, time::Duration}; - -/// Errors that can occur when proposing a parachain block. -#[derive(thiserror::Error, Debug)] -#[error(transparent)] -pub struct Error { - inner: anyhow::Error, -} - -impl Error { - /// Create an error tied to the creation of a proposer. - pub fn proposer_creation(err: impl Into) -> Self { - Error { inner: err.into().context("Proposer Creation") } - } - - /// Create an error tied to the proposing logic itself. 
- pub fn proposing(err: impl Into) -> Self { - Error { inner: err.into().context("Proposing") } - } -} - -/// A type alias for easily referring to the type of a proposal produced by a specific -/// [`ProposerInterface`]. -pub type ProposalOf = Proposal; - -/// An interface for proposers. -#[async_trait] -pub trait ProposerInterface { - /// Propose a collation using the supplied `InherentData` and the provided - /// `ParachainInherentData`. - /// - /// Also specify any required inherent digests, the maximum proposal duration, - /// and the block size limit in bytes. See the documentation on - /// [`sp_consensus::Proposer::propose`] for more details on how to interpret these parameters. - /// - /// The `InherentData` and `Digest` are left deliberately general in order to accommodate - /// all possible collator selection algorithms or inherent creation mechanisms, - /// while the `ParachainInherentData` is made explicit so it will be constructed appropriately. - /// - /// If the `InherentData` passed into this function already has a `ParachainInherentData`, - /// this should throw an error. 
- async fn propose( - &mut self, - parent_header: &Block::Header, - paras_inherent_data: &ParachainInherentData, - other_inherent_data: InherentData, - inherent_digests: Digest, - max_duration: Duration, - block_size_limit: Option, - ignored_nodes_by_proof_recording: Option>, - ) -> Result>, Error>; -} - -#[async_trait] -impl ProposerInterface for ProposerFactory -where - A: TransactionPool + 'static, - C: HeaderBackend + ProvideRuntimeApi + CallApiAt + Send + Sync + 'static, - C::Api: ApiExt + BlockBuilderApi, - Block: sp_runtime::traits::Block, -{ - async fn propose( - &mut self, - parent_header: &Block::Header, - paras_inherent_data: &ParachainInherentData, - other_inherent_data: InherentData, - inherent_digests: Digest, - max_duration: Duration, - block_size_limit: Option, - ignored_nodes_by_proof_recording: Option>, - ) -> Result>, Error> { - let proposer = self - .init(parent_header) - .await - .map_err(|e| Error::proposer_creation(anyhow::Error::new(e)))?; - - let mut inherent_data = other_inherent_data; - paras_inherent_data - .provide_inherent_data(&mut inherent_data) - .await - .map_err(|e| Error::proposing(anyhow::Error::new(e)))?; - - proposer - .propose_block(ProposeArgs { - inherent_data, - inherent_digests, - max_duration, - block_size_limit, - ignored_nodes_by_proof_recording, - }) - .await - .map(Some) - .map_err(|e| Error::proposing(anyhow::Error::new(e)).into()) - } -} diff --git a/cumulus/client/consensus/relay-chain/src/lib.rs b/cumulus/client/consensus/relay-chain/src/lib.rs index 8a23edfb9449e..f9ba897333967 100644 --- a/cumulus/client/consensus/relay-chain/src/lib.rs +++ b/cumulus/client/consensus/relay-chain/src/lib.rs @@ -42,9 +42,8 @@ use cumulus_primitives_core::{relay_chain::Hash as PHash, ParaId, PersistedValid use cumulus_relay_chain_interface::RelayChainInterface; use sc_consensus::{BlockImport, BlockImportParams}; -use sp_consensus::{ - BlockOrigin, EnableProofRecording, Environment, ProofRecording, Proposal, Proposer, -}; +use 
sp_api::ProofRecorder; +use sp_consensus::{BlockOrigin, Environment, Proposal, ProposeArgs, Proposer}; use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; @@ -149,11 +148,7 @@ where RCInterface: RelayChainInterface + Clone, BI: BlockImport + ParachainBlockImportMarker + Send + Sync, PF: Environment + Send + Sync, - PF::Proposer: Proposer< - B, - ProofRecording = EnableProofRecording, - Proof = ::Proof, - >, + PF::Proposer: Proposer, CIDP: CreateInherentDataProviders, { async fn produce_candidate( @@ -174,18 +169,23 @@ where let inherent_data = self.inherent_data(parent.hash(), validation_data, relay_parent).await?; - let Proposal { block, storage_changes, proof } = proposer - .propose( - inherent_data, - Default::default(), - // TODO: Fix this. - Duration::from_millis(500), - // Set the block limit to 50% of the maximum PoV size. - // - // TODO: If we got benchmarking that includes that encapsulates the proof size, - // we should be able to use the maximum pov size. - Some((validation_data.max_pov_size / 2) as usize), - ) + let storage_proof_recorder = ProofRecorder::::default(); + + let propose_args = ProposeArgs { + inherent_data, + // TODO: Fix this. + max_duration: Duration::from_millis(500), + // Set the block limit to 50% of the maximum PoV size. + // + // TODO: If we got benchmarking that includes that encapsulates the proof size, + // we should be able to use the maximum pov size. 
+ block_size_limit: Some((validation_data.max_pov_size / 2) as usize), + storage_proof_recorder: Some(storage_proof_recorder.clone()), + ..Default::default() + }; + + let Proposal { block, storage_changes } = proposer + .propose(propose_args) .await .map_err(|e| tracing::error!(target: LOG_TARGET, error = ?e, "Proposing failed.")) .ok()?; @@ -209,6 +209,8 @@ where return None } + let proof = storage_proof_recorder.drain_storage_proof(); + Some(ParachainCandidate { block, proof }) } } @@ -237,11 +239,7 @@ pub fn build_relay_chain_consensus( where Block: BlockT, PF: Environment + Send + Sync + 'static, - PF::Proposer: Proposer< - Block, - ProofRecording = EnableProofRecording, - Proof = ::Proof, - >, + PF::Proposer: Proposer, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, CIDP: CreateInherentDataProviders + 'static, RCInterface: RelayChainInterface + Clone + 'static, diff --git a/cumulus/polkadot-omni-node/lib/Cargo.toml b/cumulus/polkadot-omni-node/lib/Cargo.toml index fa97d36c68572..216671802bccd 100644 --- a/cumulus/polkadot-omni-node/lib/Cargo.toml +++ b/cumulus/polkadot-omni-node/lib/Cargo.toml @@ -96,7 +96,6 @@ cumulus-client-cli = { workspace = true, default-features = true } cumulus-client-collator = { workspace = true, default-features = true } cumulus-client-consensus-aura = { workspace = true, default-features = true } cumulus-client-consensus-common = { workspace = true, default-features = true } -cumulus-client-consensus-proposer = { workspace = true, default-features = true } cumulus-client-consensus-relay-chain = { workspace = true, default-features = true } cumulus-client-parachain-inherent = { workspace = true, default-features = true } cumulus-client-service = { workspace = true, default-features = true } diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs index 71d065782a9db..92b4f34e86207 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs +++ 
b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs @@ -45,7 +45,6 @@ use cumulus_client_consensus_aura::{ }, equivocation_import_queue::Verifier as EquivocationVerifier, }; -use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; #[allow(deprecated)] use cumulus_client_service::CollatorSybilResistance; @@ -66,6 +65,7 @@ use sc_service::{Configuration, Error, TaskManager}; use sc_telemetry::TelemetryHandle; use sc_transaction_pool::TransactionPoolHandle; use sp_api::ProvideRuntimeApi; +use sp_consensus::{Environment, Proposer}; use sp_core::{traits::SpawnNamed, Pair}; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; @@ -282,7 +282,7 @@ where + Send + Sync + 'static, - Proposer: ProposerInterface + Send + Sync + 'static, + Proposer: Environment + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + Clone + 'static, Spawner: SpawnNamed, { @@ -335,7 +335,7 @@ where node_extra_args: NodeExtraArgs, block_import_handle: SlotBasedBlockImportHandle, ) -> Result<(), Error> { - let proposer = sc_basic_authorship::ProposerFactory::with_proof_recording( + let proposer = sc_basic_authorship::ProposerFactory::new( task_manager.spawn_handle(), client.clone(), transaction_pool, @@ -459,7 +459,7 @@ where node_extra_args: NodeExtraArgs, _: (), ) -> Result<(), Error> { - let proposer = sc_basic_authorship::ProposerFactory::with_proof_recording( + let proposer = sc_basic_authorship::ProposerFactory::new( task_manager.spawn_handle(), client.clone(), transaction_pool, diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index 4a427f4ad6593..80adf90b46f42 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -74,7 +74,6 @@ cumulus-client-cli = { workspace = true, default-features = true } cumulus-client-collator = { workspace = true, default-features = true } cumulus-client-consensus-aura = { workspace = true, 
default-features = true } cumulus-client-consensus-common = { workspace = true, default-features = true } -cumulus-client-consensus-proposer = { workspace = true, default-features = true } cumulus-client-parachain-inherent = { workspace = true, default-features = true } cumulus-client-pov-recovery = { workspace = true, default-features = true } cumulus-client-service = { workspace = true, default-features = true } diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index afdd162a8efbd..4bc8cdd4bf5c2 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -433,7 +433,7 @@ where })?; if let Some(collator_key) = collator_key { - let proposer = sc_basic_authorship::ProposerFactory::with_proof_recording( + let proposer = sc_basic_authorship::ProposerFactory::new( task_manager.spawn_handle(), client.clone(), transaction_pool.clone(), diff --git a/substrate/client/basic-authorship/Cargo.toml b/substrate/client/basic-authorship/Cargo.toml index 59f0c3a645c58..d72e5bac42a94 100644 --- a/substrate/client/basic-authorship/Cargo.toml +++ b/substrate/client/basic-authorship/Cargo.toml @@ -30,6 +30,7 @@ sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } [dev-dependencies] diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index 779a765eb928a..1e87638c83eb4 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -32,17 +32,17 @@ use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder}; use sc_proposer_metrics::{EndProposingReason, MetricsLink as 
PrometheusMetrics}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool, TxInvalidityReportMap}; -use sp_api::{ApiExt, CallApiAt, ProofRecorder, ProvideRuntimeApi}; +use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi}; use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend}; -use sp_consensus::{DisableProofRecording, EnableProofRecording, ProofRecording, Proposal}; +use sp_consensus::{Proposal, ProposeArgs}; use sp_core::traits::SpawnNamed; use sp_inherents::InherentData; use sp_runtime::{ traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT}, - Digest, ExtrinsicInclusionMode, Percent, SaturatedConversion, + ExtrinsicInclusionMode, Percent, SaturatedConversion, }; -use sp_trie::recorder::IgnoredNodes; -use std::{marker::PhantomData, pin::Pin, sync::Arc, time}; +use sp_state_machine::StorageProof; +use std::{pin::Pin, sync::Arc, time}; /// Default block size limit in bytes used by [`Proposer`]. /// @@ -58,7 +58,7 @@ const DEFAULT_SOFT_DEADLINE_PERCENT: Percent = Percent::from_percent(50); const LOG_TARGET: &'static str = "basic-authorship"; /// [`Proposer`] factory. -pub struct ProposerFactory { +pub struct ProposerFactory { spawn_handle: Box, /// The client instance. client: Arc, @@ -82,11 +82,9 @@ pub struct ProposerFactory { telemetry: Option, /// When estimating the block size, should the proof be included? include_proof_in_block_size_estimation: bool, - /// phantom member to pin the `ProofRecording` type. 
- _phantom: PhantomData, } -impl Clone for ProposerFactory { +impl Clone for ProposerFactory { fn clone(&self) -> Self { Self { spawn_handle: self.spawn_handle.clone(), @@ -97,16 +95,12 @@ impl Clone for ProposerFactory { soft_deadline_percent: self.soft_deadline_percent, telemetry: self.telemetry.clone(), include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation, - _phantom: self._phantom, } } } -impl ProposerFactory { +impl ProposerFactory { /// Create a new proposer factory. - /// - /// Proof recording will be disabled when using proposers built by this instance to build - /// blocks. pub fn new( spawn_handle: impl SpawnNamed + 'static, client: Arc, @@ -123,35 +117,6 @@ impl ProposerFactory { telemetry, client, include_proof_in_block_size_estimation: false, - _phantom: PhantomData, - } - } -} - -impl ProposerFactory { - /// Create a new proposer factory with proof recording enabled. - /// - /// Each proposer created by this instance will record a proof while building a block. - /// - /// This will also include the proof into the estimation of the block size. This can be disabled - /// by calling [`ProposerFactory::disable_proof_in_block_size_estimation`]. - pub fn with_proof_recording( - spawn_handle: impl SpawnNamed + 'static, - client: Arc, - transaction_pool: Arc, - prometheus: Option<&PrometheusRegistry>, - telemetry: Option, - ) -> Self { - ProposerFactory { - client, - spawn_handle: Box::new(spawn_handle), - transaction_pool, - metrics: PrometheusMetrics::new(prometheus), - default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT, - soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT, - telemetry, - include_proof_in_block_size_estimation: true, - _phantom: PhantomData, } } @@ -159,9 +124,7 @@ impl ProposerFactory { pub fn disable_proof_in_block_size_estimation(&mut self) { self.include_proof_in_block_size_estimation = false; } -} -impl ProposerFactory { /// Set the default block size limit in bytes. 
/// /// The default value for the block size limit is: @@ -190,7 +153,7 @@ impl ProposerFactory { } } -impl ProposerFactory +impl ProposerFactory where A: TransactionPool + 'static, Block: BlockT, @@ -201,7 +164,7 @@ where &mut self, parent_header: &::Header, now: Box time::Instant + Send + Sync>, - ) -> Proposer { + ) -> Proposer { let parent_hash = parent_header.hash(); info!( @@ -210,7 +173,7 @@ where parent_header.number() ); - let proposer = Proposer::<_, _, _, PR> { + let proposer = Proposer::<_, _, _> { spawn_handle: self.spawn_handle.clone(), client: self.client.clone(), parent_hash, @@ -221,7 +184,6 @@ where default_block_size_limit: self.default_block_size_limit, soft_deadline_percent: self.soft_deadline_percent, telemetry: self.telemetry.clone(), - _phantom: PhantomData, include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation, }; @@ -229,16 +191,15 @@ where } } -impl sp_consensus::Environment for ProposerFactory +impl sp_consensus::Environment for ProposerFactory where A: TransactionPool + 'static, Block: BlockT, C: HeaderBackend + ProvideRuntimeApi + CallApiAt + Send + Sync + 'static, C::Api: ApiExt + BlockBuilderApi, - PR: ProofRecording, { type CreateProposer = future::Ready>; - type Proposer = Proposer; + type Proposer = Proposer; type Error = sp_blockchain::Error; fn init(&mut self, parent_header: &::Header) -> Self::CreateProposer { @@ -247,7 +208,7 @@ where } /// The proposer logic. 
-pub struct Proposer { +pub struct Proposer { spawn_handle: Box, client: Arc, parent_hash: Block::Hash, @@ -259,72 +220,20 @@ pub struct Proposer { include_proof_in_block_size_estimation: bool, soft_deadline_percent: Percent, telemetry: Option, - _phantom: PhantomData, } -impl sp_consensus::Proposer for Proposer +impl sp_consensus::Proposer for Proposer where A: TransactionPool + 'static, Block: BlockT, C: HeaderBackend + ProvideRuntimeApi + CallApiAt + Send + Sync + 'static, C::Api: ApiExt + BlockBuilderApi, - PR: ProofRecording, { - type Proposal = - Pin, Self::Error>> + Send>>; + type Proposal = Pin, Self::Error>> + Send>>; type Error = sp_blockchain::Error; - type ProofRecording = PR; - type Proof = PR::Proof; - fn propose( - self, - inherent_data: InherentData, - inherent_digests: Digest, - max_duration: time::Duration, - block_size_limit: Option, - ) -> Self::Proposal { - Self::propose_block( - self, - ProposeArgs { - inherent_data, - inherent_digests, - max_duration, - block_size_limit, - ignored_nodes_by_proof_recording: None, - }, - ) - .boxed() - } -} - -/// Arguments for [`Proposer::propose`]. -pub struct ProposeArgs { - /// The inherent data to pass to the block production. - pub inherent_data: InherentData, - /// The inherent digests to include in the produced block. - pub inherent_digests: Digest, - /// Max duration for building the block. - pub max_duration: time::Duration, - /// Optional size limit for the produced block. - /// - /// When set, block production ends before hitting this limit. The limit includes the storage - /// proof, when proof recording is activated. - pub block_size_limit: Option, - /// Trie nodes that should not be recorded. - /// - /// Only applies when proof recording is enabled. 
- pub ignored_nodes_by_proof_recording: Option>, -} - -impl Default for ProposeArgs { - fn default() -> Self { - Self { - inherent_data: Default::default(), - inherent_digests: Default::default(), - max_duration: Default::default(), - block_size_limit: None, - ignored_nodes_by_proof_recording: None, - } + fn propose(self, args: ProposeArgs) -> Self::Proposal { + Self::propose_block(self, args).boxed() } } @@ -333,19 +242,18 @@ impl Default for ProposeArgs { /// It allows us to increase block utilization. const MAX_SKIPPED_TRANSACTIONS: usize = 8; -impl Proposer +impl Proposer where A: TransactionPool + 'static, Block: BlockT, C: HeaderBackend + ProvideRuntimeApi + CallApiAt + Send + Sync + 'static, C::Api: ApiExt + BlockBuilderApi, - PR: ProofRecording, { /// Propose a new block. pub async fn propose_block( self, args: ProposeArgs, - ) -> Result, sp_blockchain::Error> { + ) -> Result, sp_blockchain::Error> { let (tx, rx) = oneshot::channel(); let spawn_handle = self.spawn_handle.clone(); @@ -370,26 +278,28 @@ where async fn propose_with( self, - ProposeArgs { + args: ProposeArgs, + ) -> Result, sp_blockchain::Error> { + let ProposeArgs { inherent_data, inherent_digests, max_duration, block_size_limit, - ignored_nodes_by_proof_recording, - }: ProposeArgs, - ) -> Result, sp_blockchain::Error> { + storage_proof_recorder, + extra_extensions, + } = args; // leave some time for evaluation and block finalization (10%) let deadline = (self.now)() + max_duration - max_duration / 10; let block_timer = time::Instant::now(); + // Determine if proof recording was requested + let proof_recording_enabled = storage_proof_recorder.is_some(); + let mut block_builder = BlockBuilderBuilder::new(&*self.client) .on_parent_block(self.parent_hash) .with_parent_block_number(self.parent_number) - .with_proof_recorder(PR::ENABLED.then(|| { - ProofRecorder::::with_ignored_nodes( - ignored_nodes_by_proof_recording.unwrap_or_default(), - ) - })) + .with_proof_recorder(storage_proof_recorder) 
.with_inherent_digests(inherent_digests) + .with_extra_extensions(extra_extensions) .build()?; self.apply_inherents(&mut block_builder, inherent_data)?; @@ -400,14 +310,11 @@ where self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?, ExtrinsicInclusionMode::OnlyInherents => EndProposingReason::TransactionForbidden, }; - let (block, storage_changes, proof) = block_builder.build()?.into_inner(); + let (block, storage_changes) = block_builder.build()?.into_inner(); let block_took = block_timer.elapsed(); - let proof = - PR::into_proof(proof).map_err(|e| sp_blockchain::Error::Application(Box::new(e)))?; - self.print_summary(&block, end_reason, block_took, block_timer.elapsed()); - Ok(Proposal { block, proof, storage_changes }) + Ok(Proposal { block, storage_changes }) } /// Apply all inherents to the block. @@ -1044,13 +951,8 @@ mod tests { // Without a block limit we should include all of them assert_eq!(block.extrinsics().len(), extrinsics_num); - let mut proposer_factory = ProposerFactory::with_proof_recording( - spawner.clone(), - client.clone(), - txpool.clone(), - None, - None, - ); + let mut proposer_factory = + ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap(); @@ -1060,7 +962,7 @@ mod tests { let builder = BlockBuilderBuilder::new(&*client) .on_parent_block(genesis_header.hash()) .with_parent_block_number(0) - .enable_proof_recording() + .with_proof_recorder(Some(Default::default())) .build() .unwrap(); builder.estimate_block_size(true) + extrinsics[0].encoded_size() @@ -1068,6 +970,7 @@ mod tests { let block = block_on(proposer.propose_block(ProposeArgs { max_duration: deadline, block_size_limit: Some(block_limit), + storage_proof_recorder: Some(Default::default()), ..Default::default() })) .map(|r| r.block) diff --git a/substrate/client/basic-authorship/src/lib.rs b/substrate/client/basic-authorship/src/lib.rs index 
b08b66e23aa13..dd347ca7d8591 100644 --- a/substrate/client/basic-authorship/src/lib.rs +++ b/substrate/client/basic-authorship/src/lib.rs @@ -74,5 +74,6 @@ mod basic_authorship; pub use crate::basic_authorship::{ - ProposeArgs, Proposer, ProposerFactory, DEFAULT_BLOCK_SIZE_LIMIT, + Proposer, ProposerFactory, DEFAULT_BLOCK_SIZE_LIMIT, }; +pub use sp_consensus::ProposeArgs; diff --git a/substrate/client/block-builder/Cargo.toml b/substrate/client/block-builder/Cargo.toml index 85bc395179e59..8afc30e89b6c8 100644 --- a/substrate/client/block-builder/Cargo.toml +++ b/substrate/client/block-builder/Cargo.toml @@ -21,9 +21,9 @@ sp-api = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-trie = { workspace = true, default-features = true } [dev-dependencies] sp-state-machine = { workspace = true, default-features = true } diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index 00b82382f5428..267a836ac3a49 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -34,6 +34,7 @@ use sp_api::{ }; use sp_blockchain::{ApplyExtrinsicFailed, Error, HeaderBackend}; use sp_core::traits::CallContext; +use sp_externalities::Extensions; use sp_runtime::{ legacy, traits::{Block as BlockT, Hash, HashingFor, Header as HeaderT, NumberFor, One}, @@ -42,7 +43,6 @@ use sp_runtime::{ use std::marker::PhantomData; pub use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_trie::proof_size_extension::ProofSizeExt; /// A builder for creating an instance of [`BlockBuilder`]. 
pub struct BlockBuilderBuilder<'a, B, C> { @@ -103,6 +103,7 @@ where inherent_digests: Default::default(), parent_block: self.parent_block, parent_number, + extra_extensions: Extensions::new(), }) } @@ -120,6 +121,7 @@ where inherent_digests: Default::default(), parent_block: self.parent_block, parent_number, + extra_extensions: Extensions::new(), } } } @@ -134,24 +136,16 @@ pub struct BlockBuilderBuilderStage2<'a, B: BlockT, C> { inherent_digests: Digest, parent_block: B::Hash, parent_number: NumberFor, + extra_extensions: Extensions, } impl<'a, B: BlockT, C> BlockBuilderBuilderStage2<'a, B, C> { - /// Enable proof recording for the block builder. - pub fn enable_proof_recording(mut self) -> Self { - self.proof_recorder = Some(Default::default()); - self - } - - /// Enable/disable proof recording for the block builder. - pub fn with_proof_recording(mut self, enable: bool) -> Self { - self.proof_recorder = enable.then(|| Default::default()); - self - } - /// Enable/disable proof recording for the block builder using the given proof recorder. - pub fn with_proof_recorder(mut self, proof_recorder: Option>) -> Self { - self.proof_recorder = proof_recorder; + pub fn with_proof_recorder( + mut self, + proof_recorder: impl Into>>, + ) -> Self { + self.proof_recorder = proof_recorder.into(); self } @@ -161,6 +155,12 @@ impl<'a, B: BlockT, C> BlockBuilderBuilderStage2<'a, B, C> { self } + /// Set the extra extensions to be registered with the runtime API during block building. + pub fn with_extra_extensions(mut self, extra_extensions: Extensions) -> Self { + self.extra_extensions = extra_extensions; + self + } + /// Create the instance of the [`BlockBuilder`]. 
pub fn build(self) -> Result, Error> where @@ -173,6 +173,7 @@ impl<'a, B: BlockT, C> BlockBuilderBuilderStage2<'a, B, C> { self.parent_number, self.proof_recorder, self.inherent_digests, + self.extra_extensions, ) } } @@ -180,22 +181,18 @@ impl<'a, B: BlockT, C> BlockBuilderBuilderStage2<'a, B, C> { /// A block that was build by [`BlockBuilder`] plus some additional data. /// /// This additional data includes the `storage_changes`, these changes can be applied to the -/// backend to get the state of the block. Furthermore an optional `proof` is included which -/// can be used to proof that the build block contains the expected data. The `proof` will -/// only be set when proof recording was activated. +/// backend to get the state of the block. pub struct BuiltBlock { /// The actual block that was build. pub block: Block, /// The changes that need to be applied to the backend to get the state of the build block. pub storage_changes: StorageChanges, - /// An optional proof that was recorded while building the block. - pub proof: Option, } impl BuiltBlock { /// Convert into the inner values. 
- pub fn into_inner(self) -> (Block, StorageChanges, Option) { - (self.block, self.storage_changes, self.proof) + pub fn into_inner(self) -> (Block, StorageChanges) { + (self.block, self.storage_changes) } } @@ -229,6 +226,7 @@ where parent_number: NumberFor, proof_recorder: Option>, inherent_digests: Digest, + extra_extensions: Extensions, ) -> Result { let header = <::Header as HeaderT>::new( parent_number + One::one(), @@ -243,10 +241,13 @@ where let mut api = call_api_at.runtime_api(); if let Some(recorder) = proof_recorder { - api.record_proof_with_recorder(recorder.clone()); - api.register_extension(ProofSizeExt::new(recorder)); + api.record_proof_with_recorder(recorder); } + extra_extensions.into_extensions().for_each(|e| { + api.register_extension(e); + }); + api.set_call_context(CallContext::Onchain); let core_version = api @@ -316,7 +317,7 @@ where /// Returns the build `Block`, the changes to the storage and an optional `StorageProof` /// supplied by `self.api`, combined as [`BuiltBlock`]. /// The storage proof will be `Some(_)` when proof recording was enabled. - pub fn build(mut self) -> Result, Error> { + pub fn build(self) -> Result, Error> { let header = self.api.finalize_block(self.parent_hash)?; debug_assert_eq!( @@ -327,8 +328,6 @@ where ), ); - let proof = self.api.extract_proof(); - let state = self.call_api_at.state_at(self.parent_hash)?; let storage_changes = self @@ -336,11 +335,7 @@ where .into_storage_changes(&state, self.parent_hash) .map_err(sp_blockchain::Error::StorageChanges)?; - Ok(BuiltBlock { - block: ::new(header, self.extrinsics), - storage_changes, - proof, - }) + Ok(BuiltBlock { block: ::new(header, self.extrinsics), storage_changes }) } /// Create the inherents for the block. 
@@ -382,7 +377,8 @@ mod tests { use sp_core::Blake2Hasher; use sp_state_machine::Backend; use substrate_test_runtime_client::{ - runtime::ExtrinsicBuilder, DefaultTestClientBuilderExt, TestClientBuilderExt, + runtime::{Block, ExtrinsicBuilder}, + DefaultTestClientBuilderExt, TestClientBuilderExt, }; #[test] @@ -392,16 +388,18 @@ mod tests { let genesis_hash = client.info().best_hash; + let storage_proof_recorder = ProofRecorder::::default(); + let block = BlockBuilderBuilder::new(&client) .on_parent_block(genesis_hash) .with_parent_block_number(0) - .enable_proof_recording() + .with_proof_recorder(storage_proof_recorder.clone()) .build() .unwrap() .build() .unwrap(); - let proof = block.proof.expect("Proof is build on request"); + let proof = storage_proof_recorder.drain_storage_proof(); let genesis_state_root = client.header(genesis_hash).unwrap().unwrap().state_root; let backend = @@ -420,10 +418,12 @@ mod tests { let client = builder.build(); let genesis_hash = client.info().best_hash; + let proof_recorder = ProofRecorder::::default(); + let mut block_builder = BlockBuilderBuilder::new(&client) .on_parent_block(genesis_hash) .with_parent_block_number(0) - .enable_proof_recording() + .with_proof_recorder(proof_recorder.clone()) .build() .unwrap(); @@ -431,12 +431,14 @@ mod tests { let block = block_builder.build().unwrap(); - let proof_with_panic = block.proof.expect("Proof is build on request").encoded_size(); + let proof_with_panic = proof_recorder.drain_storage_proof().encoded_size(); + + let proof_recorder = ProofRecorder::::default(); let mut block_builder = BlockBuilderBuilder::new(&client) .on_parent_block(genesis_hash) .with_parent_block_number(0) - .enable_proof_recording() + .with_proof_recorder(proof_recorder.clone()) .build() .unwrap(); @@ -444,18 +446,20 @@ mod tests { let block = block_builder.build().unwrap(); - let proof_without_panic = block.proof.expect("Proof is build on request").encoded_size(); + let proof_without_panic = 
proof_recorder.drain_storage_proof().encoded_size(); + + let proof_recorder = ProofRecorder::::default(); let block = BlockBuilderBuilder::new(&client) .on_parent_block(genesis_hash) .with_parent_block_number(0) - .enable_proof_recording() + .with_proof_recorder(proof_recorder.clone()) .build() .unwrap() .build() .unwrap(); - let proof_empty_block = block.proof.expect("Proof is build on request").encoded_size(); + let proof_empty_block = proof_recorder.drain_storage_proof().encoded_size(); // Ensure that we rolled back the changes of the panicked transaction. assert!(proof_without_panic > proof_with_panic); diff --git a/substrate/client/consensus/aura/src/lib.rs b/substrate/client/consensus/aura/src/lib.rs index 6e8b6828e2513..f0b68bc13e254 100644 --- a/substrate/client/consensus/aura/src/lib.rs +++ b/substrate/client/consensus/aura/src/lib.rs @@ -554,7 +554,7 @@ mod tests { use sc_keystore::LocalKeystore; use sc_network_test::{Block as TestBlock, *}; use sp_application_crypto::{key_types::AURA, AppCrypto}; - use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; + use sp_consensus::{NoNetwork as DummyOracle, Proposal, ProposeArgs}; use sp_consensus_aura::sr25519::AuthorityPair; use sp_inherents::InherentData; use sp_keyring::sr25519::Keyring; @@ -592,31 +592,21 @@ mod tests { impl Proposer for DummyProposer { type Error = Error; - type Proposal = future::Ready, Error>>; - type ProofRecording = DisableProofRecording; - type Proof = (); - - fn propose( - self, - _: InherentData, - digests: Digest, - _: Duration, - _: Option, - ) -> Self::Proposal { + type Proposal = future::Ready, Error>>; + + fn propose(self, args: ProposeArgs) -> Self::Proposal { let r = BlockBuilderBuilder::new(&*self.0) .on_parent_block(self.0.chain_info().best_hash) .fetch_parent_block_number(&*self.0) .unwrap() - .with_inherent_digests(digests) + .with_inherent_digests(args.inherent_digests) .build() .unwrap() .build(); - future::ready(r.map(|b| Proposal { - block: 
b.block, - proof: (), - storage_changes: b.storage_changes, - })) + future::ready( + r.map(|b| Proposal { block: b.block, storage_changes: b.storage_changes }), + ) } } @@ -864,7 +854,7 @@ mod tests { let head = client.expect_header(client.info().genesis_hash).unwrap(); - let res = worker + let block = worker .on_slot(SlotInfo { slot: 0.into(), ends_at: Instant::now() + Duration::from_secs(100), @@ -872,11 +862,12 @@ mod tests { duration: Duration::from_millis(1000), chain_head: head, block_size_limit: None, + storage_proof_recorder: None, }) .await .unwrap(); // The returned block should be imported and we should be able to get its header by now. - assert!(client.header(res.block.hash()).unwrap().is_some()); + assert!(client.header(block.hash()).unwrap().is_some()); } } diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs index e5f8c26a4488c..c116acf3e83e1 100644 --- a/substrate/client/consensus/babe/src/tests.rs +++ b/substrate/client/consensus/babe/src/tests.rs @@ -28,21 +28,20 @@ use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; use sc_network_test::{Block as TestBlock, *}; use sc_transaction_pool_api::RejectAllTxPool; use sp_application_crypto::key_types::BABE; -use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; +use sp_consensus::{NoNetwork as DummyOracle, Proposal, ProposeArgs}; use sp_consensus_babe::{ inherents::{BabeCreateInherentDataProviders, InherentDataProvider}, make_vrf_sign_data, AllowedSlots, AuthorityId, AuthorityPair, Slot, }; use sp_consensus_slots::SlotDuration; use sp_core::crypto::Pair; -use sp_inherents::InherentData; use sp_keyring::Sr25519Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore}; use sp_runtime::{ generic::{Digest, DigestItem}, traits::Block as BlockT, }; -use std::{cell::RefCell, task::Poll, time::Duration}; +use std::{cell::RefCell, task::Poll}; use substrate_test_runtime_client::DefaultTestClientBuilderExt; type Item = 
DigestItem; @@ -105,7 +104,7 @@ impl DummyProposer { fn propose_with( &mut self, pre_digests: Digest, - ) -> future::Ready, Error>> { + ) -> future::Ready, Error>> { let block_builder = BlockBuilderBuilder::new(&*self.factory.client) .on_parent_block(self.parent_hash) .fetch_parent_block_number(&*self.factory.client) @@ -122,24 +121,16 @@ impl DummyProposer { // mutate the block header according to the mutator. (self.factory.mutator)(&mut block.header, Stage::PreSeal); - future::ready(Ok(Proposal { block, proof: (), storage_changes: Default::default() })) + future::ready(Ok(Proposal { block, storage_changes: Default::default() })) } } impl Proposer for DummyProposer { type Error = Error; - type Proposal = future::Ready, Error>>; - type ProofRecording = DisableProofRecording; - type Proof = (); + type Proposal = future::Ready, Error>>; - fn propose( - mut self, - _: InherentData, - pre_digests: Digest, - _: Duration, - _: Option, - ) -> Self::Proposal { - self.propose_with(pre_digests) + fn propose(mut self, args: ProposeArgs) -> Self::Proposal { + self.propose_with(args.inherent_digests) } } diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml index 4d232f7256cb7..c06cf8db740a8 100644 --- a/substrate/client/consensus/manual-seal/Cargo.toml +++ b/substrate/client/consensus/manual-seal/Cargo.toml @@ -39,9 +39,11 @@ sp-consensus-aura = { workspace = true, default-features = true } sp-consensus-babe = { workspace = true, default-features = true } sp-consensus-slots = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } sp-timestamp = { workspace = true, 
default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/consensus/manual-seal/src/consensus.rs b/substrate/client/consensus/manual-seal/src/consensus.rs index 2cc2b902b1ce9..93177624c4986 100644 --- a/substrate/client/consensus/manual-seal/src/consensus.rs +++ b/substrate/client/consensus/manual-seal/src/consensus.rs @@ -20,6 +20,7 @@ use super::Error; use sc_consensus::BlockImportParams; +use sp_api::StorageProof; use sp_inherents::InherentData; use sp_runtime::{traits::Block as BlockT, Digest}; @@ -30,9 +31,6 @@ pub mod timestamp; /// Consensus data provider, manual seal uses this trait object for authoring blocks valid /// for any runtime. pub trait ConsensusDataProvider: Send + Sync { - /// The proof type. - type Proof; - /// Attempt to create a consensus digest. fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result; @@ -42,6 +40,6 @@ pub trait ConsensusDataProvider: Send + Sync { parent: &B::Header, params: &mut BlockImportParams, inherents: &InherentData, - proof: Self::Proof, + proof: StorageProof, ) -> Result<(), Error>; } diff --git a/substrate/client/consensus/manual-seal/src/consensus/aura.rs b/substrate/client/consensus/manual-seal/src/consensus/aura.rs index 566a2266c701b..4a8ded77e9db1 100644 --- a/substrate/client/consensus/manual-seal/src/consensus/aura.rs +++ b/substrate/client/consensus/manual-seal/src/consensus/aura.rs @@ -22,7 +22,7 @@ use crate::{ConsensusDataProvider, Error}; use sc_client_api::{AuxStore, UsageProvider}; use sc_consensus::BlockImportParams; -use sp_api::ProvideRuntimeApi; +use sp_api::{ProvideRuntimeApi, StorageProof}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_consensus_aura::{ digests::CompatibleDigestItem, @@ -69,8 +69,6 @@ where C::Api: AuraApi, P: Send + Sync, { - type Proof = P; - fn create_digest( &self, _parent: &B::Header, @@ -93,7 +91,7 @@ where _parent: &B::Header, _params: &mut BlockImportParams, _inherents: &InherentData, - _proof: 
Self::Proof, + _proof: StorageProof, ) -> Result<(), Error> { Ok(()) } diff --git a/substrate/client/consensus/manual-seal/src/consensus/babe.rs b/substrate/client/consensus/manual-seal/src/consensus/babe.rs index a68e46f0134d6..156cae8a0c839 100644 --- a/substrate/client/consensus/manual-seal/src/consensus/babe.rs +++ b/substrate/client/consensus/manual-seal/src/consensus/babe.rs @@ -33,7 +33,7 @@ use sp_keystore::KeystorePtr; use std::{marker::PhantomData, sync::Arc}; use sc_consensus::{BlockImportParams, ForkChoiceStrategy, Verifier}; -use sp_api::ProvideRuntimeApi; +use sp_api::{ProvideRuntimeApi, StorageProof}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_consensus_babe::{ digests::{NextEpochDescriptor, PreDigest, SecondaryPlainPreDigest}, @@ -197,8 +197,6 @@ where C::Api: BabeApi, P: Send + Sync, { - type Proof = P; - fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result { let slot = inherents .babe_inherent_data()? @@ -265,7 +263,7 @@ where parent: &B::Header, params: &mut BlockImportParams, inherents: &InherentData, - _proof: Self::Proof, + _proof: StorageProof, ) -> Result<(), Error> { let slot = inherents .babe_inherent_data()? diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs index af9bcc8d56d6f..bba604ddfcfd4 100644 --- a/substrate/client/consensus/manual-seal/src/lib.rs +++ b/substrate/client/consensus/manual-seal/src/lib.rs @@ -87,7 +87,7 @@ where } /// Params required to start the manual sealing authorship task. -pub struct ManualSealParams, TP, SC, CS, CIDP, P> { +pub struct ManualSealParams, TP, SC, CS, CIDP> { /// Block import instance. pub block_import: BI, @@ -108,14 +108,14 @@ pub struct ManualSealParams, TP, SC, C pub select_chain: SC, /// Digest provider for inclusion in blocks. - pub consensus_data_provider: Option>>, + pub consensus_data_provider: Option>>, /// Something that can create the inherent data providers. 
pub create_inherent_data_providers: CIDP, } /// Params required to start the instant sealing authorship task. -pub struct InstantSealParams, TP, SC, CIDP, P> { +pub struct InstantSealParams, TP, SC, CIDP> { /// Block import instance for well. importing blocks. pub block_import: BI, @@ -132,7 +132,7 @@ pub struct InstantSealParams, TP, SC, pub select_chain: SC, /// Digest provider for inclusion in blocks. - pub consensus_data_provider: Option>>, + pub consensus_data_provider: Option>>, /// Something that can create the inherent data providers. pub create_inherent_data_providers: CIDP, @@ -151,7 +151,7 @@ pub struct DelayedFinalizeParams { } /// Creates the background authorship task for the manually seal engine. -pub async fn run_manual_seal( +pub async fn run_manual_seal( ManualSealParams { mut block_import, mut env, @@ -161,19 +161,18 @@ pub async fn run_manual_seal( select_chain, consensus_data_provider, create_inherent_data_providers, - }: ManualSealParams, + }: ManualSealParams, ) where B: BlockT + 'static, BI: BlockImport + Send + Sync + 'static, C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, - E::Proposer: Proposer, + E::Proposer: Proposer, CS: Stream::Hash>> + Unpin + 'static, SC: SelectChain + 'static, TP: TransactionPool, CIDP: CreateInherentDataProviders, - P: codec::Encode + Send + Sync + 'static, { while let Some(command) = commands_stream.next().await { match command { @@ -211,7 +210,7 @@ pub async fn run_manual_seal( /// runs the background authorship task for the instant seal engine. /// instant-seal creates a new block for every transaction imported into /// the transaction pool. 
-pub async fn run_instant_seal( +pub async fn run_instant_seal( InstantSealParams { block_import, env, @@ -220,18 +219,17 @@ pub async fn run_instant_seal( select_chain, consensus_data_provider, create_inherent_data_providers, - }: InstantSealParams, + }: InstantSealParams, ) where B: BlockT + 'static, BI: BlockImport + Send + Sync + 'static, C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, - E::Proposer: Proposer, + E::Proposer: Proposer, SC: SelectChain + 'static, TP: TransactionPool, CIDP: CreateInherentDataProviders, - P: codec::Encode + Send + Sync + 'static, { // instant-seal creates blocks as soon as transactions are imported // into the transaction pool. @@ -261,7 +259,7 @@ pub async fn run_instant_seal( /// /// This function will finalize the block immediately as well. If you don't /// want this behavior use `run_instant_seal` instead. -pub async fn run_instant_seal_and_finalize( +pub async fn run_instant_seal_and_finalize( InstantSealParams { block_import, env, @@ -270,18 +268,17 @@ pub async fn run_instant_seal_and_finalize( select_chain, consensus_data_provider, create_inherent_data_providers, - }: InstantSealParams, + }: InstantSealParams, ) where B: BlockT + 'static, BI: BlockImport + Send + Sync + 'static, C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, - E::Proposer: Proposer, + E::Proposer: Proposer, SC: SelectChain + 'static, TP: TransactionPool, CIDP: CreateInherentDataProviders, - P: codec::Encode + Send + Sync + 'static, { // Creates and finalizes blocks as soon as transactions are imported // into the transaction pool. 
@@ -350,6 +347,7 @@ mod tests { use sc_consensus::ImportedAux; use sc_transaction_pool::{BasicPool, FullChainApi, Options, RevalidationType}; use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionSource}; + use sp_api::StorageProof; use sp_inherents::InherentData; use sp_runtime::generic::{Digest, DigestItem}; use substrate_test_runtime_client::{ @@ -371,8 +369,6 @@ mod tests { B: BlockT, C: ProvideRuntimeApi + Send + Sync, { - type Proof = (); - fn create_digest( &self, _parent: &B::Header, @@ -386,7 +382,7 @@ mod tests { _parent: &B::Header, params: &mut BlockImportParams, _inherents: &InherentData, - _proof: Self::Proof, + _proof: StorageProof, ) -> Result<(), Error> { params.post_digests.push(DigestItem::Other(vec![1])); Ok(()) diff --git a/substrate/client/consensus/manual-seal/src/seal_block.rs b/substrate/client/consensus/manual-seal/src/seal_block.rs index 716e889ec0395..f0bb06bb9286b 100644 --- a/substrate/client/consensus/manual-seal/src/seal_block.rs +++ b/substrate/client/consensus/manual-seal/src/seal_block.rs @@ -19,21 +19,24 @@ //! 
Block sealing utilities use crate::{rpc, ConsensusDataProvider, CreatedBlock, Error}; +use codec::Encode; use futures::prelude::*; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction}; use sc_transaction_pool_api::TransactionPool; -use sp_api::ProvideRuntimeApi; +use sp_api::{ProofRecorder, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; -use sp_consensus::{self, BlockOrigin, Environment, Proposer, SelectChain}; +use sp_consensus::{self, BlockOrigin, Environment, ProposeArgs, Proposer, SelectChain}; +use sp_externalities::Extensions; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use sp_trie::proof_size_extension::ProofSizeExt; use std::{sync::Arc, time::Duration}; /// max duration for creating a proposal in secs pub const MAX_PROPOSAL_DURATION: u64 = 10; /// params for sealing a new block -pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP, CIDP, P> { +pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP, CIDP> { /// if true, empty blocks(without extrinsics) will be created. /// otherwise, will return Error::EmptyTransactionPool. pub create_empty: bool, @@ -52,7 +55,7 @@ pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP /// SelectChain object pub select_chain: &'a SC, /// Digest provider for inclusion in blocks. - pub consensus_data_provider: Option<&'a dyn ConsensusDataProvider>, + pub consensus_data_provider: Option<&'a dyn ConsensusDataProvider>, /// block import object pub block_import: &'a mut BI, /// Something that can create the inherent data providers. 
@@ -60,7 +63,7 @@ pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP } /// seals a new block with the given params -pub async fn seal_block( +pub async fn seal_block( SealBlockParams { create_empty, finalize, @@ -73,17 +76,16 @@ pub async fn seal_block( create_inherent_data_providers, consensus_data_provider: digest_provider, mut sender, - }: SealBlockParams<'_, B, BI, SC, C, E, TP, CIDP, P>, + }: SealBlockParams<'_, B, BI, SC, C, E, TP, CIDP>, ) where B: BlockT, BI: BlockImport + Send + Sync + 'static, C: HeaderBackend + ProvideRuntimeApi, E: Environment, - E::Proposer: Proposer, + E::Proposer: Proposer, TP: TransactionPool, SC: SelectChain, CIDP: CreateInherentDataProviders, - P: codec::Encode + Send + Sync + 'static, { let future = async { if pool.status().ready == 0 && !create_empty { @@ -109,19 +111,29 @@ pub async fn seal_block( let proposer = env.init(&parent).map_err(|err| Error::StringError(err.to_string())).await?; let inherents_len = inherent_data.len(); - let digest = if let Some(digest_provider) = digest_provider { + let inherent_digests = if let Some(digest_provider) = digest_provider { digest_provider.create_digest(&parent, &inherent_data)? 
} else { Default::default() }; + let storage_proof_recorder = ProofRecorder::::default(); + + let mut extra_extensions = Extensions::default(); + // Required by parachains + extra_extensions.register(ProofSizeExt::new(storage_proof_recorder.clone())); + + let propose_args = ProposeArgs { + inherent_data: inherent_data.clone(), + inherent_digests, + max_duration: Duration::from_secs(MAX_PROPOSAL_DURATION), + storage_proof_recorder: Some(storage_proof_recorder.clone()), + extra_extensions, + ..Default::default() + }; + let proposal = proposer - .propose( - inherent_data.clone(), - digest, - Duration::from_secs(MAX_PROPOSAL_DURATION), - None, - ) + .propose(propose_args) .map_err(|err| Error::StringError(err.to_string())) .await?; @@ -129,8 +141,9 @@ pub async fn seal_block( return Err(Error::EmptyTransactionPool) } + let proof = storage_proof_recorder.drain_storage_proof(); + let (header, body) = proposal.block.deconstruct(); - let proof = proposal.proof; let proof_size = proof.encoded_size(); let mut params = BlockImportParams::new(BlockOrigin::Own, header.clone()); params.body = Some(body); @@ -145,7 +158,7 @@ pub async fn seal_block( } // Make sure we return the same post-hash that will be calculated when importing the block - // This is important in case the digest_provider added any signature, seal, ect. + // This is important in case the digest_provider added any signature, seal, etc. 
let mut post_header = header.clone(); post_header.digest_mut().logs.extend(params.post_digests.iter().cloned()); diff --git a/substrate/client/consensus/pow/src/lib.rs b/substrate/client/consensus/pow/src/lib.rs index 0a32332426f47..cc68ab70a438d 100644 --- a/substrate/client/consensus/pow/src/lib.rs +++ b/substrate/client/consensus/pow/src/lib.rs @@ -56,7 +56,9 @@ use sc_consensus::{ use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::HeaderBackend; -use sp_consensus::{Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle}; +use sp_consensus::{ + Environment, Error as ConsensusError, ProposeArgs, Proposer, SelectChain, SyncOracle, +}; use sp_consensus_pow::{Seal, TotalDifficulty, POW_ENGINE_ID}; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; use sp_runtime::{ @@ -493,10 +495,7 @@ pub fn start_mining_worker( create_inherent_data_providers: CIDP, timeout: Duration, build_time: Duration, -) -> ( - MiningHandle>::Proof>, - impl Future, -) +) -> (MiningHandle, impl Future) where Block: BlockT, C: BlockchainEvents + 'static, @@ -589,9 +588,9 @@ where }, }; - let mut inherent_digest = Digest::default(); + let mut inherent_digests = Digest::default(); if let Some(pre_runtime) = &pre_runtime { - inherent_digest.push(DigestItem::PreRuntime(POW_ENGINE_ID, pre_runtime.to_vec())); + inherent_digests.push(DigestItem::PreRuntime(POW_ENGINE_ID, pre_runtime.to_vec())); } let pre_runtime = pre_runtime.clone(); @@ -609,21 +608,29 @@ where }, }; - let proposal = - match proposer.propose(inherent_data, inherent_digest, build_time, None).await { - Ok(x) => x, - Err(err) => { - warn!( - target: LOG_TARGET, - "Unable to propose new block for authoring. 
\ - Creating proposal failed: {}", - err, - ); - continue - }, - }; + let propose_args = ProposeArgs { + inherent_data, + inherent_digests, + max_duration: build_time, + block_size_limit: None, + storage_proof_recorder: None, + extra_extensions: Default::default(), + }; + + let proposal = match proposer.propose(propose_args).await { + Ok(x) => x, + Err(err) => { + warn!( + target: LOG_TARGET, + "Unable to propose new block for authoring. \ + Creating proposal failed: {}", + err, + ); + continue + }, + }; - let build = MiningBuild:: { + let build = MiningBuild:: { metadata: MiningMetadata { best_hash, pre_hash: proposal.block.header().hash(), diff --git a/substrate/client/consensus/pow/src/worker.rs b/substrate/client/consensus/pow/src/worker.rs index 73400136483a7..f5700d91caef3 100644 --- a/substrate/client/consensus/pow/src/worker.rs +++ b/substrate/client/consensus/pow/src/worker.rs @@ -56,11 +56,11 @@ pub struct MiningMetadata { } /// A build of mining, containing the metadata and the block proposal. -pub struct MiningBuild, Proof> { +pub struct MiningBuild> { /// Mining metadata. pub metadata: MiningMetadata, /// Mining proposal. - pub proposal: Proposal, + pub proposal: Proposal, } /// Version of the mining worker. 
@@ -72,16 +72,15 @@ pub struct MiningHandle< Block: BlockT, Algorithm: PowAlgorithm, L: sc_consensus::JustificationSyncLink, - Proof, > { version: Arc, algorithm: Arc, justification_sync_link: Arc, - build: Arc>>>, + build: Arc>>>, block_import: Arc>>, } -impl MiningHandle +impl MiningHandle where Block: BlockT, Algorithm: PowAlgorithm, @@ -112,7 +111,7 @@ where self.increment_version(); } - pub(crate) fn on_build(&self, value: MiningBuild) { + pub(crate) fn on_build(&self, value: MiningBuild) { let mut build = self.build.lock(); *build = Some(value); self.increment_version(); @@ -216,7 +215,7 @@ where } } -impl Clone for MiningHandle +impl Clone for MiningHandle where Block: BlockT, Algorithm: PowAlgorithm, diff --git a/substrate/client/consensus/slots/Cargo.toml b/substrate/client/consensus/slots/Cargo.toml index cc39575efe828..b286a64956962 100644 --- a/substrate/client/consensus/slots/Cargo.toml +++ b/substrate/client/consensus/slots/Cargo.toml @@ -33,6 +33,7 @@ sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } [dev-dependencies] substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/consensus/slots/src/lib.rs b/substrate/client/consensus/slots/src/lib.rs index 4f7e85541777a..a4e55d1460c62 100644 --- a/substrate/client/consensus/slots/src/lib.rs +++ b/substrate/client/consensus/slots/src/lib.rs @@ -38,10 +38,11 @@ use log::{debug, info, warn}; use sc_consensus::{BlockImport, JustificationSyncLink}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO, CONSENSUS_WARN}; use sp_arithmetic::traits::BaseArithmetic; -use sp_consensus::{Proposal, Proposer, SelectChain, SyncOracle}; +use sp_consensus::{Proposal, ProposeArgs, Proposer, SelectChain, SyncOracle}; use 
sp_consensus_slots::{Slot, SlotDuration}; use sp_inherents::CreateInherentDataProviders; use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT}; +use sp_state_machine::StorageProof; use std::{ fmt::Debug, ops::Deref, @@ -55,26 +56,17 @@ const LOG_TARGET: &str = "slots"; /// See [`sp_state_machine::StorageChanges`] for more information. pub type StorageChanges = sp_state_machine::StorageChanges>; -/// The result of [`SlotWorker::on_slot`]. -#[derive(Debug, Clone)] -pub struct SlotResult { - /// The block that was built. - pub block: Block, - /// The storage proof that was recorded while building the block. - pub storage_proof: Proof, -} - /// A worker that should be invoked at every new slot. /// /// The implementation should not make any assumptions of the slot being bound to the time or /// similar. The only valid assumption is that the slot number is always increasing. #[async_trait::async_trait] -pub trait SlotWorker { +pub trait SlotWorker { /// Called when a new slot is triggered. /// /// Returns a future that resolves to a [`SlotResult`] iff a block was successfully built in /// the slot. Otherwise `None` is returned. - async fn on_slot(&mut self, slot_info: SlotInfo) -> Option>; + async fn on_slot(&mut self, slot_info: SlotInfo) -> Option; } /// A skeleton implementation for `SlotWorker` which tries to claim a slot at @@ -185,7 +177,7 @@ pub trait SimpleSlotWorker { claim: &Self::Claim, slot_info: SlotInfo, end_proposing_at: Instant, - ) -> Option>::Proof>> { + ) -> Option> { let slot = slot_info.slot; let telemetry = self.telemetry(); let log_target = self.logging_target(); @@ -200,13 +192,17 @@ pub trait SimpleSlotWorker { // deadline our production to 98% of the total time left for proposing. As we deadline // the proposing below to the same total time left, the 2% margin should be enough for // the result to be returned. 
+ let propose_args = ProposeArgs { + inherent_data, + inherent_digests: sp_runtime::generic::Digest { logs }, + max_duration: proposing_remaining_duration.mul_f32(0.98), + block_size_limit: slot_info.block_size_limit, + storage_proof_recorder: slot_info.storage_proof_recorder, + ..Default::default() + }; + let proposing = proposer - .propose( - inherent_data, - sp_runtime::generic::Digest { logs }, - proposing_remaining_duration.mul_f32(0.98), - slot_info.block_size_limit, - ) + .propose(propose_args) .map_err(|e| sp_consensus::Error::ClientImport(e.to_string())); let proposal = match futures::future::select( @@ -283,10 +279,7 @@ pub trait SimpleSlotWorker { } /// Implements [`SlotWorker::on_slot`]. - async fn on_slot( - &mut self, - slot_info: SlotInfo, - ) -> Option>::Proof>> + async fn on_slot(&mut self, slot_info: SlotInfo) -> Option where Self: Sync, { @@ -377,7 +370,7 @@ pub trait SimpleSlotWorker { let proposal = self.propose(proposer, &claim, slot_info, end_proposing_at).await?; - let (block, storage_proof) = (proposal.block, proposal.proof); + let block = proposal.block; let (header, body) = block.deconstruct(); let header_num = *header.number(); let header_hash = header.hash(); @@ -444,7 +437,7 @@ pub trait SimpleSlotWorker { }, } - Some(SlotResult { block: B::new(header, body), storage_proof }) + Some(B::new(header, body)) } } @@ -456,13 +449,10 @@ pub trait SimpleSlotWorker { pub struct SimpleSlotWorkerToSlotWorker(pub T); #[async_trait::async_trait] -impl + Send + Sync, B: BlockT> - SlotWorker>::Proof> for SimpleSlotWorkerToSlotWorker +impl + Send + Sync, B: BlockT> SlotWorker + for SimpleSlotWorkerToSlotWorker { - async fn on_slot( - &mut self, - slot_info: SlotInfo, - ) -> Option>::Proof>> { + async fn on_slot(&mut self, slot_info: SlotInfo) -> Option { self.0.on_slot(slot_info).await } } @@ -503,7 +493,7 @@ impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G, H, I, J); /// /// Every time a new slot is triggered, `worker.on_slot` is called 
and the future it returns is /// polled until completion, unless we are major syncing. -pub async fn start_slot_worker( +pub async fn start_slot_worker( slot_duration: SlotDuration, client: C, mut worker: W, @@ -512,7 +502,7 @@ pub async fn start_slot_worker( ) where B: BlockT, C: SelectChain, - W: SlotWorker, + W: SlotWorker, SO: SyncOracle + Send, CIDP: CreateInherentDataProviders + Send + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send, @@ -829,6 +819,7 @@ mod test { Default::default(), ), block_size_limit: None, + storage_proof_recorder: None, } } diff --git a/substrate/client/consensus/slots/src/slots.rs b/substrate/client/consensus/slots/src/slots.rs index c0b412e8ad5b0..4a4d3ef10da6c 100644 --- a/substrate/client/consensus/slots/src/slots.rs +++ b/substrate/client/consensus/slots/src/slots.rs @@ -23,7 +23,8 @@ use super::{InherentDataProviderExt, Slot, LOG_TARGET}; use sp_consensus::{SelectChain, SyncOracle}; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT}; +use sp_trie::recorder::Recorder; use futures_timer::Delay; use std::time::{Duration, Instant}; @@ -62,6 +63,8 @@ pub struct SlotInfo { /// /// For more information see [`Proposer::propose`](sp_consensus::Proposer::propose). pub block_size_limit: Option, + /// Optional [`StorageProofRecorder`] to use when build the block. + pub storage_proof_recorder: Option>>, } impl SlotInfo { @@ -82,6 +85,29 @@ impl SlotInfo { chain_head, block_size_limit, ends_at: Instant::now() + time_until_next_slot(duration), + storage_proof_recorder: None, + } + } + + /// Create a new [`SlotInfo`] with a storage proof recorder. + /// + /// `ends_at` is calculated using `timestamp` and `duration`. 
+ pub fn with_storage_proof_recorder( + slot: Slot, + create_inherent_data: Box, + duration: Duration, + chain_head: B::Header, + block_size_limit: Option, + storage_proof_recorder: Recorder>, + ) -> Self { + Self { + slot, + create_inherent_data, + duration, + chain_head, + block_size_limit, + ends_at: Instant::now() + time_until_next_slot(duration), + storage_proof_recorder: Some(storage_proof_recorder), } } } diff --git a/substrate/primitives/api/test/tests/runtime_calls.rs b/substrate/primitives/api/test/tests/runtime_calls.rs index 33cb72249a826..4f663dcd4f99a 100644 --- a/substrate/primitives/api/test/tests/runtime_calls.rs +++ b/substrate/primitives/api/test/tests/runtime_calls.rs @@ -24,7 +24,7 @@ use std::{ }; use sc_block_builder::BlockBuilderBuilder; -use sp_api::{ApiExt, Core, ProvideRuntimeApi}; +use sp_api::{ApiExt, Core, ProofRecorder, ProvideRuntimeApi}; use sp_externalities::{decl_extension, TransactionType}; use sp_runtime::{ traits::{HashingFor, Header as HeaderT}, @@ -111,21 +111,23 @@ fn record_proof_works() { } .into_unchecked_extrinsic(); + let storage_proof_recorder = ProofRecorder::::default(); + // Build the block and record proof let mut builder = BlockBuilderBuilder::new(&client) .on_parent_block(client.chain_info().best_hash) .with_parent_block_number(client.chain_info().best_number) - .enable_proof_recording() + .with_proof_recorder(storage_proof_recorder.clone()) .build() .unwrap(); builder.push(transaction.clone()).unwrap(); - let (block, _, proof) = builder.build().expect("Bake block").into_inner(); - let backend = create_proof_check_backend::>( - storage_root, - proof.expect("Proof was generated"), - ) - .expect("Creates proof backend."); + let (block, _) = builder.build().expect("Bake block").into_inner(); + + let proof = storage_proof_recorder.drain_storage_proof(); + + let backend = create_proof_check_backend::>(storage_root, proof) + .expect("Creates proof backend."); // Use the proof backend to execute `execute_block`. 
let mut overlay = Default::default(); diff --git a/substrate/primitives/consensus/common/Cargo.toml b/substrate/primitives/consensus/common/Cargo.toml index 376ef8c04c231..caf995ed98070 100644 --- a/substrate/primitives/consensus/common/Cargo.toml +++ b/substrate/primitives/consensus/common/Cargo.toml @@ -20,9 +20,12 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = { workspace = true } futures = { features = ["thread-pool"], workspace = true } log = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } thiserror = { workspace = true } [dev-dependencies] diff --git a/substrate/primitives/consensus/common/src/lib.rs b/substrate/primitives/consensus/common/src/lib.rs index 37636b34b03df..ceeca43b0a0ff 100644 --- a/substrate/primitives/consensus/common/src/lib.rs +++ b/substrate/primitives/consensus/common/src/lib.rs @@ -24,6 +24,8 @@ use std::{sync::Arc, time::Duration}; use futures::prelude::*; +use sp_api::ProofRecorder; +use sp_externalities::Extensions; use sp_runtime::{ traits::{Block as BlockT, HashingFor}, Digest, @@ -83,7 +85,7 @@ pub trait Environment { + Unpin + 'static; /// Error which can occur upon creation. - type Error: From + std::error::Error + 'static; + type Error: From + Send + Sync + std::error::Error + 'static; /// Initialize the proposal logic on top of a specific header. Provide /// the authorities at that header. @@ -91,83 +93,47 @@ pub trait Environment { } /// A proposal that is created by a [`Proposer`]. -pub struct Proposal { +pub struct Proposal { /// The block that was build. pub block: Block, - /// Proof that was recorded while building the block. 
- pub proof: Proof, /// The storage changes while building this block. pub storage_changes: sp_state_machine::StorageChanges>, } -/// Error that is returned when [`ProofRecording`] requested to record a proof, -/// but no proof was recorded. -#[derive(Debug, thiserror::Error)] -#[error("Proof should be recorded, but no proof was provided.")] -pub struct NoProofRecorded; - -/// A trait to express the state of proof recording on type system level. -/// -/// This is used by [`Proposer`] to signal if proof recording is enabled. This can be used by -/// downstream users of the [`Proposer`] trait to enforce that proof recording is activated when -/// required. The only two implementations of this trait are [`DisableProofRecording`] and -/// [`EnableProofRecording`]. -/// -/// This trait is sealed and can not be implemented outside of this crate! -pub trait ProofRecording: Send + Sync + private::Sealed + 'static { - /// The proof type that will be used internally. - type Proof: Send + Sync + 'static; - /// Is proof recording enabled? - const ENABLED: bool; - /// Convert the given `storage_proof` into [`Self::Proof`]. +/// Arguments for [`Proposer::propose`]. +pub struct ProposeArgs { + /// The inherent data to pass to the block production. + pub inherent_data: InherentData, + /// The inherent digests to include in the produced block. + pub inherent_digests: Digest, + /// Max duration for building the block. + pub max_duration: Duration, + /// Optional size limit for the produced block. /// - /// Internally Substrate uses `Option` to express the both states of proof - /// recording (for now) and as [`Self::Proof`] is some different type, we need to provide a - /// function to convert this value. + /// When set, block production ends before hitting this limit. The limit includes the storage + /// proof, when proof recording is activated. + pub block_size_limit: Option, + /// Optional proof recorder for recording storage proofs during block production. 
/// - /// If the proof recording was requested, but `None` is given, this will return - /// `Err(NoProofRecorded)`. - fn into_proof(storage_proof: Option) -> Result; + /// When `Some`, a storage proof will be recorded and included in the proposal. + pub storage_proof_recorder: Option>, + /// Extra extensions for the runtime environment. + pub extra_extensions: Extensions, } -/// Express that proof recording is disabled. -/// -/// For more information see [`ProofRecording`]. -pub struct DisableProofRecording; - -impl ProofRecording for DisableProofRecording { - type Proof = (); - const ENABLED: bool = false; - - fn into_proof(_: Option) -> Result { - Ok(()) +impl Default for ProposeArgs { + fn default() -> Self { + Self { + inherent_data: Default::default(), + inherent_digests: Default::default(), + max_duration: Default::default(), + block_size_limit: Default::default(), + storage_proof_recorder: Default::default(), + extra_extensions: Default::default(), + } } } -/// Express that proof recording is enabled. -/// -/// For more information see [`ProofRecording`]. -pub struct EnableProofRecording; - -impl ProofRecording for EnableProofRecording { - type Proof = sp_state_machine::StorageProof; - const ENABLED: bool = true; - - fn into_proof(proof: Option) -> Result { - proof.ok_or(NoProofRecorded) - } -} - -/// Provides `Sealed` trait to prevent implementing trait [`ProofRecording`] outside of this crate. -mod private { - /// Special trait that prevents the implementation of [`super::ProofRecording`] outside of this - /// crate. - pub trait Sealed {} - - impl Sealed for super::DisableProofRecording {} - impl Sealed for super::EnableProofRecording {} -} - /// Logic for a proposer. /// /// This will encapsulate creation and evaluation of proposals at a specific @@ -176,41 +142,19 @@ mod private { /// Proposers are generic over bits of "consensus data" which are engine-specific. pub trait Proposer { /// Error type which can occur when proposing or evaluating. 
- type Error: From + std::error::Error + 'static; + type Error: From + Send + Sync + std::error::Error + 'static; /// Future that resolves to a committed proposal with an optional proof. - type Proposal: Future, Self::Error>> - + Send - + Unpin - + 'static; - /// The supported proof recording by the implementor of this trait. See [`ProofRecording`] - /// for more information. - type ProofRecording: self::ProofRecording + Send + Sync + 'static; - /// The proof type used by [`Self::ProofRecording`]. - type Proof: Send + Sync + 'static; + type Proposal: Future, Self::Error>> + Send + Unpin + 'static; /// Create a proposal. /// - /// Gets the `inherent_data` and `inherent_digests` as input for the proposal. Additionally - /// a maximum duration for building this proposal is given. If building the proposal takes - /// longer than this maximum, the proposal will be very likely discarded. - /// - /// If `block_size_limit` is given, the proposer should push transactions until the block size - /// limit is hit. Depending on the `finalize_block` implementation of the runtime, it probably - /// incorporates other operations (that are happening after the block limit is hit). So, - /// when the block size estimation also includes a proof that is recorded alongside the block - /// production, the proof can still grow. This means that the `block_size_limit` should not be - /// the hard limit of what is actually allowed. + /// Takes a [`ProposeArgs`] struct containing all the necessary parameters for block production + /// including inherent data, digests, duration limits, storage proof recorder, and extensions. /// /// # Return /// /// Returns a future that resolves to a [`Proposal`] or to [`Error`]. 
- fn propose( - self, - inherent_data: InherentData, - inherent_digests: Digest, - max_duration: Duration, - block_size_limit: Option, - ) -> Self::Proposal; + fn propose(self, args: ProposeArgs) -> Self::Proposal; } /// An oracle for when major synchronization work is being undertaken. diff --git a/substrate/primitives/externalities/src/extensions.rs b/substrate/primitives/externalities/src/extensions.rs index 9e6b64e26a8b1..5a02534ba2750 100644 --- a/substrate/primitives/externalities/src/extensions.rs +++ b/substrate/primitives/externalities/src/extensions.rs @@ -169,6 +169,14 @@ macro_rules! decl_extension { )* } + impl $ext_name { + /// Returns the `TypeId` of this extension. + #[allow(dead_code)] + pub fn type_id() -> core::any::TypeId { + core::any::TypeId::of::() + } + } + impl core::ops::Deref for $ext_name { type Target = $inner; @@ -205,6 +213,14 @@ macro_rules! decl_extension { core::any::Any::type_id(self) } } + + impl $ext_name { + /// Returns the `TypeId` of this extension. + #[allow(dead_code)] + pub fn type_id() -> core::any::TypeId { + core::any::TypeId::of::() + } + } } } @@ -258,6 +274,11 @@ impl Extensions { self.extensions.insert(type_id, Box::new(ext)); } + /// Returns `true` if an extension for the given `type_id` is already registered. + pub fn is_registered(&self, type_id: TypeId) -> bool { + self.extensions.contains_key(&type_id) + } + /// Register extension `extension` using the given `type_id`. pub fn register_with_type_id( &mut self, @@ -315,6 +336,11 @@ impl Extensions { pub fn rollback_transaction(&mut self, ty: TransactionType) { self.extensions.values_mut().for_each(|e| e.rollback_transaction(ty)); } + + /// Returns an iterator that returns all stored extensions. 
+ pub fn into_extensions(self) -> impl Iterator> { + self.extensions.into_values() + } } impl Extend for Extensions { diff --git a/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs b/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs index 49e8abb507a6c..f14b11d7f9e6e 100644 --- a/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs +++ b/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs @@ -20,7 +20,7 @@ use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder, BuiltBlock}; use sc_cli::{Error, Result}; use sc_client_api::UsageProvider; -use sp_api::{ApiExt, CallApiAt, Core, ProvideRuntimeApi}; +use sp_api::{ApiExt, CallApiAt, Core, ProofRecorder, ProvideRuntimeApi}; use sp_blockchain::{ ApplyExtrinsicFailed::Validity, Error::{ApplyExtrinsicFailed, RuntimeApiError}, @@ -137,11 +137,14 @@ where ext_builder: Option<&dyn ExtrinsicBuilder>, ) -> Result<(Block, Option, u64)> { let chain = self.client.usage_info().chain; + + let storage_proof_recorder = self.record_proof.then(|| ProofRecorder::::default()); + let mut builder = BlockBuilderBuilder::new(&*self.client) .on_parent_block(chain.best_hash) .with_parent_block_number(chain.best_number) .with_inherent_digests(Digest { logs: self.digest_items.clone() }) - .with_proof_recording(self.record_proof) + .with_proof_recorder(storage_proof_recorder.clone()) .build()?; // Create and insert the inherents. @@ -175,7 +178,9 @@ where None => None, }; - let BuiltBlock { block, proof, .. } = builder.build()?; + let BuiltBlock { block, .. 
} = builder.build()?; + + let proof = storage_proof_recorder.map(|r| r.drain_storage_proof()); Ok(( block, diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index 4a7ce8a0f1f17..54fbc636e7d0c 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -843,7 +843,6 @@ node = [ "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", - "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", "cumulus-client-network", "cumulus-client-parachain-inherent", @@ -2183,10 +2182,6 @@ default-features = false optional = true path = "../cumulus/client/consensus/common" -[dependencies.cumulus-client-consensus-proposer] -default-features = false -optional = true -path = "../cumulus/client/consensus/proposer" [dependencies.cumulus-client-consensus-relay-chain] default-features = false diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index d25ab21ac9388..121c18b5f6296 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -92,9 +92,6 @@ pub use cumulus_client_consensus_aura; #[cfg(feature = "cumulus-client-consensus-common")] pub use cumulus_client_consensus_common; -/// A Substrate `Proposer` for building parachain blocks. -#[cfg(feature = "cumulus-client-consensus-proposer")] -pub use cumulus_client_consensus_proposer; /// The relay-chain provided consensus algorithm. 
#[cfg(feature = "cumulus-client-consensus-relay-chain")] From 57f97e9f0f7bb185a568bc814bf46493188e5f28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sun, 28 Sep 2025 23:32:07 +0200 Subject: [PATCH 124/312] Split up block building and import --- cumulus/client/consensus/aura/src/collator.rs | 38 +++++++++++++++---- .../slot_based/block_builder_task.rs | 19 ++++++---- 2 files changed, 43 insertions(+), 14 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index e57337dec9c14..9e37920b64634 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -233,8 +233,25 @@ where /// Build and import a parachain block using the given parameters. pub async fn build_block_and_import( &mut self, - mut params: BuildBlockAndImportParams<'_, Block, P>, + params: BuildBlockAndImportParams<'_, Block, P>, ) -> Result>, Box> { + let Some((built_block, import_block)) = self.build_block(params).await? else { + return Ok(None) + }; + + self.import_block(import_block).await?; + + Ok(Some(built_block)) + } + + /// Build a parachain block using the given parameters. + pub async fn build_block( + &mut self, + mut params: BuildBlockAndImportParams<'_, Block, P>, + ) -> Result< + Option<(BuiltBlock, BlockImportParams)>, + Box, + > { let mut digest = params.additional_pre_digest; digest.push(params.slot_claim.pre_digest.clone()); @@ -304,14 +321,21 @@ where return Ok(None) }; - self.block_import - .import_block(sealed_importable) - .map_err(|e| Box::new(e) as Box) - .await?; - let proof = storage_proof_recorder.drain_storage_proof(); - Ok(Some(BuiltBlock { block, proof, backend_transaction })) + Ok(Some((BuiltBlock { block, proof, backend_transaction }, sealed_importable))) + } + + /// Import the given `import_block`. 
+ pub async fn import_block( + &mut self, + import_block: BlockImportParams, + ) -> Result<(), Box> { + self.block_import + .import_block(import_block) + .map_err(|e| Box::new(e) as Box) + .await + .map(drop) } /// Propose, seal, import a block and packaging it into a collation. diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index bf31fae62cf9d..814801e8039d2 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -541,8 +541,8 @@ where let mut extra_extensions = Extensions::default(); extra_extensions.register(ProofSizeExt::new(storage_proof_recorder.clone())); - let Ok(Some(res)) = collator - .build_block_and_import(BuildBlockAndImportParams { + let Ok(Some((built_block, import_block))) = collator + .build_block(BuildBlockAndImportParams { parent_header: &parent_header, slot_claim, additional_pre_digest: vec![ @@ -566,14 +566,19 @@ where return Ok(None); }; - parent_hash = res.block.header().hash(); - parent_header = res.block.header().clone(); + if let Err(error) = collator.import_block(import_block).await { + tracing::error!(target: crate::LOG_TARGET, ?error, "Failed to import built block."); + return Ok(None); + } + + parent_hash = built_block.block.header().hash(); + parent_header = built_block.block.header().clone(); // Announce the newly built block to our peers. 
collator.collator_service().announce_block(parent_hash, None); - blocks.push(res.block); - proofs.push(res.proof); + blocks.push(built_block.block); + proofs.push(built_block.proof); if CumulusDigestItem::contains_use_full_core(parent_header.digest()) { tracing::trace!( @@ -588,7 +593,7 @@ where ignored_nodes.extend(IgnoredNodes::from_storage_proof::>( proofs.last().expect("We just pushed the proof into the vector; qed"), )); - ignored_nodes.extend(IgnoredNodes::from_memory_db(res.backend_transaction)); + ignored_nodes.extend(IgnoredNodes::from_memory_db(built_block.backend_transaction)); // If there is still time left for the block in the slot, we sleep the rest of the time. // This ensures that we have some steady block rate. From 2b8e9558ce51f64cff3beec4fe89e04dc35df7d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 29 Sep 2025 21:15:23 +0200 Subject: [PATCH 125/312] Introduce `tracing_block` test --- cumulus/zombienet/zombienet-sdk/run.sh | 2 +- .../tests/zombie_ci/block_bundling/mod.rs | 1 + .../zombie_ci/block_bundling/tracing_block.rs | 181 ++++++++++++++++++ 3 files changed, 183 insertions(+), 1 deletion(-) create mode 100644 cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs diff --git a/cumulus/zombienet/zombienet-sdk/run.sh b/cumulus/zombienet/zombienet-sdk/run.sh index 40d5bafc6c248..afabf28b668a1 100755 --- a/cumulus/zombienet/zombienet-sdk/run.sh +++ b/cumulus/zombienet/zombienet-sdk/run.sh @@ -6,4 +6,4 @@ cargo build --release -p cumulus-test-service --bin test-parachain -p polkadot - RELEASE_DIR=$(dirname "$(cargo locate-project --workspace --message-format plain)")/target/release export PATH=$RELEASE_DIR:$PATH -ZOMBIE_PROVIDER=native cargo test --release -p cumulus-zombienet-sdk-tests --features zombie-ci "$@" +ZOMBIE_PROVIDER=native cargo test --release -p cumulus-zombienet-sdk-tests --features zombie-ci -- --test-threads 1 "$@" diff --git 
a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs index 4f9fe31f04812..503b4472c03e4 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs @@ -19,3 +19,4 @@ mod basic; mod runtime_upgrade; mod three_cores_glutton; mod full_core_usage_scenarios; +mod tracing_block; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs new file mode 100644 index 0000000000000..d2e1cd2659527 --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs @@ -0,0 +1,181 @@ +// This file is part of Cumulus. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::utils::initialize_network; +use anyhow::anyhow; +use cumulus_zombienet_sdk_helpers::submit_extrinsic_and_wait_for_finalization_success; +use futures::stream::StreamExt; +use serde_json::json; +use zombienet_sdk::{ + subxt::{ + backend::rpc::RpcClient, + dynamic::Value, + ext::{scale_value::value, subxt_rpcs::rpc_params}, + OnlineClient, PolkadotConfig, + }, + subxt_signer::sr25519::dev, + NetworkConfig, NetworkConfigBuilder, +}; + +const PARA_ID: u32 = 2400; + +/// A test that sends a transfer transaction, waits for it to be finalized, and then runs the +/// tracing_block rpc for the block containing the transfer. +#[tokio::test(flavor = "multi_thread")] +async fn block_bundling_tracing_block() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + log::info!("Spawning network"); + let config = build_network_config().await?; + let network = initialize_network(config).await?; + + let para_node = network.get_node("collator-0")?; + let para_client: OnlineClient = para_node.wait_client().await?; + + // Wait for a few blocks to ensure the network is stable + log::info!("Waiting for network to stabilize"); + let mut finalized_stream = para_client.blocks().subscribe_finalized().await?; + let mut block_count = 0u32; + + while let Some(block) = finalized_stream.next().await { + let _block = block?; + block_count += 1; + if block_count >= 3 { + log::info!("Network stabilized after 3 blocks"); + break; + } + } + + // Create a balance transfer transaction + let alice = dev::alice(); + let bob = dev::bob().public_key(); + let transfer_amount = 1_000_000_000_000u128; // 1 unit with 12 decimals + + log::info!("Creating balance transfer transaction"); + let transfer_call = zombienet_sdk::subxt::dynamic::tx( + "Balances", + "transfer_allow_death", + vec![Value::unnamed_variant("Id", [Value::from_bytes(bob)]), Value::u128(transfer_amount)], + ); + + // Submit the 
transfer transaction and wait for finalization + log::info!("Submitting transfer transaction and waiting for finalization"); + let transfer_block_hash = + submit_extrinsic_and_wait_for_finalization_success(¶_client, &transfer_call, &alice) + .await?; + + log::info!("Transfer transaction finalized in block: {:?}", transfer_block_hash); + + // Get RPC client to make tracing_block call + let rpc_client = para_node.rpc().await?; + + log::info!("Calling tracing_block RPC for the block containing the transfer"); + + // Make the tracing_block RPC call for the block containing our transfer + let trace_result: serde_json::Value = rpc_client + .request( + "state_traceBlock", + rpc_params![ + format!("{:?}", transfer_block_hash), + None::, + None::, + None:: + ], + ) + .await?; + + log::info!("Successfully received tracing result for transfer block"); + + // Verify that we got a valid response (non-empty) + if trace_result.is_null() { + return Err(anyhow!("tracing_block returned null result")); + } + + // Verify the trace contains information about our transfer + if let Some(trace_obj) = trace_result.as_object() { + log::info!("Trace result contains {} top-level keys", trace_obj.len()); + + // Log some details about the trace for debugging + if let Some(storage_changes) = trace_obj.get("storageChanges") { + log::info!("Found storage changes in trace"); + } + if let Some(block_trace) = trace_obj.get("block") { + log::info!("Found block trace information"); + } + } + + log::info!("Block bundling tracing test with transfer finished successfully"); + Ok(()) +} + +async fn build_network_config() -> Result { + // images are not relevant for `native`, but we leave it here in case we use `k8s` some day + let images = zombienet_sdk::environment::get_images_from_env(); + log::info!("Using images: {images:?}"); + + NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + 
.with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=trace").into()]) + .with_default_resources(|resources| { + // These settings are applicable only for `k8s` provider. + // Leaving them in case we switch to `k8s` some day. + resources.with_request_cpu(4).with_request_memory("4G") + }) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "num_cores": 7, + "max_validators_per_core": 1 + } + } + } + })) + // Have to set a `with_node` outside of the loop below, so that `r` has the right + // type. + .with_node(|node| node.with_name("validator-0")); + (1..9).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) + }) + .with_parachain(|p| { + p.with_id(PARA_ID) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("block-bundling") + .with_default_args(vec![ + ("--authoring").into(), + ("slot-based").into(), + ("-lparachain=debug,aura=trace").into(), + ("--enable-offchain-indexing=true").into(), + ]) + .with_collator(|n| n.with_name("collator-0")) + }) + .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { + Ok(val) => global_settings.with_base_dir(val), + _ => global_settings, + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + }) +} From d9d5a9d1ce95f83b5e53ddb9a0b530b491d48f30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 29 Sep 2025 22:32:37 +0200 Subject: [PATCH 126/312] `trace_block`: Support overwriting the `execute_block` This is required for example for parachains that require special extensions to be registered (e.g. `ProofSizeExt`) to succeed the block execution. This pull request changes the signature of `spawn_tasks` which now requires a `tracing_execute_block` parameter. 
If your chain is a solochain, just set the parameter to `None` or overwrite it if you need any special handling. For parachain builders, this value can be set to `cumulus_service::ParachainTracingExecuteBlock`. --- Cargo.lock | 2 + cumulus/client/service/Cargo.toml | 3 + cumulus/client/service/src/lib.rs | 35 ++++- .../polkadot-omni-node/lib/src/common/spec.rs | 6 +- .../lib/src/nodes/manual_seal.rs | 4 + cumulus/test/service/src/lib.rs | 4 +- polkadot/node/service/src/builder/mod.rs | 1 + substrate/bin/node/cli/src/service.rs | 1 + substrate/client/rpc/src/state/mod.rs | 12 +- substrate/client/rpc/src/state/state_full.rs | 15 +- substrate/client/rpc/src/state/tests.rs | 22 +-- substrate/client/service/src/builder.rs | 133 ++++++++++++------ substrate/client/tracing/src/block/mod.rs | 59 +++++++- .../frame/revive/dev-node/node/src/service.rs | 1 + templates/minimal/node/src/service.rs | 1 + templates/parachain/node/src/service.rs | 3 +- templates/solochain/node/src/service.rs | 1 + 17 files changed, 232 insertions(+), 71 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 46fc771b455cd..b487370c0218e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4566,6 +4566,7 @@ dependencies = [ "sc-service", "sc-sysinfo", "sc-telemetry", + "sc-tracing", "sc-transaction-pool", "sc-utils", "sp-api", @@ -4575,6 +4576,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-transaction-pool", + "sp-trie", ] [[package]] diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index 3ea36d70b42b6..176b389ff06a5 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -1,4 +1,5 @@ [package] + name = "cumulus-client-service" version = "0.7.0" authors.workspace = true @@ -26,6 +27,7 @@ sc-rpc = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } sc-sysinfo = { workspace = true, default-features = true } sc-telemetry = { workspace = true, default-features = true } +sc-tracing = { 
workspace = true, default-features = true } sc-transaction-pool = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } @@ -33,6 +35,7 @@ sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-transaction-pool = { workspace = true, default-features = true } diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index 62199f2704162..251d4f6f3bdfd 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -46,14 +46,16 @@ use sc_network_sync::SyncingService; use sc_network_transactions::TransactionsHandlerController; use sc_service::{Configuration, SpawnTaskHandle, TaskManager, WarpSyncConfig}; use sc_telemetry::{log, TelemetryWorkerHandle}; +use sc_tracing::block::TracingExecuteBlock; use sc_utils::mpsc::TracingUnboundedSender; -use sp_api::ProvideRuntimeApi; +use sp_api::{ApiExt, Core, ProofRecorder, ProvideRuntimeApi}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_core::Decode; use sp_runtime::{ traits::{Block as BlockT, BlockIdTo, Header}, SaturatedConversion, Saturating, }; +use sp_trie::proof_size_extension::ProofSizeExt; use std::{ sync::Arc, time::{Duration, Instant}, @@ -615,3 +617,34 @@ impl ParachainInformantMetrics { }) } } + +/// Implementation of [`TracingExecuteBlock`] for parachains. +/// +/// Ensures that all the required extensions required by parachain runtimes are registered and +/// available. +pub struct ParachainTracingExecuteBlock { + client: Arc, +} + +impl ParachainTracingExecuteBlock { + /// Creates a new instance of `self`. 
+ pub fn new(client: Arc) -> Self { + Self { client } + } +} + +impl TracingExecuteBlock for ParachainTracingExecuteBlock +where + Block: BlockT, + Client: ProvideRuntimeApi + Send + Sync, + Client::Api: Core, +{ + fn execute_block(&self, parent_hash: Block::Hash, block: Block) -> sp_blockchain::Result<()> { + let mut runtime_api = self.client.runtime_api(); + let storage_proof_recorder = ProofRecorder::::default(); + runtime_api.register_extension(ProofSizeExt::new(storage_proof_recorder.clone())); + runtime_api.record_proof_with_recorder(storage_proof_recorder); + + runtime_api.execute_block(parent_hash, block).map_err(Into::into) + } +} diff --git a/cumulus/polkadot-omni-node/lib/src/common/spec.rs b/cumulus/polkadot-omni-node/lib/src/common/spec.rs index f0d4cc0e0a88d..70d6a23bb3ff2 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/spec.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/spec.rs @@ -31,7 +31,8 @@ use cumulus_client_bootnodes::{start_bootnode_tasks, StartBootnodeTasksParams}; use cumulus_client_cli::CollatorOptions; use cumulus_client_service::{ build_network, build_relay_chain_interface, prepare_node_config, start_relay_chain_tasks, - BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, StartRelayChainTasksParams, + BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, ParachainTracingExecuteBlock, + StartRelayChainTasksParams, }; use cumulus_primitives_core::{BlockT, GetParachainInfo, ParaId}; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; @@ -448,6 +449,9 @@ pub(crate) trait NodeSpec: BaseNodeSpec { system_rpc_tx, tx_handler_controller, telemetry: telemetry.as_mut(), + tracing_execute_block: Some(Arc::new(ParachainTracingExecuteBlock::new( + client.clone(), + ))), })?; if let Some(hwbench) = hwbench { diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs b/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs index edd19ac597939..440dbf8763cdb 100644 --- 
a/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs @@ -21,6 +21,7 @@ use crate::common::{ }; use codec::Encode; use cumulus_client_parachain_inherent::{MockValidationDataInherentDataProvider, MockXcmConfig}; +use cumulus_client_service::ParachainTracingExecuteBlock; use cumulus_primitives_aura::AuraUnincludedSegmentApi; use cumulus_primitives_core::CollectCollationInfo; use futures::FutureExt; @@ -283,6 +284,9 @@ impl ManualSealNode { sync_service, config, telemetry: telemetry.as_mut(), + tracing_execute_block: Some(Arc::new(ParachainTracingExecuteBlock::new( + client.clone(), + ))), })?; Ok(task_manager) diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 4bc8cdd4bf5c2..d6f2578ea404d 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -52,7 +52,8 @@ use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImpo use cumulus_client_pov_recovery::{RecoveryDelayRange, RecoveryHandle}; use cumulus_client_service::{ build_network, prepare_node_config, start_relay_chain_tasks, BuildNetworkParams, - CollatorSybilResistance, DARecoveryProfile, StartRelayChainTasksParams, + CollatorSybilResistance, DARecoveryProfile, ParachainTracingExecuteBlock, + StartRelayChainTasksParams, }; use cumulus_primitives_core::{relay_chain::ValidationCode, GetParachainInfo, ParaId}; use cumulus_relay_chain_inprocess_interface::RelayChainInProcessInterface; @@ -392,6 +393,7 @@ where system_rpc_tx, tx_handler_controller, telemetry: None, + tracing_execute_block: Some(Arc::new(ParachainTracingExecuteBlock::new(client.clone()))), })?; let announce_block = { diff --git a/polkadot/node/service/src/builder/mod.rs b/polkadot/node/service/src/builder/mod.rs index 47ce9048d8111..439e4a21acecf 100644 --- a/polkadot/node/service/src/builder/mod.rs +++ b/polkadot/node/service/src/builder/mod.rs @@ -494,6 +494,7 @@ where system_rpc_tx, 
tx_handler_controller, telemetry: telemetry.as_mut(), + tracing_execute_block: None, })?; if let Some(hwbench) = hwbench { diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 1ef506ddd9c77..59881bb7c48c1 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -566,6 +566,7 @@ pub fn new_full_base::Hash>>( tx_handler_controller, sync_service: sync_service.clone(), telemetry: telemetry.as_mut(), + tracing_execute_block: None, })?; if let Some(hwbench) = hwbench { diff --git a/substrate/client/rpc/src/state/mod.rs b/substrate/client/rpc/src/state/mod.rs index d8989b3e1bee7..2d3986432603f 100644 --- a/substrate/client/rpc/src/state/mod.rs +++ b/substrate/client/rpc/src/state/mod.rs @@ -30,6 +30,7 @@ use sc_client_api::{ Backend, BlockBackend, BlockchainEvents, ExecutorProvider, ProofProvider, StorageProvider, }; use sc_rpc_api::{check_if_safe, DenyUnsafe}; +use sc_tracing::block::TracingExecuteBlock; use sp_api::{CallApiAt, Metadata, ProvideRuntimeApi}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_core::{ @@ -164,6 +165,7 @@ where pub fn new_full( client: Arc, executor: SubscriptionTaskExecutor, + execute_block: Option>>, ) -> (State, ChildState) where Block: BlockT + 'static, @@ -183,9 +185,13 @@ where + 'static, Client::Api: Metadata, { - let child_backend = - Box::new(self::state_full::FullState::new(client.clone(), executor.clone())); - let backend = Box::new(self::state_full::FullState::new(client, executor)); + let child_backend = Box::new(self::state_full::FullState::new( + client.clone(), + executor.clone(), + execute_block.clone(), + )); + let backend = + Box::new(self::state_full::FullState::new(client, executor, execute_block.clone())); (State { backend }, ChildState { backend: child_backend }) } diff --git a/substrate/client/rpc/src/state/state_full.rs b/substrate/client/rpc/src/state/state_full.rs index 7703936f8115d..29abbce511dc8 100644 --- 
a/substrate/client/rpc/src/state/state_full.rs +++ b/substrate/client/rpc/src/state/state_full.rs @@ -37,6 +37,7 @@ use sc_client_api::{ StorageProvider, }; use sc_rpc_api::state::ReadProof; +use sc_tracing::block::TracingExecuteBlock; use sp_api::{CallApiAt, Metadata, ProvideRuntimeApi}; use sp_blockchain::{ CachedHeaderMetadata, Error as ClientError, HeaderBackend, HeaderMetadata, @@ -55,7 +56,7 @@ use sp_version::RuntimeVersion; /// The maximum time allowed for an RPC call when running without unsafe RPC enabled. const MAXIMUM_SAFE_RPC_CALL_TIMEOUT: Duration = Duration::from_secs(30); -/// Ranges to query in state_queryStorage. +/// Ranges to query in `state_queryStorage`. struct QueryStorageRange { /// Hashes of all the blocks in the range. pub hashes: Vec, @@ -65,7 +66,8 @@ struct QueryStorageRange { pub struct FullState { client: Arc, executor: SubscriptionTaskExecutor, - _phantom: PhantomData<(BE, Block)>, + block_execute: Option>>, + _phantom: PhantomData, } impl FullState @@ -78,8 +80,12 @@ where Block: BlockT + 'static, { /// Create new state API backend for full nodes. - pub fn new(client: Arc, executor: SubscriptionTaskExecutor) -> Self { - Self { client, executor, _phantom: PhantomData } + pub fn new( + client: Arc, + executor: SubscriptionTaskExecutor, + block_execute: Option>>, + ) -> Self { + Self { client, executor, block_execute, _phantom: PhantomData } } /// Returns given block hash or best block hash if None is passed. 
@@ -479,6 +485,7 @@ where targets, storage_keys, methods, + self.block_execute.clone(), ) .trace_block() .map_err(|e| invalid_block::(block, None, e.to_string())) diff --git a/substrate/client/rpc/src/state/tests.rs b/substrate/client/rpc/src/state/tests.rs index c02f0d0b759bf..6f26cbcbb9936 100644 --- a/substrate/client/rpc/src/state/tests.rs +++ b/substrate/client/rpc/src/state/tests.rs @@ -106,7 +106,7 @@ async fn should_return_storage_entries() { .add_extra_child_storage(&child_info, KEY2.to_vec(), CHILD_VALUE2.to_vec()) .build(); let genesis_hash = client.genesis_hash(); - let (_client, child) = new_full(Arc::new(client), test_executor()); + let (_client, child) = new_full(Arc::new(client), test_executor(), None); let keys = &[StorageKey(KEY1.to_vec()), StorageKey(KEY2.to_vec())]; assert_eq!( @@ -137,7 +137,7 @@ async fn should_return_child_storage() { .build(), ); let genesis_hash = client.genesis_hash(); - let (_client, child) = new_full(client, test_executor()); + let (_client, child) = new_full(client, test_executor(), None); let child_key = prefixed_storage_key(); let key = StorageKey(b"key".to_vec()); @@ -168,7 +168,7 @@ async fn should_return_child_storage_entries() { .build(), ); let genesis_hash = client.genesis_hash(); - let (_client, child) = new_full(client, test_executor()); + let (_client, child) = new_full(client, test_executor(), None); let child_key = prefixed_storage_key(); let keys = vec![StorageKey(b"key1".to_vec()), StorageKey(b"key2".to_vec())]; @@ -199,7 +199,7 @@ async fn should_return_child_storage_entries() { async fn should_call_contract() { let client = Arc::new(substrate_test_runtime_client::new()); let genesis_hash = client.genesis_hash(); - let (client, _child) = new_full(client, test_executor()); + let (client, _child) = new_full(client, test_executor(), None); assert_matches!( client.call("balanceOf".into(), Bytes(vec![1, 2, 3]), Some(genesis_hash).into()), @@ -211,7 +211,7 @@ async fn should_call_contract() { async fn 
should_notify_about_storage_changes() { let mut sub = { let client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full(client.clone(), test_executor()); + let (api, _child) = new_full(client.clone(), test_executor(), None); let mut api_rpc = api.into_rpc(); api_rpc.extensions_mut().insert(DenyUnsafe::No); @@ -250,7 +250,7 @@ async fn should_notify_about_storage_changes() { async fn should_send_initial_storage_changes_and_notifications() { let mut sub = { let client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full(client.clone(), test_executor()); + let (api, _child) = new_full(client.clone(), test_executor(), None); let alice_balance_key = [ sp_crypto_hashing::twox_128(b"System"), @@ -300,7 +300,7 @@ async fn should_send_initial_storage_changes_and_notifications() { #[tokio::test] async fn should_query_storage() { async fn run_tests(client: Arc) { - let (api, _child) = new_full(client.clone(), test_executor()); + let (api, _child) = new_full(client.clone(), test_executor(), None); let add_block = |index| { let mut builder = BlockBuilderBuilder::new(&*client) @@ -468,7 +468,7 @@ async fn should_query_storage() { #[tokio::test] async fn should_return_runtime_version() { let client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full(client.clone(), test_executor()); + let (api, _child) = new_full(client.clone(), test_executor(), None); // it is basically json-encoded substrate_test_runtime_client::runtime::VERSION let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ @@ -491,7 +491,7 @@ async fn should_return_runtime_version() { async fn should_notify_on_runtime_version_initially() { let mut sub = { let client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full(client, test_executor()); + let (api, _child) = new_full(client, test_executor(), None); let mut api_rpc = api.into_rpc(); 
api_rpc.extensions_mut().insert(DenyUnsafe::No); @@ -518,7 +518,7 @@ fn should_deserialize_storage_key() { #[tokio::test] async fn wildcard_storage_subscriptions_are_rpc_unsafe() { let client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full(client, test_executor()); + let (api, _child) = new_full(client, test_executor(), None); let mut api_rpc = api.into_rpc(); api_rpc.extensions_mut().insert(DenyUnsafe::Yes); @@ -529,7 +529,7 @@ async fn wildcard_storage_subscriptions_are_rpc_unsafe() { #[tokio::test] async fn concrete_storage_subscriptions_are_rpc_safe() { let client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full(client, test_executor()); + let (api, _child) = new_full(client, test_executor(), None); let mut api_rpc = api.into_rpc(); api_rpc.extensions_mut().insert(DenyUnsafe::Yes); diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 74d94c30cd69b..3813fce66143c 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -81,6 +81,7 @@ use sc_rpc_spec_v2::{ transaction::{TransactionApiServer, TransactionBroadcastApiServer}, }; use sc_telemetry::{telemetry, ConnectionMessage, Telemetry, TelemetryHandle, SUBSTRATE_INFO}; +use sc_tracing::block::TracingExecuteBlock; use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_api::{CallApiAt, ProvideRuntimeApi}; @@ -461,11 +462,29 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { pub sync_service: Arc>, /// Telemetry instance for this node. pub telemetry: Option<&'a mut Telemetry>, + /// Optional [`TracingExecuteBlock`] handle. + /// + /// Will be used by the `trace_block` RPC to execute the actual block. + pub tracing_execute_block: Option>>, } /// Spawn the tasks that are required to run a node. 
pub fn spawn_tasks( - params: SpawnTasksParams, + SpawnTasksParams { + mut config, + task_manager, + client, + backend, + keystore, + transaction_pool, + rpc_builder, + network, + system_rpc_tx, + tx_handler_controller, + sync_service, + telemetry, + tracing_execute_block: execute_block, + }: SpawnTasksParams, ) -> Result where TCl: ProvideRuntimeApi @@ -492,21 +511,6 @@ where TBackend: 'static + sc_client_api::backend::Backend + Send, TExPool: MaintainedTransactionPool::Hash> + 'static, { - let SpawnTasksParams { - mut config, - task_manager, - client, - backend, - keystore, - transaction_pool, - rpc_builder, - network, - system_rpc_tx, - tx_handler_controller, - sync_service, - telemetry, - } = params; - let chain_info = client.usage_info().chain; sp_session::generate_initial_session_keys( @@ -603,21 +607,22 @@ where .transpose()?; let gen_rpc_module = || { - gen_rpc_module( - task_manager.spawn_handle(), - client.clone(), - transaction_pool.clone(), - keystore.clone(), - system_rpc_tx.clone(), - config.impl_name.clone(), - config.impl_version.clone(), - config.chain_spec.as_ref(), - &config.state_pruning, - config.blocks_pruning, - backend.clone(), - &*rpc_builder, - rpc_v2_metrics.clone(), - ) + gen_rpc_module(GenRpcModuleParams { + spawn_handle: task_manager.spawn_handle(), + client: client.clone(), + transaction_pool: transaction_pool.clone(), + keystore: keystore.clone(), + system_rpc_tx: system_rpc_tx.clone(), + impl_name: config.impl_name.clone(), + impl_version: config.impl_version.clone(), + chain_spec: config.chain_spec.as_ref(), + state_pruning: &config.state_pruning, + blocks_pruning: config.blocks_pruning, + backend: backend.clone(), + rpc_builder: &*rpc_builder, + metrics: rpc_v2_metrics.clone(), + tracing_execute_block: execute_block.clone(), + }) }; let rpc_server_handle = start_rpc_servers( @@ -750,21 +755,58 @@ where Ok(telemetry.handle()) } +/// Parameters for [`gen_rpc_module`]. 
+pub struct GenRpcModuleParams<'a, TBl: BlockT, TBackend, TCl, TRpc, TExPool> { + /// The handle to spawn tasks. + pub spawn_handle: SpawnTaskHandle, + /// Access to the client. + pub client: Arc, + /// The transaction pool. + pub transaction_pool: Arc, + /// Keystore handle. + pub keystore: KeystorePtr, + /// Sender for system requests. + pub system_rpc_tx: TracingUnboundedSender>, + /// Implementation name of this node. + pub impl_name: String, + /// Implementation version of this node. + pub impl_version: String, + /// The chain spec. + pub chain_spec: &'a dyn ChainSpec, + /// Enabled pruning mode for this node. + pub state_pruning: &'a Option, + /// Enabled blocks pruning mode. + pub blocks_pruning: BlocksPruning, + /// Backend of the node. + pub backend: Arc, + /// RPC builder. + pub rpc_builder: &'a (dyn Fn(SubscriptionTaskExecutor) -> Result, Error>), + /// Transaction metrics handle. + pub metrics: Option, + /// Optional [`TracingExecuteBlock`] handle. + /// + /// Will be used by the `trace_block` RPC to execute the actual block. 
+ pub tracing_execute_block: Option>>, +} + /// Generate RPC module using provided configuration pub fn gen_rpc_module( - spawn_handle: SpawnTaskHandle, - client: Arc, - transaction_pool: Arc, - keystore: KeystorePtr, - system_rpc_tx: TracingUnboundedSender>, - impl_name: String, - impl_version: String, - chain_spec: &dyn ChainSpec, - state_pruning: &Option, - blocks_pruning: BlocksPruning, - backend: Arc, - rpc_builder: &(dyn Fn(SubscriptionTaskExecutor) -> Result, Error>), - metrics: Option, + GenRpcModuleParams { + spawn_handle, + client, + transaction_pool, + keystore, + system_rpc_tx, + impl_name, + impl_version, + chain_spec, + state_pruning, + blocks_pruning, + backend, + rpc_builder, + metrics, + tracing_execute_block: execute_block, + }: GenRpcModuleParams, ) -> Result, Error> where TBl: BlockT, @@ -799,7 +841,8 @@ where let (chain, state, child_state) = { let chain = sc_rpc::chain::new_full(client.clone(), task_executor.clone()).into_rpc(); - let (state, child_state) = sc_rpc::state::new_full(client.clone(), task_executor.clone()); + let (state, child_state) = + sc_rpc::state::new_full(client.clone(), task_executor.clone(), execute_block); let state = state.into_rpc(); let child_state = child_state.into_rpc(); diff --git a/substrate/client/tracing/src/block/mod.rs b/substrate/client/tracing/src/block/mod.rs index 3ebbc3d97e171..967c814099f17 100644 --- a/substrate/client/tracing/src/block/mod.rs +++ b/substrate/client/tracing/src/block/mod.rs @@ -36,7 +36,7 @@ use tracing::{ use crate::{SpanDatum, TraceEvent, Values}; use sc_client_api::BlockBackend; -use sp_api::{Core, Metadata, ProvideRuntimeApi}; +use sp_api::{Core, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_core::hexdisplay::HexDisplay; use sp_rpc::tracing::{BlockTrace, Span, TraceBlockResponse}; @@ -52,6 +52,43 @@ const TRACE_TARGET: &str = "block_trace"; // The name of a field required for all events. 
const REQUIRED_EVENT_FIELD: &str = "method"; +/// Something that can execute a block in a tracing context. +/// +/// [`DefaultExecuteBlock`] provides a default implementation that simply forwards the block to +/// [`Core::execute_block`] without any other changes. +pub trait TracingExecuteBlock: Send + Sync { + /// Execute the given `block` on top of the state of `parent_hash`. + /// + /// The execution should be done sync on the same thread, because the caller will register + /// special tracing collectors. + fn execute_block(&self, parent_hash: Block::Hash, block: Block) -> sp_blockchain::Result<()>; +} + +/// Default implementation of [`ExecuteBlock`]. +/// +/// Uses [`Core::execute_block`] to directly execute a block. +struct DefaultExecuteBlock { + client: Arc, +} + +impl DefaultExecuteBlock { + /// Creates a new instance. + pub fn new(client: Arc) -> Self { + Self { client } + } +} + +impl TracingExecuteBlock for DefaultExecuteBlock +where + Client: ProvideRuntimeApi + Send + Sync + 'static, + Client::Api: Core, + Block: BlockT, +{ + fn execute_block(&self, parent_hash: Block::Hash, block: Block) -> sp_blockchain::Result<()> { + self.client.runtime_api().execute_block(parent_hash, block).map_err(Into::into) + } +} + /// Tracing Block Result type alias pub type TraceBlockResult = Result; @@ -96,11 +133,13 @@ impl Subscriber for BlockSubscriber { if !metadata.is_span() && metadata.fields().field(REQUIRED_EVENT_FIELD).is_none() { return false } + for (target, level) in &self.targets { if metadata.level() <= level && metadata.target().starts_with(target) { return true } } + false } @@ -167,6 +206,7 @@ pub struct BlockExecutor { targets: Option, storage_keys: Option, methods: Option, + execute_block: Arc>, } impl BlockExecutor @@ -178,7 +218,7 @@ where + Send + Sync + 'static, - Client::Api: Metadata, + Client::Api: Core, { /// Create a new `BlockExecutor` pub fn new( @@ -187,8 +227,17 @@ where targets: Option, storage_keys: Option, methods: Option, + 
execute_block: Option>>, ) -> Self { - Self { client, block, targets, storage_keys, methods } + Self { + client: client.clone(), + block, + targets, + storage_keys, + methods, + execute_block: execute_block + .unwrap_or_else(|| Arc::new(DefaultExecuteBlock::new(client))), + } } /// Execute block, record all spans and events belonging to `Self::targets` @@ -228,7 +277,7 @@ where if let Err(e) = dispatcher::with_default(&dispatch, || { let span = tracing::info_span!(target: TRACE_TARGET, "trace_block"); let _enter = span.enter(); - self.client.runtime_api().execute_block(parent_hash, block) + self.execute_block.execute_block(parent_hash, block) }) { return Err(Error::Dispatch(format!( "Failed to collect traces and execute block: {}", @@ -311,6 +360,7 @@ fn patch_and_filter(mut span: SpanDatum, targets: &str) -> Option { return None } } + Some(span.into()) } @@ -321,6 +371,7 @@ fn check_target(targets: &str, target: &str, level: &Level) -> bool { return true } } + false } diff --git a/substrate/frame/revive/dev-node/node/src/service.rs b/substrate/frame/revive/dev-node/node/src/service.rs index 5cee6586da51d..4f53173627d70 100644 --- a/substrate/frame/revive/dev-node/node/src/service.rs +++ b/substrate/frame/revive/dev-node/node/src/service.rs @@ -186,6 +186,7 @@ pub fn new_full::Ha sync_service, config, telemetry: telemetry.as_mut(), + tracing_execute_block: None, })?; let proposer = sc_basic_authorship::ProposerFactory::new( diff --git a/templates/minimal/node/src/service.rs b/templates/minimal/node/src/service.rs index 5988dbf3ce6ed..cd68ba6fd767b 100644 --- a/templates/minimal/node/src/service.rs +++ b/templates/minimal/node/src/service.rs @@ -194,6 +194,7 @@ pub fn new_full::Ha sync_service, config, telemetry: telemetry.as_mut(), + tracing_execute_block: None, })?; let proposer = sc_basic_authorship::ProposerFactory::new( diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index 4ce1bba376e8d..7d95673eafdc9 100644 --- 
a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -9,7 +9,7 @@ use parachain_template_runtime::{ opaque::{Block, Hash}, }; -use polkadot_sdk::*; +use polkadot_sdk::{cumulus_client_service::ParachainTracingExecuteBlock, *}; // Cumulus Imports use cumulus_client_bootnodes::{start_bootnode_tasks, StartBootnodeTasksParams}; @@ -347,6 +347,7 @@ pub async fn start_parachain_node( system_rpc_tx, tx_handler_controller, telemetry: telemetry.as_mut(), + tracing_execute_block: Some(Arc::new(ParachainTracingExecuteBlock::new(client.clone()))), })?; if let Some(hwbench) = hwbench { diff --git a/templates/solochain/node/src/service.rs b/templates/solochain/node/src/service.rs index 79d97fbab8dfa..dd932b2974945 100644 --- a/templates/solochain/node/src/service.rs +++ b/templates/solochain/node/src/service.rs @@ -234,6 +234,7 @@ pub fn new_full< sync_service: sync_service.clone(), config, telemetry: telemetry.as_mut(), + tracing_execute_block: None, })?; if role.is_authority() { From 08e3250ea08cda787f872b39de92e9972776e7ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 29 Sep 2025 22:36:41 +0200 Subject: [PATCH 127/312] Aux schema stuff --- .../src/collators/slot_based/aux_schema.rs | 87 +++++++++++++++++++ .../slot_based/block_builder_task.rs | 12 ++- .../aura/src/collators/slot_based/mod.rs | 1 + 3 files changed, 97 insertions(+), 3 deletions(-) create mode 100644 cumulus/client/consensus/aura/src/collators/slot_based/aux_schema.rs diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/aux_schema.rs b/cumulus/client/consensus/aura/src/collators/slot_based/aux_schema.rs new file mode 100644 index 0000000000000..a6dd484cdaf26 --- /dev/null +++ b/cumulus/client/consensus/aura/src/collators/slot_based/aux_schema.rs @@ -0,0 +1,87 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +use codec::{Decode, Encode}; +use sc_client_api::backend::AuxStore; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; + +const STORAGE_PROOF_RECORDING_VERSION: &[u8] = b"cumulus_aura_storage_proof_recording_version"; +const STORAGE_PROOF_RECORDING_CURRENT_VERSION: u32 = 1; + +/// The aux storage key used to store the storage proof size recordings for the given block hash. +pub fn storage_proof_recording_key(block_hash: H) -> Vec { + (b"cumulus_aura_storage_proof_recording", block_hash).encode() +} + +fn load_decode(backend: &B, key: &[u8]) -> ClientResult> +where + B: AuxStore, + T: Decode, +{ + let corrupt = |e: codec::Error| { + ClientError::Backend(format!("Storage proof recording DB is corrupted. Decode error: {}", e)) + }; + match backend.get_aux(key)? { + None => Ok(None), + Some(t) => T::decode(&mut &t[..]).map(Some).map_err(corrupt), + } +} + +/// Write the storage proof size recordings of a block to aux storage. 
+pub(crate) fn write_storage_proof_recording( + block_hash: H, + recordings: Vec, + write_aux: F, +) -> R +where + F: FnOnce(&[(Vec, &[u8])]) -> R, +{ + STORAGE_PROOF_RECORDING_CURRENT_VERSION.using_encoded(|version| { + let key = storage_proof_recording_key(block_hash); + recordings.using_encoded(|s| { + write_aux(&[ + (key, s), + (STORAGE_PROOF_RECORDING_VERSION.to_vec(), version), + ]) + }) + }) +} + +/// Load the storage proof size recordings associated with a block. +pub fn load_storage_proof_recording( + backend: &B, + block_hash: H, +) -> ClientResult>> { + let version = load_decode::<_, u32>(backend, STORAGE_PROOF_RECORDING_VERSION)?; + + match version { + None => Ok(None), + Some(STORAGE_PROOF_RECORDING_CURRENT_VERSION) => + load_decode(backend, storage_proof_recording_key(block_hash).as_slice()), + Some(other) => + Err(ClientError::Backend(format!("Unsupported storage proof recording DB version: {:?}", other))), + } +} + +/// Prune the storage proof size recordings for a block from aux storage. 
+pub(crate) fn prune_storage_proof_recording( + backend: &B, + block_hash: H, +) -> ClientResult<()> { + let key = storage_proof_recording_key(block_hash); + backend.insert_aux(&[], &[key.as_slice()]) +} \ No newline at end of file diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 814801e8039d2..ffebac26d983f 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -55,7 +55,10 @@ use sp_externalities::Extensions; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT, Member, Zero}; -use sp_trie::{proof_size_extension::ProofSizeExt, recorder::IgnoredNodes}; +use sp_trie::{ + proof_size_extension::{ProofSizeExt, RecordingProofSizeProvider}, + recorder::IgnoredNodes, +}; use std::{ collections::VecDeque, sync::Arc, @@ -517,7 +520,7 @@ where // The authoring duration is either the block time returned by the runtime or the 90% of the // rest of the slot time for the block. We take here 90% because we still need to create the - // inherents and need to import the block afterwards. + // inherents and need to import the block afterward. 
let authoring_duration = block_time.min(slot_time_for_block); let (parachain_inherent_data, other_inherent_data) = match collator @@ -538,8 +541,11 @@ where let storage_proof_recorder = ProofRecorder::::with_ignored_nodes(ignored_nodes.clone()); + + let proof_size_recorder = RecordingProofSizeProvider::new(storage_proof_recorder.clone()); + let mut extra_extensions = Extensions::default(); - extra_extensions.register(ProofSizeExt::new(storage_proof_recorder.clone())); + extra_extensions.register(ProofSizeExt::new(proof_size_recorder.clone())); let Ok(Some((built_block, import_block))) = collator .build_block(BuildBlockAndImportParams { diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index 03ac5188de31c..ddcc751a39805 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -93,6 +93,7 @@ use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Member}; use std::{path::PathBuf, sync::Arc, time::Duration}; +mod aux_schema; mod block_builder_task; mod block_import; mod collation_task; From 88a273eeb4c530040cde0c02b355c33e1f9b393a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 1 Oct 2025 12:55:36 +0200 Subject: [PATCH 128/312] FinalityNotification: Directly include stale blocks The finality notification was already carrying the information about the stale heads. However, most users of the stale heads were expanding these stale heads to all the stale blocks. So, we were iterating the same forks multiple times in the node for each finality notification. Also in a possible future where we start actually pruning headers as well, expanding these forks would fail. So, this pull request is changing the finality notification to directly carry the stale blocks (which were calculated any way already). 
--- cumulus/client/pov-recovery/src/tests.rs | 2 +- substrate/client/api/src/backend.rs | 14 ++- substrate/client/api/src/client.rs | 12 ++- substrate/client/consensus/babe/src/lib.rs | 11 +-- substrate/client/db/src/lib.rs | 88 ++++++++++++------- .../merkle-mountain-range/src/offchain_mmr.rs | 9 +- .../src/chain_head/chain_head_follow.rs | 26 ++---- .../rpc-spec-v2/src/chain_head/test_utils.rs | 14 ++- substrate/client/service/src/client/client.rs | 69 +++++++-------- .../client/service/test/src/client/mod.rs | 42 ++++++--- .../primitives/blockchain/src/backend.rs | 39 +++----- 11 files changed, 169 insertions(+), 157 deletions(-) diff --git a/cumulus/client/pov-recovery/src/tests.rs b/cumulus/client/pov-recovery/src/tests.rs index 1476133736d4e..574632c96454d 100644 --- a/cumulus/client/pov-recovery/src/tests.rs +++ b/cumulus/client/pov-recovery/src/tests.rs @@ -1203,7 +1203,7 @@ async fn candidate_is_finalized_while_awaiting_recovery() { let (unpin_sender, _unpin_receiver) = sc_utils::mpsc::tracing_unbounded("test_unpin", 10); finality_notifications_tx .unbounded_send(FinalityNotification::from_summary( - FinalizeSummary { header: header.clone(), finalized: vec![], stale_heads: vec![] }, + FinalizeSummary { header: header.clone(), finalized: vec![], stale_blocks: vec![] }, unpin_sender, )) .unwrap(); diff --git a/substrate/client/api/src/backend.rs b/substrate/client/api/src/backend.rs index f520400f3def0..189264b22ccfd 100644 --- a/substrate/client/api/src/backend.rs +++ b/substrate/client/api/src/backend.rs @@ -79,6 +79,15 @@ pub struct ImportSummary { pub import_notification_action: ImportNotificationAction, } +/// A stale block. +#[derive(Clone, Debug)] +pub struct StaleBlock { + /// The hash of this block. + pub hash: Block::Hash, + /// Is this a head? + pub is_head: bool, +} + /// Finalization operation summary. 
/// /// Contains information about the block that just got finalized, @@ -87,10 +96,11 @@ pub struct FinalizeSummary { /// Last finalized block header. pub header: Block::Header, /// Blocks that were finalized. + /// /// The last entry is the one that has been explicitly finalized. pub finalized: Vec, - /// Heads that became stale during this finalization operation. - pub stale_heads: Vec, + /// Blocks that became stale during this finalization operation. + pub stale_blocks: Vec>, } /// Import operation wrapper. diff --git a/substrate/client/api/src/client.rs b/substrate/client/api/src/client.rs index 764930984ed71..7dc223a6de694 100644 --- a/substrate/client/api/src/client.rs +++ b/substrate/client/api/src/client.rs @@ -31,7 +31,9 @@ use std::{ sync::Arc, }; -use crate::{blockchain::Info, notifications::StorageEventStream, FinalizeSummary, ImportSummary}; +use crate::{ + blockchain::Info, notifications::StorageEventStream, FinalizeSummary, ImportSummary, StaleBlock, +}; use sc_transaction_pool_api::ChainEvent; use sc_utils::mpsc::{TracingUnboundedReceiver, TracingUnboundedSender}; @@ -404,8 +406,8 @@ pub struct FinalityNotification { /// /// This maps to the range `(old_finalized, new_finalized)`. pub tree_route: Arc<[Block::Hash]>, - /// Stale branches heads. - pub stale_heads: Arc<[Block::Hash]>, + /// Stale blocks. 
+ pub stale_blocks: Arc<[Arc>]>, /// Handle to unpin the block this notification is for unpin_handle: UnpinHandle, } @@ -439,7 +441,9 @@ impl FinalityNotification { hash, header: summary.header, tree_route: Arc::from(summary.finalized), - stale_heads: Arc::from(summary.stale_heads), + stale_blocks: Arc::from( + summary.stale_blocks.into_iter().map(Arc::from).collect::>(), + ), unpin_handle: UnpinHandle::new(hash, unpin_worker_sender), } } diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs index 34fad5d42f559..040ff210b2bf7 100644 --- a/substrate/client/consensus/babe/src/lib.rs +++ b/substrate/client/consensus/babe/src/lib.rs @@ -561,16 +561,7 @@ fn aux_storage_cleanup + HeaderBackend, Block: B .filter(|h| **h != notification.hash), ); - // Cleans data for stale forks. - let stale_forks = match client.expand_forks(¬ification.stale_heads) { - Ok(stale_forks) => stale_forks, - Err(e) => { - warn!(target: LOG_TARGET, "{:?}", e); - - Default::default() - }, - }; - hashes.extend(stale_forks.iter()); + hashes.extend(notification.stale_blocks.iter().map(|b| b.hash)); hashes .into_iter() diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 05696bdc74d56..7731d54d407ae 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -1898,7 +1898,11 @@ impl Backend { } if remove_displaced { - let new_displaced = self.blockchain.displaced_leaves_after_finalizing(f_hash, f_num)?; + let new_displaced = self.blockchain.displaced_leaves_after_finalizing( + f_hash, + f_num, + *f_header.parent_hash(), + )?; self.blockchain.leaves.write().remove_displaced_leaves(FinalizationOutcome::new( new_displaced.displaced_leaves.iter().copied(), @@ -3289,16 +3293,18 @@ pub(crate) mod tests { let a4_hash = insert_disconnected_header(&backend, a4_number, a3_hash, H256::from([2; 32]), true); { - let displaced = - blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap(); + let 
displaced = blockchain + .displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([200; 32])) + .unwrap(); assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, genesis_hash]); assert_eq!(displaced.displaced_leaves, vec![(genesis_number, genesis_hash)]); assert_eq!(displaced.displaced_blocks, vec![]); } { - let displaced = - blockchain.displaced_leaves_after_finalizing(a4_hash, a4_number).unwrap(); + let displaced = blockchain + .displaced_leaves_after_finalizing(a4_hash, a4_number, a3_hash) + .unwrap(); assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, genesis_hash]); assert_eq!(displaced.displaced_leaves, vec![(genesis_number, genesis_hash)]); assert_eq!(displaced.displaced_blocks, vec![]); @@ -3315,8 +3321,9 @@ pub(crate) mod tests { false, ); { - let displaced = - blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap(); + let displaced = blockchain + .displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([2; 32])) + .unwrap(); assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, a1_hash]); assert_eq!(displaced.displaced_leaves, vec![]); assert_eq!(displaced.displaced_blocks, vec![]); @@ -3334,8 +3341,9 @@ pub(crate) mod tests { false, ); { - let displaced = - blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap(); + let displaced = blockchain + .displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([2; 32])) + .unwrap(); assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, a1_hash, b1_hash]); assert_eq!(displaced.displaced_leaves, vec![]); assert_eq!(displaced.displaced_blocks, vec![]); @@ -3358,8 +3366,9 @@ pub(crate) mod tests { let b5_hash = insert_disconnected_header(&backend, b5_number, b4_hash, H256::from([43; 32]), false); { - let displaced = - blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap(); + let displaced = blockchain + .displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([2; 32])) + .unwrap(); assert_eq!(blockchain.leaves().unwrap(), 
vec![b5_hash, a4_hash, a1_hash]); assert_eq!(displaced.displaced_leaves, vec![]); assert_eq!(displaced.displaced_blocks, vec![]); @@ -3374,8 +3383,9 @@ pub(crate) mod tests { let c4_hash = insert_disconnected_header(&backend, c4_number, a3_hash, H256::from([44; 32]), false); { - let displaced = - blockchain.displaced_leaves_after_finalizing(a4_hash, a4_number).unwrap(); + let displaced = blockchain + .displaced_leaves_after_finalizing(a4_hash, a4_number, a3_hash) + .unwrap(); assert_eq!(blockchain.leaves().unwrap(), vec![b5_hash, a4_hash, c4_hash, a1_hash]); assert_eq!(displaced.displaced_leaves, vec![(c4_number, c4_hash)]); assert_eq!(displaced.displaced_blocks, vec![c4_hash]); @@ -3404,24 +3414,27 @@ pub(crate) mod tests { { let displaced = blockchain - .displaced_leaves_after_finalizing(genesis_hash, genesis_number) + .displaced_leaves_after_finalizing(genesis_hash, genesis_number, Default::default()) .unwrap(); assert_eq!(displaced.displaced_leaves, vec![]); assert_eq!(displaced.displaced_blocks, vec![]); } { - let displaced_a1 = - blockchain.displaced_leaves_after_finalizing(a1_hash, a1_number).unwrap(); + let displaced_a1 = blockchain + .displaced_leaves_after_finalizing(a1_hash, a1_number, genesis_hash) + .unwrap(); assert_eq!(displaced_a1.displaced_leaves, vec![]); assert_eq!(displaced_a1.displaced_blocks, vec![]); - let displaced_a2 = - blockchain.displaced_leaves_after_finalizing(a2_hash, a3_number).unwrap(); + let displaced_a2 = blockchain + .displaced_leaves_after_finalizing(a2_hash, a2_number, a1_hash) + .unwrap(); assert_eq!(displaced_a2.displaced_leaves, vec![]); assert_eq!(displaced_a2.displaced_blocks, vec![]); - let displaced_a3 = - blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap(); + let displaced_a3 = blockchain + .displaced_leaves_after_finalizing(a3_hash, a3_number, a2_hash) + .unwrap(); assert_eq!(displaced_a3.displaced_leaves, vec![]); assert_eq!(displaced_a3.displaced_blocks, vec![]); } @@ -3429,8 +3442,9 @@ 
pub(crate) mod tests { // Finalized block is above leaves and not imported yet. // We will not be able to make a connection, // nothing can be marked as displaced. - let displaced = - blockchain.displaced_leaves_after_finalizing(H256::from([57; 32]), 10).unwrap(); + let displaced = blockchain + .displaced_leaves_after_finalizing(H256::from([57; 32]), 10, H256::from([56; 32])) + .unwrap(); assert_eq!(displaced.displaced_leaves, vec![]); assert_eq!(displaced.displaced_blocks, vec![]); } @@ -3454,8 +3468,9 @@ pub(crate) mod tests { let d2_hash = insert_header(&backend, d2_number, d1_hash, None, Default::default()); { - let displaced_a1 = - blockchain.displaced_leaves_after_finalizing(a1_hash, a1_number).unwrap(); + let displaced_a1 = blockchain + .displaced_leaves_after_finalizing(a1_hash, a1_number, genesis_hash) + .unwrap(); assert_eq!( displaced_a1.displaced_leaves, vec![(c2_number, c2_hash), (d2_number, d2_hash)] @@ -3464,27 +3479,31 @@ pub(crate) mod tests { displaced_blocks.sort(); assert_eq!(displaced_a1.displaced_blocks, displaced_blocks); - let displaced_a2 = - blockchain.displaced_leaves_after_finalizing(a2_hash, a2_number).unwrap(); + let displaced_a2 = blockchain + .displaced_leaves_after_finalizing(a2_hash, a2_number, a1_hash) + .unwrap(); assert_eq!(displaced_a1.displaced_leaves, displaced_a2.displaced_leaves); assert_eq!(displaced_a1.displaced_blocks, displaced_a2.displaced_blocks); - let displaced_a3 = - blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap(); + let displaced_a3 = blockchain + .displaced_leaves_after_finalizing(a3_hash, a3_number, a2_hash) + .unwrap(); assert_eq!(displaced_a1.displaced_leaves, displaced_a3.displaced_leaves); assert_eq!(displaced_a1.displaced_blocks, displaced_a3.displaced_blocks); } { - let displaced = - blockchain.displaced_leaves_after_finalizing(b1_hash, b1_number).unwrap(); + let displaced = blockchain + .displaced_leaves_after_finalizing(b1_hash, b1_number, genesis_hash) + .unwrap(); 
assert_eq!(displaced.displaced_leaves, vec![(a3_number, a3_hash)]); let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash]; displaced_blocks.sort(); assert_eq!(displaced.displaced_blocks, displaced_blocks); } { - let displaced = - blockchain.displaced_leaves_after_finalizing(b2_hash, b2_number).unwrap(); + let displaced = blockchain + .displaced_leaves_after_finalizing(b2_hash, b2_number, b1_hash) + .unwrap(); assert_eq!( displaced.displaced_leaves, vec![(a3_number, a3_hash), (d2_number, d2_hash)] @@ -3494,8 +3513,9 @@ pub(crate) mod tests { assert_eq!(displaced.displaced_blocks, displaced_blocks); } { - let displaced = - blockchain.displaced_leaves_after_finalizing(c2_hash, c2_number).unwrap(); + let displaced = blockchain + .displaced_leaves_after_finalizing(c2_hash, c2_number, c1_hash) + .unwrap(); assert_eq!( displaced.displaced_leaves, vec![(a3_number, a3_hash), (d2_number, d2_hash)] diff --git a/substrate/client/merkle-mountain-range/src/offchain_mmr.rs b/substrate/client/merkle-mountain-range/src/offchain_mmr.rs index 94593f9c2c7ba..a62b8ba455d27 100644 --- a/substrate/client/merkle-mountain-range/src/offchain_mmr.rs +++ b/substrate/client/merkle-mountain-range/src/offchain_mmr.rs @@ -273,14 +273,7 @@ where self.write_gadget_state_or_log(); // Remove offchain MMR nodes for stale forks. 
- let stale_forks = self.client.expand_forks(¬ification.stale_heads).unwrap_or_else(|e| { - warn!(target: LOG_TARGET, "{:?}", e); - - Default::default() - }); - for hash in stale_forks.iter() { - self.prune_branch(hash); - } + notification.stale_blocks.iter().for_each(|s| self.prune_branch(&s.hash)); } } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs index e9975b36b4a10..4a05d43d52a90 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs @@ -33,6 +33,7 @@ use futures::{ use log::debug; use sc_client_api::{ Backend, BlockBackend, BlockImportNotification, BlockchainEvents, FinalityNotification, + StaleBlock, }; use sc_rpc::utils::Subscription; use schnellru::{ByLength, LruMap}; @@ -603,20 +604,14 @@ where Ok(events) } - /// Get all pruned block hashes from the provided stale heads. + /// Get all pruned block hashes from the provided stale blocks. fn get_pruned_hashes( &mut self, - stale_heads: &[Block::Hash], - last_finalized: Block::Hash, + stale_blocks: &[Arc>], ) -> Result, SubscriptionManagementError> { - let blockchain = self.backend.blockchain(); - let mut pruned = Vec::new(); - - for stale_head in stale_heads { - let tree_route = sp_blockchain::tree_route(blockchain, last_finalized, *stale_head)?; - - // Collect only blocks that are not part of the canonical chain. - pruned.extend(tree_route.enacted().iter().filter_map(|block| { + Ok(stale_blocks + .iter() + .filter_map(|block| { if self.pruned_blocks.get(&block.hash).is_some() { // The block was already reported as pruned. return None @@ -624,10 +619,8 @@ where self.pruned_blocks.insert(block.hash, ()); Some(block.hash) - })) - } - - Ok(pruned) + }) + .collect()) } /// Handle the finalization notification by generating the `Finalized` event. 
@@ -656,8 +649,7 @@ where // Report all pruned blocks from the notification that are not // part of the fork we need to ignore. - let pruned_block_hashes = - self.get_pruned_hashes(¬ification.stale_heads, last_finalized)?; + let pruned_block_hashes = self.get_pruned_hashes(¬ification.stale_blocks)?; for finalized in &finalized_block_hashes { self.announced_blocks.insert(*finalized, true); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs index fd43ffefc4934..4757ded9dbffc 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs @@ -23,7 +23,7 @@ use sc_client_api::{ execution_extensions::ExecutionExtensions, BlockBackend, BlockImportNotification, BlockchainEvents, CallExecutor, ChildInfo, ExecutorProvider, FinalityNotification, FinalityNotifications, FinalizeSummary, ImportNotifications, KeysIter, MerkleValue, PairsIter, - StorageData, StorageEventStream, StorageKey, StorageProvider, + StaleBlock, StorageData, StorageEventStream, StorageKey, StorageProvider, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_api::{CallApiAt, CallApiAtParams}; @@ -75,7 +75,7 @@ impl ChainHeadMockClient { } /// Trigger the import stram from a header and a list of stale heads. - pub async fn trigger_finality_stream(&self, header: Header, stale_heads: Vec) { + pub async fn trigger_finality_stream(&self, header: Header, stale_blocks: Vec) { // Ensure the client called the `finality_notification_stream`. while self.finality_sinks.lock().is_empty() { tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; @@ -83,8 +83,14 @@ impl ChainHeadMockClient { // Build the notification. 
let (sink, _stream) = tracing_unbounded("test_sink", 100_000); - let summary = - FinalizeSummary { header: header.clone(), finalized: vec![header.hash()], stale_heads }; + let summary = FinalizeSummary { + header: header.clone(), + finalized: vec![header.hash()], + stale_blocks: stale_blocks + .into_iter() + .map(|h| StaleBlock { hash: h, is_head: false }) + .collect(), + }; let notification = FinalityNotification::from_summary(summary, sink); for sink in self.finality_sinks.lock().iter_mut() { diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 5f55499b32f06..549895c37902b 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -41,7 +41,7 @@ use sc_client_api::{ execution_extensions::ExecutionExtensions, notifications::{StorageEventStream, StorageNotifications}, CallExecutor, ExecutorProvider, KeysIter, OnFinalityAction, OnImportAction, PairsIter, - ProofProvider, TrieCacheContext, UnpinWorkerMessage, UsageProvider, + ProofProvider, StaleBlock, TrieCacheContext, UnpinWorkerMessage, UsageProvider, }; use sc_consensus::{ BlockCheckParams, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction, @@ -737,29 +737,24 @@ where None => FinalizeSummary { header: header.clone(), finalized: vec![hash], - stale_heads: Vec::new(), + stale_blocks: Vec::new(), }, }; if parent_exists { - // Add to the stale list all heads that are branching from parent besides our - // current `head`. - for head in self - .backend - .blockchain() - .leaves()? - .into_iter() - .filter(|h| *h != parent_hash) - { - let route_from_parent = sp_blockchain::tree_route( - self.backend.blockchain(), - parent_hash, - head, - )?; - if route_from_parent.retracted().is_empty() { - summary.stale_heads.push(head); - } - } + // The stale blocks that will be displaced after the block is finalized. 
+ let stale_heads = self.backend.blockchain().displaced_leaves_after_finalizing( + hash, + *header.number(), + parent_hash, + )?; + + summary.stale_blocks.extend(stale_heads.displaced_blocks.into_iter().map( + |b| StaleBlock { + hash: b, + is_head: stale_heads.displaced_leaves.iter().any(|(_, h)| *h == b), + }, + )); } operation.notify_finalized = Some(summary); } @@ -935,30 +930,28 @@ where let finalized = route_from_finalized.enacted().iter().map(|elem| elem.hash).collect::>(); - let block_number = route_from_finalized - .last() - .expect( - "The block to finalize is always the latest \ - block in the route to the finalized block; qed", - ) - .number; - - // The stale heads are the leaves that will be displaced after the - // block is finalized. - let stale_heads = self - .backend - .blockchain() - .displaced_leaves_after_finalizing(hash, block_number)? - .hashes() - .collect(); - let header = self .backend .blockchain() .header(hash)? .expect("Block to finalize expected to be onchain; qed"); + let block_number = *header.number(); + + // The stale blocks that will be displaced after the block is finalized. 
+ let mut stale_blocks = Vec::new(); + + let stale_heads = self.backend.blockchain().displaced_leaves_after_finalizing( + hash, + block_number, + *header.parent_hash(), + )?; + + stale_blocks.extend(stale_heads.displaced_blocks.into_iter().map(|b| StaleBlock { + hash: b, + is_head: stale_heads.displaced_leaves.iter().any(|(_, h)| *h == b), + })); - operation.notify_finalized = Some(FinalizeSummary { header, finalized, stale_heads }); + operation.notify_finalized = Some(FinalizeSummary { header, finalized, stale_blocks }); } Ok(()) diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index b38ee82dadd23..9af31f0232ca6 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -134,18 +134,20 @@ fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> Vec ) } +#[track_caller] fn finality_notification_check( notifications: &mut FinalityNotifications, finalized: &[Hash], - stale_heads: &[Hash], + stale_blocks: &[Hash], ) { match notifications.try_recv() { Ok(notif) => { - let stale_heads_expected: HashSet<_> = stale_heads.iter().collect(); - let stale_heads: HashSet<_> = notif.stale_heads.iter().collect(); + let stale_blocks_expected = HashSet::::from_iter(stale_blocks.iter().copied()); + let stale_blocks = HashSet::from_iter(notif.stale_blocks.into_iter().map(|b| b.hash)); + assert_eq!(notif.tree_route.as_ref(), &finalized[..finalized.len() - 1]); assert_eq!(notif.hash, *finalized.last().unwrap()); - assert_eq!(stale_heads, stale_heads_expected); + assert_eq!(stale_blocks, stale_blocks_expected); }, Err(TryRecvError::Closed) => { panic!("unexpected notification result, client send channel was closed") @@ -1154,7 +1156,11 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { assert_eq!(client.chain_info().finalized_hash, b1.hash()); - finality_notification_check(&mut finality_notifications, &[b1.hash()], &[a2.hash()]); + 
finality_notification_check( + &mut finality_notifications, + &[b1.hash()], + &[a1.hash(), a2.hash()], + ); assert!(matches!(finality_notifications.try_recv().unwrap_err(), TryRecvError::Empty)); } @@ -1228,6 +1234,12 @@ fn finalizing_diverged_block_should_trigger_reorg() { // knowing about B2) assert_eq!(client.chain_info().best_hash, b1.hash()); + finality_notification_check( + &mut finality_notifications, + &[b1.hash()], + &[a1.hash(), a2.hash()], + ); + // `SelectChain` should report B2 as best block though assert_eq!(block_on(select_chain.best_chain()).unwrap().hash(), b2.hash()); @@ -1249,7 +1261,6 @@ fn finalizing_diverged_block_should_trigger_reorg() { ClientExt::finalize_block(&client, b3.hash(), None).unwrap(); - finality_notification_check(&mut finality_notifications, &[b1.hash()], &[a2.hash()]); finality_notification_check(&mut finality_notifications, &[b2.hash(), b3.hash()], &[]); assert!(matches!(finality_notifications.try_recv().unwrap_err(), TryRecvError::Empty)); } @@ -1368,15 +1379,20 @@ fn finality_notifications_content() { ClientExt::finalize_block(&client, a2.hash(), None).unwrap(); + finality_notification_check( + &mut finality_notifications, + &[a1.hash(), a2.hash()], + &[c1.hash(), b1.hash(), b2.hash()], + ); + // Import and finalize D4 block_on(client.import_as_final(BlockOrigin::Own, d4.clone())).unwrap(); finality_notification_check( &mut finality_notifications, - &[a1.hash(), a2.hash()], - &[c1.hash(), b2.hash()], + &[d3.hash(), d4.hash()], + &[a3.hash()], ); - finality_notification_check(&mut finality_notifications, &[d3.hash(), d4.hash()], &[a3.hash()]); assert!(matches!(finality_notifications.try_recv().unwrap_err(), TryRecvError::Empty)); } @@ -1565,6 +1581,12 @@ fn doesnt_import_blocks_that_revert_finality() { // B3 at the same height but that doesn't include it ClientExt::finalize_block(&client, a2.hash(), None).unwrap(); + finality_notification_check( + &mut finality_notifications, + &[a1.hash(), a2.hash()], + &[b1.hash(), 
b2.hash()], + ); + let import_err = block_on(client.import(BlockOrigin::Own, b3)).err().unwrap(); let expected_err = ConsensusError::ClientImport(sp_blockchain::Error::NotInFinalizedChain.to_string()); @@ -1606,8 +1628,6 @@ fn doesnt_import_blocks_that_revert_finality() { block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); ClientExt::finalize_block(&client, a3.hash(), None).unwrap(); - finality_notification_check(&mut finality_notifications, &[a1.hash(), a2.hash()], &[b2.hash()]); - finality_notification_check(&mut finality_notifications, &[a3.hash()], &[]); assert!(matches!(finality_notifications.try_recv().unwrap_err(), TryRecvError::Empty)); diff --git a/substrate/primitives/blockchain/src/backend.rs b/substrate/primitives/blockchain/src/backend.rs index d7386a71a0d1e..7bed14d1ab4fa 100644 --- a/substrate/primitives/blockchain/src/backend.rs +++ b/substrate/primitives/blockchain/src/backend.rs @@ -253,7 +253,13 @@ pub trait Backend: &self, finalized_block_hash: Block::Hash, finalized_block_number: NumberFor, + finalized_block_parent_hash: Block::Hash, ) -> std::result::Result, Error> { + // There are no forks at genesis. + if finalized_block_number.is_zero() { + return Ok(DisplacedLeavesAfterFinalization::default()); + } + let leaves = self.leaves()?; let now = std::time::Instant::now(); @@ -265,37 +271,14 @@ pub trait Backend: "Checking for displaced leaves after finalization." ); - // If we have only one leaf there are no forks, and we can return early. 
- if finalized_block_number == Zero::zero() || leaves.len() == 1 { - return Ok(DisplacedLeavesAfterFinalization::default()); - } - // Store hashes of finalized blocks for quick checking later, the last block is the // finalized one let mut finalized_chain = VecDeque::new(); - let current_finalized = match self.header_metadata(finalized_block_hash) { - Ok(metadata) => metadata, - Err(Error::UnknownBlock(_)) => { - debug!( - target: crate::LOG_TARGET, - hash = ?finalized_block_hash, - elapsed = ?now.elapsed(), - "Tried to fetch unknown block, block ancestry has gaps.", - ); - return Ok(DisplacedLeavesAfterFinalization::default()); - }, - Err(e) => { - debug!( - target: crate::LOG_TARGET, - hash = ?finalized_block_hash, - err = ?e, - elapsed = ?now.elapsed(), - "Failed to fetch block.", - ); - return Err(e); - }, - }; - finalized_chain.push_front(MinimalBlockMetadata::from(¤t_finalized)); + finalized_chain.push_front(MinimalBlockMetadata { + number: finalized_block_number, + hash: finalized_block_hash, + parent: finalized_block_parent_hash, + }); // Local cache is a performance optimization in case of finalized block deep below the // tip of the chain with a lot of leaves above finalized block From 9a0f6e651a2df02fe8776a7e04a5e570db8c0826 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 1 Oct 2025 13:53:01 +0200 Subject: [PATCH 129/312] Fix --- substrate/client/rpc/src/state/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/client/rpc/src/state/tests.rs b/substrate/client/rpc/src/state/tests.rs index 6f26cbcbb9936..c35d9efa42b58 100644 --- a/substrate/client/rpc/src/state/tests.rs +++ b/substrate/client/rpc/src/state/tests.rs @@ -53,7 +53,7 @@ async fn should_return_storage() { .add_extra_storage(b":map:acc2".to_vec(), vec![1, 2, 3]) .build(); let genesis_hash = client.genesis_hash(); - let (client, child) = new_full(Arc::new(client), test_executor()); + let (client, child) = new_full(Arc::new(client), 
test_executor(), None); let key = StorageKey(KEY.to_vec()); let ext = allow_unsafe(); From 92d166d1c33358d555338ed2aee9a279c51297da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 1 Oct 2025 14:00:31 +0200 Subject: [PATCH 130/312] Move the proof size recording to its own crate --- Cargo.lock | 11 ++ Cargo.toml | 2 + cumulus/client/consensus/aura/Cargo.toml | 1 + .../src/collators/slot_based/aux_schema.rs | 87 ------------- .../aura/src/collators/slot_based/mod.rs | 1 - .../client/proof-size-recording/Cargo.toml | 18 +++ .../client/proof-size-recording/src/lib.rs | 117 ++++++++++++++++++ 7 files changed, 149 insertions(+), 88 deletions(-) delete mode 100644 cumulus/client/consensus/aura/src/collators/slot_based/aux_schema.rs create mode 100644 cumulus/client/proof-size-recording/Cargo.toml create mode 100644 cumulus/client/proof-size-recording/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index b487370c0218e..41da894ea66d8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4348,6 +4348,7 @@ dependencies = [ "cumulus-client-collator", "cumulus-client-consensus-common", "cumulus-client-parachain-inherent", + "cumulus-client-proof-size-recording", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-relay-chain-interface", @@ -4538,6 +4539,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "cumulus-client-proof-size-recording" +version = "0.1.0" +dependencies = [ + "parity-scale-codec", + "sc-client-api", + "sp-blockchain", + "sp-runtime", +] + [[package]] name = "cumulus-client-service" version = "0.7.0" diff --git a/Cargo.toml b/Cargo.toml index bc5bad755dc2b..d93118556fcfc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,6 +71,7 @@ members = [ "cumulus/client/network", "cumulus/client/parachain-inherent", "cumulus/client/pov-recovery", + "cumulus/client/proof-size-recording", "cumulus/client/relay-chain-inprocess-interface", "cumulus/client/relay-chain-interface", "cumulus/client/relay-chain-minimal-node", @@ -743,6 +744,7 @@ 
cumulus-client-consensus-relay-chain = { path = "cumulus/client/consensus/relay- cumulus-client-network = { path = "cumulus/client/network", default-features = false } cumulus-client-parachain-inherent = { path = "cumulus/client/parachain-inherent", default-features = false } cumulus-client-pov-recovery = { path = "cumulus/client/pov-recovery", default-features = false } +cumulus-client-proof-size-recording = { path = "cumulus/client/proof-size-recording", default-features = false } cumulus-client-service = { path = "cumulus/client/service", default-features = false } cumulus-pallet-aura-ext = { path = "cumulus/pallets/aura-ext", default-features = false } cumulus-pallet-dmp-queue = { default-features = false, path = "cumulus/pallets/dmp-queue" } diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index 899286c9a97d7..2562e4889b303 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -48,6 +48,7 @@ sp-trie = { workspace = true, default-features = true } cumulus-client-collator = { workspace = true, default-features = true } cumulus-client-consensus-common = { workspace = true, default-features = true } cumulus-client-parachain-inherent = { workspace = true, default-features = true } +cumulus-client-proof-size-recording = { workspace = true, default-features = true } cumulus-primitives-aura = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } cumulus-relay-chain-interface = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/aux_schema.rs b/cumulus/client/consensus/aura/src/collators/slot_based/aux_schema.rs deleted file mode 100644 index a6dd484cdaf26..0000000000000 --- a/cumulus/client/consensus/aura/src/collators/slot_based/aux_schema.rs +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// This file is part of Cumulus. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -use codec::{Decode, Encode}; -use sc_client_api::backend::AuxStore; -use sp_blockchain::{Error as ClientError, Result as ClientResult}; - -const STORAGE_PROOF_RECORDING_VERSION: &[u8] = b"cumulus_aura_storage_proof_recording_version"; -const STORAGE_PROOF_RECORDING_CURRENT_VERSION: u32 = 1; - -/// The aux storage key used to store the storage proof size recordings for the given block hash. -pub fn storage_proof_recording_key(block_hash: H) -> Vec { - (b"cumulus_aura_storage_proof_recording", block_hash).encode() -} - -fn load_decode(backend: &B, key: &[u8]) -> ClientResult> -where - B: AuxStore, - T: Decode, -{ - let corrupt = |e: codec::Error| { - ClientError::Backend(format!("Storage proof recording DB is corrupted. Decode error: {}", e)) - }; - match backend.get_aux(key)? { - None => Ok(None), - Some(t) => T::decode(&mut &t[..]).map(Some).map_err(corrupt), - } -} - -/// Write the storage proof size recordings of a block to aux storage. 
-pub(crate) fn write_storage_proof_recording( - block_hash: H, - recordings: Vec, - write_aux: F, -) -> R -where - F: FnOnce(&[(Vec, &[u8])]) -> R, -{ - STORAGE_PROOF_RECORDING_CURRENT_VERSION.using_encoded(|version| { - let key = storage_proof_recording_key(block_hash); - recordings.using_encoded(|s| { - write_aux(&[ - (key, s), - (STORAGE_PROOF_RECORDING_VERSION.to_vec(), version), - ]) - }) - }) -} - -/// Load the storage proof size recordings associated with a block. -pub fn load_storage_proof_recording( - backend: &B, - block_hash: H, -) -> ClientResult>> { - let version = load_decode::<_, u32>(backend, STORAGE_PROOF_RECORDING_VERSION)?; - - match version { - None => Ok(None), - Some(STORAGE_PROOF_RECORDING_CURRENT_VERSION) => - load_decode(backend, storage_proof_recording_key(block_hash).as_slice()), - Some(other) => - Err(ClientError::Backend(format!("Unsupported storage proof recording DB version: {:?}", other))), - } -} - -/// Prune the storage proof size recordings for a block from aux storage. 
-pub(crate) fn prune_storage_proof_recording( - backend: &B, - block_hash: H, -) -> ClientResult<()> { - let key = storage_proof_recording_key(block_hash); - backend.insert_aux(&[], &[key.as_slice()]) -} \ No newline at end of file diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index ddcc751a39805..03ac5188de31c 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -93,7 +93,6 @@ use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Member}; use std::{path::PathBuf, sync::Arc, time::Duration}; -mod aux_schema; mod block_builder_task; mod block_import; mod collation_task; diff --git a/cumulus/client/proof-size-recording/Cargo.toml b/cumulus/client/proof-size-recording/Cargo.toml new file mode 100644 index 0000000000000..520be6ab0a509 --- /dev/null +++ b/cumulus/client/proof-size-recording/Cargo.toml @@ -0,0 +1,18 @@ +[package] +authors.workspace = true +name = "cumulus-client-proof-size-recording" +version = "0.1.0" +edition.workspace = true +description = "Storage proof size recording utilities." +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +codec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } diff --git a/cumulus/client/proof-size-recording/src/lib.rs b/cumulus/client/proof-size-recording/src/lib.rs new file mode 100644 index 0000000000000..918a65a924aaa --- /dev/null +++ b/cumulus/client/proof-size-recording/src/lib.rs @@ -0,0 +1,117 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Proof size recording utilities. + +use codec::{Decode, Encode}; +use sc_client_api::{ + backend::AuxStore, + client::{AuxDataOperations, FinalityNotification, PreCommitActions}, +}; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use sp_runtime::traits::Block as BlockT; +use std::sync::Arc; + +const PROOF_SIZE_RECORDING_VERSION: &[u8] = b"cumulus_proof_size_recording_version"; +const PROOF_SIZE_RECORDING_CURRENT_VERSION: u32 = 1; + +/// The aux storage key used to store the proof size recordings for the given block hash. +fn proof_size_recording_key(block_hash: H) -> Vec { + (b"cumulus_proof_size_recording", block_hash).encode() +} + +fn load_decode(backend: &B, key: &[u8]) -> ClientResult> +where + B: AuxStore, + T: Decode, +{ + let corrupt = |e: codec::Error| { + ClientError::Backend(format!("Proof size recording DB is corrupted. Decode error: {}", e)) + }; + match backend.get_aux(key)? { + None => Ok(None), + Some(t) => T::decode(&mut &t[..]).map(Some).map_err(corrupt), + } +} + +/// Write the proof size recordings of a block to aux storage. 
+pub fn write_proof_size_recording( + block_hash: H, + recordings: Vec, + write_aux: F, +) -> R +where + F: FnOnce(&[(Vec, &[u8])]) -> R, +{ + PROOF_SIZE_RECORDING_CURRENT_VERSION.using_encoded(|version| { + let key = proof_size_recording_key(block_hash); + recordings.using_encoded(|s| { + write_aux(&[(key, s), (PROOF_SIZE_RECORDING_VERSION.to_vec(), version)]) + }) + }) +} + +/// Load the proof size recordings associated with a block. +pub fn load_proof_size_recording( + backend: &B, + block_hash: H, +) -> ClientResult>> { + let version = load_decode::<_, u32>(backend, PROOF_SIZE_RECORDING_VERSION)?; + + match version { + None => Ok(None), + Some(PROOF_SIZE_RECORDING_CURRENT_VERSION) => + load_decode(backend, proof_size_recording_key(block_hash).as_slice()), + Some(other) => Err(ClientError::Backend(format!( + "Unsupported proof size recording DB version: {:?}", + other + ))), + } +} + +/// Cleanup auxiliary storage for finalized blocks. +/// +/// This function removes proof size recordings for blocks that are no longer needed +/// after finalization. It processes the finalized blocks and their stale heads to +/// determine which recordings can be safely removed. +fn aux_storage_cleanup(notification: &FinalityNotification) -> AuxDataOperations +where + Block: BlockT, +{ + // Convert the hashes to deletion operations + notification + .stale_blocks + .iter() + .map(|b| (proof_size_recording_key(b.hash), None)) + .collect() +} + +/// Register a finality action for cleaning up proof size recordings. +/// +/// This should be called during consensus initialization to automatically clean up +/// proof size recordings when blocks are finalized. 
+pub fn register_proof_size_recording_cleanup(client: Arc) +where + C: PreCommitActions + 'static, + Block: BlockT, +{ + let on_finality = move |notification: &FinalityNotification| -> AuxDataOperations { + aux_storage_cleanup(notification) + }; + + client.register_finality_action(Box::new(on_finality)); +} From 98f0d3000c1fcfad4a0168aa64c38973ffe1c2f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 1 Oct 2025 15:31:21 +0200 Subject: [PATCH 131/312] Pass `orig_hash` of the block --- cumulus/client/service/src/lib.rs | 4 +++- substrate/client/tracing/src/block/mod.rs | 17 ++++++++++++----- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index 251d4f6f3bdfd..808285f2e9366 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -645,6 +645,8 @@ where runtime_api.register_extension(ProofSizeExt::new(storage_proof_recorder.clone())); runtime_api.record_proof_with_recorder(storage_proof_recorder); - runtime_api.execute_block(parent_hash, block).map_err(Into::into) + runtime_api + .execute_block(*block.header().parent_hash(), block) + .map_err(Into::into) } } diff --git a/substrate/client/tracing/src/block/mod.rs b/substrate/client/tracing/src/block/mod.rs index 967c814099f17..35f01ae2837bb 100644 --- a/substrate/client/tracing/src/block/mod.rs +++ b/substrate/client/tracing/src/block/mod.rs @@ -57,11 +57,15 @@ const REQUIRED_EVENT_FIELD: &str = "method"; /// [`DefaultExecuteBlock`] provides a default implementation that simply forwards the block to /// [`Core::execute_block`] without any other changes. pub trait TracingExecuteBlock: Send + Sync { - /// Execute the given `block` on top of the state of `parent_hash`. + /// Execute the given `block`. + /// + /// The `block` is prepared to be executed right away, this means that any `Seal` was already + /// removed from the header. 
As this changes the `hash` of the block, `orig_hash` is passed + /// alongside to the callee. /// /// The execution should be done sync on the same thread, because the caller will register /// special tracing collectors. - fn execute_block(&self, parent_hash: Block::Hash, block: Block) -> sp_blockchain::Result<()>; + fn execute_block(&self, orig_hash: Block::Hash, block: Block) -> sp_blockchain::Result<()>; } /// Default implementation of [`ExecuteBlock`]. @@ -84,8 +88,11 @@ where Client::Api: Core, Block: BlockT, { - fn execute_block(&self, parent_hash: Block::Hash, block: Block) -> sp_blockchain::Result<()> { - self.client.runtime_api().execute_block(parent_hash, block).map_err(Into::into) + fn execute_block(&self, _: Block::Hash, block: Block) -> sp_blockchain::Result<()> { + self.client + .runtime_api() + .execute_block(*block.header().parent_hash(), block) + .map_err(Into::into) } } @@ -277,7 +284,7 @@ where if let Err(e) = dispatcher::with_default(&dispatch, || { let span = tracing::info_span!(target: TRACE_TARGET, "trace_block"); let _enter = span.enter(); - self.execute_block.execute_block(parent_hash, block) + self.execute_block.execute_block(self.block, block) }) { return Err(Error::Dispatch(format!( "Failed to collect traces and execute block: {}", From 183bf63f486615d5a6238190f8b78859874906bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 1 Oct 2025 17:14:55 +0200 Subject: [PATCH 132/312] Fix `tracing_block` job --- Cargo.lock | 2 ++ .../slot_based/block_builder_task.rs | 24 +++++++++++--- .../src/collators/slot_based/block_import.rs | 22 +++++++++++-- .../aura/src/collators/slot_based/mod.rs | 7 ++++- .../client/proof-size-recording/src/lib.rs | 23 ++++++-------- cumulus/client/service/Cargo.toml | 1 + cumulus/client/service/src/lib.rs | 30 +++++++++++++++--- cumulus/zombienet/zombienet-sdk/Cargo.toml | 1 + .../zombie_ci/block_bundling/tracing_block.rs | 31 +++++++------------ 9 files changed, 96 insertions(+), 45 
deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 41da894ea66d8..4726ace2be2a9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4559,6 +4559,7 @@ dependencies = [ "cumulus-client-consensus-common", "cumulus-client-network", "cumulus-client-pov-recovery", + "cumulus-client-proof-size-recording", "cumulus-primitives-core", "cumulus-primitives-proof-size-hostfunction", "cumulus-relay-chain-inprocess-interface", @@ -5234,6 +5235,7 @@ dependencies = [ "serde_json", "sp-core 28.0.0", "sp-keyring", + "sp-rpc", "sp-statement-store", "tokio", "zombienet-configuration", diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index ffebac26d983f..c87fe70601318 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -31,6 +31,7 @@ use crate::{ use codec::{Codec, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; +use cumulus_client_proof_size_recording::prepare_proof_size_recording_transaction; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; use cumulus_primitives_core::{ extract_relay_parent, rpsr_digest, BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, @@ -547,7 +548,7 @@ where let mut extra_extensions = Extensions::default(); extra_extensions.register(ProofSizeExt::new(proof_size_recorder.clone())); - let Ok(Some((built_block, import_block))) = collator + let Ok(Some((built_block, mut import_block))) = collator .build_block(BuildBlockAndImportParams { parent_header: &parent_header, slot_claim, @@ -572,14 +573,29 @@ where return Ok(None); }; + parent_hash = built_block.block.header().hash(); + parent_header = built_block.block.header().clone(); + + // Extract and add proof size recordings to 
the import block + let recorded_sizes = proof_size_recorder + .recorded_estimations() + .into_iter() + .map(|size| size as u32) + .collect::>(); + + if !recorded_sizes.is_empty() { + prepare_proof_size_recording_transaction(parent_hash, recorded_sizes).for_each( + |(k, v)| { + import_block.auxiliary.push((k, Some(v))); + }, + ); + } + if let Err(error) = collator.import_block(import_block).await { tracing::error!(target: crate::LOG_TARGET, ?error, "Failed to import built block."); return Ok(None); } - parent_hash = built_block.block.header().hash(); - parent_header = built_block.block.header().clone(); - // Announce the newly built block to our peers. collator.collator_service().announce_block(parent_hash, None); diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs index 63ba9f9ec50e1..e1d8e66c0eed9 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -16,6 +16,7 @@ // along with Cumulus. If not, see . use codec::Codec; +use cumulus_client_proof_size_recording::prepare_proof_size_recording_transaction; use cumulus_primitives_core::{CoreInfo, CumulusDigestItem, RelayBlockIdentifier}; use futures::{stream::FusedStream, StreamExt}; use parking_lot::Mutex; @@ -29,7 +30,7 @@ use sp_api::{ use sp_consensus::BlockOrigin; use sp_consensus_aura::AuraApi; use sp_runtime::traits::{Block as BlockT, HashingFor, Header as _}; -use sp_trie::{proof_size_extension::ProofSizeExt, recorder::IgnoredNodes}; +use sp_trie::{proof_size_extension::{ProofSizeExt, RecordingProofSizeProvider}, recorder::IgnoredNodes}; use std::{collections::HashMap, marker::PhantomData, sync::Arc}; /// Handle for receiving the block and the storage proof from the [`SlotBasedBlockImport`]. 
@@ -130,13 +131,14 @@ impl SlotBasedBlockImport::with_ignored_nodes(nodes_to_ignore.clone()); + let proof_size_recorder = RecordingProofSizeProvider::new(recorder.clone()); let mut runtime_api = self.client.runtime_api(); runtime_api.set_call_context(CallContext::Onchain); runtime_api.record_proof_with_recorder(recorder.clone()); - runtime_api.register_extension(ProofSizeExt::new(recorder)); + runtime_api.register_extension(ProofSizeExt::new(proof_size_recorder.clone())); let parent_hash = *params.header.parent_hash(); @@ -163,6 +165,22 @@ impl SlotBasedBlockImport>(); + + if !recorded_sizes.is_empty() { + let block_hash = params.header.hash(); + prepare_proof_size_recording_transaction(block_hash, recorded_sizes).for_each( + |(k, v)| { + params.auxiliary.push((k, Some(v))); + }, + ); + } + params.state_action = StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(gen_storage_changes)); diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index 03ac5188de31c..2345cebff50ff 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -71,6 +71,7 @@ pub use block_import::{SlotBasedBlockImport, SlotBasedBlockImportHandle}; use codec::Codec; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; +use cumulus_client_proof_size_recording::register_proof_size_recording_cleanup; use cumulus_primitives_aura::AuraUnincludedSegmentApi; use cumulus_primitives_core::{RelayParentOffsetApi, SlotSchedule}; use cumulus_relay_chain_interface::RelayChainInterface; @@ -78,7 +79,7 @@ use futures::FutureExt; use polkadot_primitives::{ CollatorPair, CoreIndex, Hash as RelayHash, Id as ParaId, ValidationCodeHash, }; -use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, 
UsageProvider}; +use sc_client_api::{backend::AuxStore, client::PreCommitActions, BlockBackend, BlockOf, UsageProvider}; use sc_consensus::BlockImport; use sc_utils::mpsc::tracing_unbounded; use sp_api::{ProvideRuntimeApi, StorageProof}; @@ -157,6 +158,7 @@ pub fn run + BlockBackend + UsageProvider + + PreCommitActions + Send + Sync + 'static, @@ -199,6 +201,9 @@ pub fn run( +/// Prepare a transaction to write the proof size recordings to the aux storage. +/// +/// Returns the key-value pairs that need to be written to the aux storage. +pub fn prepare_proof_size_recording_transaction( block_hash: H, recordings: Vec, - write_aux: F, -) -> R -where - F: FnOnce(&[(Vec, &[u8])]) -> R, -{ - PROOF_SIZE_RECORDING_CURRENT_VERSION.using_encoded(|version| { - let key = proof_size_recording_key(block_hash); - recordings.using_encoded(|s| { - write_aux(&[(key, s), (PROOF_SIZE_RECORDING_VERSION.to_vec(), version)]) - }) - }) +) -> impl Iterator, Vec)> { + let current_version = PROOF_SIZE_RECORDING_CURRENT_VERSION.encode(); + let key = proof_size_recording_key(block_hash); + let recordings = recordings.encode(); + + [(key, recordings), (PROOF_SIZE_RECORDING_VERSION.to_vec(), current_version)].into_iter() } /// Load the proof size recordings associated with a block. 
diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index 176b389ff06a5..d478214c1f9b1 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -48,6 +48,7 @@ cumulus-client-collator = { workspace = true, default-features = true } cumulus-client-consensus-common = { workspace = true, default-features = true } cumulus-client-network = { workspace = true, default-features = true } cumulus-client-pov-recovery = { workspace = true, default-features = true } +cumulus-client-proof-size-recording = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } cumulus-relay-chain-inprocess-interface = { workspace = true, default-features = true } diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index 808285f2e9366..0d51dba021a97 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -23,6 +23,7 @@ use cumulus_client_cli::CollatorOptions; use cumulus_client_consensus_common::ParachainConsensus; use cumulus_client_network::{AssumeSybilResistance, RequireSecondedInBlockAnnounce}; use cumulus_client_pov_recovery::{PoVRecovery, RecoveryDelayRange, RecoveryHandle}; +use cumulus_client_proof_size_recording::load_proof_size_recording; use cumulus_primitives_core::{CollectCollationInfo, ParaId}; pub use cumulus_primitives_proof_size_hostfunction::storage_proof_size; use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain; @@ -32,7 +33,8 @@ use futures::{channel::mpsc, StreamExt}; use polkadot_primitives::{CandidateEvent, CollatorPair, OccupiedCoreAssumption}; use prometheus::{Histogram, HistogramOpts, Registry}; use sc_client_api::{ - Backend as BackendT, BlockBackend, BlockchainEvents, Finalizer, ProofProvider, UsageProvider, + AuxStore, Backend as BackendT, BlockBackend, BlockchainEvents, 
Finalizer, ProofProvider, + UsageProvider, }; use sc_consensus::{ import_queue::{ImportQueue, ImportQueueService}, @@ -55,8 +57,11 @@ use sp_runtime::{ traits::{Block as BlockT, BlockIdTo, Header}, SaturatedConversion, Saturating, }; -use sp_trie::proof_size_extension::ProofSizeExt; +use sp_trie::proof_size_extension::{ + ProofSizeExt, RecordedProofSizeEstimations, ReplayProofSizeProvider, +}; use std::{ + collections::VecDeque, sync::Arc, time::{Duration, Instant}, }; @@ -636,13 +641,28 @@ impl ParachainTracingExecuteBlock { impl TracingExecuteBlock for ParachainTracingExecuteBlock where Block: BlockT, - Client: ProvideRuntimeApi + Send + Sync, + Client: ProvideRuntimeApi + AuxStore + Send + Sync, Client::Api: Core, { - fn execute_block(&self, parent_hash: Block::Hash, block: Block) -> sp_blockchain::Result<()> { + fn execute_block(&self, orig_hash: Block::Hash, block: Block) -> sp_blockchain::Result<()> { let mut runtime_api = self.client.runtime_api(); let storage_proof_recorder = ProofRecorder::::default(); - runtime_api.register_extension(ProofSizeExt::new(storage_proof_recorder.clone())); + + // Try to load proof size recordings for this block + match load_proof_size_recording(&*self.client, orig_hash)? 
{ + Some(recordings) => { + let recorded = RecordedProofSizeEstimations( + recordings.into_iter().map(|x| x as usize).collect(), + ); + let replay_provider = ReplayProofSizeProvider::from_recorded(recorded); + runtime_api.register_extension(ProofSizeExt::new(replay_provider)); + }, + None => { + // No recordings found or error loading, fall back to default recorder + runtime_api.register_extension(ProofSizeExt::new(storage_proof_recorder.clone())); + }, + } + runtime_api.record_proof_with_recorder(storage_proof_recorder); runtime_api diff --git a/cumulus/zombienet/zombienet-sdk/Cargo.toml b/cumulus/zombienet/zombienet-sdk/Cargo.toml index 43eb40834f305..e63d9050d52f9 100644 --- a/cumulus/zombienet/zombienet-sdk/Cargo.toml +++ b/cumulus/zombienet/zombienet-sdk/Cargo.toml @@ -20,6 +20,7 @@ zombienet-sdk = { workspace = true } zombienet-orchestrator = { workspace = true } zombienet-configuration = { workspace = true } cumulus-zombienet-sdk-helpers = { workspace = true } +sp-rpc = { workspace = true, default-features = true } sp-statement-store = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs index d2e1cd2659527..ed77a1d455f6f 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs @@ -20,6 +20,7 @@ use anyhow::anyhow; use cumulus_zombienet_sdk_helpers::submit_extrinsic_and_wait_for_finalization_success; use futures::stream::StreamExt; use serde_json::json; +use sp_rpc::tracing::{BlockTrace, TraceBlockResponse}; use zombienet_sdk::{ subxt::{ backend::rpc::RpcClient, @@ -88,7 +89,7 @@ async fn block_bundling_tracing_block() -> Result<(), anyhow::Error> { 
log::info!("Calling tracing_block RPC for the block containing the transfer"); // Make the tracing_block RPC call for the block containing our transfer - let trace_result: serde_json::Value = rpc_client + let trace_result: TraceBlockResponse = rpc_client .request( "state_traceBlock", rpc_params![ @@ -102,26 +103,16 @@ async fn block_bundling_tracing_block() -> Result<(), anyhow::Error> { log::info!("Successfully received tracing result for transfer block"); - // Verify that we got a valid response (non-empty) - if trace_result.is_null() { - return Err(anyhow!("tracing_block returned null result")); + // Decode and verify the BlockTrace is successful + match trace_result { + TraceBlockResponse::TraceError(error) => { + Err(anyhow!("Block tracing failed: {}", error.error)); + }, + TraceBlockResponse::BlockTrace(_) => { + log::info!("✅ Block trace successful!"); + Ok(()) + }, } - - // Verify the trace contains information about our transfer - if let Some(trace_obj) = trace_result.as_object() { - log::info!("Trace result contains {} top-level keys", trace_obj.len()); - - // Log some details about the trace for debugging - if let Some(storage_changes) = trace_obj.get("storageChanges") { - log::info!("Found storage changes in trace"); - } - if let Some(block_trace) = trace_obj.get("block") { - log::info!("Found block trace information"); - } - } - - log::info!("Block bundling tracing test with transfer finished successfully"); - Ok(()) } async fn build_network_config() -> Result { From cb4c8a6f60df91af07ae56e79dc0a8d17e810f46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 1 Oct 2025 21:41:48 +0200 Subject: [PATCH 133/312] Fix --- .../tests/zombie_ci/block_bundling/tracing_block.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs index ed77a1d455f6f..1d3d9178f21fc 
100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs @@ -106,7 +106,7 @@ async fn block_bundling_tracing_block() -> Result<(), anyhow::Error> { // Decode and verify the BlockTrace is successful match trace_result { TraceBlockResponse::TraceError(error) => { - Err(anyhow!("Block tracing failed: {}", error.error)); + Err(anyhow!("Block tracing failed: {}", error.error)) }, TraceBlockResponse::BlockTrace(_) => { log::info!("✅ Block trace successful!"); From eeeba247542c8b7953f147d12e26e28d75420a34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 2 Oct 2025 20:52:51 +0200 Subject: [PATCH 134/312] Fixes --- .../slot_based/block_builder_task.rs | 12 ++++----- .../slot_based/relay_chain_data_cache.rs | 5 ++++ .../aura/src/collators/slot_based/tests.rs | 25 +++++++++---------- cumulus/client/consensus/aura/src/lib.rs | 2 +- .../src/validate_block/tests.rs | 9 ++++--- cumulus/test/client/src/block_builder.rs | 22 ++++++++++------ cumulus/test/client/src/lib.rs | 7 +++--- cumulus/test/runtime/src/lib.rs | 2 +- .../test/service/benches/block_production.rs | 2 +- .../benches/block_production_glutton.rs | 2 +- substrate/client/block-builder/src/lib.rs | 11 +++++--- substrate/client/consensus/aura/src/lib.rs | 6 +---- 12 files changed, 60 insertions(+), 45 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index c87fe70601318..6227b6e27af41 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -685,7 +685,7 @@ fn adjust_para_to_relay_parent_slot( /// /// The function traverses backwards from the best block until it finds the block at the specified /// offset, collecting all blocks 
in between to maintain the chain of ancestry. -async fn offset_relay_parent_find_descendants( +pub async fn offset_relay_parent_find_descendants( relay_chain_data_cache: &mut RelayChainDataCache, relay_best_block: RelayHash, relay_parent_offset: u32, @@ -736,7 +736,7 @@ where } /// Return value of [`determine_cores`]. -struct Cores { +pub struct Cores { selector: CoreSelector, claim_queue_offset: ClaimQueueOffset, core_indices: Vec, @@ -744,7 +744,7 @@ struct Cores { impl Cores { /// Returns the current [`CoreInfo`]. - fn core_info(&self) -> CoreInfo { + pub fn core_info(&self) -> CoreInfo { CoreInfo { selector: self.selector, claim_queue_offset: self.claim_queue_offset, @@ -758,7 +758,7 @@ impl Cores { } /// Returns the current [`CoreIndex`]. - fn core_index(&self) -> CoreIndex { + pub fn core_index(&self) -> CoreIndex { self.core_indices[self.selector.0 as usize] } @@ -775,7 +775,7 @@ impl Cores { } /// Returns the total number of cores. - fn total_cores(&self) -> u32 { + pub fn total_cores(&self) -> u32 { self.core_indices.len() as u32 } @@ -793,7 +793,7 @@ impl Cores { /// Determine the cores for the given `para_id`. /// /// Takes into account the `parent` core to find the next available cores. 
-async fn determine_cores( +pub async fn determine_cores( relay_chain_data_cache: &mut RelayChainDataCache, relay_parent: &RelayHeader, para_id: ParaId, diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs b/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs index 2217bed3e908f..b4bb1f77e28e2 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs @@ -106,4 +106,9 @@ where Ok(RelayChainData { relay_parent_header, claim_queue, max_pov_size }) } + + #[cfg(test)] + pub fn insert_test_data(&mut self, relay_parent_hash: RelayHash, data: RelayChainData) { + self.cached_data.insert(relay_parent_hash, data); + } } diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs b/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs index a26ac2c581e92..cab4d57fd0676 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs @@ -16,7 +16,7 @@ // along with Cumulus. If not, see . 
use super::{ - block_builder_task::{determine_core, offset_relay_parent_find_descendants}, + block_builder_task::{determine_cores, offset_relay_parent_find_descendants}, relay_chain_data_cache::{RelayChainData, RelayChainDataCache}, }; use async_trait::async_trait; @@ -125,11 +125,11 @@ async fn determine_core_new_relay_parent() { // Setup claim queue data for the cache cache.set_test_data(relay_parent.clone(), vec![CoreIndex(0), CoreIndex(1)]); - let result = determine_core(&mut cache, &relay_parent, 1.into(), ¶_parent, 0).await; + let result = determine_cores(&mut cache, &relay_parent, 1.into(), ¶_parent).await; let core = result.unwrap(); let core = core.unwrap(); - assert_eq!(core.core_selector(), CoreSelector(0)); + assert_eq!(core.core_info().selector, CoreSelector(0)); assert_eq!(core.core_index(), CoreIndex(0)); assert_eq!(core.total_cores(), 2); } @@ -174,11 +174,11 @@ async fn determine_core_with_core_info() { // Setup claim queue data for the cache cache.set_test_data(relay_parent.clone(), vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)]); - let result = determine_core(&mut cache, &relay_parent, 1.into(), ¶_parent, 0).await; + let result = determine_cores(&mut cache, &relay_parent, 1.into(), ¶_parent).await; match result { Ok(Some(core)) => { - assert_eq!(core.core_selector(), CoreSelector(1)); // Should be next selector (0 + 1) + assert_eq!(core.core_info().selector, CoreSelector(1)); // Should be next selector (0 + 1) assert_eq!(core.core_index(), CoreIndex(1)); assert_eq!(core.total_cores(), 3); }, @@ -208,7 +208,7 @@ async fn determine_core_no_cores_available() { // Setup empty claim queue cache.set_test_data(relay_parent.clone(), vec![]); - let result = determine_core(&mut cache, &relay_parent, 1.into(), ¶_parent, 0).await; + let result = determine_cores(&mut cache, &relay_parent, 1.into(), ¶_parent).await; let core = result.unwrap(); assert!(core.is_none()); @@ -253,7 +253,7 @@ async fn determine_core_selector_overflow() { // Setup claim queue with 
only 2 cores cache.set_test_data(relay_parent.clone(), vec![CoreIndex(0), CoreIndex(1)]); - let result = determine_core(&mut cache, &relay_parent, 1.into(), ¶_parent, 0).await; + let result = determine_cores(&mut cache, &relay_parent, 1.into(), ¶_parent).await; let core = result.unwrap(); assert!(core.is_none()); // Should return None when selector overflows @@ -297,12 +297,12 @@ async fn determine_core_uses_last_claimed_core_selector() { Some(CoreSelector(1)), ); - let result = determine_core(&mut cache, &relay_parent, 1.into(), ¶_parent, 0).await; + let result = determine_cores(&mut cache, &relay_parent, 1.into(), ¶_parent).await; match result { Ok(Some(core)) => { // Should use last_claimed_core_selector (1) + 1 = 2 - assert_eq!(core.core_selector(), CoreSelector(2)); + assert_eq!(core.core_info().selector, CoreSelector(2)); assert_eq!(core.core_index(), CoreIndex(2)); assert_eq!(core.total_cores(), 3); }, @@ -350,7 +350,7 @@ async fn determine_core_uses_last_claimed_core_selector_wraps_around() { Some(CoreSelector(2)), ); - let result = determine_core(&mut cache, &relay_parent, 1.into(), ¶_parent, 0).await; + let result = determine_cores(&mut cache, &relay_parent, 1.into(), ¶_parent).await; match result { Ok(Some(_)) => panic!("Expected None due to selector overflow"), @@ -399,12 +399,12 @@ async fn determine_core_no_last_claimed_core_selector() { None, ); - let result = determine_core(&mut cache, &relay_parent, 1.into(), ¶_parent, 0).await; + let result = determine_cores(&mut cache, &relay_parent, 1.into(), ¶_parent).await; match result { Ok(Some(core)) => { // Should start from selector 0 + 1 = 1 when no last selector - assert_eq!(core.core_selector(), CoreSelector(1)); + assert_eq!(core.core_info().selector, CoreSelector(1)); assert_eq!(core.core_index(), CoreIndex(1)); assert_eq!(core.total_cores(), 3); }, @@ -645,7 +645,6 @@ impl RelayChainDataCache { relay_parent_header, claim_queue: claim_queue_snapshot, max_pov_size: 1024 * 1024, - 
last_claimed_core_selector, }; self.insert_test_data(relay_parent_hash, data); diff --git a/cumulus/client/consensus/aura/src/lib.rs b/cumulus/client/consensus/aura/src/lib.rs index a55012ed4473c..1e4ab56165b48 100644 --- a/cumulus/client/consensus/aura/src/lib.rs +++ b/cumulus/client/consensus/aura/src/lib.rs @@ -26,7 +26,7 @@ use codec::Encode; use cumulus_primitives_core::{relay_chain::HeadData, PersistedValidationData}; use polkadot_node_primitives::PoV; use polkadot_primitives::{BlockNumber as RBlockNumber, Hash as RHash}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member, NumberFor}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::{fs, fs::File, path::PathBuf}; mod import_queue; diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index e322329b1f44f..553e6d7a6ca5b 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -142,6 +142,7 @@ fn build_block_with_witness( let cumulus_test_client::BlockBuilderAndSupportData { mut block_builder, persisted_validation_data, + .. 
} = client.init_block_builder_with_pre_digests(Some(validation_data), sproof_builder, pre_digests); extra_extrinsics.into_iter().for_each(|e| block_builder.push(e).unwrap()); @@ -193,6 +194,7 @@ fn build_multiple_blocks_with_witness( let cumulus_test_client::BlockBuilderAndSupportData { mut block_builder, persisted_validation_data: p_v_data, + proof_recorder, } = client.init_block_builder_with_ignored_nodes( parent_head.hash(), Some(validation_data.clone()), @@ -230,11 +232,10 @@ fn build_multiple_blocks_with_witness( }) .unwrap(); - ignored_nodes.extend(IgnoredNodes::from_storage_proof::( - &built_block.proof.clone().unwrap(), - )); + let proof_new = proof_recorder.drain_storage_proof(); + ignored_nodes.extend(IgnoredNodes::from_storage_proof::(&proof_new)); ignored_nodes.extend(IgnoredNodes::from_memory_db(built_block.storage_changes.transaction)); - proof = StorageProof::merge([proof, built_block.proof.unwrap()]); + proof = StorageProof::merge([proof, proof_new]); parent_head = built_block.block.header.clone(); diff --git a/cumulus/test/client/src/block_builder.rs b/cumulus/test/client/src/block_builder.rs index 224d3778d9400..fce3797c1277b 100644 --- a/cumulus/test/client/src/block_builder.rs +++ b/cumulus/test/client/src/block_builder.rs @@ -30,6 +30,7 @@ use sp_runtime::{traits::Header as HeaderT, Digest, DigestItem}; pub struct BlockBuilderAndSupportData<'a> { pub block_builder: sc_block_builder::BlockBuilder<'a, Block, Client>, pub persisted_validation_data: PersistedValidationData, + pub proof_recorder: ProofRecorder, } /// An extension for the Cumulus test client to init a block builder. 
@@ -144,13 +145,14 @@ fn init_block_builder( .collect::>(), }; + let proof_recorder = + ProofRecorder::::with_ignored_nodes(ignored_nodes.unwrap_or_default()); + let mut block_builder = BlockBuilderBuilder::new(client) .on_parent_block(at) .fetch_parent_block_number(client) .unwrap() - .with_proof_recorder(Some(ProofRecorder::::with_ignored_nodes( - ignored_nodes.unwrap_or_default(), - ))) + .with_proof_recorder(Some(proof_recorder.clone())) .with_inherent_digests(pre_digests) .build() .expect("Creates new block builder for test runtime"); @@ -187,7 +189,11 @@ fn init_block_builder( .into_iter() .for_each(|ext| block_builder.push(ext).expect("Pushes inherent")); - BlockBuilderAndSupportData { block_builder, persisted_validation_data: validation_data } + BlockBuilderAndSupportData { + block_builder, + persisted_validation_data: validation_data, + proof_recorder, + } } impl InitBlockBuilder for Client { @@ -275,11 +281,13 @@ pub trait BuildParachainBlockData { impl<'a> BuildParachainBlockData for sc_block_builder::BlockBuilder<'a, Block, Client> { fn build_parachain_block(self, parent_state_root: Hash) -> ParachainBlockData { + let proof_recorder = self + .proof_recorder() + .expect("Proof recorder is always set for the test block builder; qed"); let built_block = self.build().expect("Builds the block"); - let storage_proof = built_block - .proof - .expect("We enabled proof recording before.") + let storage_proof = proof_recorder + .drain_storage_proof() .into_compact_proof::<
::Hashing>(parent_state_root) .expect("Creates the compact proof"); diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index ae22a1631defa..4f5c70e3f0b3f 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -26,8 +26,8 @@ pub use polkadot_parachain_primitives::primitives::{ BlockData, HeadData, ValidationParams, ValidationResult, }; use runtime::{ - Balance, Block, BlockHashCount, Runtime, RuntimeCall, Signature, SignedPayload, TxExtension, - UncheckedExtrinsic, VERSION, + test_pallet, Balance, Block, BlockHashCount, Runtime, RuntimeCall, Signature, SignedPayload, + TxExtension, UncheckedExtrinsic, VERSION, }; use sc_consensus_aura::{ find_pre_digest, @@ -152,6 +152,7 @@ pub fn generate_extrinsic_with_pair( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + test_pallet::TestTransactionExtension::::default(), ) .into(), ); @@ -161,7 +162,7 @@ pub fn generate_extrinsic_with_pair( let raw_payload = SignedPayload::from_raw( function.clone(), tx_ext.clone(), - ((), (), VERSION.spec_version, genesis_block, current_block_hash, (), (), ()), + ((), (), VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()), ); let signature = raw_payload.using_encoded(|e| origin.sign(e)); diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index d48c21df939b5..4f35ebe846fa7 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -62,7 +62,7 @@ pub mod async_backing { } mod genesis_config_presets; -mod test_pallet; +pub mod test_pallet; extern crate alloc; diff --git a/cumulus/test/service/benches/block_production.rs b/cumulus/test/service/benches/block_production.rs index 3b0db578041f0..246b14deead67 100644 --- a/cumulus/test/service/benches/block_production.rs +++ b/cumulus/test/service/benches/block_production.rs @@ -81,7 +81,7 @@ fn benchmark_block_production(c: 
&mut Criterion) { let mut block_builder = BlockBuilderBuilder::new(&*client) .on_parent_block(chain.best_hash) .with_parent_block_number(chain.best_number) - .enable_proof_recording() + .with_proof_recorder(Some(Default::default())) .build() .unwrap(); diff --git a/cumulus/test/service/benches/block_production_glutton.rs b/cumulus/test/service/benches/block_production_glutton.rs index 6ab2c0e56bd18..31e0d3ce1d494 100644 --- a/cumulus/test/service/benches/block_production_glutton.rs +++ b/cumulus/test/service/benches/block_production_glutton.rs @@ -78,7 +78,7 @@ fn benchmark_block_production_compute(c: &mut Criterion) { let mut block_builder = BlockBuilderBuilder::new(&*client) .on_parent_block(best_hash) .with_parent_block_number(best_number) - .enable_proof_recording() + .with_proof_recorder(Some(Default::default())) .build() .unwrap(); block_builder.push(validation_data).unwrap(); diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index 267a836ac3a49..5f21ccd368ea0 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -368,6 +368,11 @@ where size } } + + /// Returns the [`ProofRecorder`] used by the block builder. 
+ pub fn proof_recorder(&self) -> Option> { + self.api.proof_recorder() + } } #[cfg(test)] @@ -390,7 +395,7 @@ mod tests { let storage_proof_recorder = ProofRecorder::::default(); - let block = BlockBuilderBuilder::new(&client) + BlockBuilderBuilder::new(&client) .on_parent_block(genesis_hash) .with_parent_block_number(0) .with_proof_recorder(storage_proof_recorder.clone()) @@ -444,13 +449,13 @@ mod tests { block_builder.push(ExtrinsicBuilder::new_read(8).build()).unwrap(); - let block = block_builder.build().unwrap(); + block_builder.build().unwrap(); let proof_without_panic = proof_recorder.drain_storage_proof().encoded_size(); let proof_recorder = ProofRecorder::::default(); - let block = BlockBuilderBuilder::new(&client) + BlockBuilderBuilder::new(&client) .on_parent_block(genesis_hash) .with_parent_block_number(0) .with_proof_recorder(proof_recorder.clone()) diff --git a/substrate/client/consensus/aura/src/lib.rs b/substrate/client/consensus/aura/src/lib.rs index f0b68bc13e254..fd8ae3d660de4 100644 --- a/substrate/client/consensus/aura/src/lib.rs +++ b/substrate/client/consensus/aura/src/lib.rs @@ -556,13 +556,9 @@ mod tests { use sp_application_crypto::{key_types::AURA, AppCrypto}; use sp_consensus::{NoNetwork as DummyOracle, Proposal, ProposeArgs}; use sp_consensus_aura::sr25519::AuthorityPair; - use sp_inherents::InherentData; use sp_keyring::sr25519::Keyring; use sp_keystore::Keystore; - use sp_runtime::{ - traits::{Block as BlockT, Header as _}, - Digest, - }; + use sp_runtime::traits::{Block as BlockT, Header as _}; use sp_timestamp::Timestamp; use std::{ task::Poll, From 8b6fde5ade2be3bf7407418336a21b6c22ac161a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 8 Oct 2025 22:21:32 +0200 Subject: [PATCH 135/312] Split up some stuff and document it --- cumulus/pallets/parachain-system/src/lib.rs | 1 - .../src/max_parachain_block_weight.rs | 608 ------------------ .../src/max_parachain_block_weight/mock.rs | 107 +++ 
.../src/max_parachain_block_weight/mod.rs | 160 +++++ .../pre_inherents_hook.rs | 59 ++ .../src/max_parachain_block_weight/tests.rs | 280 ++++++++ .../transaction_extension.rs | 352 ++++++++++ cumulus/test/runtime/src/lib.rs | 2 +- 8 files changed, 959 insertions(+), 610 deletions(-) delete mode 100644 cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs create mode 100644 cumulus/pallets/parachain-system/src/max_parachain_block_weight/mock.rs create mode 100644 cumulus/pallets/parachain-system/src/max_parachain_block_weight/mod.rs create mode 100644 cumulus/pallets/parachain-system/src/max_parachain_block_weight/pre_inherents_hook.rs create mode 100644 cumulus/pallets/parachain-system/src/max_parachain_block_weight/tests.rs create mode 100644 cumulus/pallets/parachain-system/src/max_parachain_block_weight/transaction_extension.rs diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 43f758da96ec1..c6f7b6ca1fe4b 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -69,7 +69,6 @@ mod mock; mod tests; pub mod weights; -pub use max_parachain_block_weight::{DynamicMaxBlockWeight, MaxParachainBlockWeight}; pub use weights::WeightInfo; mod unincluded_segment; diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs deleted file mode 100644 index acafa928a9595..0000000000000 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight.rs +++ /dev/null @@ -1,608 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Utilities for calculating maximum parachain block weight based on core assignments. - -use crate::Config; -use alloc::vec::Vec; -use codec::{Decode, DecodeWithMemTracking, Encode}; -use cumulus_primitives_core::CumulusDigestItem; -use frame_support::{ - dispatch::{DispatchInfo, PostDispatchInfo}, - pallet_prelude::{ - InvalidTransaction, TransactionSource, TransactionValidityError, ValidTransaction, - }, - traits::PreInherents, - weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, -}; -use polkadot_primitives::MAX_POV_SIZE; -use scale_info::TypeInfo; -use sp_core::Get; -use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, Implication, PostDispatchInfoOf, TransactionExtension}, - Digest, DispatchResult, -}; - -const LOG_TARGET: &str = "runtime::parachain-system::block-weight"; - -#[derive(Debug, Encode, Decode, Clone, Copy, TypeInfo)] -pub enum BlockWeightMode { - FullCore, - PotentialFullCore { first_transaction_index: Option }, - FractionOfCore { first_transaction_index: Option }, -} - -/// A utility type for calculating the maximum block weight for a parachain based on -/// the number of relay chain cores assigned and the target number of blocks. -pub struct MaxParachainBlockWeight; - -impl MaxParachainBlockWeight { - // Maximum ref time per core - const MAX_REF_TIME_PER_CORE_NS: u64 = 2 * WEIGHT_REF_TIME_PER_SECOND; - const FULL_CORE_WEIGHT: Weight = - Weight::from_parts(Self::MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); - - /// Calculate the maximum block weight based on target blocks and core assignments. 
- /// - /// This function examines the current block's digest from `frame_system::Digests` storage - /// to find `CumulusDigestItem::CoreInfo` entries, which contain information about the - /// number of relay chain cores assigned to the parachain. Each core has a maximum - /// reference time of 2 seconds and the total maximum PoV size of `MAX_POV_SIZE` is - /// shared across all target blocks. - /// - /// # Parameters - /// - `target_blocks`: The target number of blocks to be produced - /// - /// # Returns - /// Returns the calculated maximum weight, or a conservative default if no core info is found - /// or if an error occurs during calculation. - pub fn get(target_blocks: u32) -> Weight { - let digest = frame_system::Pallet::::digest(); - let target_block_weight = - Self::target_block_weight_with_digest::(target_blocks, &digest); - - let maybe_full_core_weight = if is_first_block_in_core_with_digest(&digest) { - Self::FULL_CORE_WEIGHT - } else { - target_block_weight - }; - - // If we are in `on_initialize` or at applying the inherents, we allow the maximum block - // weight as allowed by the current context. - if !frame_system::Pallet::::inherents_applied() { - return maybe_full_core_weight - } - - match crate::BlockWeightMode::::get() { - // We allow the full core. - Some(BlockWeightMode::FullCore | BlockWeightMode::PotentialFullCore { .. }) => - Self::FULL_CORE_WEIGHT, - // Let's calculate below how much weight we can use. - Some(BlockWeightMode::FractionOfCore { .. }) => target_block_weight, - // Either the runtime is not using the `DynamicMaxBlockWeight` extension or there is a - // bug. The value should be set before applying the first extrinsic. 
- None => maybe_full_core_weight, - } - } - - fn target_block_weight(target_blocks: u32) -> Weight { - let digest = frame_system::Pallet::::digest(); - Self::target_block_weight_with_digest::(target_blocks, &digest) - } - - fn target_block_weight_with_digest(target_blocks: u32, digest: &Digest) -> Weight { - let Some(core_info) = CumulusDigestItem::find_core_info(&digest) else { - return Self::FULL_CORE_WEIGHT; - }; - - let number_of_cores = core_info.number_of_cores.0 as u32; - - // Ensure we have at least one core and valid target blocks - if number_of_cores == 0 || target_blocks == 0 { - return Self::FULL_CORE_WEIGHT; - } - - let total_ref_time = - (number_of_cores as u64).saturating_mul(Self::MAX_REF_TIME_PER_CORE_NS); - let ref_time_per_block = total_ref_time - .saturating_div(target_blocks as u64) - .min(Self::MAX_REF_TIME_PER_CORE_NS); - - let total_pov_size = (number_of_cores as u64).saturating_mul(MAX_POV_SIZE as u64); - let proof_size_per_block = total_pov_size.saturating_div(target_blocks as u64); - - Weight::from_parts(ref_time_per_block, proof_size_per_block) - } -} - -/// Is this the first block in a core? -fn is_first_block_in_core() -> bool { - let digest = frame_system::Pallet::::digest(); - is_first_block_in_core_with_digest(&digest) -} - -/// Is this the first block in a core? (takes digest as parameter) -fn is_first_block_in_core_with_digest(digest: &Digest) -> bool { - CumulusDigestItem::find_bundle_info(digest).map_or(false, |bi| bi.index == 0) -} - -/// Is the `BlockWeight` already above the target block weight? 
-fn block_weight_over_target_block_weight>() -> bool { - let target_block_weight = - MaxParachainBlockWeight::target_block_weight::(TargetBlockRate::get()); - - frame_system::Pallet::::remaining_block_weight() - .consumed() - .any_gt(target_block_weight) -} - -pub struct MaxBlockWeightHooks(core::marker::PhantomData<(T, TargetBlockRate)>); - -impl PreInherents for MaxBlockWeightHooks -where - T: Config, - TargetBlockRate: Get, -{ - fn pre_inherents() { - if block_weight_over_target_block_weight::() { - let is_first_block_in_core = is_first_block_in_core::(); - - if !is_first_block_in_core { - log::error!( - target: LOG_TARGET, - "Inherent block logic took longer than the target block weight, THIS IS A BUG!!!", - ); - } else { - log::debug!( - target: LOG_TARGET, - "Inherent block logic took longer than the target block weight, going to use the full core", - ); - } - - crate::BlockWeightMode::::put(BlockWeightMode::FullCore); - - // Inform the node that this block uses the full core. - frame_system::Pallet::::deposit_log(CumulusDigestItem::UseFullCore.to_digest_item()); - } - } -} - -#[derive(Encode, Decode, DecodeWithMemTracking, TypeInfo)] -#[derive_where::derive_where(Clone, Eq, PartialEq, Default; S)] -#[scale_info(skip_type_params(T, TargetBlockRate))] -pub struct DynamicMaxBlockWeight( - pub S, - core::marker::PhantomData<(T, TargetBlockRate)>, -); - -impl DynamicMaxBlockWeight { - /// Create a new `StorageWeightReclaim` instance. 
- pub fn new(s: S) -> Self { - Self(s, Default::default()) - } -} - -impl DynamicMaxBlockWeight -where - T: Config, - TargetBlockRate: Get, -{ - fn pre_validate_extrinsic( - info: &DispatchInfo, - len: usize, - ) -> Result<(), TransactionValidityError> { - let is_not_inherent = frame_system::Pallet::::inherents_applied(); - let extrinsic_index = is_not_inherent - .then(|| frame_system::Pallet::::extrinsic_index().unwrap_or_default()); - - crate::BlockWeightMode::::mutate(|mode| { - let current_mode = *mode.get_or_insert_with(|| BlockWeightMode::FractionOfCore { - first_transaction_index: extrinsic_index, - }); - - match current_mode { - // We are already allowing the full core, not that much more to do here. - BlockWeightMode::FullCore => {}, - BlockWeightMode::PotentialFullCore { first_transaction_index } | - BlockWeightMode::FractionOfCore { first_transaction_index } => { - let is_potential = - matches!(current_mode, BlockWeightMode::PotentialFullCore { .. }); - debug_assert!( - !is_potential, - "`PotentialFullCore` should resolve to `FullCore` or `FractionOfCore` after applying a transaction.", - ); - - let block_weight_over_limit = first_transaction_index == extrinsic_index - && block_weight_over_target_block_weight::(); - - // Protection against a misconfiguration as this should be detected by the pre-inherent hook. - if block_weight_over_limit { - *mode = Some(BlockWeightMode::FullCore); - - // Inform the node that this block uses the full core. 
- frame_system::Pallet::::deposit_log( - CumulusDigestItem::UseFullCore.to_digest_item(), - ); - - log::error!( - target: LOG_TARGET, - "Inherent block logic took longer than the target block weight, \ - `MaxBlockWeightHooks` not registered as `PreInherents` hook!", - ); - } else if info - .total_weight() - // The extrinsic lengths counts towards the POV size - .saturating_add(Weight::from_parts(0, len as u64)) - .any_gt(MaxParachainBlockWeight::target_block_weight::( - TargetBlockRate::get(), - )) && is_first_block_in_core::() - { - // TODO: make 10 configurable - if extrinsic_index.unwrap_or_default().saturating_sub(first_transaction_index.unwrap_or_default()) < 10 { - *mode = Some(BlockWeightMode::PotentialFullCore { - // While applying inherents `extrinsic_index` and `first_transaction_index` will be `None`. - // When the first transaction is applied, we want to store the index. - first_transaction_index: first_transaction_index.or(extrinsic_index), - }); - } else { - return Err(InvalidTransaction::ExhaustsResources) - } - } else if is_potential { - *mode = - Some(BlockWeightMode::FractionOfCore { first_transaction_index }); - } - }, - }; - - Ok(()) - }).map_err(Into::into) - } - - fn post_dispatch_extrinsic() { - crate::BlockWeightMode::::mutate(|weight_mode| { - let Some(mode) = *weight_mode else { return }; - - let target_block_weight = - MaxParachainBlockWeight::target_block_weight::(TargetBlockRate::get()); - - let is_above_limit = frame_system::Pallet::::remaining_block_weight() - .consumed() - .any_gt(target_block_weight); - - match mode { - // If the previous mode was already `FullCore`, we are fine. - BlockWeightMode::FullCore => {}, - BlockWeightMode::FractionOfCore { .. } => - // If we are above the limit, it means the transaction used more weight than what it - // had announced, which should not happen. 
- if is_above_limit { - log::error!( - target: LOG_TARGET, - "Extrinsic ({}) used more weight than what it had announced and pushed the \ - block above the allowed weight limit!", - frame_system::Pallet::::extrinsic_index().unwrap_or_default() - ); - - // If this isn't the first block in a core, we register the full core weight - // to ensure that we don't include any other transactions. Because we don't - // know how many weight of the core was already used by the blocks before. - if !is_first_block_in_core::() { - log::error!( - target: LOG_TARGET, - "Registering `FULL_CORE_WEIGHT` to ensure no other transaction is included \ - in this block, because this isn't the first block in the core!", - ); - - frame_system::Pallet::::register_extra_weight_unchecked( - MaxParachainBlockWeight::FULL_CORE_WEIGHT, - frame_support::dispatch::DispatchClass::Mandatory, - ); - } - - *weight_mode = Some(BlockWeightMode::FullCore); - - // Inform the node that this block uses the full core. - frame_system::Pallet::::deposit_log( - CumulusDigestItem::UseFullCore.to_digest_item(), - ); - }, - // Now we need to check if the transaction required more weight than a fraction of a - // core block. - BlockWeightMode::PotentialFullCore { first_transaction_index } => - if is_above_limit { - *weight_mode = Some(BlockWeightMode::FullCore); - - // Inform the node that this block uses the full core. 
- frame_system::Pallet::::deposit_log( - CumulusDigestItem::UseFullCore.to_digest_item(), - ); - } else { - *weight_mode = - Some(BlockWeightMode::FractionOfCore { first_transaction_index }); - }, - } - }); - } -} - -impl From for DynamicMaxBlockWeight { - fn from(s: S) -> Self { - Self::new(s) - } -} - -impl core::fmt::Debug - for DynamicMaxBlockWeight -{ - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "DynamicMaxBlockWeight<{:?}>", self.0) - } -} - -impl< - T: Config + Send + Sync, - S: TransactionExtension, - TargetBlockRate: Get + Send + Sync + 'static, - > TransactionExtension for DynamicMaxBlockWeight -where - T::RuntimeCall: Dispatchable, -{ - const IDENTIFIER: &'static str = "DynamicMaxBlockWeight"; - - type Implicit = S::Implicit; - - type Val = S::Val; - - type Pre = S::Pre; - - fn implicit(&self) -> Result { - self.0.implicit() - } - - fn metadata() -> Vec { - let mut inner = S::metadata(); - inner.push(sp_runtime::traits::TransactionExtensionMetadata { - identifier: "DynamicMaxBlockWeight", - ty: scale_info::meta_type::<()>(), - implicit: scale_info::meta_type::<()>(), - }); - inner - } - - fn weight(&self, _: &T::RuntimeCall) -> Weight { - Weight::zero() - } - - fn validate( - &self, - origin: T::RuntimeOrigin, - call: &T::RuntimeCall, - info: &DispatchInfoOf, - len: usize, - self_implicit: Self::Implicit, - inherited_implication: &impl Implication, - source: TransactionSource, - ) -> Result<(ValidTransaction, Self::Val, T::RuntimeOrigin), TransactionValidityError> { - Self::pre_validate_extrinsic(info, len)?; - - self.0 - .validate(origin, call, info, len, self_implicit, inherited_implication, source) - } - - fn prepare( - self, - val: Self::Val, - origin: &T::RuntimeOrigin, - call: &T::RuntimeCall, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.0.prepare(val, origin, call, info, len) - } - - fn post_dispatch( - pre: Self::Pre, - info: &DispatchInfoOf, - post_info: &mut PostDispatchInfo, - 
len: usize, - result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - S::post_dispatch(pre, info, post_info, len, result)?; - - Self::post_dispatch_extrinsic(); - - Ok(()) - } - - fn bare_validate( - call: &T::RuntimeCall, - info: &DispatchInfoOf, - len: usize, - ) -> frame_support::pallet_prelude::TransactionValidity { - S::bare_validate(call, info, len) - } - - fn bare_validate_and_prepare( - call: &T::RuntimeCall, - info: &DispatchInfoOf, - len: usize, - ) -> Result<(), TransactionValidityError> { - S::bare_validate_and_prepare(call, info, len)?; - - Self::pre_validate_extrinsic(info, len)?; - - Ok(()) - } - - fn bare_post_dispatch( - info: &DispatchInfoOf, - post_info: &mut PostDispatchInfoOf, - len: usize, - result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - S::bare_post_dispatch(info, post_info, len, result)?; - - Self::post_dispatch_extrinsic(); - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate as parachain_system; - use codec::Compact; - use cumulus_primitives_core::{ClaimQueueOffset, CoreInfo, CoreSelector}; - use frame_support::{construct_runtime, derive_impl}; - use sp_io; - use sp_runtime::{traits::IdentityLookup, BuildStorage}; - - type Block = frame_system::mocking::MockBlock; - - // Configure a mock runtime to test the functionality - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] - impl frame_system::Config for Test { - type Block = Block; - type AccountId = u64; - type AccountData = (); - type Lookup = IdentityLookup; - type OnSetCode = crate::ParachainSetCode; - } - - impl crate::Config for Test { - type RuntimeEvent = RuntimeEvent; - type OnSystemEvent = (); - type SelfParaId = (); - type OutboundXcmpMessageSource = (); - type DmpQueue = (); - type ReservedDmpWeight = (); - type XcmpMessageHandler = (); - type ReservedXcmpWeight = (); - type CheckAssociatedRelayNumber = crate::RelayNumberStrictlyIncreases; - type WeightInfo = (); - type ConsensusHook = 
crate::ExpectParentIncluded; - type RelayParentOffset = (); - } - - construct_runtime!( - pub enum Test { - System: frame_system, - ParachainSystem: parachain_system, - } - ); - - fn new_test_ext_with_digest(num_cores: Option) -> sp_io::TestExternalities { - let storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); - - let mut ext = sp_io::TestExternalities::from(storage); - - ext.execute_with(|| { - if let Some(num_cores) = num_cores { - let core_info = CoreInfo { - selector: CoreSelector(0), - claim_queue_offset: ClaimQueueOffset(0), - number_of_cores: Compact(num_cores), - }; - - let digest = CumulusDigestItem::CoreInfo(core_info).to_digest_item(); - - frame_system::Pallet::::deposit_log(digest); - } - }); - - ext - } - - #[test] - fn test_single_core_single_block() { - new_test_ext_with_digest(Some(1)).execute_with(|| { - let weight = MaxParachainBlockWeight::get::(1); - - // With 1 core and 1 target block, should get full 2s ref time and full PoV size - assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); - assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); - }); - } - - #[test] - fn test_single_core_multiple_blocks() { - new_test_ext_with_digest(Some(1)).execute_with(|| { - let weight = MaxParachainBlockWeight::get::(4); - - // With 1 core and 4 target blocks, should get 0.5s ref time and 1/4 PoV size per block - assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND / 4); - assert_eq!(weight.proof_size(), (1 * MAX_POV_SIZE as u64) / 4); - }); - } - - #[test] - fn test_multiple_cores_single_block() { - new_test_ext_with_digest(Some(3)).execute_with(|| { - let weight = MaxParachainBlockWeight::get::(1); - - // With 3 cores and 1 target block, should get max 2s ref time (capped per core) and 3x - // PoV size - assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); - assert_eq!(weight.proof_size(), 3 * MAX_POV_SIZE as u64); - }); - } - - #[test] - fn test_multiple_cores_multiple_blocks() { - 
new_test_ext_with_digest(Some(2)).execute_with(|| { - let weight = MaxParachainBlockWeight::get::(4); - - // With 2 cores and 4 target blocks, should get 1s ref time and 2x PoV size / 4 per - // block - assert_eq!(weight.ref_time(), 2 * 2 * WEIGHT_REF_TIME_PER_SECOND / 4); - assert_eq!(weight.proof_size(), (2 * MAX_POV_SIZE as u64) / 4); - }); - } - - #[test] - fn test_no_core_info() { - new_test_ext_with_digest(None).execute_with(|| { - let weight = MaxParachainBlockWeight::get::(1); - - // Without core info, should return conservative default - assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); - assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); - }); - } - - #[test] - fn test_zero_cores() { - new_test_ext_with_digest(Some(0)).execute_with(|| { - let weight = MaxParachainBlockWeight::get::(1); - - // With 0 cores, should return conservative default - assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); - assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); - }); - } - - #[test] - fn test_zero_target_blocks() { - new_test_ext_with_digest(Some(2)).execute_with(|| { - let weight = MaxParachainBlockWeight::get::(0); - - // With 0 target blocks, should return conservative default - assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); - assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); - }); - } -} diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mock.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mock.rs new file mode 100644 index 0000000000000..b433b84b540c7 --- /dev/null +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mock.rs @@ -0,0 +1,107 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{transaction_extension::DynamicMaxBlockWeight, *}; +use crate as parachain_system; +use codec::Compact; +use cumulus_primitives_core::{ + BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, +}; +use frame_executive; +use frame_support::{ + construct_runtime, derive_impl, + dispatch::{DispatchClass, DispatchInfo, Pays}, + traits::Hooks, + weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, +}; +use frame_system::mocking::MockBlock; +use polkadot_primitives::MAX_POV_SIZE; +use sp_core::ConstU32; +use sp_io; +use sp_runtime::{ + generic::Header, + testing::{TestXt, UintAuthorityId}, + traits::{ + BlakeTwo256, Block as BlockT, Dispatchable, Header as HeaderT, IdentityLookup, + TransactionExtension, + }, + transaction_validity::TransactionSource, + BuildStorage, Perbill, +}; + +type Block = frame_system::mocking::MockBlock; + +// Configure a mock runtime to test the functionality +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type Block = Block; + type AccountId = u64; + type AccountData = (); + type Lookup = IdentityLookup; + type OnSetCode = crate::ParachainSetCode; +} + +impl crate::Config for Test { + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = (); + type OutboundXcmpMessageSource = (); + type DmpQueue = (); + type ReservedDmpWeight = (); + type XcmpMessageHandler = (); + type ReservedXcmpWeight = (); + type CheckAssociatedRelayNumber = crate::RelayNumberStrictlyIncreases; + type WeightInfo = (); + type 
ConsensusHook = crate::ExpectParentIncluded; + type RelayParentOffset = (); +} + +construct_runtime!( + pub enum Test { + System: frame_system, + ParachainSystem: parachain_system, + } +); + +pub type Executive = frame_executive::Executive< + Test, + Block, + frame_system::ChainContext, + Test, + AllPalletsWithSystem, +>; + +fn new_test_ext_with_digest(num_cores: Option) -> sp_io::TestExternalities { + let storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + let mut ext = sp_io::TestExternalities::from(storage); + + ext.execute_with(|| { + if let Some(num_cores) = num_cores { + let core_info = CoreInfo { + selector: CoreSelector(0), + claim_queue_offset: ClaimQueueOffset(0), + number_of_cores: Compact(num_cores), + }; + + let digest = CumulusDigestItem::CoreInfo(core_info).to_digest_item(); + + frame_system::Pallet::::deposit_log(digest); + } + }); + + ext +} diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mod.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mod.rs new file mode 100644 index 0000000000000..728a0082a70cc --- /dev/null +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mod.rs @@ -0,0 +1,160 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Utilities for calculating maximum parachain block weight based on core assignments. 
+ +use crate::Config; +use codec::{Decode, Encode}; +use core::marker::PhantomData; +use cumulus_primitives_core::CumulusDigestItem; +use frame_support::weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}; +use polkadot_primitives::MAX_POV_SIZE; +use scale_info::TypeInfo; +use sp_core::Get; +use sp_runtime::Digest; + +#[cfg(test)] +pub(crate) mod mock; +pub mod pre_inherents_hook; +#[cfg(test)] +mod tests; +pub mod transaction_extension; + +pub use pre_inherents_hook::DynamicMaxBlockWeightHooks; +pub use transaction_extension::DynamicMaxBlockWeight; + +const LOG_TARGET: &str = "runtime::parachain-system::block-weight"; + +/// The current block weight mode. +/// +/// Based on this mode [`MaxParachainBlockWeight`] determines the current allowed block weight. +#[derive(Debug, Encode, Decode, Clone, Copy, TypeInfo)] +pub enum BlockWeightMode { + /// The block is allowed to use the weight of a full core. + FullCore, + /// The current active transaction is allowed to use the weight of a full core. + PotentialFullCore { first_transaction_index: Option }, + /// The block is only allowed to consume its fraction of the core. + /// + /// How much each block is allowed to consume, depends on the target number of blocks and the + /// available cores on the relay chain. + FractionOfCore { first_transaction_index: Option }, +} + +/// A utility type for calculating the maximum block weight for a parachain based on +/// the number of relay chain cores assigned and the target number of blocks. +pub struct MaxParachainBlockWeight(PhantomData); + +impl MaxParachainBlockWeight { + // Maximum ref time per core + const MAX_REF_TIME_PER_CORE_NS: u64 = 2 * WEIGHT_REF_TIME_PER_SECOND; + const FULL_CORE_WEIGHT: Weight = + Weight::from_parts(Self::MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); + + /// Calculate the maximum block weight based on target blocks and core assignments. 
+ /// + /// This function examines the current block's digest from `frame_system::Digests` storage + /// to find `CumulusDigestItem::CoreInfo` entries, which contain information about the + /// number of relay chain cores assigned to the parachain. Each core has a maximum + /// reference time of 2 seconds and the total maximum PoV size of `MAX_POV_SIZE` is + /// shared across all target blocks. + /// + /// # Parameters + /// - `target_blocks`: The target number of blocks to be produced + /// + /// # Returns + /// Returns the calculated maximum weight, or a conservative default if no core info is found + /// or if an error occurs during calculation. + pub fn get(target_blocks: u32) -> Weight { + let digest = frame_system::Pallet::::digest(); + let target_block_weight = Self::target_block_weight_with_digest(target_blocks, &digest); + + let maybe_full_core_weight = if is_first_block_in_core_with_digest(&digest) { + Self::FULL_CORE_WEIGHT + } else { + target_block_weight + }; + + // If we are in `on_initialize` or at applying the inherents, we allow the maximum block + // weight as allowed by the current context. + if !frame_system::Pallet::::inherents_applied() { + return maybe_full_core_weight + } + + match crate::BlockWeightMode::::get() { + // We allow the full core. + Some(BlockWeightMode::FullCore | BlockWeightMode::PotentialFullCore { .. }) => + Self::FULL_CORE_WEIGHT, + // Let's calculate below how much weight we can use. + Some(BlockWeightMode::FractionOfCore { .. }) => target_block_weight, + // Either the runtime is not using the `DynamicMaxBlockWeight` extension or there is a + // bug. The value should be set before applying the first extrinsic. + None => maybe_full_core_weight, + } + } + + /// Returns the target block weight for one block. 
+    fn target_block_weight(target_blocks: u32) -> Weight {
+        let digest = frame_system::Pallet::<T>::digest();
+        Self::target_block_weight_with_digest(target_blocks, &digest)
+    }
+
+    /// Same as [`Self::target_block_weight`], but takes the `digest` directly.
+    fn target_block_weight_with_digest(target_blocks: u32, digest: &Digest) -> Weight {
+        let Some(core_info) = CumulusDigestItem::find_core_info(&digest) else {
+            return Self::FULL_CORE_WEIGHT;
+        };
+
+        let number_of_cores = core_info.number_of_cores.0 as u32;
+
+        // Ensure we have at least one core and valid target blocks
+        if number_of_cores == 0 || target_blocks == 0 {
+            return Self::FULL_CORE_WEIGHT;
+        }
+
+        let total_ref_time =
+            (number_of_cores as u64).saturating_mul(Self::MAX_REF_TIME_PER_CORE_NS);
+        let ref_time_per_block = total_ref_time
+            .saturating_div(target_blocks as u64)
+            .min(Self::MAX_REF_TIME_PER_CORE_NS);
+
+        let total_pov_size = (number_of_cores as u64).saturating_mul(MAX_POV_SIZE as u64);
+        let proof_size_per_block = total_pov_size.saturating_div(target_blocks as u64);
+
+        Weight::from_parts(ref_time_per_block, proof_size_per_block)
+    }
+}
+
+/// Is this the first block in a core?
+fn is_first_block_in_core<T: Config>() -> bool {
+    let digest = frame_system::Pallet::<T>::digest();
+    is_first_block_in_core_with_digest(&digest)
+}
+
+/// Is this the first block in a core? (takes digest as parameter)
+fn is_first_block_in_core_with_digest(digest: &Digest) -> bool {
+    CumulusDigestItem::find_bundle_info(digest).map_or(false, |bi| bi.index == 0)
+}
+
+/// Is the `BlockWeight` already above the target block weight?
+fn block_weight_over_target_block_weight<T: Config, TargetBlockRate: Get<u32>>() -> bool {
+    let target_block_weight =
+        MaxParachainBlockWeight::<T>::target_block_weight(TargetBlockRate::get());
+
+    frame_system::Pallet::<T>::remaining_block_weight()
+        .consumed()
+        .any_gt(target_block_weight)
+}
diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/pre_inherents_hook.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/pre_inherents_hook.rs
new file mode 100644
index 0000000000000..44525c5047b37
--- /dev/null
+++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/pre_inherents_hook.rs
@@ -0,0 +1,59 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +use super::{ + block_weight_over_target_block_weight, is_first_block_in_core, BlockWeightMode, LOG_TARGET, +}; +use cumulus_primitives_core::CumulusDigestItem; +use frame_support::traits::PreInherents; +use sp_core::Get; + +pub struct DynamicMaxBlockWeightHooks( + pub core::marker::PhantomData<(Config, TargetBlockRate)>, +); + +impl PreInherents for DynamicMaxBlockWeightHooks +where + Config: crate::Config, + TargetBlockRate: Get, +{ + fn pre_inherents() { + if !block_weight_over_target_block_weight::() { + return + } + + let is_first_block_in_core = is_first_block_in_core::(); + + if !is_first_block_in_core { + log::error!( + target: LOG_TARGET, + "Inherent block logic took longer than the target block weight, THIS IS A BUG!!!", + ); + } else { + log::debug!( + target: LOG_TARGET, + "Inherent block logic took longer than the target block weight, going to use the full core", + ); + } + + crate::BlockWeightMode::::put(BlockWeightMode::FullCore); + + // Inform the node that this block uses the full core. + frame_system::Pallet::::deposit_log( + CumulusDigestItem::UseFullCore.to_digest_item(), + ); + } +} diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/tests.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/tests.rs new file mode 100644 index 0000000000000..bb45228d3cf0c --- /dev/null +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/tests.rs @@ -0,0 +1,280 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{mock::*, transaction_extension::DynamicMaxBlockWeight, *}; +use crate as parachain_system; +use codec::Compact; +use cumulus_primitives_core::{ + BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, +}; +use frame_executive; +use frame_support::{ + construct_runtime, derive_impl, + dispatch::{DispatchClass, DispatchInfo, Pays}, + traits::Hooks, + weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, +}; +use frame_system::mocking::MockBlock; +use polkadot_primitives::MAX_POV_SIZE; +use sp_core::ConstU32; +use sp_io; +use sp_runtime::{ + generic::Header, + testing::{TestXt, UintAuthorityId}, + traits::{ + BlakeTwo256, Block as BlockT, Dispatchable, Header as HeaderT, IdentityLookup, + TransactionExtension, + }, + transaction_validity::TransactionSource, + BuildStorage, Perbill, +}; + +#[test] +fn test_single_core_single_block() { + new_test_ext_with_digest(Some(1)).execute_with(|| { + let weight = MaxParachainBlockWeight::get::(1); + + // With 1 core and 1 target block, should get full 2s ref time and full PoV size + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); + }); +} + +#[test] +fn test_single_core_multiple_blocks() { + new_test_ext_with_digest(Some(1)).execute_with(|| { + let weight = MaxParachainBlockWeight::get::(4); + + // With 1 core and 4 target blocks, should get 0.5s ref time and 1/4 PoV size per block + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND / 4); + assert_eq!(weight.proof_size(), (1 * MAX_POV_SIZE as 
u64) / 4); + }); +} + +#[test] +fn test_multiple_cores_single_block() { + new_test_ext_with_digest(Some(3)).execute_with(|| { + let weight = MaxParachainBlockWeight::get::(1); + + // With 3 cores and 1 target block, should get max 2s ref time (capped per core) and 3x + // PoV size + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + assert_eq!(weight.proof_size(), 3 * MAX_POV_SIZE as u64); + }); +} + +#[test] +fn test_multiple_cores_multiple_blocks() { + new_test_ext_with_digest(Some(2)).execute_with(|| { + let weight = MaxParachainBlockWeight::get::(4); + + // With 2 cores and 4 target blocks, should get 1s ref time and 2x PoV size / 4 per + // block + assert_eq!(weight.ref_time(), 2 * 2 * WEIGHT_REF_TIME_PER_SECOND / 4); + assert_eq!(weight.proof_size(), (2 * MAX_POV_SIZE as u64) / 4); + }); +} + +#[test] +fn test_no_core_info() { + new_test_ext_with_digest(None).execute_with(|| { + let weight = MaxParachainBlockWeight::get::(1); + + // Without core info, should return conservative default + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); + }); +} + +#[test] +fn test_zero_cores() { + new_test_ext_with_digest(Some(0)).execute_with(|| { + let weight = MaxParachainBlockWeight::get::(1); + + // With 0 cores, should return conservative default + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); + }); +} + +#[test] +fn test_zero_target_blocks() { + new_test_ext_with_digest(Some(2)).execute_with(|| { + let weight = MaxParachainBlockWeight::get::(0); + + // With 0 target blocks, should return conservative default + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); + }); +} + +#[test] +fn test_target_block_weight_calculation() { + new_test_ext_with_digest(Some(4)).execute_with(|| { + // Test target_block_weight function directly + let weight_2_blocks = 
MaxParachainBlockWeight::target_block_weight::(2); + let weight_8_blocks = MaxParachainBlockWeight::target_block_weight::(8); + + // With 4 cores and 2 target blocks, should get 2s per block + assert_eq!(weight_2_blocks.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + assert_eq!(weight_2_blocks.proof_size(), (4 * MAX_POV_SIZE as u64) / 2); + + // With 4 cores and 8 target blocks, should get 1s per block + assert_eq!(weight_8_blocks.ref_time(), 2 * 4 * WEIGHT_REF_TIME_PER_SECOND / 8); + assert_eq!(weight_8_blocks.proof_size(), (4 * MAX_POV_SIZE as u64) / 8); + }); +} + +#[test] +fn test_max_ref_time_per_core_cap() { + new_test_ext_with_digest(Some(8)).execute_with(|| { + // Even with many cores, ref time per block should be capped at MAX_REF_TIME_PER_CORE_NS + let weight = MaxParachainBlockWeight::get::(1); + + // Should be capped at 2s ref time per core + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + // But proof size should scale with number of cores + assert_eq!(weight.proof_size(), 8 * MAX_POV_SIZE as u64); + }); +} + +#[test] +fn test_target_block_weight_with_digest_edge_cases() { + use cumulus_primitives_core::CumulusDigestItem; + use sp_runtime::Digest; + + // Test with empty digest + let empty_digest = Digest::default(); + let weight = MaxParachainBlockWeight::target_block_weight_with_digest::(1, &empty_digest); + assert_eq!(weight, MaxParachainBlockWeight::FULL_CORE_WEIGHT); + + // Test with digest containing core info + let core_info = CoreInfo { + selector: CoreSelector(0), + claim_queue_offset: ClaimQueueOffset(0), + number_of_cores: Compact(2u16), + }; + let digest_item = CumulusDigestItem::CoreInfo(core_info).to_digest_item(); + let mut digest = Digest::default(); + digest.push(digest_item); + + let weight = MaxParachainBlockWeight::target_block_weight_with_digest::(2, &digest); + assert_eq!(weight.ref_time(), 2 * 2 * WEIGHT_REF_TIME_PER_SECOND / 2); + assert_eq!(weight.proof_size(), (2 * MAX_POV_SIZE as u64) / 2); +} + +#[test] +fn 
test_is_first_block_in_core_functions() { + use cumulus_primitives_core::{BundleInfo, CumulusDigestItem}; + use sp_runtime::Digest; + + new_test_ext_with_digest(Some(1)).execute_with(|| { + // Test without bundle info - should return false + let empty_digest = Digest::default(); + assert!(!super::is_first_block_in_core_with_digest(&empty_digest)); + + // Test with bundle info index = 0 - should return true + let bundle_info_first = BundleInfo { index: 0, maybe_last: false }; + let digest_item_first = CumulusDigestItem::BundleInfo(bundle_info_first).to_digest_item(); + let mut digest_first = Digest::default(); + digest_first.push(digest_item_first); + assert!(super::is_first_block_in_core_with_digest(&digest_first)); + + // Test with bundle info index > 0 - should return false + let bundle_info_not_first = BundleInfo { index: 5, maybe_last: true }; + let digest_item_not_first = + CumulusDigestItem::BundleInfo(bundle_info_not_first).to_digest_item(); + let mut digest_not_first = Digest::default(); + digest_not_first.push(digest_item_not_first); + assert!(!super::is_first_block_in_core_with_digest(&digest_not_first)); + }); +} + +#[test] +fn test_dynamic_max_block_weight_creation() { + use super::transaction_extension::DynamicMaxBlockWeight; + + // Test creating DynamicMaxBlockWeight with new() + let inner = (); + let dynamic_weight = DynamicMaxBlockWeight::::new(inner); + assert_eq!(dynamic_weight.0, ()); + + // Test creating DynamicMaxBlockWeight with From trait + let dynamic_weight_from: DynamicMaxBlockWeight = ().into(); + assert_eq!(dynamic_weight_from.0, ()); + + // Test Debug formatting + let debug_string = format!("{:?}", dynamic_weight); + assert!(debug_string.contains("DynamicMaxBlockWeight")); +} + +#[test] +fn test_max_block_weight_hooks_type() { + use super::pre_inherents_hook::DynamicMaxBlockWeightHooks; + use sp_core::ConstU32; + + // Ensure the type can be instantiated (compile-time test) + let _hooks: DynamicMaxBlockWeightHooks> = + 
DynamicMaxBlockWeightHooks(core::marker::PhantomData); +} + +#[test] +fn test_block_weight_mode_with_different_transaction_indices() { + // Test BlockWeightMode with None transaction indices + let mode_with_none = BlockWeightMode::PotentialFullCore { first_transaction_index: None }; + let mode_with_some = BlockWeightMode::FractionOfCore { first_transaction_index: Some(42) }; + + // Test encoding/decoding + use codec::{Decode, Encode}; + let encoded_none = mode_with_none.encode(); + let decoded_none = BlockWeightMode::decode(&mut &encoded_none[..]).unwrap(); + assert!(matches!( + decoded_none, + BlockWeightMode::PotentialFullCore { first_transaction_index: None } + )); + + let encoded_some = mode_with_some.encode(); + let decoded_some = BlockWeightMode::decode(&mut &encoded_some[..]).unwrap(); + assert!(matches!( + decoded_some, + BlockWeightMode::FractionOfCore { first_transaction_index: Some(42) } + )); +} + +#[test] +fn test_saturation_arithmetic() { + new_test_ext_with_digest(Some(u16::MAX)).execute_with(|| { + // Test with maximum number of cores to ensure no overflow + let weight = MaxParachainBlockWeight::get::(1); + + // Should be capped at 2s ref time per core even with max cores + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + // Proof size should saturate properly + assert!(weight.proof_size() > 0); + }); +} + +#[test] +fn test_large_target_blocks() { + new_test_ext_with_digest(Some(4)).execute_with(|| { + // Test with very large number of target blocks + let weight = MaxParachainBlockWeight::get::(u32::MAX); + + // Should not panic and should return minimal weights + assert!(weight.ref_time() > 0); + assert!(weight.proof_size() > 0); + }); +} diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/transaction_extension.rs new file mode 100644 index 0000000000000..e97b3cf5b31a0 --- /dev/null +++ 
b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/transaction_extension.rs @@ -0,0 +1,352 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{ + block_weight_over_target_block_weight, is_first_block_in_core, BlockWeightMode, + MaxParachainBlockWeight, LOG_TARGET, +}; +use crate::Config; +use alloc::vec::Vec; +use codec::{Decode, DecodeWithMemTracking, Encode}; +use cumulus_primitives_core::CumulusDigestItem; +use frame_support::{ + dispatch::{DispatchInfo, PostDispatchInfo}, + pallet_prelude::{ + InvalidTransaction, TransactionSource, TransactionValidityError, ValidTransaction, + }, + weights::Weight, +}; +use scale_info::TypeInfo; +use sp_core::Get; +use sp_runtime::{ + traits::{DispatchInfoOf, Dispatchable, Implication, PostDispatchInfoOf, TransactionExtension}, + DispatchResult, +}; + +/// Transaction extension that dynamically changes the max block weight. +/// +/// With block bundling parachains are running with block weights that may not allow certain +/// transactions to be applied, e.g. a runtime upgrade. To ensure that these transactions can still +/// be applied, this transaction extension can change the max block weight as required. There are +/// multiple requirements for it to change the block weight: +/// +/// 1. The block weight is only allowed to change in the *first block of a core*. 
+/// +/// 2. Either the inherent block logic (`on_initialize` etc), any `inherent` or any transaction up +/// to `MAX_TRANSACTION_TO_CONSIDER` required more block weight than the target block weight. +/// +/// We do not allow any block to randomly change the block weight, because the node side is tracking +/// the wall clock time it takes to build a block. When it takes too long, the node is aborting the +/// block production. But because the node knows that the first block of a core may runs longer, it +/// allows this block to take up to `2s` of wall clock time. `2s` is the time each `PoV` gets on the +/// relay chain for its validation or in other words the maximum core execution time. +/// +/// When the extension is changing the block weight, it changes it to the maximum core execution +/// time of `2s`. +/// +/// The extension also requires that the weight of *one* transaction alone is bigger than the max +/// block weight. +/// +/// Takes the following generic parameters: +/// +/// - `TargetBlockRate`: The target block rate the parachain should be running with. Or in other +/// words, the number of blocks the parachain should produce in `6s`(relay chain slot duration). +/// +/// - `MAX_TRANSACTION`: The maximum number of transactions to consider before giving up to change +/// the max block weight. +#[derive(Encode, Decode, DecodeWithMemTracking, TypeInfo)] +#[derive_where::derive_where(Clone, Eq, PartialEq, Default; S)] +#[scale_info(skip_type_params(T, TargetBlockRate))] +pub struct DynamicMaxBlockWeight< + T, + S, + TargetBlockRate, + const MAX_TRANSACTION_TO_CONSIDER: usize = 100, + const ONLY_OPERATIONAL: bool = false, +>(pub S, core::marker::PhantomData<(T, TargetBlockRate)>); + +impl DynamicMaxBlockWeight { + /// Create a new [`DynamicMaxBlockWeight`] instance. 
+ pub fn new(s: S) -> Self { + Self(s, Default::default()) + } +} + +impl DynamicMaxBlockWeight +where + T: Config, + TargetBlockRate: Get, +{ + fn pre_validate_extrinsic( + info: &DispatchInfo, + len: usize, + ) -> Result<(), TransactionValidityError> { + let is_not_inherent = frame_system::Pallet::::inherents_applied(); + let extrinsic_index = is_not_inherent + .then(|| frame_system::Pallet::::extrinsic_index().unwrap_or_default()); + + crate::BlockWeightMode::::mutate(|mode| { + let current_mode = *mode.get_or_insert_with(|| BlockWeightMode::FractionOfCore { + first_transaction_index: extrinsic_index, + }); + + match current_mode { + // We are already allowing the full core, not that much more to do here. + BlockWeightMode::FullCore => {}, + BlockWeightMode::PotentialFullCore { first_transaction_index } | + BlockWeightMode::FractionOfCore { first_transaction_index } => { + let is_potential = + matches!(current_mode, BlockWeightMode::PotentialFullCore { .. }); + debug_assert!( + !is_potential, + "`PotentialFullCore` should resolve to `FullCore` or `FractionOfCore` after applying a transaction.", + ); + + let block_weight_over_limit = first_transaction_index == extrinsic_index + && block_weight_over_target_block_weight::(); + + // Protection against a misconfiguration as this should be detected by the pre-inherent hook. + if block_weight_over_limit { + *mode = Some(BlockWeightMode::FullCore); + + // Inform the node that this block uses the full core. 
+ frame_system::Pallet::::deposit_log( + CumulusDigestItem::UseFullCore.to_digest_item(), + ); + + log::error!( + target: LOG_TARGET, + "Inherent block logic took longer than the target block weight, \ + `MaxBlockWeightHooks` not registered as `PreInherents` hook!", + ); + } else if info + .total_weight() + // The extrinsic lengths counts towards the POV size + .saturating_add(Weight::from_parts(0, len as u64)) + .any_gt(MaxParachainBlockWeight::::target_block_weight( + TargetBlockRate::get(), + )) && is_first_block_in_core::() + { + // TODO: make 10 configurable + if extrinsic_index.unwrap_or_default().saturating_sub(first_transaction_index.unwrap_or_default()) < 10 { + *mode = Some(BlockWeightMode::PotentialFullCore { + // While applying inherents `extrinsic_index` and `first_transaction_index` will be `None`. + // When the first transaction is applied, we want to store the index. + first_transaction_index: first_transaction_index.or(extrinsic_index), + }); + } else { + return Err(InvalidTransaction::ExhaustsResources) + } + } else if is_potential { + *mode = + Some(BlockWeightMode::FractionOfCore { first_transaction_index }); + } + }, + }; + + Ok(()) + }).map_err(Into::into) + } + + fn post_dispatch_extrinsic() { + crate::BlockWeightMode::::mutate(|weight_mode| { + let Some(mode) = *weight_mode else { return }; + + let target_block_weight = + MaxParachainBlockWeight::::target_block_weight(TargetBlockRate::get()); + + let is_above_limit = frame_system::Pallet::::remaining_block_weight() + .consumed() + .any_gt(target_block_weight); + + match mode { + // If the previous mode was already `FullCore`, we are fine. + BlockWeightMode::FullCore => {}, + BlockWeightMode::FractionOfCore { .. } => + // If we are above the limit, it means the transaction used more weight than what it + // had announced, which should not happen. 
+ if is_above_limit { + log::error!( + target: LOG_TARGET, + "Extrinsic ({}) used more weight than what it had announced and pushed the \ + block above the allowed weight limit!", + frame_system::Pallet::::extrinsic_index().unwrap_or_default() + ); + + // If this isn't the first block in a core, we register the full core weight + // to ensure that we don't include any other transactions. Because we don't + // know how many weight of the core was already used by the blocks before. + if !is_first_block_in_core::() { + log::error!( + target: LOG_TARGET, + "Registering `FULL_CORE_WEIGHT` to ensure no other transaction is included \ + in this block, because this isn't the first block in the core!", + ); + + frame_system::Pallet::::register_extra_weight_unchecked( + MaxParachainBlockWeight::::FULL_CORE_WEIGHT, + frame_support::dispatch::DispatchClass::Mandatory, + ); + } + + *weight_mode = Some(BlockWeightMode::FullCore); + + // Inform the node that this block uses the full core. + frame_system::Pallet::::deposit_log( + CumulusDigestItem::UseFullCore.to_digest_item(), + ); + }, + // Now we need to check if the transaction required more weight than a fraction of a + // core block. + BlockWeightMode::PotentialFullCore { first_transaction_index } => + if is_above_limit { + *weight_mode = Some(BlockWeightMode::FullCore); + + // Inform the node that this block uses the full core. 
+ frame_system::Pallet::::deposit_log( + CumulusDigestItem::UseFullCore.to_digest_item(), + ); + } else { + *weight_mode = + Some(BlockWeightMode::FractionOfCore { first_transaction_index }); + }, + } + }); + } +} + +impl From for DynamicMaxBlockWeight { + fn from(s: S) -> Self { + Self::new(s) + } +} + +impl core::fmt::Debug + for DynamicMaxBlockWeight +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "DynamicMaxBlockWeight<{:?}>", self.0) + } +} + +impl< + T: Config + Send + Sync, + S: TransactionExtension, + TargetBlockRate: Get + Send + Sync + 'static, + > TransactionExtension for DynamicMaxBlockWeight +where + T::RuntimeCall: Dispatchable, +{ + const IDENTIFIER: &'static str = "DynamicMaxBlockWeight"; + + type Implicit = S::Implicit; + + type Val = S::Val; + + type Pre = S::Pre; + + fn implicit(&self) -> Result { + self.0.implicit() + } + + fn metadata() -> Vec { + let mut inner = S::metadata(); + inner.push(sp_runtime::traits::TransactionExtensionMetadata { + identifier: "DynamicMaxBlockWeight", + ty: scale_info::meta_type::<()>(), + implicit: scale_info::meta_type::<()>(), + }); + inner + } + + fn weight(&self, _: &T::RuntimeCall) -> Weight { + Weight::zero() + } + + fn validate( + &self, + origin: T::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + self_implicit: Self::Implicit, + inherited_implication: &impl Implication, + source: TransactionSource, + ) -> Result<(ValidTransaction, Self::Val, T::RuntimeOrigin), TransactionValidityError> { + Self::pre_validate_extrinsic(info, len)?; + + self.0 + .validate(origin, call, info, len, self_implicit, inherited_implication, source) + } + + fn prepare( + self, + val: Self::Val, + origin: &T::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + ) -> Result { + self.0.prepare(val, origin, call, info, len) + } + + fn post_dispatch( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &mut PostDispatchInfo, + 
len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + S::post_dispatch(pre, info, post_info, len, result)?; + + Self::post_dispatch_extrinsic(); + + Ok(()) + } + + fn bare_validate( + call: &T::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + ) -> frame_support::pallet_prelude::TransactionValidity { + S::bare_validate(call, info, len) + } + + fn bare_validate_and_prepare( + call: &T::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(), TransactionValidityError> { + S::bare_validate_and_prepare(call, info, len)?; + + Self::pre_validate_extrinsic(info, len)?; + + Ok(()) + } + + fn bare_post_dispatch( + info: &DispatchInfoOf, + post_info: &mut PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + S::bare_post_dispatch(info, post_info, len, result)?; + + Self::post_dispatch_extrinsic(); + + Ok(()) + } +} diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 4f35ebe846fa7..82d26b006bcbf 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -276,7 +276,7 @@ impl frame_system::Config for Runtime { type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; type PreInherents = - cumulus_pallet_parachain_system::max_parachain_block_weight::MaxBlockWeightHooks< + cumulus_pallet_parachain_system::max_parachain_block_weight::DynamicMaxBlockWeightHooks< Runtime, NumberOfBlocksPerRelaySlot, >; From ef9b5b55d4b24166d875ac6481a4aa58aed1766c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 9 Oct 2025 22:14:46 +0200 Subject: [PATCH 136/312] Docs and other stuff --- Cargo.lock | 1 + cumulus/pallets/parachain-system/Cargo.toml | 3 +- cumulus/pallets/parachain-system/src/lib.rs | 11 +-- .../src/max_parachain_block_weight/mock.rs | 3 +- .../src/max_parachain_block_weight/mod.rs | 49 ++++++---- .../pre_inherents_hook.rs | 14 
+++ .../src/max_parachain_block_weight/tests.rs | 41 ++++---- .../transaction_extension.rs | 94 +++++++++++-------- cumulus/test/client/src/lib.rs | 2 +- cumulus/test/runtime/src/lib.rs | 37 ++++---- .../tests/zombie_ci/block_bundling/basic.rs | 8 +- .../block_bundling/runtime_upgrade.rs | 2 +- .../elastic_scaling/upgrade_to_3_cores.rs | 21 +---- 13 files changed, 158 insertions(+), 128 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index de1fee8f1adae..cdb9b9ab4d300 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4703,6 +4703,7 @@ dependencies = [ "derive-where", "environmental", "frame-benchmarking", + "frame-executive", "frame-support", "frame-system", "futures", diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 370db8f35dc77..943c8dff60a4e 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -59,8 +59,8 @@ rand = { workspace = true, default-features = true } rstest = { workspace = true } trie-standardmap = { workspace = true } - # Substrate +frame-executive = { workspace = true } sc-consensus = { workspace = true } sp-api = { workspace = true, default-features = true } sp-consensus-slots = { workspace = true, default-features = true } @@ -68,6 +68,7 @@ sp-crypto-hashing = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } + # Cumulus cumulus-test-client = { workspace = true } cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index c6f7b6ca1fe4b..d308eb8b2c903 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -62,19 +62,15 @@ use xcm::{latest::XcmHash, VersionedLocation, VersionedXcm, 
MAX_XCM_DECODE_DEPTH use xcm_builder::InspectMessageQueues; mod benchmarking; +pub mod consensus_hook; pub mod max_parachain_block_weight; pub mod migration; mod mock; +pub mod relay_state_snapshot; #[cfg(test)] mod tests; -pub mod weights; - -pub use weights::WeightInfo; - mod unincluded_segment; - -pub mod consensus_hook; -pub mod relay_state_snapshot; +pub mod weights; #[macro_use] pub mod validate_block; mod descendant_validation; @@ -108,6 +104,7 @@ pub use consensus_hook::{ConsensusHook, ExpectParentIncluded}; pub use cumulus_pallet_parachain_system_proc_macro::register_validate_block; pub use relay_state_snapshot::{MessagingStateSnapshot, RelayChainStateProof}; pub use unincluded_segment::{Ancestor, UsedBandwidth}; +pub use weights::WeightInfo; pub use pallet::*; diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mock.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mock.rs index b433b84b540c7..b26a5aa218bec 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mock.rs @@ -20,7 +20,6 @@ use codec::Compact; use cumulus_primitives_core::{ BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, }; -use frame_executive; use frame_support::{ construct_runtime, derive_impl, dispatch::{DispatchClass, DispatchInfo, Pays}, @@ -84,7 +83,7 @@ pub type Executive = frame_executive::Executive< AllPalletsWithSystem, >; -fn new_test_ext_with_digest(num_cores: Option) -> sp_io::TestExternalities { +pub fn new_test_ext_with_digest(num_cores: Option) -> sp_io::TestExternalities { let storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); let mut ext = sp_io::TestExternalities::from(storage); diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mod.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mod.rs index 728a0082a70cc..7647b312459c4 100644 --- 
a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mod.rs @@ -14,7 +14,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Utilities for calculating maximum parachain block weight based on core assignments. +//! Provides functionality to dynamically calculate the max block weight for a parachain. +//! +//! With block bundling parachains are relative free to choose whatever block interval they want. +//! This means they will run under normal conditions with blocks that have a small block weight. +//! These small blocks may prevent certain transactions to be applied, e.g. a runtime upgrade. But +//! it is not only about transactions, also certain block logic may requires more weight from time +//! to time. To serve these needs [`MaxParachainBlockWeight`], [`DynamicMaxBlockWeight`] and +//! [`DynamicMaxBlockWeightHooks`] exist. +//! +//! - [`MaxParachainBlockWeight`]: use crate::Config; use codec::{Decode, Encode}; @@ -46,16 +55,31 @@ pub enum BlockWeightMode { /// The block is allowed to use the weight of a full core. FullCore, /// The current active transaction is allowed to use the weight of a full core. - PotentialFullCore { first_transaction_index: Option }, + PotentialFullCore { + /// The index of the first transaction. + first_transaction_index: Option, + /// The target weight that was used to determine that the extrinsic is above this limit. + target_weight: Weight, + }, /// The block is only allowed to consume its fraction of the core. /// /// How much each block is allowed to consume, depends on the target number of blocks and the /// available cores on the relay chain. - FractionOfCore { first_transaction_index: Option }, + FractionOfCore { + /// The index of the first transaction. 
+ first_transaction_index: Option, + }, } -/// A utility type for calculating the maximum block weight for a parachain based on -/// the number of relay chain cores assigned and the target number of blocks. +/// Provides a [`get`](Self::get) method to calculate the max block weight based on the number of +/// target blocks. +/// +/// This takes internally into consideration the number of available cores, communicated via the +/// [`CumulusDigestItem::CoreInfo`] digest, to calculate the available resources. Based on the +/// available cores and the number of desired blocks a target weight is calculated. But it does not +/// only take the number of cores and blocks into consideration, but also the current +/// [`BlockWeightMode`]. The [`BlockWeightMode`] is set by the [`DynamicMaxBlockWeight`] +/// transaction extension depending certain conditions. pub struct MaxParachainBlockWeight(PhantomData); impl MaxParachainBlockWeight { @@ -64,20 +88,7 @@ impl MaxParachainBlockWeight { const FULL_CORE_WEIGHT: Weight = Weight::from_parts(Self::MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); - /// Calculate the maximum block weight based on target blocks and core assignments. - /// - /// This function examines the current block's digest from `frame_system::Digests` storage - /// to find `CumulusDigestItem::CoreInfo` entries, which contain information about the - /// number of relay chain cores assigned to the parachain. Each core has a maximum - /// reference time of 2 seconds and the total maximum PoV size of `MAX_POV_SIZE` is - /// shared across all target blocks. - /// - /// # Parameters - /// - `target_blocks`: The target number of blocks to be produced - /// - /// # Returns - /// Returns the calculated maximum weight, or a conservative default if no core info is found - /// or if an error occurs during calculation. + /// Calculate the maximum block weight based on target blocks and available cores. 
pub fn get(target_blocks: u32) -> Weight { let digest = frame_system::Pallet::::digest(); let target_block_weight = Self::target_block_weight_with_digest(target_blocks, &digest); diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/pre_inherents_hook.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/pre_inherents_hook.rs index 44525c5047b37..9e8607bb3231e 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/pre_inherents_hook.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/pre_inherents_hook.rs @@ -17,10 +17,17 @@ use super::{ block_weight_over_target_block_weight, is_first_block_in_core, BlockWeightMode, LOG_TARGET, }; +use crate::max_parachain_block_weight::MaxParachainBlockWeight; use cumulus_primitives_core::CumulusDigestItem; use frame_support::traits::PreInherents; use sp_core::Get; +/// A pre-inherent hook that may increases max block weight after `on_initialize`. +/// +/// The hook is called before applying the first inherent. It checks the used block weight of +/// `on_initialize`. If the used block weight is above the target block weight, the hook will allow +/// the block to use the weight of a full core. It also sets the [`CumulusDigestItem::UseFullCore`] +/// digest. pub struct DynamicMaxBlockWeightHooks( pub core::marker::PhantomData<(Config, TargetBlockRate)>, ); @@ -42,6 +49,13 @@ where target: LOG_TARGET, "Inherent block logic took longer than the target block weight, THIS IS A BUG!!!", ); + + // We are already above the allowed maximum and do not want to accept any more + // extrinsics. 
+ frame_system::Pallet::::register_extra_weight_unchecked( + MaxParachainBlockWeight::::FULL_CORE_WEIGHT, + frame_support::dispatch::DispatchClass::Mandatory, + ); } else { log::debug!( target: LOG_TARGET, diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/tests.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/tests.rs index bb45228d3cf0c..7e07b5d5f76cd 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/tests.rs @@ -20,7 +20,6 @@ use codec::Compact; use cumulus_primitives_core::{ BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, }; -use frame_executive; use frame_support::{ construct_runtime, derive_impl, dispatch::{DispatchClass, DispatchInfo, Pays}, @@ -45,7 +44,7 @@ use sp_runtime::{ #[test] fn test_single_core_single_block() { new_test_ext_with_digest(Some(1)).execute_with(|| { - let weight = MaxParachainBlockWeight::get::(1); + let weight = MaxParachainBlockWeight::::get(1); // With 1 core and 1 target block, should get full 2s ref time and full PoV size assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); @@ -56,7 +55,7 @@ fn test_single_core_single_block() { #[test] fn test_single_core_multiple_blocks() { new_test_ext_with_digest(Some(1)).execute_with(|| { - let weight = MaxParachainBlockWeight::get::(4); + let weight = MaxParachainBlockWeight::::get(4); // With 1 core and 4 target blocks, should get 0.5s ref time and 1/4 PoV size per block assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND / 4); @@ -67,7 +66,7 @@ fn test_single_core_multiple_blocks() { #[test] fn test_multiple_cores_single_block() { new_test_ext_with_digest(Some(3)).execute_with(|| { - let weight = MaxParachainBlockWeight::get::(1); + let weight = MaxParachainBlockWeight::::get(1); // With 3 cores and 1 target block, should get max 2s ref time (capped per core) and 3x // PoV size @@ -79,7 +78,7 @@ fn 
test_multiple_cores_single_block() { #[test] fn test_multiple_cores_multiple_blocks() { new_test_ext_with_digest(Some(2)).execute_with(|| { - let weight = MaxParachainBlockWeight::get::(4); + let weight = MaxParachainBlockWeight::::get(4); // With 2 cores and 4 target blocks, should get 1s ref time and 2x PoV size / 4 per // block @@ -91,7 +90,7 @@ fn test_multiple_cores_multiple_blocks() { #[test] fn test_no_core_info() { new_test_ext_with_digest(None).execute_with(|| { - let weight = MaxParachainBlockWeight::get::(1); + let weight = MaxParachainBlockWeight::::get(1); // Without core info, should return conservative default assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); @@ -102,7 +101,7 @@ fn test_no_core_info() { #[test] fn test_zero_cores() { new_test_ext_with_digest(Some(0)).execute_with(|| { - let weight = MaxParachainBlockWeight::get::(1); + let weight = MaxParachainBlockWeight::::get(1); // With 0 cores, should return conservative default assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); @@ -113,7 +112,7 @@ fn test_zero_cores() { #[test] fn test_zero_target_blocks() { new_test_ext_with_digest(Some(2)).execute_with(|| { - let weight = MaxParachainBlockWeight::get::(0); + let weight = MaxParachainBlockWeight::::get(0); // With 0 target blocks, should return conservative default assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); @@ -125,8 +124,8 @@ fn test_zero_target_blocks() { fn test_target_block_weight_calculation() { new_test_ext_with_digest(Some(4)).execute_with(|| { // Test target_block_weight function directly - let weight_2_blocks = MaxParachainBlockWeight::target_block_weight::(2); - let weight_8_blocks = MaxParachainBlockWeight::target_block_weight::(8); + let weight_2_blocks = MaxParachainBlockWeight::::target_block_weight(2); + let weight_8_blocks = MaxParachainBlockWeight::::target_block_weight(8); // With 4 cores and 2 target blocks, should get 2s per block assert_eq!(weight_2_blocks.ref_time(), 2 * 
WEIGHT_REF_TIME_PER_SECOND); @@ -142,7 +141,7 @@ fn test_target_block_weight_calculation() { fn test_max_ref_time_per_core_cap() { new_test_ext_with_digest(Some(8)).execute_with(|| { // Even with many cores, ref time per block should be capped at MAX_REF_TIME_PER_CORE_NS - let weight = MaxParachainBlockWeight::get::(1); + let weight = MaxParachainBlockWeight::::get(1); // Should be capped at 2s ref time per core assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); @@ -158,8 +157,8 @@ fn test_target_block_weight_with_digest_edge_cases() { // Test with empty digest let empty_digest = Digest::default(); - let weight = MaxParachainBlockWeight::target_block_weight_with_digest::(1, &empty_digest); - assert_eq!(weight, MaxParachainBlockWeight::FULL_CORE_WEIGHT); + let weight = MaxParachainBlockWeight::::target_block_weight_with_digest(1, &empty_digest); + assert_eq!(weight, MaxParachainBlockWeight::::FULL_CORE_WEIGHT); // Test with digest containing core info let core_info = CoreInfo { @@ -171,7 +170,7 @@ fn test_target_block_weight_with_digest_edge_cases() { let mut digest = Digest::default(); digest.push(digest_item); - let weight = MaxParachainBlockWeight::target_block_weight_with_digest::(2, &digest); + let weight = MaxParachainBlockWeight::::target_block_weight_with_digest(2, &digest); assert_eq!(weight.ref_time(), 2 * 2 * WEIGHT_REF_TIME_PER_SECOND / 2); assert_eq!(weight.proof_size(), (2 * MAX_POV_SIZE as u64) / 2); } @@ -234,7 +233,10 @@ fn test_max_block_weight_hooks_type() { #[test] fn test_block_weight_mode_with_different_transaction_indices() { // Test BlockWeightMode with None transaction indices - let mode_with_none = BlockWeightMode::PotentialFullCore { first_transaction_index: None }; + let mode_with_none = BlockWeightMode::PotentialFullCore { + first_transaction_index: None, + target_weight: Weight::zero(), + }; let mode_with_some = BlockWeightMode::FractionOfCore { first_transaction_index: Some(42) }; // Test encoding/decoding @@ -243,7 +245,10 
@@ fn test_block_weight_mode_with_different_transaction_indices() { let decoded_none = BlockWeightMode::decode(&mut &encoded_none[..]).unwrap(); assert!(matches!( decoded_none, - BlockWeightMode::PotentialFullCore { first_transaction_index: None } + BlockWeightMode::PotentialFullCore { + first_transaction_index: None, + target_weight: Weight::Zero + } )); let encoded_some = mode_with_some.encode(); @@ -258,7 +263,7 @@ fn test_block_weight_mode_with_different_transaction_indices() { fn test_saturation_arithmetic() { new_test_ext_with_digest(Some(u16::MAX)).execute_with(|| { // Test with maximum number of cores to ensure no overflow - let weight = MaxParachainBlockWeight::get::(1); + let weight = MaxParachainBlockWeight::::get(1); // Should be capped at 2s ref time per core even with max cores assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); @@ -271,7 +276,7 @@ fn test_saturation_arithmetic() { fn test_large_target_blocks() { new_test_ext_with_digest(Some(4)).execute_with(|| { // Test with very large number of target blocks - let weight = MaxParachainBlockWeight::get::(u32::MAX); + let weight = MaxParachainBlockWeight::::get(u32::MAX); // Should not panic and should return minimal weights assert!(weight.ref_time() > 0); diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/transaction_extension.rs index e97b3cf5b31a0..0121365477a81 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/transaction_extension.rs @@ -38,35 +38,34 @@ use sp_runtime::{ /// Transaction extension that dynamically changes the max block weight. 
/// -/// With block bundling parachains are running with block weights that may not allow certain +/// With block bundling, parachains are running with block weights that may not allow certain /// transactions to be applied, e.g. a runtime upgrade. To ensure that these transactions can still /// be applied, this transaction extension can change the max block weight as required. There are /// multiple requirements for it to change the block weight: /// -/// 1. The block weight is only allowed to change in the *first block of a core*. +/// 1. Only the first block of a core is allowed to change its block weight. /// -/// 2. Either the inherent block logic (`on_initialize` etc), any `inherent` or any transaction up -/// to `MAX_TRANSACTION_TO_CONSIDER` required more block weight than the target block weight. +/// 2. Any `inherent` or any transaction up to `MAX_TRANSACTION_TO_CONSIDER` requires more block +/// weight than the target block weight. Target block weight is the max weight for the respective +/// extrinsic class. /// -/// We do not allow any block to randomly change the block weight, because the node side is tracking -/// the wall clock time it takes to build a block. When it takes too long, the node is aborting the -/// block production. But because the node knows that the first block of a core may runs longer, it -/// allows this block to take up to `2s` of wall clock time. `2s` is the time each `PoV` gets on the -/// relay chain for its validation or in other words the maximum core execution time. +/// Because the node is tracking the wall clock time while building a block to abort block +/// production if it takes too long, we do not allow any block to change the block weight. The node +/// knows that the first block of a core may run longer. So, the node allows this block to take up +/// to `2s` of wall clock time. `2s` is the time each `PoV` gets on the relay chain for its +/// validation or in other words the maximum core execution time. 
The extension sets the +/// [`CumulusDigestItem::UseFullCore`] digest when the block should occupy the entire core. /// -/// When the extension is changing the block weight, it changes it to the maximum core execution -/// time of `2s`. -/// -/// The extension also requires that the weight of *one* transaction alone is bigger than the max -/// block weight. -/// -/// Takes the following generic parameters: +/// # Generic parameters /// /// - `TargetBlockRate`: The target block rate the parachain should be running with. Or in other /// words, the number of blocks the parachain should produce in `6s`(relay chain slot duration). /// /// - `MAX_TRANSACTION`: The maximum number of transactions to consider before giving up to change /// the max block weight. +/// +/// - `ONLY_OPERATIONAL`: Should only operational transactions be allowed to change the max block +/// weight? #[derive(Encode, Decode, DecodeWithMemTracking, TypeInfo)] #[derive_where::derive_where(Clone, Eq, PartialEq, Default; S)] #[scale_info(skip_type_params(T, TargetBlockRate))] @@ -74,7 +73,7 @@ pub struct DynamicMaxBlockWeight< T, S, TargetBlockRate, - const MAX_TRANSACTION_TO_CONSIDER: usize = 100, + const MAX_TRANSACTION_TO_CONSIDER: u32 = 10, const ONLY_OPERATIONAL: bool = false, >(pub S, core::marker::PhantomData<(T, TargetBlockRate)>); @@ -85,7 +84,13 @@ impl DynamicMaxBlockWeight { } } -impl DynamicMaxBlockWeight +impl< + T, + S, + TargetBlockRate, + const MAX_TRANSACTION_TO_CONSIDER: u32, + const ONLY_OPERATIONAL: bool, + > DynamicMaxBlockWeight where T: Config, TargetBlockRate: Get, @@ -106,7 +111,7 @@ where match current_mode { // We are already allowing the full core, not that much more to do here. BlockWeightMode::FullCore => {}, - BlockWeightMode::PotentialFullCore { first_transaction_index } | + BlockWeightMode::PotentialFullCore { first_transaction_index, .. 
} | BlockWeightMode::FractionOfCore { first_transaction_index } => { let is_potential = matches!(current_mode, BlockWeightMode::PotentialFullCore { .. }); @@ -118,6 +123,11 @@ where let block_weight_over_limit = first_transaction_index == extrinsic_index && block_weight_over_target_block_weight::(); + let block_weights = T::BlockWeights::get(); + let target_weight = block_weights.get(info.class).max_total.unwrap_or_else( + || MaxParachainBlockWeight::::target_block_weight(TargetBlockRate::get()).saturating_sub(block_weights.base_block) + ); + // Protection against a misconfiguration as this should be detected by the pre-inherent hook. if block_weight_over_limit { *mode = Some(BlockWeightMode::FullCore); @@ -136,13 +146,11 @@ where .total_weight() // The extrinsic lengths counts towards the POV size .saturating_add(Weight::from_parts(0, len as u64)) - .any_gt(MaxParachainBlockWeight::::target_block_weight( - TargetBlockRate::get(), - )) && is_first_block_in_core::() + .any_gt(target_weight) && is_first_block_in_core::() { - // TODO: make 10 configurable - if extrinsic_index.unwrap_or_default().saturating_sub(first_transaction_index.unwrap_or_default()) < 10 { + if extrinsic_index.unwrap_or_default().saturating_sub(first_transaction_index.unwrap_or_default()) < MAX_TRANSACTION_TO_CONSIDER { *mode = Some(BlockWeightMode::PotentialFullCore { + target_weight, // While applying inherents `extrinsic_index` and `first_transaction_index` will be `None`. // When the first transaction is applied, we want to store the index. 
first_transaction_index: first_transaction_index.or(extrinsic_index), @@ -161,23 +169,23 @@ where }).map_err(Into::into) } - fn post_dispatch_extrinsic() { + fn post_dispatch_extrinsic(info: &DispatchInfo) { crate::BlockWeightMode::::mutate(|weight_mode| { let Some(mode) = *weight_mode else { return }; - let target_block_weight = - MaxParachainBlockWeight::::target_block_weight(TargetBlockRate::get()); - - let is_above_limit = frame_system::Pallet::::remaining_block_weight() - .consumed() - .any_gt(target_block_weight); - match mode { // If the previous mode was already `FullCore`, we are fine. BlockWeightMode::FullCore => {}, - BlockWeightMode::FractionOfCore { .. } => - // If we are above the limit, it means the transaction used more weight than what it - // had announced, which should not happen. + BlockWeightMode::FractionOfCore { .. } => { + let target_block_weight = + MaxParachainBlockWeight::::target_block_weight(TargetBlockRate::get()); + + let is_above_limit = frame_system::Pallet::::remaining_block_weight() + .consumed() + .any_gt(target_block_weight); + + // If we are above the limit, it means the transaction used more weight than + // what it had announced, which should not happen. if is_above_limit { log::error!( target: LOG_TARGET, @@ -208,11 +216,14 @@ where frame_system::Pallet::::deposit_log( CumulusDigestItem::UseFullCore.to_digest_item(), ); - }, + } + }, // Now we need to check if the transaction required more weight than a fraction of a // core block. - BlockWeightMode::PotentialFullCore { first_transaction_index } => - if is_above_limit { + BlockWeightMode::PotentialFullCore { first_transaction_index, target_weight } => { + let block_weight = frame_system::BlockWeight::::get(); + + if block_weight.get(info.class).any_gt(target_weight) { *weight_mode = Some(BlockWeightMode::FullCore); // Inform the node that this block uses the full core. 
@@ -222,7 +233,8 @@ where } else { *weight_mode = Some(BlockWeightMode::FractionOfCore { first_transaction_index }); - }, + } + }, } }); } @@ -312,7 +324,7 @@ where ) -> Result<(), TransactionValidityError> { S::post_dispatch(pre, info, post_info, len, result)?; - Self::post_dispatch_extrinsic(); + Self::post_dispatch_extrinsic(info); Ok(()) } @@ -345,7 +357,7 @@ where ) -> Result<(), TransactionValidityError> { S::bare_post_dispatch(info, post_info, len, result)?; - Self::post_dispatch_extrinsic(); + Self::post_dispatch_extrinsic(info); Ok(()) } diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index 4f5c70e3f0b3f..7b7e614efe3d1 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -19,7 +19,7 @@ mod block_builder; pub use block_builder::*; use codec::{Decode, Encode}; -use cumulus_pallet_parachain_system::DynamicMaxBlockWeight; +use cumulus_pallet_parachain_system::max_parachain_block_weight::DynamicMaxBlockWeight; pub use cumulus_test_runtime as runtime; use cumulus_test_runtime::AuraId; pub use polkadot_parachain_primitives::primitives::{ diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 82d26b006bcbf..f981e0fc8e1f1 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -231,7 +231,7 @@ parameter_types! { pub const BlockHashCount: BlockNumber = 4096; pub const Version: RuntimeVersion = VERSION; /// We allow for 1 second of compute with a 6 second average block time. 
- pub MaximumBlockWeight: Weight = cumulus_pallet_parachain_system::MaxParachainBlockWeight::get::(NumberOfBlocksPerRelaySlot::get()); + pub MaximumBlockWeight: Weight = cumulus_pallet_parachain_system::max_parachain_block_weight::MaxParachainBlockWeight::::get(NumberOfBlocksPerRelaySlot::get()); pub RuntimeBlockLength: BlockLength = BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() @@ -453,24 +453,25 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; /// The extension to the basic transaction logic. -pub type TxExtension = cumulus_pallet_parachain_system::DynamicMaxBlockWeight< - Runtime, - cumulus_pallet_weight_reclaim::StorageWeightReclaim< +pub type TxExtension = + cumulus_pallet_parachain_system::max_parachain_block_weight::DynamicMaxBlockWeight< Runtime, - ( - frame_system::AuthorizeCall, - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, - test_pallet::TestTransactionExtension, - ), - >, - NumberOfBlocksPerRelaySlot, ->; + cumulus_pallet_weight_reclaim::StorageWeightReclaim< + Runtime, + ( + frame_system::AuthorizeCall, + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, + test_pallet::TestTransactionExtension, + ), + >, + NumberOfBlocksPerRelaySlot, + >; /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs index 289fbff762ae7..5447d8570918e 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs @@ -19,7 +19,10 @@ use anyhow::anyhow; use crate::utils::initialize_network; -use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_para_throughput, assign_cores}; +use cumulus_zombienet_sdk_helpers::{ + assert_finality_lag, assert_para_throughput, assign_cores, create_assign_core_call, + submit_extrinsic_and_wait_for_finalization_success_with_timeout, +}; use polkadot_primitives::Id as ParaId; use serde_json::json; use zombienet_sdk::{ @@ -27,6 +30,7 @@ use zombienet_sdk::{ backend::{legacy::LegacyRpcMethods, rpc::RpcClient}, OnlineClient, PolkadotConfig, }, + subxt_signer::sr25519::dev, NetworkConfig, NetworkConfigBuilder, }; @@ -66,7 +70,7 @@ async fn block_bundling_basic() -> Result<(), anyhow::Error> { relay_client .tx() - .sign_and_submit_then_watch_default(&assign_cores_call, &alice) + .sign_and_submit_then_watch_default(&assign_cores_call, &dev::alice()) .await .inspect(|_| log::info!("Tx send, waiting for finalization"))? 
.wait_for_finalized_success() diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs index 3763439ac0acb..0187f8cb33182 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs @@ -183,7 +183,7 @@ async fn build_network_config() -> Result { .with_default_args(vec![ ("--authoring").into(), ("slot-based").into(), - ("-lparachain=debug,aura=trace").into(), + ("-lparachain=debug,aura=trace,basic-authorship=trace,runtime=trace").into(), ]) .with_collator(|n| n.with_name("collator-0")) .with_collator(|n| n.with_name("collator-1")) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs index 6e2c886db521d..8aa1a338ae6fd 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs @@ -50,20 +50,10 @@ async fn elastic_scaling_upgrade_to_3_cores( if async_backing { log::info!("Ensuring parachain makes progress making 6s blocks"); - assert_para_throughput( - &alice_client, - 20, - [(ParaId::from(PARA_ID), 15..21)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 15..21)], []).await?; } else { log::info!("Ensuring parachain makes progress making 12s blocks"); - assert_para_throughput( - &alice_client, - 20, - [(ParaId::from(PARA_ID), 7..12)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 7..12)], []).await?; } assign_cores(alice, PARA_ID, vec![1, 2]).await?; @@ -103,12 +93,7 @@ async fn elastic_scaling_upgrade_to_3_cores( ); log::info!("Ensure elastic scaling 
works, 3 blocks should be produced in each 6s slot"); - assert_para_throughput( - &alice_client, - 20, - [(ParaId::from(PARA_ID), 50..61)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 50..61)], []).await?; Ok(()) } From 751dc9d0a76e81700d27db04e536b4515c488320 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 10 Oct 2025 08:23:44 +0200 Subject: [PATCH 137/312] Hacky solution --- .../utils/wasm-builder/src/wasm_project.rs | 93 +++++++++++++++++-- 1 file changed, 86 insertions(+), 7 deletions(-) diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index b6709682ef491..1b779b8c6f73e 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -27,7 +27,7 @@ use std::{ borrow::ToOwned, collections::HashSet, env, fs, - hash::{Hash, Hasher}, + hash::{Hash, Hasher, DefaultHasher}, ops::Deref, path::{Path, PathBuf}, process, @@ -138,7 +138,7 @@ pub(crate) fn create_and_compile( let crate_metadata = crate_metadata(orig_project_cargo_toml); - let project = create_project( + let (project, enabled_features) = create_project( target, orig_project_cargo_toml, &runtime_workspace, @@ -196,9 +196,16 @@ pub(crate) fn create_and_compile( ) }; - let blob_name = + let base_blob_name = blob_out_name_override.unwrap_or_else(|| get_blob_name(target, &wasm_project_cargo_toml)); + // Generate feature hash for file naming + let features_hash = generate_features_hash(&enabled_features); + let blob_name = format!("{}-{}", features_hash, base_blob_name); + + // Check if we will have multiple outputs after creating this file + let should_cleanup_legacy = will_have_multiple_outputs_after_adding(&project, &base_blob_name, target, &features_hash); + let (final_blob_binary, bloaty_blob_binary) = match target { RuntimeTarget::Wasm => { let out_path = project.join(format!("{blob_name}.wasm")); @@ 
-211,6 +218,8 @@ pub(crate) fn create_and_compile( &blob_name, check_for_runtime_version_section, &build_config, + should_cleanup_legacy, + &base_blob_name, ) }, RuntimeTarget::Riscv => { @@ -242,6 +251,8 @@ fn maybe_compact_and_compress_wasm( blob_name: &str, check_for_runtime_version_section: bool, build_config: &BuildConfiguration, + should_cleanup_legacy: bool, + base_blob_name: &str, ) -> (Option, WasmBinaryBloaty) { // Try to compact and compress the bloaty blob, if the *outer* profile wants it. // @@ -275,6 +286,20 @@ fn maybe_compact_and_compress_wasm( .as_ref() .map(|binary| copy_blob_to_target_directory(wasm_project_cargo_toml, binary)); + let legacy_path = project.join(format!("{}.compact.compressed.wasm", base_blob_name)); + + if should_cleanup_legacy { + // Remove legacy file since we will have multiple outputs + let _ = fs::remove_file(&legacy_path); + } else { + // Only one output file will exist, create/maintain the legacy filename too + if let Some(final_binary) = &final_blob_binary { + if final_binary.wasm_binary_path() != legacy_path { + let _ = fs::copy(final_binary.wasm_binary_path(), &legacy_path); + } + } + } + (final_blob_binary, bloaty_blob_binary) } @@ -646,11 +671,65 @@ fn has_runtime_wasm_feature_declared( package.features.keys().any(|k| k == "runtime-wasm") } +/// Generate a short hash from enabled features +fn generate_features_hash(enabled_features: &HashSet) -> String { + let mut hasher = DefaultHasher::new(); + let mut sorted_features: Vec<_> = enabled_features.iter().collect(); + sorted_features.sort(); + + for feature in sorted_features { + feature.hash(&mut hasher); + } + + // Use only the first 8 characters of the hex hash for brevity + format!("{:x}", hasher.finish())[..8].to_string() +} + +/// Check if adding a new file with the given hash will result in multiple different hashes +fn will_have_multiple_outputs_after_adding(project: &Path, base_blob_name: &str, target: RuntimeTarget, new_hash: &str) -> bool { + let extension = 
match target { + RuntimeTarget::Wasm => ".compact.compressed.wasm", + RuntimeTarget::Riscv => ".polkavm", + }; + + // Look for existing files that match the pattern: {hash}-{base_blob_name}{extension} + // Exclude the legacy file: {base_blob_name}{extension} + let legacy_file = format!("{}{}", base_blob_name, extension); + let pattern_suffix = format!("-{}{}", base_blob_name, extension); + + if let Ok(entries) = fs::read_dir(project) { + let mut unique_hashes = HashSet::new(); + unique_hashes.insert(new_hash.to_string()); // Add the hash we're about to create + + for entry in entries.filter_map(|e| e.ok()) { + if let Some(file_name) = entry.file_name().to_str() { + // Skip the legacy file without hash + if file_name == legacy_file { + continue; + } + + // Check if this matches our hash pattern + if file_name.ends_with(&pattern_suffix) { + let hash_len = file_name.len() - pattern_suffix.len(); + if hash_len == 8 { // We use 8-character hashes + let hash = &file_name[..8]; + unique_hashes.insert(hash.to_string()); + } + } + } + } + + unique_hashes.len() > 1 + } else { + false + } +} + /// Create the project used to build the wasm binary. /// /// # Returns /// -/// The path to the created wasm project. +/// The path to the created wasm project and the set of enabled features. 
fn create_project( target: RuntimeTarget, project_cargo_toml: &Path, @@ -658,7 +737,7 @@ fn create_project( crate_metadata: &Metadata, workspace_root_path: &Path, features_to_enable: Vec, -) -> PathBuf { +) -> (PathBuf, HashSet) { let crate_name = get_crate_name(project_cargo_toml); let crate_path = project_cargo_toml.parent().expect("Parent path exists; qed"); let wasm_project_folder = wasm_workspace.join(&crate_name); @@ -682,7 +761,7 @@ fn create_project( workspace_root_path, &crate_name, crate_path, - enabled_features.into_iter(), + enabled_features.clone().into_iter(), ); match target { @@ -705,7 +784,7 @@ fn create_project( crate::copy_file_if_changed(crate_lock_file, wasm_project_folder.join("Cargo.lock")); } - wasm_project_folder + (wasm_project_folder, enabled_features) } /// A rustc profile. From b426fe23f19b416e18def95b30999ce96a94032d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 10 Oct 2025 11:41:38 +0200 Subject: [PATCH 138/312] Fixes + comments --- .../src/max_parachain_block_weight/mod.rs | 22 +++++++++++-------- .../src/max_parachain_block_weight/tests.rs | 17 +------------- 2 files changed, 14 insertions(+), 25 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mod.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mod.rs index 7647b312459c4..7b75a4a3ba15e 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mod.rs @@ -16,7 +16,13 @@ //! Provides functionality to dynamically calculate the max block weight for a parachain. //! -//! With block bundling parachains are relative free to choose whatever block interval they want. +//! With block bundling, parachains are relative free to choose whatever block interval they want. +//! The block interval is the time between individual blocks. The available resources per block (max +//! 
block weight) depend on the number of cores allocated to the parachain on the relay chain. Each +//! relay chain core provides an execution time of `2s` and a storage size of `10MiB`. Depending on +//! the desired number of blocks to produce, the resources need to be divided accordingly. +//! +//! //! This means they will run under normal conditions with blocks that have a small block weight. //! These small blocks may prevent certain transactions to be applied, e.g. a runtime upgrade. But //! it is not only about transactions, also certain block logic may requires more weight from time @@ -71,15 +77,13 @@ pub enum BlockWeightMode { }, } -/// Provides a [`get`](Self::get) method to calculate the max block weight based on the number of -/// target blocks. +/// Calculates the maximum block weight for a parachain. +/// +/// Based on the available cores and the number of desired blocks a block weight is calculated. /// -/// This takes internally into consideration the number of available cores, communicated via the -/// [`CumulusDigestItem::CoreInfo`] digest, to calculate the available resources. Based on the -/// available cores and the number of desired blocks a target weight is calculated. But it does not -/// only take the number of cores and blocks into consideration, but also the current -/// [`BlockWeightMode`]. The [`BlockWeightMode`] is set by the [`DynamicMaxBlockWeight`] -/// transaction extension depending certain conditions. +/// The max block weight is partly dynamic and controlled via the [`DynamicMaxBlockWeight`] +/// transaction extension. The transaction extension is communicating the desired max block weight +/// using the [`BlockWeightMode`]. 
pub struct MaxParachainBlockWeight(PhantomData); impl MaxParachainBlockWeight { diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/tests.rs b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/tests.rs index 7e07b5d5f76cd..f0e3b26233bc8 100644 --- a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/max_parachain_block_weight/tests.rs @@ -245,10 +245,7 @@ fn test_block_weight_mode_with_different_transaction_indices() { let decoded_none = BlockWeightMode::decode(&mut &encoded_none[..]).unwrap(); assert!(matches!( decoded_none, - BlockWeightMode::PotentialFullCore { - first_transaction_index: None, - target_weight: Weight::Zero - } + BlockWeightMode::PotentialFullCore { first_transaction_index: None, .. } )); let encoded_some = mode_with_some.encode(); @@ -271,15 +268,3 @@ fn test_saturation_arithmetic() { assert!(weight.proof_size() > 0); }); } - -#[test] -fn test_large_target_blocks() { - new_test_ext_with_digest(Some(4)).execute_with(|| { - // Test with very large number of target blocks - let weight = MaxParachainBlockWeight::::get(u32::MAX); - - // Should not panic and should return minimal weights - assert!(weight.ref_time() > 0); - assert!(weight.proof_size() > 0); - }); -} From 7ff7a7787a6b8f9fa74acd02c2c9a864df9d012e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 15 Oct 2025 00:26:08 +0300 Subject: [PATCH 139/312] Rename to block_weight --- .../src/{max_parachain_block_weight => block_weight}/mock.rs | 0 .../src/{max_parachain_block_weight => block_weight}/mod.rs | 0 .../pre_inherents_hook.rs | 0 .../src/{max_parachain_block_weight => block_weight}/tests.rs | 0 .../transaction_extension.rs | 0 cumulus/pallets/parachain-system/src/lib.rs | 2 +- 6 files changed, 1 insertion(+), 1 deletion(-) rename cumulus/pallets/parachain-system/src/{max_parachain_block_weight => block_weight}/mock.rs (100%) rename 
cumulus/pallets/parachain-system/src/{max_parachain_block_weight => block_weight}/mod.rs (100%) rename cumulus/pallets/parachain-system/src/{max_parachain_block_weight => block_weight}/pre_inherents_hook.rs (100%) rename cumulus/pallets/parachain-system/src/{max_parachain_block_weight => block_weight}/tests.rs (100%) rename cumulus/pallets/parachain-system/src/{max_parachain_block_weight => block_weight}/transaction_extension.rs (100%) diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs similarity index 100% rename from cumulus/pallets/parachain-system/src/max_parachain_block_weight/mock.rs rename to cumulus/pallets/parachain-system/src/block_weight/mock.rs diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs similarity index 100% rename from cumulus/pallets/parachain-system/src/max_parachain_block_weight/mod.rs rename to cumulus/pallets/parachain-system/src/block_weight/mod.rs diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/pre_inherents_hook.rs b/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs similarity index 100% rename from cumulus/pallets/parachain-system/src/max_parachain_block_weight/pre_inherents_hook.rs rename to cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs similarity index 100% rename from cumulus/pallets/parachain-system/src/max_parachain_block_weight/tests.rs rename to cumulus/pallets/parachain-system/src/block_weight/tests.rs diff --git a/cumulus/pallets/parachain-system/src/max_parachain_block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs similarity index 100% rename from 
cumulus/pallets/parachain-system/src/max_parachain_block_weight/transaction_extension.rs rename to cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 2f87632af56c9..1b9d74129d677 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -62,8 +62,8 @@ use xcm::{latest::XcmHash, VersionedLocation, VersionedXcm, MAX_XCM_DECODE_DEPTH use xcm_builder::InspectMessageQueues; mod benchmarking; +pub mod block_weight; pub mod consensus_hook; -pub mod max_parachain_block_weight; pub mod migration; mod mock; pub mod relay_state_snapshot; From 56df092ca8e0bec4f8e08f18641455fadc69709c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 15 Oct 2025 00:33:04 +0300 Subject: [PATCH 140/312] Fix --- cumulus/pallets/parachain-system/src/block_weight/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index 1875cb4432dc5..f368a8225d170 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -38,13 +38,13 @@ //! # Setup //! //! Setup the transaction extension: -#![doc = docify::embed!("src/max_parachain_block_weight/mock.rs", tx_extension_setup)] +#![doc = docify::embed!("src/block_weight/mock.rs", tx_extension_setup)] //! //! Setting up `MaximumBlockWeight`: -#![doc = docify::embed!("src/max_parachain_block_weight/mock.rs", max_block_weight_setup)] +#![doc = docify::embed!("src/block_weight/mock.rs", max_block_weight_setup)] //! //! 
Registering of the `PreInherents` hook: -#![doc = docify::embed!("src/max_parachain_block_weight/mock.rs", pre_inherents_setup)] +#![doc = docify::embed!("src/block_weight/mock.rs", pre_inherents_setup)] use crate::Config; use codec::{Decode, Encode}; From 93c88915f321495c089b97043d535c4e5cc0423c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 15 Oct 2025 00:37:46 +0300 Subject: [PATCH 141/312] More fixes --- .../src/block_weight/pre_inherents_hook.rs | 2 +- cumulus/pallets/parachain-system/src/lib.rs | 2 +- cumulus/test/client/src/lib.rs | 2 +- cumulus/test/runtime/src/lib.rs | 46 +++++++++---------- 4 files changed, 25 insertions(+), 27 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs b/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs index cc07d84ee4707..62f56df082fd9 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs @@ -17,7 +17,7 @@ use super::{ block_weight_over_target_block_weight, is_first_block_in_core, BlockWeightMode, LOG_TARGET, }; -use crate::max_parachain_block_weight::MaxParachainBlockWeight; +use crate::block_weight::MaxParachainBlockWeight; use cumulus_primitives_core::CumulusDigestItem; use frame_support::traits::PreInherents; use sp_core::Get; diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 1b9d74129d677..567d095df2d80 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -763,7 +763,7 @@ pub mod pallet { #[pallet::storage] #[pallet::whitelist_storage] pub type BlockWeightMode = - StorageValue<_, max_parachain_block_weight::BlockWeightMode, OptionQuery>; + StorageValue<_, block_weight::BlockWeightMode, OptionQuery>; /// Latest included block descendants the runtime accepted. 
In other words, these are /// ancestors of the currently executing block which have not been included in the observed diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index 7b7e614efe3d1..ca71ce54a917b 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -19,7 +19,7 @@ mod block_builder; pub use block_builder::*; use codec::{Decode, Encode}; -use cumulus_pallet_parachain_system::max_parachain_block_weight::DynamicMaxBlockWeight; +use cumulus_pallet_parachain_system::block_weight::DynamicMaxBlockWeight; pub use cumulus_test_runtime as runtime; use cumulus_test_runtime::AuraId; pub use polkadot_parachain_primitives::primitives::{ diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index f981e0fc8e1f1..830389d5b2f8f 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -231,7 +231,7 @@ parameter_types! { pub const BlockHashCount: BlockNumber = 4096; pub const Version: RuntimeVersion = VERSION; /// We allow for 1 second of compute with a 6 second average block time. 
- pub MaximumBlockWeight: Weight = cumulus_pallet_parachain_system::max_parachain_block_weight::MaxParachainBlockWeight::::get(NumberOfBlocksPerRelaySlot::get()); + pub MaximumBlockWeight: Weight = cumulus_pallet_parachain_system::block_weight::MaxParachainBlockWeight::::get(NumberOfBlocksPerRelaySlot::get()); pub RuntimeBlockLength: BlockLength = BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() @@ -275,11 +275,10 @@ impl frame_system::Config for Runtime { type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; - type PreInherents = - cumulus_pallet_parachain_system::max_parachain_block_weight::DynamicMaxBlockWeightHooks< - Runtime, - NumberOfBlocksPerRelaySlot, - >; + type PreInherents = cumulus_pallet_parachain_system::block_weight::DynamicMaxBlockWeightHooks< + Runtime, + NumberOfBlocksPerRelaySlot, + >; type SingleBlockMigrations = SingleBlockMigrations; } @@ -453,25 +452,24 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; /// The extension to the basic transaction logic. 
-pub type TxExtension = - cumulus_pallet_parachain_system::max_parachain_block_weight::DynamicMaxBlockWeight< +pub type TxExtension = cumulus_pallet_parachain_system::block_weight::DynamicMaxBlockWeight< + Runtime, + cumulus_pallet_weight_reclaim::StorageWeightReclaim< Runtime, - cumulus_pallet_weight_reclaim::StorageWeightReclaim< - Runtime, - ( - frame_system::AuthorizeCall, - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, - test_pallet::TestTransactionExtension, - ), - >, - NumberOfBlocksPerRelaySlot, - >; + ( + frame_system::AuthorizeCall, + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, + test_pallet::TestTransactionExtension, + ), + >, + NumberOfBlocksPerRelaySlot, +>; /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = From 3079e3df7149c456a233cb97268561cf740b9dd5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 15 Oct 2025 01:00:56 +0300 Subject: [PATCH 142/312] Fix --- .../src/collators/slot_based/block_import.rs | 5 ++++- .../aura/src/collators/slot_based/mod.rs | 4 +++- cumulus/test/runtime/src/lib.rs | 20 +++++++++++-------- .../tests/zombie_ci/block_bundling/mod.rs | 2 +- .../zombie_ci/block_bundling/tracing_block.rs | 5 ++--- .../zombienet-sdk/tests/zombie_ci/mod.rs | 2 +- substrate/client/basic-authorship/src/lib.rs | 4 +--- .../utils/wasm-builder/src/wasm_project.rs | 15 ++++++++++---- umbrella/src/lib.rs | 1 - 9 files changed, 35 insertions(+), 23 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs index e1d8e66c0eed9..94c91477dc49f 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -30,7 +30,10 @@ use sp_api::{ use sp_consensus::BlockOrigin; use sp_consensus_aura::AuraApi; use sp_runtime::traits::{Block as BlockT, HashingFor, Header as _}; -use sp_trie::{proof_size_extension::{ProofSizeExt, RecordingProofSizeProvider}, recorder::IgnoredNodes}; +use sp_trie::{ + proof_size_extension::{ProofSizeExt, RecordingProofSizeProvider}, + recorder::IgnoredNodes, +}; use std::{collections::HashMap, marker::PhantomData, sync::Arc}; /// Handle for receiving the block and the storage proof from the [`SlotBasedBlockImport`]. 
diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index 2345cebff50ff..a9a217e460307 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -79,7 +79,9 @@ use futures::FutureExt; use polkadot_primitives::{ CollatorPair, CoreIndex, Hash as RelayHash, Id as ParaId, ValidationCodeHash, }; -use sc_client_api::{backend::AuxStore, client::PreCommitActions, BlockBackend, BlockOf, UsageProvider}; +use sc_client_api::{ + backend::AuxStore, client::PreCommitActions, BlockBackend, BlockOf, UsageProvider, +}; use sc_consensus::BlockImport; use sc_utils::mpsc::tracing_unbounded; use sp_api::{ProvideRuntimeApi, StorageProof}; diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 830389d5b2f8f..ed075f9ffaa73 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -71,7 +71,7 @@ use core::time::Duration; use frame_support::{derive_impl, traits::OnRuntimeUpgrade, PalletId}; use sp_api::{decl_runtime_apis, impl_runtime_apis}; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; -use sp_core::{ConstBool, ConstU32, ConstU64, OpaqueMetadata}; +use sp_core::{ConstBool, ConstU32, ConstU64, Get, OpaqueMetadata}; use sp_runtime::{ generic, impl_opaque_keys, @@ -225,13 +225,17 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// by Operational extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +/// Target number of blocks per relay chain slot. +const TARGET_BLOCKS: u32 = 12; +type MaximumBlockWeight = cumulus_pallet_parachain_system::block_weight::MaxParachainBlockWeight< + Runtime, + ConstU32, +>; + parameter_types! { - /// Target number of blocks per relay chain slot. 
- pub const NumberOfBlocksPerRelaySlot: u32 = 12; pub const BlockHashCount: BlockNumber = 4096; pub const Version: RuntimeVersion = VERSION; /// We allow for 1 second of compute with a 6 second average block time. - pub MaximumBlockWeight: Weight = cumulus_pallet_parachain_system::block_weight::MaxParachainBlockWeight::::get(NumberOfBlocksPerRelaySlot::get()); pub RuntimeBlockLength: BlockLength = BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() @@ -277,7 +281,7 @@ impl frame_system::Config for Runtime { type MaxConsumers = frame_support::traits::ConstU32<16>; type PreInherents = cumulus_pallet_parachain_system::block_weight::DynamicMaxBlockWeightHooks< Runtime, - NumberOfBlocksPerRelaySlot, + ConstU32, >; type SingleBlockMigrations = SingleBlockMigrations; } @@ -468,7 +472,7 @@ pub type TxExtension = cumulus_pallet_parachain_system::block_weight::DynamicMax test_pallet::TestTransactionExtension, ), >, - NumberOfBlocksPerRelaySlot, + ConstU32, >; /// Unchecked extrinsic type as expected by this runtime. @@ -650,10 +654,10 @@ impl_runtime_apis! 
{ impl cumulus_primitives_core::SlotSchedule for Runtime { fn next_slot_schedule(num_cores: u32) -> cumulus_primitives_core::BlockInterval { - let block_time = Duration::from_secs(2) * num_cores / NumberOfBlocksPerRelaySlot::get(); + let block_time = Duration::from_secs(2) * num_cores / TARGET_BLOCKS; cumulus_primitives_core::BlockInterval { - number_of_blocks: NumberOfBlocksPerRelaySlot::get(), + number_of_blocks: TARGET_BLOCKS, block_time: block_time.min(Duration::from_millis(500)), } } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs index 503b4472c03e4..a16032c40f8bc 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs @@ -16,7 +16,7 @@ // limitations under the License. mod basic; +mod full_core_usage_scenarios; mod runtime_upgrade; mod three_cores_glutton; -mod full_core_usage_scenarios; mod tracing_block; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs index 1d3d9178f21fc..e59b2b88ae93c 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs @@ -105,9 +105,8 @@ async fn block_bundling_tracing_block() -> Result<(), anyhow::Error> { // Decode and verify the BlockTrace is successful match trace_result { - TraceBlockResponse::TraceError(error) => { - Err(anyhow!("Block tracing failed: {}", error.error)) - }, + TraceBlockResponse::TraceError(error) => + Err(anyhow!("Block tracing failed: {}", error.error)), TraceBlockResponse::BlockTrace(_) => { log::info!("✅ Block trace successful!"); Ok(()) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/mod.rs 
b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/mod.rs index 05902a0c6637b..b660d383a8cb9 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/mod.rs @@ -1,13 +1,13 @@ // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 +mod block_bundling; mod bootnodes; mod elastic_scaling; mod full_node_catching_up; mod full_node_warp_sync; mod migrate_solo; mod parachain_extrinsic_get_finalized; -mod block_bundling; mod pov_recovery; mod rpc_collator_build_blocks; mod runtime_upgrade; diff --git a/substrate/client/basic-authorship/src/lib.rs b/substrate/client/basic-authorship/src/lib.rs index dd347ca7d8591..3300aebd8e7b0 100644 --- a/substrate/client/basic-authorship/src/lib.rs +++ b/substrate/client/basic-authorship/src/lib.rs @@ -73,7 +73,5 @@ mod basic_authorship; -pub use crate::basic_authorship::{ - Proposer, ProposerFactory, DEFAULT_BLOCK_SIZE_LIMIT, -}; +pub use crate::basic_authorship::{Proposer, ProposerFactory, DEFAULT_BLOCK_SIZE_LIMIT}; pub use sp_consensus::ProposeArgs; diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index 1b779b8c6f73e..510ade9fd9aaa 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -27,7 +27,7 @@ use std::{ borrow::ToOwned, collections::HashSet, env, fs, - hash::{Hash, Hasher, DefaultHasher}, + hash::{DefaultHasher, Hash, Hasher}, ops::Deref, path::{Path, PathBuf}, process, @@ -204,7 +204,8 @@ pub(crate) fn create_and_compile( let blob_name = format!("{}-{}", features_hash, base_blob_name); // Check if we will have multiple outputs after creating this file - let should_cleanup_legacy = will_have_multiple_outputs_after_adding(&project, &base_blob_name, target, &features_hash); + let should_cleanup_legacy = + will_have_multiple_outputs_after_adding(&project, &base_blob_name, target, &features_hash); let 
(final_blob_binary, bloaty_blob_binary) = match target { RuntimeTarget::Wasm => { @@ -686,7 +687,12 @@ fn generate_features_hash(enabled_features: &HashSet) -> String { } /// Check if adding a new file with the given hash will result in multiple different hashes -fn will_have_multiple_outputs_after_adding(project: &Path, base_blob_name: &str, target: RuntimeTarget, new_hash: &str) -> bool { +fn will_have_multiple_outputs_after_adding( + project: &Path, + base_blob_name: &str, + target: RuntimeTarget, + new_hash: &str, +) -> bool { let extension = match target { RuntimeTarget::Wasm => ".compact.compressed.wasm", RuntimeTarget::Riscv => ".polkavm", @@ -711,7 +717,8 @@ fn will_have_multiple_outputs_after_adding(project: &Path, base_blob_name: &str, // Check if this matches our hash pattern if file_name.ends_with(&pattern_suffix) { let hash_len = file_name.len() - pattern_suffix.len(); - if hash_len == 8 { // We use 8-character hashes + if hash_len == 8 { + // We use 8-character hashes let hash = &file_name[..8]; unique_hashes.insert(hash.to_string()); } diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index 73454d5e9b1c7..8ed55eed4b148 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -92,7 +92,6 @@ pub use cumulus_client_consensus_aura; #[cfg(feature = "cumulus-client-consensus-common")] pub use cumulus_client_consensus_common; - /// The relay-chain provided consensus algorithm. 
#[cfg(feature = "cumulus-client-consensus-relay-chain")] pub use cumulus_client_consensus_relay_chain; From 4f2066ba7a6c7908dd775741b08129fa35ccdcbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 15 Oct 2025 18:41:02 +0300 Subject: [PATCH 143/312] Fix tests --- .../slot_based/block_builder_task.rs | 2 +- .../parachain-system/src/block_weight/mock.rs | 99 +++++++++-- .../parachain-system/src/block_weight/mod.rs | 8 +- .../src/block_weight/tests.rs | 163 ++++-------------- .../src/validate_block/tests.rs | 2 +- cumulus/primitives/core/src/lib.rs | 34 ---- cumulus/test/runtime/src/lib.rs | 6 - substrate/client/rpc/src/state/state_full.rs | 3 +- 8 files changed, 125 insertions(+), 192 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 6227b6e27af41..3b1e70c439c35 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -350,7 +350,7 @@ where ?error, "Failed to fetch `slot_schedule`, assuming one block with 2s" ); - cumulus_primitives_core::BlockInterval { + cumulus_primitives_core::NextSlotSchedule { number_of_blocks: 1, block_time: Duration::from_secs(2), } diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index d2d78ae457eba..f1111b846b6b6 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -22,24 +22,20 @@ use cumulus_primitives_core::{ }; use frame_support::{ construct_runtime, derive_impl, - dispatch::{DispatchClass, DispatchInfo, Pays}, - traits::Hooks, - weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, + dispatch::DispatchClass, + parameter_types, + weights::{ + constants::{BlockExecutionWeight, 
ExtrinsicBaseWeight}, + Weight, + }, }; -use frame_system::mocking::MockBlock; -use polkadot_primitives::MAX_POV_SIZE; +use frame_system::limits::BlockWeights; use sp_core::ConstU32; use sp_io; -use sp_runtime::{ - generic::Header, - testing::{TestXt, UintAuthorityId}, - traits::{ - BlakeTwo256, Block as BlockT, Dispatchable, Header as HeaderT, IdentityLookup, - TransactionExtension, - }, - transaction_validity::TransactionSource, - BuildStorage, Perbill, -}; +use sp_runtime::{BuildStorage, Perbill}; + +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); type Block = frame_system::mocking::MockBlock; @@ -63,6 +59,8 @@ pub type TxExtension = DynamicMaxBlockWeight< #[docify::export_content(max_block_weight_setup)] mod max_block_weight_setup { + use super::*; + type MaximumBlockWeight = MaxParachainBlockWeight>; parameter_types! { @@ -92,7 +90,7 @@ mod max_block_weight_setup { #[docify::export(pre_inherents_setup)] impl frame_system::Config for Runtime { // Setup the block weight. - type BlockWeights = RuntimeBlockWeights; + type BlockWeights = max_block_weight_setup::RuntimeBlockWeights; // Set the `PreInherents` hook. 
type PreInherents = DynamicMaxBlockWeightHooks>; @@ -146,9 +144,76 @@ pub fn new_test_ext_with_digest(num_cores: Option) -> sp_io::TestExternalit let digest = CumulusDigestItem::CoreInfo(core_info).to_digest_item(); - frame_system::Pallet::::deposit_log(digest); + frame_system::Pallet::::deposit_log(digest); } }); ext } + +/// Helper to create test externalities with core and bundle info +pub fn new_test_ext_with_bundle( + num_cores: Option, + bundle_index: u8, + maybe_last: bool, +) -> sp_io::TestExternalities { + let storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + let mut ext = sp_io::TestExternalities::from(storage); + + ext.execute_with(|| { + if let Some(num_cores) = num_cores { + let core_info = CoreInfo { + selector: CoreSelector(0), + claim_queue_offset: ClaimQueueOffset(0), + number_of_cores: Compact(num_cores), + }; + + let digest = CumulusDigestItem::CoreInfo(core_info).to_digest_item(); + frame_system::Pallet::::deposit_log(digest); + } + + let bundle_info = BundleInfo { index: bundle_index, maybe_last }; + let digest = CumulusDigestItem::BundleInfo(bundle_info).to_digest_item(); + frame_system::Pallet::::deposit_log(digest); + }); + + ext +} + +/// Helper to create test externalities for first block in core +pub fn new_test_ext_first_block(num_cores: u16) -> sp_io::TestExternalities { + new_test_ext_with_bundle(Some(num_cores), 0, false) +} + +/// Helper to create test externalities for non-first block in core +pub fn new_test_ext_non_first_block(num_cores: u16) -> sp_io::TestExternalities { + new_test_ext_with_bundle(Some(num_cores), 1, false) +} + +/// Helper to check if UseFullCore digest was deposited +pub fn has_use_full_core_digest() -> bool { + use codec::Decode; + use cumulus_primitives_core::CUMULUS_CONSENSUS_ID; + use sp_runtime::DigestItem; + + let digest = frame_system::Pallet::::digest(); + digest.logs.iter().any(|log| match log { + DigestItem::Consensus(id, val) if id == &CUMULUS_CONSENSUS_ID => { + if let 
Ok(CumulusDigestItem::UseFullCore) = CumulusDigestItem::decode(&mut &val[..]) { + true + } else { + false + } + }, + _ => false, + }) +} + +/// Helper to register weight as consumed (simulating on_initialize) +pub fn register_weight(weight: Weight) { + frame_system::Pallet::::register_extra_weight_unchecked( + weight, + DispatchClass::Mandatory, + ); +} diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index f368a8225d170..ebbc8817c335e 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -130,8 +130,12 @@ impl> return Self::FULL_CORE_WEIGHT; } - let total_ref_time = - (number_of_cores as u64).saturating_mul(Self::MAX_REF_TIME_PER_CORE_NS); + // At maximum we want to allow `6s` of ref time, because we don't want to overload nodes + // that are running with standard hardware. These nodes need to be able to import all the + // blocks in 6s. + let total_ref_time = (number_of_cores as u64) + .saturating_mul(Self::MAX_REF_TIME_PER_CORE_NS) + .min(WEIGHT_REF_TIME_PER_SECOND * 6); let ref_time_per_block = total_ref_time .saturating_div(target_blocks as u64) .min(Self::MAX_REF_TIME_PER_CORE_NS); diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index f0e3b26233bc8..8170d2474314d 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -14,39 +14,21 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use super::{mock::*, transaction_extension::DynamicMaxBlockWeight, *}; -use crate as parachain_system; +use super::{mock::*, *}; use codec::Compact; use cumulus_primitives_core::{ BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, }; -use frame_support::{ - construct_runtime, derive_impl, - dispatch::{DispatchClass, DispatchInfo, Pays}, - traits::Hooks, - weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, -}; -use frame_system::mocking::MockBlock; +use frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND; use polkadot_primitives::MAX_POV_SIZE; use sp_core::ConstU32; -use sp_io; -use sp_runtime::{ - generic::Header, - testing::{TestXt, UintAuthorityId}, - traits::{ - BlakeTwo256, Block as BlockT, Dispatchable, Header as HeaderT, IdentityLookup, - TransactionExtension, - }, - transaction_validity::TransactionSource, - BuildStorage, Perbill, -}; +use sp_runtime::Digest; #[test] fn test_single_core_single_block() { new_test_ext_with_digest(Some(1)).execute_with(|| { - let weight = MaxParachainBlockWeight::::get(1); + let weight = MaxParachainBlockWeight::>::get(); - // With 1 core and 1 target block, should get full 2s ref time and full PoV size assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); }); @@ -55,7 +37,7 @@ fn test_single_core_single_block() { #[test] fn test_single_core_multiple_blocks() { new_test_ext_with_digest(Some(1)).execute_with(|| { - let weight = MaxParachainBlockWeight::::get(4); + let weight = MaxParachainBlockWeight::>::get(); // With 1 core and 4 target blocks, should get 0.5s ref time and 1/4 PoV size per block assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND / 4); @@ -66,19 +48,18 @@ fn test_single_core_multiple_blocks() { #[test] fn test_multiple_cores_single_block() { new_test_ext_with_digest(Some(3)).execute_with(|| { - let weight = MaxParachainBlockWeight::::get(1); + let weight = MaxParachainBlockWeight::>::get(); - // With 
3 cores and 1 target block, should get max 2s ref time (capped per core) and 3x - // PoV size + // With 3 cores and 1 target blocks, should get 2s ref time and 1 PoV size assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); - assert_eq!(weight.proof_size(), 3 * MAX_POV_SIZE as u64); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); }); } #[test] fn test_multiple_cores_multiple_blocks() { new_test_ext_with_digest(Some(2)).execute_with(|| { - let weight = MaxParachainBlockWeight::::get(4); + let weight = MaxParachainBlockWeight::>::get(); // With 2 cores and 4 target blocks, should get 1s ref time and 2x PoV size / 4 per // block @@ -90,7 +71,7 @@ fn test_multiple_cores_multiple_blocks() { #[test] fn test_no_core_info() { new_test_ext_with_digest(None).execute_with(|| { - let weight = MaxParachainBlockWeight::::get(1); + let weight = MaxParachainBlockWeight::>::get(); // Without core info, should return conservative default assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); @@ -101,7 +82,7 @@ fn test_no_core_info() { #[test] fn test_zero_cores() { new_test_ext_with_digest(Some(0)).execute_with(|| { - let weight = MaxParachainBlockWeight::::get(1); + let weight = MaxParachainBlockWeight::>::get(); // With 0 cores, should return conservative default assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); @@ -112,9 +93,7 @@ fn test_zero_cores() { #[test] fn test_zero_target_blocks() { new_test_ext_with_digest(Some(2)).execute_with(|| { - let weight = MaxParachainBlockWeight::::get(0); - - // With 0 target blocks, should return conservative default + let weight = MaxParachainBlockWeight::>::get(); assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); }); @@ -124,41 +103,36 @@ fn test_zero_target_blocks() { fn test_target_block_weight_calculation() { new_test_ext_with_digest(Some(4)).execute_with(|| { // Test target_block_weight function directly - let weight_2_blocks = 
MaxParachainBlockWeight::::target_block_weight(2); - let weight_8_blocks = MaxParachainBlockWeight::::target_block_weight(8); + // Both calls return the same since ConstU32<4> is fixed at compile time + let weight = MaxParachainBlockWeight::>::target_block_weight(); - // With 4 cores and 2 target blocks, should get 2s per block - assert_eq!(weight_2_blocks.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); - assert_eq!(weight_2_blocks.proof_size(), (4 * MAX_POV_SIZE as u64) / 2); - - // With 4 cores and 8 target blocks, should get 1s per block - assert_eq!(weight_8_blocks.ref_time(), 2 * 4 * WEIGHT_REF_TIME_PER_SECOND / 8); - assert_eq!(weight_8_blocks.proof_size(), (4 * MAX_POV_SIZE as u64) / 8); + // With 4 cores and 4 target blocks, should get 2s per block (8s / 4) + assert_eq!(weight.ref_time(), 4 * 2 * WEIGHT_REF_TIME_PER_SECOND / 4); + assert_eq!(weight.proof_size(), (4 * MAX_POV_SIZE as u64) / 4); }); } #[test] fn test_max_ref_time_per_core_cap() { new_test_ext_with_digest(Some(8)).execute_with(|| { - // Even with many cores, ref time per block should be capped at MAX_REF_TIME_PER_CORE_NS - let weight = MaxParachainBlockWeight::::get(1); + // With 8 cores and 4 target blocks, ref time per block should be capped at 2s per core + let weight = MaxParachainBlockWeight::>::get(); - // Should be capped at 2s ref time per core - assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); - // But proof size should scale with number of cores - assert_eq!(weight.proof_size(), 8 * MAX_POV_SIZE as u64); + // 8 cores * 2s = 16s total, divided by 4 blocks = 4s, but capped at 6s for all blocks in + // total + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND * 3 / 4); + assert_eq!(weight.proof_size(), 4 * MAX_POV_SIZE as u64); }); } #[test] fn test_target_block_weight_with_digest_edge_cases() { - use cumulus_primitives_core::CumulusDigestItem; - use sp_runtime::Digest; - // Test with empty digest let empty_digest = Digest::default(); - let weight = 
MaxParachainBlockWeight::::target_block_weight_with_digest(1, &empty_digest); - assert_eq!(weight, MaxParachainBlockWeight::::FULL_CORE_WEIGHT); + let weight = MaxParachainBlockWeight::>::target_block_weight_with_digest( + &empty_digest, + ); + assert_eq!(weight, MaxParachainBlockWeight::>::FULL_CORE_WEIGHT); // Test with digest containing core info let core_info = CoreInfo { @@ -166,20 +140,18 @@ fn test_target_block_weight_with_digest_edge_cases() { claim_queue_offset: ClaimQueueOffset(0), number_of_cores: Compact(2u16), }; - let digest_item = CumulusDigestItem::CoreInfo(core_info).to_digest_item(); - let mut digest = Digest::default(); - digest.push(digest_item); - let weight = MaxParachainBlockWeight::::target_block_weight_with_digest(2, &digest); - assert_eq!(weight.ref_time(), 2 * 2 * WEIGHT_REF_TIME_PER_SECOND / 2); - assert_eq!(weight.proof_size(), (2 * MAX_POV_SIZE as u64) / 2); + let digest = Digest { logs: vec![CumulusDigestItem::CoreInfo(core_info).to_digest_item()] }; + + // With 2 cores and 4 target blocks: (2 cores * 2s) / 4 blocks = 1s + let weight = + MaxParachainBlockWeight::>::target_block_weight_with_digest(&digest); + assert_eq!(weight.ref_time(), 2 * 2 * WEIGHT_REF_TIME_PER_SECOND / 4); + assert_eq!(weight.proof_size(), (2 * MAX_POV_SIZE as u64) / 4); } #[test] fn test_is_first_block_in_core_functions() { - use cumulus_primitives_core::{BundleInfo, CumulusDigestItem}; - use sp_runtime::Digest; - new_test_ext_with_digest(Some(1)).execute_with(|| { // Test without bundle info - should return false let empty_digest = Digest::default(); @@ -201,70 +173,3 @@ fn test_is_first_block_in_core_functions() { assert!(!super::is_first_block_in_core_with_digest(&digest_not_first)); }); } - -#[test] -fn test_dynamic_max_block_weight_creation() { - use super::transaction_extension::DynamicMaxBlockWeight; - - // Test creating DynamicMaxBlockWeight with new() - let inner = (); - let dynamic_weight = DynamicMaxBlockWeight::::new(inner); - 
assert_eq!(dynamic_weight.0, ()); - - // Test creating DynamicMaxBlockWeight with From trait - let dynamic_weight_from: DynamicMaxBlockWeight = ().into(); - assert_eq!(dynamic_weight_from.0, ()); - - // Test Debug formatting - let debug_string = format!("{:?}", dynamic_weight); - assert!(debug_string.contains("DynamicMaxBlockWeight")); -} - -#[test] -fn test_max_block_weight_hooks_type() { - use super::pre_inherents_hook::DynamicMaxBlockWeightHooks; - use sp_core::ConstU32; - - // Ensure the type can be instantiated (compile-time test) - let _hooks: DynamicMaxBlockWeightHooks> = - DynamicMaxBlockWeightHooks(core::marker::PhantomData); -} - -#[test] -fn test_block_weight_mode_with_different_transaction_indices() { - // Test BlockWeightMode with None transaction indices - let mode_with_none = BlockWeightMode::PotentialFullCore { - first_transaction_index: None, - target_weight: Weight::zero(), - }; - let mode_with_some = BlockWeightMode::FractionOfCore { first_transaction_index: Some(42) }; - - // Test encoding/decoding - use codec::{Decode, Encode}; - let encoded_none = mode_with_none.encode(); - let decoded_none = BlockWeightMode::decode(&mut &encoded_none[..]).unwrap(); - assert!(matches!( - decoded_none, - BlockWeightMode::PotentialFullCore { first_transaction_index: None, .. 
} - )); - - let encoded_some = mode_with_some.encode(); - let decoded_some = BlockWeightMode::decode(&mut &encoded_some[..]).unwrap(); - assert!(matches!( - decoded_some, - BlockWeightMode::FractionOfCore { first_transaction_index: Some(42) } - )); -} - -#[test] -fn test_saturation_arithmetic() { - new_test_ext_with_digest(Some(u16::MAX)).execute_with(|| { - // Test with maximum number of cores to ensure no overflow - let weight = MaxParachainBlockWeight::::get(1); - - // Should be capped at 2s ref time per core even with max cores - assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); - // Proof size should saturate properly - assert!(weight.proof_size() > 0); - }); -} diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 749e1522a71bb..2da42a6529d62 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -563,7 +563,7 @@ fn state_changes_in_multiple_blocks_are_applied_in_exact_order() { #[test] fn validate_block_handles_ump_signal() { use cumulus_primitives_core::{ - relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}, + relay_chain::{UMPSignal, UMP_SEPARATOR}, ClaimQueueOffset, CoreInfo, CoreSelector, }; sp_tracing::try_init_simple(); diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index e46b801939764..bfb165c2568a1 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -24,7 +24,6 @@ use core::time::Duration; use alloc::vec::Vec; use codec::{Compact, Decode, DecodeAll, DecodeWithMemTracking, Encode, MaxEncodedLen}; -use core::time::Duration; use polkadot_parachain_primitives::primitives::HeadData; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; @@ -535,18 +534,6 @@ pub struct CollationInfo { pub head_data: HeadData, } -<<<<<<< HEAD -/// Block interval configuration for parachain block production for 
one relay chain slot. -#[derive(Clone, Debug, codec::Decode, codec::Encode, PartialEq, TypeInfo)] -pub struct BlockInterval { - /// The number of blocks to produce in the relay chain slot. - pub number_of_blocks: u32, - /// The target block time in wall clock time for each block. - pub block_time: Duration, -} - -||||||| 5f69bea23d -======= /// The schedule for the next relay chain slot. /// /// Returns the maximum number of parachain blocks to produce and the block time per block to use. @@ -587,7 +574,6 @@ impl NextSlotSchedule { } } ->>>>>>> origin/master sp_api::decl_runtime_apis! { /// Runtime api to collect information about a collation. /// @@ -619,25 +605,6 @@ sp_api::decl_runtime_apis! { pub trait RelayParentOffsetApi { /// Fetch the slot offset that is expected from the relay chain. fn relay_parent_offset() -> u32; -<<<<<<< HEAD - } - - /// API for parachain slot scheduling. - /// - /// This runtime API allows the parachain runtime to communicate the block interval - /// to the node side. The node will call this API every relay chain slot (~6 seconds) - /// to get the scheduled parachain block interval. - pub trait SlotSchedule { - /// Get the block production schedule for the next relay chain slot. - /// - /// - `num_cores`: The number of cores assigned to this parachain - /// - /// Returns a [`BlockInterval`] specifying the number of blocks and target block time - /// on standard hardware in wall clock time. This should be used as the upper wall - /// clock time when building a block. - fn next_slot_schedule(num_cores: u32) -> BlockInterval; -||||||| 5f69bea23d -======= } /// API for parachain slot scheduling. 
@@ -752,6 +719,5 @@ mod tests { let schedule = NextSlotSchedule::x_blocks_using_y_cores(12, 1); assert_eq!(schedule.number_of_blocks, 12); assert_eq!(schedule.block_time, Duration::from_nanos(166_666_666)); ->>>>>>> origin/master } } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index f146c6317f941..6e0223fd0ff63 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -233,15 +233,9 @@ type MaximumBlockWeight = cumulus_pallet_parachain_system::block_weight::MaxPara >; parameter_types! { -<<<<<<< HEAD - pub const BlockHashCount: BlockNumber = 4096; -||||||| 5f69bea23d - pub const BlockHashCount: BlockNumber = 250; -======= /// Target number of blocks per relay chain slot. pub const NumberOfBlocksPerRelaySlot: u32 = 12; pub const BlockHashCount: BlockNumber = 250; ->>>>>>> origin/master pub const Version: RuntimeVersion = VERSION; /// We allow for 1 second of compute with a 6 second average block time. pub RuntimeBlockLength: BlockLength = diff --git a/substrate/client/rpc/src/state/state_full.rs b/substrate/client/rpc/src/state/state_full.rs index b17f03c074594..ed04d510fd118 100644 --- a/substrate/client/rpc/src/state/state_full.rs +++ b/substrate/client/rpc/src/state/state_full.rs @@ -66,7 +66,6 @@ struct QueryStorageRange { pub struct FullState { client: Arc, executor: SubscriptionTaskExecutor, - block_execute: Option>>, execute_block: Option>>, _phantom: PhantomData, } @@ -86,7 +85,7 @@ where executor: SubscriptionTaskExecutor, execute_block: Option>>, ) -> Self { - Self { client, executor, block_execute, execute_block, _phantom: PhantomData } + Self { client, executor, execute_block, _phantom: PhantomData } } /// Returns given block hash or best block hash if None is passed. 
From 057b7f7e3127580d4261fdb0f6cb486062662bfb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 17 Oct 2025 10:53:47 +0300 Subject: [PATCH 144/312] Fixes --- .../parachain-system/src/block_weight/mock.rs | 32 +- .../parachain-system/src/block_weight/mod.rs | 6 +- .../src/block_weight/pre_inherents_hook.rs | 4 + .../src/block_weight/tests.rs | 483 +++++++++++++++++- .../src/block_weight/transaction_extension.rs | 22 +- .../system/src/extensions/check_weight.rs | 8 +- 6 files changed, 522 insertions(+), 33 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index f1111b846b6b6..0c0880a611ab1 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -24,6 +24,7 @@ use frame_support::{ construct_runtime, derive_impl, dispatch::DispatchClass, parameter_types, + traits::PreInherents, weights::{ constants::{BlockExecutionWeight, ExtrinsicBaseWeight}, Weight, @@ -34,12 +35,15 @@ use sp_core::ConstU32; use sp_io; use sp_runtime::{BuildStorage, Perbill}; -const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); +const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(1); + +/// A simple call, which one doesn't matter. 
+pub const CALL: &RuntimeCall = + &RuntimeCall::System(frame_system::Call::set_heap_pages { pages: 0u64 }); type Block = frame_system::mocking::MockBlock; -const TARGET_BLOCK_RATE: u32 = 12; +pub const TARGET_BLOCK_RATE: u32 = 12; #[docify::export(tx_extension_setup)] pub type TxExtension = DynamicMaxBlockWeight< @@ -70,15 +74,10 @@ mod max_block_weight_setup { weights.base_extrinsic = ExtrinsicBaseWeight::get(); }) .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MaximumBlockWeight::get()); + weights.max_total = Some(MaximumBlockWeight::get()); }) .for_class(DispatchClass::Operational, |weights| { weights.max_total = Some(MaximumBlockWeight::get()); - // Operational transactions have some extra reserved space, so that they - // are included even if block reached `MaximumBlockWeight`. - weights.reserved = Some( - MaximumBlockWeight::get() - NORMAL_DISPATCH_RATIO * MaximumBlockWeight::get() - ); }) .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) .build_or_panic(); @@ -211,9 +210,14 @@ pub fn has_use_full_core_digest() -> bool { } /// Helper to register weight as consumed (simulating on_initialize) -pub fn register_weight(weight: Weight) { - frame_system::Pallet::::register_extra_weight_unchecked( - weight, - DispatchClass::Mandatory, - ); +pub fn register_weight(weight: Weight, class: DispatchClass) { + frame_system::Pallet::::register_extra_weight_unchecked(weight, class); +} + +/// Emulates what happes after `initialize_block` finished. 
+pub fn initialize_block_finished() { + System::set_block_consumed_resources(Weight::zero(), 0); + System::note_finished_initialize(); + ::PreInherents::pre_inherents(); + System::note_inherents_applied(); } diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index ebbc8817c335e..62e309b118016 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -71,7 +71,7 @@ const LOG_TARGET: &str = "runtime::parachain-system::block-weight"; /// The current block weight mode. /// /// Based on this mode [`MaxParachainBlockWeight`] determines the current allowed block weight. -#[derive(Debug, Encode, Decode, Clone, Copy, TypeInfo)] +#[derive(Debug, Encode, Decode, Clone, Copy, TypeInfo, PartialEq)] pub enum BlockWeightMode { /// The block is allowed to use the weight of a full core. FullCore, @@ -141,7 +141,9 @@ impl> .min(Self::MAX_REF_TIME_PER_CORE_NS); let total_pov_size = (number_of_cores as u64).saturating_mul(MAX_POV_SIZE as u64); - let proof_size_per_block = total_pov_size.saturating_div(target_blocks as u64); + // Each block at max gets one core. + let proof_size_per_block = + total_pov_size.saturating_div(target_blocks as u64).min(MAX_POV_SIZE as u64); Weight::from_parts(ref_time_per_block, proof_size_per_block) } diff --git a/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs b/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs index 62f56df082fd9..c121da9394953 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs @@ -39,6 +39,10 @@ where { fn pre_inherents() { if !block_weight_over_target_block_weight::() { + // We still initialize the `BlockWeightMode`. 
+ crate::BlockWeightMode::::put(BlockWeightMode::FractionOfCore { + first_transaction_index: None, + }); return } diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 8170d2474314d..ea7e7a1fc9566 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -14,15 +14,28 @@ // See the License for the specific language governing permissions and // limitations under the License. -use super::{mock::*, *}; +use super::{mock::*, transaction_extension::DynamicMaxBlockWeight, *}; +use assert_matches::assert_matches; use codec::Compact; use cumulus_primitives_core::{ BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, }; -use frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND; +use frame_support::{ + assert_err, assert_ok, + dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}, + pallet_prelude::InvalidTransaction, + weights::constants::WEIGHT_REF_TIME_PER_SECOND, +}; +use frame_system::{CheckWeight, RawOrigin as SystemOrigin}; use polkadot_primitives::MAX_POV_SIZE; use sp_core::ConstU32; -use sp_runtime::Digest; +use sp_runtime::{ + traits::{DispatchTransaction, TransactionExtension}, + Digest, +}; + +type TxExtension = DynamicMaxBlockWeight, ConstU32<4>>; +type MaximumBlockWeight = MaxParachainBlockWeight>; #[test] fn test_single_core_single_block() { @@ -106,9 +119,8 @@ fn test_target_block_weight_calculation() { // Both calls return the same since ConstU32<4> is fixed at compile time let weight = MaxParachainBlockWeight::>::target_block_weight(); - // With 4 cores and 4 target blocks, should get 2s per block (8s / 4) - assert_eq!(weight.ref_time(), 4 * 2 * WEIGHT_REF_TIME_PER_SECOND / 4); - assert_eq!(weight.proof_size(), (4 * MAX_POV_SIZE as u64) / 4); + assert_eq!(weight.ref_time(), 3 * 2 * WEIGHT_REF_TIME_PER_SECOND / 4); + assert_eq!(weight.proof_size(), MAX_POV_SIZE 
as u64); }); } @@ -121,7 +133,7 @@ fn test_max_ref_time_per_core_cap() { // 8 cores * 2s = 16s total, divided by 4 blocks = 4s, but capped at 6s for all blocks in // total assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND * 3 / 4); - assert_eq!(weight.proof_size(), 4 * MAX_POV_SIZE as u64); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); }); } @@ -173,3 +185,460 @@ fn test_is_first_block_in_core_functions() { assert!(!super::is_first_block_in_core_with_digest(&digest_not_first)); }); } + +// ======================================== +// Transaction Extension Tests +// ======================================== + +#[test] +fn tx_extension_sets_fraction_of_core_mode() { + use frame_support::dispatch::{DispatchClass, DispatchInfo}; + + new_test_ext_first_block(2).execute_with(|| { + initialize_block_finished(); + + // BlockWeightMode should not be set yet + assert!(crate::BlockWeightMode::::get().is_none()); + + // Create a small transaction + let small_weight = Weight::from_parts(100_000, 1024); + let info = DispatchInfo { + call_weight: small_weight, + class: DispatchClass::Normal, + pays_fee: frame_support::dispatch::Pays::Yes, + ..Default::default() + }; + + assert_ok!(TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + )); + + assert_eq!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { first_transaction_index: Some(0) }) + ); + }); +} + +#[test] +fn tx_extension_large_tx_enables_full_core_usage() { + sp_tracing::init_for_tests(); + new_test_ext_first_block(2).execute_with(|| { + initialize_block_finished(); + + // Create a transaction larger than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + let info = DispatchInfo { + call_weight: large_weight, + class: DispatchClass::Normal, + 
..Default::default() + }; + + assert_ok!(TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + )); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::PotentialFullCore { first_transaction_index: Some(0), .. }) + ); + + let mut post_info = PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; + + assert_ok!(TxExtension::post_dispatch((), &info, &mut post_info, 0, &Ok(()))); + + assert_eq!(crate::BlockWeightMode::::get(), Some(BlockWeightMode::FullCore)); + assert!(has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get().ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + }); +} + +#[test] +fn tx_extension_large_tx_with_refund_goes_back_to_fractional() { + new_test_ext_first_block(2).execute_with(|| { + initialize_block_finished(); + + // Create a transaction larger than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + let info = DispatchInfo { + call_weight: large_weight, + class: DispatchClass::Normal, + ..Default::default() + }; + + assert_ok!(TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + )); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::PotentialFullCore { first_transaction_index: Some(0), .. }) + ); + + let mut post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(5000, 5000)), + pays_fee: Default::default(), + }; + + assert_ok!(TxExtension::post_dispatch((), &info, &mut post_info, 0, &Ok(()))); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { .. 
}) + ); + assert!(!has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get(), target_weight); + }); +} + +#[test] +fn tx_extension_large_tx_is_rejected_on_non_first_block() { + new_test_ext_non_first_block(2).execute_with(|| { + initialize_block_finished(); + + // Create a transaction larger than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + let info = DispatchInfo { + call_weight: large_weight, + class: DispatchClass::Normal, + ..Default::default() + }; + + assert_eq!( + TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + ) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() + ); + + // Should stay in FractionOfCore mode (not PotentialFullCore) since not first block + assert_eq!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { first_transaction_index: None }) + ); + assert!(!has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get(), target_weight); + }); +} + +#[test] +fn tx_extension_post_dispatch_to_full_core_because_of_manual_weight() { + new_test_ext_non_first_block(2).execute_with(|| { + initialize_block_finished(); + + let target_weight = MaxParachainBlockWeight::>::target_block_weight(); + + // Transaction announces small weight + let small_weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND / 10, 1024); + let info = DispatchInfo { call_weight: small_weight, ..Default::default() }; + + assert_ok!(TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + )); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { first_transaction_index: Some(0) }) + ); + + // But actually uses much more weight (bug in weight annotation) + let 
large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + register_weight(large_weight, DispatchClass::Normal); + + let mut post_info = PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; + assert_ok!(TxExtension::post_dispatch((), &info, &mut post_info, 0, &Ok(()))); + + // Should transition to FullCore due to exceeding limit + assert_matches!(crate::BlockWeightMode::::get(), Some(BlockWeightMode::FullCore)); + + assert!(has_use_full_core_digest()); + }); +} + +#[test] +fn tx_extension_large_tx_after_limit_is_rejected() { + sp_tracing::init_for_tests(); + new_test_ext_first_block(2).execute_with(|| { + initialize_block_finished(); + + // Set some index above the limit. + System::set_extrinsic_index(20); + + // Create a transaction larger than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + let info = DispatchInfo { call_weight: large_weight, ..Default::default() }; + + assert_eq!( + TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + ) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() + ); + + assert_eq!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { first_transaction_index: None }) + ); + assert!(!has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get(), target_weight); + }); +} + +#[test] +fn tx_extension_large_weight_before_first_tx() { + sp_tracing::init_for_tests(); + new_test_ext_first_block(2).execute_with(|| { + initialize_block_finished(); + + let target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + register_weight(large_weight, DispatchClass::Normal); + + let small_weight 
= Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND / 10, 1024); + let info = DispatchInfo { call_weight: small_weight, ..Default::default() }; + + assert_ok!(TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + )); + + assert_matches!(crate::BlockWeightMode::::get(), Some(BlockWeightMode::FullCore)); + + assert!(has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get().ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + }); +} + +// ======================================== +// Pre-Inherents Hook Tests +// ======================================== + +#[test] +fn test_pre_inherents_hook_first_block_over_limit() { + new_test_ext_first_block(2).execute_with(|| { + use frame_support::traits::PreInherents; + + // Simulate on_initialize consuming more than target weight + let target_weight = MaxParachainBlockWeight::>::target_block_weight(); + let excessive_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + // register_weight(excessive_weight); + + // Call pre_inherents hook + DynamicMaxBlockWeightHooks::>::pre_inherents(); + + // Should be in FullCore mode + let mode = crate::BlockWeightMode::::get(); + assert!(matches!(mode, Some(BlockWeightMode::FullCore))); + + // Should have UseFullCore digest + assert!(has_use_full_core_digest()); + }); +} + +#[test] +fn test_pre_inherents_hook_non_first_block_over_limit() { + new_test_ext_non_first_block(2).execute_with(|| { + use frame_support::traits::PreInherents; + + // Simulate on_initialize consuming more than target weight + let target_weight = MaxParachainBlockWeight::>::target_block_weight(); + let excessive_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + // register_weight(excessive_weight); + + // Get initial remaining weight + let initial_remaining = frame_system::Pallet::::remaining_block_weight(); + + // Call 
pre_inherents hook + DynamicMaxBlockWeightHooks::>::pre_inherents(); + + // Should be in FullCore mode + let mode = crate::BlockWeightMode::::get(); + assert!(matches!(mode, Some(BlockWeightMode::FullCore))); + + // Should have UseFullCore digest + assert!(has_use_full_core_digest()); + + // Should have registered FULL_CORE_WEIGHT to prevent more transactions + let final_remaining = frame_system::Pallet::::remaining_block_weight(); + assert!(final_remaining.remaining().any_lt(initial_remaining.remaining())); + }); +} + +#[test] +fn test_pre_inherents_hook_under_limit_no_change() { + new_test_ext_first_block(2).execute_with(|| { + use frame_support::traits::PreInherents; + + // Simulate on_initialize consuming less than target weight + let target_weight = MaxParachainBlockWeight::>::target_block_weight(); + let small_weight = + Weight::from_parts(target_weight.ref_time() / 2, target_weight.proof_size() / 2); + + // register_weight(small_weight); + + // Call pre_inherents hook + DynamicMaxBlockWeightHooks::>::pre_inherents(); + + // Should NOT be in FullCore mode + let mode = crate::BlockWeightMode::::get(); + assert!(mode.is_none()); + + // Should NOT have UseFullCore digest + assert!(!has_use_full_core_digest()); + }); +} + +// ======================================== +// Integration Tests +// ======================================== + +#[test] +fn test_integration_first_block_with_large_inherent() { + new_test_ext_first_block(2).execute_with(|| { + use frame_support::traits::PreInherents; + + // Simulate large on_initialize + let target_weight = MaxParachainBlockWeight::>::target_block_weight(); + let large_inherent_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND / 2, 512 * 1024)); + + // register_weight(large_inherent_weight); + + // Pre-inherents hook should detect and switch to FullCore + DynamicMaxBlockWeightHooks::>::pre_inherents(); + + // Mark inherents as applied + frame_system::Pallet::::note_finished_initialize(); + 
+ // Now check max block weight + let max_weight = MaxParachainBlockWeight::>::get(); + + // Should return FULL_CORE_WEIGHT + assert_eq!(max_weight, MaxParachainBlockWeight::>::FULL_CORE_WEIGHT); + + // Should have UseFullCore digest + assert!(has_use_full_core_digest()); + }); +} + +#[test] +fn test_integration_bundle_info_correctly_detected() { + // Test that bundle info at different indices is correctly detected + for index in 0u8..5 { + new_test_ext_with_bundle(Some(2), index, false).execute_with(|| { + let is_first = super::is_first_block_in_core::(); + if index == 0 { + assert!(is_first, "Index 0 should be first block"); + } else { + assert!(!is_first, "Index {} should not be first block", index); + } + }); + } +} + +#[test] +fn test_integration_max_weight_without_bundle_info() { + new_test_ext_with_digest(Some(2)).execute_with(|| { + // Without bundle info, cannot determine if first block + // Should still work but max weight determination will be conservative + + frame_system::Pallet::::note_finished_initialize(); + + let max_weight = MaxParachainBlockWeight::>::get(); + + // With 2 cores and 4 target blocks + let expected_weight = + Weight::from_parts(2 * 2 * WEIGHT_REF_TIME_PER_SECOND / 4, 2 * MAX_POV_SIZE as u64 / 4); + + assert_eq!(max_weight, expected_weight); + }); +} + +#[test] +fn test_integration_6s_ref_time_cap() { + // Test that even with many cores, we cap at 6s total ref time + new_test_ext_with_digest(Some(10)).execute_with(|| { + frame_system::Pallet::::note_finished_initialize(); + + let max_weight = MaxParachainBlockWeight::>::get(); + + assert_eq!(max_weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + assert_eq!(max_weight.proof_size(), MAX_POV_SIZE as u64); + }); +} + +#[test] +fn test_integration_multiple_target_blocks_reduces_weight() { + // Same cores, different target blocks + let num_cores = 4; + + new_test_ext_with_digest(Some(num_cores)).execute_with(|| { + frame_system::Pallet::::note_finished_initialize(); + + let 
weight_2_blocks = MaxParachainBlockWeight::>::get(); + let weight_4_blocks = MaxParachainBlockWeight::>::get(); + let weight_8_blocks = MaxParachainBlockWeight::>::get(); + + // More target blocks = less weight per block + assert!(weight_2_blocks.ref_time() > weight_4_blocks.ref_time()); + assert!(weight_4_blocks.ref_time() > weight_8_blocks.ref_time()); + + assert!(weight_2_blocks.proof_size() > weight_4_blocks.proof_size()); + assert!(weight_4_blocks.proof_size() > weight_8_blocks.proof_size()); + }); +} diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index ca847ebacf5e0..a1ea876f6d814 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -116,8 +116,8 @@ where len: usize, ) -> Result<(), TransactionValidityError> { let is_not_inherent = frame_system::Pallet::::inherents_applied(); - let transaction_index = is_not_inherent - .then(|| frame_system::Pallet::::extrinsic_index().unwrap_or_default()); + let extrinsic_index = frame_system::Pallet::::extrinsic_index().unwrap_or_default(); + let transaction_index = is_not_inherent.then(|| extrinsic_index); crate::BlockWeightMode::::mutate(|mode| { let current_mode = *mode.get_or_insert_with(|| BlockWeightMode::FractionOfCore { @@ -141,7 +141,7 @@ where "`PotentialFullCore` should resolve to `FullCore` or `FractionOfCore` after applying a transaction.", ); - let block_weight_over_limit = first_transaction_index == transaction_index + let block_weight_over_limit = extrinsic_index == 0 && block_weight_over_target_block_weight::(); let block_weights = Config::BlockWeights::get(); @@ -164,13 +164,14 @@ where "Inherent block logic took longer than the target block weight, \ `DynamicMaxBlockWeightHooks` not registered as `PreInherents` hook!", ); - } else if info + } else if dbg!(dbg!(info 
.total_weight() // The extrinsic lengths counts towards the POV size - .saturating_add(Weight::from_parts(0, len as u64)) - .any_gt(target_weight) && is_first_block_in_core::() + .saturating_add(Weight::from_parts(0, len as u64))) + .any_gt(dbg!(target_weight))) { - if transaction_index.unwrap_or_default().saturating_sub(first_transaction_index.unwrap_or_default()) < MAX_TRANSACTION_TO_CONSIDER { + if transaction_index.unwrap_or_default().saturating_sub(first_transaction_index.unwrap_or_default()) < MAX_TRANSACTION_TO_CONSIDER + && is_first_block_in_core::() { log::trace!( target: LOG_TARGET, "Enabling `PotentialFullCore` mode for extrinsic", @@ -196,12 +197,15 @@ where "Resetting back to `FractionOfCore`" ); *mode = - Some(BlockWeightMode::FractionOfCore { first_transaction_index }); + Some(BlockWeightMode::FractionOfCore { first_transaction_index: first_transaction_index.or(transaction_index) }); } else { log::trace!( target: LOG_TARGET, "Not changing block weight mode" ); + + *mode = + Some(BlockWeightMode::FractionOfCore { first_transaction_index: first_transaction_index.or(transaction_index) }); } }, }; @@ -265,7 +269,7 @@ where let block_weight = frame_system::BlockWeight::::get(); let extrinsic_class_weight = block_weight.get(info.class); - if extrinsic_class_weight.any_gt(target_weight) { + if dbg!(extrinsic_class_weight).any_gt(dbg!(target_weight)) { log::trace!( target: LOG_TARGET, "Extrinsic class weight {extrinsic_class_weight:?} above target weight {target_weight:?}, enabling `FullCore` mode." diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs index 68685d7e8ca42..3e91fc5292d8d 100644 --- a/substrate/frame/system/src/extensions/check_weight.rs +++ b/substrate/frame/system/src/extensions/check_weight.rs @@ -38,10 +38,16 @@ use sp_weights::Weight; /// /// This extension does not influence any fields of `TransactionValidity` in case the /// transaction is valid. 
-#[derive(Encode, Decode, DecodeWithMemTracking, Clone, Eq, PartialEq, Default, TypeInfo)] +#[derive(Encode, Decode, DecodeWithMemTracking, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct CheckWeight(core::marker::PhantomData); +impl Default for CheckWeight { + fn default() -> Self { + Self(Default::default()) + } +} + impl CheckWeight where T::RuntimeCall: Dispatchable, From c9debc5133abc5c130188e68144c737cb6f8aad9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 17 Oct 2025 11:25:36 +0300 Subject: [PATCH 145/312] Tests --- .../parachain-system/src/block_weight/mock.rs | 118 +-- .../src/block_weight/tests.rs | 729 +++++++++--------- 2 files changed, 419 insertions(+), 428 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index 0c0880a611ab1..15d5db7962326 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -128,66 +128,84 @@ pub type Executive = frame_executive::Executive< AllPalletsWithSystem, >; -pub fn new_test_ext_with_digest(num_cores: Option) -> sp_io::TestExternalities { - let storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); - - let mut ext = sp_io::TestExternalities::from(storage); +/// Builder for test externalities with fluent API +pub struct TestExtBuilder { + num_cores: Option, + bundle_index: Option, + bundle_maybe_last: bool, +} - ext.execute_with(|| { - if let Some(num_cores) = num_cores { - let core_info = CoreInfo { - selector: CoreSelector(0), - claim_queue_offset: ClaimQueueOffset(0), - number_of_cores: Compact(num_cores), - }; +impl Default for TestExtBuilder { + fn default() -> Self { + sp_tracing::init_for_tests(); - let digest = CumulusDigestItem::CoreInfo(core_info).to_digest_item(); + Self { num_cores: None, bundle_index: None, bundle_maybe_last: false } + } +} - 
frame_system::Pallet::::deposit_log(digest); - } - }); +impl TestExtBuilder { + /// Create a new builder + pub fn new() -> Self { + Self::default() + } - ext -} + /// Set the number of cores + pub fn number_of_cores(mut self, num_cores: u16) -> Self { + self.num_cores = Some(num_cores); + self + } -/// Helper to create test externalities with core and bundle info -pub fn new_test_ext_with_bundle( - num_cores: Option, - bundle_index: u8, - maybe_last: bool, -) -> sp_io::TestExternalities { - let storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); - - let mut ext = sp_io::TestExternalities::from(storage); - - ext.execute_with(|| { - if let Some(num_cores) = num_cores { - let core_info = CoreInfo { - selector: CoreSelector(0), - claim_queue_offset: ClaimQueueOffset(0), - number_of_cores: Compact(num_cores), - }; - - let digest = CumulusDigestItem::CoreInfo(core_info).to_digest_item(); - frame_system::Pallet::::deposit_log(digest); + /// Set this as the first block in the core (bundle index = 0) + pub fn first_block_in_core(mut self, is_first: bool) -> Self { + if is_first { + self.bundle_index = Some(0); + } else if self.bundle_index.is_none() { + // If not first and no bundle index set, default to index 1 + self.bundle_index = Some(1); } + self + } - let bundle_info = BundleInfo { index: bundle_index, maybe_last }; - let digest = CumulusDigestItem::BundleInfo(bundle_info).to_digest_item(); - frame_system::Pallet::::deposit_log(digest); - }); + /// Set the bundle index directly + pub fn bundle_index(mut self, index: u8) -> Self { + self.bundle_index = Some(index); + self + } - ext -} + /// Set whether this is maybe the last block in the bundle + pub fn maybe_last(mut self, maybe_last: bool) -> Self { + self.bundle_maybe_last = maybe_last; + self + } -/// Helper to create test externalities for first block in core -pub fn new_test_ext_first_block(num_cores: u16) -> sp_io::TestExternalities { - new_test_ext_with_bundle(Some(num_cores), 0, false) 
-} + /// Build the test externalities + pub fn build(self) -> sp_io::TestExternalities { + let storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext = sp_io::TestExternalities::from(storage); + + ext.execute_with(|| { + // Add core info if specified + if let Some(num_cores) = self.num_cores { + let core_info = CoreInfo { + selector: CoreSelector(0), + claim_queue_offset: ClaimQueueOffset(0), + number_of_cores: Compact(num_cores), + }; + let digest = CumulusDigestItem::CoreInfo(core_info).to_digest_item(); + frame_system::Pallet::::deposit_log(digest); + } + + // Add bundle info if specified + if let Some(bundle_index) = self.bundle_index { + let bundle_info = + BundleInfo { index: bundle_index, maybe_last: self.bundle_maybe_last }; + let digest = CumulusDigestItem::BundleInfo(bundle_info).to_digest_item(); + frame_system::Pallet::::deposit_log(digest); + } + }); -/// Helper to create test externalities for non-first block in core -pub fn new_test_ext_non_first_block(num_cores: u16) -> sp_io::TestExternalities { - new_test_ext_with_bundle(Some(num_cores), 1, false) + ext + } } /// Helper to check if UseFullCore digest was deposited diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index ea7e7a1fc9566..85c6e0f7c2702 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -24,6 +24,7 @@ use frame_support::{ assert_err, assert_ok, dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}, pallet_prelude::InvalidTransaction, + traits::PreInherents, weights::constants::WEIGHT_REF_TIME_PER_SECOND, }; use frame_system::{CheckWeight, RawOrigin as SystemOrigin}; @@ -39,7 +40,7 @@ type MaximumBlockWeight = MaxParachainBlockWeight>::get(); assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); @@ -49,7 +50,7 @@ fn test_single_core_single_block() { #[test] fn 
test_single_core_multiple_blocks() { - new_test_ext_with_digest(Some(1)).execute_with(|| { + TestExtBuilder::new().number_of_cores(1).build().execute_with(|| { let weight = MaxParachainBlockWeight::>::get(); // With 1 core and 4 target blocks, should get 0.5s ref time and 1/4 PoV size per block @@ -60,7 +61,7 @@ fn test_single_core_multiple_blocks() { #[test] fn test_multiple_cores_single_block() { - new_test_ext_with_digest(Some(3)).execute_with(|| { + TestExtBuilder::new().number_of_cores(3).build().execute_with(|| { let weight = MaxParachainBlockWeight::>::get(); // With 3 cores and 1 target blocks, should get 2s ref time and 1 PoV size @@ -71,7 +72,7 @@ fn test_multiple_cores_single_block() { #[test] fn test_multiple_cores_multiple_blocks() { - new_test_ext_with_digest(Some(2)).execute_with(|| { + TestExtBuilder::new().number_of_cores(2).build().execute_with(|| { let weight = MaxParachainBlockWeight::>::get(); // With 2 cores and 4 target blocks, should get 1s ref time and 2x PoV size / 4 per @@ -83,7 +84,7 @@ fn test_multiple_cores_multiple_blocks() { #[test] fn test_no_core_info() { - new_test_ext_with_digest(None).execute_with(|| { + TestExtBuilder::new().build().execute_with(|| { let weight = MaxParachainBlockWeight::>::get(); // Without core info, should return conservative default @@ -94,7 +95,7 @@ fn test_no_core_info() { #[test] fn test_zero_cores() { - new_test_ext_with_digest(Some(0)).execute_with(|| { + TestExtBuilder::new().number_of_cores(0).build().execute_with(|| { let weight = MaxParachainBlockWeight::>::get(); // With 0 cores, should return conservative default @@ -105,7 +106,7 @@ fn test_zero_cores() { #[test] fn test_zero_target_blocks() { - new_test_ext_with_digest(Some(2)).execute_with(|| { + TestExtBuilder::new().number_of_cores(2).build().execute_with(|| { let weight = MaxParachainBlockWeight::>::get(); assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); @@ -114,7 +115,7 @@ 
fn test_zero_target_blocks() { #[test] fn test_target_block_weight_calculation() { - new_test_ext_with_digest(Some(4)).execute_with(|| { + TestExtBuilder::new().number_of_cores(4).build().execute_with(|| { // Test target_block_weight function directly // Both calls return the same since ConstU32<4> is fixed at compile time let weight = MaxParachainBlockWeight::>::target_block_weight(); @@ -126,7 +127,7 @@ fn test_target_block_weight_calculation() { #[test] fn test_max_ref_time_per_core_cap() { - new_test_ext_with_digest(Some(8)).execute_with(|| { + TestExtBuilder::new().number_of_cores(8).build().execute_with(|| { // With 8 cores and 4 target blocks, ref time per block should be capped at 2s per core let weight = MaxParachainBlockWeight::>::get(); @@ -164,7 +165,7 @@ fn test_target_block_weight_with_digest_edge_cases() { #[test] fn test_is_first_block_in_core_functions() { - new_test_ext_with_digest(Some(1)).execute_with(|| { + TestExtBuilder::new().number_of_cores(1).build().execute_with(|| { // Test without bundle info - should return false let empty_digest = Digest::default(); assert!(!super::is_first_block_in_core_with_digest(&empty_digest)); @@ -186,420 +187,406 @@ fn test_is_first_block_in_core_functions() { }); } -// ======================================== -// Transaction Extension Tests -// ======================================== - #[test] fn tx_extension_sets_fraction_of_core_mode() { use frame_support::dispatch::{DispatchClass, DispatchInfo}; - new_test_ext_first_block(2).execute_with(|| { - initialize_block_finished(); - - // BlockWeightMode should not be set yet - assert!(crate::BlockWeightMode::::get().is_none()); - - // Create a small transaction - let small_weight = Weight::from_parts(100_000, 1024); - let info = DispatchInfo { - call_weight: small_weight, - class: DispatchClass::Normal, - pays_fee: frame_support::dispatch::Pays::Yes, - ..Default::default() - }; - - assert_ok!(TxExtension::validate_and_prepare( - 
TxExtension::new(Default::default()), - SystemOrigin::Signed(0).into(), - &CALL, - &info, - 100, - 0, - )); - - assert_eq!( - crate::BlockWeightMode::::get(), - Some(BlockWeightMode::FractionOfCore { first_transaction_index: Some(0) }) - ); - }); + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + initialize_block_finished(); + + // BlockWeightMode should not be set yet + assert!(crate::BlockWeightMode::::get().is_none()); + + // Create a small transaction + let small_weight = Weight::from_parts(100_000, 1024); + let info = DispatchInfo { + call_weight: small_weight, + class: DispatchClass::Normal, + pays_fee: frame_support::dispatch::Pays::Yes, + ..Default::default() + }; + + assert_ok!(TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + )); + + assert_eq!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { first_transaction_index: Some(0) }) + ); + }); } #[test] fn tx_extension_large_tx_enables_full_core_usage() { - sp_tracing::init_for_tests(); - new_test_ext_first_block(2).execute_with(|| { - initialize_block_finished(); - - // Create a transaction larger than target weight - let target_weight = MaximumBlockWeight::target_block_weight(); - let large_weight = target_weight - .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); - - let info = DispatchInfo { - call_weight: large_weight, - class: DispatchClass::Normal, - ..Default::default() - }; - - assert_ok!(TxExtension::validate_and_prepare( - TxExtension::new(Default::default()), - SystemOrigin::Signed(0).into(), - &CALL, - &info, - 100, - 0, - )); - - assert_matches!( - crate::BlockWeightMode::::get(), - Some(BlockWeightMode::PotentialFullCore { first_transaction_index: Some(0), .. 
}) - ); - - let mut post_info = PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; - - assert_ok!(TxExtension::post_dispatch((), &info, &mut post_info, 0, &Ok(()))); - - assert_eq!(crate::BlockWeightMode::::get(), Some(BlockWeightMode::FullCore)); - assert!(has_use_full_core_digest()); - assert_eq!(MaximumBlockWeight::get().ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); - }); -} + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + initialize_block_finished(); + + // Create a transaction larger than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + let info = DispatchInfo { + call_weight: large_weight, + class: DispatchClass::Normal, + ..Default::default() + }; + + assert_ok!(TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + )); -#[test] -fn tx_extension_large_tx_with_refund_goes_back_to_fractional() { - new_test_ext_first_block(2).execute_with(|| { - initialize_block_finished(); - - // Create a transaction larger than target weight - let target_weight = MaximumBlockWeight::target_block_weight(); - let large_weight = target_weight - .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); - - let info = DispatchInfo { - call_weight: large_weight, - class: DispatchClass::Normal, - ..Default::default() - }; - - assert_ok!(TxExtension::validate_and_prepare( - TxExtension::new(Default::default()), - SystemOrigin::Signed(0).into(), - &CALL, - &info, - 100, - 0, - )); - - assert_matches!( - crate::BlockWeightMode::::get(), - Some(BlockWeightMode::PotentialFullCore { first_transaction_index: Some(0), .. 
}) - ); - - let mut post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(5000, 5000)), - pays_fee: Default::default(), - }; - - assert_ok!(TxExtension::post_dispatch((), &info, &mut post_info, 0, &Ok(()))); - - assert_matches!( - crate::BlockWeightMode::::get(), - Some(BlockWeightMode::FractionOfCore { .. }) - ); - assert!(!has_use_full_core_digest()); - assert_eq!(MaximumBlockWeight::get(), target_weight); - }); + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::PotentialFullCore { first_transaction_index: Some(0), .. }) + ); + + let mut post_info = + PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; + + assert_ok!(TxExtension::post_dispatch((), &info, &mut post_info, 0, &Ok(()))); + + assert_eq!(crate::BlockWeightMode::::get(), Some(BlockWeightMode::FullCore)); + assert!(has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get().ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + }); } #[test] -fn tx_extension_large_tx_is_rejected_on_non_first_block() { - new_test_ext_non_first_block(2).execute_with(|| { - initialize_block_finished(); - - // Create a transaction larger than target weight - let target_weight = MaximumBlockWeight::target_block_weight(); - let large_weight = target_weight - .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); - - let info = DispatchInfo { - call_weight: large_weight, - class: DispatchClass::Normal, - ..Default::default() - }; - - assert_eq!( - TxExtension::validate_and_prepare( +fn tx_extension_large_tx_with_refund_goes_back_to_fractional() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + initialize_block_finished(); + + // Create a transaction larger than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + let info = DispatchInfo { 
+ call_weight: large_weight, + class: DispatchClass::Normal, + ..Default::default() + }; + + assert_ok!(TxExtension::validate_and_prepare( TxExtension::new(Default::default()), SystemOrigin::Signed(0).into(), &CALL, &info, 100, 0, - ) - .unwrap_err(), - InvalidTransaction::ExhaustsResources.into() - ); - - // Should stay in FractionOfCore mode (not PotentialFullCore) since not first block - assert_eq!( - crate::BlockWeightMode::::get(), - Some(BlockWeightMode::FractionOfCore { first_transaction_index: None }) - ); - assert!(!has_use_full_core_digest()); - assert_eq!(MaximumBlockWeight::get(), target_weight); - }); + )); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::PotentialFullCore { first_transaction_index: Some(0), .. }) + ); + + let mut post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(5000, 5000)), + pays_fee: Default::default(), + }; + + assert_ok!(TxExtension::post_dispatch((), &info, &mut post_info, 0, &Ok(()))); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { .. 
}) + ); + assert!(!has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get(), target_weight); + }); } #[test] -fn tx_extension_post_dispatch_to_full_core_because_of_manual_weight() { - new_test_ext_non_first_block(2).execute_with(|| { - initialize_block_finished(); - - let target_weight = MaxParachainBlockWeight::>::target_block_weight(); - - // Transaction announces small weight - let small_weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND / 10, 1024); - let info = DispatchInfo { call_weight: small_weight, ..Default::default() }; - - assert_ok!(TxExtension::validate_and_prepare( - TxExtension::new(Default::default()), - SystemOrigin::Signed(0).into(), - &CALL, - &info, - 100, - 0, - )); - - assert_matches!( - crate::BlockWeightMode::::get(), - Some(BlockWeightMode::FractionOfCore { first_transaction_index: Some(0) }) - ); - - // But actually uses much more weight (bug in weight annotation) - let large_weight = target_weight - .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); - register_weight(large_weight, DispatchClass::Normal); - - let mut post_info = PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; - assert_ok!(TxExtension::post_dispatch((), &info, &mut post_info, 0, &Ok(()))); - - // Should transition to FullCore due to exceeding limit - assert_matches!(crate::BlockWeightMode::::get(), Some(BlockWeightMode::FullCore)); - - assert!(has_use_full_core_digest()); - }); +fn tx_extension_large_tx_is_rejected_on_non_first_block() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(false) + .build() + .execute_with(|| { + initialize_block_finished(); + + // Create a transaction larger than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + let info = DispatchInfo { + call_weight: large_weight, + class: DispatchClass::Normal, + 
..Default::default() + }; + + assert_eq!( + TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + ) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() + ); + + // Should stay in FractionOfCore mode (not PotentialFullCore) since not first block + assert_eq!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { first_transaction_index: None }) + ); + assert!(!has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get(), target_weight); + }); } #[test] -fn tx_extension_large_tx_after_limit_is_rejected() { - sp_tracing::init_for_tests(); - new_test_ext_first_block(2).execute_with(|| { - initialize_block_finished(); - - // Set some index above the limit. - System::set_extrinsic_index(20); +fn tx_extension_post_dispatch_to_full_core_because_of_manual_weight() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(false) + .build() + .execute_with(|| { + initialize_block_finished(); - // Create a transaction larger than target weight - let target_weight = MaximumBlockWeight::target_block_weight(); - let large_weight = target_weight - .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + let target_weight = + MaxParachainBlockWeight::>::target_block_weight(); - let info = DispatchInfo { call_weight: large_weight, ..Default::default() }; + // Transaction announces small weight + let small_weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND / 10, 1024); + let info = DispatchInfo { call_weight: small_weight, ..Default::default() }; - assert_eq!( - TxExtension::validate_and_prepare( + assert_ok!(TxExtension::validate_and_prepare( TxExtension::new(Default::default()), SystemOrigin::Signed(0).into(), &CALL, &info, 100, 0, - ) - .unwrap_err(), - InvalidTransaction::ExhaustsResources.into() - ); - - assert_eq!( - crate::BlockWeightMode::::get(), - Some(BlockWeightMode::FractionOfCore { 
first_transaction_index: None }) - ); - assert!(!has_use_full_core_digest()); - assert_eq!(MaximumBlockWeight::get(), target_weight); - }); -} - -#[test] -fn tx_extension_large_weight_before_first_tx() { - sp_tracing::init_for_tests(); - new_test_ext_first_block(2).execute_with(|| { - initialize_block_finished(); - - let target_weight = MaximumBlockWeight::target_block_weight(); - let large_weight = target_weight - .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + )); - register_weight(large_weight, DispatchClass::Normal); + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { first_transaction_index: Some(0) }) + ); - let small_weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND / 10, 1024); - let info = DispatchInfo { call_weight: small_weight, ..Default::default() }; + // But actually uses much more weight (bug in weight annotation) + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + register_weight(large_weight, DispatchClass::Normal); - assert_ok!(TxExtension::validate_and_prepare( - TxExtension::new(Default::default()), - SystemOrigin::Signed(0).into(), - &CALL, - &info, - 100, - 0, - )); + let mut post_info = + PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; + assert_ok!(TxExtension::post_dispatch((), &info, &mut post_info, 0, &Ok(()))); - assert_matches!(crate::BlockWeightMode::::get(), Some(BlockWeightMode::FullCore)); + // Should transition to FullCore due to exceeding limit + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FullCore) + ); - assert!(has_use_full_core_digest()); - assert_eq!(MaximumBlockWeight::get().ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); - }); + assert!(has_use_full_core_digest()); + }); } -// ======================================== -// Pre-Inherents Hook Tests -// ======================================== - #[test] -fn 
test_pre_inherents_hook_first_block_over_limit() { - new_test_ext_first_block(2).execute_with(|| { - use frame_support::traits::PreInherents; - - // Simulate on_initialize consuming more than target weight - let target_weight = MaxParachainBlockWeight::>::target_block_weight(); - let excessive_weight = target_weight - .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); - - // register_weight(excessive_weight); - - // Call pre_inherents hook - DynamicMaxBlockWeightHooks::>::pre_inherents(); - - // Should be in FullCore mode - let mode = crate::BlockWeightMode::::get(); - assert!(matches!(mode, Some(BlockWeightMode::FullCore))); - - // Should have UseFullCore digest - assert!(has_use_full_core_digest()); - }); +fn tx_extension_large_tx_after_limit_is_rejected() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + initialize_block_finished(); + + // Set some index above the limit. + System::set_extrinsic_index(20); + + // Create a transaction larger than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + let info = DispatchInfo { call_weight: large_weight, ..Default::default() }; + + assert_eq!( + TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + ) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() + ); + + assert_eq!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { first_transaction_index: None }) + ); + assert!(!has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get(), target_weight); + }); } #[test] -fn test_pre_inherents_hook_non_first_block_over_limit() { - new_test_ext_non_first_block(2).execute_with(|| { - use frame_support::traits::PreInherents; - - // Simulate on_initialize consuming more 
than target weight - let target_weight = MaxParachainBlockWeight::>::target_block_weight(); - let excessive_weight = target_weight - .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); +fn tx_extension_large_weight_before_first_tx() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + initialize_block_finished(); - // register_weight(excessive_weight); + let target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); - // Get initial remaining weight - let initial_remaining = frame_system::Pallet::::remaining_block_weight(); + register_weight(large_weight, DispatchClass::Normal); - // Call pre_inherents hook - DynamicMaxBlockWeightHooks::>::pre_inherents(); + let small_weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND / 10, 1024); + let info = DispatchInfo { call_weight: small_weight, ..Default::default() }; - // Should be in FullCore mode - let mode = crate::BlockWeightMode::::get(); - assert!(matches!(mode, Some(BlockWeightMode::FullCore))); + assert_ok!(TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + )); - // Should have UseFullCore digest - assert!(has_use_full_core_digest()); + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FullCore) + ); - // Should have registered FULL_CORE_WEIGHT to prevent more transactions - let final_remaining = frame_system::Pallet::::remaining_block_weight(); - assert!(final_remaining.remaining().any_lt(initial_remaining.remaining())); - }); + assert!(has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get().ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + }); } #[test] -fn test_pre_inherents_hook_under_limit_no_change() { - new_test_ext_first_block(2).execute_with(|| { - use 
frame_support::traits::PreInherents; - - // Simulate on_initialize consuming less than target weight - let target_weight = MaxParachainBlockWeight::>::target_block_weight(); - let small_weight = - Weight::from_parts(target_weight.ref_time() / 2, target_weight.proof_size() / 2); - - // register_weight(small_weight); - - // Call pre_inherents hook - DynamicMaxBlockWeightHooks::>::pre_inherents(); - - // Should NOT be in FullCore mode - let mode = crate::BlockWeightMode::::get(); - assert!(mode.is_none()); - - // Should NOT have UseFullCore digest - assert!(!has_use_full_core_digest()); - }); +fn pre_inherents_hook_first_block_over_limit() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + // Simulate on_initialize consuming more than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let excessive_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + register_weight(excessive_weight, DispatchClass::Mandatory); + + // Call pre_inherents hook + DynamicMaxBlockWeightHooks::>::pre_inherents(); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FullCore) + ); + + // Should have UseFullCore digest + assert!(has_use_full_core_digest()); + }); } -// ======================================== -// Integration Tests -// ======================================== - #[test] -fn test_integration_first_block_with_large_inherent() { - new_test_ext_first_block(2).execute_with(|| { - use frame_support::traits::PreInherents; - - // Simulate large on_initialize - let target_weight = MaxParachainBlockWeight::>::target_block_weight(); - let large_inherent_weight = target_weight - .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND / 2, 512 * 1024)); - - // register_weight(large_inherent_weight); - - // Pre-inherents hook should detect and switch to FullCore - DynamicMaxBlockWeightHooks::>::pre_inherents(); - - // 
Mark inherents as applied - frame_system::Pallet::::note_finished_initialize(); - - // Now check max block weight - let max_weight = MaxParachainBlockWeight::>::get(); - - // Should return FULL_CORE_WEIGHT - assert_eq!(max_weight, MaxParachainBlockWeight::>::FULL_CORE_WEIGHT); - - // Should have UseFullCore digest - assert!(has_use_full_core_digest()); - }); +fn pre_inherents_hook_non_first_block_over_limit() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(false) + .build() + .execute_with(|| { + // Simulate on_initialize consuming more than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let excessive_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + register_weight(excessive_weight, DispatchClass::Mandatory); + + // Get initial remaining weight + let initial_remaining = frame_system::Pallet::::remaining_block_weight(); + + // Call pre_inherents hook + DynamicMaxBlockWeightHooks::>::pre_inherents(); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FullCore) + ); + + assert!(has_use_full_core_digest()); + + // Should have registered FULL_CORE_WEIGHT to prevent more transactions + let final_remaining = frame_system::Pallet::::remaining_block_weight(); + assert!(final_remaining.consumed().all_gte(MaximumBlockWeight::FULL_CORE_WEIGHT)); + }); } #[test] -fn test_integration_bundle_info_correctly_detected() { - // Test that bundle info at different indices is correctly detected - for index in 0u8..5 { - new_test_ext_with_bundle(Some(2), index, false).execute_with(|| { - let is_first = super::is_first_block_in_core::(); - if index == 0 { - assert!(is_first, "Index 0 should be first block"); - } else { - assert!(!is_first, "Index {} should not be first block", index); - } +fn pre_inherents_hook_under_limit_no_change() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + // 
Simulate on_initialize consuming less than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let small_weight = + Weight::from_parts(target_weight.ref_time() / 2, target_weight.proof_size() / 2); + + register_weight(small_weight, DispatchClass::Mandatory); + + // Call pre_inherents hook + DynamicMaxBlockWeightHooks::>::pre_inherents(); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { first_transaction_index: None }) + ); + + // Should NOT have UseFullCore digest + assert!(!has_use_full_core_digest()); }); - } } #[test] -fn test_integration_max_weight_without_bundle_info() { - new_test_ext_with_digest(Some(2)).execute_with(|| { +fn max_weight_without_bundle_info() { + TestExtBuilder::new().number_of_cores(2).build().execute_with(|| { // Without bundle info, cannot determine if first block // Should still work but max weight determination will be conservative frame_system::Pallet::::note_finished_initialize(); - let max_weight = MaxParachainBlockWeight::>::get(); + let max_weight = MaximumBlockWeight::get(); // With 2 cores and 4 target blocks let expected_weight = @@ -610,35 +597,21 @@ fn test_integration_max_weight_without_bundle_info() { } #[test] -fn test_integration_6s_ref_time_cap() { - // Test that even with many cores, we cap at 6s total ref time - new_test_ext_with_digest(Some(10)).execute_with(|| { +fn ref_time_and_pov_size_cap() { + TestExtBuilder::new().number_of_cores(10).build().execute_with(|| { frame_system::Pallet::::note_finished_initialize(); let max_weight = MaxParachainBlockWeight::>::get(); + // At most one core will always only be able to use the resources of one core. 
assert_eq!(max_weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); assert_eq!(max_weight.proof_size(), MAX_POV_SIZE as u64); - }); -} - -#[test] -fn test_integration_multiple_target_blocks_reduces_weight() { - // Same cores, different target blocks - let num_cores = 4; - - new_test_ext_with_digest(Some(num_cores)).execute_with(|| { - frame_system::Pallet::::note_finished_initialize(); - let weight_2_blocks = MaxParachainBlockWeight::>::get(); - let weight_4_blocks = MaxParachainBlockWeight::>::get(); - let weight_8_blocks = MaxParachainBlockWeight::>::get(); - - // More target blocks = less weight per block - assert!(weight_2_blocks.ref_time() > weight_4_blocks.ref_time()); - assert!(weight_4_blocks.ref_time() > weight_8_blocks.ref_time()); + let max_weight = MaxParachainBlockWeight::>::get(); - assert!(weight_2_blocks.proof_size() > weight_4_blocks.proof_size()); - assert!(weight_4_blocks.proof_size() > weight_8_blocks.proof_size()); + // Each blocks get its own core (can use the max pov size), but ref time of all blocks + // together is in max `6s` + assert_eq!(max_weight.ref_time(), 6 * WEIGHT_REF_TIME_PER_SECOND / 4); + assert_eq!(max_weight.proof_size(), MAX_POV_SIZE as u64); }); } From a4a6d5ed7e318a99f3afb0461d06e5a2529963f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 17 Oct 2025 11:47:38 +0300 Subject: [PATCH 146/312] Fix some warnings and extend the tests --- .../parachain-system/src/block_weight/mock.rs | 28 ++----- .../src/block_weight/tests.rs | 79 +++++++++++-------- .../src/block_weight/transaction_extension.rs | 22 ++++-- .../src/overlayed_changes/mod.rs | 1 - 4 files changed, 67 insertions(+), 63 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index 15d5db7962326..ee8ddfb9e49c6 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -61,6 
+61,9 @@ pub type TxExtension = DynamicMaxBlockWeight< ConstU32, >; +#[allow(dead_code)] +type NotDeadCode = TxExtension; + #[docify::export_content(max_block_weight_setup)] mod max_block_weight_setup { use super::*; @@ -93,9 +96,10 @@ impl frame_system::Config for Runtime { // Set the `PreInherents` hook. type PreInherents = DynamicMaxBlockWeightHooks>; - // Rest of the types is omitted here. + // Just required to make it compile, but not that important for this example here. type Block = Block; type OnSetCode = crate::ParachainSetCode; + // Rest of the types are omitted here. } impl crate::Config for Runtime { @@ -120,15 +124,7 @@ construct_runtime!( } ); -pub type Executive = frame_executive::Executive< - Runtime, - Block, - frame_system::ChainContext, - Runtime, - AllPalletsWithSystem, ->; - -/// Builder for test externalities with fluent API +/// Builder for test externalities pub struct TestExtBuilder { num_cores: Option, bundle_index: Option, @@ -166,18 +162,6 @@ impl TestExtBuilder { self } - /// Set the bundle index directly - pub fn bundle_index(mut self, index: u8) -> Self { - self.bundle_index = Some(index); - self - } - - /// Set whether this is maybe the last block in the bundle - pub fn maybe_last(mut self, maybe_last: bool) -> Self { - self.bundle_maybe_last = maybe_last; - self - } - /// Build the test externalities pub fn build(self) -> sp_io::TestExternalities { let storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 85c6e0f7c2702..a46239b01ac7f 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -21,7 +21,7 @@ use cumulus_primitives_core::{ BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, }; use frame_support::{ - assert_err, assert_ok, + assert_ok, dispatch::{DispatchClass, 
DispatchInfo, PostDispatchInfo}, pallet_prelude::InvalidTransaction, traits::PreInherents, @@ -456,39 +456,55 @@ fn tx_extension_large_tx_after_limit_is_rejected() { #[test] fn tx_extension_large_weight_before_first_tx() { - TestExtBuilder::new() - .number_of_cores(2) - .first_block_in_core(true) - .build() - .execute_with(|| { - initialize_block_finished(); - - let target_weight = MaximumBlockWeight::target_block_weight(); - let large_weight = target_weight - .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); - - register_weight(large_weight, DispatchClass::Normal); + for first_block_in_core in [true, false] { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(first_block_in_core) + .build() + .execute_with(|| { + initialize_block_finished(); - let small_weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND / 10, 1024); - let info = DispatchInfo { call_weight: small_weight, ..Default::default() }; + let target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); - assert_ok!(TxExtension::validate_and_prepare( - TxExtension::new(Default::default()), - SystemOrigin::Signed(0).into(), - &CALL, - &info, - 100, - 0, - )); + register_weight(large_weight, DispatchClass::Normal); - assert_matches!( - crate::BlockWeightMode::::get(), - Some(BlockWeightMode::FullCore) - ); + let small_weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND / 10, 1024); + let info = DispatchInfo { call_weight: small_weight, ..Default::default() }; - assert!(has_use_full_core_digest()); - assert_eq!(MaximumBlockWeight::get().ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); - }); + let res = TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + ); + + if first_block_in_core { + assert!(res.is_ok()) + } else { + assert_eq!(res.unwrap_err(), 
InvalidTransaction::ExhaustsResources.into()); + } + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FullCore) + ); + + assert!(has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get().ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + + if !first_block_in_core { + // Should have registered FULL_CORE_WEIGHT to prevent more transactions + let final_remaining = frame_system::Pallet::::remaining_block_weight(); + assert!(final_remaining + .consumed() + .all_gte(MaximumBlockWeight::FULL_CORE_WEIGHT)); + } + }); + } } #[test] @@ -532,9 +548,6 @@ fn pre_inherents_hook_non_first_block_over_limit() { register_weight(excessive_weight, DispatchClass::Mandatory); - // Get initial remaining weight - let initial_remaining = frame_system::Pallet::::remaining_block_weight(); - // Call pre_inherents hook DynamicMaxBlockWeightHooks::>::pre_inherents(); diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index a1ea876f6d814..fd2f60cd097f7 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -22,7 +22,7 @@ use alloc::vec::Vec; use codec::{Decode, DecodeWithMemTracking, Encode}; use cumulus_primitives_core::CumulusDigestItem; use frame_support::{ - dispatch::{DispatchInfo, PostDispatchInfo}, + dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}, pallet_prelude::{ InvalidTransaction, TransactionSource, TransactionValidityError, ValidTransaction, }, @@ -150,7 +150,6 @@ where ); // Protection against a misconfiguration as this should be detected by the pre-inherent hook. 
- //TODO: Ensure we are first block in core if block_weight_over_limit { *mode = Some(BlockWeightMode::FullCore); @@ -159,16 +158,25 @@ where CumulusDigestItem::UseFullCore.to_digest_item(), ); + if !is_first_block_in_core::() { + // We are already above the allowed maximum and do not want to accept any more + // extrinsics. + frame_system::Pallet::::register_extra_weight_unchecked( + MaxParachainBlockWeight::::FULL_CORE_WEIGHT, + DispatchClass::Mandatory, + ); + } + log::error!( target: LOG_TARGET, "Inherent block logic took longer than the target block weight, \ `DynamicMaxBlockWeightHooks` not registered as `PreInherents` hook!", ); - } else if dbg!(dbg!(info + } else if info .total_weight() // The extrinsic lengths counts towards the POV size - .saturating_add(Weight::from_parts(0, len as u64))) - .any_gt(dbg!(target_weight))) + .saturating_add(Weight::from_parts(0, len as u64)) + .any_gt(target_weight) { if transaction_index.unwrap_or_default().saturating_sub(first_transaction_index.unwrap_or_default()) < MAX_TRANSACTION_TO_CONSIDER && is_first_block_in_core::() { @@ -251,7 +259,7 @@ where frame_system::Pallet::::register_extra_weight_unchecked( MaxParachainBlockWeight::::FULL_CORE_WEIGHT, - frame_support::dispatch::DispatchClass::Mandatory, + DispatchClass::Mandatory, ); } @@ -269,7 +277,7 @@ where let block_weight = frame_system::BlockWeight::::get(); let extrinsic_class_weight = block_weight.get(info.class); - if dbg!(extrinsic_class_weight).any_gt(dbg!(target_weight)) { + if extrinsic_class_weight.any_gt(target_weight) { log::trace!( target: LOG_TARGET, "Extrinsic class weight {extrinsic_class_weight:?} above target weight {target_weight:?}, enabling `FullCore` mode." 
diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index e9aa4e122ee42..5157df59d8469 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -36,7 +36,6 @@ use sp_trie::{empty_child_trie_root, LayoutV1}; #[cfg(not(feature = "std"))] use alloc::collections::btree_map::BTreeMap as Map; -use core::ops::DerefMut; #[cfg(feature = "std")] use std::collections::{hash_map::Entry as MapEntry, HashMap as Map}; From a2815371e73c5e5dd610bc19401d081258a5f109 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 17 Oct 2025 17:16:51 +0200 Subject: [PATCH 147/312] Start a benchmark --- .../parachain-system/src/benchmarking.rs | 78 ++++++++++++++++++- .../parachain-system/src/block_weight/mock.rs | 3 + .../parachain-system/src/block_weight/mod.rs | 4 +- .../src/block_weight/transaction_extension.rs | 14 ++-- cumulus/primitives/core/src/lib.rs | 16 ++++ polkadot/primitives/src/v9/mod.rs | 12 +++ 6 files changed, 115 insertions(+), 12 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/benchmarking.rs b/cumulus/pallets/parachain-system/src/benchmarking.rs index c3d59e82255a3..6b174aed99925 100644 --- a/cumulus/pallets/parachain-system/src/benchmarking.rs +++ b/cumulus/pallets/parachain-system/src/benchmarking.rs @@ -20,12 +20,29 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use crate::parachain_inherent::InboundDownwardMessages; -use cumulus_primitives_core::{relay_chain::Hash as RelayHash, InboundDownwardMessage}; +use crate::{ + block_weight::{ + mock::has_use_full_core_digest, BlockWeightMode, DynamicMaxBlockWeight, + MaxParachainBlockWeight, + }, + parachain_inherent::InboundDownwardMessages, +}; +use cumulus_primitives_core::{ + relay_chain::Hash as RelayHash, BundleInfo, CoreInfo, InboundDownwardMessage, +}; use frame_benchmarking::v2::*; -use 
sp_runtime::traits::BlakeTwo256; +use frame_support::{ + dispatch::{DispatchInfo, PostDispatchInfo}, + weights::constants::WEIGHT_REF_TIME_PER_SECOND, +}; +use frame_system::RawOrigin; +use sp_core::ConstU32; +use sp_runtime::traits::{BlakeTwo256, DispatchTransaction, Dispatchable}; -#[benchmarks] +#[benchmarks(where + T: Send + Sync, + T::RuntimeCall: Dispatchable, +)] mod benchmarks { use super::*; @@ -64,6 +81,59 @@ mod benchmarks { head } + #[benchmark] + fn block_weight_tx_extension_max_weight() -> Result<(), BenchmarkError> { + let caller = account("caller", 0, 0); + + frame_system::Pallet::::inherents_applied(); + + frame_system::Pallet::::deposit_log( + BundleInfo { index: 0, maybe_last: false }.to_digest_item(), + ); + frame_system::Pallet::::deposit_log( + CoreInfo { + selector: 0.into(), + claim_queue_offset: 0.into(), + number_of_cores: 1.into(), + } + .to_digest_item(), + ); + let target_weight = MaxParachainBlockWeight::>::get(); + + let info = DispatchInfo { + // The weight needs to be more than the target weight. + call_weight: target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 0)), + extension_weight: Weight::zero(), + class: DispatchClass::Normal, + ..Default::default() + }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; + let len = 0_usize; + + crate::BlockWeightMode::::put(BlockWeightMode::FractionOfCore { + first_transaction_index: None, + }); + + let ext = DynamicMaxBlockWeight::>::new(()); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(post_info)) + .unwrap() + .unwrap(); + } + + assert!(has_use_full_core_digest()); + assert_eq!( + MaxParachainBlockWeight::>::get(), + MaxParachainBlockWeight::>::FULL_CORE_WEIGHT + ); + + Ok(()) + } + impl_benchmark_test_suite! 
{ Pallet, crate::mock::new_test_ext(), diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index ee8ddfb9e49c6..68a96df50973d 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -125,12 +125,14 @@ construct_runtime!( ); /// Builder for test externalities +#[cfg(test)] pub struct TestExtBuilder { num_cores: Option, bundle_index: Option, bundle_maybe_last: bool, } +#[cfg(test)] impl Default for TestExtBuilder { fn default() -> Self { sp_tracing::init_for_tests(); @@ -139,6 +141,7 @@ impl Default for TestExtBuilder { } } +#[cfg(test)] impl TestExtBuilder { /// Create a new builder pub fn new() -> Self { diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index 62e309b118016..58ba8dbd2d74f 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -56,7 +56,7 @@ use scale_info::TypeInfo; use sp_core::Get; use sp_runtime::Digest; -#[cfg(test)] +#[cfg(any(test, feature = "runtime-benchmarks"))] pub(crate) mod mock; pub mod pre_inherents_hook; #[cfg(test)] @@ -106,7 +106,7 @@ impl> { // Maximum ref time per core const MAX_REF_TIME_PER_CORE_NS: u64 = 2 * WEIGHT_REF_TIME_PER_SECOND; - const FULL_CORE_WEIGHT: Weight = + pub(crate) const FULL_CORE_WEIGHT: Weight = Weight::from_parts(Self::MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); /// Returns the target block weight for one block. 
diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index fd2f60cd097f7..eed3cd99831b5 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -15,7 +15,7 @@ // limitations under the License. use super::{ - block_weight_over_target_block_weight, is_first_block_in_core, BlockWeightMode, + block_weight_over_target_block_weight, is_first_block_in_core_with_digest, BlockWeightMode, MaxParachainBlockWeight, LOG_TARGET, }; use alloc::vec::Vec; @@ -141,12 +141,13 @@ where "`PotentialFullCore` should resolve to `FullCore` or `FractionOfCore` after applying a transaction.", ); + let digest = frame_system::Pallet::::digest(); let block_weight_over_limit = extrinsic_index == 0 && block_weight_over_target_block_weight::(); let block_weights = Config::BlockWeights::get(); let target_weight = block_weights.get(info.class).max_total.unwrap_or_else( - || MaxParachainBlockWeight::::target_block_weight().saturating_sub(block_weights.base_block) + || MaxParachainBlockWeight::::target_block_weight_with_digest(&digest).saturating_sub(block_weights.base_block) ); // Protection against a misconfiguration as this should be detected by the pre-inherent hook. @@ -158,7 +159,7 @@ where CumulusDigestItem::UseFullCore.to_digest_item(), ); - if !is_first_block_in_core::() { + if !is_first_block_in_core_with_digest(&digest) { // We are already above the allowed maximum and do not want to accept any more // extrinsics. 
frame_system::Pallet::::register_extra_weight_unchecked( @@ -179,7 +180,7 @@ where .any_gt(target_weight) { if transaction_index.unwrap_or_default().saturating_sub(first_transaction_index.unwrap_or_default()) < MAX_TRANSACTION_TO_CONSIDER - && is_first_block_in_core::() { + && is_first_block_in_core_with_digest(&digest) { log::trace!( target: LOG_TARGET, "Enabling `PotentialFullCore` mode for extrinsic", @@ -230,8 +231,9 @@ where // If the previous mode was already `FullCore`, we are fine. BlockWeightMode::FullCore => {}, BlockWeightMode::FractionOfCore { .. } => { + let digest = frame_system::Pallet::::digest(); let target_block_weight = - MaxParachainBlockWeight::::target_block_weight(); + MaxParachainBlockWeight::::target_block_weight_with_digest(&digest); let is_above_limit = frame_system::Pallet::::remaining_block_weight() .consumed() @@ -250,7 +252,7 @@ where // If this isn't the first block in a core, we register the full core weight // to ensure that we don't include any other transactions. Because we don't // know how many weight of the core was already used by the blocks before. - if !is_first_block_in_core::() { + if !is_first_block_in_core_with_digest(&digest) { log::error!( target: LOG_TARGET, "Registering `FULL_CORE_WEIGHT` to ensure no other transaction is included \ diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index bfb165c2568a1..afd4446f44860 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -240,6 +240,14 @@ impl core::hash::Hash for CoreInfo { } } +impl CoreInfo { + /// Puts this into a [`CumulusDigestItem::CoreInfo`] and then encodes it as a Substrate + /// [`DigestItem`]. + pub fn to_digest_item(&self) -> DigestItem { + CumulusDigestItem::CoreInfo(self.clone()).to_digest_item() + } +} + /// Information about a block that is part of a PoV bundle. 
#[derive(Clone, Debug, Decode, Encode, PartialEq)] pub struct BundleInfo { @@ -253,6 +261,14 @@ pub struct BundleInfo { pub maybe_last: bool, } +impl BundleInfo { + /// Puts this into a [`CumulusDigestItem::BundleInfo`] and then encodes it as a Substrate + /// [`DigestItem`]. + pub fn to_digest_item(&self) -> DigestItem { + CumulusDigestItem::BundleInfo(self.clone()).to_digest_item() + } +} + /// Return value of [`CumulusDigestItem::core_info_exists_at_max_once`] #[derive(Debug, Clone, PartialEq, Eq)] pub enum CoreInfoExistsAtMaxOnce { diff --git a/polkadot/primitives/src/v9/mod.rs b/polkadot/primitives/src/v9/mod.rs index 360da8ff9b956..c55edd47c3c19 100644 --- a/polkadot/primitives/src/v9/mod.rs +++ b/polkadot/primitives/src/v9/mod.rs @@ -2238,10 +2238,22 @@ impl Ord for CommittedCandidateReceiptV2 { #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Debug, Copy)] pub struct CoreSelector(pub u8); +impl From for CoreSelector { + fn from(value: u8) -> Self { + Self(value) + } +} + /// An offset in the relay chain claim queue. #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Debug, Copy)] pub struct ClaimQueueOffset(pub u8); +impl From for ClaimQueueOffset { + fn from(value: u8) -> Self { + Self(value) + } +} + /// Signals that a parachain can send to the relay chain via the UMP queue. #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Debug)] pub enum UMPSignal { From 5bf99e4edc45f782058cb25f115ed2a5c891fe3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 17 Oct 2025 17:06:49 +0200 Subject: [PATCH 148/312] cumulus-bootnodes: Do not require a specific hash On the wire the hash is represented as `Vec` any way. So, there is no need to take this as an extra type. 
--- Cargo.lock | 2 +- cumulus/client/bootnodes/Cargo.toml | 3 --- cumulus/client/bootnodes/src/advertisement.rs | 7 +++---- cumulus/client/bootnodes/src/discovery.rs | 9 ++++----- cumulus/client/bootnodes/src/task.rs | 9 ++++----- cumulus/polkadot-omni-node/lib/src/common/spec.rs | 3 ++- templates/parachain/node/Cargo.toml | 1 + templates/parachain/node/src/service.rs | 3 ++- 8 files changed, 17 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d570f072d1979..cf8b4da2adeef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4341,7 +4341,6 @@ dependencies = [ "ip_network", "log", "num-traits", - "parachains-common", "parity-scale-codec", "prost 0.12.6", "prost-build", @@ -14308,6 +14307,7 @@ dependencies = [ "jsonrpsee", "log", "parachain-template-runtime", + "parity-scale-codec", "polkadot-sdk", "serde", "substrate-prometheus-endpoint", diff --git a/cumulus/client/bootnodes/Cargo.toml b/cumulus/client/bootnodes/Cargo.toml index 25821ff7b2e6f..300a0075f38a2 100644 --- a/cumulus/client/bootnodes/Cargo.toml +++ b/cumulus/client/bootnodes/Cargo.toml @@ -33,9 +33,6 @@ sc-service = { workspace = true, default-features = true } sp-consensus-babe = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -# Polkadot - # Cumulus cumulus-primitives-core = { workspace = true, default-features = true } cumulus-relay-chain-interface = { workspace = true, default-features = true } -parachains-common = { workspace = true, default-features = true } diff --git a/cumulus/client/bootnodes/src/advertisement.rs b/cumulus/client/bootnodes/src/advertisement.rs index 09c583586378c..790d93a9d4d6a 100644 --- a/cumulus/client/bootnodes/src/advertisement.rs +++ b/cumulus/client/bootnodes/src/advertisement.rs @@ -27,7 +27,6 @@ use cumulus_relay_chain_interface::{RelayChainInterface, RelayChainResult}; use futures::{future::Fuse, pin_mut, FutureExt, StreamExt}; use ip_network::IpNetwork; use log::{debug, error, trace, warn}; -use 
parachains_common::Hash as ParaHash; use prost::Message; use sc_network::{ config::OutgoingResponse, @@ -63,7 +62,7 @@ pub struct BootnodeAdvertisementParams { /// Whether to advertise non-global IPs. pub advertise_non_global_ips: bool, /// Parachain genesis hash. - pub parachain_genesis_hash: ParaHash, + pub parachain_genesis_hash: Vec, /// Parachain fork ID. pub parachain_fork_id: Option, /// Parachain side public addresses. @@ -83,7 +82,7 @@ pub struct BootnodeAdvertisement { request_receiver: async_channel::Receiver, parachain_network: Arc, advertise_non_global_ips: bool, - parachain_genesis_hash: ParaHash, + parachain_genesis_hash: Vec, parachain_fork_id: Option, public_addresses: Vec, } @@ -408,7 +407,7 @@ impl BootnodeAdvertisement { let response = crate::schema::Response { peer_id: self.parachain_network.local_peer_id().to_bytes(), addrs: self.paranode_addresses().iter().map(|a| a.to_vec()).collect(), - genesis_hash: self.parachain_genesis_hash.clone().as_bytes().to_vec(), + genesis_hash: self.parachain_genesis_hash.clone(), fork_id: self.parachain_fork_id.clone(), }; diff --git a/cumulus/client/bootnodes/src/discovery.rs b/cumulus/client/bootnodes/src/discovery.rs index 73b6890ff9bc8..6a563c408fb0a 100644 --- a/cumulus/client/bootnodes/src/discovery.rs +++ b/cumulus/client/bootnodes/src/discovery.rs @@ -44,7 +44,6 @@ use futures::{ FutureExt, StreamExt, }; use log::{debug, error, info, trace, warn}; -use parachains_common::Hash as ParaHash; use prost::Message; use sc_network::{ event::{DhtEvent, Event}, @@ -70,7 +69,7 @@ pub struct BootnodeDiscoveryParams { /// Parachain node network service. pub parachain_network: Arc, /// Parachain genesis hash. - pub parachain_genesis_hash: ParaHash, + pub parachain_genesis_hash: Vec, /// Parachain fork ID. pub parachain_fork_id: Option, /// Relay chain interface. 
@@ -85,7 +84,7 @@ pub struct BootnodeDiscoveryParams { pub struct BootnodeDiscovery { para_id_scale_compact: Vec, parachain_network: Arc, - parachain_genesis_hash: ParaHash, + parachain_genesis_hash: Vec, parachain_fork_id: Option, relay_chain_interface: Arc, relay_chain_network: Arc, @@ -311,7 +310,7 @@ impl BootnodeDiscovery { match (response.genesis_hash, response.fork_id) { (genesis_hash, fork_id) - if genesis_hash == self.parachain_genesis_hash.as_ref() && + if genesis_hash == self.parachain_genesis_hash && fork_id == self.parachain_fork_id => {}, (genesis_hash, fork_id) => { warn!( @@ -320,7 +319,7 @@ impl BootnodeDiscovery { genesis hash {}, fork ID {:?} don't match expected genesis hash {}, fork ID {:?}", hex::encode(genesis_hash), fork_id, - hex::encode(self.parachain_genesis_hash), + hex::encode(&self.parachain_genesis_hash), self.parachain_fork_id, ); return; diff --git a/cumulus/client/bootnodes/src/task.rs b/cumulus/client/bootnodes/src/task.rs index c77cfa81faa76..135c4415ba01d 100644 --- a/cumulus/client/bootnodes/src/task.rs +++ b/cumulus/client/bootnodes/src/task.rs @@ -26,7 +26,6 @@ use cumulus_primitives_core::{relay_chain::BlockId, ParaId}; use cumulus_relay_chain_interface::RelayChainInterface; use log::{debug, error}; use num_traits::Zero; -use parachains_common::Hash as ParaHash; use sc_network::{request_responses::IncomingRequest, service::traits::NetworkService, Multiaddr}; use sc_service::TaskManager; use std::sync::Arc; @@ -57,7 +56,7 @@ pub struct StartBootnodeTasksParams<'a> { /// Whether to advertise non-global IP addresses. pub advertise_non_global_ips: bool, /// Parachain genesis hash. - pub parachain_genesis_hash: ParaHash, + pub parachain_genesis_hash: Vec, /// Parachain fork ID. pub parachain_fork_id: Option, /// Parachain public addresses provided by the operator. 
@@ -71,7 +70,7 @@ async fn bootnode_advertisement( request_receiver: async_channel::Receiver, parachain_network: Arc, advertise_non_global_ips: bool, - parachain_genesis_hash: ParaHash, + parachain_genesis_hash: Vec, parachain_fork_id: Option, public_addresses: Vec, ) { @@ -95,7 +94,7 @@ async fn bootnode_advertisement( async fn bootnode_discovery( para_id: ParaId, parachain_network: Arc, - parachain_genesis_hash: ParaHash, + parachain_genesis_hash: Vec, parachain_fork_id: Option, relay_chain_interface: Arc, relay_chain_fork_id: Option, @@ -177,7 +176,7 @@ pub fn start_bootnode_tasks( request_receiver, parachain_network.clone(), advertise_non_global_ips, - parachain_genesis_hash, + parachain_genesis_hash.clone(), parachain_fork_id.clone(), parachain_public_addresses, ), diff --git a/cumulus/polkadot-omni-node/lib/src/common/spec.rs b/cumulus/polkadot-omni-node/lib/src/common/spec.rs index 686e2cc4e1669..3170bb09f0b36 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/spec.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/spec.rs @@ -27,6 +27,7 @@ use crate::{ ConstructNodeRuntimeApi, NodeBlock, NodeExtraArgs, }, }; +use codec::Encode; use cumulus_client_bootnodes::{start_bootnode_tasks, StartBootnodeTasksParams}; use cumulus_client_cli::CollatorOptions; use cumulus_client_service::{ @@ -517,7 +518,7 @@ pub(crate) trait NodeSpec: BaseNodeSpec { request_receiver: paranode_rx, parachain_network: network, advertise_non_global_ips, - parachain_genesis_hash: client.chain_info().genesis_hash, + parachain_genesis_hash: client.chain_info().genesis_hash.encode(), parachain_fork_id, parachain_public_addresses, }); diff --git a/templates/parachain/node/Cargo.toml b/templates/parachain/node/Cargo.toml index b622f2dc14cb0..2b97708d3d22a 100644 --- a/templates/parachain/node/Cargo.toml +++ b/templates/parachain/node/Cargo.toml @@ -14,6 +14,7 @@ build = "build.rs" workspace = true [dependencies] +codec = { workspace = true, default-features = true } clap = { features = 
["derive"], workspace = true } color-print = { workspace = true } docify = { workspace = true } diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index 7aca62896a220..e766c2207546a 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -9,6 +9,7 @@ use parachain_template_runtime::{ opaque::{Block, Hash}, }; +use codec::Encode; use polkadot_sdk::{cumulus_client_service::ParachainTracingExecuteBlock, *}; // Cumulus Imports @@ -415,7 +416,7 @@ pub async fn start_parachain_node( request_receiver: paranode_rx, parachain_network: network, advertise_non_global_ips, - parachain_genesis_hash: client.chain_info().genesis_hash, + parachain_genesis_hash: client.chain_info().genesis_hash.encode(), parachain_fork_id, parachain_public_addresses, }); From efb7b4a17ac445c62d86db577f1423df9ad43b0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 17 Oct 2025 18:16:00 +0200 Subject: [PATCH 149/312] Finish benchmarks --- .../parachain-system/src/benchmarking.rs | 129 +++++++++++++++++- .../parachain-system/src/block_weight/mod.rs | 2 +- .../src/block_weight/transaction_extension.rs | 6 + 3 files changed, 129 insertions(+), 8 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/benchmarking.rs b/cumulus/pallets/parachain-system/src/benchmarking.rs index 6b174aed99925..e992e25443c2a 100644 --- a/cumulus/pallets/parachain-system/src/benchmarking.rs +++ b/cumulus/pallets/parachain-system/src/benchmarking.rs @@ -22,8 +22,8 @@ use super::*; use crate::{ block_weight::{ - mock::has_use_full_core_digest, BlockWeightMode, DynamicMaxBlockWeight, - MaxParachainBlockWeight, + mock::{has_use_full_core_digest, register_weight}, + BlockWeightMode, DynamicMaxBlockWeight, MaxParachainBlockWeight, }, parachain_inherent::InboundDownwardMessages, }; @@ -85,7 +85,9 @@ mod benchmarks { fn block_weight_tx_extension_max_weight() -> Result<(), BenchmarkError> { let caller = 
account("caller", 0, 0); - frame_system::Pallet::::inherents_applied(); + frame_system::Pallet::::note_inherents_applied(); + + frame_system::Pallet::::set_extrinsic_index(1); frame_system::Pallet::::deposit_log( BundleInfo { index: 0, maybe_last: false }.to_digest_item(), @@ -98,7 +100,7 @@ mod benchmarks { } .to_digest_item(), ); - let target_weight = MaxParachainBlockWeight::>::get(); + let target_weight = MaxParachainBlockWeight::>::target_block_weight(); let info = DispatchInfo { // The weight needs to be more than the target weight. @@ -120,11 +122,16 @@ mod benchmarks { #[block] { - ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(post_info)) - .unwrap() - .unwrap(); + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| { + // Normally this is done by `CheckWeight` + register_weight(info.call_weight, DispatchClass::Normal); + Ok(post_info) + }) + .unwrap() + .unwrap(); } + assert_eq!(crate::BlockWeightMode::::get().unwrap(), BlockWeightMode::FullCore); assert!(has_use_full_core_digest()); assert_eq!( MaxParachainBlockWeight::>::get(), @@ -134,6 +141,114 @@ mod benchmarks { Ok(()) } + #[benchmark] + fn block_weight_tx_extension_stays_fraction_of_core() -> Result<(), BenchmarkError> { + let caller = account("caller", 0, 0); + + frame_system::Pallet::::note_inherents_applied(); + + frame_system::Pallet::::set_extrinsic_index(1); + + frame_system::Pallet::::deposit_log( + BundleInfo { index: 0, maybe_last: false }.to_digest_item(), + ); + frame_system::Pallet::::deposit_log( + CoreInfo { + selector: 0.into(), + claim_queue_offset: 0.into(), + number_of_cores: 1.into(), + } + .to_digest_item(), + ); + let target_weight = MaxParachainBlockWeight::>::target_block_weight(); + + let info = DispatchInfo { + call_weight: Weight::from_parts(1024, 1024), + extension_weight: Weight::zero(), + class: DispatchClass::Normal, + ..Default::default() + }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] 
}.into(); + let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; + let len = 0_usize; + + crate::BlockWeightMode::::put(BlockWeightMode::FractionOfCore { + first_transaction_index: None, + }); + + let ext = DynamicMaxBlockWeight::>::new(()); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| { + // Normally this is done by `CheckWeight` + register_weight(info.call_weight, DispatchClass::Normal); + Ok(post_info) + }) + .unwrap() + .unwrap(); + } + + assert_eq!( + crate::BlockWeightMode::::get().unwrap(), + BlockWeightMode::FractionOfCore { first_transaction_index: Some(1) } + ); + assert!(!has_use_full_core_digest()); + assert_eq!(MaxParachainBlockWeight::>::get(), target_weight); + + Ok(()) + } + + #[benchmark] + fn block_weight_tx_extension_full_core() -> Result<(), BenchmarkError> { + let caller = account("caller", 0, 0); + + frame_system::Pallet::::note_inherents_applied(); + + frame_system::Pallet::::set_extrinsic_index(1); + + frame_system::Pallet::::deposit_log( + BundleInfo { index: 0, maybe_last: false }.to_digest_item(), + ); + frame_system::Pallet::::deposit_log( + CoreInfo { + selector: 0.into(), + claim_queue_offset: 0.into(), + number_of_cores: 1.into(), + } + .to_digest_item(), + ); + + let info = DispatchInfo { + call_weight: Weight::from_parts(1024, 1024), + extension_weight: Weight::zero(), + class: DispatchClass::Normal, + ..Default::default() + }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; + let len = 0_usize; + + crate::BlockWeightMode::::put(BlockWeightMode::FullCore); + + let ext = DynamicMaxBlockWeight::>::new(()); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| { + // Normally this is done by `CheckWeight` + register_weight(info.call_weight, DispatchClass::Normal); + Ok(post_info) + }) + 
.unwrap() + .unwrap(); + } + + assert_eq!(crate::BlockWeightMode::::get().unwrap(), BlockWeightMode::FullCore); + + Ok(()) + } + impl_benchmark_test_suite! { Pallet, crate::mock::new_test_ext(), diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index 58ba8dbd2d74f..e2887d1ba1859 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -110,7 +110,7 @@ impl> Weight::from_parts(Self::MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); /// Returns the target block weight for one block. - fn target_block_weight() -> Weight { + pub(crate) fn target_block_weight() -> Weight { let digest = frame_system::Pallet::::digest(); Self::target_block_weight_with_digest(&digest) } diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index eed3cd99831b5..9b3b7899e3d5b 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -60,6 +60,12 @@ use sp_runtime::{ /// dispatching the extrinsic are repeated with the post dispatch weights. The [`BlockWeightMode`] /// may is changed properly. /// +/// # Note +/// +/// The extension requires that any of the inner extensions sets the +/// [`BlockWeight`](frame_system::BlockWeight). Otherwise the weight tracking is not working +/// properly. Normally this is done by [`CheckWeight`](frame_system::CheckWeight). +/// /// # Generic parameters /// /// - `Config`: The [`Config`](crate::Config) trait of this pallet. 
From d46dc8ea69f1ade95de11026cfba5d14e8ae92b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 18 Oct 2025 20:23:07 +0200 Subject: [PATCH 150/312] Use the benchmarked weights in the extension --- .../parachain-system/src/benchmarking.rs | 6 +++ .../src/block_weight/transaction_extension.rs | 37 +++++++++++++------ .../pallets/parachain-system/src/weights.rs | 28 ++++++++++++++ 3 files changed, 60 insertions(+), 11 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/benchmarking.rs b/cumulus/pallets/parachain-system/src/benchmarking.rs index e992e25443c2a..ed95b0feb874a 100644 --- a/cumulus/pallets/parachain-system/src/benchmarking.rs +++ b/cumulus/pallets/parachain-system/src/benchmarking.rs @@ -81,6 +81,10 @@ mod benchmarks { head } + /// The worst-case scenario for the block weight transaction extension. + /// + /// Before executing an extrinsic `FractionOfCore` is set, changed to `PotentialFullCore` and + /// post dispatch switches to `FullCore`. #[benchmark] fn block_weight_tx_extension_max_weight() -> Result<(), BenchmarkError> { let caller = account("caller", 0, 0); @@ -141,6 +145,7 @@ mod benchmarks { Ok(()) } + /// A benchmark that assumes that an extrinsic was executed with `FractionOfCore` set. #[benchmark] fn block_weight_tx_extension_stays_fraction_of_core() -> Result<(), BenchmarkError> { let caller = account("caller", 0, 0); @@ -199,6 +204,7 @@ mod benchmarks { Ok(()) } + /// A benchmark that assumes that `FullCore` was set already before executing an extrinsic. 
#[benchmark] fn block_weight_tx_extension_full_core() -> Result<(), BenchmarkError> { let caller = account("caller", 0, 0); diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index 9b3b7899e3d5b..603e76fb8864b 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -18,6 +18,7 @@ use super::{ block_weight_over_target_block_weight, is_first_block_in_core_with_digest, BlockWeightMode, MaxParachainBlockWeight, LOG_TARGET, }; +use crate::WeightInfo; use alloc::vec::Vec; use codec::{Decode, DecodeWithMemTracking, Encode}; use cumulus_primitives_core::CumulusDigestItem; @@ -117,6 +118,7 @@ where Config: crate::Config, TargetBlockRate: Get, { + /// Should be executed before `validate` is called for any inner extension. fn pre_validate_extrinsic( info: &DispatchInfo, len: usize, @@ -229,13 +231,19 @@ where }).map_err(Into::into) } - fn post_dispatch_extrinsic(info: &DispatchInfo) { + /// Should be called after all inner extensions have finished executing their post dispatch + /// handling. + /// + /// Returns the weight to refund. Aka the weight that wasn't used by this extension. + fn post_dispatch_extrinsic(info: &DispatchInfo) -> Weight { crate::BlockWeightMode::::mutate(|weight_mode| { - let Some(mode) = *weight_mode else { return }; + let Some(mode) = *weight_mode else { return Weight::zero() }; match mode { // If the previous mode was already `FullCore`, we are fine. - BlockWeightMode::FullCore => {}, + BlockWeightMode::FullCore => + Config::WeightInfo::block_weight_tx_extension_max_weight() + .saturating_sub(Config::WeightInfo::block_weight_tx_extension_full_core()), BlockWeightMode::FractionOfCore { .. 
} => { let digest = frame_system::Pallet::::digest(); let target_block_weight = @@ -278,6 +286,10 @@ where CumulusDigestItem::UseFullCore.to_digest_item(), ); } + + Config::WeightInfo::block_weight_tx_extension_max_weight().saturating_sub( + Config::WeightInfo::block_weight_tx_extension_stays_fraction_of_core(), + ) }, // Now we need to check if the transaction required more weight than a fraction of a // core block. @@ -307,9 +319,12 @@ where *weight_mode = Some(BlockWeightMode::FractionOfCore { first_transaction_index }); } + + // We run into the worst case, so no refund :) + Weight::zero() }, } - }); + }) } } @@ -361,7 +376,7 @@ where } fn weight(&self, _: &Config::RuntimeCall) -> Weight { - Weight::zero() + Config::WeightInfo::block_weight_tx_extension_max_weight() } fn validate( @@ -391,18 +406,18 @@ where self.0.prepare(val, origin, call, info, len) } - fn post_dispatch( + fn post_dispatch_details( pre: Self::Pre, info: &DispatchInfoOf, - post_info: &mut PostDispatchInfo, + post_info: &PostDispatchInfo, len: usize, result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - Inner::post_dispatch(pre, info, post_info, len, result)?; + ) -> Result { + let weight_refund = Inner::post_dispatch_details(pre, info, post_info, len, result)?; - Self::post_dispatch_extrinsic(info); + let extra_refund = Self::post_dispatch_extrinsic(info); - Ok(()) + Ok(weight_refund.saturating_add(extra_refund)) } fn bare_validate( diff --git a/cumulus/pallets/parachain-system/src/weights.rs b/cumulus/pallets/parachain-system/src/weights.rs index ba7d8b1e87f6b..086a6b993b695 100644 --- a/cumulus/pallets/parachain-system/src/weights.rs +++ b/cumulus/pallets/parachain-system/src/weights.rs @@ -55,6 +55,9 @@ use core::marker::PhantomData; /// Weight functions needed for cumulus_pallet_parachain_system. 
pub trait WeightInfo { fn enqueue_inbound_downward_messages(n: u32, ) -> Weight; + fn block_weight_tx_extension_max_weight() -> Weight; + fn block_weight_tx_extension_stays_fraction_of_core() -> Weight; + fn block_weight_tx_extension_full_core() -> Weight; } /// Weights for cumulus_pallet_parachain_system using the Substrate node and recommended hardware. @@ -84,6 +87,18 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } + + fn block_weight_tx_extension_max_weight() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_stays_fraction_of_core() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_full_core() -> Weight { + Weight::zero() + } } // For backwards compatibility and tests @@ -112,4 +127,17 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } + + fn block_weight_tx_extension_max_weight() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_stays_fraction_of_core() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_full_core() -> Weight { + Weight::zero() + } + } From 8412f825d1b852d555e1519fc7db47e7cc1eaeb9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 18 Oct 2025 20:59:03 +0200 Subject: [PATCH 151/312] Make `ONLY_OPERATIONAL` work --- .../src/block_weight/tests.rs | 69 +++++++++++++++++++ .../src/block_weight/transaction_extension.rs | 58 +++++++++++++--- 2 files changed, 119 insertions(+), 8 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index a46239b01ac7f..9a06d3214d06d 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -36,6 +36,8 @@ use sp_runtime::{ }; type TxExtension = DynamicMaxBlockWeight, ConstU32<4>>; +type 
TxExtensionOnlyOperational = + DynamicMaxBlockWeight, ConstU32<4>, 10, true>; type MaximumBlockWeight = MaxParachainBlockWeight>; #[test] @@ -271,6 +273,73 @@ fn tx_extension_large_tx_enables_full_core_usage() { }); } +#[test] +fn tx_extension_only_allows_large_operational_tx_to_enable_full_core_usage() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + initialize_block_finished(); + + // Create a transaction larger than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + let mut info = DispatchInfo { + call_weight: large_weight, + class: DispatchClass::Normal, + ..Default::default() + }; + + // As `Normal` transaction this should be rejected. + assert_eq!( + TxExtensionOnlyOperational::validate_and_prepare( + TxExtensionOnlyOperational::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + ) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() + ); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { first_transaction_index: None }) + ); + + info.class = DispatchClass::Operational; + + // As `Operational` transaction this is accepted. + assert_ok!(TxExtensionOnlyOperational::validate_and_prepare( + TxExtensionOnlyOperational::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + )); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::PotentialFullCore { first_transaction_index: Some(0), .. 
}) + ); + + let mut post_info = + PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; + + assert_ok!(TxExtension::post_dispatch((), &info, &mut post_info, 0, &Ok(()))); + + assert_eq!(crate::BlockWeightMode::::get(), Some(BlockWeightMode::FullCore)); + assert!(has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get().ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + }); +} + #[test] fn tx_extension_large_tx_with_refund_goes_back_to_fractional() { TestExtBuilder::new() diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index 603e76fb8864b..a4eabe1e1ab50 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -93,7 +93,14 @@ pub struct DynamicMaxBlockWeight< const ONLY_OPERATIONAL: bool = false, >(pub Inner, core::marker::PhantomData<(Config, TargetBlockRate)>); -impl DynamicMaxBlockWeight { +impl< + T, + S, + TargetBlockRate, + const MAX_TRANSACTION_TO_CONSIDER: u32, + const ONLY_OPERATIONAL: bool, + > DynamicMaxBlockWeight +{ /// Create a new [`DynamicMaxBlockWeight`] instance. 
pub fn new(s: S) -> Self { Self(s, Default::default()) @@ -187,8 +194,10 @@ where .saturating_add(Weight::from_parts(0, len as u64)) .any_gt(target_weight) { + let class_allowed = if ONLY_OPERATIONAL { info.class == DispatchClass::Operational } else { true }; + if transaction_index.unwrap_or_default().saturating_sub(first_transaction_index.unwrap_or_default()) < MAX_TRANSACTION_TO_CONSIDER - && is_first_block_in_core_with_digest(&digest) { + && is_first_block_in_core_with_digest(&digest) && class_allowed { log::trace!( target: LOG_TARGET, "Enabling `PotentialFullCore` mode for extrinsic", @@ -203,7 +212,7 @@ where } else { log::trace!( target: LOG_TARGET, - "Transaction is over the block limit, but outside of the window of transactions to consider.", + "Transaction is over the block limit, but is either outside of the allowed window or the dispatch class is not allowed.", ); return Err(InvalidTransaction::ExhaustsResources) @@ -328,16 +337,40 @@ where } } -impl From - for DynamicMaxBlockWeight +impl< + Config, + Inner, + TargetBlockRate, + const MAX_TRANSACTION_TO_CONSIDER: u32, + const ONLY_OPERATIONAL: bool, + > From + for DynamicMaxBlockWeight< + Config, + Inner, + TargetBlockRate, + MAX_TRANSACTION_TO_CONSIDER, + ONLY_OPERATIONAL, + > { fn from(s: Inner) -> Self { Self::new(s) } } -impl core::fmt::Debug - for DynamicMaxBlockWeight +impl< + Config, + Inner: core::fmt::Debug, + TargetBlockRate, + const MAX_TRANSACTION_TO_CONSIDER: u32, + const ONLY_OPERATIONAL: bool, + > core::fmt::Debug + for DynamicMaxBlockWeight< + Config, + Inner, + TargetBlockRate, + MAX_TRANSACTION_TO_CONSIDER, + ONLY_OPERATIONAL, + > { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { write!(f, "DynamicMaxBlockWeight<{:?}>", self.0) @@ -348,8 +381,16 @@ impl< Config: crate::Config + Send + Sync, Inner: TransactionExtension, TargetBlockRate: Get + Send + Sync + 'static, + const MAX_TRANSACTION_TO_CONSIDER: u32, + const ONLY_OPERATIONAL: bool, > 
TransactionExtension - for DynamicMaxBlockWeight + for DynamicMaxBlockWeight< + Config, + Inner, + TargetBlockRate, + MAX_TRANSACTION_TO_CONSIDER, + ONLY_OPERATIONAL, + > where Config::RuntimeCall: Dispatchable, { @@ -421,6 +462,7 @@ where } fn bare_validate( + call: &Config::RuntimeCall, info: &DispatchInfoOf, len: usize, From 634614c785e0c5b36a6ee85ffa4229235632ee1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 22 Oct 2025 22:20:55 +0200 Subject: [PATCH 152/312] Fixes --- .../parachain-system/src/benchmarking.rs | 29 +++++++++---- .../parachain-system/src/block_weight/mock.rs | 15 +------ .../parachain-system/src/block_weight/mod.rs | 4 +- .../src/block_weight/transaction_extension.rs | 42 +++++++------------ substrate/frame/system/src/lib.rs | 2 +- 5 files changed, 39 insertions(+), 53 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/benchmarking.rs b/cumulus/pallets/parachain-system/src/benchmarking.rs index ed95b0feb874a..fd05cac40d256 100644 --- a/cumulus/pallets/parachain-system/src/benchmarking.rs +++ b/cumulus/pallets/parachain-system/src/benchmarking.rs @@ -21,10 +21,7 @@ use super::*; use crate::{ - block_weight::{ - mock::{has_use_full_core_digest, register_weight}, - BlockWeightMode, DynamicMaxBlockWeight, MaxParachainBlockWeight, - }, + block_weight::{BlockWeightMode, DynamicMaxBlockWeight, MaxParachainBlockWeight}, parachain_inherent::InboundDownwardMessages, }; use cumulus_primitives_core::{ @@ -39,6 +36,11 @@ use frame_system::RawOrigin; use sp_core::ConstU32; use sp_runtime::traits::{BlakeTwo256, DispatchTransaction, Dispatchable}; +fn has_use_full_core_digest() -> bool { + let digest = frame_system::Pallet::::digest(); + CumulusDigestItem::contains_use_full_core(&digest) +} + #[benchmarks(where T: Send + Sync, T::RuntimeCall: Dispatchable, @@ -128,7 +130,10 @@ mod benchmarks { { ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| { // Normally this is done by `CheckWeight` - 
register_weight(info.call_weight, DispatchClass::Normal); + frame_system::Pallet::::register_extra_weight_unchecked( + info.call_weight, + DispatchClass::Normal, + ); Ok(post_info) }) .unwrap() @@ -136,7 +141,7 @@ mod benchmarks { } assert_eq!(crate::BlockWeightMode::::get().unwrap(), BlockWeightMode::FullCore); - assert!(has_use_full_core_digest()); + assert!(has_use_full_core_digest::()); assert_eq!( MaxParachainBlockWeight::>::get(), MaxParachainBlockWeight::>::FULL_CORE_WEIGHT @@ -187,7 +192,10 @@ mod benchmarks { { ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| { // Normally this is done by `CheckWeight` - register_weight(info.call_weight, DispatchClass::Normal); + frame_system::Pallet::::register_extra_weight_unchecked( + info.call_weight, + DispatchClass::Normal, + ); Ok(post_info) }) .unwrap() @@ -198,7 +206,7 @@ mod benchmarks { crate::BlockWeightMode::::get().unwrap(), BlockWeightMode::FractionOfCore { first_transaction_index: Some(1) } ); - assert!(!has_use_full_core_digest()); + assert!(!has_use_full_core_digest::()); assert_eq!(MaxParachainBlockWeight::>::get(), target_weight); Ok(()) @@ -243,7 +251,10 @@ mod benchmarks { { ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| { // Normally this is done by `CheckWeight` - register_weight(info.call_weight, DispatchClass::Normal); + frame_system::Pallet::::register_extra_weight_unchecked( + info.call_weight, + DispatchClass::Normal, + ); Ok(post_info) }) .unwrap() diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index 68a96df50973d..e4e144d3552ff 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -197,21 +197,8 @@ impl TestExtBuilder { /// Helper to check if UseFullCore digest was deposited pub fn has_use_full_core_digest() -> bool { - use codec::Decode; - use 
cumulus_primitives_core::CUMULUS_CONSENSUS_ID; - use sp_runtime::DigestItem; - let digest = frame_system::Pallet::::digest(); - digest.logs.iter().any(|log| match log { - DigestItem::Consensus(id, val) if id == &CUMULUS_CONSENSUS_ID => { - if let Ok(CumulusDigestItem::UseFullCore) = CumulusDigestItem::decode(&mut &val[..]) { - true - } else { - false - } - }, - _ => false, - }) + CumulusDigestItem::contains_use_full_core(&digest) } /// Helper to register weight as consumed (simulating on_initialize) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index e2887d1ba1859..686561d23deaa 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -56,8 +56,8 @@ use scale_info::TypeInfo; use sp_core::Get; use sp_runtime::Digest; -#[cfg(any(test, feature = "runtime-benchmarks"))] -pub(crate) mod mock; +#[cfg(test)] +mod mock; pub mod pre_inherents_hook; #[cfg(test)] mod tests; diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index a4eabe1e1ab50..0f5f7b997f984 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -80,8 +80,8 @@ use sp_runtime::{ /// - `MAX_TRANSACTION`: The maximum number of transactions to consider before giving up to change /// the max block weight. /// -/// - `ONLY_OPERATIONAL`: Should only operational transactions be allowed to change the max block -/// weight? +/// - `ALLOW_NORMAL`: Should transactions with a dispatch class `Normal` be allowed to change the +/// max block weight? 
#[derive(Encode, Decode, DecodeWithMemTracking, TypeInfo)] #[derive_where::derive_where(Clone, Eq, PartialEq, Default; Inner)] #[scale_info(skip_type_params(Config, TargetBlockRate))] @@ -90,16 +90,11 @@ pub struct DynamicMaxBlockWeight< Inner, TargetBlockRate, const MAX_TRANSACTION_TO_CONSIDER: u32 = 10, - const ONLY_OPERATIONAL: bool = false, + const ALLOW_NORMAL: bool = true, >(pub Inner, core::marker::PhantomData<(Config, TargetBlockRate)>); -impl< - T, - S, - TargetBlockRate, - const MAX_TRANSACTION_TO_CONSIDER: u32, - const ONLY_OPERATIONAL: bool, - > DynamicMaxBlockWeight +impl + DynamicMaxBlockWeight { /// Create a new [`DynamicMaxBlockWeight`] instance. pub fn new(s: S) -> Self { @@ -112,15 +107,8 @@ impl< Inner, TargetBlockRate, const MAX_TRANSACTION_TO_CONSIDER: u32, - const ONLY_OPERATIONAL: bool, - > - DynamicMaxBlockWeight< - Config, - Inner, - TargetBlockRate, - MAX_TRANSACTION_TO_CONSIDER, - ONLY_OPERATIONAL, - > + const ALLOW_NORMAL: bool, + > DynamicMaxBlockWeight where Config: crate::Config, TargetBlockRate: Get, @@ -194,7 +182,8 @@ where .saturating_add(Weight::from_parts(0, len as u64)) .any_gt(target_weight) { - let class_allowed = if ONLY_OPERATIONAL { info.class == DispatchClass::Operational } else { true }; + // When `ALLOW_NORMAL` is `true`, we want to allow all classes of transactions. 
+ let class_allowed = if ALLOW_NORMAL { true } else { info.class == DispatchClass::Operational }; if transaction_index.unwrap_or_default().saturating_sub(first_transaction_index.unwrap_or_default()) < MAX_TRANSACTION_TO_CONSIDER && is_first_block_in_core_with_digest(&digest) && class_allowed { @@ -342,14 +331,14 @@ impl< Inner, TargetBlockRate, const MAX_TRANSACTION_TO_CONSIDER: u32, - const ONLY_OPERATIONAL: bool, + const ALLOW_NORMAL: bool, > From for DynamicMaxBlockWeight< Config, Inner, TargetBlockRate, MAX_TRANSACTION_TO_CONSIDER, - ONLY_OPERATIONAL, + ALLOW_NORMAL, > { fn from(s: Inner) -> Self { @@ -362,14 +351,14 @@ impl< Inner: core::fmt::Debug, TargetBlockRate, const MAX_TRANSACTION_TO_CONSIDER: u32, - const ONLY_OPERATIONAL: bool, + const ALLOW_NORMAL: bool, > core::fmt::Debug for DynamicMaxBlockWeight< Config, Inner, TargetBlockRate, MAX_TRANSACTION_TO_CONSIDER, - ONLY_OPERATIONAL, + ALLOW_NORMAL, > { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { @@ -382,14 +371,14 @@ impl< Inner: TransactionExtension, TargetBlockRate: Get + Send + Sync + 'static, const MAX_TRANSACTION_TO_CONSIDER: u32, - const ONLY_OPERATIONAL: bool, + const ALLOW_NORMAL: bool, > TransactionExtension for DynamicMaxBlockWeight< Config, Inner, TargetBlockRate, MAX_TRANSACTION_TO_CONSIDER, - ONLY_OPERATIONAL, + ALLOW_NORMAL, > where Config::RuntimeCall: Dispatchable, @@ -462,7 +451,6 @@ where } fn bare_validate( - call: &Config::RuntimeCall, info: &DispatchInfoOf, len: usize, diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 483ef74ce3816..8f7669d15b51c 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -2139,7 +2139,7 @@ impl Pallet { } /// Sets the index of extrinsic that is currently executing. 
- #[cfg(any(feature = "std", test))] + #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] pub fn set_extrinsic_index(extrinsic_index: u32) { storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &extrinsic_index) } From ffbbf98a8a41de525acbf8cf88a634debaf02f01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 22 Oct 2025 22:56:57 +0200 Subject: [PATCH 153/312] Fix tests --- .../parachain-system/src/block_weight/tests.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 9a06d3214d06d..76590cb6dcd78 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -37,7 +37,7 @@ use sp_runtime::{ type TxExtension = DynamicMaxBlockWeight, ConstU32<4>>; type TxExtensionOnlyOperational = - DynamicMaxBlockWeight, ConstU32<4>, 10, true>; + DynamicMaxBlockWeight, ConstU32<4>, 10, false>; type MaximumBlockWeight = MaxParachainBlockWeight>; #[test] @@ -200,9 +200,6 @@ fn tx_extension_sets_fraction_of_core_mode() { .execute_with(|| { initialize_block_finished(); - // BlockWeightMode should not be set yet - assert!(crate::BlockWeightMode::::get().is_none()); - // Create a small transaction let small_weight = Weight::from_parts(100_000, 1024); let info = DispatchInfo { @@ -670,9 +667,11 @@ fn max_weight_without_bundle_info() { let max_weight = MaximumBlockWeight::get(); - // With 2 cores and 4 target blocks - let expected_weight = - Weight::from_parts(2 * 2 * WEIGHT_REF_TIME_PER_SECOND / 4, 2 * MAX_POV_SIZE as u64 / 4); + // With 2 cores and 12 target blocks + let expected_weight = Weight::from_parts( + 2 * 2 * WEIGHT_REF_TIME_PER_SECOND / TARGET_BLOCK_RATE as u64, + 2 * MAX_POV_SIZE as u64 / TARGET_BLOCK_RATE as u64, + ); assert_eq!(max_weight, expected_weight); }); From 
b5c7f767c325d34f0ac2962910e4948bdbc568c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 11 Nov 2025 13:26:41 +0100 Subject: [PATCH 154/312] Fixes --- Cargo.lock | 2 ++ cumulus/pallets/parachain-system/src/block_weight/mock.rs | 2 +- cumulus/pallets/parachain-system/src/lib.rs | 1 + cumulus/test/client/Cargo.toml | 2 ++ cumulus/test/client/src/block_builder.rs | 6 ++++++ substrate/client/basic-authorship/src/basic_authorship.rs | 2 -- substrate/primitives/trie/src/recorder.rs | 2 +- 7 files changed, 13 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d4d878281ce57..d475b22b1ecb6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5129,12 +5129,14 @@ dependencies = [ "sp-blockchain", "sp-consensus-aura", "sp-core 28.0.0", + "sp-externalities 0.25.0", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-runtime", "sp-timestamp", + "sp-trie", "substrate-test-client", ] diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index e4e144d3552ff..88afc42891005 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -135,7 +135,7 @@ pub struct TestExtBuilder { #[cfg(test)] impl Default for TestExtBuilder { fn default() -> Self { - sp_tracing::init_for_tests(); + sp_tracing::try_init_simple(); Self { num_cores: None, bundle_index: None, bundle_maybe_last: false } } diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index a6d8ad93c7403..8e9d67778f0b4 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -1544,6 +1544,7 @@ impl Pallet { fn send_ump_signals() { let mut ump_signals = PendingUpwardSignals::::take(); if !ump_signals.is_empty() { + UpwardMessages::::append(UMP_SEPARATOR); UpwardMessages::::mutate(|up| { up.append(&mut ump_signals); }); diff --git 
a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index c94448f600dae..476c7abe46732 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -26,12 +26,14 @@ sp-application-crypto = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus-aura = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-timestamp = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } substrate-test-client = { workspace = true } # Polkadot diff --git a/cumulus/test/client/src/block_builder.rs b/cumulus/test/client/src/block_builder.rs index e8d9908f6add2..c9ae642fb95cb 100644 --- a/cumulus/test/client/src/block_builder.rs +++ b/cumulus/test/client/src/block_builder.rs @@ -24,7 +24,9 @@ use polkadot_primitives::{BlockNumber as PBlockNumber, Hash as PHash}; use sc_block_builder::BlockBuilderBuilder; use sp_api::{ProofRecorder, ProofRecorderIgnoredNodes, ProvideRuntimeApi}; use sp_consensus_aura::{AuraApi, Slot}; +use sp_externalities::Extensions; use sp_runtime::{traits::Header as HeaderT, Digest, DigestItem}; +use sp_trie::proof_size_extension::ProofSizeExt; /// A struct containing a block builder and support data required to build test scenarios. 
pub struct BlockBuilderAndSupportData<'a> { @@ -148,12 +150,16 @@ fn init_block_builder( let proof_recorder = ProofRecorder::::with_ignored_nodes(ignored_nodes.unwrap_or_default()); + let mut extra_extensions = Extensions::default(); + extra_extensions.register(ProofSizeExt::new(proof_recorder.clone())); + let mut block_builder = BlockBuilderBuilder::new(client) .on_parent_block(at) .fetch_parent_block_number(client) .unwrap() .with_proof_recorder(Some(proof_recorder.clone())) .with_inherent_digests(pre_digests) + .with_extra_extensions(extra_extensions) .build() .expect("Creates new block builder for test runtime"); diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index e6fa247791394..d9c2fafe0e157 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -291,8 +291,6 @@ where // leave some time for evaluation and block finalization (10%) let deadline = (self.now)() + max_duration - max_duration / 10; let block_timer = time::Instant::now(); - // Determine if proof recording was requested - let proof_recording_enabled = storage_proof_recorder.is_some(); let mut block_builder = BlockBuilderBuilder::new(&*self.client) .on_parent_block(self.parent_hash) diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index 29d54bc635ef9..a2349a49716f2 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -42,7 +42,7 @@ const LOG_TARGET: &str = "trie-recorder"; /// A list of ignored nodes for [`Recorder`]. /// /// These nodes when passed to a recorder will be ignored and not recorded by the recorder. 
-#[derive(Clone)] +#[derive(Clone, Debug)] pub struct IgnoredNodes { nodes: HashSet, } From d303dd5f8c5f4272976c7e4bece6c9867f06e92b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 11 Nov 2025 21:25:59 +0100 Subject: [PATCH 155/312] Start more testing --- .../parachain-system/src/block_weight/mock.rs | 55 ++++++++++++++++++- .../src/block_weight/tests.rs | 14 ++++- substrate/primitives/runtime/src/testing.rs | 30 +++++++++- 3 files changed, 95 insertions(+), 4 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index 88afc42891005..be3c6ea7ecd36 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -33,7 +33,11 @@ use frame_support::{ use frame_system::limits::BlockWeights; use sp_core::ConstU32; use sp_io; -use sp_runtime::{BuildStorage, Perbill}; +use sp_runtime::{ + generic::{self, UncheckedExtrinsic}, + testing::UintAuthorityId, + BuildStorage, Perbill, +}; const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(1); @@ -41,7 +45,15 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(1); pub const CALL: &RuntimeCall = &RuntimeCall::System(frame_system::Call::set_heap_pages { pages: 0u64 }); -type Block = frame_system::mocking::MockBlock; +pub type Extrinsic = UncheckedExtrinsic< + UintAuthorityId, + RuntimeCall, + UintAuthorityId, + DynamicMaxBlockWeight>, +>; + +pub type Block = + generic::Block::Hashing>, Extrinsic>; pub const TARGET_BLOCK_RATE: u32 = 12; @@ -99,6 +111,8 @@ impl frame_system::Config for Runtime { // Just required to make it compile, but not that important for this example here. type Block = Block; type OnSetCode = crate::ParachainSetCode; + type AccountId = u64; + type Lookup = UintAuthorityId; // Rest of the types are omitted here. 
} @@ -117,13 +131,50 @@ impl crate::Config for Runtime { type RelayParentOffset = (); } +// Include test_pallet module inline +#[frame_support::pallet(dev_mode)] +pub mod test_pallet { + use frame_support::{ + dispatch::DispatchClass, pallet_prelude::*, weights::constants::WEIGHT_REF_TIME_PER_SECOND, + }; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + crate::Config {} + + #[pallet::call] + impl Pallet { + /// A heavy call with Normal dispatch class that consumes significant weight. + #[pallet::weight((Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024), DispatchClass::Normal))] + pub fn heavy_call_normal(_: OriginFor) -> DispatchResult { + Ok(()) + } + + /// A heavy call with Operational dispatch class that consumes significant weight. + #[pallet::weight((Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024), DispatchClass::Operational))] + pub fn heavy_call_operational(_: OriginFor) -> DispatchResult { + Ok(()) + } + } +} + +impl test_pallet::Config for Runtime {} + construct_runtime!( pub enum Runtime { System: frame_system, ParachainSystem: parachain_system, + TestPallet: test_pallet, } ); +/// Executive: handles dispatch to the various modules. 
+pub type Executive = + frame_executive::Executive, Runtime, ()>; + /// Builder for test externalities #[cfg(test)] pub struct TestExtBuilder { diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 76590cb6dcd78..0833d7921fa87 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -23,7 +23,7 @@ use cumulus_primitives_core::{ use frame_support::{ assert_ok, dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}, - pallet_prelude::InvalidTransaction, + pallet_prelude::{InvalidTransaction, TransactionSource}, traits::PreInherents, weights::constants::WEIGHT_REF_TIME_PER_SECOND, }; @@ -696,3 +696,15 @@ fn ref_time_and_pov_size_cap() { assert_eq!(max_weight.proof_size(), MAX_POV_SIZE as u64); }); } + +#[test] +fn executive_validate_block_accepts_normal_above_target() { + TestExtBuilder::new().build().execute_with(|| { + let call = RuntimeCall::TestPallet(test_pallet::Call::heavy_call_normal {}); + + let xt = Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), ().into()); + + let result = + Executive::validate_transaction(TransactionSource::External, xt, Default::default()); + }); +} diff --git a/substrate/primitives/runtime/src/testing.rs b/substrate/primitives/runtime/src/testing.rs index 647f5eb78d5e1..59f71d4ce4a9a 100644 --- a/substrate/primitives/runtime/src/testing.rs +++ b/substrate/primitives/runtime/src/testing.rs @@ -21,7 +21,7 @@ use crate::{ codec::{Codec, Decode, DecodeWithMemTracking, Encode, EncodeLike, MaxEncodedLen}, generic::{self, LazyBlock, UncheckedExtrinsic}, scale_info::TypeInfo, - traits::{self, BlakeTwo256, Dispatchable, LazyExtrinsic, OpaqueKeys}, + traits::{self, BlakeTwo256, Dispatchable, LazyExtrinsic, Lookup, OpaqueKeys, StaticLookup}, DispatchResultWithInfo, KeyTypeId, OpaqueExtrinsic, }; use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize}; @@ 
-54,6 +54,12 @@ use std::{cell::RefCell, fmt::Debug}; )] pub struct UintAuthorityId(pub u64); +impl core::fmt::Display for UintAuthorityId { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + core::fmt::Display::fmt(&self.0, f) + } +} + impl From for UintAuthorityId { fn from(id: u64) -> Self { UintAuthorityId(id) @@ -162,6 +168,28 @@ impl traits::IdentifyAccount for UintAuthorityId { } } +impl StaticLookup for UintAuthorityId { + type Source = Self; + type Target = u64; + + fn lookup(s: Self::Source) -> Result { + Ok(s.0) + } + + fn unlookup(t: Self::Target) -> Self::Source { + Self(t) + } +} + +impl Lookup for UintAuthorityId { + type Source = Self; + type Target = u64; + + fn lookup(&self, s: Self::Source) -> Result { + Ok(s.0) + } +} + impl traits::Verify for UintAuthorityId { type Signer = Self; From 36f2c35e6a105b5f2587e2aad2dab9bed34bea96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 12 Nov 2025 22:41:46 +0100 Subject: [PATCH 156/312] More tests --- .../parachain-system/src/block_weight/mock.rs | 182 ++++++++++++++---- .../parachain-system/src/block_weight/mod.rs | 24 ++- .../src/block_weight/pre_inherents_hook.rs | 2 +- .../src/block_weight/tests.rs | 144 ++++++++++---- .../src/block_weight/transaction_extension.rs | 16 +- cumulus/pallets/parachain-system/src/lib.rs | 19 +- 6 files changed, 299 insertions(+), 88 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index be3c6ea7ecd36..0eba745c88d9a 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -15,7 +15,7 @@ // limitations under the License. 
use super::{transaction_extension::DynamicMaxBlockWeight, *}; -use crate as parachain_system; +use crate::{self as parachain_system, PreviousCoreCount}; use codec::Compact; use cumulus_primitives_core::{ BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, @@ -45,6 +45,13 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(1); pub const CALL: &RuntimeCall = &RuntimeCall::System(frame_system::Call::set_heap_pages { pages: 0u64 }); +pub type ExtrinsicOnlyOperational = UncheckedExtrinsic< + UintAuthorityId, + only_operational_runtime::RuntimeCall, + UintAuthorityId, + DynamicMaxBlockWeight, 10, false>, +>; + pub type Extrinsic = UncheckedExtrinsic< UintAuthorityId, RuntimeCall, @@ -55,6 +62,11 @@ pub type Extrinsic = UncheckedExtrinsic< pub type Block = generic::Block::Hashing>, Extrinsic>; +pub type BlockOnlyOperational = generic::Block< + generic::Header::Hashing>, + ExtrinsicOnlyOperational, +>; + pub const TARGET_BLOCK_RATE: u32 = 12; #[docify::export(tx_extension_setup)] @@ -99,39 +111,6 @@ mod max_block_weight_setup { } } -// Configure a mock runtime to test the functionality -#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -#[docify::export(pre_inherents_setup)] -impl frame_system::Config for Runtime { - // Setup the block weight. - type BlockWeights = max_block_weight_setup::RuntimeBlockWeights; - // Set the `PreInherents` hook. - type PreInherents = DynamicMaxBlockWeightHooks>; - - // Just required to make it compile, but not that important for this example here. - type Block = Block; - type OnSetCode = crate::ParachainSetCode; - type AccountId = u64; - type Lookup = UintAuthorityId; - // Rest of the types are omitted here. 
-} - -impl crate::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type OnSystemEvent = (); - type SelfParaId = (); - type OutboundXcmpMessageSource = (); - type DmpQueue = (); - type ReservedDmpWeight = (); - type XcmpMessageHandler = (); - type ReservedXcmpWeight = (); - type CheckAssociatedRelayNumber = crate::RelayNumberStrictlyIncreases; - type WeightInfo = (); - type ConsensusHook = crate::ExpectParentIncluded; - type RelayParentOffset = (); -} - -// Include test_pallet module inline #[frame_support::pallet(dev_mode)] pub mod test_pallet { use frame_support::{ @@ -158,7 +137,59 @@ pub mod test_pallet { pub fn heavy_call_operational(_: OriginFor) -> DispatchResult { Ok(()) } + + /// A heavy call with Operational dispatch class that consumes significant weight. + #[pallet::weight((Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024), DispatchClass::Mandatory))] + pub fn heavy_call_mandatory(_: OriginFor) -> DispatchResult { + Ok(()) + } } + + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + type Error = sp_inherents::MakeFatalError<()>; + const INHERENT_IDENTIFIER: InherentIdentifier = *b"testtest"; + + fn create_inherent(_data: &InherentData) -> Option { + None + } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::heavy_call_mandatory {}) + } + } +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +#[docify::export(pre_inherents_setup)] +impl frame_system::Config for Runtime { + // Setup the block weight. + type BlockWeights = max_block_weight_setup::RuntimeBlockWeights; + // Set the `PreInherents` hook. + type PreInherents = DynamicMaxBlockWeightHooks>; + + // Just required to make it compile, but not that important for this example here. + type Block = Block; + type OnSetCode = crate::ParachainSetCode; + type AccountId = u64; + type Lookup = UintAuthorityId; + // Rest of the types are omitted here. 
+} + +impl crate::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = (); + type OutboundXcmpMessageSource = (); + type DmpQueue = (); + type ReservedDmpWeight = (); + type XcmpMessageHandler = (); + type ReservedXcmpWeight = (); + type CheckAssociatedRelayNumber = crate::RelayNumberStrictlyIncreases; + type WeightInfo = (); + type ConsensusHook = crate::ExpectParentIncluded; + type RelayParentOffset = (); } impl test_pallet::Config for Runtime {} @@ -171,28 +202,93 @@ construct_runtime!( } ); +pub mod only_operational_runtime { + use frame_support::{construct_runtime, derive_impl}; + use sp_core::ConstU32; + use sp_runtime::testing::UintAuthorityId; + + use crate::block_weight::{mock::BlockOnlyOperational, DynamicMaxBlockWeightHooks}; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for RuntimeOnlyOperational { + // Setup the block weight. + type BlockWeights = super::max_block_weight_setup::RuntimeBlockWeights; + // Set the `PreInherents` hook. + type PreInherents = + DynamicMaxBlockWeightHooks>; + + // Just required to make it compile, but not that important for this example here. + type Block = BlockOnlyOperational; + type OnSetCode = crate::ParachainSetCode; + type AccountId = u64; + type Lookup = UintAuthorityId; + // Rest of the types are omitted here. 
+ } + + impl crate::Config for RuntimeOnlyOperational { + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = (); + type OutboundXcmpMessageSource = (); + type DmpQueue = (); + type ReservedDmpWeight = (); + type XcmpMessageHandler = (); + type ReservedXcmpWeight = (); + type CheckAssociatedRelayNumber = crate::RelayNumberStrictlyIncreases; + type WeightInfo = (); + type ConsensusHook = crate::ExpectParentIncluded; + type RelayParentOffset = (); + } + + impl super::test_pallet::Config for RuntimeOnlyOperational {} + + construct_runtime!( + pub enum RuntimeOnlyOperational { + System: frame_system, + ParachainSystem: super::parachain_system, + TestPallet: super::test_pallet, + } + ); +} + +pub use only_operational_runtime::{ + RuntimeCall as RuntimeCallOnlyOperational, RuntimeOnlyOperational, +}; + /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive, Runtime, ()>; +/// Executive configured to only accept operational transaction to go over the limit. +pub type ExecutiveOnlyOperational = frame_executive::Executive< + RuntimeOnlyOperational, + BlockOnlyOperational, + frame_system::ChainContext, + RuntimeOnlyOperational, + (), +>; + /// Builder for test externalities -#[cfg(test)] pub struct TestExtBuilder { num_cores: Option, bundle_index: Option, bundle_maybe_last: bool, + previous_core_count: Option, } -#[cfg(test)] impl Default for TestExtBuilder { fn default() -> Self { sp_tracing::try_init_simple(); - Self { num_cores: None, bundle_index: None, bundle_maybe_last: false } + Self { + num_cores: None, + bundle_index: None, + bundle_maybe_last: false, + previous_core_count: None, + } } } -#[cfg(test)] impl TestExtBuilder { /// Create a new builder pub fn new() -> Self { @@ -205,6 +301,12 @@ impl TestExtBuilder { self } + /// Set the `PreviousCoreCount` storage value. 
+ pub fn previous_core_count(mut self, previous_core_count: u16) -> Self { + self.previous_core_count = Some(previous_core_count); + self + } + /// Set this as the first block in the core (bundle index = 0) pub fn first_block_in_core(mut self, is_first: bool) -> Self { if is_first { @@ -240,6 +342,10 @@ impl TestExtBuilder { let digest = CumulusDigestItem::BundleInfo(bundle_info).to_digest_item(); frame_system::Pallet::::deposit_log(digest); } + + if let Some(previous_core_count) = self.previous_core_count { + PreviousCoreCount::::put(Compact(previous_core_count)); + } }); ext diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index 686561d23deaa..4a703ea21f4d4 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -46,7 +46,7 @@ //! Registering of the `PreInherents` hook: #![doc = docify::embed!("src/block_weight/mock.rs", pre_inherents_setup)] -use crate::Config; +use crate::{Config, PreviousCoreCount}; use codec::{Decode, Encode}; use core::marker::PhantomData; use cumulus_primitives_core::CumulusDigestItem; @@ -117,14 +117,13 @@ impl> /// Same as [`Self::target_block_weight`], but takes the `digests` directly. 
fn target_block_weight_with_digest(digest: &Digest) -> Weight { - let Some(core_info) = CumulusDigestItem::find_core_info(&digest) else { - return Self::FULL_CORE_WEIGHT; - }; + let number_of_cores = CumulusDigestItem::find_core_info(&digest).map_or_else( + || PreviousCoreCount::::get().map_or(1, |pc| pc.0), + |ci| ci.number_of_cores.0, + ) as u32; let target_blocks = TargetBlockRate::get(); - let number_of_cores = core_info.number_of_cores.0 as u32; - // Ensure we have at least one core and valid target blocks if number_of_cores == 0 || target_blocks == 0 { return Self::FULL_CORE_WEIGHT; @@ -156,7 +155,8 @@ impl> Get let digest = frame_system::Pallet::::digest(); let target_block_weight = Self::target_block_weight_with_digest(&digest); - let maybe_full_core_weight = if is_first_block_in_core_with_digest(&digest) { + let maybe_full_core_weight = if is_first_block_in_core_with_digest(&digest).unwrap_or(false) + { Self::FULL_CORE_WEIGHT } else { target_block_weight @@ -182,17 +182,21 @@ impl> Get } /// Is this the first block in a core? -fn is_first_block_in_core() -> bool { +fn is_first_block_in_core() -> Option { let digest = frame_system::Pallet::::digest(); is_first_block_in_core_with_digest(&digest) } /// Is this the first block in a core? (takes digest as parameter) -fn is_first_block_in_core_with_digest(digest: &Digest) -> bool { - CumulusDigestItem::find_bundle_info(digest).map_or(false, |bi| bi.index == 0) +/// +/// Returns `None` if the [`CumulusDigestItem::BundleInfo`] digest is not set. +fn is_first_block_in_core_with_digest(digest: &Digest) -> Option { + CumulusDigestItem::find_bundle_info(digest).map(|bi| bi.index == 0) } /// Is the `BlockWeight` already above the target block weight? +/// +/// Returns `None` if the [`CumulusDigestItem::BundleInfo`] digest is not set. 
fn block_weight_over_target_block_weight>() -> bool { let target_block_weight = MaxParachainBlockWeight::::target_block_weight(); diff --git a/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs b/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs index c121da9394953..0c51997c36303 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs @@ -46,7 +46,7 @@ where return } - let is_first_block_in_core = is_first_block_in_core::(); + let is_first_block_in_core = is_first_block_in_core::().unwrap_or(false); if !is_first_block_in_core { log::error!( diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 0833d7921fa87..f45e8604d9d31 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -31,7 +31,7 @@ use frame_system::{CheckWeight, RawOrigin as SystemOrigin}; use polkadot_primitives::MAX_POV_SIZE; use sp_core::ConstU32; use sp_runtime::{ - traits::{DispatchTransaction, TransactionExtension}, + traits::{DispatchTransaction, Header, TransactionExtension}, Digest, }; @@ -89,9 +89,9 @@ fn test_no_core_info() { TestExtBuilder::new().build().execute_with(|| { let weight = MaxParachainBlockWeight::>::get(); - // Without core info, should return conservative default - assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); - assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); + // Without core info, it takes the `PreviousCoreCount` into account. 
+ assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND / 4); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64 / 4); }); } @@ -142,42 +142,46 @@ fn test_max_ref_time_per_core_cap() { #[test] fn test_target_block_weight_with_digest_edge_cases() { - // Test with empty digest - let empty_digest = Digest::default(); - let weight = MaxParachainBlockWeight::>::target_block_weight_with_digest( - &empty_digest, - ); - assert_eq!(weight, MaxParachainBlockWeight::>::FULL_CORE_WEIGHT); - - // Test with digest containing core info - let core_info = CoreInfo { - selector: CoreSelector(0), - claim_queue_offset: ClaimQueueOffset(0), - number_of_cores: Compact(2u16), - }; - - let digest = Digest { logs: vec![CumulusDigestItem::CoreInfo(core_info).to_digest_item()] }; - - // With 2 cores and 4 target blocks: (2 cores * 2s) / 4 blocks = 1s - let weight = - MaxParachainBlockWeight::>::target_block_weight_with_digest(&digest); - assert_eq!(weight.ref_time(), 2 * 2 * WEIGHT_REF_TIME_PER_SECOND / 4); - assert_eq!(weight.proof_size(), (2 * MAX_POV_SIZE as u64) / 4); + TestExtBuilder::new().build().execute_with(|| { + // Test with empty digest + let empty_digest = Digest::default(); + let weight = + MaxParachainBlockWeight::>::target_block_weight_with_digest( + &empty_digest, + ); + assert_eq!(weight, MaxParachainBlockWeight::>::FULL_CORE_WEIGHT / 4); + + // Test with digest containing core info + let core_info = CoreInfo { + selector: CoreSelector(0), + claim_queue_offset: ClaimQueueOffset(0), + number_of_cores: Compact(2u16), + }; + + let digest = Digest { logs: vec![CumulusDigestItem::CoreInfo(core_info).to_digest_item()] }; + + // With 2 cores and 4 target blocks: (2 cores * 2s) / 4 blocks = 1s + let weight = + MaxParachainBlockWeight::>::target_block_weight_with_digest( + &digest, + ); + assert_eq!(weight.ref_time(), 2 * 2 * WEIGHT_REF_TIME_PER_SECOND / 4); + assert_eq!(weight.proof_size(), (2 * MAX_POV_SIZE as u64) / 4); + }); } #[test] fn 
test_is_first_block_in_core_functions() { TestExtBuilder::new().number_of_cores(1).build().execute_with(|| { - // Test without bundle info - should return false let empty_digest = Digest::default(); - assert!(!super::is_first_block_in_core_with_digest(&empty_digest)); + assert!(super::is_first_block_in_core_with_digest(&empty_digest).is_none()); // Test with bundle info index = 0 - should return true let bundle_info_first = BundleInfo { index: 0, maybe_last: false }; let digest_item_first = CumulusDigestItem::BundleInfo(bundle_info_first).to_digest_item(); let mut digest_first = Digest::default(); digest_first.push(digest_item_first); - assert!(super::is_first_block_in_core_with_digest(&digest_first)); + assert!(super::is_first_block_in_core_with_digest(&digest_first).unwrap()); // Test with bundle info index > 0 - should return false let bundle_info_not_first = BundleInfo { index: 5, maybe_last: true }; @@ -185,7 +189,7 @@ fn test_is_first_block_in_core_functions() { CumulusDigestItem::BundleInfo(bundle_info_not_first).to_digest_item(); let mut digest_not_first = Digest::default(); digest_not_first.push(digest_item_not_first); - assert!(!super::is_first_block_in_core_with_digest(&digest_not_first)); + assert!(!super::is_first_block_in_core_with_digest(&digest_not_first).unwrap()); }); } @@ -698,13 +702,87 @@ fn ref_time_and_pov_size_cap() { } #[test] -fn executive_validate_block_accepts_normal_above_target() { - TestExtBuilder::new().build().execute_with(|| { +fn executive_validate_block_handles_normal_transactions() { + TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { let call = RuntimeCall::TestPallet(test_pallet::Call::heavy_call_normal {}); let xt = Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), ().into()); - let result = - Executive::validate_transaction(TransactionSource::External, xt, Default::default()); + assert!(Executive::validate_transaction( + TransactionSource::External, + xt.clone(), + Default::default() + ) + .is_ok()); 
+ }); + + TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { + let call = RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::heavy_call_normal {}); + + let xt = ExtrinsicOnlyOperational::new_signed(call, 1u64.into(), 1u64.into(), ().into()); + + assert_eq!( + ExecutiveOnlyOperational::validate_transaction( + TransactionSource::External, + xt, + Default::default() + ) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() + ); + }); +} + +#[test] +fn executive_validate_block_handles_operational_transactions() { + TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { + let call = RuntimeCall::TestPallet(test_pallet::Call::heavy_call_operational {}); + + let xt = Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), ().into()); + + assert!(Executive::validate_transaction( + TransactionSource::External, + xt.clone(), + Default::default() + ) + .is_ok()); }); + + TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { + let call = + RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::heavy_call_operational {}); + + let xt = ExtrinsicOnlyOperational::new_signed(call, 1u64.into(), 1u64.into(), ().into()); + + assert!(ExecutiveOnlyOperational::validate_transaction( + TransactionSource::External, + xt, + Default::default() + ) + .is_ok()); + }); +} + +#[test] +fn executive_with_operational_only_applies_big_inherent() { + TestExtBuilder::new() + .number_of_cores(1) + .first_block_in_core(true) + .build() + .execute_with(|| { + Executive::initialize_block(&Header::new( + 1, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + )); + + let call = + RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::heavy_call_mandatory {}); + + let xt = ExtrinsicOnlyOperational::new_bare(call); + + ExecutiveOnlyOperational::apply_extrinsic(xt).unwrap().unwrap(); + }); } diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs 
b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index 0f5f7b997f984..e54fc2985b78c 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -162,7 +162,7 @@ where CumulusDigestItem::UseFullCore.to_digest_item(), ); - if !is_first_block_in_core_with_digest(&digest) { + if !is_first_block_in_core_with_digest(&digest).unwrap_or(false) { // We are already above the allowed maximum and do not want to accept any more // extrinsics. frame_system::Pallet::::register_extra_weight_unchecked( @@ -182,11 +182,17 @@ where .saturating_add(Weight::from_parts(0, len as u64)) .any_gt(target_weight) { - // When `ALLOW_NORMAL` is `true`, we want to allow all classes of transactions. - let class_allowed = if ALLOW_NORMAL { true } else { info.class == DispatchClass::Operational }; + // When `ALLOW_NORMAL` is `true`, we want to allow all classes of transactions. Inherents are always allowed. + let class_allowed = if ALLOW_NORMAL { true } else { info.class == DispatchClass::Operational } + || info.class == DispatchClass::Mandatory; + + // If the `BundleInfo` digest is not set (function returns `None`), it means we are in some offchain + // call like `validate_block`. In this case we assume this is the first block, otherwise these big + // transactions will never be able to enter the tx pool. 
+ let is_first_block = is_first_block_in_core_with_digest(&digest).unwrap_or(true); if transaction_index.unwrap_or_default().saturating_sub(first_transaction_index.unwrap_or_default()) < MAX_TRANSACTION_TO_CONSIDER - && is_first_block_in_core_with_digest(&digest) && class_allowed { + && is_first_block && class_allowed { log::trace!( target: LOG_TARGET, "Enabling `PotentialFullCore` mode for extrinsic", @@ -264,7 +270,7 @@ where // If this isn't the first block in a core, we register the full core weight // to ensure that we don't include any other transactions. Because we don't // know how many weight of the core was already used by the blocks before. - if !is_first_block_in_core_with_digest(&digest) { + if !is_first_block_in_core_with_digest(&digest).unwrap_or(false) { log::error!( target: LOG_TARGET, "Registering `FULL_CORE_WEIGHT` to ensure no other transaction is included \ diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 8e9d67778f0b4..f86f59df59e74 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -186,6 +186,7 @@ pub mod ump_constants { #[frame_support::pallet] pub mod pallet { use super::*; + use codec::Compact; use cumulus_primitives_core::CoreInfoExistsAtMaxOnce; use frame_support::pallet_prelude::{ValueQuery, *}; use frame_system::pallet_prelude::*; @@ -369,6 +370,11 @@ pub mod pallet { .encode(), ); }); + + PreviousCoreCount::::put(core_info.number_of_cores); + } else { + // Without the digest, we assume that it is `1`. + PreviousCoreCount::::put(Compact(1u16)); } // Send the pending UMP signals. @@ -778,11 +784,22 @@ pub mod pallet { NotScheduled, } + /// The current block weight mode. + /// + /// This is used to determine what is the maximum allowed block weight, for more information see + /// [`block_weight`]. 
#[pallet::storage] #[pallet::whitelist_storage] pub type BlockWeightMode = StorageValue<_, block_weight::BlockWeightMode, OptionQuery>; + /// The core count available to the parachain in the previous block. + /// + /// This is mainly used for offchain functionality to calculate the correct target block weight. + #[pallet::storage] + #[pallet::whitelist_storage] + pub type PreviousCoreCount = StorageValue<_, Compact, OptionQuery>; + /// Latest included block descendants the runtime accepted. In other words, these are /// ancestors of the currently executing block which have not been included in the observed /// relay-chain state. @@ -1481,7 +1498,7 @@ impl Pallet { // Ensure that `ValidationData` exists. We do not care about the validation data per se, // but we do care about the [`UpgradeRestrictionSignal`] which arrives with the same // inherent. - ensure!(>::exists(), Error::::ValidationDataNotAvailable,); + ensure!(>::exists(), Error::::ValidationDataNotAvailable); ensure!(>::get().is_none(), Error::::ProhibitedByPolkadot); ensure!(!>::exists(), Error::::OverlappingUpgrades); From df7f0fe9c8479038003ed9c7b7a944a5ae5e9b4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 13 Nov 2025 14:52:30 +0100 Subject: [PATCH 157/312] Introduce `MaxParachainBlockWeight` and related functionality This pull request introduces `MaxParachainBlockWeight` to calculate the max weight per parachain block. This is a preparation for [Block Bundling](https://github.com/paritytech/polkadot-sdk/issues/6495) which requires that the maximum block weight is dynamic. Block bundling requires a dynamic maximum block weight because it bundles multiple blocks into one `PoV`. Each `PoV` gets `2s` of execution time and `10MiB` of proof size. These resources need to be split up between all the blocks of one `PoV`. This doesn't require the weight to be dynamic. 
However, it gets complicated when a transaction should be applied that requires more resources than what one of these blocks can provide, e.g. for doing a runtime upgrade. In this case `MaxParachainBlockWeight` supports to increase the block weight of one block to take up the weight of the full `PoV`. The feature will not only be useful for things like runtime upgrade, but also could enable users to pay for running some huge contracts or whatever. For more information, please refer to the docs provided in the code of this pull request. For `MaxParachainBlockWeight` to work correctly, it provides a pre-inherent hook and a transaction extension. Both are required to track the weight correctly. --- Cargo.lock | 4 + cumulus/pallets/parachain-system/Cargo.toml | 9 +- .../parachain-system/src/benchmarking.rs | 210 ++++- .../parachain-system/src/block_weight/mock.rs | 372 +++++++++ .../parachain-system/src/block_weight/mod.rs | 206 +++++ .../src/block_weight/pre_inherents_hook.rs | 77 ++ .../src/block_weight/tests.rs | 788 ++++++++++++++++++ .../src/block_weight/transaction_extension.rs | 489 +++++++++++ cumulus/pallets/parachain-system/src/lib.rs | 114 ++- cumulus/pallets/parachain-system/src/tests.rs | 80 +- .../src/validate_block/implementation.rs | 3 + .../src/validate_block/tests.rs | 16 +- .../pallets/parachain-system/src/weights.rs | 28 + .../cumulus_pallet_parachain_system.rs | 11 + .../cumulus_pallet_parachain_system.rs | 11 + .../cumulus_pallet_parachain_system.rs | 11 + .../cumulus_pallet_parachain_system.rs | 11 + .../cumulus_pallet_parachain_system.rs | 11 + .../cumulus_pallet_parachain_system.rs | 11 + .../cumulus_pallet_parachain_system.rs | 11 + .../cumulus_pallet_parachain_system.rs | 11 + .../cumulus_pallet_parachain_system.rs | 11 + .../cumulus_pallet_parachain_system.rs | 11 + cumulus/primitives/core/src/lib.rs | 86 +- polkadot/primitives/src/v9/mod.rs | 12 + .../cumulus_pallet_parachain_system.rs | 11 + .../frame/support/src/traits/messages.rs | 10 + 
.../system/src/extensions/check_weight.rs | 8 +- substrate/frame/system/src/lib.rs | 2 +- substrate/primitives/runtime/src/testing.rs | 30 +- 30 files changed, 2608 insertions(+), 57 deletions(-) create mode 100644 cumulus/pallets/parachain-system/src/block_weight/mock.rs create mode 100644 cumulus/pallets/parachain-system/src/block_weight/mod.rs create mode 100644 cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs create mode 100644 cumulus/pallets/parachain-system/src/block_weight/tests.rs create mode 100644 cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs diff --git a/Cargo.lock b/Cargo.lock index 8b44504bc8868..de5dd04fe50ec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4712,8 +4712,11 @@ dependencies = [ "cumulus-primitives-proof-size-hostfunction", "cumulus-test-client", "cumulus-test-relay-sproof-builder", + "derive-where", + "docify", "environmental", "frame-benchmarking", + "frame-executive", "frame-support", "frame-system", "futures", @@ -4724,6 +4727,7 @@ dependencies = [ "pallet-message-queue", "parity-scale-codec", "polkadot-parachain-primitives", + "polkadot-primitives", "polkadot-runtime-parachains", "rand 0.8.5", "rstest", diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 7c111579f0672..50c262ad8c733 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -15,6 +15,7 @@ workspace = true array-bytes = { workspace = true } bytes = { workspace = true } codec = { features = ["derive"], workspace = true } +derive-where = { workspace = true } environmental = { workspace = true } hashbrown = { workspace = true } impl-trait-for-tuples = { workspace = true } @@ -40,6 +41,7 @@ sp-version = { workspace = true } # Polkadot polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } +polkadot-primitives = { workspace = true } polkadot-runtime-parachains = { workspace = true } xcm = { 
workspace = true } xcm-builder = { workspace = true } @@ -50,6 +52,9 @@ cumulus-primitives-core = { workspace = true } cumulus-primitives-parachain-inherent = { workspace = true } cumulus-primitives-proof-size-hostfunction = { workspace = true } +# For building docs +docify = { workspace = true } + [dev-dependencies] assert_matches = { workspace = true } futures = { workspace = true } @@ -58,8 +63,8 @@ rand = { workspace = true, default-features = true } rstest = { workspace = true } trie-standardmap = { workspace = true } - # Substrate +frame-executive = { workspace = true } sc-consensus = { workspace = true } sp-api = { workspace = true, default-features = true } sp-consensus-slots = { workspace = true, default-features = true } @@ -67,6 +72,7 @@ sp-crypto-hashing = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } + # Cumulus cumulus-test-client = { workspace = true } cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } @@ -87,6 +93,7 @@ std = [ "log/std", "pallet-message-queue/std", "polkadot-parachain-primitives/std", + "polkadot-primitives/std", "polkadot-runtime-parachains/std", "scale-info/std", "sp-consensus-babe/std", diff --git a/cumulus/pallets/parachain-system/src/benchmarking.rs b/cumulus/pallets/parachain-system/src/benchmarking.rs index c3d59e82255a3..fd05cac40d256 100644 --- a/cumulus/pallets/parachain-system/src/benchmarking.rs +++ b/cumulus/pallets/parachain-system/src/benchmarking.rs @@ -20,12 +20,31 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use crate::parachain_inherent::InboundDownwardMessages; -use cumulus_primitives_core::{relay_chain::Hash as RelayHash, InboundDownwardMessage}; +use crate::{ + block_weight::{BlockWeightMode, DynamicMaxBlockWeight, MaxParachainBlockWeight}, + parachain_inherent::InboundDownwardMessages, +}; 
+use cumulus_primitives_core::{ + relay_chain::Hash as RelayHash, BundleInfo, CoreInfo, InboundDownwardMessage, +}; use frame_benchmarking::v2::*; -use sp_runtime::traits::BlakeTwo256; +use frame_support::{ + dispatch::{DispatchInfo, PostDispatchInfo}, + weights::constants::WEIGHT_REF_TIME_PER_SECOND, +}; +use frame_system::RawOrigin; +use sp_core::ConstU32; +use sp_runtime::traits::{BlakeTwo256, DispatchTransaction, Dispatchable}; -#[benchmarks] +fn has_use_full_core_digest() -> bool { + let digest = frame_system::Pallet::::digest(); + CumulusDigestItem::contains_use_full_core(&digest) +} + +#[benchmarks(where + T: Send + Sync, + T::RuntimeCall: Dispatchable, +)] mod benchmarks { use super::*; @@ -64,6 +83,189 @@ mod benchmarks { head } + /// The worst-case scenario for the block weight transaction extension. + /// + /// Before executing an extrinsic `FractionOfCore` is set, changed to `PotentialFullCore` and + /// post dispatch switches to `FullCore`. + #[benchmark] + fn block_weight_tx_extension_max_weight() -> Result<(), BenchmarkError> { + let caller = account("caller", 0, 0); + + frame_system::Pallet::::note_inherents_applied(); + + frame_system::Pallet::::set_extrinsic_index(1); + + frame_system::Pallet::::deposit_log( + BundleInfo { index: 0, maybe_last: false }.to_digest_item(), + ); + frame_system::Pallet::::deposit_log( + CoreInfo { + selector: 0.into(), + claim_queue_offset: 0.into(), + number_of_cores: 1.into(), + } + .to_digest_item(), + ); + let target_weight = MaxParachainBlockWeight::>::target_block_weight(); + + let info = DispatchInfo { + // The weight needs to be more than the target weight. 
+ call_weight: target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 0)), + extension_weight: Weight::zero(), + class: DispatchClass::Normal, + ..Default::default() + }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; + let len = 0_usize; + + crate::BlockWeightMode::::put(BlockWeightMode::FractionOfCore { + first_transaction_index: None, + }); + + let ext = DynamicMaxBlockWeight::>::new(()); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| { + // Normally this is done by `CheckWeight` + frame_system::Pallet::::register_extra_weight_unchecked( + info.call_weight, + DispatchClass::Normal, + ); + Ok(post_info) + }) + .unwrap() + .unwrap(); + } + + assert_eq!(crate::BlockWeightMode::::get().unwrap(), BlockWeightMode::FullCore); + assert!(has_use_full_core_digest::()); + assert_eq!( + MaxParachainBlockWeight::>::get(), + MaxParachainBlockWeight::>::FULL_CORE_WEIGHT + ); + + Ok(()) + } + + /// A benchmark that assumes that an extrinsic was executed with `FractionOfCore` set. 
+ #[benchmark] + fn block_weight_tx_extension_stays_fraction_of_core() -> Result<(), BenchmarkError> { + let caller = account("caller", 0, 0); + + frame_system::Pallet::::note_inherents_applied(); + + frame_system::Pallet::::set_extrinsic_index(1); + + frame_system::Pallet::::deposit_log( + BundleInfo { index: 0, maybe_last: false }.to_digest_item(), + ); + frame_system::Pallet::::deposit_log( + CoreInfo { + selector: 0.into(), + claim_queue_offset: 0.into(), + number_of_cores: 1.into(), + } + .to_digest_item(), + ); + let target_weight = MaxParachainBlockWeight::>::target_block_weight(); + + let info = DispatchInfo { + call_weight: Weight::from_parts(1024, 1024), + extension_weight: Weight::zero(), + class: DispatchClass::Normal, + ..Default::default() + }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; + let len = 0_usize; + + crate::BlockWeightMode::::put(BlockWeightMode::FractionOfCore { + first_transaction_index: None, + }); + + let ext = DynamicMaxBlockWeight::>::new(()); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| { + // Normally this is done by `CheckWeight` + frame_system::Pallet::::register_extra_weight_unchecked( + info.call_weight, + DispatchClass::Normal, + ); + Ok(post_info) + }) + .unwrap() + .unwrap(); + } + + assert_eq!( + crate::BlockWeightMode::::get().unwrap(), + BlockWeightMode::FractionOfCore { first_transaction_index: Some(1) } + ); + assert!(!has_use_full_core_digest::()); + assert_eq!(MaxParachainBlockWeight::>::get(), target_weight); + + Ok(()) + } + + /// A benchmark that assumes that `FullCore` was set already before executing an extrinsic. 
+ #[benchmark] + fn block_weight_tx_extension_full_core() -> Result<(), BenchmarkError> { + let caller = account("caller", 0, 0); + + frame_system::Pallet::::note_inherents_applied(); + + frame_system::Pallet::::set_extrinsic_index(1); + + frame_system::Pallet::::deposit_log( + BundleInfo { index: 0, maybe_last: false }.to_digest_item(), + ); + frame_system::Pallet::::deposit_log( + CoreInfo { + selector: 0.into(), + claim_queue_offset: 0.into(), + number_of_cores: 1.into(), + } + .to_digest_item(), + ); + + let info = DispatchInfo { + call_weight: Weight::from_parts(1024, 1024), + extension_weight: Weight::zero(), + class: DispatchClass::Normal, + ..Default::default() + }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; + let len = 0_usize; + + crate::BlockWeightMode::::put(BlockWeightMode::FullCore); + + let ext = DynamicMaxBlockWeight::>::new(()); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| { + // Normally this is done by `CheckWeight` + frame_system::Pallet::::register_extra_weight_unchecked( + info.call_weight, + DispatchClass::Normal, + ); + Ok(post_info) + }) + .unwrap() + .unwrap(); + } + + assert_eq!(crate::BlockWeightMode::::get().unwrap(), BlockWeightMode::FullCore); + + Ok(()) + } + impl_benchmark_test_suite! { Pallet, crate::mock::new_test_ext(), diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs new file mode 100644 index 0000000000000..0eba745c88d9a --- /dev/null +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -0,0 +1,372 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{transaction_extension::DynamicMaxBlockWeight, *}; +use crate::{self as parachain_system, PreviousCoreCount}; +use codec::Compact; +use cumulus_primitives_core::{ + BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, +}; +use frame_support::{ + construct_runtime, derive_impl, + dispatch::DispatchClass, + parameter_types, + traits::PreInherents, + weights::{ + constants::{BlockExecutionWeight, ExtrinsicBaseWeight}, + Weight, + }, +}; +use frame_system::limits::BlockWeights; +use sp_core::ConstU32; +use sp_io; +use sp_runtime::{ + generic::{self, UncheckedExtrinsic}, + testing::UintAuthorityId, + BuildStorage, Perbill, +}; + +const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(1); + +/// A simple call, which one doesn't matter. 
+pub const CALL: &RuntimeCall = + &RuntimeCall::System(frame_system::Call::set_heap_pages { pages: 0u64 }); + +pub type ExtrinsicOnlyOperational = UncheckedExtrinsic< + UintAuthorityId, + only_operational_runtime::RuntimeCall, + UintAuthorityId, + DynamicMaxBlockWeight, 10, false>, +>; + +pub type Extrinsic = UncheckedExtrinsic< + UintAuthorityId, + RuntimeCall, + UintAuthorityId, + DynamicMaxBlockWeight>, +>; + +pub type Block = + generic::Block::Hashing>, Extrinsic>; + +pub type BlockOnlyOperational = generic::Block< + generic::Header::Hashing>, + ExtrinsicOnlyOperational, +>; + +pub const TARGET_BLOCK_RATE: u32 = 12; + +#[docify::export(tx_extension_setup)] +pub type TxExtension = DynamicMaxBlockWeight< + Runtime, + // Here you need to set the other extensions that are required by your runtime... + ( + frame_system::AuthorizeCall, + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + ), + ConstU32, +>; + +#[allow(dead_code)] +type NotDeadCode = TxExtension; + +#[docify::export_content(max_block_weight_setup)] +mod max_block_weight_setup { + use super::*; + + type MaximumBlockWeight = MaxParachainBlockWeight>; + + parameter_types! 
{ + pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(MaximumBlockWeight::get()); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MaximumBlockWeight::get()); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); + } +} + +#[frame_support::pallet(dev_mode)] +pub mod test_pallet { + use frame_support::{ + dispatch::DispatchClass, pallet_prelude::*, weights::constants::WEIGHT_REF_TIME_PER_SECOND, + }; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + crate::Config {} + + #[pallet::call] + impl Pallet { + /// A heavy call with Normal dispatch class that consumes significant weight. + #[pallet::weight((Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024), DispatchClass::Normal))] + pub fn heavy_call_normal(_: OriginFor) -> DispatchResult { + Ok(()) + } + + /// A heavy call with Operational dispatch class that consumes significant weight. + #[pallet::weight((Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024), DispatchClass::Operational))] + pub fn heavy_call_operational(_: OriginFor) -> DispatchResult { + Ok(()) + } + + /// A heavy call with Operational dispatch class that consumes significant weight. 
+ #[pallet::weight((Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024), DispatchClass::Mandatory))] + pub fn heavy_call_mandatory(_: OriginFor) -> DispatchResult { + Ok(()) + } + } + + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + type Error = sp_inherents::MakeFatalError<()>; + const INHERENT_IDENTIFIER: InherentIdentifier = *b"testtest"; + + fn create_inherent(_data: &InherentData) -> Option { + None + } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::heavy_call_mandatory {}) + } + } +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +#[docify::export(pre_inherents_setup)] +impl frame_system::Config for Runtime { + // Setup the block weight. + type BlockWeights = max_block_weight_setup::RuntimeBlockWeights; + // Set the `PreInherents` hook. + type PreInherents = DynamicMaxBlockWeightHooks>; + + // Just required to make it compile, but not that important for this example here. + type Block = Block; + type OnSetCode = crate::ParachainSetCode; + type AccountId = u64; + type Lookup = UintAuthorityId; + // Rest of the types are omitted here. 
+} + +impl crate::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = (); + type OutboundXcmpMessageSource = (); + type DmpQueue = (); + type ReservedDmpWeight = (); + type XcmpMessageHandler = (); + type ReservedXcmpWeight = (); + type CheckAssociatedRelayNumber = crate::RelayNumberStrictlyIncreases; + type WeightInfo = (); + type ConsensusHook = crate::ExpectParentIncluded; + type RelayParentOffset = (); +} + +impl test_pallet::Config for Runtime {} + +construct_runtime!( + pub enum Runtime { + System: frame_system, + ParachainSystem: parachain_system, + TestPallet: test_pallet, + } +); + +pub mod only_operational_runtime { + use frame_support::{construct_runtime, derive_impl}; + use sp_core::ConstU32; + use sp_runtime::testing::UintAuthorityId; + + use crate::block_weight::{mock::BlockOnlyOperational, DynamicMaxBlockWeightHooks}; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for RuntimeOnlyOperational { + // Setup the block weight. + type BlockWeights = super::max_block_weight_setup::RuntimeBlockWeights; + // Set the `PreInherents` hook. + type PreInherents = + DynamicMaxBlockWeightHooks>; + + // Just required to make it compile, but not that important for this example here. + type Block = BlockOnlyOperational; + type OnSetCode = crate::ParachainSetCode; + type AccountId = u64; + type Lookup = UintAuthorityId; + // Rest of the types are omitted here. 
+ } + + impl crate::Config for RuntimeOnlyOperational { + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = (); + type OutboundXcmpMessageSource = (); + type DmpQueue = (); + type ReservedDmpWeight = (); + type XcmpMessageHandler = (); + type ReservedXcmpWeight = (); + type CheckAssociatedRelayNumber = crate::RelayNumberStrictlyIncreases; + type WeightInfo = (); + type ConsensusHook = crate::ExpectParentIncluded; + type RelayParentOffset = (); + } + + impl super::test_pallet::Config for RuntimeOnlyOperational {} + + construct_runtime!( + pub enum RuntimeOnlyOperational { + System: frame_system, + ParachainSystem: super::parachain_system, + TestPallet: super::test_pallet, + } + ); +} + +pub use only_operational_runtime::{ + RuntimeCall as RuntimeCallOnlyOperational, RuntimeOnlyOperational, +}; + +/// Executive: handles dispatch to the various modules. +pub type Executive = + frame_executive::Executive, Runtime, ()>; + +/// Executive configured to only accept operational transaction to go over the limit. +pub type ExecutiveOnlyOperational = frame_executive::Executive< + RuntimeOnlyOperational, + BlockOnlyOperational, + frame_system::ChainContext, + RuntimeOnlyOperational, + (), +>; + +/// Builder for test externalities +pub struct TestExtBuilder { + num_cores: Option, + bundle_index: Option, + bundle_maybe_last: bool, + previous_core_count: Option, +} + +impl Default for TestExtBuilder { + fn default() -> Self { + sp_tracing::try_init_simple(); + + Self { + num_cores: None, + bundle_index: None, + bundle_maybe_last: false, + previous_core_count: None, + } + } +} + +impl TestExtBuilder { + /// Create a new builder + pub fn new() -> Self { + Self::default() + } + + /// Set the number of cores + pub fn number_of_cores(mut self, num_cores: u16) -> Self { + self.num_cores = Some(num_cores); + self + } + + /// Set the `PreviousCoreCount` storage value. 
+ pub fn previous_core_count(mut self, previous_core_count: u16) -> Self { + self.previous_core_count = Some(previous_core_count); + self + } + + /// Set this as the first block in the core (bundle index = 0) + pub fn first_block_in_core(mut self, is_first: bool) -> Self { + if is_first { + self.bundle_index = Some(0); + } else if self.bundle_index.is_none() { + // If not first and no bundle index set, default to index 1 + self.bundle_index = Some(1); + } + self + } + + /// Build the test externalities + pub fn build(self) -> sp_io::TestExternalities { + let storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext = sp_io::TestExternalities::from(storage); + + ext.execute_with(|| { + // Add core info if specified + if let Some(num_cores) = self.num_cores { + let core_info = CoreInfo { + selector: CoreSelector(0), + claim_queue_offset: ClaimQueueOffset(0), + number_of_cores: Compact(num_cores), + }; + let digest = CumulusDigestItem::CoreInfo(core_info).to_digest_item(); + frame_system::Pallet::::deposit_log(digest); + } + + // Add bundle info if specified + if let Some(bundle_index) = self.bundle_index { + let bundle_info = + BundleInfo { index: bundle_index, maybe_last: self.bundle_maybe_last }; + let digest = CumulusDigestItem::BundleInfo(bundle_info).to_digest_item(); + frame_system::Pallet::::deposit_log(digest); + } + + if let Some(previous_core_count) = self.previous_core_count { + PreviousCoreCount::::put(Compact(previous_core_count)); + } + }); + + ext + } +} + +/// Helper to check if UseFullCore digest was deposited +pub fn has_use_full_core_digest() -> bool { + let digest = frame_system::Pallet::::digest(); + CumulusDigestItem::contains_use_full_core(&digest) +} + +/// Helper to register weight as consumed (simulating on_initialize) +pub fn register_weight(weight: Weight, class: DispatchClass) { + frame_system::Pallet::::register_extra_weight_unchecked(weight, class); +} + +/// Emulates what happes after `initialize_block` 
finished. +pub fn initialize_block_finished() { + System::set_block_consumed_resources(Weight::zero(), 0); + System::note_finished_initialize(); + ::PreInherents::pre_inherents(); + System::note_inherents_applied(); +} diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs new file mode 100644 index 0000000000000..4a703ea21f4d4 --- /dev/null +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -0,0 +1,206 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Provides functionality to dynamically calculate the block weight for a parachain. +//! +//! With block bundling, parachains are relative free to choose whatever block interval they want. +//! The block interval is the time between individual blocks. The available resources per block (max +//! block weight) depend on the number of cores allocated to the parachain on the relay chain. Each +//! relay chain cores provides an execution time of `2s` and a storage size of `10MiB`. Depending on +//! the desired number of blocks to produce, the resources need to be divided between the individual +//! blocks. With small blocks that do not have that many resources available, a problem may arises +//! for bigger transactions not fitting into blocks anymore, e.g. a runtime upgrade. For these cases +//! 
the weight of a block can be increased to use the weight of a full core. Only the first block of +//! a core is allowed to increase its weight to use the full core weight. In the case of the first +//! block using the full core weight, there will be no further block build on the same core. This is +//! signaled to the node by setting the [`CumulusDigestItem::UseFullCore`] digest item.` +//! +//! The [`MaxParachainBlockWeight`] provides a [`Get`] implementation that will return the max block +//! weight as determined by the [`DynamicMaxBlockWeight`] transaction extension. +//! +//! [`DynamicMaxBlockWeightHooks`] needs to be registered as a pre-inherent hook. It is used to +//! handle the weight consumption of `on_initialize` and change the block weight mode based on the +//! consumed weight. +//! +//! # Setup +//! +//! Setup the transaction extension: +#![doc = docify::embed!("src/block_weight/mock.rs", tx_extension_setup)] +//! +//! Setting up `MaximumBlockWeight`: +#![doc = docify::embed!("src/block_weight/mock.rs", max_block_weight_setup)] +//! +//! Registering of the `PreInherents` hook: +#![doc = docify::embed!("src/block_weight/mock.rs", pre_inherents_setup)] + +use crate::{Config, PreviousCoreCount}; +use codec::{Decode, Encode}; +use core::marker::PhantomData; +use cumulus_primitives_core::CumulusDigestItem; +use frame_support::weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}; +use polkadot_primitives::MAX_POV_SIZE; +use scale_info::TypeInfo; +use sp_core::Get; +use sp_runtime::Digest; + +#[cfg(test)] +mod mock; +pub mod pre_inherents_hook; +#[cfg(test)] +mod tests; +pub mod transaction_extension; + +pub use pre_inherents_hook::DynamicMaxBlockWeightHooks; +pub use transaction_extension::DynamicMaxBlockWeight; + +const LOG_TARGET: &str = "runtime::parachain-system::block-weight"; + +/// The current block weight mode. +/// +/// Based on this mode [`MaxParachainBlockWeight`] determines the current allowed block weight. 
+#[derive(Debug, Encode, Decode, Clone, Copy, TypeInfo, PartialEq)] +pub enum BlockWeightMode { + /// The block is allowed to use the weight of a full core. + FullCore, + /// The current active transaction is allowed to use the weight of a full core. + PotentialFullCore { + /// The index of the first transaction. + first_transaction_index: Option, + /// The target weight that was used to determine that the extrinsic is above this limit. + target_weight: Weight, + }, + /// The block is only allowed to consume its fraction of the core. + /// + /// How much each block is allowed to consume, depends on the target number of blocks and the + /// available cores on the relay chain. + FractionOfCore { + /// The index of the first transaction. + first_transaction_index: Option, + }, +} + +/// Calculates the maximum block weight for a parachain. +/// +/// Based on the available cores and the number of desired blocks a block weight is calculated. +/// +/// The max block weight is partly dynamic and controlled via the [`DynamicMaxBlockWeight`] +/// transaction extension. The transaction extension is communicating the desired max block weight +/// using the [`BlockWeightMode`]. +pub struct MaxParachainBlockWeight(PhantomData<(Config, TargetBlockRate)>); + +impl> + MaxParachainBlockWeight +{ + // Maximum ref time per core + const MAX_REF_TIME_PER_CORE_NS: u64 = 2 * WEIGHT_REF_TIME_PER_SECOND; + pub(crate) const FULL_CORE_WEIGHT: Weight = + Weight::from_parts(Self::MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); + + /// Returns the target block weight for one block. + pub(crate) fn target_block_weight() -> Weight { + let digest = frame_system::Pallet::::digest(); + Self::target_block_weight_with_digest(&digest) + } + + /// Same as [`Self::target_block_weight`], but takes the `digests` directly. 
+ fn target_block_weight_with_digest(digest: &Digest) -> Weight { + let number_of_cores = CumulusDigestItem::find_core_info(&digest).map_or_else( + || PreviousCoreCount::::get().map_or(1, |pc| pc.0), + |ci| ci.number_of_cores.0, + ) as u32; + + let target_blocks = TargetBlockRate::get(); + + // Ensure we have at least one core and valid target blocks + if number_of_cores == 0 || target_blocks == 0 { + return Self::FULL_CORE_WEIGHT; + } + + // At maximum we want to allow `6s` of ref time, because we don't want to overload nodes + // that are running with standard hardware. These nodes need to be able to import all the + // blocks in 6s. + let total_ref_time = (number_of_cores as u64) + .saturating_mul(Self::MAX_REF_TIME_PER_CORE_NS) + .min(WEIGHT_REF_TIME_PER_SECOND * 6); + let ref_time_per_block = total_ref_time + .saturating_div(target_blocks as u64) + .min(Self::MAX_REF_TIME_PER_CORE_NS); + + let total_pov_size = (number_of_cores as u64).saturating_mul(MAX_POV_SIZE as u64); + // Each block at max gets one core. + let proof_size_per_block = + total_pov_size.saturating_div(target_blocks as u64).min(MAX_POV_SIZE as u64); + + Weight::from_parts(ref_time_per_block, proof_size_per_block) + } +} + +impl> Get + for MaxParachainBlockWeight +{ + fn get() -> Weight { + let digest = frame_system::Pallet::::digest(); + let target_block_weight = Self::target_block_weight_with_digest(&digest); + + let maybe_full_core_weight = if is_first_block_in_core_with_digest(&digest).unwrap_or(false) + { + Self::FULL_CORE_WEIGHT + } else { + target_block_weight + }; + + // If we are in `on_initialize` or at applying the inherents, we allow the maximum block + // weight as allowed by the current context. + if !frame_system::Pallet::::inherents_applied() { + return maybe_full_core_weight + } + + match crate::BlockWeightMode::::get() { + // We allow the full core. + Some(BlockWeightMode::FullCore | BlockWeightMode::PotentialFullCore { .. 
}) => + Self::FULL_CORE_WEIGHT, + // Let's calculate below how much weight we can use. + Some(BlockWeightMode::FractionOfCore { .. }) => target_block_weight, + // Either the runtime is not using the `DynamicMaxBlockWeight` extension or there is a + // bug. The value should be set before applying the first extrinsic. + None => maybe_full_core_weight, + } + } +} + +/// Is this the first block in a core? +fn is_first_block_in_core() -> Option { + let digest = frame_system::Pallet::::digest(); + is_first_block_in_core_with_digest(&digest) +} + +/// Is this the first block in a core? (takes digest as parameter) +/// +/// Returns `None` if the [`CumulusDigestItem::BundleInfo`] digest is not set. +fn is_first_block_in_core_with_digest(digest: &Digest) -> Option { + CumulusDigestItem::find_bundle_info(digest).map(|bi| bi.index == 0) +} + +/// Is the `BlockWeight` already above the target block weight? +/// +/// Returns `None` if the [`CumulusDigestItem::BundleInfo`] digest is not set. +fn block_weight_over_target_block_weight>() -> bool { + let target_block_weight = MaxParachainBlockWeight::::target_block_weight(); + + frame_system::Pallet::::remaining_block_weight() + .consumed() + .any_gt(target_block_weight) +} diff --git a/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs b/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs new file mode 100644 index 0000000000000..0c51997c36303 --- /dev/null +++ b/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs @@ -0,0 +1,77 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{ + block_weight_over_target_block_weight, is_first_block_in_core, BlockWeightMode, LOG_TARGET, +}; +use crate::block_weight::MaxParachainBlockWeight; +use cumulus_primitives_core::CumulusDigestItem; +use frame_support::traits::PreInherents; +use sp_core::Get; + +/// A pre-inherent hook that may increases max block weight after `on_initialize`. +/// +/// The hook is called before applying the first inherent. It checks the used block weight of +/// `on_initialize`. If the used block weight is above the target block weight, the hook will set +/// the [`CumulusDigestItem::UseFullCore`] digest. Regardless on if this is the first block in a +/// core or not. This is done to inform the node that this is the last block for the current core. +pub struct DynamicMaxBlockWeightHooks( + pub core::marker::PhantomData<(Config, TargetBlockRate)>, +); + +impl PreInherents for DynamicMaxBlockWeightHooks +where + Config: crate::Config, + TargetBlockRate: Get, +{ + fn pre_inherents() { + if !block_weight_over_target_block_weight::() { + // We still initialize the `BlockWeightMode`. + crate::BlockWeightMode::::put(BlockWeightMode::FractionOfCore { + first_transaction_index: None, + }); + return + } + + let is_first_block_in_core = is_first_block_in_core::().unwrap_or(false); + + if !is_first_block_in_core { + log::error!( + target: LOG_TARGET, + "Inherent block logic took longer than the target block weight, THIS IS A BUG!!!", + ); + + // We are already above the allowed maximum and do not want to accept any more + // extrinsics. 
+ frame_system::Pallet::::register_extra_weight_unchecked( + MaxParachainBlockWeight::::FULL_CORE_WEIGHT, + frame_support::dispatch::DispatchClass::Mandatory, + ); + } else { + log::debug!( + target: LOG_TARGET, + "Inherent block logic took longer than the target block weight, going to use the full core", + ); + } + + crate::BlockWeightMode::::put(BlockWeightMode::FullCore); + + // Inform the node that this block uses the full core. + frame_system::Pallet::::deposit_log( + CumulusDigestItem::UseFullCore.to_digest_item(), + ); + } +} diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs new file mode 100644 index 0000000000000..f45e8604d9d31 --- /dev/null +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -0,0 +1,788 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::{mock::*, transaction_extension::DynamicMaxBlockWeight, *}; +use assert_matches::assert_matches; +use codec::Compact; +use cumulus_primitives_core::{ + BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, +}; +use frame_support::{ + assert_ok, + dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}, + pallet_prelude::{InvalidTransaction, TransactionSource}, + traits::PreInherents, + weights::constants::WEIGHT_REF_TIME_PER_SECOND, +}; +use frame_system::{CheckWeight, RawOrigin as SystemOrigin}; +use polkadot_primitives::MAX_POV_SIZE; +use sp_core::ConstU32; +use sp_runtime::{ + traits::{DispatchTransaction, Header, TransactionExtension}, + Digest, +}; + +type TxExtension = DynamicMaxBlockWeight, ConstU32<4>>; +type TxExtensionOnlyOperational = + DynamicMaxBlockWeight, ConstU32<4>, 10, false>; +type MaximumBlockWeight = MaxParachainBlockWeight>; + +#[test] +fn test_single_core_single_block() { + TestExtBuilder::new().number_of_cores(1).build().execute_with(|| { + let weight = MaxParachainBlockWeight::>::get(); + + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); + }); +} + +#[test] +fn test_single_core_multiple_blocks() { + TestExtBuilder::new().number_of_cores(1).build().execute_with(|| { + let weight = MaxParachainBlockWeight::>::get(); + + // With 1 core and 4 target blocks, should get 0.5s ref time and 1/4 PoV size per block + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND / 4); + assert_eq!(weight.proof_size(), (1 * MAX_POV_SIZE as u64) / 4); + }); +} + +#[test] +fn test_multiple_cores_single_block() { + TestExtBuilder::new().number_of_cores(3).build().execute_with(|| { + let weight = MaxParachainBlockWeight::>::get(); + + // With 3 cores and 1 target blocks, should get 2s ref time and 1 PoV size + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); + }); +} + +#[test] +fn 
test_multiple_cores_multiple_blocks() { + TestExtBuilder::new().number_of_cores(2).build().execute_with(|| { + let weight = MaxParachainBlockWeight::>::get(); + + // With 2 cores and 4 target blocks, should get 1s ref time and 2x PoV size / 4 per + // block + assert_eq!(weight.ref_time(), 2 * 2 * WEIGHT_REF_TIME_PER_SECOND / 4); + assert_eq!(weight.proof_size(), (2 * MAX_POV_SIZE as u64) / 4); + }); +} + +#[test] +fn test_no_core_info() { + TestExtBuilder::new().build().execute_with(|| { + let weight = MaxParachainBlockWeight::>::get(); + + // Without core info, it takes the `PreviousCoreCount` into account. + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND / 4); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64 / 4); + }); +} + +#[test] +fn test_zero_cores() { + TestExtBuilder::new().number_of_cores(0).build().execute_with(|| { + let weight = MaxParachainBlockWeight::>::get(); + + // With 0 cores, should return conservative default + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); + }); +} + +#[test] +fn test_zero_target_blocks() { + TestExtBuilder::new().number_of_cores(2).build().execute_with(|| { + let weight = MaxParachainBlockWeight::>::get(); + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); + }); +} + +#[test] +fn test_target_block_weight_calculation() { + TestExtBuilder::new().number_of_cores(4).build().execute_with(|| { + // Test target_block_weight function directly + // Both calls return the same since ConstU32<4> is fixed at compile time + let weight = MaxParachainBlockWeight::>::target_block_weight(); + + assert_eq!(weight.ref_time(), 3 * 2 * WEIGHT_REF_TIME_PER_SECOND / 4); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); + }); +} + +#[test] +fn test_max_ref_time_per_core_cap() { + TestExtBuilder::new().number_of_cores(8).build().execute_with(|| { + // With 8 cores and 4 target blocks, 
ref time per block should be capped at 2s per core + let weight = MaxParachainBlockWeight::>::get(); + + // 8 cores * 2s = 16s total, divided by 4 blocks = 4s, but capped at 6s for all blocks in + // total + assert_eq!(weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND * 3 / 4); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64); + }); +} + +#[test] +fn test_target_block_weight_with_digest_edge_cases() { + TestExtBuilder::new().build().execute_with(|| { + // Test with empty digest + let empty_digest = Digest::default(); + let weight = + MaxParachainBlockWeight::>::target_block_weight_with_digest( + &empty_digest, + ); + assert_eq!(weight, MaxParachainBlockWeight::>::FULL_CORE_WEIGHT / 4); + + // Test with digest containing core info + let core_info = CoreInfo { + selector: CoreSelector(0), + claim_queue_offset: ClaimQueueOffset(0), + number_of_cores: Compact(2u16), + }; + + let digest = Digest { logs: vec![CumulusDigestItem::CoreInfo(core_info).to_digest_item()] }; + + // With 2 cores and 4 target blocks: (2 cores * 2s) / 4 blocks = 1s + let weight = + MaxParachainBlockWeight::>::target_block_weight_with_digest( + &digest, + ); + assert_eq!(weight.ref_time(), 2 * 2 * WEIGHT_REF_TIME_PER_SECOND / 4); + assert_eq!(weight.proof_size(), (2 * MAX_POV_SIZE as u64) / 4); + }); +} + +#[test] +fn test_is_first_block_in_core_functions() { + TestExtBuilder::new().number_of_cores(1).build().execute_with(|| { + let empty_digest = Digest::default(); + assert!(super::is_first_block_in_core_with_digest(&empty_digest).is_none()); + + // Test with bundle info index = 0 - should return true + let bundle_info_first = BundleInfo { index: 0, maybe_last: false }; + let digest_item_first = CumulusDigestItem::BundleInfo(bundle_info_first).to_digest_item(); + let mut digest_first = Digest::default(); + digest_first.push(digest_item_first); + assert!(super::is_first_block_in_core_with_digest(&digest_first).unwrap()); + + // Test with bundle info index > 0 - should return false + let 
bundle_info_not_first = BundleInfo { index: 5, maybe_last: true }; + let digest_item_not_first = + CumulusDigestItem::BundleInfo(bundle_info_not_first).to_digest_item(); + let mut digest_not_first = Digest::default(); + digest_not_first.push(digest_item_not_first); + assert!(!super::is_first_block_in_core_with_digest(&digest_not_first).unwrap()); + }); +} + +#[test] +fn tx_extension_sets_fraction_of_core_mode() { + use frame_support::dispatch::{DispatchClass, DispatchInfo}; + + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + initialize_block_finished(); + + // Create a small transaction + let small_weight = Weight::from_parts(100_000, 1024); + let info = DispatchInfo { + call_weight: small_weight, + class: DispatchClass::Normal, + pays_fee: frame_support::dispatch::Pays::Yes, + ..Default::default() + }; + + assert_ok!(TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + )); + + assert_eq!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { first_transaction_index: Some(0) }) + ); + }); +} + +#[test] +fn tx_extension_large_tx_enables_full_core_usage() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + initialize_block_finished(); + + // Create a transaction larger than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + let info = DispatchInfo { + call_weight: large_weight, + class: DispatchClass::Normal, + ..Default::default() + }; + + assert_ok!(TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + )); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::PotentialFullCore { 
first_transaction_index: Some(0), .. }) + ); + + let mut post_info = + PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; + + assert_ok!(TxExtension::post_dispatch((), &info, &mut post_info, 0, &Ok(()))); + + assert_eq!(crate::BlockWeightMode::::get(), Some(BlockWeightMode::FullCore)); + assert!(has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get().ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + }); +} + +#[test] +fn tx_extension_only_allows_large_operational_tx_to_enable_full_core_usage() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + initialize_block_finished(); + + // Create a transaction larger than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + let mut info = DispatchInfo { + call_weight: large_weight, + class: DispatchClass::Normal, + ..Default::default() + }; + + // As `Normal` transaction this should be rejected. + assert_eq!( + TxExtensionOnlyOperational::validate_and_prepare( + TxExtensionOnlyOperational::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + ) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() + ); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { first_transaction_index: None }) + ); + + info.class = DispatchClass::Operational; + + // As `Operational` transaction this is accepted. + assert_ok!(TxExtensionOnlyOperational::validate_and_prepare( + TxExtensionOnlyOperational::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + )); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::PotentialFullCore { first_transaction_index: Some(0), .. 
}) + ); + + let mut post_info = + PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; + + assert_ok!(TxExtension::post_dispatch((), &info, &mut post_info, 0, &Ok(()))); + + assert_eq!(crate::BlockWeightMode::::get(), Some(BlockWeightMode::FullCore)); + assert!(has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get().ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + }); +} + +#[test] +fn tx_extension_large_tx_with_refund_goes_back_to_fractional() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + initialize_block_finished(); + + // Create a transaction larger than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + let info = DispatchInfo { + call_weight: large_weight, + class: DispatchClass::Normal, + ..Default::default() + }; + + assert_ok!(TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + )); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::PotentialFullCore { first_transaction_index: Some(0), .. }) + ); + + let mut post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(5000, 5000)), + pays_fee: Default::default(), + }; + + assert_ok!(TxExtension::post_dispatch((), &info, &mut post_info, 0, &Ok(()))); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { .. 
}) + ); + assert!(!has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get(), target_weight); + }); +} + +#[test] +fn tx_extension_large_tx_is_rejected_on_non_first_block() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(false) + .build() + .execute_with(|| { + initialize_block_finished(); + + // Create a transaction larger than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + let info = DispatchInfo { + call_weight: large_weight, + class: DispatchClass::Normal, + ..Default::default() + }; + + assert_eq!( + TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + ) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() + ); + + // Should stay in FractionOfCore mode (not PotentialFullCore) since not first block + assert_eq!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { first_transaction_index: None }) + ); + assert!(!has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get(), target_weight); + }); +} + +#[test] +fn tx_extension_post_dispatch_to_full_core_because_of_manual_weight() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(false) + .build() + .execute_with(|| { + initialize_block_finished(); + + let target_weight = + MaxParachainBlockWeight::>::target_block_weight(); + + // Transaction announces small weight + let small_weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND / 10, 1024); + let info = DispatchInfo { call_weight: small_weight, ..Default::default() }; + + assert_ok!(TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + )); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { 
first_transaction_index: Some(0) }) + ); + + // But actually uses much more weight (bug in weight annotation) + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + register_weight(large_weight, DispatchClass::Normal); + + let mut post_info = + PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; + assert_ok!(TxExtension::post_dispatch((), &info, &mut post_info, 0, &Ok(()))); + + // Should transition to FullCore due to exceeding limit + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FullCore) + ); + + assert!(has_use_full_core_digest()); + }); +} + +#[test] +fn tx_extension_large_tx_after_limit_is_rejected() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + initialize_block_finished(); + + // Set some index above the limit. + System::set_extrinsic_index(20); + + // Create a transaction larger than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + let info = DispatchInfo { call_weight: large_weight, ..Default::default() }; + + assert_eq!( + TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + ) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() + ); + + assert_eq!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { first_transaction_index: None }) + ); + assert!(!has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get(), target_weight); + }); +} + +#[test] +fn tx_extension_large_weight_before_first_tx() { + for first_block_in_core in [true, false] { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(first_block_in_core) + .build() + .execute_with(|| { + initialize_block_finished(); + + let 
target_weight = MaximumBlockWeight::target_block_weight(); + let large_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + register_weight(large_weight, DispatchClass::Normal); + + let small_weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND / 10, 1024); + let info = DispatchInfo { call_weight: small_weight, ..Default::default() }; + + let res = TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + ); + + if first_block_in_core { + assert!(res.is_ok()) + } else { + assert_eq!(res.unwrap_err(), InvalidTransaction::ExhaustsResources.into()); + } + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FullCore) + ); + + assert!(has_use_full_core_digest()); + assert_eq!(MaximumBlockWeight::get().ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + + if !first_block_in_core { + // Should have registered FULL_CORE_WEIGHT to prevent more transactions + let final_remaining = frame_system::Pallet::::remaining_block_weight(); + assert!(final_remaining + .consumed() + .all_gte(MaximumBlockWeight::FULL_CORE_WEIGHT)); + } + }); + } +} + +#[test] +fn pre_inherents_hook_first_block_over_limit() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + // Simulate on_initialize consuming more than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let excessive_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + register_weight(excessive_weight, DispatchClass::Mandatory); + + // Call pre_inherents hook + DynamicMaxBlockWeightHooks::>::pre_inherents(); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FullCore) + ); + + // Should have UseFullCore digest + assert!(has_use_full_core_digest()); + }); +} + +#[test] +fn 
pre_inherents_hook_non_first_block_over_limit() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(false) + .build() + .execute_with(|| { + // Simulate on_initialize consuming more than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let excessive_weight = target_weight + .saturating_add(Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 1024 * 1024)); + + register_weight(excessive_weight, DispatchClass::Mandatory); + + // Call pre_inherents hook + DynamicMaxBlockWeightHooks::>::pre_inherents(); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FullCore) + ); + + assert!(has_use_full_core_digest()); + + // Should have registered FULL_CORE_WEIGHT to prevent more transactions + let final_remaining = frame_system::Pallet::::remaining_block_weight(); + assert!(final_remaining.consumed().all_gte(MaximumBlockWeight::FULL_CORE_WEIGHT)); + }); +} + +#[test] +fn pre_inherents_hook_under_limit_no_change() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + // Simulate on_initialize consuming less than target weight + let target_weight = MaximumBlockWeight::target_block_weight(); + let small_weight = + Weight::from_parts(target_weight.ref_time() / 2, target_weight.proof_size() / 2); + + register_weight(small_weight, DispatchClass::Mandatory); + + // Call pre_inherents hook + DynamicMaxBlockWeightHooks::>::pre_inherents(); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::FractionOfCore { first_transaction_index: None }) + ); + + // Should NOT have UseFullCore digest + assert!(!has_use_full_core_digest()); + }); +} + +#[test] +fn max_weight_without_bundle_info() { + TestExtBuilder::new().number_of_cores(2).build().execute_with(|| { + // Without bundle info, cannot determine if first block + // Should still work but max weight determination will be conservative + + 
frame_system::Pallet::::note_finished_initialize(); + + let max_weight = MaximumBlockWeight::get(); + + // With 2 cores and 12 target blocks + let expected_weight = Weight::from_parts( + 2 * 2 * WEIGHT_REF_TIME_PER_SECOND / TARGET_BLOCK_RATE as u64, + 2 * MAX_POV_SIZE as u64 / TARGET_BLOCK_RATE as u64, + ); + + assert_eq!(max_weight, expected_weight); + }); +} + +#[test] +fn ref_time_and_pov_size_cap() { + TestExtBuilder::new().number_of_cores(10).build().execute_with(|| { + frame_system::Pallet::::note_finished_initialize(); + + let max_weight = MaxParachainBlockWeight::>::get(); + + // At most one core will always only be able to use the resources of one core. + assert_eq!(max_weight.ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); + assert_eq!(max_weight.proof_size(), MAX_POV_SIZE as u64); + + let max_weight = MaxParachainBlockWeight::>::get(); + + // Each blocks get its own core (can use the max pov size), but ref time of all blocks + // together is in max `6s` + assert_eq!(max_weight.ref_time(), 6 * WEIGHT_REF_TIME_PER_SECOND / 4); + assert_eq!(max_weight.proof_size(), MAX_POV_SIZE as u64); + }); +} + +#[test] +fn executive_validate_block_handles_normal_transactions() { + TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { + let call = RuntimeCall::TestPallet(test_pallet::Call::heavy_call_normal {}); + + let xt = Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), ().into()); + + assert!(Executive::validate_transaction( + TransactionSource::External, + xt.clone(), + Default::default() + ) + .is_ok()); + }); + + TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { + let call = RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::heavy_call_normal {}); + + let xt = ExtrinsicOnlyOperational::new_signed(call, 1u64.into(), 1u64.into(), ().into()); + + assert_eq!( + ExecutiveOnlyOperational::validate_transaction( + TransactionSource::External, + xt, + Default::default() + ) + .unwrap_err(), + 
InvalidTransaction::ExhaustsResources.into() + ); + }); +} + +#[test] +fn executive_validate_block_handles_operational_transactions() { + TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { + let call = RuntimeCall::TestPallet(test_pallet::Call::heavy_call_operational {}); + + let xt = Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), ().into()); + + assert!(Executive::validate_transaction( + TransactionSource::External, + xt.clone(), + Default::default() + ) + .is_ok()); + }); + + TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { + let call = + RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::heavy_call_operational {}); + + let xt = ExtrinsicOnlyOperational::new_signed(call, 1u64.into(), 1u64.into(), ().into()); + + assert!(ExecutiveOnlyOperational::validate_transaction( + TransactionSource::External, + xt, + Default::default() + ) + .is_ok()); + }); +} + +#[test] +fn executive_with_operational_only_applies_big_inherent() { + TestExtBuilder::new() + .number_of_cores(1) + .first_block_in_core(true) + .build() + .execute_with(|| { + Executive::initialize_block(&Header::new( + 1, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + )); + + let call = + RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::heavy_call_mandatory {}); + + let xt = ExtrinsicOnlyOperational::new_bare(call); + + ExecutiveOnlyOperational::apply_extrinsic(xt).unwrap().unwrap(); + }); +} diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs new file mode 100644 index 0000000000000..e54fc2985b78c --- /dev/null +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -0,0 +1,489 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{ + block_weight_over_target_block_weight, is_first_block_in_core_with_digest, BlockWeightMode, + MaxParachainBlockWeight, LOG_TARGET, +}; +use crate::WeightInfo; +use alloc::vec::Vec; +use codec::{Decode, DecodeWithMemTracking, Encode}; +use cumulus_primitives_core::CumulusDigestItem; +use frame_support::{ + dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}, + pallet_prelude::{ + InvalidTransaction, TransactionSource, TransactionValidityError, ValidTransaction, + }, + weights::Weight, +}; +use scale_info::TypeInfo; +use sp_core::Get; +use sp_runtime::{ + traits::{DispatchInfoOf, Dispatchable, Implication, PostDispatchInfoOf, TransactionExtension}, + DispatchResult, +}; + +/// Transaction extension that dynamically changes the max block weight. +/// +/// With block bundling, parachains are running with block weights that may not allow certain +/// transactions to be applied, e.g. a runtime upgrade. To ensure that these transactions can still +/// be applied, this transaction extension can change the max block weight as required. There are +/// multiple requirements for it to change the block weight: +/// +/// 1. Only the first block of a core is allowed to change its block weight. +/// +/// 2. Any `inherent` or any transaction up to `MAX_TRANSACTION_TO_CONSIDER` requires more block +/// weight than the target block weight. 
Target block weight is the max weight for the respective +/// extrinsic class. +/// +/// Because the node is tracking the wall clock time while building a block to abort block +/// production if it takes too long, we do not allow any block to change the block weight. The node +/// knows that the first block of a core may runs longer. So, the node allows this block to take up +/// to `2s` of wall clock time. `2s` is the time each `PoV` gets on the relay chain for its +/// validation or in other words the maximum core execution time. The extension sets the +/// [`CumulusDigestItem::UseFullCore`] digest when the block should occupy the entire core. +/// +/// Before dispatching an extrinsic the extension will check the requirements and set the +/// appropriate [`BlockWeightMode`]. After the extrinsic has finished, the checks from before +/// dispatching the extrinsic are repeated with the post dispatch weights. The [`BlockWeightMode`] +/// may is changed properly. +/// +/// # Note +/// +/// The extension requires that any of the inner extensions sets the +/// [`BlockWeight`](frame_system::BlockWeight). Otherwise the weight tracking is not working +/// properly. Normally this is done by [`CheckWeight`](frame_system::CheckWeight). +/// +/// # Generic parameters +/// +/// - `Config`: The [`Config`](crate::Config) trait of this pallet. +/// +/// - `Inner`: The inner transaction extensions aka the other transaction extensions to be used by +/// the runtime. +/// +/// - `TargetBlockRate`: The target block rate the parachain should be running with. Or in other +/// words, the number of blocks the parachain should produce in `6s`(relay chain slot duration). +/// +/// - `MAX_TRANSACTION`: The maximum number of transactions to consider before giving up to change +/// the max block weight. +/// +/// - `ALLOW_NORMAL`: Should transactions with a dispatch class `Normal` be allowed to change the +/// max block weight? 
+#[derive(Encode, Decode, DecodeWithMemTracking, TypeInfo)] +#[derive_where::derive_where(Clone, Eq, PartialEq, Default; Inner)] +#[scale_info(skip_type_params(Config, TargetBlockRate))] +pub struct DynamicMaxBlockWeight< + Config, + Inner, + TargetBlockRate, + const MAX_TRANSACTION_TO_CONSIDER: u32 = 10, + const ALLOW_NORMAL: bool = true, +>(pub Inner, core::marker::PhantomData<(Config, TargetBlockRate)>); + +impl + DynamicMaxBlockWeight +{ + /// Create a new [`DynamicMaxBlockWeight`] instance. + pub fn new(s: S) -> Self { + Self(s, Default::default()) + } +} + +impl< + Config, + Inner, + TargetBlockRate, + const MAX_TRANSACTION_TO_CONSIDER: u32, + const ALLOW_NORMAL: bool, + > DynamicMaxBlockWeight +where + Config: crate::Config, + TargetBlockRate: Get, +{ + /// Should be executed before `validate` is called for any inner extension. + fn pre_validate_extrinsic( + info: &DispatchInfo, + len: usize, + ) -> Result<(), TransactionValidityError> { + let is_not_inherent = frame_system::Pallet::::inherents_applied(); + let extrinsic_index = frame_system::Pallet::::extrinsic_index().unwrap_or_default(); + let transaction_index = is_not_inherent.then(|| extrinsic_index); + + crate::BlockWeightMode::::mutate(|mode| { + let current_mode = *mode.get_or_insert_with(|| BlockWeightMode::FractionOfCore { + first_transaction_index: transaction_index, + }); + + log::trace!( + target: LOG_TARGET, + "About to pre-validate an extrinsic. current_mode={current_mode:?}, transaction_index={transaction_index:?}" + ); + + match current_mode { + // We are already allowing the full core, not that much more to do here. + BlockWeightMode::FullCore => {}, + BlockWeightMode::PotentialFullCore { first_transaction_index, .. } | + BlockWeightMode::FractionOfCore { first_transaction_index } => { + let is_potential = + matches!(current_mode, BlockWeightMode::PotentialFullCore { .. 
}); + debug_assert!( + !is_potential, + "`PotentialFullCore` should resolve to `FullCore` or `FractionOfCore` after applying a transaction.", + ); + + let digest = frame_system::Pallet::::digest(); + let block_weight_over_limit = extrinsic_index == 0 + && block_weight_over_target_block_weight::(); + + let block_weights = Config::BlockWeights::get(); + let target_weight = block_weights.get(info.class).max_total.unwrap_or_else( + || MaxParachainBlockWeight::::target_block_weight_with_digest(&digest).saturating_sub(block_weights.base_block) + ); + + // Protection against a misconfiguration as this should be detected by the pre-inherent hook. + if block_weight_over_limit { + *mode = Some(BlockWeightMode::FullCore); + + // Inform the node that this block uses the full core. + frame_system::Pallet::::deposit_log( + CumulusDigestItem::UseFullCore.to_digest_item(), + ); + + if !is_first_block_in_core_with_digest(&digest).unwrap_or(false) { + // We are already above the allowed maximum and do not want to accept any more + // extrinsics. + frame_system::Pallet::::register_extra_weight_unchecked( + MaxParachainBlockWeight::::FULL_CORE_WEIGHT, + DispatchClass::Mandatory, + ); + } + + log::error!( + target: LOG_TARGET, + "Inherent block logic took longer than the target block weight, \ + `DynamicMaxBlockWeightHooks` not registered as `PreInherents` hook!", + ); + } else if info + .total_weight() + // The extrinsic lengths counts towards the POV size + .saturating_add(Weight::from_parts(0, len as u64)) + .any_gt(target_weight) + { + // When `ALLOW_NORMAL` is `true`, we want to allow all classes of transactions. Inherents are always allowed. + let class_allowed = if ALLOW_NORMAL { true } else { info.class == DispatchClass::Operational } + || info.class == DispatchClass::Mandatory; + + // If the `BundleInfo` digest is not set (function returns `None`), it means we are in some offchain + // call like `validate_block`. 
In this case we assume this is the first block, otherwise these big + // transactions will never be able to enter the tx pool. + let is_first_block = is_first_block_in_core_with_digest(&digest).unwrap_or(true); + + if transaction_index.unwrap_or_default().saturating_sub(first_transaction_index.unwrap_or_default()) < MAX_TRANSACTION_TO_CONSIDER + && is_first_block && class_allowed { + log::trace!( + target: LOG_TARGET, + "Enabling `PotentialFullCore` mode for extrinsic", + ); + + *mode = Some(BlockWeightMode::PotentialFullCore { + target_weight, + // While applying inherents `extrinsic_index` and `first_transaction_index` will be `None`. + // When the first transaction is applied, we want to store the index. + first_transaction_index: first_transaction_index.or(transaction_index), + }); + } else { + log::trace!( + target: LOG_TARGET, + "Transaction is over the block limit, but is either outside of the allowed window or the dispatch class is not allowed.", + ); + + return Err(InvalidTransaction::ExhaustsResources) + } + } else if is_potential { + log::trace!( + target: LOG_TARGET, + "Resetting back to `FractionOfCore`" + ); + *mode = + Some(BlockWeightMode::FractionOfCore { first_transaction_index: first_transaction_index.or(transaction_index) }); + } else { + log::trace!( + target: LOG_TARGET, + "Not changing block weight mode" + ); + + *mode = + Some(BlockWeightMode::FractionOfCore { first_transaction_index: first_transaction_index.or(transaction_index) }); + } + }, + }; + + Ok(()) + }).map_err(Into::into) + } + + /// Should be called after all inner extensions have finished executing their post dispatch + /// handling. + /// + /// Returns the weight to refund. Aka the weight that wasn't used by this extension. + fn post_dispatch_extrinsic(info: &DispatchInfo) -> Weight { + crate::BlockWeightMode::::mutate(|weight_mode| { + let Some(mode) = *weight_mode else { return Weight::zero() }; + + match mode { + // If the previous mode was already `FullCore`, we are fine. 
+ BlockWeightMode::FullCore => + Config::WeightInfo::block_weight_tx_extension_max_weight() + .saturating_sub(Config::WeightInfo::block_weight_tx_extension_full_core()), + BlockWeightMode::FractionOfCore { .. } => { + let digest = frame_system::Pallet::::digest(); + let target_block_weight = + MaxParachainBlockWeight::::target_block_weight_with_digest(&digest); + + let is_above_limit = frame_system::Pallet::::remaining_block_weight() + .consumed() + .any_gt(target_block_weight); + + // If we are above the limit, it means the transaction used more weight than + // what it had announced, which should not happen. + if is_above_limit { + log::error!( + target: LOG_TARGET, + "Extrinsic ({}) used more weight than what it had announced and pushed the \ + block above the allowed weight limit!", + frame_system::Pallet::::extrinsic_index().unwrap_or_default() + ); + + // If this isn't the first block in a core, we register the full core weight + // to ensure that we don't include any other transactions. Because we don't + // know how many weight of the core was already used by the blocks before. + if !is_first_block_in_core_with_digest(&digest).unwrap_or(false) { + log::error!( + target: LOG_TARGET, + "Registering `FULL_CORE_WEIGHT` to ensure no other transaction is included \ + in this block, because this isn't the first block in the core!", + ); + + frame_system::Pallet::::register_extra_weight_unchecked( + MaxParachainBlockWeight::::FULL_CORE_WEIGHT, + DispatchClass::Mandatory, + ); + } + + *weight_mode = Some(BlockWeightMode::FullCore); + + // Inform the node that this block uses the full core. + frame_system::Pallet::::deposit_log( + CumulusDigestItem::UseFullCore.to_digest_item(), + ); + } + + Config::WeightInfo::block_weight_tx_extension_max_weight().saturating_sub( + Config::WeightInfo::block_weight_tx_extension_stays_fraction_of_core(), + ) + }, + // Now we need to check if the transaction required more weight than a fraction of a + // core block. 
+ BlockWeightMode::PotentialFullCore { first_transaction_index, target_weight } => { + let block_weight = frame_system::BlockWeight::::get(); + let extrinsic_class_weight = block_weight.get(info.class); + + if extrinsic_class_weight.any_gt(target_weight) { + log::trace!( + target: LOG_TARGET, + "Extrinsic class weight {extrinsic_class_weight:?} above target weight {target_weight:?}, enabling `FullCore` mode." + ); + + *weight_mode = Some(BlockWeightMode::FullCore); + + // Inform the node that this block uses the full core. + frame_system::Pallet::::deposit_log( + CumulusDigestItem::UseFullCore.to_digest_item(), + ); + } else { + log::trace!( + target: LOG_TARGET, + "Extrinsic class weight {extrinsic_class_weight:?} not above target \ + weight {target_weight:?}, going back to `FractionOfCore` mode." + ); + + *weight_mode = + Some(BlockWeightMode::FractionOfCore { first_transaction_index }); + } + + // We run into the worst case, so no refund :) + Weight::zero() + }, + } + }) + } +} + +impl< + Config, + Inner, + TargetBlockRate, + const MAX_TRANSACTION_TO_CONSIDER: u32, + const ALLOW_NORMAL: bool, + > From + for DynamicMaxBlockWeight< + Config, + Inner, + TargetBlockRate, + MAX_TRANSACTION_TO_CONSIDER, + ALLOW_NORMAL, + > +{ + fn from(s: Inner) -> Self { + Self::new(s) + } +} + +impl< + Config, + Inner: core::fmt::Debug, + TargetBlockRate, + const MAX_TRANSACTION_TO_CONSIDER: u32, + const ALLOW_NORMAL: bool, + > core::fmt::Debug + for DynamicMaxBlockWeight< + Config, + Inner, + TargetBlockRate, + MAX_TRANSACTION_TO_CONSIDER, + ALLOW_NORMAL, + > +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "DynamicMaxBlockWeight<{:?}>", self.0) + } +} + +impl< + Config: crate::Config + Send + Sync, + Inner: TransactionExtension, + TargetBlockRate: Get + Send + Sync + 'static, + const MAX_TRANSACTION_TO_CONSIDER: u32, + const ALLOW_NORMAL: bool, + > TransactionExtension + for DynamicMaxBlockWeight< + Config, + Inner, + 
TargetBlockRate, + MAX_TRANSACTION_TO_CONSIDER, + ALLOW_NORMAL, + > +where + Config::RuntimeCall: Dispatchable, +{ + const IDENTIFIER: &'static str = "DynamicMaxBlockWeight"; + + type Implicit = Inner::Implicit; + + type Val = Inner::Val; + + type Pre = Inner::Pre; + + fn implicit(&self) -> Result { + self.0.implicit() + } + + fn metadata() -> Vec { + let mut inner = Inner::metadata(); + inner.push(sp_runtime::traits::TransactionExtensionMetadata { + identifier: "DynamicMaxBlockWeight", + ty: scale_info::meta_type::<()>(), + implicit: scale_info::meta_type::<()>(), + }); + inner + } + + fn weight(&self, _: &Config::RuntimeCall) -> Weight { + Config::WeightInfo::block_weight_tx_extension_max_weight() + } + + fn validate( + &self, + origin: Config::RuntimeOrigin, + call: &Config::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + self_implicit: Self::Implicit, + inherited_implication: &impl Implication, + source: TransactionSource, + ) -> Result<(ValidTransaction, Self::Val, Config::RuntimeOrigin), TransactionValidityError> { + Self::pre_validate_extrinsic(info, len)?; + + self.0 + .validate(origin, call, info, len, self_implicit, inherited_implication, source) + } + + fn prepare( + self, + val: Self::Val, + origin: &Config::RuntimeOrigin, + call: &Config::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + ) -> Result { + self.0.prepare(val, origin, call, info, len) + } + + fn post_dispatch_details( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfo, + len: usize, + result: &DispatchResult, + ) -> Result { + let weight_refund = Inner::post_dispatch_details(pre, info, post_info, len, result)?; + + let extra_refund = Self::post_dispatch_extrinsic(info); + + Ok(weight_refund.saturating_add(extra_refund)) + } + + fn bare_validate( + call: &Config::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + ) -> frame_support::pallet_prelude::TransactionValidity { + Inner::bare_validate(call, info, len) + } + + fn bare_validate_and_prepare( + call: 
&Config::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(), TransactionValidityError> { + Self::pre_validate_extrinsic(info, len)?; + + Inner::bare_validate_and_prepare(call, info, len) + } + + fn bare_post_dispatch( + info: &DispatchInfoOf, + post_info: &mut PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + Inner::bare_post_dispatch(info, post_info, len, result)?; + + Self::post_dispatch_extrinsic(info); + + Ok(()) + } +} diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 489a7452480c0..f86f59df59e74 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -33,9 +33,10 @@ use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec}; use codec::{Decode, DecodeLimit, Encode}; use core::cmp; use cumulus_primitives_core::{ - relay_chain, AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, - CumulusDigestItem, GetChannelInfo, ListChannelInfos, MessageSendError, OutboundHrmpMessage, - ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender, XcmpMessageHandler, + relay_chain::{self, UMPSignal, UMP_SEPARATOR}, + AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, CumulusDigestItem, + GetChannelInfo, ListChannelInfos, MessageSendError, OutboundHrmpMessage, ParaId, + PersistedValidationData, UpwardMessage, UpwardMessageSender, XcmpMessageHandler, XcmpMessageSource, }; use cumulus_primitives_parachain_inherent::{v0, MessageQueueChain, ParachainInherentData}; @@ -62,18 +63,15 @@ use xcm::{latest::XcmHash, VersionedLocation, VersionedXcm, MAX_XCM_DECODE_DEPTH use xcm_builder::InspectMessageQueues; mod benchmarking; +pub mod block_weight; +pub mod consensus_hook; pub mod migration; mod mock; +pub mod relay_state_snapshot; #[cfg(test)] mod tests; -pub mod weights; - -pub use weights::WeightInfo; - mod unincluded_segment; - -pub mod 
consensus_hook; -pub mod relay_state_snapshot; +pub mod weights; #[macro_use] pub mod validate_block; mod descendant_validation; @@ -107,10 +105,11 @@ pub use consensus_hook::{ConsensusHook, ExpectParentIncluded}; pub use cumulus_pallet_parachain_system_proc_macro::register_validate_block; pub use relay_state_snapshot::{MessagingStateSnapshot, RelayChainStateProof}; pub use unincluded_segment::{Ancestor, UsedBandwidth}; +pub use weights::WeightInfo; pub use pallet::*; -const LOG_TARGET: &str = "parachain-system"; +const LOG_TARGET: &str = "runtime::parachain-system"; /// Something that can check the associated relay block number. /// @@ -187,8 +186,9 @@ pub mod ump_constants { #[frame_support::pallet] pub mod pallet { use super::*; + use codec::Compact; use cumulus_primitives_core::CoreInfoExistsAtMaxOnce; - use frame_support::pallet_prelude::*; + use frame_support::pallet_prelude::{ValueQuery, *}; use frame_system::pallet_prelude::*; #[pallet::pallet] @@ -361,8 +361,24 @@ pub mod pallet { UpwardMessages::::put(&up[..num as usize]); *up = up.split_off(num as usize); - // Send the core selector UMP signal. - Self::send_ump_signal(); + if let Some(core_info) = + CumulusDigestItem::find_core_info(&frame_system::Pallet::::digest()) + { + PendingUpwardSignals::::mutate(|signals| { + signals.push( + UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset) + .encode(), + ); + }); + + PreviousCoreCount::::put(core_info.number_of_cores); + } else { + // Without the digest, we assume that it is `1`. + PreviousCoreCount::::put(Compact(1u16)); + } + + // Send the pending UMP signals. + Self::send_ump_signals(); // If the total size of the pending messages is less than the threshold, // we decrease the fee factor, since the queue is less congested. @@ -472,6 +488,8 @@ pub mod pallet { weight += T::DbWeight::get().reads_writes(3, 2); } + BlockWeightMode::::kill(); + // Remove the validation from the old block. 
ValidationData::::kill(); // NOTE: Killing here is required to at least include the trie nodes down to the keys @@ -585,7 +603,7 @@ pub mod pallet { validation_data: vfp, relay_chain_state, relay_parent_descendants, - collator_peer_id: _, + collator_peer_id, } = data; // Check that the associated relay chain block number is as expected. @@ -693,6 +711,12 @@ pub mod pallet { ::on_validation_data(&vfp); + if let Some(collator_peer_id) = collator_peer_id { + PendingUpwardSignals::::mutate(|signals| { + signals.push(UMPSignal::ApprovedPeer(collator_peer_id).encode()); + }); + } + total_weight.saturating_accrue(Self::enqueue_inbound_downward_messages( relevant_messaging_state.dmq_mqc_head, inbound_messages_data.downward_messages, @@ -760,6 +784,22 @@ pub mod pallet { NotScheduled, } + /// The current block weight mode. + /// + /// This is used to determine what is the maximum allowed block weight, for more information see + /// [`block_weight`]. + #[pallet::storage] + #[pallet::whitelist_storage] + pub type BlockWeightMode = + StorageValue<_, block_weight::BlockWeightMode, OptionQuery>; + + /// The core count available to the parachain in the previous block. + /// + /// This is mainly used for offchain functionality to calculate the correct target block weight. + #[pallet::storage] + #[pallet::whitelist_storage] + pub type PreviousCoreCount = StorageValue<_, Compact, OptionQuery>; + /// Latest included block descendants the runtime accepted. In other words, these are /// ancestors of the currently executing block which have not been included in the observed /// relay-chain state. @@ -905,14 +945,20 @@ pub mod pallet { /// Upward messages that were sent in a block. /// - /// This will be cleared in `on_initialize` of each new block. + /// This will be cleared in `on_initialize` for each new block. #[pallet::storage] pub type UpwardMessages = StorageValue<_, Vec, ValueQuery>; - /// Upward messages that are still pending and not yet send to the relay chain. 
+ /// Upward messages that are still pending and not yet sent to the relay chain. #[pallet::storage] pub type PendingUpwardMessages = StorageValue<_, Vec, ValueQuery>; + /// Upward signals that are still pending and not yet sent to the relay chain. + /// + /// This will be cleared in `on_finalize` for each block. + #[pallet::storage] + pub type PendingUpwardSignals = StorageValue<_, Vec, ValueQuery>; + /// The factor to multiply the base delivery fee by for UMP. #[pallet::storage] pub type UpwardDeliveryFeeFactor = @@ -1384,7 +1430,11 @@ impl Pallet { // // If this fails, the parachain needs to wait for ancestors to be included before // a new block is allowed. - assert!(new_len < capacity.get(), "no space left for the block in the unincluded segment"); + assert!( + new_len < capacity.get(), + "No space left for the block in the unincluded segment: new_len({new_len}) < capacity({})", + capacity.get() + ); weight_used } @@ -1448,7 +1498,7 @@ impl Pallet { // Ensure that `ValidationData` exists. We do not care about the validation data per se, // but we do care about the [`UpgradeRestrictionSignal`] which arrives with the same // inherent. - ensure!(>::exists(), Error::::ValidationDataNotAvailable,); + ensure!(>::exists(), Error::::ValidationDataNotAvailable); ensure!(>::get().is_none(), Error::::ProhibitedByPolkadot); ensure!(!>::exists(), Error::::OverlappingUpgrades); @@ -1507,23 +1557,15 @@ impl Pallet { CustomValidationHeadData::::put(head_data); } - /// Send the ump signals - fn send_ump_signal() { - use cumulus_primitives_core::relay_chain::{UMPSignal, UMP_SEPARATOR}; - - UpwardMessages::::mutate(|up| { - if let Some(core_info) = - CumulusDigestItem::find_core_info(&frame_system::Pallet::::digest()) - { - up.push(UMP_SEPARATOR); - - // Send the core selector signal. 
- up.push( - UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset) - .encode(), - ); - } - }); + /// Send the pending ump signals + fn send_ump_signals() { + let mut ump_signals = PendingUpwardSignals::::take(); + if !ump_signals.is_empty() { + UpwardMessages::::append(UMP_SEPARATOR); + UpwardMessages::::mutate(|up| { + up.append(&mut ump_signals); + }); + } } /// Open HRMP channel for using it in benchmarks or tests. diff --git a/cumulus/pallets/parachain-system/src/tests.rs b/cumulus/pallets/parachain-system/src/tests.rs index ca2767e1c87af..dc9fab04f706f 100755 --- a/cumulus/pallets/parachain-system/src/tests.rs +++ b/cumulus/pallets/parachain-system/src/tests.rs @@ -19,8 +19,12 @@ use super::*; use crate::mock::*; +use alloc::collections::BTreeMap; use core::num::NonZeroU32; -use cumulus_primitives_core::{AbridgedHrmpChannel, InboundDownwardMessage, InboundHrmpMessage}; +use cumulus_primitives_core::{ + relay_chain::ApprovedPeerId, AbridgedHrmpChannel, ClaimQueueOffset, CoreInfo, CoreSelector, + InboundDownwardMessage, InboundHrmpMessage, CUMULUS_CONSENSUS_ID, +}; use cumulus_primitives_parachain_inherent::{ v0, INHERENT_IDENTIFIER, PARACHAIN_INHERENT_IDENTIFIER_V0, }; @@ -31,6 +35,7 @@ use rand::Rng; use relay_chain::HrmpChannelId; use sp_core::H256; use sp_inherents::InherentDataProvider; +use sp_runtime::DigestItem; use sp_trie::StorageProof; #[test] @@ -180,7 +185,7 @@ fn unincluded_segment_works() { } #[test] -#[should_panic = "no space left for the block in the unincluded segment"] +#[should_panic = "No space left for the block in the unincluded segment: new_len(1) < capacity(1)"] fn unincluded_segment_is_limited() { CONSENSUS_HOOK.with(|c| { *c.borrow_mut() = Box::new(|_| (Weight::zero(), NonZeroU32::new(1).unwrap().into())) @@ -1655,3 +1660,74 @@ fn ump_fee_factor_increases_and_decreases() { }, ); } + +#[test] +fn ump_signals_are_sent_correctly() { + let core_info = CoreInfo { + selector: CoreSelector(1), + claim_queue_offset: 
ClaimQueueOffset(1), + number_of_cores: codec::Compact(1), + }; + + // Test cases list with the following format: + // `((expect_approved_peer, expect_select_core), expected_upward_messages)` + let test_cases = BTreeMap::from([ + ((false, false), vec![b"Test".to_vec()]), + ( + (true, false), + vec![ + b"Test".to_vec(), + UMP_SEPARATOR, + UMPSignal::ApprovedPeer(ApprovedPeerId::try_from(b"12345".to_vec()).unwrap()) + .encode(), + ], + ), + ( + (false, true), + vec![ + b"Test".to_vec(), + UMP_SEPARATOR, + UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset).encode(), + ], + ), + ( + (true, true), + vec![ + b"Test".to_vec(), + UMP_SEPARATOR, + UMPSignal::ApprovedPeer(ApprovedPeerId::try_from(b"12345".to_vec()).unwrap()) + .encode(), + UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset).encode(), + ], + ), + ]); + + for ((expect_approved_peer, expect_select_core), expected_upward_messages) in test_cases { + let core_info_digest = CumulusDigestItem::CoreInfo(core_info.clone()).encode(); + + BlockTests::new() + .with_inherent_data(move |_, _, data| { + if expect_approved_peer { + data.collator_peer_id = + Some(ApprovedPeerId::try_from(b"12345".to_vec()).unwrap()); + } + }) + .add_with_post_test( + 1, + move || { + ParachainSystem::send_upward_message(b"Test".to_vec()).unwrap(); + + if expect_select_core { + System::deposit_log(DigestItem::PreRuntime( + CUMULUS_CONSENSUS_ID, + core_info_digest.clone(), + )); + } + }, + move || { + assert_eq!(PendingUpwardSignals::::get(), Vec::>::new()); + assert_eq!(UpwardMessages::::get(), expected_upward_messages); + }, + ); + } +} diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index f77827ff68078..8b9d05e69fd62 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -83,6 +83,8 @@ where 
B::Extrinsic: ExtrinsicCall, ::Call: IsSubType>, { + // sp_runtime::runtime_logger::RuntimeLogger::init(); + let _guard = ( // Replace storage calls with our own implementations sp_io::storage::host_read.replace_implementation(host_storage_read), @@ -335,6 +337,7 @@ where upward_messages .try_push(UMP_SEPARATOR) .expect("UMPSignals does not fit in UMPMessages"); + upward_messages .try_extend(upward_message_signals.into_iter()) .expect("UMPSignals does not fit in UMPMessages"); diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index ad0a8cd63e859..38535eb6c2c70 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -30,14 +30,14 @@ use cumulus_test_client::{ use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use polkadot_parachain_primitives::primitives::ValidationResult; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; -use sp_api::{ApiExt, Core, ProofRecorder, ProvideRuntimeApi}; -use sp_consensus_slots::SlotDuration; +use sp_api::{ApiExt, Core, ProofRecorder, ProvideRuntimeApi, StorageProof}; +use sp_consensus_babe::SlotDuration; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, Block as BlockT, Header as HeaderT}, DigestItem, }; -use sp_trie::{proof_size_extension::ProofSizeExt, recorder::IgnoredNodes, StorageProof}; +use sp_trie::{proof_size_extension::ProofSizeExt, recorder::IgnoredNodes}; use std::{env, process::Command}; fn call_validate_block_validation_result( @@ -147,6 +147,7 @@ fn build_block_with_witness( let cumulus_test_client::BlockBuilderAndSupportData { mut block_builder, persisted_validation_data, + .. 
} = client.init_block_builder_with_pre_digests(Some(validation_data), sproof_builder, pre_digests); extra_extrinsics.into_iter().for_each(|e| block_builder.push(e).unwrap()); @@ -241,11 +242,10 @@ fn build_multiple_blocks_with_witness( }) .unwrap(); - ignored_nodes.extend(IgnoredNodes::from_storage_proof::( - &built_block.proof.clone().unwrap(), - )); + let proof_new = built_block.proof.unwrap(); + ignored_nodes.extend(IgnoredNodes::from_storage_proof::(&proof_new)); ignored_nodes.extend(IgnoredNodes::from_memory_db(built_block.storage_changes.transaction)); - proof = StorageProof::merge([proof, built_block.proof.unwrap()]); + proof = StorageProof::merge([proof, proof_new]); parent_head = built_block.block.header.clone(); @@ -518,7 +518,6 @@ fn state_changes_in_multiple_blocks_are_applied_in_exact_order() { sp_tracing::try_init_simple(); let blocks_per_pov = 12; - // disable the core selection logic let (client, genesis_head) = create_elastic_scaling_test_client(); // 1. Build the initial block that stores values in the map. @@ -583,7 +582,6 @@ fn validate_block_handles_ump_signal() { relay_chain::{UMPSignal, UMP_SEPARATOR}, ClaimQueueOffset, CoreInfo, CoreSelector, }; - sp_tracing::try_init_simple(); let (client, parent_head) = create_elastic_scaling_test_client(); diff --git a/cumulus/pallets/parachain-system/src/weights.rs b/cumulus/pallets/parachain-system/src/weights.rs index ba7d8b1e87f6b..086a6b993b695 100644 --- a/cumulus/pallets/parachain-system/src/weights.rs +++ b/cumulus/pallets/parachain-system/src/weights.rs @@ -55,6 +55,9 @@ use core::marker::PhantomData; /// Weight functions needed for cumulus_pallet_parachain_system. 
pub trait WeightInfo { fn enqueue_inbound_downward_messages(n: u32, ) -> Weight; + fn block_weight_tx_extension_max_weight() -> Weight; + fn block_weight_tx_extension_stays_fraction_of_core() -> Weight; + fn block_weight_tx_extension_full_core() -> Weight; } /// Weights for cumulus_pallet_parachain_system using the Substrate node and recommended hardware. @@ -84,6 +87,18 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } + + fn block_weight_tx_extension_max_weight() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_stays_fraction_of_core() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_full_core() -> Weight { + Weight::zero() + } } // For backwards compatibility and tests @@ -112,4 +127,17 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } + + fn block_weight_tx_extension_max_weight() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_stays_fraction_of_core() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_full_core() -> Weight { + Weight::zero() + } + } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs index 23dd800922aea..b7ba28ac2fbcc 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs @@ -74,4 +74,15 @@ impl cumulus_pallet_parachain_system::WeightInfo for We .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } + fn block_weight_tx_extension_max_weight() -> Weight { + Weight::zero() + } + + fn 
block_weight_tx_extension_stays_fraction_of_core() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_full_core() -> Weight { + Weight::zero() + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_parachain_system.rs index 28f8aca5f5e7e..2573210408d95 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -74,4 +74,15 @@ impl cumulus_pallet_parachain_system::WeightInfo for We .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } + fn block_weight_tx_extension_max_weight() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_stays_fraction_of_core() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_full_core() -> Weight { + Weight::zero() + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs index 145a6e3e3cf1b..ebb497e21decd 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs @@ -74,4 +74,15 @@ impl cumulus_pallet_parachain_system::WeightInfo for We .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } + fn block_weight_tx_extension_max_weight() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_stays_fraction_of_core() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_full_core() -> Weight { + 
Weight::zero() + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_parachain_system.rs index e60c9cfde30e5..5507c5ffd7f2a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -74,4 +74,15 @@ impl cumulus_pallet_parachain_system::WeightInfo for We .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } + fn block_weight_tx_extension_max_weight() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_stays_fraction_of_core() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_full_core() -> Weight { + Weight::zero() + } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs index 9ebfbd2fbd0a3..671e4715b8219 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -74,4 +74,15 @@ impl cumulus_pallet_parachain_system::WeightInfo for We .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } + fn block_weight_tx_extension_max_weight() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_stays_fraction_of_core() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_full_core() -> Weight { + Weight::zero() + } } diff --git 
a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_parachain_system.rs index 73c4b2ba241d2..bc702a215a66a 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_parachain_system.rs @@ -74,4 +74,15 @@ impl cumulus_pallet_parachain_system::WeightInfo for We .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } + fn block_weight_tx_extension_max_weight() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_stays_fraction_of_core() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_full_core() -> Weight { + Weight::zero() + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_parachain_system.rs index 8f5714bbe0cd7..ece41c94ab847 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -74,4 +74,15 @@ impl cumulus_pallet_parachain_system::WeightInfo for We .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } + fn block_weight_tx_extension_max_weight() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_stays_fraction_of_core() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_full_core() -> Weight { + Weight::zero() + } } diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs 
b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs index a753f6fc78f87..4c8cc2b5fc6f8 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -75,4 +75,15 @@ impl cumulus_pallet_parachain_system::WeightInfo for We .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } + fn block_weight_tx_extension_max_weight() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_stays_fraction_of_core() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_full_core() -> Weight { + Weight::zero() + } } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_parachain_system.rs index 58aef8cd5ab87..6e7e6acc40e3b 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_parachain_system.rs @@ -74,4 +74,15 @@ impl cumulus_pallet_parachain_system::WeightInfo for We .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } + fn block_weight_tx_extension_max_weight() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_stays_fraction_of_core() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_full_core() -> Weight { + Weight::zero() + } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_parachain_system.rs index 05c07f998e8e2..085b4b0fa85e7 100644 --- 
a/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -74,4 +74,15 @@ impl cumulus_pallet_parachain_system::WeightInfo for We .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } + fn block_weight_tx_extension_max_weight() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_stays_fraction_of_core() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_full_core() -> Weight { + Weight::zero() + } } diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index e06a92dcef8bc..03f337b279671 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -231,6 +231,43 @@ pub struct CoreInfo { pub number_of_cores: Compact, } +impl core::hash::Hash for CoreInfo { + fn hash(&self, state: &mut H) { + state.write_u8(self.selector.0); + state.write_u8(self.claim_queue_offset.0); + state.write_u16(self.number_of_cores.0); + } +} + +impl CoreInfo { + /// Puts this into a [`CumulusDigestItem::CoreInfo`] and then encodes it as a Substrate + /// [`DigestItem`]. + pub fn to_digest_item(&self) -> DigestItem { + CumulusDigestItem::CoreInfo(self.clone()).to_digest_item() + } +} + +/// Information about a block that is part of a PoV bundle. +#[derive(Clone, Debug, Decode, Encode, PartialEq)] +pub struct BundleInfo { + /// The index of the block in the bundle. + pub index: u8, + /// Is this the last block in the bundle from the point of view of the node? + /// + /// It is possible that at `index` zero the runtime outputs the + /// [`CumulusDigestItem::UseFullCore`] that informs the node to use an entire for one block + /// only. 
+ pub maybe_last: bool, +} + +impl BundleInfo { + /// Puts this into a [`CumulusDigestItem::BundleInfo`] and then encodes it as a Substrate + /// [`DigestItem`]. + pub fn to_digest_item(&self) -> DigestItem { + CumulusDigestItem::BundleInfo(self.clone()).to_digest_item() + } +} + /// Return value of [`CumulusDigestItem::core_info_exists_at_max_once`] #[derive(Debug, Clone, PartialEq, Eq)] pub enum CoreInfoExistsAtMaxOnce { @@ -261,14 +298,25 @@ pub enum CumulusDigestItem { /// block. #[codec(index = 1)] CoreInfo(CoreInfo), + /// A digest item providing information about the position of the block in the bundle. + #[codec(index = 2)] + BundleInfo(BundleInfo), + /// A digest item informing the node that this block should be put alone onto a core. + /// + /// In other words, the core should not be shared with other blocks. + #[codec(index = 3)] + UseFullCore, } impl CumulusDigestItem { /// Encode this as a Substrate [`DigestItem`]. pub fn to_digest_item(&self) -> DigestItem { + let encoded = self.encode(); + match self { - Self::RelayParent(_) => DigestItem::Consensus(CUMULUS_CONSENSUS_ID, self.encode()), - Self::CoreInfo(_) => DigestItem::PreRuntime(CUMULUS_CONSENSUS_ID, self.encode()), + Self::RelayParent(_) | Self::UseFullCore => + DigestItem::Consensus(CUMULUS_CONSENSUS_ID, encoded), + _ => DigestItem::PreRuntime(CUMULUS_CONSENSUS_ID, encoded), } } @@ -347,6 +395,40 @@ impl CumulusDigestItem { _ => None, }) } + + /// Returns the [`BundleInfo`] from the given `digest`. + pub fn find_bundle_info(digest: &Digest) -> Option { + digest.convert_first(|d| match d { + DigestItem::PreRuntime(id, val) if id == &CUMULUS_CONSENSUS_ID => { + let Ok(CumulusDigestItem::BundleInfo(bundle_info)) = + CumulusDigestItem::decode_all(&mut &val[..]) + else { + return None + }; + + Some(bundle_info) + }, + _ => None, + }) + } + + /// Returns `true` if the given `digest` contains the [`Self::UseFullCore`] item. 
+ pub fn contains_use_full_core(digest: &Digest) -> bool { + digest + .convert_first(|d| match d { + DigestItem::Consensus(id, val) if id == &CUMULUS_CONSENSUS_ID => { + let Ok(CumulusDigestItem::UseFullCore) = + CumulusDigestItem::decode_all(&mut &val[..]) + else { + return None + }; + + Some(true) + }, + _ => None, + }) + .unwrap_or_default() + } } /// diff --git a/polkadot/primitives/src/v9/mod.rs b/polkadot/primitives/src/v9/mod.rs index 360da8ff9b956..c55edd47c3c19 100644 --- a/polkadot/primitives/src/v9/mod.rs +++ b/polkadot/primitives/src/v9/mod.rs @@ -2238,10 +2238,22 @@ impl Ord for CommittedCandidateReceiptV2 { #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Debug, Copy)] pub struct CoreSelector(pub u8); +impl From for CoreSelector { + fn from(value: u8) -> Self { + Self(value) + } +} + /// An offset in the relay chain claim queue. #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Debug, Copy)] pub struct ClaimQueueOffset(pub u8); +impl From for ClaimQueueOffset { + fn from(value: u8) -> Self { + Self(value) + } +} + /// Signals that a parachain can send to the relay chain via the UMP queue. 
#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Debug)] pub enum UMPSignal { diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/cumulus_pallet_parachain_system.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/cumulus_pallet_parachain_system.rs index b91921ce85eb1..55bd6a43a6a0f 100644 --- a/substrate/frame/staking-async/runtimes/parachain/src/weights/cumulus_pallet_parachain_system.rs +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/cumulus_pallet_parachain_system.rs @@ -79,4 +79,15 @@ impl cumulus_pallet_parachain_system::WeightInfo for We .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(4)) } + fn block_weight_tx_extension_max_weight() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_stays_fraction_of_core() -> Weight { + Weight::zero() + } + + fn block_weight_tx_extension_full_core() -> Weight { + Weight::zero() + } } diff --git a/substrate/frame/support/src/traits/messages.rs b/substrate/frame/support/src/traits/messages.rs index 0a5c70f8f0fa5..eefe47ff53a61 100644 --- a/substrate/frame/support/src/traits/messages.rs +++ b/substrate/frame/support/src/traits/messages.rs @@ -356,6 +356,16 @@ pub trait HandleMessage { fn sweep_queue(); } +impl HandleMessage for () { + type MaxMessageLen = ConstU32<0>; + + fn handle_message(_: BoundedSlice) {} + + fn handle_messages<'a>(_: impl Iterator>) {} + + fn sweep_queue() {} +} + /// Adapter type to transform an [`EnqueueMessage`] with an origin into a [`HandleMessage`] impl. 
pub struct EnqueueWithOrigin(PhantomData<(E, O)>); impl, O: TypedGet> HandleMessage for EnqueueWithOrigin diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs index 16522611ca474..b64f68cb71b82 100644 --- a/substrate/frame/system/src/extensions/check_weight.rs +++ b/substrate/frame/system/src/extensions/check_weight.rs @@ -38,10 +38,16 @@ use sp_weights::Weight; /// /// This extension does not influence any fields of `TransactionValidity` in case the /// transaction is valid. -#[derive(Encode, Decode, DecodeWithMemTracking, Clone, Eq, PartialEq, Default, TypeInfo)] +#[derive(Encode, Decode, DecodeWithMemTracking, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct CheckWeight(core::marker::PhantomData); +impl Default for CheckWeight { + fn default() -> Self { + Self(Default::default()) + } +} + impl CheckWeight where T::RuntimeCall: Dispatchable, diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index c0729d47a6efe..8cb28f16561af 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -2147,7 +2147,7 @@ impl Pallet { } /// Sets the index of extrinsic that is currently executing. 
- #[cfg(any(feature = "std", test))] + #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] pub fn set_extrinsic_index(extrinsic_index: u32) { storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &extrinsic_index) } diff --git a/substrate/primitives/runtime/src/testing.rs b/substrate/primitives/runtime/src/testing.rs index 647f5eb78d5e1..59f71d4ce4a9a 100644 --- a/substrate/primitives/runtime/src/testing.rs +++ b/substrate/primitives/runtime/src/testing.rs @@ -21,7 +21,7 @@ use crate::{ codec::{Codec, Decode, DecodeWithMemTracking, Encode, EncodeLike, MaxEncodedLen}, generic::{self, LazyBlock, UncheckedExtrinsic}, scale_info::TypeInfo, - traits::{self, BlakeTwo256, Dispatchable, LazyExtrinsic, OpaqueKeys}, + traits::{self, BlakeTwo256, Dispatchable, LazyExtrinsic, Lookup, OpaqueKeys, StaticLookup}, DispatchResultWithInfo, KeyTypeId, OpaqueExtrinsic, }; use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize}; @@ -54,6 +54,12 @@ use std::{cell::RefCell, fmt::Debug}; )] pub struct UintAuthorityId(pub u64); +impl core::fmt::Display for UintAuthorityId { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + core::fmt::Display::fmt(&self.0, f) + } +} + impl From for UintAuthorityId { fn from(id: u64) -> Self { UintAuthorityId(id) @@ -162,6 +168,28 @@ impl traits::IdentifyAccount for UintAuthorityId { } } +impl StaticLookup for UintAuthorityId { + type Source = Self; + type Target = u64; + + fn lookup(s: Self::Source) -> Result { + Ok(s.0) + } + + fn unlookup(t: Self::Target) -> Self::Source { + Self(t) + } +} + +impl Lookup for UintAuthorityId { + type Source = Self; + type Target = u64; + + fn lookup(&self, s: Self::Source) -> Result { + Ok(s.0) + } +} + impl traits::Verify for UintAuthorityId { type Signer = Self; From 2712aefa885546317c7a0f065198995ad2037920 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 14 Nov 2025 08:55:43 +0100 Subject: [PATCH 158/312] Update 
cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs Co-authored-by: Guillaume Thiolliere --- .../parachain-system/src/block_weight/transaction_extension.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index e54fc2985b78c..cbfeddb3eb153 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -59,7 +59,7 @@ use sp_runtime::{ /// Before dispatching an extrinsic the extension will check the requirements and set the /// appropriate [`BlockWeightMode`]. After the extrinsic has finished, the checks from before /// dispatching the extrinsic are repeated with the post dispatch weights. The [`BlockWeightMode`] -/// may is changed properly. +/// is changed properly. /// /// # Note /// From 2917609a27a96cf480accb37c4dbc923fe1280e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 14 Nov 2025 08:55:52 +0100 Subject: [PATCH 159/312] Update cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs Co-authored-by: Guillaume Thiolliere --- .../src/block_weight/transaction_extension.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index cbfeddb3eb153..08301f2f30310 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -411,8 +411,8 @@ where inner } - fn weight(&self, _: &Config::RuntimeCall) -> Weight { - Config::WeightInfo::block_weight_tx_extension_max_weight() + fn weight(&self, call: &Config::RuntimeCall) -> Weight { + 
Config::WeightInfo::block_weight_tx_extension_max_weight().saturating_add(self.0.weight(call)) } fn validate( From 86ad20f689a60a7c671192c67af6a3d0d0ee02d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 14 Nov 2025 13:23:43 +0100 Subject: [PATCH 160/312] New test and some fixes --- .../parachain-system/src/block_weight/mock.rs | 12 +++- .../parachain-system/src/block_weight/mod.rs | 10 +-- .../src/block_weight/tests.rs | 70 +++++++++++++++++-- .../src/block_weight/transaction_extension.rs | 3 +- cumulus/pallets/parachain-system/src/lib.rs | 6 -- substrate/frame/support/src/traits/hooks.rs | 5 ++ 6 files changed, 84 insertions(+), 22 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index 0eba745c88d9a..8439fc8cad9c2 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -30,7 +30,7 @@ use frame_support::{ Weight, }, }; -use frame_system::limits::BlockWeights; +use frame_system::{limits::BlockWeights, CheckWeight}; use sp_core::ConstU32; use sp_io; use sp_runtime::{ @@ -49,14 +49,20 @@ pub type ExtrinsicOnlyOperational = UncheckedExtrinsic< UintAuthorityId, only_operational_runtime::RuntimeCall, UintAuthorityId, - DynamicMaxBlockWeight, 10, false>, + DynamicMaxBlockWeight< + RuntimeOnlyOperational, + CheckWeight, + ConstU32, + 10, + false, + >, >; pub type Extrinsic = UncheckedExtrinsic< UintAuthorityId, RuntimeCall, UintAuthorityId, - DynamicMaxBlockWeight>, + DynamicMaxBlockWeight, ConstU32>, >; pub type Block = diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index 4a703ea21f4d4..db1f223845c99 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -162,20 +162,14 @@ impl> Get target_block_weight }; - // 
If we are in `on_initialize` or at applying the inherents, we allow the maximum block - // weight as allowed by the current context. - if !frame_system::Pallet::::inherents_applied() { - return maybe_full_core_weight - } - match crate::BlockWeightMode::::get() { // We allow the full core. Some(BlockWeightMode::FullCore | BlockWeightMode::PotentialFullCore { .. }) => Self::FULL_CORE_WEIGHT, // Let's calculate below how much weight we can use. Some(BlockWeightMode::FractionOfCore { .. }) => target_block_weight, - // Either the runtime is not using the `DynamicMaxBlockWeight` extension or there is a - // bug. The value should be set before applying the first extrinsic. + // If we are in `on_initialize` or at applying the inherents, we allow the maximum block + // weight as allowed by the current context. None => maybe_full_core_weight, } } diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index f45e8604d9d31..5904c265173e7 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -706,7 +706,7 @@ fn executive_validate_block_handles_normal_transactions() { TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { let call = RuntimeCall::TestPallet(test_pallet::Call::heavy_call_normal {}); - let xt = Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), ().into()); + let xt = Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), Default::default()); assert!(Executive::validate_transaction( TransactionSource::External, @@ -719,7 +719,7 @@ fn executive_validate_block_handles_normal_transactions() { TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { let call = RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::heavy_call_normal {}); - let xt = ExtrinsicOnlyOperational::new_signed(call, 1u64.into(), 1u64.into(), ().into()); + let xt = 
ExtrinsicOnlyOperational::new_signed(call, 1u64.into(), 1u64.into(), Default::default()); assert_eq!( ExecutiveOnlyOperational::validate_transaction( @@ -738,7 +738,7 @@ fn executive_validate_block_handles_operational_transactions() { TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { let call = RuntimeCall::TestPallet(test_pallet::Call::heavy_call_operational {}); - let xt = Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), ().into()); + let xt = Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), Default::default()); assert!(Executive::validate_transaction( TransactionSource::External, @@ -752,7 +752,12 @@ fn executive_validate_block_handles_operational_transactions() { let call = RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::heavy_call_operational {}); - let xt = ExtrinsicOnlyOperational::new_signed(call, 1u64.into(), 1u64.into(), ().into()); + let xt = ExtrinsicOnlyOperational::new_signed( + call, + 1u64.into(), + 1u64.into(), + Default::default(), + ); assert!(ExecutiveOnlyOperational::validate_transaction( TransactionSource::External, @@ -786,3 +791,60 @@ fn executive_with_operational_only_applies_big_inherent() { ExecutiveOnlyOperational::apply_extrinsic(xt).unwrap().unwrap(); }); } + +#[test] +fn block_weight_mode_from_previous_block_is_ignored_in_validate_block() { + TestExtBuilder::new() + .number_of_cores(4) + .first_block_in_core(true) + .build() + .execute_with(|| { + let call = RuntimeCallOnlyOperational::TestPallet( + test_pallet::Call::heavy_call_operational {}, + ); + + let xt = ExtrinsicOnlyOperational::new_signed( + call, + 1u64.into(), + 1u64.into(), + Default::default(), + ); + + Executive::initialize_block(&Header::new( + 1, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + )); + + assert!(ExecutiveOnlyOperational::apply_extrinsic(xt,).is_ok()); + + Executive::finalize_block(); + + assert_eq!( + crate::BlockWeightMode::::get().unwrap(), + BlockWeightMode::FullCore + 
); + + let call = + RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::heavy_call_normal {}); + + let xt = ExtrinsicOnlyOperational::new_signed( + call, + 1u64.into(), + 1u64.into(), + Default::default(), + ); + + assert_eq!( + ExecutiveOnlyOperational::validate_transaction( + TransactionSource::External, + xt, + Default::default() + ) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() + ); + }); +} diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index 08301f2f30310..d83bf20fc62cb 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -412,7 +412,8 @@ where } fn weight(&self, call: &Config::RuntimeCall) -> Weight { - Config::WeightInfo::block_weight_tx_extension_max_weight().saturating_add(self.0.weight(call)) + Config::WeightInfo::block_weight_tx_extension_max_weight() + .saturating_add(self.0.weight(call)) } fn validate( diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index dee06a139c35c..b7ac3a26eefdc 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -715,12 +715,6 @@ pub mod pallet { ); } - if let Some(collator_peer_id) = collator_peer_id { - PendingUpwardSignals::::mutate(|signals| { - signals.push(UMPSignal::ApprovedPeer(collator_peer_id).encode()); - }); - } - total_weight.saturating_accrue(Self::enqueue_inbound_downward_messages( relevant_messaging_state.dmq_mqc_head, inbound_messages_data.downward_messages, diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs index f7404ca7dbe85..b4027547c47e5 100644 --- a/substrate/frame/support/src/traits/hooks.rs +++ b/substrate/frame/support/src/traits/hooks.rs @@ -130,6 +130,11 @@ impl_for_tuples_attr! 
{ &[for_tuples!( #( Tuple::on_idle ),* )]; let mut weight = Weight::zero(); let len = on_idle_functions.len(); + + if len == 0 { + return Weight::zero() + } + let start_index = n % (len as u32).into(); let start_index = start_index.try_into().ok().expect( "`start_index % len` always fits into `usize`, because `len` can be in maximum `usize::MAX`; qed" From 5b5190b08b6c54db6b86783af5775e3f340cd146 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 14 Nov 2025 15:00:50 +0100 Subject: [PATCH 161/312] Ensure we ignore the block weight mode from the previous block --- .../parachain-system/src/block_weight/mod.rs | 71 +++++++++++++++++-- .../src/block_weight/pre_inherents_hook.rs | 8 +-- .../src/block_weight/tests.rs | 39 ++++++---- .../src/block_weight/transaction_extension.rs | 59 ++++++++------- cumulus/pallets/parachain-system/src/lib.rs | 2 +- 5 files changed, 129 insertions(+), 50 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index db1f223845c99..d8d7114852efc 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -50,7 +50,11 @@ use crate::{Config, PreviousCoreCount}; use codec::{Decode, Encode}; use core::marker::PhantomData; use cumulus_primitives_core::CumulusDigestItem; -use frame_support::weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}; +use frame_support::{ + weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, + CloneNoBound, DebugNoBound, +}; +use frame_system::pallet_prelude::BlockNumberFor; use polkadot_primitives::MAX_POV_SIZE; use scale_info::TypeInfo; use sp_core::Get; @@ -71,12 +75,20 @@ const LOG_TARGET: &str = "runtime::parachain-system::block-weight"; /// The current block weight mode. /// /// Based on this mode [`MaxParachainBlockWeight`] determines the current allowed block weight. 
-#[derive(Debug, Encode, Decode, Clone, Copy, TypeInfo, PartialEq)] -pub enum BlockWeightMode { +#[derive(DebugNoBound, Encode, Decode, CloneNoBound, TypeInfo, PartialEq)] +#[scale_info(skip_type_params(T))] +pub enum BlockWeightMode { /// The block is allowed to use the weight of a full core. - FullCore, + FullCore { + /// The block in which this mode was set. Is used to determine if this is maybe stale mode + /// setting, e.g. when running `validate_block`. + context: BlockNumberFor, + }, /// The current active transaction is allowed to use the weight of a full core. PotentialFullCore { + /// The block in which this mode was set. Is used to determine if this is maybe stale mode + /// setting, e.g. when running `validate_block`. + context: BlockNumberFor, /// The index of the first transaction. first_transaction_index: Option, /// The target weight that was used to determine that the extrinsic is above this limit. @@ -87,11 +99,54 @@ pub enum BlockWeightMode { /// How much each block is allowed to consume, depends on the target number of blocks and the /// available cores on the relay chain. FractionOfCore { + /// The block in which this mode was set. Is used to determine if this is maybe stale mode + /// setting, e.g. when running `validate_block`. + context: BlockNumberFor, /// The index of the first transaction. first_transaction_index: Option, }, } +impl BlockWeightMode { + /// Check if this mode is stale, aka was set in a previous block. + fn is_stale(&self) -> bool { + let context = self.context(); + + context < frame_system::Pallet::::block_number() + } + + /// Returns the context (block) in which this mode was set. + fn context(&self) -> BlockNumberFor { + match self { + Self::FullCore { context } | + Self::PotentialFullCore { context, .. } | + Self::FractionOfCore { context, .. } => *context, + } + } + + /// Create a new instance of `Self::FullCore`. 
+ fn full_core() -> Self { + Self::FullCore { context: frame_system::Pallet::::block_number() } + } + + /// Create new instance of `Self::FractionOfCore`. + fn fraction_of_core(first_transaction_index: Option) -> Self { + Self::FractionOfCore { + context: frame_system::Pallet::::block_number(), + first_transaction_index, + } + } + + /// Create new instance of `Self::PotentialFullCore`. + fn potential_full_core(first_transaction_index: Option, target_weight: Weight) -> Self { + Self::PotentialFullCore { + context: frame_system::Pallet::::block_number(), + first_transaction_index, + target_weight, + } + } +} + /// Calculates the maximum block weight for a parachain. /// /// Based on the available cores and the number of desired blocks a block weight is calculated. @@ -164,10 +219,12 @@ impl> Get match crate::BlockWeightMode::::get() { // We allow the full core. - Some(BlockWeightMode::FullCore | BlockWeightMode::PotentialFullCore { .. }) => - Self::FULL_CORE_WEIGHT, + Some( + BlockWeightMode::::FullCore { .. } | + BlockWeightMode::::PotentialFullCore { .. }, + ) => Self::FULL_CORE_WEIGHT, // Let's calculate below how much weight we can use. - Some(BlockWeightMode::FractionOfCore { .. }) => target_block_weight, + Some(BlockWeightMode::::FractionOfCore { .. }) => target_block_weight, // If we are in `on_initialize` or at applying the inherents, we allow the maximum block // weight as allowed by the current context. None => maybe_full_core_weight, diff --git a/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs b/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs index 0c51997c36303..faf766ebd2145 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs @@ -40,9 +40,9 @@ where fn pre_inherents() { if !block_weight_over_target_block_weight::() { // We still initialize the `BlockWeightMode`. 
- crate::BlockWeightMode::::put(BlockWeightMode::FractionOfCore { - first_transaction_index: None, - }); + crate::BlockWeightMode::::put(BlockWeightMode::::fraction_of_core( + None, + )); return } @@ -67,7 +67,7 @@ where ); } - crate::BlockWeightMode::::put(BlockWeightMode::FullCore); + crate::BlockWeightMode::::put(BlockWeightMode::::full_core()); // Inform the node that this block uses the full core. frame_system::Pallet::::deposit_log( diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 5904c265173e7..7a6c58b379f84 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -224,7 +224,7 @@ fn tx_extension_sets_fraction_of_core_mode() { assert_eq!( crate::BlockWeightMode::::get(), - Some(BlockWeightMode::FractionOfCore { first_transaction_index: Some(0) }) + Some(BlockWeightMode::fraction_of_core(Some(0))) ); }); } @@ -268,7 +268,10 @@ fn tx_extension_large_tx_enables_full_core_usage() { assert_ok!(TxExtension::post_dispatch((), &info, &mut post_info, 0, &Ok(()))); - assert_eq!(crate::BlockWeightMode::::get(), Some(BlockWeightMode::FullCore)); + assert_eq!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::full_core()) + ); assert!(has_use_full_core_digest()); assert_eq!(MaximumBlockWeight::get().ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); }); @@ -310,7 +313,7 @@ fn tx_extension_only_allows_large_operational_tx_to_enable_full_core_usage() { assert_matches!( crate::BlockWeightMode::::get(), - Some(BlockWeightMode::FractionOfCore { first_transaction_index: None }) + Some(BlockWeightMode::FractionOfCore { first_transaction_index: None, .. 
}) ); info.class = DispatchClass::Operational; @@ -335,7 +338,10 @@ fn tx_extension_only_allows_large_operational_tx_to_enable_full_core_usage() { assert_ok!(TxExtension::post_dispatch((), &info, &mut post_info, 0, &Ok(()))); - assert_eq!(crate::BlockWeightMode::::get(), Some(BlockWeightMode::FullCore)); + assert_eq!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::full_core()) + ); assert!(has_use_full_core_digest()); assert_eq!(MaximumBlockWeight::get().ref_time(), 2 * WEIGHT_REF_TIME_PER_SECOND); }); @@ -427,7 +433,7 @@ fn tx_extension_large_tx_is_rejected_on_non_first_block() { // Should stay in FractionOfCore mode (not PotentialFullCore) since not first block assert_eq!( crate::BlockWeightMode::::get(), - Some(BlockWeightMode::FractionOfCore { first_transaction_index: None }) + Some(BlockWeightMode::fraction_of_core(None)) ); assert!(!has_use_full_core_digest()); assert_eq!(MaximumBlockWeight::get(), target_weight); @@ -461,7 +467,7 @@ fn tx_extension_post_dispatch_to_full_core_because_of_manual_weight() { assert_matches!( crate::BlockWeightMode::::get(), - Some(BlockWeightMode::FractionOfCore { first_transaction_index: Some(0) }) + Some(BlockWeightMode::FractionOfCore { first_transaction_index: Some(0), .. }) ); // But actually uses much more weight (bug in weight annotation) @@ -476,7 +482,7 @@ fn tx_extension_post_dispatch_to_full_core_because_of_manual_weight() { // Should transition to FullCore due to exceeding limit assert_matches!( crate::BlockWeightMode::::get(), - Some(BlockWeightMode::FullCore) + Some(BlockWeightMode::FullCore { .. 
}) ); assert!(has_use_full_core_digest()); @@ -517,7 +523,7 @@ fn tx_extension_large_tx_after_limit_is_rejected() { assert_eq!( crate::BlockWeightMode::::get(), - Some(BlockWeightMode::FractionOfCore { first_transaction_index: None }) + Some(BlockWeightMode::fraction_of_core(None)) ); assert!(!has_use_full_core_digest()); assert_eq!(MaximumBlockWeight::get(), target_weight); @@ -560,7 +566,7 @@ fn tx_extension_large_weight_before_first_tx() { assert_matches!( crate::BlockWeightMode::::get(), - Some(BlockWeightMode::FullCore) + Some(BlockWeightMode::FullCore { .. }) ); assert!(has_use_full_core_digest()); @@ -596,7 +602,7 @@ fn pre_inherents_hook_first_block_over_limit() { assert_matches!( crate::BlockWeightMode::::get(), - Some(BlockWeightMode::FullCore) + Some(BlockWeightMode::FullCore { .. }) ); // Should have UseFullCore digest @@ -623,7 +629,7 @@ fn pre_inherents_hook_non_first_block_over_limit() { assert_matches!( crate::BlockWeightMode::::get(), - Some(BlockWeightMode::FullCore) + Some(BlockWeightMode::FullCore { .. }) ); assert!(has_use_full_core_digest()); @@ -653,7 +659,7 @@ fn pre_inherents_hook_under_limit_no_change() { assert_matches!( crate::BlockWeightMode::::get(), - Some(BlockWeightMode::FractionOfCore { first_transaction_index: None }) + Some(BlockWeightMode::FractionOfCore { first_transaction_index: None, .. 
}) ); // Should NOT have UseFullCore digest @@ -719,7 +725,12 @@ fn executive_validate_block_handles_normal_transactions() { TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { let call = RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::heavy_call_normal {}); - let xt = ExtrinsicOnlyOperational::new_signed(call, 1u64.into(), 1u64.into(), Default::default()); + let xt = ExtrinsicOnlyOperational::new_signed( + call, + 1u64.into(), + 1u64.into(), + Default::default(), + ); assert_eq!( ExecutiveOnlyOperational::validate_transaction( @@ -824,7 +835,7 @@ fn block_weight_mode_from_previous_block_is_ignored_in_validate_block() { assert_eq!( crate::BlockWeightMode::::get().unwrap(), - BlockWeightMode::FullCore + BlockWeightMode::full_core() ); let call = diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index d83bf20fc62cb..6056f92de486b 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -123,22 +123,28 @@ where let transaction_index = is_not_inherent.then(|| extrinsic_index); crate::BlockWeightMode::::mutate(|mode| { - let current_mode = *mode.get_or_insert_with(|| BlockWeightMode::FractionOfCore { - first_transaction_index: transaction_index, - }); + let current_mode = mode.get_or_insert_with(|| BlockWeightMode::::fraction_of_core(transaction_index)); + + // If the mode is stale (from previous block), we reset it. + // + // This happens for example when running in an offchain context. + if current_mode.is_stale() { + *current_mode = BlockWeightMode::fraction_of_core(transaction_index); + } log::trace!( target: LOG_TARGET, "About to pre-validate an extrinsic. 
current_mode={current_mode:?}, transaction_index={transaction_index:?}" ); + let is_potential = + matches!(current_mode, &mut BlockWeightMode::PotentialFullCore { .. }); + match current_mode { // We are already allowing the full core, not that much more to do here. - BlockWeightMode::FullCore => {}, - BlockWeightMode::PotentialFullCore { first_transaction_index, .. } | - BlockWeightMode::FractionOfCore { first_transaction_index } => { - let is_potential = - matches!(current_mode, BlockWeightMode::PotentialFullCore { .. }); + BlockWeightMode::::FullCore { ..} => {}, + BlockWeightMode::::PotentialFullCore { first_transaction_index, .. } | + BlockWeightMode::::FractionOfCore { first_transaction_index, .. } => { debug_assert!( !is_potential, "`PotentialFullCore` should resolve to `FullCore` or `FractionOfCore` after applying a transaction.", @@ -155,7 +161,7 @@ where // Protection against a misconfiguration as this should be detected by the pre-inherent hook. if block_weight_over_limit { - *mode = Some(BlockWeightMode::FullCore); + *mode = Some(BlockWeightMode::::full_core()); // Inform the node that this block uses the full core. frame_system::Pallet::::deposit_log( @@ -198,12 +204,12 @@ where "Enabling `PotentialFullCore` mode for extrinsic", ); - *mode = Some(BlockWeightMode::PotentialFullCore { - target_weight, + *mode = Some(BlockWeightMode::::potential_full_core ( // While applying inherents `extrinsic_index` and `first_transaction_index` will be `None`. // When the first transaction is applied, we want to store the index. 
- first_transaction_index: first_transaction_index.or(transaction_index), - }); + first_transaction_index.or(transaction_index), + target_weight, + )); } else { log::trace!( target: LOG_TARGET, @@ -218,7 +224,7 @@ where "Resetting back to `FractionOfCore`" ); *mode = - Some(BlockWeightMode::FractionOfCore { first_transaction_index: first_transaction_index.or(transaction_index) }); + Some(BlockWeightMode::::fraction_of_core(first_transaction_index.or(transaction_index))); } else { log::trace!( target: LOG_TARGET, @@ -226,7 +232,7 @@ where ); *mode = - Some(BlockWeightMode::FractionOfCore { first_transaction_index: first_transaction_index.or(transaction_index) }); + Some(BlockWeightMode::::fraction_of_core(first_transaction_index.or(transaction_index))); } }, }; @@ -241,14 +247,14 @@ where /// Returns the weight to refund. Aka the weight that wasn't used by this extension. fn post_dispatch_extrinsic(info: &DispatchInfo) -> Weight { crate::BlockWeightMode::::mutate(|weight_mode| { - let Some(mode) = *weight_mode else { return Weight::zero() }; + let Some(mode) = weight_mode else { return Weight::zero() }; match mode { // If the previous mode was already `FullCore`, we are fine. - BlockWeightMode::FullCore => + BlockWeightMode::::FullCore { .. } => Config::WeightInfo::block_weight_tx_extension_max_weight() .saturating_sub(Config::WeightInfo::block_weight_tx_extension_full_core()), - BlockWeightMode::FractionOfCore { .. } => { + BlockWeightMode::::FractionOfCore { .. } => { let digest = frame_system::Pallet::::digest(); let target_block_weight = MaxParachainBlockWeight::::target_block_weight_with_digest(&digest); @@ -283,7 +289,7 @@ where ); } - *weight_mode = Some(BlockWeightMode::FullCore); + *weight_mode = Some(BlockWeightMode::::full_core()); // Inform the node that this block uses the full core. frame_system::Pallet::::deposit_log( @@ -297,17 +303,21 @@ where }, // Now we need to check if the transaction required more weight than a fraction of a // core block. 
- BlockWeightMode::PotentialFullCore { first_transaction_index, target_weight } => { + BlockWeightMode::::PotentialFullCore { + first_transaction_index, + target_weight, + .. + } => { let block_weight = frame_system::BlockWeight::::get(); let extrinsic_class_weight = block_weight.get(info.class); - if extrinsic_class_weight.any_gt(target_weight) { + if extrinsic_class_weight.any_gt(*target_weight) { log::trace!( target: LOG_TARGET, "Extrinsic class weight {extrinsic_class_weight:?} above target weight {target_weight:?}, enabling `FullCore` mode." ); - *weight_mode = Some(BlockWeightMode::FullCore); + *weight_mode = Some(BlockWeightMode::::full_core()); // Inform the node that this block uses the full core. frame_system::Pallet::::deposit_log( @@ -320,8 +330,9 @@ where weight {target_weight:?}, going back to `FractionOfCore` mode." ); - *weight_mode = - Some(BlockWeightMode::FractionOfCore { first_transaction_index }); + *weight_mode = Some(BlockWeightMode::::fraction_of_core( + *first_transaction_index, + )); } // We run into the worst case, so no refund :) diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index b7ac3a26eefdc..3ed529a3ce728 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -789,7 +789,7 @@ pub mod pallet { #[pallet::storage] #[pallet::whitelist_storage] pub type BlockWeightMode = - StorageValue<_, block_weight::BlockWeightMode, OptionQuery>; + StorageValue<_, block_weight::BlockWeightMode, OptionQuery>; /// The core count available to the parachain in the previous block. 
/// From a8641d6c6bf411c2ef2e64b1aa71c958e5ebbc09 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 14 Nov 2025 15:01:51 +0100 Subject: [PATCH 162/312] Update cumulus/pallets/parachain-system/src/validate_block/implementation.rs Co-authored-by: Oliver Tale-Yazdi --- .../parachain-system/src/validate_block/implementation.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index bcad111b7bef1..22f1154517d96 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -83,8 +83,6 @@ where B::Extrinsic: ExtrinsicCall, ::Call: IsSubType>, { - // sp_runtime::runtime_logger::RuntimeLogger::init(); - let _guard = ( // Replace storage calls with our own implementations sp_io::storage::host_read.replace_implementation(host_storage_read), From eba0d1474487c106243e09bca067b4a2297d7c92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 14 Nov 2025 16:02:34 +0100 Subject: [PATCH 163/312] Allow MBMs to take the full core --- .../parachain-system/src/block_weight/mock.rs | 27 ++++++++---- .../parachain-system/src/block_weight/mod.rs | 20 ++++----- .../src/block_weight/pre_inherents_hook.rs | 28 +++++++++---- .../src/block_weight/tests.rs | 42 +++++++++++++++---- .../src/block_weight/transaction_extension.rs | 6 +-- 5 files changed, 88 insertions(+), 35 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index 8439fc8cad9c2..13dc037b4652f 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -23,6 +23,7 @@ use cumulus_primitives_core::{ use frame_support::{ construct_runtime, derive_impl, dispatch::DispatchClass, + 
migrations::MultiStepMigrator, parameter_types, traits::PreInherents, weights::{ @@ -208,27 +209,39 @@ construct_runtime!( } ); +parameter_types! { + pub static MbmOngoing: bool = false; +} + +pub struct Migrator; + +impl MultiStepMigrator for Migrator { + fn ongoing() -> bool { + MbmOngoing::get() + } + + fn step() -> Weight { + Weight::zero() + } +} + pub mod only_operational_runtime { + use super::{BlockOnlyOperational, Migrator}; + use crate::block_weight::DynamicMaxBlockWeightHooks; use frame_support::{construct_runtime, derive_impl}; use sp_core::ConstU32; use sp_runtime::testing::UintAuthorityId; - use crate::block_weight::{mock::BlockOnlyOperational, DynamicMaxBlockWeightHooks}; - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for RuntimeOnlyOperational { - // Setup the block weight. type BlockWeights = super::max_block_weight_setup::RuntimeBlockWeights; - // Set the `PreInherents` hook. type PreInherents = DynamicMaxBlockWeightHooks>; - - // Just required to make it compile, but not that important for this example here. type Block = BlockOnlyOperational; type OnSetCode = crate::ParachainSetCode; type AccountId = u64; type Lookup = UintAuthorityId; - // Rest of the types are omitted here. + type MultiBlockMigrator = Migrator; } impl crate::Config for RuntimeOnlyOperational { diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index d8d7114852efc..15270b21098b4 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -71,6 +71,11 @@ pub use pre_inherents_hook::DynamicMaxBlockWeightHooks; pub use transaction_extension::DynamicMaxBlockWeight; const LOG_TARGET: &str = "runtime::parachain-system::block-weight"; +/// Maximum ref time per core +const MAX_REF_TIME_PER_CORE_NS: u64 = 2 * WEIGHT_REF_TIME_PER_SECOND; +/// The available weight per core on the relay chain. 
+pub(crate) const FULL_CORE_WEIGHT: Weight = + Weight::from_parts(MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); /// The current block weight mode. /// @@ -159,11 +164,6 @@ pub struct MaxParachainBlockWeight(PhantomData<(Config, impl> MaxParachainBlockWeight { - // Maximum ref time per core - const MAX_REF_TIME_PER_CORE_NS: u64 = 2 * WEIGHT_REF_TIME_PER_SECOND; - pub(crate) const FULL_CORE_WEIGHT: Weight = - Weight::from_parts(Self::MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); - /// Returns the target block weight for one block. pub(crate) fn target_block_weight() -> Weight { let digest = frame_system::Pallet::::digest(); @@ -181,18 +181,18 @@ impl> // Ensure we have at least one core and valid target blocks if number_of_cores == 0 || target_blocks == 0 { - return Self::FULL_CORE_WEIGHT; + return FULL_CORE_WEIGHT; } // At maximum we want to allow `6s` of ref time, because we don't want to overload nodes // that are running with standard hardware. These nodes need to be able to import all the // blocks in 6s. let total_ref_time = (number_of_cores as u64) - .saturating_mul(Self::MAX_REF_TIME_PER_CORE_NS) + .saturating_mul(MAX_REF_TIME_PER_CORE_NS) .min(WEIGHT_REF_TIME_PER_SECOND * 6); let ref_time_per_block = total_ref_time .saturating_div(target_blocks as u64) - .min(Self::MAX_REF_TIME_PER_CORE_NS); + .min(MAX_REF_TIME_PER_CORE_NS); let total_pov_size = (number_of_cores as u64).saturating_mul(MAX_POV_SIZE as u64); // Each block at max gets one core. @@ -212,7 +212,7 @@ impl> Get let maybe_full_core_weight = if is_first_block_in_core_with_digest(&digest).unwrap_or(false) { - Self::FULL_CORE_WEIGHT + FULL_CORE_WEIGHT } else { target_block_weight }; @@ -222,7 +222,7 @@ impl> Get Some( BlockWeightMode::::FullCore { .. } | BlockWeightMode::::PotentialFullCore { .. }, - ) => Self::FULL_CORE_WEIGHT, + ) => FULL_CORE_WEIGHT, // Let's calculate below how much weight we can use. Some(BlockWeightMode::::FractionOfCore { .. 
}) => target_block_weight, // If we are in `on_initialize` or at applying the inherents, we allow the maximum block diff --git a/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs b/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs index faf766ebd2145..4f9074a30eb32 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs @@ -17,9 +17,9 @@ use super::{ block_weight_over_target_block_weight, is_first_block_in_core, BlockWeightMode, LOG_TARGET, }; -use crate::block_weight::MaxParachainBlockWeight; +use crate::block_weight::FULL_CORE_WEIGHT; use cumulus_primitives_core::CumulusDigestItem; -use frame_support::traits::PreInherents; +use frame_support::{migrations::MultiStepMigrator, traits::PreInherents}; use sp_core::Get; /// A pre-inherent hook that may increases max block weight after `on_initialize`. @@ -39,10 +39,24 @@ where { fn pre_inherents() { if !block_weight_over_target_block_weight::() { - // We still initialize the `BlockWeightMode`. - crate::BlockWeightMode::::put(BlockWeightMode::::fraction_of_core( - None, - )); + let new_mode = if Config::MultiBlockMigrator::ongoing() { + log::debug!( + target: LOG_TARGET, + "Multi block migrations are still ongoing, allowing the full core.", + ); + + // Inform the node that this block uses the full core. + frame_system::Pallet::::deposit_log( + CumulusDigestItem::UseFullCore.to_digest_item(), + ); + + BlockWeightMode::::full_core() + } else { + BlockWeightMode::::fraction_of_core(None) + }; + + crate::BlockWeightMode::::put(new_mode); + return } @@ -57,7 +71,7 @@ where // We are already above the allowed maximum and do not want to accept any more // extrinsics. 
frame_system::Pallet::::register_extra_weight_unchecked( - MaxParachainBlockWeight::::FULL_CORE_WEIGHT, + FULL_CORE_WEIGHT, frame_support::dispatch::DispatchClass::Mandatory, ); } else { diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 7a6c58b379f84..3fdcb58476ae4 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -149,7 +149,7 @@ fn test_target_block_weight_with_digest_edge_cases() { MaxParachainBlockWeight::>::target_block_weight_with_digest( &empty_digest, ); - assert_eq!(weight, MaxParachainBlockWeight::>::FULL_CORE_WEIGHT / 4); + assert_eq!(weight, FULL_CORE_WEIGHT / 4); // Test with digest containing core info let core_info = CoreInfo { @@ -575,9 +575,7 @@ fn tx_extension_large_weight_before_first_tx() { if !first_block_in_core { // Should have registered FULL_CORE_WEIGHT to prevent more transactions let final_remaining = frame_system::Pallet::::remaining_block_weight(); - assert!(final_remaining - .consumed() - .all_gte(MaximumBlockWeight::FULL_CORE_WEIGHT)); + assert!(final_remaining.consumed().all_gte(FULL_CORE_WEIGHT)); } }); } @@ -636,7 +634,7 @@ fn pre_inherents_hook_non_first_block_over_limit() { // Should have registered FULL_CORE_WEIGHT to prevent more transactions let final_remaining = frame_system::Pallet::::remaining_block_weight(); - assert!(final_remaining.consumed().all_gte(MaximumBlockWeight::FULL_CORE_WEIGHT)); + assert!(final_remaining.consumed().all_gte(FULL_CORE_WEIGHT)); }); } @@ -821,7 +819,7 @@ fn block_weight_mode_from_previous_block_is_ignored_in_validate_block() { Default::default(), ); - Executive::initialize_block(&Header::new( + ExecutiveOnlyOperational::initialize_block(&Header::new( 1, Default::default(), Default::default(), @@ -829,9 +827,9 @@ fn block_weight_mode_from_previous_block_is_ignored_in_validate_block() { Default::default(), )); - 
assert!(ExecutiveOnlyOperational::apply_extrinsic(xt,).is_ok()); + assert!(ExecutiveOnlyOperational::apply_extrinsic(xt).is_ok()); - Executive::finalize_block(); + ExecutiveOnlyOperational::finalize_block(); assert_eq!( crate::BlockWeightMode::::get().unwrap(), @@ -859,3 +857,31 @@ fn block_weight_mode_from_previous_block_is_ignored_in_validate_block() { ); }); } + +#[test] +fn ongoin_mbm_requests_full_core() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + MbmOngoing::set(true); + ExecutiveOnlyOperational::initialize_block(&Header::new( + 1, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + )); + + assert_eq!( + FULL_CORE_WEIGHT, + ::BlockWeights::get().max_block + ); + + ExecutiveOnlyOperational::finalize_block(); + + assert!(has_use_full_core_digest()); + MbmOngoing::set(false); + }); +} diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index 6056f92de486b..8e59a080a4e4d 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -16,7 +16,7 @@ use super::{ block_weight_over_target_block_weight, is_first_block_in_core_with_digest, BlockWeightMode, - MaxParachainBlockWeight, LOG_TARGET, + MaxParachainBlockWeight, LOG_TARGET, FULL_CORE_WEIGHT }; use crate::WeightInfo; use alloc::vec::Vec; @@ -172,7 +172,7 @@ where // We are already above the allowed maximum and do not want to accept any more // extrinsics. 
frame_system::Pallet::::register_extra_weight_unchecked( - MaxParachainBlockWeight::::FULL_CORE_WEIGHT, + FULL_CORE_WEIGHT, DispatchClass::Mandatory, ); } @@ -284,7 +284,7 @@ where ); frame_system::Pallet::::register_extra_weight_unchecked( - MaxParachainBlockWeight::::FULL_CORE_WEIGHT, + FULL_CORE_WEIGHT, DispatchClass::Mandatory, ); } From 9de019ec63ef2e686087332e897b7f41870b92c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 14 Nov 2025 16:15:10 +0100 Subject: [PATCH 164/312] Fixes --- .../parachain-system/src/benchmarking.rs | 25 ++++++++----------- .../parachain-system/src/block_weight/mod.rs | 6 ++--- cumulus/pallets/parachain-system/src/lib.rs | 2 ++ 3 files changed, 15 insertions(+), 18 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/benchmarking.rs b/cumulus/pallets/parachain-system/src/benchmarking.rs index fd05cac40d256..d417029ab1120 100644 --- a/cumulus/pallets/parachain-system/src/benchmarking.rs +++ b/cumulus/pallets/parachain-system/src/benchmarking.rs @@ -21,7 +21,9 @@ use super::*; use crate::{ - block_weight::{BlockWeightMode, DynamicMaxBlockWeight, MaxParachainBlockWeight}, + block_weight::{ + BlockWeightMode, DynamicMaxBlockWeight, MaxParachainBlockWeight, FULL_CORE_WEIGHT, + }, parachain_inherent::InboundDownwardMessages, }; use cumulus_primitives_core::{ @@ -120,9 +122,7 @@ mod benchmarks { let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; let len = 0_usize; - crate::BlockWeightMode::::put(BlockWeightMode::FractionOfCore { - first_transaction_index: None, - }); + crate::BlockWeightMode::::put(BlockWeightMode::fraction_of_core(None)); let ext = DynamicMaxBlockWeight::>::new(()); @@ -140,12 +140,9 @@ mod benchmarks { .unwrap(); } - assert_eq!(crate::BlockWeightMode::::get().unwrap(), BlockWeightMode::FullCore); + assert_eq!(crate::BlockWeightMode::::get().unwrap(), BlockWeightMode::full_core()); assert!(has_use_full_core_digest::()); - assert_eq!( - 
MaxParachainBlockWeight::>::get(), - MaxParachainBlockWeight::>::FULL_CORE_WEIGHT - ); + assert_eq!(MaxParachainBlockWeight::>::get(), FULL_CORE_WEIGHT); Ok(()) } @@ -182,9 +179,7 @@ mod benchmarks { let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; let len = 0_usize; - crate::BlockWeightMode::::put(BlockWeightMode::FractionOfCore { - first_transaction_index: None, - }); + crate::BlockWeightMode::::put(BlockWeightMode::fraction_of_core(None)); let ext = DynamicMaxBlockWeight::>::new(()); @@ -204,7 +199,7 @@ mod benchmarks { assert_eq!( crate::BlockWeightMode::::get().unwrap(), - BlockWeightMode::FractionOfCore { first_transaction_index: Some(1) } + BlockWeightMode::fraction_of_core(Some(1)) ); assert!(!has_use_full_core_digest::()); assert_eq!(MaxParachainBlockWeight::>::get(), target_weight); @@ -243,7 +238,7 @@ mod benchmarks { let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }; let len = 0_usize; - crate::BlockWeightMode::::put(BlockWeightMode::FullCore); + crate::BlockWeightMode::::put(BlockWeightMode::full_core()); let ext = DynamicMaxBlockWeight::>::new(()); @@ -261,7 +256,7 @@ mod benchmarks { .unwrap(); } - assert_eq!(crate::BlockWeightMode::::get().unwrap(), BlockWeightMode::FullCore); + assert_eq!(crate::BlockWeightMode::::get().unwrap(), BlockWeightMode::full_core()); Ok(()) } diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index 15270b21098b4..346bd0150b6e7 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -130,12 +130,12 @@ impl BlockWeightMode { } /// Create a new instance of `Self::FullCore`. - fn full_core() -> Self { + pub(crate) fn full_core() -> Self { Self::FullCore { context: frame_system::Pallet::::block_number() } } /// Create new instance of `Self::FractionOfCore`. 
- fn fraction_of_core(first_transaction_index: Option) -> Self { + pub(crate) fn fraction_of_core(first_transaction_index: Option) -> Self { Self::FractionOfCore { context: frame_system::Pallet::::block_number(), first_transaction_index, @@ -143,7 +143,7 @@ impl BlockWeightMode { } /// Create new instance of `Self::PotentialFullCore`. - fn potential_full_core(first_transaction_index: Option, target_weight: Weight) -> Self { + pub(crate) fn potential_full_core(first_transaction_index: Option, target_weight: Weight) -> Self { Self::PotentialFullCore { context: frame_system::Pallet::::block_number(), first_transaction_index, diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 3ed529a3ce728..e2f23f8dfb61a 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -786,6 +786,8 @@ pub mod pallet { /// /// This is used to determine what is the maximum allowed block weight, for more information see /// [`block_weight`]. + /// + /// Killed in [`Self::on_initialize`] and set by the [`block_weight`] logic. 
#[pallet::storage] #[pallet::whitelist_storage] pub type BlockWeightMode = From 8901237670f58f3f908355133b630dbb68c7ecfe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 14 Nov 2025 16:43:26 +0100 Subject: [PATCH 165/312] Fix benchmark --- cumulus/pallets/parachain-system/src/benchmarking.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cumulus/pallets/parachain-system/src/benchmarking.rs b/cumulus/pallets/parachain-system/src/benchmarking.rs index d417029ab1120..ebe3c66785f52 100644 --- a/cumulus/pallets/parachain-system/src/benchmarking.rs +++ b/cumulus/pallets/parachain-system/src/benchmarking.rs @@ -212,6 +212,8 @@ mod benchmarks { fn block_weight_tx_extension_full_core() -> Result<(), BenchmarkError> { let caller = account("caller", 0, 0); + frame_system::Pallet::::set_block_number(1u32.into()); + frame_system::Pallet::::note_inherents_applied(); frame_system::Pallet::::set_extrinsic_index(1); From ef278586bbd610bf2408de3e0180bb6fa684cb88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 14 Nov 2025 16:44:45 +0100 Subject: [PATCH 166/312] FMT --- cumulus/pallets/parachain-system/src/block_weight/mod.rs | 5 ++++- .../src/block_weight/transaction_extension.rs | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index 346bd0150b6e7..3afc89d7487ad 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -143,7 +143,10 @@ impl BlockWeightMode { } /// Create new instance of `Self::PotentialFullCore`. 
- pub(crate) fn potential_full_core(first_transaction_index: Option, target_weight: Weight) -> Self { + pub(crate) fn potential_full_core( + first_transaction_index: Option, + target_weight: Weight, + ) -> Self { Self::PotentialFullCore { context: frame_system::Pallet::::block_number(), first_transaction_index, diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index 8e59a080a4e4d..9acf46f37a975 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -16,7 +16,7 @@ use super::{ block_weight_over_target_block_weight, is_first_block_in_core_with_digest, BlockWeightMode, - MaxParachainBlockWeight, LOG_TARGET, FULL_CORE_WEIGHT + MaxParachainBlockWeight, FULL_CORE_WEIGHT, LOG_TARGET, }; use crate::WeightInfo; use alloc::vec::Vec; From cfefd6fddc77e73a7ecaeb5e6e40eba00dc36e33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 17 Nov 2025 22:49:15 +0100 Subject: [PATCH 167/312] Fixes --- cumulus/client/collator/src/service.rs | 35 ++++++++++--------- cumulus/client/consensus/aura/src/collator.rs | 2 +- cumulus/client/service/src/lib.rs | 1 - .../src/validate_block/tests.rs | 5 --- .../tests/zombie_ci/block_bundling/basic.rs | 4 +-- 5 files changed, 22 insertions(+), 25 deletions(-) diff --git a/cumulus/client/collator/src/service.rs b/cumulus/client/collator/src/service.rs index c714db431eb81..f8de89396a568 100644 --- a/cumulus/client/collator/src/service.rs +++ b/cumulus/client/collator/src/service.rs @@ -36,8 +36,7 @@ use polkadot_node_primitives::{ use codec::Encode; use futures::channel::oneshot; use parking_lot::Mutex; -use std::{collections::HashSet, sync::Arc}; - +use std::sync::Arc; /// The logging target. 
const LOG_TARGET: &str = "cumulus-collator"; @@ -244,7 +243,7 @@ where let mut api_version = 0; let mut upward_messages = Vec::new(); - let mut upward_message_signals = HashSet::>::with_capacity(4); + let mut upward_message_signals = Vec::>::with_capacity(4); let mut horizontal_messages = Vec::new(); let mut new_validation_code = None; let mut processed_downward_messages = 0; @@ -270,7 +269,7 @@ where // We are always using the `api_version` of the parent block. The `api_version` can only // change with a runtime upgrade and this is when we want to observe the old // `api_version`. Because this old `api_version` is the one used to validate this - // block. Otherwise we already assume the `api_version` is higher than what the relay + // block. Otherwise, we already assume the `api_version` is higher than what the relay // chain will use and this will lead to validation errors. api_version = self .runtime_api @@ -279,18 +278,22 @@ where .ok() .flatten()?; - collation_info - .upward_messages - .iter() - .rev() - .take_while(|m| **m != UMP_SEPARATOR) - .for_each(|s| { - upward_message_signals.insert(s.clone()); - }); - - upward_messages.extend( - collation_info.upward_messages.into_iter().take_while(|m| *m != UMP_SEPARATOR), - ); + let mut found_separator = false; + upward_messages.extend(collation_info.upward_messages.into_iter().filter_map(|m| { + // Filter out the `UMP_SEPARATOR` and the `UMPSignals`. 
+ if m == UMP_SEPARATOR { + found_separator = true; + None + } else if found_separator { + if upward_message_signals.iter().all(|s| *s != m) { + upward_message_signals.push(m); + } + None + } else { + // No signal or separator + Some(m) + } + })); horizontal_messages.extend(collation_info.horizontal_messages); new_validation_code = new_validation_code.take().or(collation_info.new_validation_code); processed_downward_messages += collation_info.processed_downward_messages; diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index e3c7491dcc732..72365e788a477 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -167,7 +167,7 @@ where } /// Explicitly creates the inherent data for parachain block authoring and overrides - /// the timestamp inherent data with the one provided, if any. Additionally allows to specify + /// the timestamp inherent data with the one provided, if any. Additionally, allows to specify /// relay parent descendants that can be used to prevent authoring at the tip of the relay /// chain. 
pub async fn create_inherent_data_with_rp_offset( diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index da8eb51a5ec8b..c7ff377140432 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -60,7 +60,6 @@ use sp_trie::proof_size_extension::{ ProofSizeExt, RecordedProofSizeEstimations, ReplayProofSizeProvider, }; use std::{ - collections::VecDeque, sync::Arc, time::{Duration, Instant}, }; diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 023fa1bf4e3e7..c5ef1f0a28bd1 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -244,12 +244,7 @@ fn build_multiple_blocks_with_witness( }) .unwrap(); -<<<<<<< HEAD let proof_new = proof_recorder.drain_storage_proof(); -||||||| cc820273ae9 -======= - let proof_new = built_block.proof.unwrap(); ->>>>>>> origin/bkchr-parachain-block-weight ignored_nodes.extend(IgnoredNodes::from_storage_proof::(&proof_new)); ignored_nodes.extend(IgnoredNodes::from_memory_db(built_block.storage_changes.transaction)); diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs index 5447d8570918e..f998a7423d2de 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs @@ -60,7 +60,7 @@ async fn block_bundling_basic() -> Result<(), anyhow::Error> { &relay_client, 6, [(ParaId::from(PARA_ID), 4..7)], - [(ParaId::from(PARA_ID), (para_client.clone(), 48..73))], + [(ParaId::from(PARA_ID), (para_client.clone(), 44..73))], ) .await?; // 6 relay chain blocks @@ -152,7 +152,7 @@ async fn build_network_config() -> Result { .with_default_args(vec![ ("--authoring").into(), ("slot-based").into(), - 
("-lparachain=debug,aura=trace").into(), + ("-lparachain=trace,aura=trace").into(), ]) .with_collator(|n| n.with_name("collator-0")) .with_collator(|n| n.with_name("collator-1")) From 345cd46dfec3dc3334523b3456127fb0deb74551 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 18 Nov 2025 11:09:34 +0100 Subject: [PATCH 168/312] Fix basic test --- .../zombienet-sdk-helpers/src/lib.rs | 27 +++++++++++++++++ .../tests/zombie_ci/block_bundling/basic.rs | 30 ++++--------------- 2 files changed, 32 insertions(+), 25 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index fae235a9f5ab2..ea3137ae9b7a9 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -843,6 +843,33 @@ pub async fn runtime_upgrade( .await } +/// Assigns the given `cores` to the given `para_id`. +/// +/// Zombienet by default adds extra core for each registered parachain additionally to the one +/// requested by `num_cores`. It then assigns the parachains to the extra cores allocated at the +/// end. So, the passed core indices should be counted from zero. +/// +/// # Example +/// +/// Genesis patch: +/// ```json +/// "configuration": { +/// "config": { +/// "scheduler_params": { +/// "num_cores": 2, +/// } +/// } +/// } +/// ``` +/// +/// Runs the relay chain with `2` cores and we also add two parachains. +/// To assign these extra `2` cores, the call would look like this: +/// +/// ```rust +/// assign_core(&node, PARA_ID, vec![0, 1]) +/// ``` +/// +/// The cores `2` and `3` are assigned to the parachains by Zombienet. 
pub async fn assign_cores( node: &NetworkNode, para_id: u32, diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs index f998a7423d2de..380a32acb3fb4 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs @@ -36,7 +36,7 @@ use zombienet_sdk::{ const PARA_ID: u32 = 2400; -/// A test that ensures that PoV bundling works. +/// A test that ensures that `PoV` bundling works. /// /// Initially, one core is assigned. We expect the parachain to produce 12 block per relay core. /// As we increase the number of cores via `assign_core`, we expect the blocks to spread over the @@ -66,44 +66,24 @@ async fn block_bundling_basic() -> Result<(), anyhow::Error> { // 6 relay chain blocks assert_finality_lag(¶_client, 72).await?; - let assign_cores_call = create_assign_core_call(&[(0, PARA_ID), (1, PARA_ID)]); - - relay_client - .tx() - .sign_and_submit_then_watch_default(&assign_cores_call, &dev::alice()) - .await - .inspect(|_| log::info!("Tx send, waiting for finalization"))? 
- .wait_for_finalized_success() - .await?; - log::info!("2 more cores assigned to the parachain"); - - let res = submit_extrinsic_and_wait_for_finalization_success_with_timeout( - &relay_client, - &assign_cores_call, - &dev::alice(), - 60u64, - ) - .await; - assert!(res.is_ok(), "Extrinsic failed to finalize: {:?}", res.unwrap_err()); - log::info!("2 more cores assigned to each parachain"); - assign_cores(relay_node, PARA_ID, vec![2, 3]).await?; + assign_cores(relay_node, PARA_ID, vec![0, 1]).await?; assert_para_throughput( &relay_client, 6, [(ParaId::from(PARA_ID), 12..19)], - [(ParaId::from(PARA_ID), (para_client.clone(), 48..73))], + [(ParaId::from(PARA_ID), (para_client.clone(), 44..73))], ) .await?; assert_finality_lag(¶_client, 72).await?; - assign_cores(relay_node, PARA_ID, vec![4, 5, 6]).await?; + assign_cores(relay_node, PARA_ID, vec![2, 3, 4]).await?; assert_para_throughput( &relay_client, 6, [(ParaId::from(PARA_ID), 24..37)], - [(ParaId::from(PARA_ID), (para_client.clone(), 48..73))], + [(ParaId::from(PARA_ID), (para_client.clone(), 44..73))], ) .await?; From 0c514fccb3075229f8dba965735d8b3258995f6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 18 Nov 2025 12:11:10 +0100 Subject: [PATCH 169/312] Fix glutton tests --- .../zombienet-sdk-helpers/src/lib.rs | 6 +++--- .../block_bundling/three_cores_glutton.rs | 21 +++++-------------- substrate/frame/glutton/src/lib.rs | 7 +++++++ 3 files changed, 15 insertions(+), 19 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index ea3137ae9b7a9..b7c8845fa251c 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -866,12 +866,12 @@ pub async fn runtime_upgrade( /// To assign these extra `2` cores, the call would look like this: /// /// ```rust -/// assign_core(&node, PARA_ID, vec![0, 1]) +/// assign_core(&relay_node, PARA_ID, vec![0, 1]) /// 
``` /// /// The cores `2` and `3` are assigned to the parachains by Zombienet. pub async fn assign_cores( - node: &NetworkNode, + relay_node: &NetworkNode, para_id: u32, cores: Vec, ) -> Result<(), anyhow::Error> { @@ -880,7 +880,7 @@ pub async fn assign_cores( let assign_cores_call = create_assign_core_call(&cores.into_iter().map(|core| (core, para_id)).collect::>()); - let client: OnlineClient = node.wait_client().await?; + let client: OnlineClient = relay_node.wait_client().await?; let res = submit_extrinsic_and_wait_for_finalization_success_with_timeout( &client, &assign_cores_call, diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs index 1fd1bd24ad702..61f0f5150229e 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs @@ -17,9 +17,7 @@ use anyhow::anyhow; -use cumulus_zombienet_sdk_helpers::{ - assert_finality_lag, assert_para_throughput, create_assign_core_call, -}; +use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_para_throughput, assign_cores}; use polkadot_primitives::Id as ParaId; use serde_json::json; use zombienet_sdk::{ @@ -56,16 +54,7 @@ async fn block_bundling_three_cores_glutton() -> Result<(), anyhow::Error> { let alice = dev::alice(); // Assign cores 0 and 1 to start with 3 cores total (core 2 is assigned by Zombienet) - let assign_cores_call = create_assign_core_call(&[(0, PARA_ID), (1, PARA_ID)]); - - relay_client - .tx() - .sign_and_submit_then_watch_default(&assign_cores_call, &alice) - .await - .inspect(|_| log::info!("Tx send, waiting for finalization"))? 
- .wait_for_finalized_success() - .await?; - log::info!("3 cores total assigned to the parachain"); + assign_cores(&relay_node, PARA_ID, vec![0, 1]).await; // Wait for the parachain to produce 72 blocks with 3 cores and glutton active // With 3 cores, we expect roughly 3x throughput compared to single core @@ -100,7 +89,7 @@ async fn build_network_config() -> Result { "configuration": { "config": { "scheduler_params": { - "num_cores": 3, + "num_cores": 2, "max_validators_per_core": 1 } } @@ -117,11 +106,11 @@ async fn build_network_config() -> Result { .with_default_args(vec![ ("--authoring").into(), ("slot-based").into(), - ("-lparachain=debug,aura=trace").into(), + ("-lparachain=debug,aura=trace,runtime=trace").into(), ]) .with_genesis_overrides(json!({ "glutton": { - "compute": "800000000", // 80% ref time consumption + "compute": "200000000", // 20% ref time consumption "storage": "0", // No storage consumption "trashDataCount": 5000, // Initialize with some trash data "blockLength": "0" // No block length consumption diff --git a/substrate/frame/glutton/src/lib.rs b/substrate/frame/glutton/src/lib.rs index 85608ad25e727..b54062dafe608 100644 --- a/substrate/frame/glutton/src/lib.rs +++ b/substrate/frame/glutton/src/lib.rs @@ -47,6 +47,8 @@ use sp_runtime::{traits::Zero, FixedPointNumber, FixedU64}; pub use pallet::*; pub use weights::WeightInfo; +const LOG_TARGET: &str = "runtime::glutton"; + /// The size of each value in the `TrashData` storage in bytes. pub const VALUE_SIZE: usize = 1024; /// Max number of entries for the `TrashData` map. 
@@ -207,6 +209,8 @@ pub mod pallet { } fn on_idle(_: BlockNumberFor, remaining_weight: Weight) -> Weight { + log::debug!(target: LOG_TARGET, "Running `on_idle` with remaining weight: {remaining_weight:?}"); + let mut meter = WeightMeter::with_limit(remaining_weight); if meter.try_consume(T::WeightInfo::empty_on_idle()).is_err() { return T::WeightInfo::empty_on_idle() @@ -216,6 +220,9 @@ pub mod pallet { Storage::::get().saturating_mul_int(meter.remaining().proof_size()); let computation_weight_limit = Compute::::get().saturating_mul_int(meter.remaining().ref_time()); + + log::debug!(target: LOG_TARGET, "Going to waste: proof_size {proof_size_limit:?}; compute {computation_weight_limit:?}"); + let mut meter = WeightMeter::with_limit(Weight::from_parts( computation_weight_limit, proof_size_limit, From 9d1924e974990f6ac688d2610288afddaaffcc5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 18 Nov 2025 21:05:04 +0100 Subject: [PATCH 170/312] Fix glutton --- .../zombie_ci/block_bundling/full_core_usage_scenarios.rs | 4 ++-- .../tests/zombie_ci/block_bundling/three_cores_glutton.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs index 0cdf01681b21c..33313b89bedb3 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs @@ -101,7 +101,7 @@ async fn block_bundling_full_core_usage_scenarios() -> Result<(), anyhow::Error> let third_call = create_schedule_weight_registration_call(); let sudo_third_call = create_sudo_call(third_call); - log::info!("Testing scenario 3: Enabling an inherent that will use 1s ref time"); + log::info!("Testing scenario 5: Enabling `on_initialize` to use 1s ref time"); let 
block_hash = submit_extrinsic_and_wait_for_finalization_success(¶_client, &sudo_third_call, &alice) .await?; @@ -112,7 +112,7 @@ async fn block_bundling_full_core_usage_scenarios() -> Result<(), anyhow::Error> let inherent_weight_call = create_set_inherent_weight_consume_call(ref_time_1s, 0); let sudo_inherent_weight_call = create_sudo_call(inherent_weight_call); - log::info!("Testing scenario 4: Enabling `on_initialize` to use 1s ref time"); + log::info!("Testing scenario 4: Enabling an inherent that will use 1s ref time"); let block_hash = submit_extrinsic_and_wait_for_finalization_success( ¶_client, &sudo_inherent_weight_call, diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs index 61f0f5150229e..8cbcc65d58966 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs @@ -54,7 +54,7 @@ async fn block_bundling_three_cores_glutton() -> Result<(), anyhow::Error> { let alice = dev::alice(); // Assign cores 0 and 1 to start with 3 cores total (core 2 is assigned by Zombienet) - assign_cores(&relay_node, PARA_ID, vec![0, 1]).await; + assign_cores(&relay_node, PARA_ID, vec![0, 1]).await?; // Wait for the parachain to produce 72 blocks with 3 cores and glutton active // With 3 cores, we expect roughly 3x throughput compared to single core From 6a530f2b64b7e8061c7376d2db92fc4f49560f56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 18 Nov 2025 21:05:38 +0100 Subject: [PATCH 171/312] Support full core weight for pre transactions --- .../parachain-system/src/block_weight/mod.rs | 8 ++- .../src/block_weight/tests.rs | 56 ++++++++++++++++--- .../src/block_weight/transaction_extension.rs | 7 +-- 3 files changed, 57 insertions(+), 14 deletions(-) diff --git 
a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index 3afc89d7487ad..bf1ef97b78763 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -220,17 +220,19 @@ impl> Get target_block_weight }; - match crate::BlockWeightMode::::get() { + match crate::BlockWeightMode::::get().filter(|m| !m.is_stale()) { // We allow the full core. Some( BlockWeightMode::::FullCore { .. } | BlockWeightMode::::PotentialFullCore { .. }, ) => FULL_CORE_WEIGHT, // Let's calculate below how much weight we can use. - Some(BlockWeightMode::::FractionOfCore { .. }) => target_block_weight, + Some(BlockWeightMode::::FractionOfCore { first_transaction_index, .. }) + if first_transaction_index.is_some() => + target_block_weight, // If we are in `on_initialize` or at applying the inherents, we allow the maximum block // weight as allowed by the current context. - None => maybe_full_core_weight, + Some(BlockWeightMode::::FractionOfCore { .. }) | None => maybe_full_core_weight, } } } diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 3fdcb58476ae4..9a0cb06529db9 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -249,6 +249,8 @@ fn tx_extension_large_tx_enables_full_core_usage() { ..Default::default() }; + System::set_extrinsic_index(1); + assert_ok!(TxExtension::validate_and_prepare( TxExtension::new(Default::default()), SystemOrigin::Signed(0).into(), @@ -260,7 +262,7 @@ fn tx_extension_large_tx_enables_full_core_usage() { assert_matches!( crate::BlockWeightMode::::get(), - Some(BlockWeightMode::PotentialFullCore { first_transaction_index: Some(0), .. }) + Some(BlockWeightMode::PotentialFullCore { first_transaction_index: Some(1), .. 
}) ); let mut post_info = @@ -356,6 +358,8 @@ fn tx_extension_large_tx_with_refund_goes_back_to_fractional() { .execute_with(|| { initialize_block_finished(); + System::set_extrinsic_index(1); + // Create a transaction larger than target weight let target_weight = MaximumBlockWeight::target_block_weight(); let large_weight = target_weight @@ -378,7 +382,7 @@ fn tx_extension_large_tx_with_refund_goes_back_to_fractional() { assert_matches!( crate::BlockWeightMode::::get(), - Some(BlockWeightMode::PotentialFullCore { first_transaction_index: Some(0), .. }) + Some(BlockWeightMode::PotentialFullCore { first_transaction_index: Some(1), .. }) ); let mut post_info = PostDispatchInfo { @@ -526,7 +530,6 @@ fn tx_extension_large_tx_after_limit_is_rejected() { Some(BlockWeightMode::fraction_of_core(None)) ); assert!(!has_use_full_core_digest()); - assert_eq!(MaximumBlockWeight::get(), target_weight); }); } @@ -789,7 +792,7 @@ fn executive_with_operational_only_applies_big_inherent() { Default::default(), Default::default(), Default::default(), - Default::default(), + System::digest(), )); let call = @@ -824,10 +827,10 @@ fn block_weight_mode_from_previous_block_is_ignored_in_validate_block() { Default::default(), Default::default(), Default::default(), - Default::default(), + System::digest(), )); - assert!(ExecutiveOnlyOperational::apply_extrinsic(xt).is_ok()); + assert_ok!(ExecutiveOnlyOperational::apply_extrinsic(xt)); ExecutiveOnlyOperational::finalize_block(); @@ -871,7 +874,7 @@ fn ongoin_mbm_requests_full_core() { Default::default(), Default::default(), Default::default(), - Default::default(), + System::digest(), )); assert_eq!( @@ -885,3 +888,42 @@ fn ongoin_mbm_requests_full_core() { MbmOngoing::set(false); }); } + +#[test] +fn ignores_previous_block_weight_in_on_initialize() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + crate::BlockWeightMode::::put( + BlockWeightMode::fraction_of_core(None), + ); + + 
// Start a new block + System::set_block_number(1); + + assert_eq!(MaximumBlockWeight::get(), FULL_CORE_WEIGHT); + }); +} + +#[test] +fn full_core_weight_in_inherent_context() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + Executive::initialize_block(&Header::new( + 1, + Default::default(), + Default::default(), + Default::default(), + System::digest(), + )); + + assert!(!frame_system::Pallet::::inherents_applied()); + + assert_eq!(MaximumBlockWeight::get(), FULL_CORE_WEIGHT); + }); +} diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index 9acf46f37a975..34e0861236550 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -142,7 +142,7 @@ where match current_mode { // We are already allowing the full core, not that much more to do here. - BlockWeightMode::::FullCore { ..} => {}, + BlockWeightMode::::FullCore { .. } => {}, BlockWeightMode::::PotentialFullCore { first_transaction_index, .. } | BlockWeightMode::::FractionOfCore { first_transaction_index, .. } => { debug_assert!( @@ -155,9 +155,8 @@ where && block_weight_over_target_block_weight::(); let block_weights = Config::BlockWeights::get(); - let target_weight = block_weights.get(info.class).max_total.unwrap_or_else( - || MaxParachainBlockWeight::::target_block_weight_with_digest(&digest).saturating_sub(block_weights.base_block) - ); + let target_weight = MaxParachainBlockWeight::::target_block_weight_with_digest(&digest) + .saturating_sub(block_weights.base_block); // Protection against a misconfiguration as this should be detected by the pre-inherent hook. 
if block_weight_over_limit { From 106baf0b64bace1bbe57c17199ed3636ac9c088f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 19 Nov 2025 13:53:53 +0100 Subject: [PATCH 172/312] Fix unsigned transactions --- .../parachain-system/src/block_weight/mock.rs | 14 ++ .../src/block_weight/tests.rs | 132 ++++++++++-------- .../src/block_weight/transaction_extension.rs | 2 + 3 files changed, 92 insertions(+), 56 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index 13dc037b4652f..c485af0935e5b 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -166,6 +166,20 @@ pub mod test_pallet { matches!(call, Call::heavy_call_mandatory {}) } } + + #[pallet::validate_unsigned] + impl sp_runtime::traits::ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + Ok(ValidTransaction { + priority: u64::max_value(), + requires: Vec::new(), + provides: vec![call.encode()], + longevity: TransactionLongevity::max_value(), + propagate: true, + }) + } + } } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 9a0cb06529db9..68fccca3249a0 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -710,74 +710,94 @@ fn ref_time_and_pov_size_cap() { #[test] fn executive_validate_block_handles_normal_transactions() { - TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { - let call = RuntimeCall::TestPallet(test_pallet::Call::heavy_call_normal {}); - - let xt = Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), Default::default()); + for signed in [true, false] 
{ + TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { + let call = RuntimeCall::TestPallet(test_pallet::Call::heavy_call_normal {}); + + let xt = if signed { + Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), Default::default()) + } else { + Extrinsic::new_bare(call) + }; - assert!(Executive::validate_transaction( - TransactionSource::External, - xt.clone(), - Default::default() - ) - .is_ok()); - }); + assert_ok!(Executive::validate_transaction( + TransactionSource::External, + xt.clone(), + Default::default() + )); + }); - TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { - let call = RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::heavy_call_normal {}); + TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { + let call = + RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::heavy_call_normal {}); - let xt = ExtrinsicOnlyOperational::new_signed( - call, - 1u64.into(), - 1u64.into(), - Default::default(), - ); + let xt = if signed { + ExtrinsicOnlyOperational::new_signed( + call, + 1u64.into(), + 1u64.into(), + Default::default(), + ) + } else { + ExtrinsicOnlyOperational::new_bare(call) + }; - assert_eq!( - ExecutiveOnlyOperational::validate_transaction( - TransactionSource::External, - xt, - Default::default() - ) - .unwrap_err(), - InvalidTransaction::ExhaustsResources.into() - ); - }); + assert_eq!( + ExecutiveOnlyOperational::validate_transaction( + TransactionSource::External, + xt, + Default::default() + ) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() + ); + }); + } } #[test] fn executive_validate_block_handles_operational_transactions() { - TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { - let call = RuntimeCall::TestPallet(test_pallet::Call::heavy_call_operational {}); - - let xt = Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), Default::default()); + for signed in [true, false] { + 
TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { + let call = RuntimeCall::TestPallet(test_pallet::Call::heavy_call_operational {}); + + let xt = if signed { + Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), Default::default()) + } else { + Extrinsic::new_bare(call) + }; - assert!(Executive::validate_transaction( - TransactionSource::External, - xt.clone(), - Default::default() - ) - .is_ok()); - }); + assert_ok!(Executive::validate_transaction( + TransactionSource::External, + xt.clone(), + Default::default() + )); + }); - TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { - let call = - RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::heavy_call_operational {}); + TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { + let call = RuntimeCallOnlyOperational::TestPallet( + test_pallet::Call::heavy_call_operational {}, + ); - let xt = ExtrinsicOnlyOperational::new_signed( - call, - 1u64.into(), - 1u64.into(), - Default::default(), - ); + let xt = if signed { + ExtrinsicOnlyOperational::new_signed( + call, + 1u64.into(), + 1u64.into(), + Default::default(), + ) + } else { + ExtrinsicOnlyOperational::new_bare(call) + }; - assert!(ExecutiveOnlyOperational::validate_transaction( - TransactionSource::External, - xt, - Default::default() - ) - .is_ok()); - }); + assert!(ExecutiveOnlyOperational::validate_transaction( + TransactionSource::External, + xt, + Default::default() + ) + .is_ok()); + }); + } } #[test] diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index 34e0861236550..fa218dbb353e0 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -472,6 +472,8 @@ where info: &DispatchInfoOf, len: usize, ) -> frame_support::pallet_prelude::TransactionValidity { + 
Self::pre_validate_extrinsic(info, len)?; + Inner::bare_validate(call, info, len) } From 923b86897c47ae0fa563f5af6d6b85c3e49f85cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 19 Nov 2025 13:59:01 +0100 Subject: [PATCH 173/312] Fix runtime upgrade --- .../zombienet/zombienet-sdk-helpers/src/lib.rs | 2 ++ .../block_bundling/runtime_upgrade.rs | 18 +++++------------- 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index b7c8845fa251c..c665217c9a9cf 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -610,6 +610,8 @@ pub async fn submit_unsigned_extrinsic_and_wait_for_finalization_success( async fn submit_tx_and_wait_for_finalization( tx: SubmittableTransaction>, ) -> Result { + log::info!("Submitting transaction: {:?}", tx.hash()); + let mut tx = tx.submit_and_watch().await?; // Below we use the low level API to replicate the `wait_for_in_block` behavior diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs index 0187f8cb33182..165d6b668bf51 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs @@ -19,9 +19,8 @@ use anyhow::anyhow; use cumulus_primitives_core::relay_chain::MAX_POV_SIZE; use cumulus_test_runtime::wasm_spec_version_incremented::WASM_BINARY_BLOATY as WASM_RUNTIME_UPGRADE; use cumulus_zombienet_sdk_helpers::{ - assert_finality_lag, assert_para_throughput, create_assign_core_call, - ensure_is_only_block_in_core, find_core_info, - submit_extrinsic_and_wait_for_finalization_success, + assert_finality_lag, assert_para_throughput, assign_cores, ensure_is_only_block_in_core, + find_core_info, 
submit_extrinsic_and_wait_for_finalization_success, submit_unsigned_extrinsic_and_wait_for_finalization_success, BlockToCheck, }; use polkadot_primitives::Id as ParaId; @@ -85,15 +84,8 @@ async fn block_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { let alice = dev::alice(); // Assign cores 0 and 1 to start with 3 cores total (core 2 is assigned by Zombienet) - let assign_cores_call = create_assign_core_call(&[(0, PARA_ID), (1, PARA_ID)]); - - relay_client - .tx() - .sign_and_submit_then_watch_default(&assign_cores_call, &alice) - .await - .inspect(|_| log::info!("Tx send, waiting for finalization"))? - .wait_for_finalized_success() - .await?; + assign_cores(&relay_node, PARA_ID, vec![0, 1]).await; + log::info!("3 cores total assigned to the parachain"); // Step 1: Authorize the runtime upgrade @@ -183,7 +175,7 @@ async fn build_network_config() -> Result { .with_default_args(vec![ ("--authoring").into(), ("slot-based").into(), - ("-lparachain=debug,aura=trace,basic-authorship=trace,runtime=trace").into(), + ("-lparachain=debug,aura=trace,basic-authorship=trace,runtime=trace,txpool=trace").into(), ]) .with_collator(|n| n.with_name("collator-0")) .with_collator(|n| n.with_name("collator-1")) From 76ca54bf6b000ec9064021bde2c9814c7f51c2ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 19 Nov 2025 16:15:58 +0100 Subject: [PATCH 174/312] Respect the dispatch class max weight --- .../parachain-system/src/block_weight/mock.rs | 8 ++- .../parachain-system/src/block_weight/mod.rs | 23 +++++-- .../src/block_weight/tests.rs | 61 +++++++++++++++++++ .../src/block_weight/transaction_extension.rs | 21 +++++-- 4 files changed, 102 insertions(+), 11 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index c485af0935e5b..0bb3c5f643d35 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ 
b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -75,6 +75,7 @@ pub type BlockOnlyOperational = generic::Block< >; pub const TARGET_BLOCK_RATE: u32 = 12; +pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); #[docify::export(tx_extension_setup)] pub type TxExtension = DynamicMaxBlockWeight< @@ -108,7 +109,7 @@ mod max_block_weight_setup { weights.base_extrinsic = ExtrinsicBaseWeight::get(); }) .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(MaximumBlockWeight::get()); + weights.max_total = Some(MaximumBlockWeight::get() * NORMAL_DISPATCH_RATIO); }) .for_class(DispatchClass::Operational, |weights| { weights.max_total = Some(MaximumBlockWeight::get()); @@ -150,6 +151,11 @@ pub mod test_pallet { pub fn heavy_call_mandatory(_: OriginFor) -> DispatchResult { Ok(()) } + + #[pallet::weight((_weight.clone(), DispatchClass::Normal))] + pub fn use_weight(_: OriginFor, _weight: Weight) -> DispatchResult { + Ok(()) + } } #[pallet::inherent] diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index bf1ef97b78763..8008b63eb5fa3 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -56,6 +56,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::BlockNumberFor; use polkadot_primitives::MAX_POV_SIZE; +use polkadot_runtime_parachains::inclusion::migration::v0::PendingAvailabilityCommitments_Storage_Instance; use scale_info::TypeInfo; use sp_core::Get; use sp_runtime::Digest; @@ -77,6 +78,9 @@ const MAX_REF_TIME_PER_CORE_NS: u64 = 2 * WEIGHT_REF_TIME_PER_SECOND; pub(crate) const FULL_CORE_WEIGHT: Weight = Weight::from_parts(MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); +// Is set to `true` when we are currently inside of `pre_validate_extrinsic`. +environmental::environmental!(inside_pre_validate: bool); + /// The current block weight mode. 
/// /// Based on this mode [`MaxParachainBlockWeight`] determines the current allowed block weight. @@ -189,7 +193,7 @@ impl> // At maximum we want to allow `6s` of ref time, because we don't want to overload nodes // that are running with standard hardware. These nodes need to be able to import all the - // blocks in 6s. + // blocks in `6s`. let total_ref_time = (number_of_cores as u64) .saturating_mul(MAX_REF_TIME_PER_CORE_NS) .min(WEIGHT_REF_TIME_PER_SECOND * 6); @@ -220,6 +224,13 @@ impl> Get target_block_weight }; + // Check if we are inside `pre_validate_extrinsic` of the transaction extension. + // + // When `pre_validate_extrinsic` calls this code, it is interested to know the + // `target_block_weight` which is then used to calculate the weight for each dispatch class. + // If `FullCore` mode is already enabled, the target weight is not important anymore. + let in_pre_validate = inside_pre_validate::with(|v| *v).unwrap_or(false); + match crate::BlockWeightMode::::get().filter(|m| !m.is_stale()) { // We allow the full core. Some( @@ -228,11 +239,13 @@ impl> Get ) => FULL_CORE_WEIGHT, // Let's calculate below how much weight we can use. Some(BlockWeightMode::::FractionOfCore { first_transaction_index, .. }) - if first_transaction_index.is_some() => + if first_transaction_index.is_some() && !in_pre_validate => target_block_weight, - // If we are in `on_initialize` or at applying the inherents, we allow the maximum block - // weight as allowed by the current context. - Some(BlockWeightMode::::FractionOfCore { .. }) | None => maybe_full_core_weight, + // If we are in `on_initialize`, at applying the inherents or before applying the first + // transaction, we allow the maximum block weight as allowed by the current context. + Some(BlockWeightMode::::FractionOfCore { .. 
}) | None if !in_pre_validate => + maybe_full_core_weight, + _ => target_block_weight, } } } diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 68fccca3249a0..1ab5c14b2910d 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -947,3 +947,64 @@ fn full_core_weight_in_inherent_context() { assert_eq!(MaximumBlockWeight::get(), FULL_CORE_WEIGHT); }); } + +#[test] +fn executive_validate_transaction_respects_dispatch_class_max_block_size() { + // Create some weight which is slightly above the allowed dispatch class max size. + let call_weight = TestExtBuilder::new().previous_core_count(4).build().execute_with(|| { + MaximumBlockWeight::target_block_weight() * NORMAL_DISPATCH_RATIO + Weight::from_parts(1, 1) + }); + + for signed in [true, false] { + TestExtBuilder::new().previous_core_count(4).build().execute_with(|| { + assert!(::BlockWeights::get() + .get(DispatchClass::Normal) + .max_total + .unwrap() + .all_lt(call_weight)); + assert!(MaximumBlockWeight::target_block_weight().all_gt(call_weight)); + + let call = + RuntimeCall::TestPallet(test_pallet::Call::use_weight { weight: call_weight }); + + let xt = if signed { + Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), Default::default()) + } else { + Extrinsic::new_bare(call) + }; + + assert_ok!(Executive::validate_transaction( + TransactionSource::External, + xt.clone(), + Default::default() + )); + }); + + TestExtBuilder::new().previous_core_count(4).build().execute_with(|| { + let call = RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::use_weight { + weight: call_weight, + }); + + let xt = if signed { + ExtrinsicOnlyOperational::new_signed( + call, + 1u64.into(), + 1u64.into(), + Default::default(), + ) + } else { + ExtrinsicOnlyOperational::new_bare(call) + }; + + assert_eq!( + ExecutiveOnlyOperational::validate_transaction( + 
TransactionSource::External, + xt, + Default::default() + ) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() + ); + }); + } +} diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index fa218dbb353e0..017468130caf5 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -15,8 +15,8 @@ // limitations under the License. use super::{ - block_weight_over_target_block_weight, is_first_block_in_core_with_digest, BlockWeightMode, - MaxParachainBlockWeight, FULL_CORE_WEIGHT, LOG_TARGET, + block_weight_over_target_block_weight, inside_pre_validate, is_first_block_in_core_with_digest, + BlockWeightMode, MaxParachainBlockWeight, FULL_CORE_WEIGHT, LOG_TARGET, }; use crate::WeightInfo; use alloc::vec::Vec; @@ -154,9 +154,20 @@ where let block_weight_over_limit = extrinsic_index == 0 && block_weight_over_target_block_weight::(); - let block_weights = Config::BlockWeights::get(); - let target_weight = MaxParachainBlockWeight::::target_block_weight_with_digest(&digest) - .saturating_sub(block_weights.base_block); + // If `BlockWeights` is configured correctly, it will internally call `MaxParachainBlockWeight::get()` + // and by setting this variable to `true`, we tell it the context. This is important as we want to get + // the `target_block_weight` and not the full core weight. Otherwise, we will here get a too huge weight + // and do not set the `PotentialFullCore` weight, leading to `CheckWeight` rejecting the extrinsic. + // + // All of this is only important for extrinsics that will enable the `PotentialFullCore` mode. 
+ let block_weights = inside_pre_validate::using(&mut true, || Config::BlockWeights::get()); + let target_weight = block_weights + .get(info.class) + .max_total + .unwrap_or_else(|| + MaxParachainBlockWeight::::target_block_weight_with_digest(&digest) + .saturating_sub(block_weights.base_block) + ); // Protection against a misconfiguration as this should be detected by the pre-inherent hook. if block_weight_over_limit { From 066cd7c3d21742c97a9451b10052ed33ac9c6398 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 18 Nov 2025 21:05:38 +0100 Subject: [PATCH 175/312] Support full core weight for pre transactions --- .../parachain-system/src/block_weight/mod.rs | 8 ++- .../src/block_weight/tests.rs | 56 ++++++++++++++++--- .../src/block_weight/transaction_extension.rs | 7 +-- 3 files changed, 57 insertions(+), 14 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index 3afc89d7487ad..bf1ef97b78763 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -220,17 +220,19 @@ impl> Get target_block_weight }; - match crate::BlockWeightMode::::get() { + match crate::BlockWeightMode::::get().filter(|m| !m.is_stale()) { // We allow the full core. Some( BlockWeightMode::::FullCore { .. } | BlockWeightMode::::PotentialFullCore { .. }, ) => FULL_CORE_WEIGHT, // Let's calculate below how much weight we can use. - Some(BlockWeightMode::::FractionOfCore { .. }) => target_block_weight, + Some(BlockWeightMode::::FractionOfCore { first_transaction_index, .. }) + if first_transaction_index.is_some() => + target_block_weight, // If we are in `on_initialize` or at applying the inherents, we allow the maximum block // weight as allowed by the current context. - None => maybe_full_core_weight, + Some(BlockWeightMode::::FractionOfCore { .. 
}) | None => maybe_full_core_weight, } } } diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 3fdcb58476ae4..9a0cb06529db9 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -249,6 +249,8 @@ fn tx_extension_large_tx_enables_full_core_usage() { ..Default::default() }; + System::set_extrinsic_index(1); + assert_ok!(TxExtension::validate_and_prepare( TxExtension::new(Default::default()), SystemOrigin::Signed(0).into(), @@ -260,7 +262,7 @@ fn tx_extension_large_tx_enables_full_core_usage() { assert_matches!( crate::BlockWeightMode::::get(), - Some(BlockWeightMode::PotentialFullCore { first_transaction_index: Some(0), .. }) + Some(BlockWeightMode::PotentialFullCore { first_transaction_index: Some(1), .. }) ); let mut post_info = @@ -356,6 +358,8 @@ fn tx_extension_large_tx_with_refund_goes_back_to_fractional() { .execute_with(|| { initialize_block_finished(); + System::set_extrinsic_index(1); + // Create a transaction larger than target weight let target_weight = MaximumBlockWeight::target_block_weight(); let large_weight = target_weight @@ -378,7 +382,7 @@ fn tx_extension_large_tx_with_refund_goes_back_to_fractional() { assert_matches!( crate::BlockWeightMode::::get(), - Some(BlockWeightMode::PotentialFullCore { first_transaction_index: Some(0), .. }) + Some(BlockWeightMode::PotentialFullCore { first_transaction_index: Some(1), .. 
}) ); let mut post_info = PostDispatchInfo { @@ -526,7 +530,6 @@ fn tx_extension_large_tx_after_limit_is_rejected() { Some(BlockWeightMode::fraction_of_core(None)) ); assert!(!has_use_full_core_digest()); - assert_eq!(MaximumBlockWeight::get(), target_weight); }); } @@ -789,7 +792,7 @@ fn executive_with_operational_only_applies_big_inherent() { Default::default(), Default::default(), Default::default(), - Default::default(), + System::digest(), )); let call = @@ -824,10 +827,10 @@ fn block_weight_mode_from_previous_block_is_ignored_in_validate_block() { Default::default(), Default::default(), Default::default(), - Default::default(), + System::digest(), )); - assert!(ExecutiveOnlyOperational::apply_extrinsic(xt).is_ok()); + assert_ok!(ExecutiveOnlyOperational::apply_extrinsic(xt)); ExecutiveOnlyOperational::finalize_block(); @@ -871,7 +874,7 @@ fn ongoin_mbm_requests_full_core() { Default::default(), Default::default(), Default::default(), - Default::default(), + System::digest(), )); assert_eq!( @@ -885,3 +888,42 @@ fn ongoin_mbm_requests_full_core() { MbmOngoing::set(false); }); } + +#[test] +fn ignores_previous_block_weight_in_on_initialize() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + crate::BlockWeightMode::::put( + BlockWeightMode::fraction_of_core(None), + ); + + // Start a new block + System::set_block_number(1); + + assert_eq!(MaximumBlockWeight::get(), FULL_CORE_WEIGHT); + }); +} + +#[test] +fn full_core_weight_in_inherent_context() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + Executive::initialize_block(&Header::new( + 1, + Default::default(), + Default::default(), + Default::default(), + System::digest(), + )); + + assert!(!frame_system::Pallet::::inherents_applied()); + + assert_eq!(MaximumBlockWeight::get(), FULL_CORE_WEIGHT); + }); +} diff --git 
a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index 9acf46f37a975..34e0861236550 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -142,7 +142,7 @@ where match current_mode { // We are already allowing the full core, not that much more to do here. - BlockWeightMode::::FullCore { ..} => {}, + BlockWeightMode::::FullCore { .. } => {}, BlockWeightMode::::PotentialFullCore { first_transaction_index, .. } | BlockWeightMode::::FractionOfCore { first_transaction_index, .. } => { debug_assert!( @@ -155,9 +155,8 @@ where && block_weight_over_target_block_weight::(); let block_weights = Config::BlockWeights::get(); - let target_weight = block_weights.get(info.class).max_total.unwrap_or_else( - || MaxParachainBlockWeight::::target_block_weight_with_digest(&digest).saturating_sub(block_weights.base_block) - ); + let target_weight = MaxParachainBlockWeight::::target_block_weight_with_digest(&digest) + .saturating_sub(block_weights.base_block); // Protection against a misconfiguration as this should be detected by the pre-inherent hook. 
if block_weight_over_limit { From 0b98e3f39b0c27c57fae28f2fdc7d8aa841d07ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 19 Nov 2025 13:53:53 +0100 Subject: [PATCH 176/312] Fix unsigned transactions --- .../parachain-system/src/block_weight/mock.rs | 14 ++ .../src/block_weight/tests.rs | 132 ++++++++++-------- .../src/block_weight/transaction_extension.rs | 2 + 3 files changed, 92 insertions(+), 56 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index 13dc037b4652f..c485af0935e5b 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -166,6 +166,20 @@ pub mod test_pallet { matches!(call, Call::heavy_call_mandatory {}) } } + + #[pallet::validate_unsigned] + impl sp_runtime::traits::ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + Ok(ValidTransaction { + priority: u64::max_value(), + requires: Vec::new(), + provides: vec![call.encode()], + longevity: TransactionLongevity::max_value(), + propagate: true, + }) + } + } } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 9a0cb06529db9..68fccca3249a0 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -710,74 +710,94 @@ fn ref_time_and_pov_size_cap() { #[test] fn executive_validate_block_handles_normal_transactions() { - TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { - let call = RuntimeCall::TestPallet(test_pallet::Call::heavy_call_normal {}); - - let xt = Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), Default::default()); + for signed in [true, false] 
{ + TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { + let call = RuntimeCall::TestPallet(test_pallet::Call::heavy_call_normal {}); + + let xt = if signed { + Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), Default::default()) + } else { + Extrinsic::new_bare(call) + }; - assert!(Executive::validate_transaction( - TransactionSource::External, - xt.clone(), - Default::default() - ) - .is_ok()); - }); + assert_ok!(Executive::validate_transaction( + TransactionSource::External, + xt.clone(), + Default::default() + )); + }); - TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { - let call = RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::heavy_call_normal {}); + TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { + let call = + RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::heavy_call_normal {}); - let xt = ExtrinsicOnlyOperational::new_signed( - call, - 1u64.into(), - 1u64.into(), - Default::default(), - ); + let xt = if signed { + ExtrinsicOnlyOperational::new_signed( + call, + 1u64.into(), + 1u64.into(), + Default::default(), + ) + } else { + ExtrinsicOnlyOperational::new_bare(call) + }; - assert_eq!( - ExecutiveOnlyOperational::validate_transaction( - TransactionSource::External, - xt, - Default::default() - ) - .unwrap_err(), - InvalidTransaction::ExhaustsResources.into() - ); - }); + assert_eq!( + ExecutiveOnlyOperational::validate_transaction( + TransactionSource::External, + xt, + Default::default() + ) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() + ); + }); + } } #[test] fn executive_validate_block_handles_operational_transactions() { - TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { - let call = RuntimeCall::TestPallet(test_pallet::Call::heavy_call_operational {}); - - let xt = Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), Default::default()); + for signed in [true, false] { + 
TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { + let call = RuntimeCall::TestPallet(test_pallet::Call::heavy_call_operational {}); + + let xt = if signed { + Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), Default::default()) + } else { + Extrinsic::new_bare(call) + }; - assert!(Executive::validate_transaction( - TransactionSource::External, - xt.clone(), - Default::default() - ) - .is_ok()); - }); + assert_ok!(Executive::validate_transaction( + TransactionSource::External, + xt.clone(), + Default::default() + )); + }); - TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { - let call = - RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::heavy_call_operational {}); + TestExtBuilder::new().previous_core_count(3).build().execute_with(|| { + let call = RuntimeCallOnlyOperational::TestPallet( + test_pallet::Call::heavy_call_operational {}, + ); - let xt = ExtrinsicOnlyOperational::new_signed( - call, - 1u64.into(), - 1u64.into(), - Default::default(), - ); + let xt = if signed { + ExtrinsicOnlyOperational::new_signed( + call, + 1u64.into(), + 1u64.into(), + Default::default(), + ) + } else { + ExtrinsicOnlyOperational::new_bare(call) + }; - assert!(ExecutiveOnlyOperational::validate_transaction( - TransactionSource::External, - xt, - Default::default() - ) - .is_ok()); - }); + assert!(ExecutiveOnlyOperational::validate_transaction( + TransactionSource::External, + xt, + Default::default() + ) + .is_ok()); + }); + } } #[test] diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index 34e0861236550..fa218dbb353e0 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -472,6 +472,8 @@ where info: &DispatchInfoOf, len: usize, ) -> frame_support::pallet_prelude::TransactionValidity { + 
Self::pre_validate_extrinsic(info, len)?; + Inner::bare_validate(call, info, len) } From 3499274de9c33b5955045336bfd3e9ec5648a618 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 19 Nov 2025 16:15:58 +0100 Subject: [PATCH 177/312] Respect the dispatch class max weight --- .../parachain-system/src/block_weight/mock.rs | 8 ++- .../parachain-system/src/block_weight/mod.rs | 23 +++++-- .../src/block_weight/tests.rs | 61 +++++++++++++++++++ .../src/block_weight/transaction_extension.rs | 21 +++++-- 4 files changed, 102 insertions(+), 11 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index c485af0935e5b..0bb3c5f643d35 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -75,6 +75,7 @@ pub type BlockOnlyOperational = generic::Block< >; pub const TARGET_BLOCK_RATE: u32 = 12; +pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); #[docify::export(tx_extension_setup)] pub type TxExtension = DynamicMaxBlockWeight< @@ -108,7 +109,7 @@ mod max_block_weight_setup { weights.base_extrinsic = ExtrinsicBaseWeight::get(); }) .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(MaximumBlockWeight::get()); + weights.max_total = Some(MaximumBlockWeight::get() * NORMAL_DISPATCH_RATIO); }) .for_class(DispatchClass::Operational, |weights| { weights.max_total = Some(MaximumBlockWeight::get()); @@ -150,6 +151,11 @@ pub mod test_pallet { pub fn heavy_call_mandatory(_: OriginFor) -> DispatchResult { Ok(()) } + + #[pallet::weight((_weight.clone(), DispatchClass::Normal))] + pub fn use_weight(_: OriginFor, _weight: Weight) -> DispatchResult { + Ok(()) + } } #[pallet::inherent] diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index bf1ef97b78763..8008b63eb5fa3 100644 --- 
a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -56,6 +56,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::BlockNumberFor; use polkadot_primitives::MAX_POV_SIZE; +use polkadot_runtime_parachains::inclusion::migration::v0::PendingAvailabilityCommitments_Storage_Instance; use scale_info::TypeInfo; use sp_core::Get; use sp_runtime::Digest; @@ -77,6 +78,9 @@ const MAX_REF_TIME_PER_CORE_NS: u64 = 2 * WEIGHT_REF_TIME_PER_SECOND; pub(crate) const FULL_CORE_WEIGHT: Weight = Weight::from_parts(MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); +// Is set to `true` when we are currently inside of `pre_validate_extrinsic`. +environmental::environmental!(inside_pre_validate: bool); + /// The current block weight mode. /// /// Based on this mode [`MaxParachainBlockWeight`] determines the current allowed block weight. @@ -189,7 +193,7 @@ impl> // At maximum we want to allow `6s` of ref time, because we don't want to overload nodes // that are running with standard hardware. These nodes need to be able to import all the - // blocks in 6s. + // blocks in `6s`. let total_ref_time = (number_of_cores as u64) .saturating_mul(MAX_REF_TIME_PER_CORE_NS) .min(WEIGHT_REF_TIME_PER_SECOND * 6); @@ -220,6 +224,13 @@ impl> Get target_block_weight }; + // Check if we are inside `pre_validate_extrinsic` of the transaction extension. + // + // When `pre_validate_extrinsic` calls this code, it is interested to know the + // `target_block_weight` which is then used to calculate the weight for each dispatch class. + // If `FullCore` mode is already enabled, the target weight is not important anymore. + let in_pre_validate = inside_pre_validate::with(|v| *v).unwrap_or(false); + match crate::BlockWeightMode::::get().filter(|m| !m.is_stale()) { // We allow the full core. Some( @@ -228,11 +239,13 @@ impl> Get ) => FULL_CORE_WEIGHT, // Let's calculate below how much weight we can use. 
Some(BlockWeightMode::::FractionOfCore { first_transaction_index, .. }) - if first_transaction_index.is_some() => + if first_transaction_index.is_some() && !in_pre_validate => target_block_weight, - // If we are in `on_initialize` or at applying the inherents, we allow the maximum block - // weight as allowed by the current context. - Some(BlockWeightMode::::FractionOfCore { .. }) | None => maybe_full_core_weight, + // If we are in `on_initialize`, at applying the inherents or before applying the first + // transaction, we allow the maximum block weight as allowed by the current context. + Some(BlockWeightMode::::FractionOfCore { .. }) | None if !in_pre_validate => + maybe_full_core_weight, + _ => target_block_weight, } } } diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 68fccca3249a0..1ab5c14b2910d 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -947,3 +947,64 @@ fn full_core_weight_in_inherent_context() { assert_eq!(MaximumBlockWeight::get(), FULL_CORE_WEIGHT); }); } + +#[test] +fn executive_validate_transaction_respects_dispatch_class_max_block_size() { + // Create some weight which is slightly above the allowed dispatch class max size. 
+ let call_weight = TestExtBuilder::new().previous_core_count(4).build().execute_with(|| { + MaximumBlockWeight::target_block_weight() * NORMAL_DISPATCH_RATIO + Weight::from_parts(1, 1) + }); + + for signed in [true, false] { + TestExtBuilder::new().previous_core_count(4).build().execute_with(|| { + assert!(::BlockWeights::get() + .get(DispatchClass::Normal) + .max_total + .unwrap() + .all_lt(call_weight)); + assert!(MaximumBlockWeight::target_block_weight().all_gt(call_weight)); + + let call = + RuntimeCall::TestPallet(test_pallet::Call::use_weight { weight: call_weight }); + + let xt = if signed { + Extrinsic::new_signed(call, 1u64.into(), 1u64.into(), Default::default()) + } else { + Extrinsic::new_bare(call) + }; + + assert_ok!(Executive::validate_transaction( + TransactionSource::External, + xt.clone(), + Default::default() + )); + }); + + TestExtBuilder::new().previous_core_count(4).build().execute_with(|| { + let call = RuntimeCallOnlyOperational::TestPallet(test_pallet::Call::use_weight { + weight: call_weight, + }); + + let xt = if signed { + ExtrinsicOnlyOperational::new_signed( + call, + 1u64.into(), + 1u64.into(), + Default::default(), + ) + } else { + ExtrinsicOnlyOperational::new_bare(call) + }; + + assert_eq!( + ExecutiveOnlyOperational::validate_transaction( + TransactionSource::External, + xt, + Default::default() + ) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() + ); + }); + } +} diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index fa218dbb353e0..017468130caf5 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -15,8 +15,8 @@ // limitations under the License. 
use super::{ - block_weight_over_target_block_weight, is_first_block_in_core_with_digest, BlockWeightMode, - MaxParachainBlockWeight, FULL_CORE_WEIGHT, LOG_TARGET, + block_weight_over_target_block_weight, inside_pre_validate, is_first_block_in_core_with_digest, + BlockWeightMode, MaxParachainBlockWeight, FULL_CORE_WEIGHT, LOG_TARGET, }; use crate::WeightInfo; use alloc::vec::Vec; @@ -154,9 +154,20 @@ where let block_weight_over_limit = extrinsic_index == 0 && block_weight_over_target_block_weight::(); - let block_weights = Config::BlockWeights::get(); - let target_weight = MaxParachainBlockWeight::::target_block_weight_with_digest(&digest) - .saturating_sub(block_weights.base_block); + // If `BlockWeights` is configured correctly, it will internally call `MaxParachainBlockWeight::get()` + // and by setting this variable to `true`, we tell it the context. This is important as we want to get + // the `target_block_weight` and not the full core weight. Otherwise, we will here get a too huge weight + // and do not set the `PotentialFullCore` weight, leading to `CheckWeight` rejecting the extrinsic. + // + // All of this is only important for extrinsics that will enable the `PotentialFullCore` mode. + let block_weights = inside_pre_validate::using(&mut true, || Config::BlockWeights::get()); + let target_weight = block_weights + .get(info.class) + .max_total + .unwrap_or_else(|| + MaxParachainBlockWeight::::target_block_weight_with_digest(&digest) + .saturating_sub(block_weights.base_block) + ); // Protection against a misconfiguration as this should be detected by the pre-inherent hook. 
if block_weight_over_limit { From bfd636f19fca895572248d7c946811308a887eac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 19 Nov 2025 21:44:08 +0100 Subject: [PATCH 178/312] Fix `on_idle` --- .../parachain-system/src/block_weight/mock.rs | 53 +++++++++++++++++-- .../parachain-system/src/block_weight/mod.rs | 34 ++++++++---- .../src/block_weight/tests.rs | 29 +++++++++- substrate/frame/system/src/lib.rs | 5 ++ 4 files changed, 106 insertions(+), 15 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index 0bb3c5f643d35..aee3cda9bfebc 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -15,7 +15,7 @@ // limitations under the License. use super::{transaction_extension::DynamicMaxBlockWeight, *}; -use crate::{self as parachain_system, PreviousCoreCount}; +use crate::{self as parachain_system, MessagingStateSnapshot, PreviousCoreCount}; use codec::Compact; use cumulus_primitives_core::{ BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, @@ -32,6 +32,7 @@ use frame_support::{ }, }; use frame_system::{limits::BlockWeights, CheckWeight}; +use polkadot_primitives::PersistedValidationData; use sp_core::ConstU32; use sp_io; use sp_runtime::{ @@ -121,6 +122,7 @@ mod max_block_weight_setup { #[frame_support::pallet(dev_mode)] pub mod test_pallet { + use super::*; use frame_support::{ dispatch::DispatchClass, pallet_prelude::*, weights::constants::WEIGHT_REF_TIME_PER_SECOND, }; @@ -173,6 +175,17 @@ pub mod test_pallet { } } + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_idle(_block: BlockNumberFor, limit: Weight) -> Weight { + if let Some(max) = OnIdleMaxLeftWeight::get() { + assert!(limit.all_lte(max)); + } + + Weight::zero() + } + } + #[pallet::validate_unsigned] impl sp_runtime::traits::ValidateUnsigned for Pallet { type Call = Call; @@ -231,6 
+244,7 @@ construct_runtime!( parameter_types! { pub static MbmOngoing: bool = false; + pub static OnIdleMaxLeftWeight: Option = None; } pub struct Migrator; @@ -295,8 +309,13 @@ pub use only_operational_runtime::{ }; /// Executive: handles dispatch to the various modules. -pub type Executive = - frame_executive::Executive, Runtime, ()>; +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPalletsWithSystem, +>; /// Executive configured to only accept operational transaction to go over the limit. pub type ExecutiveOnlyOperational = frame_executive::Executive< @@ -304,7 +323,7 @@ pub type ExecutiveOnlyOperational = frame_executive::Executive< BlockOnlyOperational, frame_system::ChainContext, RuntimeOnlyOperational, - (), + only_operational_runtime::AllPalletsWithSystem, >; /// Builder for test externalities @@ -409,3 +428,29 @@ pub fn initialize_block_finished() { ::PreInherents::pre_inherents(); System::note_inherents_applied(); } + +/// Fakes the call to `set_validation_data`. 
+pub fn fake_set_validation_data() { + crate::ValidationData::::put(PersistedValidationData::default()); + crate::HostConfiguration::::put(cumulus_primitives_core::AbridgedHostConfiguration { + max_code_size: 2 * 1024 * 1024, + max_head_data_size: 1024 * 1024, + max_upward_queue_count: 8, + max_upward_queue_size: 1024, + max_upward_message_size: 256, + max_upward_message_num_per_candidate: 5, + hrmp_max_message_num_per_candidate: 5, + validation_upgrade_cooldown: 6, + validation_upgrade_delay: 6, + async_backing_params: cumulus_primitives_core::relay_chain::AsyncBackingParams { + allowed_ancestry_len: 0, + max_candidate_depth: 0, + }, + }); + crate::RelevantMessagingState::::put(MessagingStateSnapshot { + dmq_mqc_head: Default::default(), + relay_dispatch_queue_remaining_capacity: Default::default(), + ingress_channels: Vec::new(), + egress_channels: Vec::new(), + }); +} diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index 8008b63eb5fa3..ab12ed46c12e4 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -56,7 +56,6 @@ use frame_support::{ }; use frame_system::pallet_prelude::BlockNumberFor; use polkadot_primitives::MAX_POV_SIZE; -use polkadot_runtime_parachains::inclusion::migration::v0::PendingAvailabilityCommitments_Storage_Instance; use scale_info::TypeInfo; use sp_core::Get; use sp_runtime::Digest; @@ -99,6 +98,8 @@ pub enum BlockWeightMode { /// setting, e.g. when running `validate_block`. context: BlockNumberFor, /// The index of the first transaction. + /// + /// Stays `None` for all inherents until there is the first transaction. first_transaction_index: Option, /// The target weight that was used to determine that the extrinsic is above this limit. target_weight: Weight, @@ -112,6 +113,8 @@ pub enum BlockWeightMode { /// setting, e.g. when running `validate_block`. 
context: BlockNumberFor, /// The index of the first transaction. + /// + /// Stays `None` for all inherents until there is the first transaction. first_transaction_index: Option, }, } @@ -237,15 +240,26 @@ impl> Get BlockWeightMode::::FullCore { .. } | BlockWeightMode::::PotentialFullCore { .. }, ) => FULL_CORE_WEIGHT, - // Let's calculate below how much weight we can use. - Some(BlockWeightMode::::FractionOfCore { first_transaction_index, .. }) - if first_transaction_index.is_some() && !in_pre_validate => - target_block_weight, - // If we are in `on_initialize`, at applying the inherents or before applying the first - // transaction, we allow the maximum block weight as allowed by the current context. - Some(BlockWeightMode::::FractionOfCore { .. }) | None if !in_pre_validate => - maybe_full_core_weight, - _ => target_block_weight, + // We are in `pre_validate`. + _ if in_pre_validate => target_block_weight, + // Only use the fraction of a core. + Some(BlockWeightMode::::FractionOfCore { first_transaction_index, .. }) => { + let is_phase_finalization = frame_system::Pallet::::execution_phase() + .map_or(false, |p| matches!(p, frame_system::Phase::Finalization)); + + if first_transaction_index.is_none() && !is_phase_finalization { + // We are running in the context of inherents or `on_poll`, here we allow the + // full core weight. + maybe_full_core_weight + } else { + // If we are finalizing the block (e.g. `on_idle` is running and + // `finalize_block`) or nothing required more than the target block weight, we + // only allow the target block weight. + target_block_weight + } + }, + // We are in `on_initialize` or in an offchain context. 
+ None => maybe_full_core_weight, } } } diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 1ab5c14b2910d..2ff20fd973739 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -852,6 +852,8 @@ fn block_weight_mode_from_previous_block_is_ignored_in_validate_block() { assert_ok!(ExecutiveOnlyOperational::apply_extrinsic(xt)); + fake_set_validation_data(); + ExecutiveOnlyOperational::finalize_block(); assert_eq!( @@ -882,7 +884,7 @@ fn block_weight_mode_from_previous_block_is_ignored_in_validate_block() { } #[test] -fn ongoin_mbm_requests_full_core() { +fn ongoing_mbm_requests_full_core() { TestExtBuilder::new() .number_of_cores(2) .first_block_in_core(true) @@ -902,6 +904,8 @@ fn ongoin_mbm_requests_full_core() { ::BlockWeights::get().max_block ); + fake_set_validation_data(); + ExecutiveOnlyOperational::finalize_block(); assert!(has_use_full_core_digest()); @@ -1008,3 +1012,26 @@ fn executive_validate_transaction_respects_dispatch_class_max_block_size() { }); } } + +#[test] +fn on_idle_uses_correct_weight() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + Executive::initialize_block(&Header::new( + 1, + Default::default(), + Default::default(), + Default::default(), + System::digest(), + )); + + fake_set_validation_data(); + + OnIdleMaxLeftWeight::set(Some(MaximumBlockWeight::target_block_weight())); + + Executive::finalize_block(); + }); +} diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 358f6effe6ba3..298bb1b5d4ae4 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -1884,6 +1884,11 @@ impl Pallet { AllExtrinsicsLen::::get().unwrap_or_default() } + /// Returns the current active execution phase. 
+ pub fn execution_phase() -> Option { + ExecutionPhase::::get() + } + /// Inform the system pallet of some additional weight that should be accounted for, in the /// current block. /// From c900555e6030559435f7ad937b551bf2779d9cfe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 20 Nov 2025 12:18:40 +0100 Subject: [PATCH 179/312] Test `on_poll` --- .../parachain-system/src/block_weight/mock.rs | 11 ++++++++- .../parachain-system/src/block_weight/mod.rs | 10 ++++---- .../src/block_weight/tests.rs | 23 +++++++++++++++++++ 3 files changed, 39 insertions(+), 5 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index aee3cda9bfebc..ac1a9bcdd4845 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -124,7 +124,9 @@ mod max_block_weight_setup { pub mod test_pallet { use super::*; use frame_support::{ - dispatch::DispatchClass, pallet_prelude::*, weights::constants::WEIGHT_REF_TIME_PER_SECOND, + dispatch::DispatchClass, + pallet_prelude::*, + weights::{constants::WEIGHT_REF_TIME_PER_SECOND, WeightMeter}, }; use frame_system::pallet_prelude::*; @@ -184,6 +186,12 @@ pub mod test_pallet { Weight::zero() } + + fn on_poll(_n: BlockNumberFor, weight: &mut WeightMeter) { + if let Some(max) = OnPollMaxLeftWeight::get() { + assert!(weight.remaining().all_lte(max)); + } + } } #[pallet::validate_unsigned] @@ -245,6 +253,7 @@ construct_runtime!( parameter_types! 
{ pub static MbmOngoing: bool = false; pub static OnIdleMaxLeftWeight: Option = None; + pub static OnPollMaxLeftWeight: Option = None; } pub struct Migrator; diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index ab12ed46c12e4..9fe8502e59e78 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -246,15 +246,17 @@ impl> Get Some(BlockWeightMode::::FractionOfCore { first_transaction_index, .. }) => { let is_phase_finalization = frame_system::Pallet::::execution_phase() .map_or(false, |p| matches!(p, frame_system::Phase::Finalization)); + let inherents_applied = frame_system::Pallet::::inherents_applied(); - if first_transaction_index.is_none() && !is_phase_finalization { - // We are running in the context of inherents or `on_poll`, here we allow the + if first_transaction_index.is_none() && !is_phase_finalization && !inherents_applied + { + // We are running in the context of inherents, here we allow the // full core weight. maybe_full_core_weight } else { // If we are finalizing the block (e.g. `on_idle` is running and - // `finalize_block`) or nothing required more than the target block weight, we - // only allow the target block weight. + // `finalize_block`), running `on_poll` or nothing required more than the target + // block weight, we only allow the target block weight. 
target_block_weight } }, diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 2ff20fd973739..353dcdf53a338 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -1035,3 +1035,26 @@ fn on_idle_uses_correct_weight() { Executive::finalize_block(); }); } + +#[test] +fn on_poll_uses_correct_weight() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + Executive::initialize_block(&Header::new( + 1, + Default::default(), + Default::default(), + Default::default(), + System::digest(), + )); + + fake_set_validation_data(); + + OnPollMaxLeftWeight::set(Some(MaximumBlockWeight::target_block_weight())); + + Executive::finalize_block(); + }); +} From 9d1159e9132ef05cbb9f5602d421fef018023161 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 20 Nov 2025 12:29:37 +0100 Subject: [PATCH 180/312] More docs --- .../parachain-system/src/block_weight/mod.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index 9fe8502e59e78..39c97cf9975a3 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -45,6 +45,24 @@ //! //! Registering of the `PreInherents` hook: #![doc = docify::embed!("src/block_weight/mock.rs", pre_inherents_setup)] +//! # Weight per context +//! +//! Depending on the context, [`MaxParachainBlockWeight`] may return a different max weight. The +//! max weight is only allowed to change in the first block of a core. Otherwise, all blocks need to +//! follow the target block weight determined based on the number of cores and the target block +//! rate. 
In the case of a first block, the following contexts may allow to access the full core +//! weight: +//! +//! - `on_initialize`: All logic that runs in this context up to the execution of `inherents` will +//! get access to the full core weight. +//! - `inherents`: Inherents also have access to the full core weight. +//! - `on_poll`: Only gets access to the target block weight. +//! - `transactions`: May get access to the full core weight, depends if they enable the access to +//! the full core weight based on the logic of [`DynamicMaxBlockWeight`]. +//! - `on_finalize`/`on_idle`: Only gets access to the target block weight. +//! +//! If any context that allows to use the full core weight, pushes the used block weight above the +//! target block weight, all other contexts will get access to the full core weight. use crate::{Config, PreviousCoreCount}; use codec::{Decode, Encode}; From 1cea26874127f20f43ddadf558ea3844846601ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 19 Nov 2025 21:44:08 +0100 Subject: [PATCH 181/312] Fix `on_idle` --- .../parachain-system/src/block_weight/mock.rs | 53 +++++++++++++++++-- .../parachain-system/src/block_weight/mod.rs | 34 ++++++++---- .../src/block_weight/tests.rs | 29 +++++++++- substrate/frame/system/src/lib.rs | 5 ++ 4 files changed, 106 insertions(+), 15 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index 0bb3c5f643d35..aee3cda9bfebc 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -15,7 +15,7 @@ // limitations under the License. 
use super::{transaction_extension::DynamicMaxBlockWeight, *}; -use crate::{self as parachain_system, PreviousCoreCount}; +use crate::{self as parachain_system, MessagingStateSnapshot, PreviousCoreCount}; use codec::Compact; use cumulus_primitives_core::{ BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, @@ -32,6 +32,7 @@ use frame_support::{ }, }; use frame_system::{limits::BlockWeights, CheckWeight}; +use polkadot_primitives::PersistedValidationData; use sp_core::ConstU32; use sp_io; use sp_runtime::{ @@ -121,6 +122,7 @@ mod max_block_weight_setup { #[frame_support::pallet(dev_mode)] pub mod test_pallet { + use super::*; use frame_support::{ dispatch::DispatchClass, pallet_prelude::*, weights::constants::WEIGHT_REF_TIME_PER_SECOND, }; @@ -173,6 +175,17 @@ pub mod test_pallet { } } + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_idle(_block: BlockNumberFor, limit: Weight) -> Weight { + if let Some(max) = OnIdleMaxLeftWeight::get() { + assert!(limit.all_lte(max)); + } + + Weight::zero() + } + } + #[pallet::validate_unsigned] impl sp_runtime::traits::ValidateUnsigned for Pallet { type Call = Call; @@ -231,6 +244,7 @@ construct_runtime!( parameter_types! { pub static MbmOngoing: bool = false; + pub static OnIdleMaxLeftWeight: Option = None; } pub struct Migrator; @@ -295,8 +309,13 @@ pub use only_operational_runtime::{ }; /// Executive: handles dispatch to the various modules. -pub type Executive = - frame_executive::Executive, Runtime, ()>; +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPalletsWithSystem, +>; /// Executive configured to only accept operational transaction to go over the limit. 
pub type ExecutiveOnlyOperational = frame_executive::Executive< @@ -304,7 +323,7 @@ pub type ExecutiveOnlyOperational = frame_executive::Executive< BlockOnlyOperational, frame_system::ChainContext, RuntimeOnlyOperational, - (), + only_operational_runtime::AllPalletsWithSystem, >; /// Builder for test externalities @@ -409,3 +428,29 @@ pub fn initialize_block_finished() { ::PreInherents::pre_inherents(); System::note_inherents_applied(); } + +/// Fakes the call to `set_validation_data`. +pub fn fake_set_validation_data() { + crate::ValidationData::::put(PersistedValidationData::default()); + crate::HostConfiguration::::put(cumulus_primitives_core::AbridgedHostConfiguration { + max_code_size: 2 * 1024 * 1024, + max_head_data_size: 1024 * 1024, + max_upward_queue_count: 8, + max_upward_queue_size: 1024, + max_upward_message_size: 256, + max_upward_message_num_per_candidate: 5, + hrmp_max_message_num_per_candidate: 5, + validation_upgrade_cooldown: 6, + validation_upgrade_delay: 6, + async_backing_params: cumulus_primitives_core::relay_chain::AsyncBackingParams { + allowed_ancestry_len: 0, + max_candidate_depth: 0, + }, + }); + crate::RelevantMessagingState::::put(MessagingStateSnapshot { + dmq_mqc_head: Default::default(), + relay_dispatch_queue_remaining_capacity: Default::default(), + ingress_channels: Vec::new(), + egress_channels: Vec::new(), + }); +} diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index 8008b63eb5fa3..ab12ed46c12e4 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -56,7 +56,6 @@ use frame_support::{ }; use frame_system::pallet_prelude::BlockNumberFor; use polkadot_primitives::MAX_POV_SIZE; -use polkadot_runtime_parachains::inclusion::migration::v0::PendingAvailabilityCommitments_Storage_Instance; use scale_info::TypeInfo; use sp_core::Get; use sp_runtime::Digest; @@ -99,6 +98,8 @@ 
pub enum BlockWeightMode { /// setting, e.g. when running `validate_block`. context: BlockNumberFor, /// The index of the first transaction. + /// + /// Stays `None` for all inherents until there is the first transaction. first_transaction_index: Option, /// The target weight that was used to determine that the extrinsic is above this limit. target_weight: Weight, @@ -112,6 +113,8 @@ pub enum BlockWeightMode { /// setting, e.g. when running `validate_block`. context: BlockNumberFor, /// The index of the first transaction. + /// + /// Stays `None` for all inherents until there is the first transaction. first_transaction_index: Option, }, } @@ -237,15 +240,26 @@ impl> Get BlockWeightMode::::FullCore { .. } | BlockWeightMode::::PotentialFullCore { .. }, ) => FULL_CORE_WEIGHT, - // Let's calculate below how much weight we can use. - Some(BlockWeightMode::::FractionOfCore { first_transaction_index, .. }) - if first_transaction_index.is_some() && !in_pre_validate => - target_block_weight, - // If we are in `on_initialize`, at applying the inherents or before applying the first - // transaction, we allow the maximum block weight as allowed by the current context. - Some(BlockWeightMode::::FractionOfCore { .. }) | None if !in_pre_validate => - maybe_full_core_weight, - _ => target_block_weight, + // We are in `pre_validate`. + _ if in_pre_validate => target_block_weight, + // Only use the fraction of a core. + Some(BlockWeightMode::::FractionOfCore { first_transaction_index, .. }) => { + let is_phase_finalization = frame_system::Pallet::::execution_phase() + .map_or(false, |p| matches!(p, frame_system::Phase::Finalization)); + + if first_transaction_index.is_none() && !is_phase_finalization { + // We are running in the context of inherents or `on_poll`, here we allow the + // full core weight. + maybe_full_core_weight + } else { + // If we are finalizing the block (e.g. 
`on_idle` is running and + // `finalize_block`) or nothing required more than the target block weight, we + // only allow the target block weight. + target_block_weight + } + }, + // We are in `on_initialize` or in an offchain context. + None => maybe_full_core_weight, } } } diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 1ab5c14b2910d..2ff20fd973739 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -852,6 +852,8 @@ fn block_weight_mode_from_previous_block_is_ignored_in_validate_block() { assert_ok!(ExecutiveOnlyOperational::apply_extrinsic(xt)); + fake_set_validation_data(); + ExecutiveOnlyOperational::finalize_block(); assert_eq!( @@ -882,7 +884,7 @@ fn block_weight_mode_from_previous_block_is_ignored_in_validate_block() { } #[test] -fn ongoin_mbm_requests_full_core() { +fn ongoing_mbm_requests_full_core() { TestExtBuilder::new() .number_of_cores(2) .first_block_in_core(true) @@ -902,6 +904,8 @@ fn ongoin_mbm_requests_full_core() { ::BlockWeights::get().max_block ); + fake_set_validation_data(); + ExecutiveOnlyOperational::finalize_block(); assert!(has_use_full_core_digest()); @@ -1008,3 +1012,26 @@ fn executive_validate_transaction_respects_dispatch_class_max_block_size() { }); } } + +#[test] +fn on_idle_uses_correct_weight() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + Executive::initialize_block(&Header::new( + 1, + Default::default(), + Default::default(), + Default::default(), + System::digest(), + )); + + fake_set_validation_data(); + + OnIdleMaxLeftWeight::set(Some(MaximumBlockWeight::target_block_weight())); + + Executive::finalize_block(); + }); +} diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index c9eb684a1c3b1..a1924219a8aaa 100644 --- a/substrate/frame/system/src/lib.rs 
+++ b/substrate/frame/system/src/lib.rs @@ -1884,6 +1884,11 @@ impl Pallet { AllExtrinsicsLen::::get().unwrap_or_default() } + /// Returns the current active execution phase. + pub fn execution_phase() -> Option { + ExecutionPhase::::get() + } + /// Inform the system pallet of some additional weight that should be accounted for, in the /// current block. /// From 5ed9e2908edef61917ec47dd09ebc77d4b9560ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 20 Nov 2025 12:18:40 +0100 Subject: [PATCH 182/312] Test `on_poll` --- .../parachain-system/src/block_weight/mock.rs | 11 ++++++++- .../parachain-system/src/block_weight/mod.rs | 10 ++++---- .../src/block_weight/tests.rs | 23 +++++++++++++++++++ 3 files changed, 39 insertions(+), 5 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index aee3cda9bfebc..ac1a9bcdd4845 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -124,7 +124,9 @@ mod max_block_weight_setup { pub mod test_pallet { use super::*; use frame_support::{ - dispatch::DispatchClass, pallet_prelude::*, weights::constants::WEIGHT_REF_TIME_PER_SECOND, + dispatch::DispatchClass, + pallet_prelude::*, + weights::{constants::WEIGHT_REF_TIME_PER_SECOND, WeightMeter}, }; use frame_system::pallet_prelude::*; @@ -184,6 +186,12 @@ pub mod test_pallet { Weight::zero() } + + fn on_poll(_n: BlockNumberFor, weight: &mut WeightMeter) { + if let Some(max) = OnPollMaxLeftWeight::get() { + assert!(weight.remaining().all_lte(max)); + } + } } #[pallet::validate_unsigned] @@ -245,6 +253,7 @@ construct_runtime!( parameter_types! 
{ pub static MbmOngoing: bool = false; pub static OnIdleMaxLeftWeight: Option = None; + pub static OnPollMaxLeftWeight: Option = None; } pub struct Migrator; diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index ab12ed46c12e4..9fe8502e59e78 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -246,15 +246,17 @@ impl> Get Some(BlockWeightMode::::FractionOfCore { first_transaction_index, .. }) => { let is_phase_finalization = frame_system::Pallet::::execution_phase() .map_or(false, |p| matches!(p, frame_system::Phase::Finalization)); + let inherents_applied = frame_system::Pallet::::inherents_applied(); - if first_transaction_index.is_none() && !is_phase_finalization { - // We are running in the context of inherents or `on_poll`, here we allow the + if first_transaction_index.is_none() && !is_phase_finalization && !inherents_applied + { + // We are running in the context of inherents, here we allow the // full core weight. maybe_full_core_weight } else { // If we are finalizing the block (e.g. `on_idle` is running and - // `finalize_block`) or nothing required more than the target block weight, we - // only allow the target block weight. + // `finalize_block`), running `on_poll` or nothing required more than the target + // block weight, we only allow the target block weight. 
target_block_weight } }, diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 2ff20fd973739..353dcdf53a338 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -1035,3 +1035,26 @@ fn on_idle_uses_correct_weight() { Executive::finalize_block(); }); } + +#[test] +fn on_poll_uses_correct_weight() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + Executive::initialize_block(&Header::new( + 1, + Default::default(), + Default::default(), + Default::default(), + System::digest(), + )); + + fake_set_validation_data(); + + OnPollMaxLeftWeight::set(Some(MaximumBlockWeight::target_block_weight())); + + Executive::finalize_block(); + }); +} From 3404f67ccebe536bca738ec52f565271800da66a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 20 Nov 2025 12:29:37 +0100 Subject: [PATCH 183/312] More docs --- .../parachain-system/src/block_weight/mod.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index 9fe8502e59e78..39c97cf9975a3 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -45,6 +45,24 @@ //! //! Registering of the `PreInherents` hook: #![doc = docify::embed!("src/block_weight/mock.rs", pre_inherents_setup)] +//! # Weight per context +//! +//! Depending on the context, [`MaxParachainBlockWeight`] may returns a different max weight. The +//! max weight is only allowed to change in the first block of a core. Otherwise, all blocks need to +//! follow the target block weight determined based on the number of cores and the target block +//! rate. 
In the case of a first block, the following contexts may allow access to the full core
+//! weight:
+//!
+//! - `on_initialize`: All logic that runs in this context up to the execution of `inherents` will
+//! get access to the full core weight.
+//! - `inherents`: Inherents also have access to the full core weight.
+//! - `on_poll`: Only gets access to the target block weight.
+//! - `transactions`: May get access to the full core weight, depends on whether they enable the access to
+//! the full core weight based on the logic of [`DynamicMaxBlockWeight`].
+//! - `on_finalize`/`on_idle`: Only gets access to the target block weight.
+//!
+//! If any context that is allowed to use the full core weight pushes the used block weight above the
+//! target block weight, all other contexts will get access to the full core weight.
 
 use crate::{Config, PreviousCoreCount};
 use codec::{Decode, Encode};

From 0671a7ca735520e21f3d5b63e722256dd0d3095f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Thu, 20 Nov 2025 13:55:57 +0100
Subject: [PATCH 184/312] Extend the runtime upgrade test

---
 .../slot_based/block_builder_task.rs | 13 ++++++-
 .../zombienet-sdk-helpers/src/lib.rs | 37 ++++++++++++-------
 .../block_bundling/runtime_upgrade.rs | 7 +++-
 .../elastic_scaling/upgrade_to_3_cores.rs | 4 +-
 .../tests/zombie_ci/runtime_upgrade.rs | 11 ++----
 5 files changed, 46 insertions(+), 26 deletions(-)

diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
index 3eeb45c2f9180..3775fb7a9610b 100644
--- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
+++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
@@ -621,12 +621,21 @@ where
 			blocks.push(built_block.block);
 			proofs.push(built_block.proof);
 
-			if CumulusDigestItem::contains_use_full_core(parent_header.digest()) {
+			let full_core_digest =
CumulusDigestItem::contains_use_full_core(parent_header.digest()); + let runtime_upgrade_digest = parent_header + .digest() + .logs + .iter() + .any(|it| matches!(it, sp_runtime::DigestItem::RuntimeEnvironmentUpdated)); + + if full_core_digest || runtime_upgrade_digest { tracing::trace!( target: crate::LOG_TARGET, block_hash = ?parent_hash, time_used_by_block_in_secs = %block_start.elapsed().as_secs_f32(), - "Found `UseFullCore` digest, stopping block production for core", + %full_core_digest, + %runtime_upgrade_digest, + "Stopping block production for core", ); break } diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index c665217c9a9cf..eb4089b7120e4 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -16,7 +16,7 @@ use zombienet_sdk::subxt::{ self, backend::legacy::LegacyRpcMethods, blocks::Block, - config::{polkadot::PolkadotExtrinsicParamsBuilder, Header}, + config::{polkadot::PolkadotExtrinsicParamsBuilder, substrate::DigestItem, Header}, dynamic::Value, events::Events, ext::scale_value::value, @@ -896,19 +896,30 @@ pub async fn assign_cores( Ok(()) } -pub async fn wait_for_upgrade( - client: OnlineClient, - expected_version: u32, -) -> Result<(), anyhow::Error> { - let updater = client.updater(); - let mut update_stream = updater.runtime_updates().await?; +/// Wait until a runtime upgrade has happened. +/// +/// This checks all finalized blocks until it finds a block that sets the +/// `RuntimeEnvironmentUpdated` digest. +/// +/// Returns the hash of the block at which the runtime upgrade was applied. 
+pub async fn wait_for_runtime_upgrade( + client: &OnlineClient, +) -> Result { + let mut finalized_blocks = client.blocks().subscribe_finalized().await?; - while let Some(Ok(update)) = update_stream.next().await { - let version = update.runtime_version().spec_version; - log::info!("Update runtime spec version {version}"); - if version == expected_version { - break; + while let Some(Ok(block)) = finalized_blocks.next().await { + if block + .header() + .digest + .logs + .iter() + .any(|d| matches!(d, DigestItem::RuntimeEnvironmentUpdated)) + { + log::info!("Runtime upgraded in block {:?}", block.hash()); + + return Ok(block.hash()) } } - Ok(()) + + Err(anyhow!("Did not find a runtime upgrade")) } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs index 165d6b668bf51..60f1e5d97b012 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs @@ -21,7 +21,8 @@ use cumulus_test_runtime::wasm_spec_version_incremented::WASM_BINARY_BLOATY as W use cumulus_zombienet_sdk_helpers::{ assert_finality_lag, assert_para_throughput, assign_cores, ensure_is_only_block_in_core, find_core_info, submit_extrinsic_and_wait_for_finalization_success, - submit_unsigned_extrinsic_and_wait_for_finalization_success, BlockToCheck, + submit_unsigned_extrinsic_and_wait_for_finalization_success, wait_for_runtime_upgrade, + BlockToCheck, }; use polkadot_primitives::Id as ParaId; use serde_json::json; @@ -117,7 +118,9 @@ async fn block_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { ensure_is_only_block_in_core(¶_client, BlockToCheck::Exact(block_hash)).await?; - //TODO: Verify that the runtime upgrade block is also using a full core. 
+ let upgrade_block = wait_for_runtime_upgrade(¶_client).await?; + + ensure_is_only_block_in_core(¶_client, BlockToCheck::Exact(upgrade_block)).await?; Ok(()) } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs index 8aa1a338ae6fd..14e64d3887677 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs @@ -8,7 +8,7 @@ use std::time::Duration; use crate::utils::initialize_network; use cumulus_zombienet_sdk_helpers::{ - assert_para_throughput, assign_cores, runtime_upgrade, wait_for_upgrade, + assert_para_throughput, assign_cores, runtime_upgrade, wait_for_runtime_upgrade, }; use polkadot_primitives::Id as ParaId; use rstest::rstest; @@ -80,7 +80,7 @@ async fn elastic_scaling_upgrade_to_3_cores( ); tokio::time::timeout( Duration::from_secs(timeout_secs), - wait_for_upgrade(collator1_client, expected_spec_version), + wait_for_runtime_upgrade(&collator1_client), ) .await .expect("Timeout waiting for runtime upgrade")?; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs index e5056fea9d25f..1876396d10120 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs @@ -6,7 +6,7 @@ use std::time::Duration; use crate::utils::initialize_network; -use cumulus_zombienet_sdk_helpers::{assert_para_throughput, wait_for_upgrade}; +use cumulus_zombienet_sdk_helpers::{assert_para_throughput, wait_for_runtime_upgrade}; use polkadot_primitives::Id as ParaId; use zombienet_configuration::types::AssetLocation; use zombienet_sdk::{ @@ -63,12 +63,9 @@ async fn runtime_upgrade() -> Result<(), anyhow::Error> { "Waiting (up to 
{timeout_secs}s) for parachain runtime upgrade to version {}", expected_spec_version ); - tokio::time::timeout( - Duration::from_secs(timeout_secs), - wait_for_upgrade(dave_client, expected_spec_version), - ) - .await - .expect("Timeout waiting for runtime upgrade")?; + tokio::time::timeout(Duration::from_secs(timeout_secs), wait_for_runtime_upgrade(&dave_client)) + .await + .expect("Timeout waiting for runtime upgrade")?; let spec_version_from_charlie = charlie_client.backend().current_runtime_version().await?.spec_version; From 2cd991c703b371af24ae0a26cb1502c4f819daf8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 21 Nov 2025 17:21:18 +0100 Subject: [PATCH 185/312] Extend the basic test to verify that restarting a node works --- Cargo.lock | 1 + cumulus/test/runtime/Cargo.toml | 1 + cumulus/test/runtime/src/lib.rs | 1 - .../tests/zombie_ci/block_bundling/basic.rs | 66 +++++++++++++++---- .../block_bundling/runtime_upgrade.rs | 16 ++--- 5 files changed, 58 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 921a792d1a080..5953e5bd6b4d3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5176,6 +5176,7 @@ dependencies = [ "cumulus-pallet-weight-reclaim", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "frame-executive", "frame-support", "frame-system", diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index 1c796eebaa766..90c10c0736a6d 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -47,6 +47,7 @@ sp-version = { workspace = true } cumulus-pallet-aura-ext = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-weight-reclaim = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } parachain-info = { workspace = true } diff --git 
a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 2897ac34380ed..a7919979d5728 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -67,7 +67,6 @@ pub mod test_pallet; extern crate alloc; use alloc::{vec, vec::Vec}; -use core::time::Duration; use frame_support::{derive_impl, traits::OnRuntimeUpgrade, PalletId}; use sp_api::{decl_runtime_apis, impl_runtime_apis}; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs index 380a32acb3fb4..7b9829c5203b6 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs @@ -15,23 +15,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -use anyhow::anyhow; - use crate::utils::initialize_network; - -use cumulus_zombienet_sdk_helpers::{ - assert_finality_lag, assert_para_throughput, assign_cores, create_assign_core_call, - submit_extrinsic_and_wait_for_finalization_success_with_timeout, -}; +use anyhow::anyhow; +use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_para_throughput, assign_cores}; use polkadot_primitives::Id as ParaId; use serde_json::json; +use tokio::{join, spawn, task::JoinHandle}; use zombienet_sdk::{ - subxt::{ - backend::{legacy::LegacyRpcMethods, rpc::RpcClient}, - OnlineClient, PolkadotConfig, - }, - subxt_signer::sr25519::dev, - NetworkConfig, NetworkConfigBuilder, + subxt::{OnlineClient, PolkadotConfig}, + NetworkConfig, NetworkConfigBuilder, NetworkNode, }; const PARA_ID: u32 = 2400; @@ -50,9 +42,11 @@ async fn block_bundling_basic() -> Result<(), anyhow::Error> { log::info!("Spawning network"); let config = build_network_config().await?; let network = initialize_network(config).await?; - let relay_node = 
network.get_node("validator-0")?; let para_node = network.get_node("collator-1")?; + let para_full_node = network.get_node("para-full-node")?; + + let handle = wait_for_block_and_restart_node(para_full_node.clone()); let para_client = para_node.wait_client().await?; let relay_client: OnlineClient = relay_node.wait_client().await?; @@ -88,10 +82,53 @@ async fn block_bundling_basic() -> Result<(), anyhow::Error> { .await?; assert_finality_lag(¶_client, 72).await?; + + // Ensure we restarted the node successfully + handle.await??; + + let para_full_client: OnlineClient = para_full_node.wait_client().await?; + let mut full_best_blocks = para_full_client.blocks().subscribe_best().await?; + let mut collator_best_blocks = para_client.blocks().subscribe_best().await?; + + let (Some(full_best), Some(best)) = join!(full_best_blocks.next(), collator_best_blocks.next()) + else { + return Err(anyhow!("Failed to get a best block from the full node and the collator")) + }; + + let diff = full_best?.number().abs_diff(best?.number()); + if diff > 12 { + return Err(anyhow!( + "Best block difference between full node and collator of {diff} is too big!" + )) + } + log::info!("Test finished successfully"); + Ok(()) } +/// Wait for block `13` and then restart the node. +/// +/// We take block `13`, because it should be near the beginning of a block bundle and we want to +/// test stopping the node while importing blocks in the middle of a bundle. +fn wait_for_block_and_restart_node(node: NetworkNode) -> JoinHandle> { + spawn(async move { + let para_client: OnlineClient = node.wait_client().await?; + let mut best_blocks = para_client.blocks().subscribe_best().await?; + + loop { + let Some(block) = best_blocks.next().await.transpose()? 
else { + return Err(anyhow!("Node stopped before reaching the block to restart")) + }; + + if block.number() >= 13 { + log::info!("Full node has imported block `13`, going to restart it"); + return node.restart(None).await + } + } + }) +} + async fn build_network_config() -> Result { // images are not relevant for `native`, but we leave it here in case we use `k8s` some day let images = zombienet_sdk::environment::get_images_from_env(); @@ -137,6 +174,7 @@ async fn build_network_config() -> Result { .with_collator(|n| n.with_name("collator-0")) .with_collator(|n| n.with_name("collator-1")) .with_collator(|n| n.with_name("collator-2")) + .with_collator(|n| n.with_name("para-full-node").validator(false)) }) .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { Ok(val) => global_settings.with_base_dir(val), diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs index 60f1e5d97b012..a9de832066bf6 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs @@ -19,15 +19,12 @@ use anyhow::anyhow; use cumulus_primitives_core::relay_chain::MAX_POV_SIZE; use cumulus_test_runtime::wasm_spec_version_incremented::WASM_BINARY_BLOATY as WASM_RUNTIME_UPGRADE; use cumulus_zombienet_sdk_helpers::{ - assert_finality_lag, assert_para_throughput, assign_cores, ensure_is_only_block_in_core, - find_core_info, submit_extrinsic_and_wait_for_finalization_success, + assign_cores, ensure_is_only_block_in_core, submit_extrinsic_and_wait_for_finalization_success, submit_unsigned_extrinsic_and_wait_for_finalization_success, wait_for_runtime_upgrade, BlockToCheck, }; -use polkadot_primitives::Id as ParaId; use serde_json::json; use sp_core::blake2_256; -use std::sync::Arc; use zombienet_sdk::{ subxt::{ 
ext::scale_value::{value, Value}, @@ -81,11 +78,10 @@ async fn block_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { let para_node = network.get_node("collator-1")?; let para_client: OnlineClient = para_node.wait_client().await?; - let relay_client: OnlineClient = relay_node.wait_client().await?; let alice = dev::alice(); // Assign cores 0 and 1 to start with 3 cores total (core 2 is assigned by Zombienet) - assign_cores(&relay_node, PARA_ID, vec![0, 1]).await; + assign_cores(&relay_node, PARA_ID, vec![0, 1]).await?; log::info!("3 cores total assigned to the parachain"); @@ -95,12 +91,8 @@ async fn block_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { let sudo_authorize_call = create_sudo_call(authorize_call); log::info!("Sending authorize_upgrade transaction"); - let block_hash = submit_extrinsic_and_wait_for_finalization_success( - ¶_client, - &sudo_authorize_call, - &alice, - ) - .await?; + submit_extrinsic_and_wait_for_finalization_success(¶_client, &sudo_authorize_call, &alice) + .await?; log::info!("Authorize upgrade transaction finalized"); // Step 2: Apply the authorized upgrade with the actual runtime code From e0f55cf66ee4989df72c7c07f96a67522ca9a156 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 21 Nov 2025 22:13:50 +0100 Subject: [PATCH 186/312] `ExecuteBlock` split up seal verification and actual execution `ExecuteBlock` exposes the `execute_block` function that is used by `validate_block` to execute a block. In case auf AuRa the block execution includes the verification of the seal and the removal of the seal. To verify the seal, the block executor needs to load the current authority set. 
The problem is that when we have storage proof reclaim enabled and the host function is used in `on_initialize` before `pallet_aura_ext::on_initialize` (this is where we fetch the authority set to ensure it appears in the proof) is called, it leads to `validate_block` returning a different size and thus, breaking the block. To solve this issue `ExecuteBlock` is now split into seal verification and execution of the verified block. In `validate_block` the seal verification is then run outside of the block execution, not leading to the issues of reporting different proof sizes. --- Cargo.lock | 5 + cumulus/pallets/aura-ext/Cargo.toml | 5 + cumulus/pallets/aura-ext/src/lib.rs | 5 +- cumulus/pallets/aura-ext/src/test.rs | 633 +++++++++++------- .../src/validate_block/implementation.rs | 13 +- .../src/validate_block/tests.rs | 36 +- substrate/frame/executive/src/lib.rs | 6 +- substrate/frame/support/src/traits/misc.rs | 26 +- .../primitives/state-machine/src/testing.rs | 22 +- 9 files changed, 487 insertions(+), 264 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d42e29dd885d0..e68d47656af96 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4692,9 +4692,11 @@ version = "0.7.0" dependencies = [ "cumulus-pallet-parachain-system", "cumulus-primitives-core", + "cumulus-primitives-proof-size-hostfunction", "cumulus-test-relay-sproof-builder", "frame-support", "frame-system", + "log", "pallet-aura", "pallet-timestamp", "parity-scale-codec", @@ -4704,7 +4706,10 @@ dependencies = [ "sp-consensus-aura", "sp-core 28.0.0", "sp-io", + "sp-keyring", "sp-runtime", + "sp-state-machine", + "sp-trie", "sp-version", ] diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index ddad89066ac2e..93746087192c5 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -29,15 +29,20 @@ cumulus-pallet-parachain-system = { workspace = true } [dev-dependencies] rstest = { workspace = true } +log = { workspace = true, 
default-features = true } # Cumulus cumulus-pallet-parachain-system = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } # Substrate sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } [features] diff --git a/cumulus/pallets/aura-ext/src/lib.rs b/cumulus/pallets/aura-ext/src/lib.rs index a9b23bf9eb5a3..8654cc1004f97 100644 --- a/cumulus/pallets/aura-ext/src/lib.rs +++ b/cumulus/pallets/aura-ext/src/lib.rs @@ -40,6 +40,7 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT, LazyBlock}; pub mod consensus_hook; pub mod migration; +#[cfg(test)] mod test; pub use consensus_hook::FixedVelocityConsensusHook; @@ -124,7 +125,7 @@ where T: Config, I: ExecuteBlock, { - fn execute_block(mut block: Block::LazyBlock) { + fn verify_and_remove_seal(block: &mut ::LazyBlock) { let header = block.header_mut(); // We need to fetch the authorities before we execute the block, to get the authorities // before any potential update. @@ -162,7 +163,9 @@ where { panic!("Invalid AuRa seal"); } + } + fn execute_verified_block(block: Block::LazyBlock) { I::execute_block(block); } } diff --git a/cumulus/pallets/aura-ext/src/test.rs b/cumulus/pallets/aura-ext/src/test.rs index ca5ab8bd8c60e..407a3b5c33a3b 100644 --- a/cumulus/pallets/aura-ext/src/test.rs +++ b/cumulus/pallets/aura-ext/src/test.rs @@ -14,27 +14,62 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#![cfg(test)] -extern crate alloc; - use super::*; - use core::num::NonZeroU32; use cumulus_pallet_parachain_system::{ - consensus_hook::ExpectParentIncluded, AnyRelayNumber, ParachainSetCode, + consensus_hook::ExpectParentIncluded, Ancestor, AnyRelayNumber, ConsensusHook, + ParachainSetCode, RelayChainStateProof, UsedBandwidth, }; use cumulus_primitives_core::ParaId; use frame_support::{ derive_impl, pallet_prelude::ConstU32, parameter_types, - traits::{ConstBool, EnqueueWithOrigin}, + traits::{ConstBool, EnqueueWithOrigin, ExecuteBlock, Hooks}, + BoundedVec, }; -use sp_core::Get; +use rstest::rstest; +use sp_consensus_aura::{sr25519::AuthorityId, Slot}; +use sp_core::{Blake2Hasher, Get, H256}; use sp_io::TestExternalities; +use sp_keyring::Sr25519Keyring::*; +use sp_runtime::{generic::Digest, traits::Block as BlockT}; +use sp_trie::{proof_size_extension::ProofSizeExt, recorder::Recorder}; use sp_version::RuntimeVersion; use std::cell::RefCell; +// Test pallet that reads storage and calls storage_proof_size +#[frame_support::pallet] +pub mod test_pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + pub type TestStorage = StorageValue<_, u64, ValueQuery>; + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(_n: BlockNumberFor) -> Weight { + let proof_size = dbg!( + cumulus_primitives_proof_size_hostfunction::storage_proof_size::storage_proof_size( + ) + ); + + // We need to commit the `proof_size` to ensure that the test is failing if we are + // receiving a different proof size later on. 
+ TestStorage::::put(proof_size); + + Weight::zero() + } + } +} + type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( @@ -43,6 +78,7 @@ frame_support::construct_runtime!( ParachainSystem: cumulus_pallet_parachain_system, Aura: pallet_aura, AuraExt: crate, + TestPallet: test_pallet, } ); @@ -69,6 +105,8 @@ impl frame_system::Config for Test { impl crate::Config for Test {} +impl test_pallet::Config for Test {} + std::thread_local! { pub static PARA_SLOT_DURATION: RefCell = RefCell::new(6000); } @@ -117,290 +155,371 @@ impl cumulus_pallet_parachain_system::Config for Test { type RelayParentOffset = ConstU32<0>; } -#[cfg(test)] -mod test { - use crate::test::*; - use cumulus_pallet_parachain_system::{ - Ancestor, ConsensusHook, RelayChainStateProof, UsedBandwidth, - }; - use rstest::rstest; - use sp_core::H256; - - fn set_ancestors() { - let mut ancestors = Vec::new(); - for i in 0..3 { - let mut ancestor = Ancestor::new_unchecked(UsedBandwidth::default(), None); - ancestor.replace_para_head_hash(H256::repeat_byte(i + 1)); - ancestors.push(ancestor); - } - cumulus_pallet_parachain_system::UnincludedSegment::::put(ancestors); - } - - pub fn new_test_ext(para_slot: u64) -> sp_io::TestExternalities { - let mut ext = TestExternalities::new_empty(); - ext.execute_with(|| { - set_ancestors(); - // Set initial parachain slot - pallet_aura::CurrentSlot::::put(Slot::from(para_slot)); - }); - ext +fn set_ancestors() { + let mut ancestors = Vec::new(); + for i in 0..3 { + let mut ancestor = Ancestor::new_unchecked(UsedBandwidth::default(), None); + ancestor.replace_para_head_hash(H256::repeat_byte(i + 1)); + ancestors.push(ancestor); } + cumulus_pallet_parachain_system::UnincludedSegment::::put(ancestors); +} - fn set_relay_slot(slot: u64, authored: u32) { - RelaySlotInfo::::put((Slot::from(slot), authored)) - } +fn new_test_ext(para_slot: u64) -> sp_io::TestExternalities { + let mut ext = TestExternalities::new_empty(); + ext.execute_with(|| { + 
set_ancestors(); + // Set initial parachain slot + pallet_aura::CurrentSlot::::put(Slot::from(para_slot)); + }); + ext +} - fn relay_chain_state_proof(relay_slot: u64) -> RelayChainStateProof { - let mut builder = cumulus_test_relay_sproof_builder::RelayStateSproofBuilder::default(); - builder.current_slot = relay_slot.into(); +fn set_relay_slot(slot: u64, authored: u32) { + RelaySlotInfo::::put((Slot::from(slot), authored)) +} - let (hash, state_proof) = builder.into_state_root_and_proof(); +fn relay_chain_state_proof(relay_slot: u64) -> RelayChainStateProof { + let mut builder = cumulus_test_relay_sproof_builder::RelayStateSproofBuilder::default(); + builder.current_slot = relay_slot.into(); - RelayChainStateProof::new(ParaId::from(200), hash, state_proof) - .expect("Should be able to construct state proof.") - } + let (hash, state_proof) = builder.into_state_root_and_proof(); - fn assert_slot_info(expected_slot: u64, expected_authored: u32) { - let (slot, authored) = pallet::RelaySlotInfo::::get().unwrap(); - assert_eq!(slot, Slot::from(expected_slot), "Slot stored in RelaySlotInfo is incorrect."); - assert_eq!( - authored, expected_authored, - "Number of authored blocks stored in RelaySlotInfo is incorrect." - ); - } + RelayChainStateProof::new(ParaId::from(200), hash, state_proof) + .expect("Should be able to construct state proof.") +} - const DEFAULT_TEST_VELOCITY: u32 = 2; +fn assert_slot_info(expected_slot: u64, expected_authored: u32) { + let (slot, authored) = pallet::RelaySlotInfo::::get().unwrap(); + assert_eq!(slot, Slot::from(expected_slot), "Slot stored in RelaySlotInfo is incorrect."); + assert_eq!( + authored, expected_authored, + "Number of authored blocks stored in RelaySlotInfo is incorrect." 
+ ); +} - #[test] - fn test_velocity() { - type Hook = FixedVelocityConsensusHook; +const DEFAULT_TEST_VELOCITY: u32 = 2; - new_test_ext(10).execute_with(|| { - let state_proof = relay_chain_state_proof(10); - let (_, capacity) = Hook::on_state_proof(&state_proof); - assert_eq!(capacity, NonZeroU32::new(1).unwrap().into()); - assert_slot_info(10, 1); +#[test] +fn test_velocity() { + type Hook = FixedVelocityConsensusHook; - let (_, capacity) = Hook::on_state_proof(&state_proof); - assert_eq!(capacity, NonZeroU32::new(1).unwrap().into()); - assert_slot_info(10, 2); - }); - } + new_test_ext(10).execute_with(|| { + let state_proof = relay_chain_state_proof(10); + let (_, capacity) = Hook::on_state_proof(&state_proof); + assert_eq!(capacity, NonZeroU32::new(1).unwrap().into()); + assert_slot_info(10, 1); - #[test] - fn test_velocity_2() { - type Hook = FixedVelocityConsensusHook; + let (_, capacity) = Hook::on_state_proof(&state_proof); + assert_eq!(capacity, NonZeroU32::new(1).unwrap().into()); + assert_slot_info(10, 2); + }); +} - new_test_ext(10).execute_with(|| { - let state_proof = relay_chain_state_proof(10); - let (_, capacity) = Hook::on_state_proof(&state_proof); - assert_eq!(capacity, NonZeroU32::new(3).unwrap().into()); - assert_slot_info(10, 1); +#[test] +fn test_velocity_2() { + type Hook = FixedVelocityConsensusHook; - let (_, capacity) = Hook::on_state_proof(&state_proof); - assert_eq!(capacity, NonZeroU32::new(3).unwrap().into()); - assert_slot_info(10, 2); - }); - } + new_test_ext(10).execute_with(|| { + let state_proof = relay_chain_state_proof(10); + let (_, capacity) = Hook::on_state_proof(&state_proof); + assert_eq!(capacity, NonZeroU32::new(3).unwrap().into()); + assert_slot_info(10, 1); - #[test] - #[should_panic(expected = "authored blocks limit is reached for the slot")] - fn test_exceeding_velocity_limit() { - type Hook = FixedVelocityConsensusHook; - - new_test_ext(10).execute_with(|| { - let state_proof = relay_chain_state_proof(10); - for 
authored in 0..=DEFAULT_TEST_VELOCITY + 1 { - Hook::on_state_proof(&state_proof); - assert_slot_info(10, authored + 1); - } - }); - } + let (_, capacity) = Hook::on_state_proof(&state_proof); + assert_eq!(capacity, NonZeroU32::new(3).unwrap().into()); + assert_slot_info(10, 2); + }); +} - #[test] - fn test_para_slot_calculated_from_slot_duration() { - type Hook = FixedVelocityConsensusHook; +#[test] +#[should_panic(expected = "authored blocks limit is reached for the slot")] +fn test_exceeding_velocity_limit() { + type Hook = FixedVelocityConsensusHook; - new_test_ext(5).execute_with(|| { - let state_proof = relay_chain_state_proof(10); + new_test_ext(10).execute_with(|| { + let state_proof = relay_chain_state_proof(10); + for authored in 0..=DEFAULT_TEST_VELOCITY + 1 { Hook::on_state_proof(&state_proof); - }); - } + assert_slot_info(10, authored + 1); + } + }); +} - #[rstest] - #[case::short_para_slot_okay(2000, 30, 10)] - #[case::normal_para_slot_okay(6000, 10, 10)] - // Test boundaries for long parachain slots. 
- #[case::long_para_slot_okay(24000, 1, 7)] - #[should_panic( - expected = "must match relay-derived slot: parachain_slot=Slot(2), derived_from_relay_slot=Slot(1)" - )] - #[case::long_para_slot_mismatch(24000, 2, 7)] - #[case::long_para_slot_okay(24000, 2, 8)] - #[case::long_para_slot_okay(24000, 2, 9)] - #[case::long_para_slot_okay(24000, 2, 10)] - #[case::long_para_slot_okay(24000, 2, 11)] - #[should_panic( - expected = "must match relay-derived slot: parachain_slot=Slot(2), derived_from_relay_slot=Slot(3)" - )] - #[case::long_para_slot_mismatch(24000, 2, 12)] - #[case::long_para_slot_okay(24000, 3, 12)] - #[case::short_para_slot(2000, 30, 10)] - #[should_panic( - expected = "must match relay-derived slot: parachain_slot=Slot(31), derived_from_relay_slot=Slot(30)" - )] - #[case::short_para_slot_mismatch(2000, 31, 10)] - #[should_panic( - expected = "must match relay-derived slot: parachain_slot=Slot(32), derived_from_relay_slot=Slot(30)" - )] - #[case::short_para_slot_mismatch(2000, 32, 10)] - #[should_panic( - expected = "must match relay-derived slot: parachain_slot=Slot(29), derived_from_relay_slot=Slot(30)" - )] - #[case::short_para_slot_mismatch(2000, 29, 10)] - #[should_panic( - expected = "must match relay-derived slot: parachain_slot=Slot(1), derived_from_relay_slot=Slot(30)" - )] - #[case::short_para_slot_mismatch(2000, 1, 10)] - #[should_panic( - expected = "must match relay-derived slot: parachain_slot=Slot(1), derived_from_relay_slot=Slot(10)" - )] - #[case::normal_para_slot_mismatch(6000, 1, 10)] - #[should_panic( - expected = "must match relay-derived slot: parachain_slot=Slot(9), derived_from_relay_slot=Slot(10)" - )] - #[case::normal_para_slot_mismatch(6000, 9, 10)] - #[should_panic( - expected = "must match relay-derived slot: parachain_slot=Slot(11), derived_from_relay_slot=Slot(10)" - )] - #[case::normal_para_slot_mismatch(6000, 11, 10)] - fn test_para_slot_too_high( - #[case] para_slot_duration: u64, - #[case] para_slot: u64, - #[case] 
relay_slot: u64, - ) { - type Hook = FixedVelocityConsensusHook; - - TestSlotDuration::set_slot_duration(para_slot_duration); - new_test_ext(para_slot).execute_with(|| { - let state_proof = relay_chain_state_proof(relay_slot); - Hook::on_state_proof(&state_proof); - }); - } +#[test] +fn test_para_slot_calculated_from_slot_duration() { + type Hook = FixedVelocityConsensusHook; - #[test] - fn test_velocity_at_least_one() { - // Even though this is 0, one block should always be allowed. - const VELOCITY: u32 = 0; - type Hook = FixedVelocityConsensusHook; + new_test_ext(5).execute_with(|| { + let state_proof = relay_chain_state_proof(10); + Hook::on_state_proof(&state_proof); + }); +} - new_test_ext(10).execute_with(|| { - let state_proof = relay_chain_state_proof(10); - Hook::on_state_proof(&state_proof); - }); - } +#[rstest] +#[case::short_para_slot_okay(2000, 30, 10)] +#[case::normal_para_slot_okay(6000, 10, 10)] +// Test boundaries for long parachain slots. +#[case::long_para_slot_okay(24000, 1, 7)] +#[should_panic( + expected = "must match relay-derived slot: parachain_slot=Slot(2), derived_from_relay_slot=Slot(1)" +)] +#[case::long_para_slot_mismatch(24000, 2, 7)] +#[case::long_para_slot_okay(24000, 2, 8)] +#[case::long_para_slot_okay(24000, 2, 9)] +#[case::long_para_slot_okay(24000, 2, 10)] +#[case::long_para_slot_okay(24000, 2, 11)] +#[should_panic( + expected = "must match relay-derived slot: parachain_slot=Slot(2), derived_from_relay_slot=Slot(3)" +)] +#[case::long_para_slot_mismatch(24000, 2, 12)] +#[case::long_para_slot_okay(24000, 3, 12)] +#[case::short_para_slot(2000, 30, 10)] +#[should_panic( + expected = "must match relay-derived slot: parachain_slot=Slot(31), derived_from_relay_slot=Slot(30)" +)] +#[case::short_para_slot_mismatch(2000, 31, 10)] +#[should_panic( + expected = "must match relay-derived slot: parachain_slot=Slot(32), derived_from_relay_slot=Slot(30)" +)] +#[case::short_para_slot_mismatch(2000, 32, 10)] +#[should_panic( + expected = "must 
match relay-derived slot: parachain_slot=Slot(29), derived_from_relay_slot=Slot(30)" +)] +#[case::short_para_slot_mismatch(2000, 29, 10)] +#[should_panic( + expected = "must match relay-derived slot: parachain_slot=Slot(1), derived_from_relay_slot=Slot(30)" +)] +#[case::short_para_slot_mismatch(2000, 1, 10)] +#[should_panic( + expected = "must match relay-derived slot: parachain_slot=Slot(1), derived_from_relay_slot=Slot(10)" +)] +#[case::normal_para_slot_mismatch(6000, 1, 10)] +#[should_panic( + expected = "must match relay-derived slot: parachain_slot=Slot(9), derived_from_relay_slot=Slot(10)" +)] +#[case::normal_para_slot_mismatch(6000, 9, 10)] +#[should_panic( + expected = "must match relay-derived slot: parachain_slot=Slot(11), derived_from_relay_slot=Slot(10)" +)] +#[case::normal_para_slot_mismatch(6000, 11, 10)] +fn test_para_slot_too_high( + #[case] para_slot_duration: u64, + #[case] para_slot: u64, + #[case] relay_slot: u64, +) { + type Hook = FixedVelocityConsensusHook; + + TestSlotDuration::set_slot_duration(para_slot_duration); + new_test_ext(para_slot).execute_with(|| { + let state_proof = relay_chain_state_proof(relay_slot); + Hook::on_state_proof(&state_proof); + }); +} - #[test] - #[should_panic( - expected = "Parachain slot must match relay-derived slot: parachain_slot=Slot(8), derived_from_relay_slot=Slot(5) velocity=2" - )] - fn test_para_slot_calculated_from_slot_duration_2() { - // Note: In contrast to tests below, relay chain slot duration is 3000 here. - type Hook = FixedVelocityConsensusHook; - - new_test_ext(8).execute_with(|| { - let state_proof = relay_chain_state_proof(10); - let (_, _) = Hook::on_state_proof(&state_proof); - }); - } +#[test] +fn test_velocity_at_least_one() { + // Even though this is 0, one block should always be allowed. 
+ const VELOCITY: u32 = 0; + type Hook = FixedVelocityConsensusHook; - #[test] - fn test_velocity_resets_on_new_relay_slot() { - type Hook = FixedVelocityConsensusHook; - - new_test_ext(10).execute_with(|| { - let state_proof = relay_chain_state_proof(10); - for authored in 0..=DEFAULT_TEST_VELOCITY { - Hook::on_state_proof(&state_proof); - assert_slot_info(10, authored + 1); - } - - // Change parachain slot to match the new relay slot - pallet_aura::CurrentSlot::::put(Slot::from(11)); - let state_proof = relay_chain_state_proof(11); - for authored in 0..=DEFAULT_TEST_VELOCITY { - Hook::on_state_proof(&state_proof); - assert_slot_info(11, authored + 1); - } - }); - } + new_test_ext(10).execute_with(|| { + let state_proof = relay_chain_state_proof(10); + Hook::on_state_proof(&state_proof); + }); +} + +#[test] +#[should_panic( + expected = "Parachain slot must match relay-derived slot: parachain_slot=Slot(8), derived_from_relay_slot=Slot(5) velocity=2" +)] +fn test_para_slot_calculated_from_slot_duration_2() { + // Note: In contrast to tests below, relay chain slot duration is 3000 here. 
+ type Hook = FixedVelocityConsensusHook; + + new_test_ext(8).execute_with(|| { + let state_proof = relay_chain_state_proof(10); + let (_, _) = Hook::on_state_proof(&state_proof); + }); +} - #[test] - #[should_panic( - expected = "Slot moved backwards: stored_slot=Slot(10), relay_chain_slot=Slot(9)" - )] - fn test_backward_relay_slot_not_tolerated() { - type Hook = FixedVelocityConsensusHook; +#[test] +fn test_velocity_resets_on_new_relay_slot() { + type Hook = FixedVelocityConsensusHook; - new_test_ext(10).execute_with(|| { - let state_proof = relay_chain_state_proof(10); + new_test_ext(10).execute_with(|| { + let state_proof = relay_chain_state_proof(10); + for authored in 0..=DEFAULT_TEST_VELOCITY { Hook::on_state_proof(&state_proof); - assert_slot_info(10, 1); + assert_slot_info(10, authored + 1); + } - // Change parachain slot to match what would be derived from relay slot 9 - pallet_aura::CurrentSlot::::put(Slot::from(9)); - let state_proof = relay_chain_state_proof(9); + // Change parachain slot to match the new relay slot + pallet_aura::CurrentSlot::::put(Slot::from(11)); + let state_proof = relay_chain_state_proof(11); + for authored in 0..=DEFAULT_TEST_VELOCITY { Hook::on_state_proof(&state_proof); - }); - } + assert_slot_info(11, authored + 1); + } + }); +} - #[test] - fn test_can_build_upon_true_when_empty() { - type Hook = FixedVelocityConsensusHook; +#[test] +#[should_panic(expected = "Slot moved backwards: stored_slot=Slot(10), relay_chain_slot=Slot(9)")] +fn test_backward_relay_slot_not_tolerated() { + type Hook = FixedVelocityConsensusHook; + + new_test_ext(10).execute_with(|| { + let state_proof = relay_chain_state_proof(10); + Hook::on_state_proof(&state_proof); + assert_slot_info(10, 1); + + // Change parachain slot to match what would be derived from relay slot 9 + pallet_aura::CurrentSlot::::put(Slot::from(9)); + let state_proof = relay_chain_state_proof(9); + Hook::on_state_proof(&state_proof); + }); +} - new_test_ext(1).execute_with(|| { - 
let hash = H256::repeat_byte(0x1); - assert!(Hook::can_build_upon(hash, Slot::from(1))); - }); - } +#[test] +fn test_can_build_upon_true_when_empty() { + type Hook = FixedVelocityConsensusHook; + + new_test_ext(1).execute_with(|| { + let hash = H256::repeat_byte(0x1); + assert!(Hook::can_build_upon(hash, Slot::from(1))); + }); +} + +#[rstest] +#[case::slot_higher_ok(10, 11, DEFAULT_TEST_VELOCITY, true)] +#[case::slot_same_ok(10, 10, DEFAULT_TEST_VELOCITY, true)] +#[case::slot_decrease_illegal(10, 9, DEFAULT_TEST_VELOCITY, false)] +#[case::velocity_small_ok(10, 10, DEFAULT_TEST_VELOCITY - 1 , true)] +#[case::velocity_small_ok(10, 10, DEFAULT_TEST_VELOCITY - 2 , true)] +#[case::velocity_too_high_illegal(10, 10, DEFAULT_TEST_VELOCITY + 1 , false)] +fn test_can_build_upon_slot_can_not_decrease( + #[case] state_relay_slot: u64, + #[case] test_relay_slot: u64, + #[case] authored_in_slot: u32, + #[case] expected_result: bool, +) { + type Hook = FixedVelocityConsensusHook; + + new_test_ext(1).execute_with(|| { + let hash = H256::repeat_byte(0x1); + + set_relay_slot(state_relay_slot, authored_in_slot); + // Slot moves backwards + assert_eq!(Hook::can_build_upon(hash, Slot::from(test_relay_slot)), expected_result); + }); +} + +#[test] +fn test_can_build_upon_unincluded_segment_size() { + type Hook = FixedVelocityConsensusHook; + + new_test_ext(1).execute_with(|| { + let relay_slot = Slot::from(10); - #[rstest] - #[case::slot_higher_ok(10, 11, DEFAULT_TEST_VELOCITY, true)] - #[case::slot_same_ok(10, 10, DEFAULT_TEST_VELOCITY, true)] - #[case::slot_decrease_illegal(10, 9, DEFAULT_TEST_VELOCITY, false)] - #[case::velocity_small_ok(10, 10, DEFAULT_TEST_VELOCITY - 1 , true)] - #[case::velocity_small_ok(10, 10, DEFAULT_TEST_VELOCITY - 2 , true)] - #[case::velocity_too_high_illegal(10, 10, DEFAULT_TEST_VELOCITY + 1 , false)] - fn test_can_build_upon_slot_can_not_decrease( - #[case] state_relay_slot: u64, - #[case] test_relay_slot: u64, - #[case] authored_in_slot: u32, - #[case] 
expected_result: bool, - ) { - type Hook = FixedVelocityConsensusHook; - - new_test_ext(1).execute_with(|| { - let hash = H256::repeat_byte(0x1); - - set_relay_slot(state_relay_slot, authored_in_slot); - // Slot moves backwards - assert_eq!(Hook::can_build_upon(hash, Slot::from(test_relay_slot)), expected_result); - }); + set_relay_slot(10, DEFAULT_TEST_VELOCITY); + // Size after included is two, we can not build + assert!(!Hook::can_build_upon(H256::repeat_byte(0x1), relay_slot)); + + // Size after included is one, we can build + assert!(Hook::can_build_upon(H256::repeat_byte(0x2), relay_slot)); + }); +} + +/// This test ensures that when we call `BlockExecutor::execute_block` in `validate_block`, +/// it doesn't change the proof size host function return values. Otherwise, it may breaks +/// logic that is fetching the proof size in `on_initialize`. +#[test] +fn block_executor_does_not_influence_proof_size_recordings() { + fn build_block(header: ::Header) -> ::Header { + // Initialize the block + frame_system::Pallet::::initialize( + &header.number, + &header.parent_hash, + &header.digest(), + ); + + // We omit `parachain-system` as it is not important here. 
+ as Hooks<_>>::on_initialize(header.number); + as Hooks<_>>::on_initialize(header.number); + as Hooks<_>>::on_initialize(header.number); + + as Hooks<_>>::on_finalize(header.number); + as Hooks<_>>::on_finalize(header.number); + as Hooks<_>>::on_finalize(header.number); + + // Finalize the block + frame_system::Pallet::::finalize() } - #[test] - fn test_can_build_upon_unincluded_segment_size() { - type Hook = FixedVelocityConsensusHook; + // Create a simple executive that calls on_initialize and on_finalize + struct TestExecutive; + impl ExecuteBlock for TestExecutive { + fn verify_and_remove_seal(_: &mut ::LazyBlock) {} - new_test_ext(1).execute_with(|| { - let relay_slot = Slot::from(10); + fn execute_verified_block(block: ::LazyBlock) { + let header = block.header(); - set_relay_slot(10, DEFAULT_TEST_VELOCITY); - // Size after included is two, we can not build - assert!(!Hook::can_build_upon(H256::repeat_byte(0x1), relay_slot)); + let new_header = build_block(header.clone()); - // Size after included is one, we can build - assert!(Hook::can_build_upon(H256::repeat_byte(0x2), relay_slot)); - }); + assert_eq!(*header, new_header); + } } + + let mut ext = new_test_ext(10); + + ext.execute_with(|| { + // Let's setup some authorities + let authority_id = AuthorityId::from(Alice.public()); + let authorities: BoundedVec> = + vec![authority_id.clone()].try_into().unwrap(); + pallet_aura::Authorities::::put(authorities.clone()); + Authorities::::put(authorities.clone()); + }); + + ext.commit_all().unwrap(); + + let recorder = Recorder::::default(); + + // Register the ProofSizeExt extension + ext.register_extension(ProofSizeExt::new(recorder.clone())); + + let mut header = ext.execute_with_recorder(recorder.clone(), || { + let mut digest = Digest::default(); + digest.push(CompatibleDigestItem::<()>::aura_pre_digest(10u64.into())); + + build_block(HeaderT::new( + 1, + Default::default(), + Default::default(), + Default::default(), + digest, + )) + }); + + let sig = 
Alice.sign(header.hash().as_ref()); + let seal = CompatibleDigestItem::aura_seal(sig); + header.digest_mut().push(seal); + + let mut block = Block::new(header, Default::default()).into(); + + ext.reset_overlay(); + ext.execute_with_recorder(recorder, || { + BlockExecutor::::verify_and_remove_seal(&mut block); + }); + + let recorder = Recorder::::default(); + + // Register the ProofSizeExt extension again to overwrite the old one. + ext.register_extension(ProofSizeExt::new(recorder.clone())); + + ext.reset_overlay(); + ext.execute_with_recorder(recorder, || { + BlockExecutor::::execute_verified_block(block); + }); } diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 29905d921f375..32351a318f3ca 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -179,7 +179,7 @@ where let cache_provider = trie_cache::CacheProvider::new(); let seen_nodes = SeenNodes::>::default(); - for (block_index, block) in blocks.into_iter().enumerate() { + for (block_index, mut block) in blocks.into_iter().enumerate() { // We use the storage root of the `parent_head` to ensure that it is the correct root. // This is already being done above while creating the in-memory db, but let's be paranoid!! let backend = sp_state_machine::TrieBackendBuilder::new_with_cache( @@ -205,6 +205,15 @@ where parent_header = block.header().clone(); + run_with_externalities_and_recorder::( + &backend, + &mut Default::default(), + &mut Default::default(), + || { + E::verify_and_remove_seal(&mut block); + }, + ); + run_with_externalities_and_recorder::( &execute_backend, // Here is the only place where we want to use the recorder. 
@@ -214,7 +223,7 @@ where &mut execute_recorder, &mut overlay, || { - E::execute_block(block); + E::execute_verified_block(block); }, ); diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 2b955c4514309..dcfa496486dc0 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -33,7 +33,7 @@ use polkadot_parachain_primitives::primitives::ValidationResult; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sp_api::{ApiExt, Core, ProofRecorder, ProvideRuntimeApi}; use sp_consensus_slots::SlotDuration; -use sp_core::H256; +use sp_core::{Hasher, H256}; use sp_runtime::{ traits::{BlakeTwo256, Block as BlockT, Header as HeaderT}, DigestItem, @@ -381,6 +381,40 @@ fn validate_block_returns_custom_head_data() { assert_eq!(expected_header, res_header); } +#[test] +fn validate_block_rejects_invalid_seal() { + sp_tracing::try_init_simple(); + + if env::var("RUN_TEST").is_ok() { + let (client, parent_head) = create_test_client(); + let TestBlockData { mut block, validation_data, .. 
} = build_block_with_witness( + &client, + Vec::new(), + parent_head.clone(), + Default::default(), + Default::default(), + ); + let (id, data) = + block.blocks_mut()[0].header.digest.logs.last().unwrap().as_seal().unwrap(); + let mut data = data.to_vec(); + let random = BlakeTwo256::hash(&data); + data[..random.as_ref().len()].copy_from_slice(random.as_ref()); + + *block.blocks_mut()[0].header.digest.logs.last_mut().unwrap() = DigestItem::Seal(id, data); + + call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) + .unwrap_err(); + } else { + let output = Command::new(env::current_exe().unwrap()) + .args(["validate_block_rejects_invalid_seal", "--", "--nocapture"]) + .env("RUN_TEST", "1") + .output() + .expect("Runs the test"); + assert!(output.status.success()); + + assert!(dbg!(String::from_utf8(output.stderr).unwrap()).contains("Invalid AuRa seal")); + } +} #[test] fn validate_block_invalid_parent_hash() { sp_tracing::try_init_simple(); diff --git a/substrate/frame/executive/src/lib.rs b/substrate/frame/executive/src/lib.rs index 5fdc9ea814f7f..4446ecf1e3d9a 100644 --- a/substrate/frame/executive/src/lib.rs +++ b/substrate/frame/executive/src/lib.rs @@ -271,7 +271,11 @@ where OriginOf: From>, UnsignedValidator: ValidateUnsigned>, { - fn execute_block(block: Block::LazyBlock) { + fn verify_and_remove_seal(_: &mut ::LazyBlock) { + // Nothing to do here. + } + + fn execute_verified_block(block: Block::LazyBlock) { Executive::< System, Block, diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index 6a29e64b03848..3b808ec5cdf18 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -847,10 +847,34 @@ pub trait ExecuteBlock { /// This will execute all extrinsics in the block and check that the resulting header is /// correct. 
/// + /// This function is a wrapper around [`Self::verify_and_remove_seal`] and + /// [`Self::execute_verified_block`]. + /// + /// # Panic + /// + /// Panics when an extrinsics panics or the resulting header doesn't match the expected header + /// or the seal is invalid. + fn execute_block(mut block: Block::LazyBlock) { + Self::verify_and_remove_seal(&mut block); + Self::execute_verified_block(block); + } + + /// Verify and remove seal. + /// + /// Verifies any seal meant for the consensus logic represented by the implementation. An + /// implementation may also chooses to not verify anything. + /// + /// # Panic + /// + /// Panics if a seal is invalid or if a seal is required, but not present. + fn verify_and_remove_seal(block: &mut Block::LazyBlock); + + /// Executes the given `block` after it was verified by `[Self::verify_and_remove_seal]`. + /// /// # Panic /// /// Panics when an extrinsics panics or the resulting header doesn't match the expected header. - fn execute_block(block: Block::LazyBlock); + fn execute_verified_block(block: Block::LazyBlock); } /// Something that can compare privileges of two origins. diff --git a/substrate/primitives/state-machine/src/testing.rs b/substrate/primitives/state-machine/src/testing.rs index b2277176db91d..b96cc6874f416 100644 --- a/substrate/primitives/state-machine/src/testing.rs +++ b/substrate/primitives/state-machine/src/testing.rs @@ -36,7 +36,7 @@ use sp_core::{ }, }; use sp_externalities::{Extension, ExtensionStore, Extensions}; -use sp_trie::{PrefixedMemoryDB, StorageProof}; +use sp_trie::{recorder::Recorder, PrefixedMemoryDB, StorageProof}; /// Simple HashMap-based Externalities impl. pub struct TestExternalities @@ -266,6 +266,21 @@ where (outcome, proof) } + /// Execute the given closure while `self` set as externalities and the given `proof_recorder` + /// enabled. 
+ pub fn execute_with_recorder( + &mut self, + proof_recorder: Recorder, + execute: impl FnOnce() -> R, + ) -> R { + let proving_backend = + TrieBackendBuilder::wrap(&self.backend).with_recorder(proof_recorder).build(); + let mut proving_ext = + Ext::new(&mut self.overlay, &proving_backend, Some(&mut self.extensions)); + + sp_externalities::set_and_run_with_externalities(&mut proving_ext, execute) + } + /// Execute the given closure while `self` is set as externalities. /// /// Returns the result of the given closure, if no panics occurred. @@ -280,6 +295,11 @@ where }) .map_err(|e| format!("Closure panicked: {:?}", e)) } + + /// Resets the overlay to its default state. + pub fn reset_overlay(&mut self) { + self.overlay = Default::default(); + } } impl std::fmt::Debug for TestExternalities From 988549cde028cb5c55e85788f2fefb63bc541472 Mon Sep 17 00:00:00 2001 From: "cmd[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 24 Nov 2025 12:08:16 +0000 Subject: [PATCH 187/312] Update from github-actions[bot] running command 'prdoc --audience runtime_dev --bump major' --- prdoc/pr_10396.prdoc | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 prdoc/pr_10396.prdoc diff --git a/prdoc/pr_10396.prdoc b/prdoc/pr_10396.prdoc new file mode 100644 index 0000000000000..af5504a4abcc8 --- /dev/null +++ b/prdoc/pr_10396.prdoc @@ -0,0 +1,26 @@ +title: '`ExecuteBlock` split up seal verification and actual execution' +doc: +- audience: Runtime Dev + description: "`ExecuteBlock` exposes the `execute_block` function that is used by\ + \ `validate_block` to execute a block. In case auf AuRa the block execution includes\ + \ the verification of the seal and the removal of the seal. To verify the seal,\ + \ the block executor needs to load the current authority set. 
The problem is that\ + \ when we have storage proof reclaim enabled and the host function is used in\ + \ `on_initialize` before `pallet_aura_ext::on_initialize` (this is where we fetch\ + \ the authority set to ensure it appears in the proof) is called, it leads to\ + \ `validate_block` returning a different size and thus, breaking the block. To\ + \ solve this issue `ExecuteBlock` is now split into seal verification and execution\ + \ of the verified block. In `validate_block` the seal verification is then run\ + \ outside of the block execution, not leading to the issues of reporting different\ + \ proof sizes.\r\n" +crates: +- name: cumulus-pallet-aura-ext + bump: major +- name: cumulus-pallet-parachain-system + bump: major +- name: frame-executive + bump: major +- name: frame-support + bump: major +- name: sp-state-machine + bump: major From e121a8e23ac27dd2bf1f4fbe7b958b0296f54112 Mon Sep 17 00:00:00 2001 From: "cmd[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 24 Nov 2025 12:30:43 +0000 Subject: [PATCH 188/312] Update from github-actions[bot] running command 'fmt' --- cumulus/pallets/aura-ext/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index 93746087192c5..8b2283fdf20da 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -28,8 +28,8 @@ sp-runtime = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } [dev-dependencies] -rstest = { workspace = true } log = { workspace = true, default-features = true } +rstest = { workspace = true } # Cumulus cumulus-pallet-parachain-system = { workspace = true, default-features = true } @@ -40,8 +40,8 @@ cumulus-test-relay-sproof-builder = { workspace = true, default-features = true # Substrate sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } -sp-state-machine = { 
workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } From 5140cbf4eea0b5a588337b3c5e3de33e817c26ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 24 Nov 2025 14:02:48 +0100 Subject: [PATCH 189/312] Remove unused dep --- Cargo.lock | 1 - cumulus/pallets/aura-ext/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e68d47656af96..5f44ba0e1d852 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4696,7 +4696,6 @@ dependencies = [ "cumulus-test-relay-sproof-builder", "frame-support", "frame-system", - "log", "pallet-aura", "pallet-timestamp", "parity-scale-codec", diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index 8b2283fdf20da..ad633865e0fb4 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -28,7 +28,6 @@ sp-runtime = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } [dev-dependencies] -log = { workspace = true, default-features = true } rstest = { workspace = true } # Cumulus From 1b0dbd3e805dec73bcb294e762bce66bfe3d5a35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 24 Nov 2025 14:03:42 +0100 Subject: [PATCH 190/312] Required for the test as well --- cumulus/test/runtime/src/test_pallet.rs | 35 +++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index 2f925931cd6a0..6a7cdb532ef91 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -24,8 +24,9 @@ pub const TEST_RUNTIME_UPGRADE_KEY: &[u8] = b"+test_runtime_upgrade_key+"; #[frame_support::pallet(dev_mode)] pub mod pallet { use 
crate::test_pallet::TEST_RUNTIME_UPGRADE_KEY; - use alloc::vec; + use alloc::{vec, vec::Vec}; use cumulus_primitives_core::CumulusDigestItem; + use cumulus_primitives_storage_weight_reclaim::get_proof_size; use frame_support::{ dispatch::DispatchInfo, inherent::{InherentData, InherentIdentifier, ProvideInherent}, @@ -57,9 +58,18 @@ pub mod pallet { #[pallet::storage] pub type InherentWeightConsume = StorageValue<_, Weight, OptionQuery>; + /// A map that contains on single big value at the current block. + /// + /// In every block we are moving the big value from the previous block to current block. This is + /// done to test that the storage proof size between multiple blocks in the same bundle is + /// shared. + #[pallet::storage] + pub type BigValueMove = + StorageMap<_, Twox64Concat, BlockNumberFor, Vec, OptionQuery>; + #[pallet::hooks] impl Hooks> for Pallet { - fn on_initialize(_n: BlockNumberFor) -> Weight { + fn on_initialize(n: BlockNumberFor) -> Weight { if ScheduleWeightRegistration::::get() { let weight_to_register = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 0); @@ -73,6 +83,19 @@ pub mod pallet { } } + if let Some(mut value) = BigValueMove::::take(n - 1u32.into()) { + // Modify the value a little bit. + let parent_hash = frame_system::Pallet::::parent_hash(); + value[..parent_hash.as_ref().len()].copy_from_slice(parent_hash.as_ref()); + + BigValueMove::::insert(n, value); + + // Depositing the event is important, because then we write the actual proof size + // into the state. If some node returns a different proof size on import of this + // block, we will detect it this way as the storage root will be different. 
+ Self::deposit_event(Event::MovedBigValue { proof_size: get_proof_size().unwrap() }) + } + Weight::zero() } } @@ -227,9 +250,17 @@ pub mod pallet { impl BuildGenesisConfig for GenesisConfig { fn build(&self) { sp_io::storage::set(TEST_RUNTIME_UPGRADE_KEY, &[1, 2, 3, 4]); + + BigValueMove::::insert(BlockNumberFor::::from(0u32), vec![0u8; 4 * 1024]); } } + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + MovedBigValue { proof_size: u64 }, + } + #[derive( Encode, Decode, From 030cbe655dc3fdb59458948a3672943da49c00d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 24 Nov 2025 20:30:46 +0100 Subject: [PATCH 191/312] Work on block import --- .../src/collators/slot_based/block_import.rs | 184 +++++++++++++++--- .../aura/src/collators/slot_based/mod.rs | 6 +- 2 files changed, 166 insertions(+), 24 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs index fe4ede3d78f6d..45aae13e94bb4 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -15,11 +15,15 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . 
-use codec::Codec; +use codec::{Codec, Decode, Encode}; use cumulus_client_proof_size_recording::prepare_proof_size_recording_transaction; use cumulus_primitives_core::{CoreInfo, CumulusDigestItem, RelayBlockIdentifier}; use futures::{stream::FusedStream, StreamExt}; -use parking_lot::Mutex; +use sc_client_api::{ + backend::AuxStore, + client::{AuxDataOperations, FinalityNotification, PreCommitActions}, + HeaderBackend, +}; use sc_consensus::{BlockImport, StateAction}; use sc_consensus_aura::{find_pre_digest, standalone::fetch_authorities}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -27,14 +31,69 @@ use sp_api::{ ApiExt, CallApiAt, CallContext, Core, ProofRecorder, ProofRecorderIgnoredNodes, ProvideRuntimeApi, StorageProof, }; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sp_consensus::BlockOrigin; use sp_consensus_aura::AuraApi; use sp_runtime::traits::{Block as BlockT, HashingFor, Header as _}; use sp_trie::{ proof_size_extension::{ProofSizeExt, RecordingProofSizeProvider}, recorder::IgnoredNodes, + GenericMemoryDB, KeyFunction, }; -use std::{collections::HashMap, marker::PhantomData, sync::Arc}; +use std::{marker::PhantomData, sync::Arc}; + +/// The aux storage key used to store the nodes to ignore for the given block hash. +fn nodes_to_ignore_key(block_hash: H) -> Vec { + (b"cumulus_slot_based_nodes_to_ignore", block_hash).encode() +} + +fn load_decode(backend: &B, key: &[u8]) -> ClientResult> +where + B: AuxStore, + T: Decode, +{ + let corrupt = |e: codec::Error| { + ClientError::Backend(format!("Nodes to ignore DB is corrupted. Decode error: {}", e)) + }; + match backend.get_aux(key)? { + None => Ok(None), + Some(t) => T::decode(&mut &t[..]).map(Some).map_err(corrupt), + } +} + +/// Convert stored node data back to IgnoredNodes. 
+fn nodes_to_ignored_nodes(nodes: Vec>) -> IgnoredNodes { + if nodes.is_empty() { + return IgnoredNodes::default(); + } + + // Create a StorageProof from the node data and convert to IgnoredNodes + let storage_proof = StorageProof::new(nodes); + IgnoredNodes::from_storage_proof::>(&storage_proof) +} + +/// Prepare a transaction to write the nodes to ignore to the aux storage. +/// +/// Returns the key-value pairs that need to be written to the aux storage. +fn prepare_nodes_to_ignore_transaction( + block_hash: Block::Hash, + nodes: Vec>, +) -> impl Iterator, Vec)> { + let key = nodes_to_ignore_key(block_hash); + let encoded_nodes = nodes.encode(); + + [(key, encoded_nodes)].into_iter() +} + +/// Load the nodes to ignore associated with a block and convert to IgnoredNodes. +fn load_nodes_to_ignore( + backend: &B, + block_hash: Block::Hash, +) -> ClientResult>> { + let nodes: Option>> = + load_decode(backend, nodes_to_ignore_key(block_hash).as_slice())?; + Ok(nodes.map(nodes_to_ignored_nodes::)) +} /// Handle for receiving the block and the storage proof from the [`SlotBasedBlockImport`]. 
/// @@ -71,7 +130,6 @@ pub struct SlotBasedBlockImport { inner: BI, client: Arc, sender: TracingUnboundedSender<(Block, StorageProof)>, - nodes_to_ignore: Arc>>>, _phantom: PhantomData, } @@ -85,13 +143,7 @@ impl SlotBasedBlockImport SlotBasedBlockImport, ) -> Result<(), sp_consensus::Error> where - Client: ProvideRuntimeApi + CallApiAt + Send + Sync, + Client: ProvideRuntimeApi + + CallApiAt + + AuxStore + + HeaderBackend + + Send + + Sync, Client::StateBackend: Send, Client::Api: Core + AuraApi, AuthorityId: Codec + Send + Sync + std::fmt::Debug, @@ -130,8 +187,40 @@ impl SlotBasedBlockImport::default(); + let mut is_same_bundle = false; + + // Load parent block's header to check if it belongs to the same bundle + if let Ok(Some(parent_header)) = self.client.header(parent_hash) { + let parent_core_info = CumulusDigestItem::find_core_info(parent_header.digest()); + let parent_relay_block_identifier = + CumulusDigestItem::find_relay_block_identifier(parent_header.digest()); + + if let (Some(parent_core_info), Some(parent_relay_block_identifier)) = + (parent_core_info, parent_relay_block_identifier) + { + if let Ok(parent_slot) = find_pre_digest::(&parent_header) { + let parent_pov_bundle = PoVBundle { + author_index: *parent_slot as usize % authorities.len(), + core_info: parent_core_info, + relay_block_identifier: parent_relay_block_identifier, + }; + + // Only load nodes to ignore if both blocks are in the same bundle + if parent_pov_bundle == pov_bundle { + is_same_bundle = true; + if let Ok(Some(parent_nodes)) = + load_nodes_to_ignore::(&*self.client, parent_hash) + { + nodes_to_ignore = parent_nodes; + } + } + } + } + } let recorder = ProofRecorder::::with_ignored_nodes(nodes_to_ignore.clone()); let proof_size_recorder = RecordingProofSizeProvider::new(recorder.clone()); @@ -143,8 +232,6 @@ impl SlotBasedBlockImport SlotBasedBlockImport>(&storage_proof)); - nodes_to_ignore - .extend(IgnoredNodes::from_memory_db(gen_storage_changes.transaction.clone())); + // 
Collect new node data from this block's execution + let mut new_nodes = IgnoredNodes::from_storage_proof(&storage_proof); + new_nodes.extend(IgnoredNodes::from_memory_db(gen_storage_changes.transaction.clone())); + + // Load parent nodes if they exist (to combine with new nodes) + let mut all_nodes = if is_same_bundle { + // Load parent nodes as Vec> to combine with new nodes + load_decode(&*self.client, nodes_to_ignore_key(parent_hash).as_slice()) + .ok() + .flatten() + .unwrap_or_default() + } else { + Vec::new() + }; + + // Extend with new nodes + all_nodes.extend(new_nodes); + + // Store nodes to ignore in aux data for this block + let block_hash = params.header.hash(); + prepare_nodes_to_ignore_transaction::(block_hash, all_nodes).for_each(|(k, v)| { + params.auxiliary.push((k, Some(v))); + }); // Extract and store proof size recordings let recorded_sizes = proof_size_recorder @@ -176,7 +282,6 @@ impl SlotBasedBlockImport>(); if !recorded_sizes.is_empty() { - let block_hash = params.header.hash(); prepare_proof_size_recording_transaction(block_hash, recorded_sizes).for_each( |(k, v)| { params.auxiliary.push((k, Some(v))); @@ -199,7 +304,6 @@ impl Clone inner: self.inner.clone(), client: self.client.clone(), sender: self.sender.clone(), - nodes_to_ignore: self.nodes_to_ignore.clone(), _phantom: PhantomData, } } @@ -212,7 +316,8 @@ where Block: BlockT, BI: BlockImport + Send + Sync, BI::Error: Into, - Client: ProvideRuntimeApi + CallApiAt + Send + Sync, + Client: + ProvideRuntimeApi + CallApiAt + AuxStore + HeaderBackend + Send + Sync, Client::StateBackend: Send, Client::Api: Core + AuraApi, AuthorityId: Codec + Send + Sync + std::fmt::Debug, @@ -237,3 +342,36 @@ where self.inner.import_block(params).await.map_err(Into::into) } } + +/// Cleanup auxiliary storage for finalized blocks. +/// +/// This function removes nodes to ignore for blocks that are no longer needed +/// after finalization. 
It processes the finalized blocks and their stale heads to +/// determine which data can be safely removed. +fn aux_storage_cleanup(notification: &FinalityNotification) -> AuxDataOperations +where + Block: BlockT, +{ + // Convert the hashes to deletion operations + notification + .stale_blocks + .iter() + .map(|b| (nodes_to_ignore_key(b.hash), None)) + .collect() +} + +/// Register a finality action for cleaning up nodes to ignore. +/// +/// This should be called during consensus initialization to automatically clean up +/// nodes to ignore when blocks are finalized. +pub fn register_nodes_to_ignore_cleanup(client: Arc) +where + C: PreCommitActions + 'static, + Block: BlockT, +{ + let on_finality = move |notification: &FinalityNotification| -> AuxDataOperations { + aux_storage_cleanup(notification) + }; + + client.register_finality_action(Box::new(on_finality)); +} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index fd6e5a7a8f310..ae03b4ecb1145 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -67,7 +67,9 @@ //! 2. 
Submission to the collation-generation subsystem use self::{block_builder_task::run_block_builder, collation_task::run_collation_task}; -pub use block_import::{SlotBasedBlockImport, SlotBasedBlockImportHandle}; +pub use block_import::{ + register_nodes_to_ignore_cleanup, SlotBasedBlockImport, SlotBasedBlockImportHandle, +}; use codec::Codec; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; @@ -209,6 +211,8 @@ pub fn run Date: Mon, 24 Nov 2025 23:27:30 +0100 Subject: [PATCH 192/312] More cleanup --- .../src/collators/slot_based/block_import.rs | 57 +++++-------------- substrate/primitives/trie/src/recorder.rs | 4 +- 2 files changed, 15 insertions(+), 46 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs index 45aae13e94bb4..c8533dd069ce3 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -25,7 +25,7 @@ use sc_client_api::{ HeaderBackend, }; use sc_consensus::{BlockImport, StateAction}; -use sc_consensus_aura::{find_pre_digest, standalone::fetch_authorities}; +use sc_consensus_aura::find_pre_digest; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_api::{ ApiExt, CallApiAt, CallContext, Core, ProofRecorder, ProofRecorderIgnoredNodes, @@ -38,7 +38,6 @@ use sp_runtime::traits::{Block as BlockT, HashingFor, Header as _}; use sp_trie::{ proof_size_extension::{ProofSizeExt, RecordingProofSizeProvider}, recorder::IgnoredNodes, - GenericMemoryDB, KeyFunction, }; use std::{marker::PhantomData, sync::Arc}; @@ -61,26 +60,15 @@ where } } -/// Convert stored node data back to IgnoredNodes. 
-fn nodes_to_ignored_nodes(nodes: Vec>) -> IgnoredNodes { - if nodes.is_empty() { - return IgnoredNodes::default(); - } - - // Create a StorageProof from the node data and convert to IgnoredNodes - let storage_proof = StorageProof::new(nodes); - IgnoredNodes::from_storage_proof::>(&storage_proof) -} - /// Prepare a transaction to write the nodes to ignore to the aux storage. /// /// Returns the key-value pairs that need to be written to the aux storage. fn prepare_nodes_to_ignore_transaction( block_hash: Block::Hash, - nodes: Vec>, + ignored_nodes: IgnoredNodes, ) -> impl Iterator, Vec)> { let key = nodes_to_ignore_key(block_hash); - let encoded_nodes = nodes.encode(); + let encoded_nodes = ignored_nodes.encode(); [(key, encoded_nodes)].into_iter() } @@ -92,7 +80,8 @@ fn load_nodes_to_ignore( ) -> ClientResult>> { let nodes: Option>> = load_decode(backend, nodes_to_ignore_key(block_hash).as_slice())?; - Ok(nodes.map(nodes_to_ignored_nodes::)) + + nodes.map(|n| IgnoredNodes::decode(&mut &n[..])).transpose().map_err(Into::into) } /// Handle for receiving the block and the storage proof from the [`SlotBasedBlockImport`]. 
@@ -179,13 +168,6 @@ impl SlotBasedBlockImport(¶ms.header) .map_err(|error| sp_consensus::Error::Other(Box::new(error)))?; - let authorities = fetch_authorities(&*self.client, *params.header.parent_hash())?; - - let pov_bundle = PoVBundle { - author_index: *slot as usize % authorities.len(), - core_info, - relay_block_identifier, - }; let parent_hash = *params.header.parent_hash(); @@ -250,29 +232,16 @@ impl SlotBasedBlockImport> to combine with new nodes - load_decode(&*self.client, nodes_to_ignore_key(parent_hash).as_slice()) - .ok() - .flatten() - .unwrap_or_default() - } else { - Vec::new() - }; - - // Extend with new nodes - all_nodes.extend(new_nodes); + nodes_to_ignore.extend(IgnoredNodes::from_storage_proof(&storage_proof)); + nodes_to_ignore + .extend(IgnoredNodes::from_memory_db(gen_storage_changes.transaction.clone())); - // Store nodes to ignore in aux data for this block let block_hash = params.header.hash(); - prepare_nodes_to_ignore_transaction::(block_hash, all_nodes).for_each(|(k, v)| { - params.auxiliary.push((k, Some(v))); - }); + prepare_nodes_to_ignore_transaction::(block_hash, nodes_to_ignore).for_each( + |(k, v)| { + params.auxiliary.push((k, Some(v))); + }, + ); // Extract and store proof size recordings let recorded_sizes = proof_size_recorder diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index a2349a49716f2..a989f72cd124b 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -21,7 +21,7 @@ //! to record storage accesses to the state to generate a [`StorageProof`]. use crate::{GenericMemoryDB, NodeCodec, StorageProof}; -use codec::Encode; +use codec::{Decode, Encode}; use hash_db::Hasher; use memory_db::KeyFunction; use parking_lot::{Mutex, MutexGuard}; @@ -42,7 +42,7 @@ const LOG_TARGET: &str = "trie-recorder"; /// A list of ignored nodes for [`Recorder`]. 
/// /// These nodes when passed to a recorder will be ignored and not recorded by the recorder. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Encode, Decode)] pub struct IgnoredNodes { nodes: HashSet, } From 408af1c6445a342e0380e8ef9f5b4771252c6e6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 25 Nov 2025 16:45:32 +0100 Subject: [PATCH 193/312] Finish block import --- .../src/collators/slot_based/block_import.rs | 263 +++++++----------- .../collators/slot_based/collation_task.rs | 8 - .../aura/src/collators/slot_based/mod.rs | 14 +- .../polkadot-omni-node/lib/src/nodes/aura.rs | 12 +- cumulus/test/service/src/lib.rs | 16 +- .../externalities/src/extensions.rs | 6 - substrate/primitives/trie/Cargo.toml | 2 +- substrate/primitives/trie/src/recorder.rs | 20 +- 8 files changed, 138 insertions(+), 203 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs index c8533dd069ce3..3ba6f08d946cd 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -15,126 +15,141 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . 
+use crate::LOG_TARGET; use codec::{Codec, Decode, Encode}; use cumulus_client_proof_size_recording::prepare_proof_size_recording_transaction; -use cumulus_primitives_core::{CoreInfo, CumulusDigestItem, RelayBlockIdentifier}; -use futures::{stream::FusedStream, StreamExt}; +use cumulus_primitives_core::{BundleInfo, CoreInfo, CumulusDigestItem, RelayBlockIdentifier}; use sc_client_api::{ backend::AuxStore, client::{AuxDataOperations, FinalityNotification, PreCommitActions}, HeaderBackend, }; use sc_consensus::{BlockImport, StateAction}; -use sc_consensus_aura::find_pre_digest; -use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_api::{ ApiExt, CallApiAt, CallContext, Core, ProofRecorder, ProofRecorderIgnoredNodes, - ProvideRuntimeApi, StorageProof, + ProvideRuntimeApi, }; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sp_consensus::BlockOrigin; use sp_consensus_aura::AuraApi; use sp_runtime::traits::{Block as BlockT, HashingFor, Header as _}; -use sp_trie::{ - proof_size_extension::{ProofSizeExt, RecordingProofSizeProvider}, - recorder::IgnoredNodes, -}; +use sp_trie::proof_size_extension::{ProofSizeExt, RecordingProofSizeProvider}; use std::{marker::PhantomData, sync::Arc}; -/// The aux storage key used to store the nodes to ignore for the given block hash. -fn nodes_to_ignore_key(block_hash: H) -> Vec { +/// The aux storage key used to store the ignored nodes for the given block hash. +fn ignored_nodes_key(block_hash: H) -> Vec { (b"cumulus_slot_based_nodes_to_ignore", block_hash).encode() } -fn load_decode(backend: &B, key: &[u8]) -> ClientResult> -where - B: AuxStore, - T: Decode, -{ - let corrupt = |e: codec::Error| { - ClientError::Backend(format!("Nodes to ignore DB is corrupted. Decode error: {}", e)) - }; - match backend.get_aux(key)? 
{ - None => Ok(None), - Some(t) => T::decode(&mut &t[..]).map(Some).map_err(corrupt), - } -} - -/// Prepare a transaction to write the nodes to ignore to the aux storage. +/// Prepare a transaction to write the ignored nodes to the aux storage. /// /// Returns the key-value pairs that need to be written to the aux storage. -fn prepare_nodes_to_ignore_transaction( +fn prepare_ignored_nodes_transaction( block_hash: Block::Hash, - ignored_nodes: IgnoredNodes, + ignored_nodes: ProofRecorderIgnoredNodes, ) -> impl Iterator, Vec)> { - let key = nodes_to_ignore_key(block_hash); - let encoded_nodes = ignored_nodes.encode(); + let key = ignored_nodes_key(block_hash); + let encoded_nodes = as Encode>::encode(&ignored_nodes); [(key, encoded_nodes)].into_iter() } -/// Load the nodes to ignore associated with a block and convert to IgnoredNodes. -fn load_nodes_to_ignore( +/// Load the ignored nodes associated with a block. +fn load_ignored_nodes( backend: &B, block_hash: Block::Hash, -) -> ClientResult>> { - let nodes: Option>> = - load_decode(backend, nodes_to_ignore_key(block_hash).as_slice())?; - - nodes.map(|n| IgnoredNodes::decode(&mut &n[..])).transpose().map_err(Into::into) -} - -/// Handle for receiving the block and the storage proof from the [`SlotBasedBlockImport`]. -/// -/// This handle should be passed to [`Params`](super::Params) or can also be dropped if the node is -/// not running as collator. -pub struct SlotBasedBlockImportHandle { - receiver: TracingUnboundedReceiver<(Block, StorageProof)>, -} - -impl SlotBasedBlockImportHandle { - /// Returns the next item. - /// - /// The future will never return when the internal channel is closed. - pub async fn next(&mut self) -> (Block, StorageProof) { - loop { - if self.receiver.is_terminated() { - futures::pending!() - } else if let Some(res) = self.receiver.next().await { - return res - } - } +) -> ClientResult>> { + match backend.get_aux(&ignored_nodes_key(block_hash))? 
{ + None => Ok(None), + Some(t) => ProofRecorderIgnoredNodes::::decode(&mut &t[..]).map(Some).map_err(|e| { + ClientError::Backend(format!("Nodes to ignore DB is corrupted. Decode error: {}", e)) + }), } } -#[derive(Clone, Debug, Hash, PartialEq, Eq)] -struct PoVBundle { - relay_block_identifier: RelayBlockIdentifier, - core_info: CoreInfo, - author_index: usize, +/// Register the clean up method for cleaning ignored nodes from blocks on which no further blocks +/// will be imported. +fn register_ignored_nodes_cleanup(client: Arc) +where + C: PreCommitActions, + Block: BlockT, +{ + let on_finality = move |notification: &FinalityNotification| -> AuxDataOperations { + notification + .stale_blocks + .iter() + // Delete the ignored nodes for all stale blocks. + .map(|b| (ignored_nodes_key(b.hash), None)) + // We can not delete the ignored nodes for the finalized block, because blocks can still + // be imported on top of this block. As blocks are only finalized as bundles on the + // relay chain, we should never need them, but better safe than sorry :) + .chain(std::iter::once((ignored_nodes_key(*notification.header.parent_hash()), None))) + .collect() + }; + + client.register_finality_action(Box::new(on_finality)); } /// Special block import for the slot based collator. pub struct SlotBasedBlockImport { inner: BI, client: Arc, - sender: TracingUnboundedSender<(Block, StorageProof)>, - _phantom: PhantomData, + _phantom: PhantomData<(AuthorityId, Block)>, } impl SlotBasedBlockImport { /// Create a new instance. + pub fn new(inner: BI, client: Arc) -> Self + where + Client: PreCommitActions, + { + register_ignored_nodes_cleanup(client.clone()); + + Self { client, inner, _phantom: PhantomData } + } + + /// Get the [`ProofRecorderIgnoredNodes`] for `parent`. /// - /// The returned [`SlotBasedBlockImportHandle`] needs to be passed to the - /// [`Params`](super::Params), so that this block import instance can communicate with the - /// collation task. 
If the node is not running as a collator, just dropping the handle is fine. - pub fn new(inner: BI, client: Arc) -> (Self, SlotBasedBlockImportHandle) { - let (sender, receiver) = tracing_unbounded("SlotBasedBlockImportChannel", 1000); - - ( - Self { sender, client, inner, _phantom: PhantomData }, - SlotBasedBlockImportHandle { receiver }, - ) + /// If `parent` was not part of the same block bundle, the [`ProofRecorderIgnoredNodes`] are not + /// required and `None` will be returned. + fn get_ignored_nodes( + &self, + parent: Block::Hash, + core_info: &CoreInfo, + bundle_info: &BundleInfo, + relay_block_identifier: &RelayBlockIdentifier, + ) -> Option> + where + Client: AuxStore + HeaderBackend + Send + Sync, + { + let parent_header = self.client.header(parent).ok().flatten()?; + let parent_core_info = CumulusDigestItem::find_core_info(parent_header.digest())?; + let parent_bundle_info = CumulusDigestItem::find_bundle_info(parent_header.digest())?; + let parent_relay_block_identifier = + CumulusDigestItem::find_relay_block_identifier(parent_header.digest())?; + + if parent_relay_block_identifier != *relay_block_identifier { + tracing::trace!(target: LOG_TARGET, ?parent_relay_block_identifier, ?relay_block_identifier, "Relay block identifier doesn't match"); + return None; + } + + if parent_core_info != *core_info { + tracing::trace!(target: LOG_TARGET, ?parent_core_info, ?core_info, "Core info doesn't match"); + return None + } + + if parent_bundle_info.index.saturating_add(1) != bundle_info.index { + tracing::trace!(target: LOG_TARGET, ?parent_bundle_info, ?bundle_info, "Block is not a child, based on the index"); + return None + } + + match load_ignored_nodes::(&*self.client, parent) { + Ok(nodes) => nodes, + Err(error) => { + tracing::trace!(target: LOG_TARGET, ?parent, ?error, "Failed to load `IgnoredNodes` from aux store"); + None + }, + } } /// Execute the given block and collect the storage proof. 
@@ -158,51 +173,21 @@ impl SlotBasedBlockImport(¶ms.header) - .map_err(|error| sp_consensus::Error::Other(Box::new(error)))?; - let parent_hash = *params.header.parent_hash(); - // Try to load nodes to ignore from parent block if both blocks belong to the same bundle - let mut nodes_to_ignore = ProofRecorderIgnoredNodes::::default(); - let mut is_same_bundle = false; - - // Load parent block's header to check if it belongs to the same bundle - if let Ok(Some(parent_header)) = self.client.header(parent_hash) { - let parent_core_info = CumulusDigestItem::find_core_info(parent_header.digest()); - let parent_relay_block_identifier = - CumulusDigestItem::find_relay_block_identifier(parent_header.digest()); - - if let (Some(parent_core_info), Some(parent_relay_block_identifier)) = - (parent_core_info, parent_relay_block_identifier) - { - if let Ok(parent_slot) = find_pre_digest::(&parent_header) { - let parent_pov_bundle = PoVBundle { - author_index: *parent_slot as usize % authorities.len(), - core_info: parent_core_info, - relay_block_identifier: parent_relay_block_identifier, - }; - - // Only load nodes to ignore if both blocks are in the same bundle - if parent_pov_bundle == pov_bundle { - is_same_bundle = true; - if let Ok(Some(parent_nodes)) = - load_nodes_to_ignore::(&*self.client, parent_hash) - { - nodes_to_ignore = parent_nodes; - } - } - } - } - } + let mut nodes_to_ignore = self + .get_ignored_nodes(parent_hash, &core_info, &bundle_info, &relay_block_identifier) + .unwrap_or_default(); let recorder = ProofRecorder::::with_ignored_nodes(nodes_to_ignore.clone()); let proof_size_recorder = RecordingProofSizeProvider::new(recorder.clone()); @@ -210,7 +195,6 @@ impl SlotBasedBlockImport SlotBasedBlockImport::from_storage_proof::< + HashingFor, + >(&storage_proof)); + nodes_to_ignore.extend(ProofRecorderIgnoredNodes::::from_memory_db( + gen_storage_changes.transaction.clone(), + )); - let block_hash = params.header.hash(); - 
prepare_nodes_to_ignore_transaction::(block_hash, nodes_to_ignore).for_each( + let block_hash = params.post_hash(); + prepare_ignored_nodes_transaction::(block_hash, nodes_to_ignore).for_each( |(k, v)| { params.auxiliary.push((k, Some(v))); }, @@ -269,12 +256,7 @@ impl Clone for SlotBasedBlockImport { fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - client: self.client.clone(), - sender: self.sender.clone(), - _phantom: PhantomData, - } + Self { inner: self.inner.clone(), client: self.client.clone(), _phantom: PhantomData } } } @@ -311,36 +293,3 @@ where self.inner.import_block(params).await.map_err(Into::into) } } - -/// Cleanup auxiliary storage for finalized blocks. -/// -/// This function removes nodes to ignore for blocks that are no longer needed -/// after finalization. It processes the finalized blocks and their stale heads to -/// determine which data can be safely removed. -fn aux_storage_cleanup(notification: &FinalityNotification) -> AuxDataOperations -where - Block: BlockT, -{ - // Convert the hashes to deletion operations - notification - .stale_blocks - .iter() - .map(|b| (nodes_to_ignore_key(b.hash), None)) - .collect() -} - -/// Register a finality action for cleaning up nodes to ignore. -/// -/// This should be called during consensus initialization to automatically clean up -/// nodes to ignore when blocks are finalized. 
-pub fn register_nodes_to_ignore_cleanup(client: Arc) -where - C: PreCommitActions + 'static, - Block: BlockT, -{ - let on_finality = move |notification: &FinalityNotification| -> AuxDataOperations { - aux_storage_cleanup(notification) - }; - - client.register_finality_action(Box::new(on_finality)); -} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs index 584a31998e287..fdad1b31ca37f 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs @@ -51,8 +51,6 @@ pub struct Params { pub collator_service: CS, /// Receiver channel for communication with the block builder task. pub collator_receiver: TracingUnboundedReceiver>, - /// The handle from the special slot based block import. - pub block_import_handle: super::SlotBasedBlockImportHandle, /// When set, the collator will export every produced `POV` to this folder. pub export_pov: Option, } @@ -71,7 +69,6 @@ pub async fn run_collation_task( reinitialize, collator_service, mut collator_receiver, - mut block_import_handle, export_pov, }: Params, ) where @@ -101,11 +98,6 @@ pub async fn run_collation_task( handle_collation_message(message, &collator_service, &mut overseer_handle,relay_client.clone(),export_pov.clone()).await; }, - block_import_msg = block_import_handle.next().fuse() => { - // TODO: Implement me. - // Issue: https://github.com/paritytech/polkadot-sdk/issues/6495 - let _ = block_import_msg; - } } } } diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index ae03b4ecb1145..0137b51d48bea 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -67,9 +67,7 @@ //! 2. 
Submission to the collation-generation subsystem use self::{block_builder_task::run_block_builder, collation_task::run_collation_task}; -pub use block_import::{ - register_nodes_to_ignore_cleanup, SlotBasedBlockImport, SlotBasedBlockImportHandle, -}; +pub use block_import::SlotBasedBlockImport; use codec::Codec; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; @@ -109,7 +107,7 @@ mod slot_timer; mod tests; /// Parameters for [`run`]. -pub struct Params { +pub struct Params { /// Inherent data providers. Only non-consensus inherent data should be provided, i.e. /// the timestamp, slot, and paras inherents should be omitted, as they are set by this /// collator. @@ -141,8 +139,6 @@ pub struct Params, /// Spawner for spawning futures. pub spawner: Spawner, /// Slot duration of the relay chain @@ -156,7 +152,7 @@ pub struct Params( - params: Params, + params: Params, ) where Block: BlockT, Client: ProvideRuntimeApi @@ -202,7 +198,6 @@ pub fn run( params_with_export: SlotBasedParams< - Block, ParachainBlockImport< Block, SlotBasedBlockImport< @@ -547,7 +546,7 @@ impl, RuntimeApi, AuraId> ParachainClient, ::Public, >, - SlotBasedBlockImportHandle, + (), > for StartSlotBasedAuraConsensus where RuntimeApi: ConstructNodeRuntimeApi>, @@ -580,7 +579,7 @@ where announce_block: Arc>) + Send + Sync>, backend: Arc>, node_extra_args: NodeExtraArgs, - block_import_handle: SlotBasedBlockImportHandle, + _: (), ) -> Result<(), Error> { let proposer = sc_basic_authorship::ProposerFactory::new( task_manager.spawn_handle(), @@ -616,7 +615,6 @@ where collator_service, reinitialize: false, slot_offset: Duration::from_secs(1), - block_import_handle, spawner: task_manager.spawn_essential_handle(), export_pov: node_extra_args.export_pov, max_pov_percentage: node_extra_args.max_pov_percentage, @@ -644,12 +642,12 @@ where ParachainClient, ::Public, >; - type 
BlockImportAuxiliaryData = SlotBasedBlockImportHandle; + type BlockImportAuxiliaryData = (); fn init_block_import( client: Arc>, ) -> sc_service::error::Result<(Self::BlockImport, Self::BlockImportAuxiliaryData)> { - Ok(SlotBasedBlockImport::new(client.clone(), client)) + Ok((SlotBasedBlockImport::new(client.clone(), client), ())) } } diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 4a0d97212c1e5..4e5b0e58ac9c3 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -27,10 +27,7 @@ use cumulus_client_collator::service::CollatorService; use cumulus_client_consensus_aura::{ collators::{ lookahead::{self as aura, Params as AuraParams}, - slot_based::{ - self as slot_based, Params as SlotBasedParams, SlotBasedBlockImport, - SlotBasedBlockImportHandle, - }, + slot_based::{self as slot_based, Params as SlotBasedParams, SlotBasedBlockImport}, }, ImportQueueParams, }; @@ -169,7 +166,7 @@ pub type Service = PartialComponents< (), sc_consensus::import_queue::BasicQueue, sc_transaction_pool::TransactionPoolHandle, - (ParachainBlockImport, SlotBasedBlockImportHandle), + ParachainBlockImport, >; /// Starts a `ServiceBuilder` for a full service. 
@@ -202,8 +199,7 @@ pub fn new_partial( )?; let client = Arc::new(client); - let (block_import, slot_based_handle) = - SlotBasedBlockImport::new(client.clone(), client.clone()); + let block_import = SlotBasedBlockImport::new(client.clone(), client.clone()); let block_import = ParachainBlockImport::new(block_import, backend.clone()); let transaction_pool = Arc::from( @@ -247,7 +243,7 @@ pub fn new_partial( task_manager, transaction_pool, select_chain: (), - other: (block_import, slot_based_handle), + other: block_import, }; Ok(params) @@ -331,8 +327,7 @@ where let client = params.client.clone(); let backend = params.backend.clone(); - let block_import = params.other.0; - let slot_based_handle = params.other.1; + let block_import = params.other; let relay_chain_interface = build_relay_chain_interface( relay_chain_config, parachain_config.prometheus_registry(), @@ -472,7 +467,6 @@ where collator_service, reinitialize: false, slot_offset: Duration::from_secs(1), - block_import_handle: slot_based_handle, spawner: task_manager.spawn_essential_handle(), export_pov: None, max_pov_percentage: None, diff --git a/substrate/primitives/externalities/src/extensions.rs b/substrate/primitives/externalities/src/extensions.rs index 2382f88e3015e..b509d778b2d23 100644 --- a/substrate/primitives/externalities/src/extensions.rs +++ b/substrate/primitives/externalities/src/extensions.rs @@ -175,12 +175,6 @@ macro_rules! 
decl_extension { pub fn type_id() -> core::any::TypeId { core::any::TypeId::of::() } - - $( - $( - $impls - )* - )* } impl core::ops::Deref for $ext_name { diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index 1d34f0fce0329..0a5e8c119d085 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -22,7 +22,7 @@ harness = false [dependencies] ahash = { optional = true, workspace = true } -codec = { workspace = true } +codec = { features = [ "derive" ], workspace = true } foldhash = { workspace = true } hash-db = { workspace = true } hashbrown = { workspace = true } diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index a989f72cd124b..8e6248c2bf156 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -21,7 +21,7 @@ //! to record storage accesses to the state to generate a [`StorageProof`]. use crate::{GenericMemoryDB, NodeCodec, StorageProof}; -use codec::{Decode, Encode}; +use codec::{Compact, Decode, Encode}; use hash_db::Hasher; use memory_db::KeyFunction; use parking_lot::{Mutex, MutexGuard}; @@ -42,11 +42,27 @@ const LOG_TARGET: &str = "trie-recorder"; /// A list of ignored nodes for [`Recorder`]. /// /// These nodes when passed to a recorder will be ignored and not recorded by the recorder. 
-#[derive(Clone, Debug, Encode, Decode)] +#[derive(Clone, Debug)] pub struct IgnoredNodes { nodes: HashSet, } +impl Encode for IgnoredNodes { + fn encode(&self) -> Vec { + let mut encoded = Compact::(self.nodes.len() as _).encode(); + self.nodes.iter().for_each(|n| n.encode_to(&mut encoded)); + encoded + } +} + +impl Decode for IgnoredNodes { + fn decode(input: &mut I) -> Result { + let len = Compact::::decode(input)?; + let data = codec::decode_vec_with_len(input, len.0 as _)?; + Ok(Self { nodes: HashSet::from_iter(data.into_iter()) }) + } +} + impl Default for IgnoredNodes { fn default() -> Self { Self { nodes: HashSet::default() } From e143583560d4990ca846bd40df0fce6bf12fe011 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 26 Nov 2025 14:10:33 +0100 Subject: [PATCH 194/312] Some fixes and improvements --- .../slot_based/block_builder_task.rs | 175 +++++++++++------- substrate/client/consensus/slots/src/lib.rs | 2 - 2 files changed, 112 insertions(+), 65 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 0cf260cc99e17..6e1055fe2863c 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -34,14 +34,13 @@ use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockIm use cumulus_client_proof_size_recording::prepare_proof_size_recording_transaction; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; use cumulus_primitives_core::{ - extract_relay_parent, rpsr_digest, BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, - CumulusDigestItem, PersistedValidationData, RelayParentOffsetApi, TargetBlockRate, + BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, + PersistedValidationData, RelayParentOffsetApi, TargetBlockRate, }; use 
cumulus_relay_chain_interface::RelayChainInterface; use futures::prelude::*; use polkadot_primitives::{ Block as RelayBlock, CoreIndex, Hash as RelayHash, Header as RelayHeader, Id as ParaId, - DEFAULT_CLAIM_QUEUE_OFFSET, }; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; use sc_consensus::BlockImport; @@ -57,7 +56,7 @@ use sp_core::crypto::Pair; use sp_externalities::Extensions; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; -use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT, Member, Zero}; +use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT, Member}; use sp_trie::{ proof_size_extension::{ProofSizeExt, RecordingProofSizeProvider}, recorder::IgnoredNodes, @@ -364,9 +363,11 @@ where target: crate::LOG_TARGET, block = ?initial_parent.hash, ?error, - "Failed to fetch `slot_schedule`, assuming one block with 2s" + "Failed to fetch `slot_schedule`, assuming one block per core" ); - 1 + + // Backwards compatible we use the number of cores as number of blocks. 
+ cores.total_cores() }, }; @@ -386,29 +387,30 @@ where loop { let time_for_core = slot_time.time_left() / cores.cores_left(); - match build_collation_for_core( + match build_collation_for_core(BuildCollationParams { pov_parent_header, pov_parent_hash, - &relay_parent_header, - relay_parent, + relay_parent_header: &relay_parent_header, + relay_parent_hash: relay_parent, max_pov_size, para_id, - &relay_client, - &code_hash_provider, - &slot_claim, - &collator_sender, - &mut collator, + relay_client: &relay_client, + code_hash_provider: &code_hash_provider, + slot_claim: &slot_claim, + collator_sender: &collator_sender, + collator: &mut collator, allowed_pov_size, - cores.core_info(), - cores.core_index(), + core_info: cores.core_info(), + core_index: cores.core_index(), block_time, blocks_per_core, time_for_core, - cores.is_last_core() && + is_last_core_in_parachain_slot: cores.is_last_core() && slot_time.is_parachain_slot_ending(para_slot_duration.as_duration()), collator_peer_id, - rp_data.clone(), - ) + relay_parent_data: rp_data.clone(), + total_number_of_blocks: number_of_blocks, + }) .await { Ok(Some(header)) => { @@ -428,30 +430,59 @@ where } } -/// Build a collation for one core. -/// -/// One collation can be composed of multiple blocks. -async fn build_collation_for_core( +/// Parameters for [`build_collation_for_core`]. 
+struct BuildCollationParams<'a, Block: BlockT, P: Pair, RelayClient, BI, CIDP, Proposer, CS, CHP> { pov_parent_header: Block::Header, pov_parent_hash: Block::Hash, - relay_parent_header: &RelayHeader, + relay_parent_header: &'a RelayHeader, relay_parent_hash: RelayHash, max_pov_size: u32, para_id: ParaId, - relay_client: &impl RelayChainInterface, - code_hash_provider: &impl consensus_common::ValidationCodeHashProvider, - slot_claim: &SlotClaim, - collator_sender: &sc_utils::mpsc::TracingUnboundedSender>, - collator: &mut Collator, + relay_client: &'a RelayClient, + code_hash_provider: &'a CHP, + slot_claim: &'a SlotClaim, + collator_sender: &'a sc_utils::mpsc::TracingUnboundedSender>, + collator: &'a mut Collator, allowed_pov_size: usize, core_info: CoreInfo, core_index: CoreIndex, block_time: Duration, blocks_per_core: u32, - slot_time_for_core: Duration, + /// Time allocated for the core. + time_for_core: Duration, is_last_core_in_parachain_slot: bool, collator_peer_id: PeerId, relay_parent_data: RelayParentData, + total_number_of_blocks: u32, +} + +/// Build a collation for one core. +/// +/// One collation can be composed of multiple blocks. 
+async fn build_collation_for_core( + BuildCollationParams { + pov_parent_header, + pov_parent_hash, + relay_parent_header, + relay_parent_hash, + max_pov_size, + para_id, + relay_client, + code_hash_provider, + slot_claim, + collator_sender, + collator, + allowed_pov_size, + core_info, + core_index, + block_time, + blocks_per_core, + time_for_core: slot_time_for_core, + is_last_core_in_parachain_slot, + collator_peer_id, + relay_parent_data, + total_number_of_blocks, + }: BuildCollationParams<'_, Block, P, RelayClient, BI, CIDP, Proposer, CS, CHP>, ) -> Result, ()> where RelayClient: RelayChainInterface + 'static, @@ -463,6 +494,7 @@ where BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, Proposer: Environment + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, + CHP: consensus_common::ValidationCodeHashProvider + Send + Sync + 'static, { let core_start = Instant::now(); @@ -498,12 +530,15 @@ where // We require that the next node has imported our last block before it can start building // the next block. To ensure that the next node is able to do so, we are skipping the last // block in the parachain slot. In the future this can be removed again. - let is_last = block_index + 1 == blocks_per_core || - (block_index + 2 == blocks_per_core && - blocks_per_core > 1 && - is_last_core_in_parachain_slot); + let is_last_block_in_core = block_index + 1 == blocks_per_core || + // This branch here is for the case when we are going to skip the last block. + (block_index + 2 == blocks_per_core && blocks_per_core > 1); + + // If we have more than 3 blocks in total, aka a block time which is less than 2s, we are + // going to skip the last block. Otherwise, when running with 3 blocks, we are just + // adjusting the authoring duration below. 
if block_index + 1 == blocks_per_core && - blocks_per_core > 1 && + total_number_of_blocks > 3 && is_last_core_in_parachain_slot { tracing::debug!( @@ -513,35 +548,13 @@ where break; } - let block_start = Instant::now(); - let slot_time_for_block = slot_time_for_core.saturating_sub(core_start.elapsed()) / - (blocks_per_core - block_index) as u32; - - if slot_time_for_block <= Duration::from_millis(20) { - tracing::error!( - target: LOG_TARGET, - slot_time_for_block_ms = %slot_time_for_block.as_millis(), - blocks_left = %(blocks_per_core - block_index), - ?core_index, - "Less than 20ms slot time left to produce blocks, stopping block production for core", - ); - - break - } - tracing::trace!( target: LOG_TARGET, - slot_time_for_block_ms = %slot_time_for_block.as_millis(), %block_index, core_index = %core_index.0, - "Going to build block" + "Preparing to build block" ); - // The authoring duration is either the block time returned by the runtime or the 90% of the - // rest of the slot time for the block. We take here 90% because we still need to create the - // inherents and need to import the block afterward. - let authoring_duration = block_time.min(slot_time_for_block); - let (parachain_inherent_data, other_inherent_data) = match collator .create_inherent_data_with_rp_offset( relay_parent_hash, @@ -568,6 +581,34 @@ where let mut extra_extensions = Extensions::default(); extra_extensions.register(ProofSizeExt::new(proof_size_recorder.clone())); + let block_production_start = Instant::now(); + // The time we have left to spent for the block. + let time_left_for_block = slot_time_for_core.saturating_sub(core_start.elapsed()) / + (blocks_per_core - block_index) as u32; + + // For the special case of 3 blocks on 3 cores or 2 blocks on 2 cores, we are going to + // adjust the authoring duration on the last block. 
+ // + //TODO: Remove when transaction streaming is implemented + let adjusted_time_left = if is_last_block_in_core && + blocks_per_core == 1 && + total_number_of_blocks <= 3 && + total_number_of_blocks >= 2 + { + time_left_for_block / 2 + } else { + time_left_for_block + }; + + // The time we will use to build the actual block. + let authoring_duration = block_time.min(adjusted_time_left); + + tracing::trace!( + target: LOG_TARGET, + ?authoring_duration, + "Building block" + ); + let Ok(Some((built_block, mut import_block))) = collator .build_block(BuildBlockAndImportParams { parent_header: &parent_header, @@ -576,7 +617,7 @@ where CumulusDigestItem::CoreInfo(core_info.clone()).to_digest_item(), CumulusDigestItem::BundleInfo(BundleInfo { index: block_index as u8, - maybe_last: is_last, + maybe_last: is_last_block_in_core, }) .to_digest_item(), ], @@ -633,7 +674,7 @@ where tracing::trace!( target: crate::LOG_TARGET, block_hash = ?parent_hash, - time_used_by_block_in_secs = %block_start.elapsed().as_secs_f32(), + time_used_by_block_in_secs = %block_production_start.elapsed().as_secs_f32(), %full_core_digest, %runtime_upgrade_digest, "Stopping block production for core", @@ -648,11 +689,11 @@ where // If there is still time left for the block in the slot, we sleep the rest of the time. // This ensures that we have some steady block rate. - if let Some(sleep) = slot_time_for_block - .checked_sub(block_start.elapsed()) + if let Some(sleep) = time_left_for_block + .checked_sub(block_production_start.elapsed()) // Let's not sleep for the last block here, to send out the collation as early as // possible. 
- .filter(|_| block_index + 1 < blocks_per_core) + .filter(|_| !is_last_block_in_core) { tokio::time::sleep(sleep).await; } @@ -660,6 +701,14 @@ where let proof = StorageProof::merge(proofs); + tracing::trace!( + target: LOG_TARGET, + ?core_index, + relay_parent = ?relay_parent_hash, + blocks = ?blocks.iter().map(|b| b.hash()).collect::>(), + "Sending out PoV" + ); + if let Err(err) = collator_sender.unbounded_send(CollatorMessage { relay_parent: relay_parent_hash, parent_header: pov_parent_header.clone(), diff --git a/substrate/client/consensus/slots/src/lib.rs b/substrate/client/consensus/slots/src/lib.rs index a4e55d1460c62..bb51723fc76fe 100644 --- a/substrate/client/consensus/slots/src/lib.rs +++ b/substrate/client/consensus/slots/src/lib.rs @@ -42,9 +42,7 @@ use sp_consensus::{Proposal, ProposeArgs, Proposer, SelectChain, SyncOracle}; use sp_consensus_slots::{Slot, SlotDuration}; use sp_inherents::CreateInherentDataProviders; use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT}; -use sp_state_machine::StorageProof; use std::{ - fmt::Debug, ops::Deref, time::{Duration, Instant}, }; From 63817e5fc70e0af673a4ae7d5056bc184d9b0543 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 26 Nov 2025 21:45:16 +0100 Subject: [PATCH 195/312] More fixes and improvements --- Cargo.lock | 3 +++ cumulus/test/runtime/src/lib.rs | 24 +++++++++++-------- cumulus/test/runtime/src/test_pallet.rs | 10 +++++--- .../zombienet-sdk-helpers/src/lib.rs | 16 ++++++++++++- cumulus/zombienet/zombienet-sdk/Cargo.toml | 3 +++ .../full_core_usage_scenarios.rs | 11 ++++----- .../block_bundling/runtime_upgrade.rs | 15 +++++++++--- .../block_bundling/three_cores_glutton.rs | 7 +----- .../zombie_ci/block_bundling/tracing_block.rs | 24 ++----------------- .../basic-authorship/src/basic_authorship.rs | 1 - substrate/primitives/block-builder/src/lib.rs | 2 -- .../primitives/consensus/common/src/lib.rs | 1 - 12 files changed, 61 insertions(+), 56 deletions(-) 
diff --git a/Cargo.lock b/Cargo.lock index 5d4d965ad5712..a6b6b030ee91d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5332,6 +5332,8 @@ dependencies = [ "parity-scale-codec", "polkadot-primitives", "rstest", + "sc-executor", + "sc-executor-common", "sc-statement-store", "serde", "serde_json", @@ -5339,6 +5341,7 @@ dependencies = [ "sp-keyring", "sp-rpc", "sp-statement-store", + "sp-version", "tokio", "zombienet-configuration", "zombienet-orchestrator", diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index a7919979d5728..76a898a38726e 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -152,9 +152,15 @@ const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; #[cfg(all(feature = "sync-backing", not(feature = "async-backing")))] const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; -// The `+2` shouldn't be needed, https://github.com/paritytech/polkadot-sdk/issues/5260 +/// We need `VELOCITY * 3`, because the block flow is the following: +/// +/// - Collator produces the block(s) on relay chain block `X` +/// - In the mean time the relay chain is building block `X + 1` +/// - The collator sends the collation to the relay chain and it gets backed on chain in relay block +/// `X + 2` +/// - The collation then gets included on chain in relay block `X + 3` #[cfg(all(not(feature = "sync-backing"), not(feature = "async-backing")))] -const UNINCLUDED_SEGMENT_CAPACITY: u32 = BLOCK_PROCESSING_VELOCITY * (2 + RELAY_PARENT_OFFSET) + 2; +const UNINCLUDED_SEGMENT_CAPACITY: u32 = BLOCK_PROCESSING_VELOCITY * 3; #[cfg(any(feature = "sync-backing", feature = "elastic-scaling-12s-slot"))] pub const SLOT_DURATION: u64 = 12000; @@ -225,11 +231,9 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// by Operational extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// Target number of blocks per relay chain slot. 
-const TARGET_BLOCKS: u32 = 12; type MaximumBlockWeight = cumulus_pallet_parachain_system::block_weight::MaxParachainBlockWeight< Runtime, - ConstU32, + ConstU32, >; parameter_types! { @@ -283,7 +287,7 @@ impl frame_system::Config for Runtime { type MaxConsumers = frame_support::traits::ConstU32<16>; type PreInherents = cumulus_pallet_parachain_system::block_weight::DynamicMaxBlockWeightHooks< Runtime, - ConstU32, + ConstU32, >; type SingleBlockMigrations = SingleBlockMigrations; } @@ -377,8 +381,8 @@ const RELAY_PARENT_OFFSET: u32 = 0; type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< Runtime, RELAY_CHAIN_SLOT_DURATION_MILLIS, - 24, - 36, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, >; impl cumulus_pallet_parachain_system::Config for Runtime { type WeightInfo = (); @@ -474,7 +478,7 @@ pub type TxExtension = cumulus_pallet_parachain_system::block_weight::DynamicMax test_pallet::TestTransactionExtension, ), >, - ConstU32, + ConstU32, >; /// Unchecked extrinsic type as expected by this runtime. @@ -655,7 +659,7 @@ impl_runtime_apis! { impl cumulus_primitives_core::TargetBlockRate for Runtime { fn target_block_rate() -> u32 { - TARGET_BLOCKS + BLOCK_PROCESSING_VELOCITY } } } diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index 6a7cdb532ef91..595d96b5b759b 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -308,9 +308,13 @@ pub mod pallet { CumulusDigestItem::find_bundle_info(&digest) // Default being `true` to support `validate_transaction` - .map_or(true, |bi| bi.index == 0) || - // If it doesn't need to be the first block in the core, we can just always accept the transaction. 
- !must_be_first_block_in_core + .map_or(true, |bi| { + // Either we want that the transaction goes into the first block + // of a core + bi.index == 0 && *must_be_first_block_in_core || + // Or it goes to any block that isn't the first block + bi.index > 0 && !*must_be_first_block_in_core + }) } { Ok(( ValidTransaction { diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index eb4089b7120e4..d3291cacdc526 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -825,11 +825,25 @@ pub async fn ensure_is_only_block_in_core( } /// Checks if the specified block is the last block in a core. +/// +/// Also ensures that the last block is NOT the first block. pub async fn ensure_is_last_block_in_core( para_client: &OnlineClient, block_to_check: H256, ) -> Result<(), anyhow::Error> { - ensure_is_block_in_core_impl(para_client, block_to_check, false).await + ensure_is_block_in_core_impl(para_client, block_to_check, false).await?; + + let blocks = para_client.blocks(); + let block = blocks.at(block_to_check).await?; + let bundle_info = find_bundle_info(&block)?; + + // Above we ensure it is the last block in the core and now we want to ensure it isn't the first + // block. 
+ if bundle_info.index == 0 { + Err(anyhow!("`{block_to_check:?}` is the first block of a core and not the last")) + } else { + Ok(()) + } } pub async fn runtime_upgrade( diff --git a/cumulus/zombienet/zombienet-sdk/Cargo.toml b/cumulus/zombienet/zombienet-sdk/Cargo.toml index 4f0a8c6484ec0..7ba255c9fc123 100644 --- a/cumulus/zombienet/zombienet-sdk/Cargo.toml +++ b/cumulus/zombienet/zombienet-sdk/Cargo.toml @@ -25,6 +25,9 @@ sp-statement-store = { workspace = true, default-features = true } sc-statement-store = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } frame-support = { workspace = true } cumulus-primitives-core = { workspace = true } cumulus-test-runtime = { workspace = true } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs index 33313b89bedb3..cc611f4eb5a9c 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs @@ -18,18 +18,13 @@ use anyhow::anyhow; use cumulus_primitives_core::relay_chain::MAX_POV_SIZE; use cumulus_zombienet_sdk_helpers::{ - assert_finality_lag, assert_para_throughput, create_assign_core_call, - ensure_is_last_block_in_core, ensure_is_only_block_in_core, find_core_info, + create_assign_core_call, ensure_is_last_block_in_core, ensure_is_only_block_in_core, submit_extrinsic_and_wait_for_finalization_success, BlockToCheck, }; use frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND; -use polkadot_primitives::Id as ParaId; use 
serde_json::json; -use std::sync::Arc; use zombienet_sdk::{ - subxt::{ - ext::scale_value::value, tx::DynamicPayload, utils::H256, OnlineClient, PolkadotConfig, - }, + subxt::{ext::scale_value::value, tx::DynamicPayload, OnlineClient, PolkadotConfig}, subxt_signer::sr25519::dev, NetworkConfig, NetworkConfigBuilder, }; @@ -141,6 +136,8 @@ async fn block_bundling_full_core_usage_scenarios() -> Result<(), anyhow::Error> let use_more_weight_than_announced = create_use_more_weight_than_announced_call(false); + // Here we are testing that a transaction that uses more weight than registered makes the block + // production stop for this core. Even as the block is not the first block in the core. log::info!( "Testing scenario 6: Sending a transaction which uses more weight than what \ it registered and transactions appears in the last block of a core" diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs index a9de832066bf6..b6a034bf81a49 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs @@ -17,7 +17,7 @@ use anyhow::anyhow; use cumulus_primitives_core::relay_chain::MAX_POV_SIZE; -use cumulus_test_runtime::wasm_spec_version_incremented::WASM_BINARY_BLOATY as WASM_RUNTIME_UPGRADE; +use cumulus_test_runtime::block_bundling::WASM_BINARY_BLOATY as WASM_RUNTIME_UPGRADE; use cumulus_zombienet_sdk_helpers::{ assign_cores, ensure_is_only_block_in_core, submit_extrinsic_and_wait_for_finalization_success, submit_unsigned_extrinsic_and_wait_for_finalization_success, wait_for_runtime_upgrade, @@ -67,6 +67,15 @@ async fn block_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { )); } + // Let's create our own fake runtime upgrade where we just bump the `spec_version`. 
+ // On chain nothing will change, as we only change the runtime version stored inside the wasm + // file. + let blob = sc_executor_common::runtime_blob::RuntimeBlob::uncompress_if_needed(&runtime_wasm)?; + let mut version = sc_executor::read_embedded_version(&blob)? + .ok_or_else(|| anyhow!("No runtime version found?"))?; + version.spec_version += 1; + let runtime_wasm = sp_version::embed::embed_runtime_version(&runtime_wasm, version)?; + log::info!("Runtime size validation passed: {} bytes", runtime_wasm.len()); let config = build_network_config().await?; @@ -86,7 +95,7 @@ async fn block_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { log::info!("3 cores total assigned to the parachain"); // Step 1: Authorize the runtime upgrade - let code_hash = blake2_256(runtime_wasm); + let code_hash = blake2_256(&runtime_wasm); let authorize_call = create_authorize_upgrade_call(code_hash.into()); let sudo_authorize_call = create_sudo_call(authorize_call); @@ -96,7 +105,7 @@ async fn block_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { log::info!("Authorize upgrade transaction finalized"); // Step 2: Apply the authorized upgrade with the actual runtime code - let apply_call = create_apply_authorized_upgrade_call(runtime_wasm.to_vec()); + let apply_call = create_apply_authorized_upgrade_call(runtime_wasm.clone()); log::info!( "Sending apply_authorized_upgrade transaction with runtime size: {} bytes", diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs index 8cbcc65d58966..294695d405bf9 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs @@ -21,11 +21,7 @@ use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_para_throughput, use polkadot_primitives::Id as ParaId; use 
serde_json::json; use zombienet_sdk::{ - subxt::{ - backend::{legacy::LegacyRpcMethods, rpc::RpcClient}, - OnlineClient, PolkadotConfig, - }, - subxt_signer::sr25519::dev, + subxt::{OnlineClient, PolkadotConfig}, NetworkConfig, NetworkConfigBuilder, }; @@ -51,7 +47,6 @@ async fn block_bundling_three_cores_glutton() -> Result<(), anyhow::Error> { let para_client = para_node.wait_client().await?; let relay_client: OnlineClient = relay_node.wait_client().await?; - let alice = dev::alice(); // Assign cores 0 and 1 to start with 3 cores total (core 2 is assigned by Zombienet) assign_cores(&relay_node, PARA_ID, vec![0, 1]).await?; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs index e59b2b88ae93c..b04ebff0e6cc4 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs @@ -18,16 +18,10 @@ use crate::utils::initialize_network; use anyhow::anyhow; use cumulus_zombienet_sdk_helpers::submit_extrinsic_and_wait_for_finalization_success; -use futures::stream::StreamExt; use serde_json::json; -use sp_rpc::tracing::{BlockTrace, TraceBlockResponse}; +use sp_rpc::tracing::TraceBlockResponse; use zombienet_sdk::{ - subxt::{ - backend::rpc::RpcClient, - dynamic::Value, - ext::{scale_value::value, subxt_rpcs::rpc_params}, - OnlineClient, PolkadotConfig, - }, + subxt::{dynamic::Value, ext::subxt_rpcs::rpc_params, OnlineClient, PolkadotConfig}, subxt_signer::sr25519::dev, NetworkConfig, NetworkConfigBuilder, }; @@ -49,20 +43,6 @@ async fn block_bundling_tracing_block() -> Result<(), anyhow::Error> { let para_node = network.get_node("collator-0")?; let para_client: OnlineClient = para_node.wait_client().await?; - // Wait for a few blocks to ensure the network is stable - log::info!("Waiting for network to stabilize"); - let mut finalized_stream 
= para_client.blocks().subscribe_finalized().await?; - let mut block_count = 0u32; - - while let Some(block) = finalized_stream.next().await { - let _block = block?; - block_count += 1; - if block_count >= 3 { - log::info!("Network stabilized after 3 blocks"); - break; - } - } - // Create a balance transfer transaction let alice = dev::alice(); let bob = dev::bob().public_key(); diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index d9c2fafe0e157..ce62997fdf354 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -41,7 +41,6 @@ use sp_runtime::{ traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT}, ExtrinsicInclusionMode, Percent, SaturatedConversion, }; -use sp_state_machine::StorageProof; use std::{pin::Pin, sync::Arc, time}; /// Default block size limit in bytes used by [`Proposer`]. diff --git a/substrate/primitives/block-builder/src/lib.rs b/substrate/primitives/block-builder/src/lib.rs index d5d3d809db6df..b6c0ab270450f 100644 --- a/substrate/primitives/block-builder/src/lib.rs +++ b/substrate/primitives/block-builder/src/lib.rs @@ -21,8 +21,6 @@ extern crate alloc; -use codec::{Decode, Encode}; -use core::time::Duration; use sp_inherents::{CheckInherentsResult, InherentData}; use sp_runtime::{traits::Block as BlockT, ApplyExtrinsicResult}; #[cfg(feature = "std")] diff --git a/substrate/primitives/consensus/common/src/lib.rs b/substrate/primitives/consensus/common/src/lib.rs index ceeca43b0a0ff..03a3dc9cbf04d 100644 --- a/substrate/primitives/consensus/common/src/lib.rs +++ b/substrate/primitives/consensus/common/src/lib.rs @@ -30,7 +30,6 @@ use sp_runtime::{ traits::{Block as BlockT, HashingFor}, Digest, }; -use sp_state_machine::StorageProof; pub mod block_validation; pub mod error; From f17515c2e1ffe6ff3661a2d6cd7a94ccdcf0210e Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Thu, 27 Nov 2025 15:38:41 +0100
Subject: [PATCH 196/312] Fix more tests

---
 .../consensus/aura/src/collators/mod.rs | 2 +
 cumulus/test/runtime/src/lib.rs | 5 ++-
 cumulus/test/runtime/src/test_pallet.rs | 10 ++++-
 .../tests/zombie_ci/block_bundling/basic.rs | 5 +++
 .../block_bundling/runtime_upgrade.rs | 4 +-
 .../tests/zombie_ci/runtime_upgrade.rs | 39 +++++++++----------
 6 files changed, 40 insertions(+), 25 deletions(-)

diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs
index 3b49e386954bf..d9848891258ad 100644
--- a/cumulus/client/consensus/aura/src/collators/mod.rs
+++ b/cumulus/client/consensus/aura/src/collators/mod.rs
@@ -396,6 +396,8 @@ mod tests {
 /// we are ensuring on the node side that we are are always able to build on the included block.
 #[tokio::test]
 async fn test_can_build_upon() {
+ sp_tracing::try_init_simple();
+
 let (client, keystore) = set_up_components(6);
 let genesis_hash = client.chain_info().genesis_hash;

diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs
index 76a898a38726e..fa6e0278dc994 100644
--- a/cumulus/test/runtime/src/lib.rs
+++ b/cumulus/test/runtime/src/lib.rs
@@ -159,8 +159,11 @@ const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1;
 /// - The collator sends the collation to the relay chain and it gets backed on chain in relay block
 /// `X + 2`
 /// - The collation then gets included on chain in relay block `X + 3`
+/// - As we are building on `RELAY_PARENT_OFFSET` old relay parents, the included block from the
+/// parachain is also `RELAY_PARENT_OFFSET` relay blocks older (one relay block may contain
+/// multiple parachain blocks). 
#[cfg(all(not(feature = "sync-backing"), not(feature = "async-backing")))] -const UNINCLUDED_SEGMENT_CAPACITY: u32 = BLOCK_PROCESSING_VELOCITY * 3; +const UNINCLUDED_SEGMENT_CAPACITY: u32 = BLOCK_PROCESSING_VELOCITY * (3 + RELAY_PARENT_OFFSET); #[cfg(any(feature = "sync-backing", feature = "elastic-scaling-12s-slot"))] pub const SLOT_DURATION: u64 = 12000; diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index 595d96b5b759b..bd46bd798ae83 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -93,7 +93,9 @@ pub mod pallet { // Depositing the event is important, because then we write the actual proof size // into the state. If some node returns a different proof size on import of this // block, we will detect it this way as the storage root will be different. - Self::deposit_event(Event::MovedBigValue { proof_size: get_proof_size().unwrap() }) + Self::deposit_event(Event::MovedBigValue { + proof_size: get_proof_size().unwrap_or_default(), + }) } Weight::zero() @@ -244,6 +246,8 @@ pub mod pallet { pub struct GenesisConfig { #[serde(skip)] pub _config: core::marker::PhantomData, + /// Controls if the `BigValueMove` logic is enabled. 
+ pub enable_big_value_move: bool, } #[pallet::genesis_build] @@ -251,7 +255,9 @@ pub mod pallet { fn build(&self) { sp_io::storage::set(TEST_RUNTIME_UPGRADE_KEY, &[1, 2, 3, 4]); - BigValueMove::::insert(BlockNumberFor::::from(0u32), vec![0u8; 4 * 1024]); + if self.enable_big_value_move { + BigValueMove::::insert(BlockNumberFor::::from(0u32), vec![0u8; 4 * 1024]); + } } } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs index 7b9829c5203b6..7968d86cfb7ae 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs @@ -171,6 +171,11 @@ async fn build_network_config() -> Result { ("slot-based").into(), ("-lparachain=trace,aura=trace").into(), ]) + .with_genesis_overrides(json!({ + "testPallet": { + "enableBigValueMove": true + } + })) .with_collator(|n| n.with_name("collator-0")) .with_collator(|n| n.with_name("collator-1")) .with_collator(|n| n.with_name("collator-2")) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs index b6a034bf81a49..85f7177eb0931 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs @@ -17,7 +17,7 @@ use anyhow::anyhow; use cumulus_primitives_core::relay_chain::MAX_POV_SIZE; -use cumulus_test_runtime::block_bundling::WASM_BINARY_BLOATY as WASM_RUNTIME_UPGRADE; +use cumulus_test_runtime::block_bundling::WASM_BINARY_BLOATY as WASM_RUNTIME_BINARY; use cumulus_zombienet_sdk_helpers::{ assign_cores, ensure_is_only_block_in_core, submit_extrinsic_and_wait_for_finalization_success, submit_unsigned_extrinsic_and_wait_for_finalization_success, wait_for_runtime_upgrade, @@ -57,7 +57,7 @@ 
async fn block_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { // Validate runtime size requirement let runtime_wasm = - WASM_RUNTIME_UPGRADE.ok_or_else(|| anyhow!("WASM runtime upgrade binary not available"))?; + WASM_RUNTIME_BINARY.ok_or_else(|| anyhow!("WASM runtime upgrade binary not available"))?; if runtime_wasm.len() <= MIN_RUNTIME_SIZE_BYTES { return Err(anyhow!( diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs index 1876396d10120..1fa3b6f127f0a 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs @@ -6,18 +6,17 @@ use std::time::Duration; use crate::utils::initialize_network; -use cumulus_zombienet_sdk_helpers::{assert_para_throughput, wait_for_runtime_upgrade}; -use polkadot_primitives::Id as ParaId; -use zombienet_configuration::types::AssetLocation; +use cumulus_test_runtime::wasm_spec_version_incremented::WASM_BINARY_BLOATY as WASM_RUNTIME_UPGRADE; +use cumulus_zombienet_sdk_helpers::{ + submit_extrinsic_and_wait_for_finalization_success, wait_for_runtime_upgrade, +}; use zombienet_sdk::{ - subxt::{OnlineClient, PolkadotConfig}, - tx_helper::{ChainUpgrade, RuntimeUpgradeOptions}, + subxt::{ext::scale_value::value, tx::DynamicPayload, OnlineClient, PolkadotConfig}, + subxt_signer::sr25519::dev, NetworkConfig, NetworkConfigBuilder, }; const PARA_ID: u32 = 2000; -const WASM_WITH_SPEC_VERSION_INCREMENTED: &str = - "/tmp/wasm_binary_spec_version_incremented.rs.compact.compressed.wasm"; // This tests makes sure that it is possible to upgrade parachain's runtime // and parachain produces blocks after such upgrade. 
@@ -31,12 +30,6 @@ async fn runtime_upgrade() -> Result<(), anyhow::Error> { let config = build_network_config().await?; let network = initialize_network(config).await?; - let alice = network.get_node("alice")?; - let alice_client: OnlineClient = alice.wait_client().await?; - - log::info!("Ensuring parachain making progress"); - assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 2..40)], []).await?; - let timeout_secs: u64 = 250; let charlie = network.get_node("charlie")?; let charlie_client: OnlineClient = charlie.wait_client().await?; @@ -46,13 +39,9 @@ async fn runtime_upgrade() -> Result<(), anyhow::Error> { log::info!("Current runtime spec version {current_spec_version}"); log::info!("Performing runtime upgrade"); - network - .parachain(PARA_ID) - .unwrap() - .perform_runtime_upgrade( - charlie, - RuntimeUpgradeOptions::new(AssetLocation::from(WASM_WITH_SPEC_VERSION_INCREMENTED)), - ) + + let call = create_runtime_upgrade_call(); + submit_extrinsic_and_wait_for_finalization_success(&charlie_client, &call, &dev::alice()) .await?; let dave = network.get_node("dave")?; @@ -74,6 +63,16 @@ async fn runtime_upgrade() -> Result<(), anyhow::Error> { Ok(()) } +fn create_runtime_upgrade_call() -> DynamicPayload { + let runtime_upgrade_call = zombienet_sdk::subxt::tx::dynamic( + "System", + "set_code_without_checks", + vec![value!(WASM_RUNTIME_UPGRADE.expect("Wasm runtime not build").to_vec())], + ); + + zombienet_sdk::subxt::tx::dynamic("Sudo", "sudo", vec![runtime_upgrade_call.into_value()]) +} + async fn build_network_config() -> Result { // images are not relevant for `native`, but we leave it here in case we use `k8s` some day let images = zombienet_sdk::environment::get_images_from_env(); From 551787b807df06385efecc30f7b033d4140ad064 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 27 Nov 2025 22:04:32 +0100 Subject: [PATCH 197/312] Reset changes --- .../utils/wasm-builder/src/wasm_project.rs | 100 ++---------------- 1 
file changed, 7 insertions(+), 93 deletions(-) diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index 9201ef130d4b0..e1bea1f04cf13 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -27,7 +27,7 @@ use std::{ borrow::ToOwned, collections::HashSet, env, fs, - hash::{DefaultHasher, Hash, Hasher}, + hash::{Hash, Hasher}, ops::Deref, path::{Path, PathBuf}, process, @@ -138,7 +138,7 @@ pub(crate) fn create_and_compile( let crate_metadata = crate_metadata(orig_project_cargo_toml); - let (project, enabled_features) = create_project( + let project = create_project( target, orig_project_cargo_toml, &runtime_workspace, @@ -196,17 +196,9 @@ pub(crate) fn create_and_compile( ) }; - let base_blob_name = + let blob_name = blob_out_name_override.unwrap_or_else(|| get_blob_name(target, &wasm_project_cargo_toml)); - // Generate feature hash for file naming - let features_hash = generate_features_hash(&enabled_features); - let blob_name = format!("{}-{}", features_hash, base_blob_name); - - // Check if we will have multiple outputs after creating this file - let should_cleanup_legacy = - will_have_multiple_outputs_after_adding(&project, &base_blob_name, target, &features_hash); - let (final_blob_binary, bloaty_blob_binary) = match target { RuntimeTarget::Wasm => { let out_path = project.join(format!("{blob_name}.wasm")); @@ -219,8 +211,6 @@ pub(crate) fn create_and_compile( &blob_name, check_for_runtime_version_section, &build_config, - should_cleanup_legacy, - &base_blob_name, ) }, RuntimeTarget::Riscv => { @@ -252,8 +242,6 @@ fn maybe_compact_and_compress_wasm( blob_name: &str, check_for_runtime_version_section: bool, build_config: &BuildConfiguration, - should_cleanup_legacy: bool, - base_blob_name: &str, ) -> (Option, WasmBinaryBloaty) { // Try to compact and compress the bloaty blob, if the *outer* profile wants it. 
// @@ -287,20 +275,6 @@ fn maybe_compact_and_compress_wasm( .as_ref() .map(|binary| copy_blob_to_target_directory(wasm_project_cargo_toml, binary)); - let legacy_path = project.join(format!("{}.compact.compressed.wasm", base_blob_name)); - - if should_cleanup_legacy { - // Remove legacy file since we will have multiple outputs - let _ = fs::remove_file(&legacy_path); - } else { - // Only one output file will exist, create/maintain the legacy filename too - if let Some(final_binary) = &final_blob_binary { - if final_binary.wasm_binary_path() != legacy_path { - let _ = fs::copy(final_binary.wasm_binary_path(), &legacy_path); - } - } - } - (final_blob_binary, bloaty_blob_binary) } @@ -672,71 +646,11 @@ fn has_runtime_wasm_feature_declared( package.features.keys().any(|k| k == "runtime-wasm") } -/// Generate a short hash from enabled features -fn generate_features_hash(enabled_features: &HashSet) -> String { - let mut hasher = DefaultHasher::new(); - let mut sorted_features: Vec<_> = enabled_features.iter().collect(); - sorted_features.sort(); - - for feature in sorted_features { - feature.hash(&mut hasher); - } - - // Use only the first 8 characters of the hex hash for brevity - format!("{:x}", hasher.finish())[..8].to_string() -} - -/// Check if adding a new file with the given hash will result in multiple different hashes -fn will_have_multiple_outputs_after_adding( - project: &Path, - base_blob_name: &str, - target: RuntimeTarget, - new_hash: &str, -) -> bool { - let extension = match target { - RuntimeTarget::Wasm => ".compact.compressed.wasm", - RuntimeTarget::Riscv => ".polkavm", - }; - - // Look for existing files that match the pattern: {hash}-{base_blob_name}{extension} - // Exclude the legacy file: {base_blob_name}{extension} - let legacy_file = format!("{}{}", base_blob_name, extension); - let pattern_suffix = format!("-{}{}", base_blob_name, extension); - - if let Ok(entries) = fs::read_dir(project) { - let mut unique_hashes = HashSet::new(); - 
unique_hashes.insert(new_hash.to_string()); // Add the hash we're about to create - - for entry in entries.filter_map(|e| e.ok()) { - if let Some(file_name) = entry.file_name().to_str() { - // Skip the legacy file without hash - if file_name == legacy_file { - continue; - } - - // Check if this matches our hash pattern - if file_name.ends_with(&pattern_suffix) { - let hash_len = file_name.len() - pattern_suffix.len(); - if hash_len == 8 { - // We use 8-character hashes - let hash = &file_name[..8]; - unique_hashes.insert(hash.to_string()); - } - } - } - } - - unique_hashes.len() > 1 - } else { - false - } -} - /// Create the project used to build the wasm binary. /// /// # Returns /// -/// The path to the created wasm project and the set of enabled features. +/// The path to the created wasm project. fn create_project( target: RuntimeTarget, project_cargo_toml: &Path, @@ -744,7 +658,7 @@ fn create_project( crate_metadata: &Metadata, workspace_root_path: &Path, features_to_enable: Vec, -) -> (PathBuf, HashSet) { +) -> PathBuf { let crate_name = get_crate_name(project_cargo_toml); let crate_path = project_cargo_toml.parent().expect("Parent path exists; qed"); let wasm_project_folder = wasm_workspace.join(&crate_name); @@ -768,7 +682,7 @@ fn create_project( workspace_root_path, &crate_name, crate_path, - enabled_features.clone().into_iter(), + enabled_features.into_iter(), ); match target { @@ -791,7 +705,7 @@ fn create_project( crate::copy_file_if_changed(crate_lock_file, wasm_project_folder.join("Cargo.lock")); } - (wasm_project_folder, enabled_features) + wasm_project_folder } /// A rustc profile. From 92dd6a9cf48fbe4393024ce8d786b0447d2e5b47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 27 Nov 2025 21:59:24 +0100 Subject: [PATCH 198/312] wasm-builder: Only overwrite wasm files if they changed When running two different `cargo` commands, they may both compile the same wasm files. 
When the second `cargo` command produces the same wasm files, we are now not gonna overwrite it. This has the advantage that we can run the first command again without it trying to recompile the project. Right now it would lead to the wasm files always getting recreated, which is wasting a lot of time :) --- substrate/utils/wasm-builder/src/lib.rs | 17 +-- .../utils/wasm-builder/src/wasm_project.rs | 141 ++++++++++++------ 2 files changed, 104 insertions(+), 54 deletions(-) diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs index 47c156e5907f3..9cf1444fda7ae 100644 --- a/substrate/utils/wasm-builder/src/lib.rs +++ b/substrate/utils/wasm-builder/src/lib.rs @@ -118,12 +118,7 @@ //! --toolchain nightly-2024-12-26`. use prerequisites::DummyCrate; -use std::{ - env, fs, - io::BufRead, - path::{Path, PathBuf}, - process::Command, -}; +use std::{env, fs, io::BufRead, path::Path, process::Command}; use version::Version; mod builder; @@ -188,14 +183,18 @@ fn write_file_if_changed(file: impl AsRef, content: impl AsRef) { } /// Copy `src` to `dst` if the `dst` does not exist or is different. 
-fn copy_file_if_changed(src: PathBuf, dst: PathBuf) { - let src_file = fs::read_to_string(&src).ok(); - let dst_file = fs::read_to_string(&dst).ok(); +fn copy_file_if_changed(src: &Path, dst: &Path) -> bool { + let src_file = fs::read(src).ok(); + let dst_file = fs::read(dst).ok(); if src_file != dst_file { fs::copy(&src, &dst).unwrap_or_else(|_| { panic!("Copying `{}` to `{}` can not fail; qed", src.display(), dst.display()) }); + + true + } else { + false } } diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index e1bea1f04cf13..c3f29fe882f60 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -17,7 +17,9 @@ #[cfg(feature = "metadata-hash")] use crate::builder::MetadataExtraInfo; -use crate::{write_file_if_changed, CargoCommandVersioned, RuntimeTarget, OFFLINE}; +use crate::{ + copy_file_if_changed, write_file_if_changed, CargoCommandVersioned, RuntimeTarget, OFFLINE, +}; use build_helper::rerun_if_changed; use cargo_metadata::{DependencyKind, Metadata, MetadataCommand}; @@ -78,6 +80,40 @@ impl WasmBinary { } } +/// Helper struct for managing blob file paths. +struct BlobPaths { + /// The base name of the blob (without extension). + blob_name: String, + /// The project directory where blobs are stored. + project: PathBuf, +} + +impl BlobPaths { + fn new(blob_name: String, project: PathBuf) -> Self { + Self { blob_name, project } + } + + /// Returns the path to the bloaty wasm file. + fn bloaty(&self) -> PathBuf { + self.project.join(format!("{}.wasm", self.blob_name)) + } + + /// Returns the path to the compact wasm file. + fn compact(&self) -> PathBuf { + self.project.join(format!("{}.compact.wasm", self.blob_name)) + } + + /// Returns the path to the compact compressed wasm file. + fn compact_compressed(&self) -> PathBuf { + self.project.join(format!("{}.compact.compressed.wasm", self.blob_name)) + } + + /// Returns the blob name. 
+ fn name(&self) -> &str { + &self.blob_name + } +} + fn crate_metadata(cargo_manifest: &Path) -> Metadata { let mut cargo_lock = cargo_manifest.to_path_buf(); cargo_lock.set_file_name("Cargo.lock"); @@ -198,25 +234,27 @@ pub(crate) fn create_and_compile( let blob_name = blob_out_name_override.unwrap_or_else(|| get_blob_name(target, &wasm_project_cargo_toml)); + let blob_paths = BlobPaths::new(blob_name, project.clone()); - let (final_blob_binary, bloaty_blob_binary) = match target { + let (final_blob_binary, bloaty_blob_binary, any_changed) = match target { RuntimeTarget::Wasm => { - let out_path = project.join(format!("{blob_name}.wasm")); - fs::copy(raw_blob_path, &out_path).expect("copying the runtime blob should never fail"); + let out_path = blob_paths.bloaty(); + let bloaty_changed = copy_file_if_changed(&raw_blob_path, &out_path); - maybe_compact_and_compress_wasm( + let (final_binary, bloaty_binary, did_compact) = maybe_compact_and_compress_wasm( &wasm_project_cargo_toml, - &project, WasmBinaryBloaty(out_path), - &blob_name, + &blob_paths, check_for_runtime_version_section, &build_config, - ) + bloaty_changed, + ); + (final_binary, bloaty_binary, bloaty_changed || did_compact) }, RuntimeTarget::Riscv => { - let out_path = project.join(format!("{blob_name}.polkavm")); - fs::copy(raw_blob_path, &out_path).expect("copying the runtime blob should never fail"); - (None, WasmBinaryBloaty(out_path)) + let out_path = project.join(format!("{}.polkavm", blob_paths.name())); + let changed = copy_file_if_changed(&raw_blob_path, &out_path); + (None, WasmBinaryBloaty(out_path), changed) }, }; @@ -228,8 +266,10 @@ pub(crate) fn create_and_compile( &bloaty_blob_binary, ); - if let Err(err) = adjust_mtime(&bloaty_blob_binary, final_blob_binary.as_ref()) { - build_helper::warning!("Error while adjusting the mtime of the blob binaries: {}", err) + if any_changed { + if let Err(err) = adjust_mtime(&bloaty_blob_binary, final_blob_binary.as_ref()) { + 
build_helper::warning!("Error while adjusting the mtime of the blob binaries: {}", err)
+ }
 }

 (final_blob_binary, bloaty_blob_binary)
@@ -237,33 +277,50 @@ pub(crate) fn create_and_compile(

 fn maybe_compact_and_compress_wasm(
 wasm_project_cargo_toml: &Path,
- project: &Path,
 bloaty_blob_binary: WasmBinaryBloaty,
- blob_name: &str,
+ blob_paths: &BlobPaths,
 check_for_runtime_version_section: bool,
 build_config: &BuildConfiguration,
-) -> (Option, WasmBinaryBloaty) {
+ bloaty_changed: bool,
+) -> (Option, WasmBinaryBloaty, bool) {
+ let needs_compact = build_config.outer_build_profile.wants_compact();
+ let compact_path = blob_paths.compact();
+ let compressed_path = blob_paths.compact_compressed();
+ let compact_or_compressed_exists = compact_path.exists() || compressed_path.exists();
+ let should_regenerate = bloaty_changed || (needs_compact && !compact_or_compressed_exists);
+
+ if !should_regenerate {
+ let final_blob = if compressed_path.exists() {
+ Some(WasmBinary(compressed_path))
+ } else if compact_path.exists() {
+ Some(WasmBinary(compact_path))
+ } else {
+ None
+ };
+
+ return (final_blob, bloaty_blob_binary, false);
+ }
+
 // Try to compact and compress the bloaty blob, if the *outer* profile wants it.
 //
 // This is because, by default the inner profile will be set to `Release` even when the outer
 // profile is `Debug`, because the blob built in `Debug` profile is too slow for normal
 // development activities.
- let (compact_blob_path, compact_compressed_blob_path) =
- if build_config.outer_build_profile.wants_compact() {
- let compact_blob_path = compact_wasm(&project, blob_name, &bloaty_blob_binary);
- let compact_compressed_blob_path =
- compact_blob_path.as_ref().and_then(|p| try_compress_blob(&p.0, blob_name));
- (compact_blob_path, compact_compressed_blob_path)
- } else {
- // We at least want to lower the `sign-ext` code to `mvp`. 
- wasm_opt::OptimizationOptions::new_opt_level_0() - .add_pass(wasm_opt::Pass::SignextLowering) - .debug_info(true) - .run(bloaty_blob_binary.bloaty_path(), bloaty_blob_binary.bloaty_path()) - .expect("Failed to lower sign-ext in WASM binary."); - - (None, None) - }; + let (compact_blob_path, compact_compressed_blob_path) = if needs_compact { + let compact_blob_path = compact_wasm(blob_paths, &bloaty_blob_binary); + let compact_compressed_blob_path = + compact_blob_path.as_ref().and_then(|p| try_compress_blob(blob_paths, p)); + (compact_blob_path, compact_compressed_blob_path) + } else { + // We at least want to lower the `sign-ext` code to `mvp`. + wasm_opt::OptimizationOptions::new_opt_level_0() + .add_pass(wasm_opt::Pass::SignextLowering) + .debug_info(true) + .run(bloaty_blob_binary.bloaty_path(), bloaty_blob_binary.bloaty_path()) + .expect("Failed to lower sign-ext in WASM binary."); + + (None, None) + }; if check_for_runtime_version_section { ensure_runtime_version_wasm_section_exists(bloaty_blob_binary.bloaty_path()); @@ -275,7 +332,7 @@ fn maybe_compact_and_compress_wasm( .as_ref() .map(|binary| copy_blob_to_target_directory(wasm_project_cargo_toml, binary)); - (final_blob_binary, bloaty_blob_binary) + (final_blob_binary, bloaty_blob_binary, true) } /// Ensures that the `runtime_version` section exists in the given blob. @@ -702,7 +759,7 @@ fn create_project( if let Some(crate_lock_file) = find_cargo_lock(project_cargo_toml) { // Use the `Cargo.lock` of the main project. 
- crate::copy_file_if_changed(crate_lock_file, wasm_project_folder.join("Cargo.lock")); + copy_file_if_changed(&crate_lock_file, &wasm_project_folder.join("Cargo.lock")); } wasm_project_folder @@ -1025,12 +1082,8 @@ fn build_bloaty_blob( } } -fn compact_wasm( - project: &Path, - blob_name: &str, - bloaty_binary: &WasmBinaryBloaty, -) -> Option { - let wasm_compact_path = project.join(format!("{blob_name}.compact.wasm")); +fn compact_wasm(blob_paths: &BlobPaths, bloaty_binary: &WasmBinaryBloaty) -> Option { + let wasm_compact_path = blob_paths.compact(); let start = std::time::Instant::now(); wasm_opt::OptimizationOptions::new_opt_level_0() .mvp_features_only() @@ -1049,15 +1102,13 @@ fn compact_wasm( Some(WasmBinary(wasm_compact_path)) } -fn try_compress_blob(compact_blob_path: &Path, out_name: &str) -> Option { +fn try_compress_blob(blob_paths: &BlobPaths, compact_blob: &WasmBinary) -> Option { use sp_maybe_compressed_blob::CODE_BLOB_BOMB_LIMIT; - let project = compact_blob_path.parent().expect("blob path should have a parent directory"); - let compact_compressed_blob_path = - project.join(format!("{}.compact.compressed.wasm", out_name)); + let compact_compressed_blob_path = blob_paths.compact_compressed(); let start = std::time::Instant::now(); - let data = fs::read(compact_blob_path).expect("Failed to read WASM binary"); + let data = fs::read(compact_blob.wasm_binary_path()).expect("Failed to read WASM binary"); if let Some(compressed) = sp_maybe_compressed_blob::compress_strongly(&data, CODE_BLOB_BOMB_LIMIT) { From 0f06e077edcdad2cfb2f6499a4c9f055f61b9d44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 28 Nov 2025 12:12:55 +0100 Subject: [PATCH 199/312] Add some more tests --- cumulus/test/runtime/src/lib.rs | 2 + .../tests/zombie_ci/block_bundling/mod.rs | 2 + .../zombie_ci/block_bundling/pov_recovery.rs | 186 ++++++++++++++++++ .../block_bundling/relay_parent_offset.rs | 90 +++++++++ 4 files changed, 280 insertions(+) create mode 
100644 cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs create mode 100644 cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/relay_parent_offset.rs diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index fa6e0278dc994..e90b62cf8a038 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -660,6 +660,8 @@ impl_runtime_apis! { } } + // "Elastic scaling" should run with the fallback method. + #[cfg(not(feature = "elastic-scaling"))] impl cumulus_primitives_core::TargetBlockRate for Runtime { fn target_block_rate() -> u32 { BLOCK_PROCESSING_VELOCITY diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs index a16032c40f8bc..5bbdeb4d02a60 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs @@ -17,6 +17,8 @@ mod basic; mod full_core_usage_scenarios; +mod pov_recovery; +mod relay_parent_offset; mod runtime_upgrade; mod three_cores_glutton; mod tracing_block; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs new file mode 100644 index 0000000000000..cd48f70f2d57d --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs @@ -0,0 +1,186 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +use anyhow::anyhow; +use std::{sync::Arc, time::Duration}; + +use crate::utils::initialize_network; + +use cumulus_zombienet_sdk_helpers::{assert_para_throughput, assign_cores}; +use polkadot_primitives::Id as ParaId; +use serde_json::json; +use zombienet_orchestrator::network::node::LogLineCountOptions; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + NetworkConfig, NetworkConfigBuilder, +}; + +const PARA_ID: u32 = 2100; + +/// This test checks if parachain node is importing blocks using PoV recovery even +/// after more cores have been assigned for the parachain. +#[tokio::test(flavor = "multi_thread")] +async fn block_bundling_pov_recovery() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + log::info!("Spawning network with relay chain only"); + let config = build_network_config().await?; + let network = initialize_network(config).await?; + + let alice = network.get_node("alice")?; + let collator = network.get_node("collator")?; + + let relay_client: OnlineClient = alice.wait_client().await?; + + assign_cores(alice, PARA_ID, vec![0, 1]).await?; + + log::info!("Ensuring parachain making progress"); + assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 40..65)], []).await?; + + // We want to make sure that none of the consensus hook checks fail, even if the chain makes + // progress. If below log line occurred 1 or more times then test failed. 
+ log::info!("Ensuring none of the consensus hook checks fail at {}", collator.name()); + let result = collator + .wait_log_line_count_with_timeout( + "set_validation_data inherent needs to be present in every block", + false, + LogLineCountOptions::no_occurences_within_timeout(Duration::from_secs(10)), + ) + .await?; + + if !result.success() { + return Err(anyhow!("Consensus hook failed at {}: {:?}", collator.name(), result)) + } + + // Wait (up to 10 seconds) until pattern occurs more than 35 times + let options = LogLineCountOptions { + predicate: Arc::new(|n| n > 35), + timeout: Duration::from_secs(10), + wait_until_timeout_elapses: false, + }; + + let name = "recovery-target"; + log::info!("Ensuring blocks are imported using PoV recovery by {name}"); + let result = network + .get_node(name)? + .wait_log_line_count_with_timeout( + "Importing blocks retrieved using pov_recovery", + false, + options, + ) + .await?; + + if !result.success() { + return Err(anyhow!("Failed importing blocks using PoV recovery by {name}: {result:?}")) + } + + log::info!("Test finished successfully"); + Ok(()) +} + +async fn build_network_config() -> Result { + // images are not relevant for `native`, but we leave it here in case we use `k8s` some day + let images = zombienet_sdk::environment::get_images_from_env(); + log::info!("Using images: {images:?}"); + + // Network setup: + // - relaychain nodes: + // - alice + // - validator + // - validator[0-3] + // - validator + // - synchronize only with alice + // - parachain nodes + // - recovery-target + // - full node + // - collator-elastic + // - collator which is the only one producing blocks + NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_resources(|resources| { + // These settings are applicable only for `k8s` provider. + // Leaving them in case we switch to `k8s` some day. 
+ resources + .with_request_cpu(1) + .with_request_memory("2G") + .with_limit_cpu(2) + .with_limit_memory("4G") + }) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "num_cores": 2, + "max_validators_per_core": 1 + }, + "approval_voting_params": { + "max_approval_coalesce_count": 5 + } + } + } + })) + // Have to set a `with_node` outside of the loop below, so that `r` has the right + // type. + .with_node(|node| node.with_name("alice").with_args(vec![])); + + (0..4).fold(r, |acc, i| { + acc.with_node(|node| { + node.with_name(&format!("validator-{i}")).with_args(vec![ + ("-lruntime=debug,parachain=trace").into(), + ("--reserved-only").into(), + ("--reserved-nodes", "{{ZOMBIE:alice:multiaddr}}").into(), + ]) + }) + }) + }) + .with_parachain(|p| { + p.with_id(PARA_ID) + .with_chain("block-bundling") + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_default_resources(|resources| { + // These settings are applicable only for `k8s` provider. + // Leaving them in case we switch to `k8s` some day. 
+ resources + .with_request_cpu(1) + .with_request_memory("2G") + .with_limit_cpu(2) + .with_limit_memory("4G") + }) + .with_collator(|n| + n.with_name("recovery-target") + .validator(false) + .with_args(vec![ + ("-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug").into(), + ("--disable-block-announcements").into(), + ("--in-peers", "0").into(), + ("--out-peers", "0").into(), + ("--").into(), + ("--reserved-only").into(), + ("--reserved-nodes", "{{ZOMBIE:alice:multiaddr}}").into() + ])) + .with_collator(|n| n.with_name("collator") + .with_args(vec![ + ("-laura=trace,runtime=info,cumulus-consensus=trace,consensus::common=trace,parachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug").into(), + ("--disable-block-announcements").into(), + ("--force-authoring").into(), + ("--authoring", "slot-based").into() + ]) + ) + }) + .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { + Ok(val) => global_settings.with_base_dir(val), + _ => global_settings, + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + }) +} diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/relay_parent_offset.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/relay_parent_offset.rs new file mode 100644 index 0000000000000..6de491a8ea02f --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/relay_parent_offset.rs @@ -0,0 +1,90 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Test that parachains that use a single slot-based collator with elastic scaling MVP and with +// elastic scaling with RFC103 can achieve full throughput of 3 candidates per block. 
+ +use anyhow::anyhow; +use cumulus_zombienet_sdk_helpers::assert_relay_parent_offset; +use serde_json::json; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + NetworkConfigBuilder, +}; + +use cumulus_zombienet_sdk_helpers::assign_cores; + +#[tokio::test(flavor = "multi_thread")] +async fn block_bundling_relay_parent_offset() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + // Images are not relevant for `native`, but we leave it here in case we use `k8s` some day + let images = zombienet_sdk::environment::get_images_from_env(); + + let config = NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=debug").into()]) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + // Num cores is 4, because 2 extra will be added automatically when registering the paras. + "num_cores": 4, + "max_validators_per_core": 1 + } + } + } + })) + // Have to set a `with_node` outside of the loop below, so that `r` has the right + // type. 
+ .with_node(|node| node.with_name("validator-0")); + + (1..6).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) + }) + .with_parachain(|p| { + p.with_id(2400) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("relay-parent-offset") + .with_default_args(vec![ + "--authoring=slot-based".into(), + ("-lparachain=debug,aura=debug").into(), + ]) + .with_collator(|n| n.with_name("collator-rp-offset")) + }) + .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { + Ok(val) => global_settings.with_base_dir(val), + _ => global_settings, + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + })?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; + + let relay_node = network.get_node("validator-0")?; + let relay_client: OnlineClient = relay_node.wait_client().await?; + + let para_node_rp_offset = network.get_node("collator-rp-offset")?; + + let para_client = para_node_rp_offset.wait_client().await?; + + assign_cores(relay_node, 2400, vec![0]).await?; + + log::info!("Checking that the parachain runs with the expected relay parent offset"); + + assert_relay_parent_offset(&relay_client, ¶_client, 2, 30).await?; + + log::info!("Test finished successfully"); + + Ok(()) +} From 9a31b28ced494d32c318a78292ecca5dd147bdc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 28 Nov 2025 12:46:53 +0100 Subject: [PATCH 200/312] Improve some of the elastic scaling tests --- cumulus/test/runtime/src/lib.rs | 5 ++++ .../zombienet-sdk-helpers/src/lib.rs | 13 +++++++++ .../elastic_scaling/upgrade_to_3_cores.rs | 27 ++++++++++++------- .../tests/zombie_ci/runtime_upgrade.rs | 24 +++++------------ 4 files changed, 42 insertions(+), 27 deletions(-) diff --git a/cumulus/test/runtime/src/lib.rs 
b/cumulus/test/runtime/src/lib.rs index e90b62cf8a038..4942a5aa45084 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -46,6 +46,11 @@ pub mod elastic_scaling { include!(concat!(env!("OUT_DIR"), "/wasm_binary_elastic_scaling.rs")); } +pub mod elastic_scaling_12s_slot { + #[cfg(feature = "std")] + include!(concat!(env!("OUT_DIR"), "/wasm_binary_elastic_scaling_12s_slot.rs")); +} + pub mod block_bundling { #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary_block_bundling.rs")); diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index d3291cacdc526..a1be1b9989099 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -910,6 +910,19 @@ pub async fn assign_cores( Ok(()) } +/// Creates a runtime upgrade call using `Sudo::sudo(System::set_code_without_checks)`. +/// +/// The `wasm_binary` should be the WASM runtime binary to upgrade to. +pub fn create_runtime_upgrade_call(wasm_binary: &[u8]) -> DynamicPayload { + let runtime_upgrade_call = zombienet_sdk::subxt::tx::dynamic( + "System", + "set_code_without_checks", + vec![value!(wasm_binary.to_vec())], + ); + + zombienet_sdk::subxt::tx::dynamic("Sudo", "sudo", vec![runtime_upgrade_call.into_value()]) +} + /// Wait until a runtime upgrade has happened. 
/// /// This checks all finalized blocks until it finds a block that sets the diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs index 14e64d3887677..9a5f979404933 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs @@ -2,27 +2,28 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::anyhow; +use cumulus_test_runtime::{ + elastic_scaling::WASM_BINARY as WASM_WITH_ELASTIC_SCALING, + elastic_scaling_12s_slot::WASM_BINARY as WASM_WITH_ELASTIC_SCALING_12S_SLOT, +}; use serde_json::json; use std::time::Duration; use crate::utils::initialize_network; use cumulus_zombienet_sdk_helpers::{ - assert_para_throughput, assign_cores, runtime_upgrade, wait_for_runtime_upgrade, + assert_para_throughput, assign_cores, create_runtime_upgrade_call, + submit_extrinsic_and_wait_for_finalization_success, wait_for_runtime_upgrade, }; use polkadot_primitives::Id as ParaId; use rstest::rstest; use zombienet_sdk::{ subxt::{OnlineClient, PolkadotConfig}, + subxt_signer::sr25519::dev, NetworkConfig, NetworkConfigBuilder, }; const PARA_ID: u32 = 2000; -const WASM_WITH_ELASTIC_SCALING: &str = - "/tmp/wasm_binary_elastic_scaling.rs.compact.compressed.wasm"; - -const WASM_WITH_ELASTIC_SCALING_12S_SLOT: &str = - "/tmp/wasm_binary_elastic_scaling_12s_slot.rs.compact.compressed.wasm"; // This test ensures that we can upgrade the parachain's runtime to support elastic scaling // and that the parachain produces 3 blocks per slot after the upgrade. 
@@ -65,10 +66,16 @@ async fn elastic_scaling_upgrade_to_3_cores( collator0_client.backend().current_runtime_version().await?.spec_version; log::info!("Current runtime spec version {current_spec_version}"); - let wasm = - if async_backing { WASM_WITH_ELASTIC_SCALING } else { WASM_WITH_ELASTIC_SCALING_12S_SLOT }; + let wasm = if async_backing { + WASM_WITH_ELASTIC_SCALING.expect("WASM not built") + } else { + WASM_WITH_ELASTIC_SCALING_12S_SLOT.expect("WASM not built") + }; - runtime_upgrade(&network, collator0, PARA_ID, wasm).await?; + log::info!("Performing runtime upgrade for parachain {PARA_ID}"); + let call = create_runtime_upgrade_call(wasm); + submit_extrinsic_and_wait_for_finalization_success(&collator0_client, &call, &dev::alice()) + .await?; let collator1 = network.get_node("collator1")?; let collator1_client: OnlineClient = collator1.wait_client().await?; @@ -83,7 +90,7 @@ async fn elastic_scaling_upgrade_to_3_cores( wait_for_runtime_upgrade(&collator1_client), ) .await - .expect("Timeout waiting for runtime upgrade")?; + .map_err(|_| anyhow!("Timeout waiting for runtime upgrade"))??; let spec_version_from_collator0 = collator0_client.backend().current_runtime_version().await?.spec_version; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs index 1fa3b6f127f0a..989bfa7245da6 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs @@ -1,17 +1,16 @@ // Copyright (C) Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 -use anyhow::anyhow; -use std::time::Duration; - use crate::utils::initialize_network; - +use anyhow::anyhow; use cumulus_test_runtime::wasm_spec_version_incremented::WASM_BINARY_BLOATY as WASM_RUNTIME_UPGRADE; use cumulus_zombienet_sdk_helpers::{ - submit_extrinsic_and_wait_for_finalization_success, wait_for_runtime_upgrade, + create_runtime_upgrade_call, submit_extrinsic_and_wait_for_finalization_success, + wait_for_runtime_upgrade, }; +use std::time::Duration; use zombienet_sdk::{ - subxt::{ext::scale_value::value, tx::DynamicPayload, OnlineClient, PolkadotConfig}, + subxt::{OnlineClient, PolkadotConfig}, subxt_signer::sr25519::dev, NetworkConfig, NetworkConfigBuilder, }; @@ -40,7 +39,8 @@ async fn runtime_upgrade() -> Result<(), anyhow::Error> { log::info!("Performing runtime upgrade"); - let call = create_runtime_upgrade_call(); + let call = + create_runtime_upgrade_call(WASM_RUNTIME_UPGRADE.expect("Wasm runtime not build")); submit_extrinsic_and_wait_for_finalization_success(&charlie_client, &call, &dev::alice()) .await?; @@ -63,16 +63,6 @@ async fn runtime_upgrade() -> Result<(), anyhow::Error> { Ok(()) } -fn create_runtime_upgrade_call() -> DynamicPayload { - let runtime_upgrade_call = zombienet_sdk::subxt::tx::dynamic( - "System", - "set_code_without_checks", - vec![value!(WASM_RUNTIME_UPGRADE.expect("Wasm runtime not build").to_vec())], - ); - - zombienet_sdk::subxt::tx::dynamic("Sudo", "sudo", vec![runtime_upgrade_call.into_value()]) -} - async fn build_network_config() -> Result { // images are not relevant for `native`, but we leave it here in case we use `k8s` some day let images = zombienet_sdk::environment::get_images_from_env(); From a54b82f7f07a136f8d8549cde8613fb7aa15393b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 28 Nov 2025 12:52:13 +0100 Subject: [PATCH 201/312] More improvements --- .../zombie_ci/elastic_scaling/pov_recovery.rs | 76 ++++++------------- 1 file changed, 24 
insertions(+), 52 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs index 1a2cd7891f097..670419c52868f 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs @@ -4,17 +4,15 @@ use anyhow::anyhow; use std::{sync::Arc, time::Duration}; -use crate::utils::{initialize_network, BEST_BLOCK_METRIC}; +use crate::utils::initialize_network; -use cumulus_zombienet_sdk_helpers::{ - assert_para_is_registered, assert_para_throughput, assign_cores, -}; +use cumulus_zombienet_sdk_helpers::{assert_para_throughput, assign_cores}; use polkadot_primitives::Id as ParaId; use serde_json::json; use zombienet_orchestrator::network::node::LogLineCountOptions; use zombienet_sdk::{ subxt::{OnlineClient, PolkadotConfig}, - NetworkConfig, NetworkConfigBuilder, RegistrationStrategy, + NetworkConfig, NetworkConfigBuilder, }; const PARA_ID: u32 = 2100; @@ -29,50 +27,22 @@ async fn elastic_scaling_pov_recovery() -> Result<(), anyhow::Error> { log::info!("Spawning network with relay chain only"); let config = build_network_config().await?; - let mut network = initialize_network(config).await?; + let network = initialize_network(config).await?; let alice = network.get_node("alice")?; - let collator_elastic = network.get_node("collator-elastic")?; - - log::info!("Checking if alice is up"); - assert!(alice.wait_until_is_up(60u64).await.is_ok()); + let collator = network.get_node("collator")?; - log::info!("Checking if collator-elastic is up"); - assert!(collator_elastic.wait_until_is_up(60u64).await.is_ok()); - - assign_cores(alice, PARA_ID, vec![0, 1]).await?; - - log::info!("Waiting 20 blocks to register parachain"); - // Wait 20 blocks and register parachain. This part is important for pov-recovery. 
- // We need to make sure that the recovering node is able to see all relay-chain - // notifications containing the candidates to recover. - assert!(alice - .wait_metric_with_timeout(BEST_BLOCK_METRIC, |b| b >= 20.0, 250u64) - .await - .is_ok()); - - log::info!("Registering parachain para_id = {PARA_ID}"); let relay_client: OnlineClient = alice.wait_client().await?; - network.register_parachain(PARA_ID).await?; - log::info!("Ensuring parachain is registered within 30 blocks"); - assert_para_is_registered(&relay_client, ParaId::from(PARA_ID), 30).await?; + assign_cores(alice, PARA_ID, vec![0, 1]).await?; log::info!("Ensuring parachain making progress"); assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 40..65)], []).await?; - let collator_elastic = network.get_node("collator-elastic")?; - - log::info!("Checking block production"); - assert!(collator_elastic - .wait_metric_with_timeout(BEST_BLOCK_METRIC, |b| b >= 40.0, 225u64) - .await - .is_ok()); - // We want to make sure that none of the consensus hook checks fail, even if the chain makes // progress. If below log line occurred 1 or more times then test failed. 
- log::info!("Ensuring none of the consensus hook checks fail at {}", collator_elastic.name()); - let result = collator_elastic + log::info!("Ensuring none of the consensus hook checks fail at {}", collator.name()); + let result = collator .wait_log_line_count_with_timeout( "set_validation_data inherent needs to be present in every block", false, @@ -80,7 +50,9 @@ async fn elastic_scaling_pov_recovery() -> Result<(), anyhow::Error> { ) .await?; - assert!(result.success(), "Consensus hook failed at {}: {:?}", collator_elastic.name(), result); + if !result.success() { + return Err(anyhow!("Consensus hook failed at {}: {:?}", collator.name(), result)); + } // Wait (up to 10 seconds) until pattern occurs more than 35 times let options = LogLineCountOptions { @@ -100,7 +72,9 @@ async fn elastic_scaling_pov_recovery() -> Result<(), anyhow::Error> { ) .await?; - assert!(result.success(), "Failed importing blocks using PoV recovery by {name}: {result:?}"); + if !result.success() { + return Err(anyhow!("Failed importing blocks using PoV recovery by {name}: {result:?}")); + } log::info!("Test finished successfully"); Ok(()) @@ -121,7 +95,7 @@ async fn build_network_config() -> Result { // - parachain nodes // - recovery-target // - full node - // - collator-elastic + // - collator // - collator which is the only one producing blocks NetworkConfigBuilder::new() .with_relaychain(|r| { @@ -168,7 +142,6 @@ async fn build_network_config() -> Result { .with_parachain(|p| { p.with_id(PARA_ID) .with_chain("elastic-scaling") - .with_registration_strategy(RegistrationStrategy::Manual) .with_default_command("test-parachain") .with_default_image(images.cumulus.as_str()) .with_default_resources(|resources| { @@ -180,26 +153,25 @@ async fn build_network_config() -> Result { .with_limit_cpu(2) .with_limit_memory("4G") }) - .with_collator(|n| - n.with_name("recovery-target") - .validator(false) - .with_args(vec![ + .with_collator(|n| { + 
n.with_name("recovery-target").validator(false).with_args(vec![ ("-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug").into(), ("--disable-block-announcements").into(), ("--in-peers", "0").into(), ("--out-peers", "0").into(), ("--").into(), ("--reserved-only").into(), - ("--reserved-nodes", "{{ZOMBIE:alice:multiaddr}}").into() - ])) - .with_collator(|n| n.with_name("collator-elastic") - .with_args(vec![ + ("--reserved-nodes", "{{ZOMBIE:alice:multiaddr}}").into(), + ]) + }) + .with_collator(|n| { + n.with_name("collator").with_args(vec![ ("-laura=trace,runtime=info,cumulus-consensus=trace,consensus::common=trace,parachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug").into(), ("--disable-block-announcements").into(), ("--force-authoring").into(), - ("--authoring", "slot-based").into() + ("--authoring", "slot-based").into(), ]) - ) + }) }) .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { Ok(val) => global_settings.with_base_dir(val), From 90a801bd2a44dafa4ea00d7b4a2e4d8acb0e2df3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 28 Nov 2025 15:49:07 +0100 Subject: [PATCH 202/312] cumulus-zombienet-tests: Some optimizations and cleanups This removes some unneeded code in the tests. It also makes the tests work locally with native provider. 
--- Cargo.lock | 3 + cumulus/test/runtime/src/lib.rs | 5 + .../zombienet-sdk-helpers/Cargo.toml | 2 + .../zombienet-sdk-helpers/src/lib.rs | 255 ++++++++++++------ cumulus/zombienet/zombienet-sdk/Cargo.toml | 1 + cumulus/zombienet/zombienet-sdk/run.sh | 2 +- .../multiple_blocks_per_slot.rs | 21 +- .../zombie_ci/elastic_scaling/pov_recovery.rs | 83 ++---- .../elastic_scaling/slot_based_rp_offset.rs | 4 +- .../elastic_scaling/upgrade_to_3_cores.rs | 48 ++-- .../tests/zombie_ci/full_node_catching_up.rs | 7 +- .../tests/zombie_ci/migrate_solo.rs | 7 +- .../tests/zombie_ci/pov_recovery.rs | 7 +- .../zombie_ci/rpc_collator_build_blocks.rs | 7 +- .../tests/zombie_ci/runtime_upgrade.rs | 46 +--- .../tests/zombie_ci/sync_blocks.rs | 7 +- 16 files changed, 259 insertions(+), 246 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0bde6c6b90931..5caf73236d793 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5309,6 +5309,8 @@ dependencies = [ "log", "parity-scale-codec", "polkadot-primitives", + "sp-crypto-hashing 0.1.0", + "sp-runtime", "tokio", "zombienet-configuration", "zombienet-sdk", @@ -5319,6 +5321,7 @@ name = "cumulus-zombienet-sdk-tests" version = "0.1.0" dependencies = [ "anyhow", + "cumulus-test-runtime", "cumulus-zombienet-sdk-helpers", "env_logger 0.11.3", "futures", diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 75859680b5199..8acbc49a33834 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -51,6 +51,11 @@ pub mod elastic_scaling_multi_block_slot { include!(concat!(env!("OUT_DIR"), "/wasm_binary_elastic_scaling_multi_block_slot.rs")); } +pub mod elastic_scaling_12s_slot { + #[cfg(feature = "std")] + include!(concat!(env!("OUT_DIR"), "/wasm_binary_elastic_scaling_12s_slot.rs")); +} + pub mod sync_backing { #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary_sync_backing.rs")); diff --git a/cumulus/zombienet/zombienet-sdk-helpers/Cargo.toml 
b/cumulus/zombienet/zombienet-sdk-helpers/Cargo.toml index 7ce18d7732be8..7030b819fbc10 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/Cargo.toml +++ b/cumulus/zombienet/zombienet-sdk-helpers/Cargo.toml @@ -13,6 +13,8 @@ codec = { workspace = true, features = ["derive"] } log = { workspace = true } polkadot-primitives = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true } +sp-runtime = { workspace = true, default-features = true } tokio = { workspace = true, features = ["rt-multi-thread", "macros", "time"] } zombienet-sdk = { workspace = true } futures = { workspace = true } diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 1434b947f0c53..c7a6c0daa9812 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -2,43 +2,64 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::anyhow; -use codec::{Compact, Decode}; -use cumulus_primitives_core::{relay_chain, rpsr_digest::RPSR_CONSENSUS_ID}; +use codec::{Decode, Encode}; +use cumulus_primitives_core::{CumulusDigestItem, RelayBlockIdentifier}; use futures::stream::StreamExt; -use polkadot_primitives::{CandidateReceiptV2, Id as ParaId}; -use std::{ - cmp::max, - collections::{HashMap, HashSet}, - ops::Range, -}; +use polkadot_primitives::{BlakeTwo256, CandidateReceiptV2, Id as ParaId}; +use sp_runtime::traits::Hash; +use std::{cmp::max, collections::HashMap, ops::Range}; use tokio::{ join, time::{sleep, Duration}, }; -use zombienet_sdk::subxt::{ - self, - blocks::Block, - config::{polkadot::PolkadotExtrinsicParamsBuilder, substrate::DigestItem}, - dynamic::Value, - events::Events, - ext::scale_value::value, - tx::{signer::Signer, DynamicPayload, TxStatus}, - utils::H256, - OnlineClient, PolkadotConfig, -}; - +use zombienet_configuration::types::AssetLocation; use 
zombienet_sdk::{ + subxt::{ + self, + blocks::Block, + config::{polkadot::PolkadotExtrinsicParamsBuilder, Config}, + dynamic::Value, + events::Events, + ext::scale_value::value, + tx::{signer::Signer, DynamicPayload, TxStatus}, + utils::H256, + OnlineClient, PolkadotConfig, + }, tx_helper::{ChainUpgrade, RuntimeUpgradeOptions}, LocalFileSystem, Network, NetworkNode, }; -use zombienet_configuration::types::AssetLocation; - // Maximum number of blocks to wait for a session change. // If it does not arrive for whatever reason, we should not wait forever. const WAIT_MAX_BLOCKS_FOR_SESSION: u32 = 50; /// Create a batch call to assign cores to a parachain. +/// +/// Zombienet by default adds extra core for each registered parachain additionally to the one +/// requested by `num_cores`. It then assigns the parachains to the extra cores allocated at the +/// end. So, the passed core indices should be counted from zero. +/// +/// # Example +/// +/// Genesis patch: +/// ```json +/// "configuration": { +/// "config": { +/// "scheduler_params": { +/// "num_cores": 2, +/// } +/// } +/// } +/// ``` +/// +/// Runs the relay chain with `2` cores and we also add two parachains. +/// To assign these extra `2` cores, the call would look like this: +/// +/// ```rust +/// create_assign_core_call(&[(0, 2400), (1, 2400)]) +/// ``` +/// +/// The cores `2` and `3` are assigned to the parachains by zombienet. pub fn create_assign_core_call(core_and_para: &[(u32, u32)]) -> DynamicPayload { let mut assign_cores = vec![]; for (core, para_id) in core_and_para.iter() { @@ -56,7 +77,7 @@ pub fn create_assign_core_call(core_and_para: &[(u32, u32)]) -> DynamicPayload { ) } -/// Find an event in subxt `Events` and attempt to decode the fields fo the event. +/// Find an event in subxt `Events` and attempt to decode the fields of the event. 
fn find_event_and_decode_fields( events: &Events, pallet: &str, @@ -66,8 +87,7 @@ fn find_event_and_decode_fields( for event in events.iter() { let event = event?; if event.pallet_name() == pallet && event.variant_name() == variant { - let field_bytes = event.field_bytes().to_vec(); - result.push(T::decode(&mut &field_bytes[..])?); + result.push(T::decode(&mut &event.field_bytes()[..])?); } } Ok(result) @@ -87,16 +107,17 @@ async fn is_session_change( // Helper function for asserting the throughput of parachains, after the first session change. // // The throughput is measured as total number of backed candidates in a window of relay chain -// blocks. Relay chain blocks with session changes are generally ignores. +// blocks. Relay chain blocks with session changes are generally ignored. pub async fn assert_para_throughput( relay_client: &OnlineClient, stop_after: u32, - expected_candidate_ranges: HashMap>, + expected_candidate_ranges: impl Into>>, ) -> Result<(), anyhow::Error> { let mut blocks_sub = relay_client.blocks().subscribe_finalized().await?; let mut candidate_count: HashMap = HashMap::new(); let mut current_block_count = 0; + let expected_candidate_ranges = expected_candidate_ranges.into(); let valid_para_ids: Vec = expected_candidate_ranges.keys().cloned().collect(); // Wait for the first session, block production on the parachain will start after that. @@ -105,15 +126,15 @@ pub async fn assert_para_throughput( while let Some(block) = blocks_sub.next().await { let block = block?; log::debug!("Finalized relay chain block {}", block.number()); + let events = block.events().await?; // Do not count blocks with session changes, no backed blocks there. if is_session_change(&block).await? 
{ - continue; + continue } current_block_count += 1; - let events = block.events().await?; let receipts = find_event_and_decode_fields::>( &events, "ParaInclusion", @@ -123,9 +144,11 @@ pub async fn assert_para_throughput( for receipt in receipts { let para_id = receipt.descriptor.para_id(); log::debug!("Block backed for para_id {para_id}"); + if !valid_para_ids.contains(¶_id) { return Err(anyhow!("Invalid ParaId detected: {}", para_id)); }; + *(candidate_count.entry(para_id).or_default()) += 1; } @@ -244,6 +267,33 @@ pub async fn assert_blocks_are_being_finalized( Ok(()) } +/// Returns [`RelayBlockIdentifier`] for the given parachain block. +fn find_relay_block_identifier( + block: &Block>, +) -> Result { + let substrate_digest = + sp_runtime::generic::Digest::decode(&mut &block.header().digest.encode()[..]) + .expect("`subxt::Digest` and `substrate::Digest` should encode and decode; qed"); + + CumulusDigestItem::find_relay_block_identifier(&substrate_digest) + .ok_or_else(|| anyhow!("Failed to find `RelayBlockIdentifier` digest")) +} + +/// Checks if the given `RelayBlockIdentifier` matches a relay chain header. +fn identifier_matches_header( + identifier: &RelayBlockIdentifier, + header: &::Header, +) -> bool { + match identifier { + RelayBlockIdentifier::ByHash(hash) => { + let header_hash = BlakeTwo256::hash(&header.encode()); + header_hash == *hash + }, + RelayBlockIdentifier::ByStorageRoot { storage_root, .. } => + header.state_root == *storage_root, + } +} + /// Asserts that parachain blocks have the correct relay parent offset. This also checks that the /// relay chain descendants do not contain any session changes. /// @@ -261,12 +311,12 @@ pub async fn assert_relay_parent_offset( ) -> Result<(), anyhow::Error> { let mut relay_block_stream = relay_client.blocks().subscribe_all().await?; - // First parachain header #0 does not contains RSPR digest item. + // First parachain header #0 does not contain relay block identifier digest item. 
let mut para_block_stream = para_client.blocks().subscribe_all().await?.skip(1); let mut highest_relay_block_seen = 0; let mut num_para_blocks_seen = 0; - let mut forbidden_parents = HashSet::new(); - let mut seen_parents = HashMap::new(); + let mut forbidden_parents = Vec::new(); + let mut seen_relay_parents = HashMap::new(); loop { tokio::select! { Some(Ok(relay_block)) = relay_block_stream.next() => { @@ -287,30 +337,33 @@ pub async fn assert_relay_parent_offset( let mut current_hash = relay_block.header().parent_hash; for _ in 0..offset { let block = relay_client.blocks().at(current_hash).await.map_err(|_| anyhow!("Unable to fetch RC header."))?; - forbidden_parents.insert(block.header().state_root); + forbidden_parents.push(block.header().clone()); current_hash = block.header().parent_hash; } } }, Some(Ok(para_block)) = para_block_stream.next() => { - let logs = ¶_block.header().digest.logs; + let relay_block_identifier = find_relay_block_identifier(¶_block)?; - let Some((relay_parent_state_root, relay_parent_number)): Option<(H256, u32)> = logs.iter().find_map(extract_relay_parent_storage_root) else { - return Err(anyhow!("No RPSR digest found in header #{}", para_block.number())); + let relay_parent_number = match &relay_block_identifier { + RelayBlockIdentifier::ByHash(block_hash) => relay_client.blocks().at(*block_hash).await?.number(), + RelayBlockIdentifier::ByStorageRoot { block_number, .. 
} => *block_number, }; let para_block_number = para_block.number(); - seen_parents.insert(relay_parent_state_root, para_block); - log::debug!("Parachain block #{} was built on relay parent #{relay_parent_number}, highest seen was {highest_relay_block_seen}", para_block_number); + seen_relay_parents.insert(relay_block_identifier.clone(), para_block); + log::debug!("Parachain block #{para_block_number} was built on relay parent #{relay_parent_number}, highest seen was {highest_relay_block_seen}"); assert!(highest_relay_block_seen < offset || relay_parent_number <= highest_relay_block_seen.saturating_sub(offset), "Relay parent is not at the correct offset! relay_parent: #{relay_parent_number} highest_seen_relay_block: #{highest_relay_block_seen}"); - // As per explanation above, we need to check that no parachain blocks are build + // As per explanation above, we need to check that no parachain blocks are built // on the forbidden parents. for forbidden in &forbidden_parents { - if let Some(para_block) = seen_parents.get(forbidden) { - panic!( - "Parachain block {} was built on forbidden relay parent with session change descendants (state_root: {})", - para_block.hash(), - forbidden - ); + for (identifier, para_block) in &seen_relay_parents { + if identifier_matches_header(identifier, forbidden) { + panic!( + "Parachain block {} was built on forbidden relay parent with session change descendants ({:?})", + para_block.hash(), + identifier + ); + } } } num_para_blocks_seen += 1; @@ -324,31 +377,18 @@ pub async fn assert_relay_parent_offset( Ok(()) } -/// Extract relay parent information from the digest logs. 
-fn extract_relay_parent_storage_root( - digest: &DigestItem, -) -> Option<(relay_chain::Hash, relay_chain::BlockNumber)> { - match digest { - DigestItem::Consensus(id, val) if id == &RPSR_CONSENSUS_ID => { - let (h, n): (relay_chain::Hash, Compact) = - Decode::decode(&mut &val[..]).ok()?; - - Some((h, n.0)) - }, - _ => None, - } -} - -/// Submits the given `call` as transaction and waits for it successful finalization. +/// Submits the given `call` as signed transaction and waits for its successful finalization. /// -/// The transaction is send as immortal transaction. +/// The transaction is sent as immortal transaction. pub async fn submit_extrinsic_and_wait_for_finalization_success>( client: &OnlineClient, call: &DynamicPayload, signer: &S, -) -> Result<(), anyhow::Error> { +) -> Result { let extensions = PolkadotExtrinsicParamsBuilder::new().immortal().build(); + log::info!("Submitting transaction..."); + let mut tx = client .tx() .create_signed(call, signer, extensions) @@ -356,26 +396,29 @@ pub async fn submit_extrinsic_and_wait_for_finalization_success { - let _result = tx_in_block.wait_for_success().await?; - let block_status = - if status.as_finalized().is_some() { "Finalized" } else { "Best" }; - log::info!("[{}] In block: {:#?}", block_status, tx_in_block.block_hash()); + while let Some(status) = tx.next().await.transpose()? 
{ + match status { + TxStatus::InBestBlock(tx_in_block) => { + tx_in_block.wait_for_success().await?; + log::info!("[Best] In block: {:#?}", tx_in_block.block_hash()); + }, + TxStatus::InFinalizedBlock(ref tx_in_block) => { + tx_in_block.wait_for_success().await?; + log::info!("[Finalized] In block: {:#?}", tx_in_block.block_hash()); + return Ok(tx_in_block.block_hash()) }, TxStatus::Error { message } | TxStatus::Invalid { message } | TxStatus::Dropped { message } => { - return Err(anyhow::format_err!("Error submitting tx: {message}")); + return Err(anyhow::anyhow!("Error submitting tx: {message}")); }, _ => continue, } } - Ok(()) + + Err(anyhow::anyhow!("Transaction event stream ended without reaching the finalized state")) } /// Submits the given `call` as transaction and waits `timeout_secs` for it successful finalization. @@ -445,6 +488,39 @@ pub async fn assert_para_is_registered( Err(anyhow!("No more blocks to check")) } +/// Creates a runtime upgrade call using `sudo` and `set_code`. +pub fn create_runtime_upgrade_call(wasm: &[u8]) -> DynamicPayload { + zombienet_sdk::subxt::tx::dynamic( + "Sudo", + "sudo_unchecked_weight", + vec![ + value! { + System(set_code { code: Value::from_bytes(wasm) }) + }, + value! { + { + ref_time: 1u64, + proof_size: 1u64 + } + }, + ], + ) +} + +/// Waits for a runtime upgrade to complete. +pub async fn wait_for_runtime_upgrade( + client: &OnlineClient, +) -> Result<(), anyhow::Error> { + let updater = client.updater(); + let mut update_stream = updater.runtime_updates().await?; + + if let Some(Ok(update)) = update_stream.next().await { + let version = update.runtime_version().spec_version; + log::info!("Runtime upgraded to spec version {version}"); + } + Ok(()) +} + pub async fn runtime_upgrade( network: &Network, node: &NetworkNode, @@ -458,8 +534,35 @@ pub async fn runtime_upgrade( .await } +/// Assigns the given `cores` to the given `para_id`. 
+/// +/// Zombienet by default adds extra core for each registered parachain additionally to the one +/// requested by `num_cores`. It then assigns the parachains to the extra cores allocated at the +/// end. So, the passed core indices should be counted from zero. +/// +/// # Example +/// +/// Genesis patch: +/// ```json +/// "configuration": { +/// "config": { +/// "scheduler_params": { +/// "num_cores": 2, +/// } +/// } +/// } +/// ``` +/// +/// Runs the relay chain with `2` cores and we also add two parachains. +/// To assign these extra `2` cores, the call would look like this: +/// +/// ```rust +/// assign_core(&relay_node, PARA_ID, vec![0, 1]) +/// ``` +/// +/// The cores `2` and `3` are assigned to the parachains by Zombienet. pub async fn assign_cores( - node: &NetworkNode, + relay_node: &NetworkNode, para_id: u32, cores: Vec, ) -> Result<(), anyhow::Error> { @@ -468,7 +571,7 @@ pub async fn assign_cores( let assign_cores_call = create_assign_core_call(&cores.into_iter().map(|core| (core, para_id)).collect::>()); - let client: OnlineClient = node.wait_client().await?; + let client: OnlineClient = relay_node.wait_client().await?; let res = submit_extrinsic_and_wait_for_finalization_success_with_timeout( &client, &assign_cores_call, diff --git a/cumulus/zombienet/zombienet-sdk/Cargo.toml b/cumulus/zombienet/zombienet-sdk/Cargo.toml index 4e33a22c56c32..52f1a6e6364fb 100644 --- a/cumulus/zombienet/zombienet-sdk/Cargo.toml +++ b/cumulus/zombienet/zombienet-sdk/Cargo.toml @@ -20,6 +20,7 @@ zombienet-sdk = { workspace = true } zombienet-orchestrator = { workspace = true } zombienet-configuration = { workspace = true } cumulus-zombienet-sdk-helpers = { workspace = true } +cumulus-test-runtime = { workspace = true } sp-statement-store = { workspace = true, default-features = true } sc-statement-store = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } diff --git a/cumulus/zombienet/zombienet-sdk/run.sh 
b/cumulus/zombienet/zombienet-sdk/run.sh index 40d5bafc6c248..afabf28b668a1 100755 --- a/cumulus/zombienet/zombienet-sdk/run.sh +++ b/cumulus/zombienet/zombienet-sdk/run.sh @@ -6,4 +6,4 @@ cargo build --release -p cumulus-test-service --bin test-parachain -p polkadot - RELEASE_DIR=$(dirname "$(cargo locate-project --workspace --message-format plain)")/target/release export PATH=$RELEASE_DIR:$PATH -ZOMBIE_PROVIDER=native cargo test --release -p cumulus-zombienet-sdk-tests --features zombie-ci "$@" +ZOMBIE_PROVIDER=native cargo test --release -p cumulus-zombienet-sdk-tests --features zombie-ci -- --test-threads 1 "$@" diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/multiple_blocks_per_slot.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/multiple_blocks_per_slot.rs index 01f64a32b8431..2865f7cb42c92 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/multiple_blocks_per_slot.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/multiple_blocks_per_slot.rs @@ -34,32 +34,17 @@ async fn elastic_scaling_multiple_blocks_per_slot() -> Result<(), anyhow::Error> let para_node_elastic = network.get_node("collator-1")?; let relay_client: OnlineClient = relay_node.wait_client().await?; - assert_para_throughput( - &relay_client, - 10, - [(ParaId::from(PARA_ID), 3..18)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 3..18)]).await?; assert_finality_lag(¶_node_elastic.wait_client().await?, 5).await?; assign_cores(relay_node, PARA_ID, vec![2, 3]).await?; - assert_para_throughput( - &relay_client, - 15, - [(ParaId::from(PARA_ID), 39..46)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&relay_client, 15, [(ParaId::from(PARA_ID), 39..46)]).await?; assert_finality_lag(¶_node_elastic.wait_client().await?, 20).await?; assign_cores(relay_node, PARA_ID, vec![4, 5, 6]).await?; - assert_para_throughput( - &relay_client, - 10, 
- [(ParaId::from(PARA_ID), 52..61)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 52..61)]).await?; assert_finality_lag(¶_node_elastic.wait_client().await?, 30).await?; log::info!("Test finished successfully"); Ok(()) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs index 08c7824aa8939..efdf218ec5eab 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs @@ -4,17 +4,15 @@ use anyhow::anyhow; use std::{sync::Arc, time::Duration}; -use crate::utils::{initialize_network, BEST_BLOCK_METRIC}; +use crate::utils::initialize_network; -use cumulus_zombienet_sdk_helpers::{ - assert_para_is_registered, assert_para_throughput, assign_cores, -}; +use cumulus_zombienet_sdk_helpers::{assert_para_throughput, assign_cores}; use polkadot_primitives::Id as ParaId; use serde_json::json; use zombienet_orchestrator::network::node::LogLineCountOptions; use zombienet_sdk::{ subxt::{OnlineClient, PolkadotConfig}, - NetworkConfig, NetworkConfigBuilder, RegistrationStrategy, + NetworkConfig, NetworkConfigBuilder, }; const PARA_ID: u32 = 2100; @@ -29,55 +27,22 @@ async fn elastic_scaling_pov_recovery() -> Result<(), anyhow::Error> { log::info!("Spawning network with relay chain only"); let config = build_network_config().await?; - let mut network = initialize_network(config).await?; + let network = initialize_network(config).await?; let alice = network.get_node("alice")?; - let collator_elastic = network.get_node("collator-elastic")?; - - log::info!("Checking if alice is up"); - assert!(alice.wait_until_is_up(60u64).await.is_ok()); + let collator = network.get_node("collator")?; - log::info!("Checking if collator-elastic is up"); - 
assert!(collator_elastic.wait_until_is_up(60u64).await.is_ok()); - - assign_cores(alice, PARA_ID, vec![0, 1]).await?; - - log::info!("Waiting 20 blocks to register parachain"); - // Wait 20 blocks and register parachain. This part is important for pov-recovery. - // We need to make sure that the recovering node is able to see all relay-chain - // notifications containing the candidates to recover. - assert!(alice - .wait_metric_with_timeout(BEST_BLOCK_METRIC, |b| b >= 20.0, 250u64) - .await - .is_ok()); - - log::info!("Registering parachain para_id = {PARA_ID}"); let relay_client: OnlineClient = alice.wait_client().await?; - network.register_parachain(PARA_ID).await?; - log::info!("Ensuring parachain is registered within 30 blocks"); - assert_para_is_registered(&relay_client, ParaId::from(PARA_ID), 30).await?; + assign_cores(alice, PARA_ID, vec![0, 1]).await?; log::info!("Ensuring parachain making progress"); - assert_para_throughput( - &relay_client, - 20, - [(ParaId::from(PARA_ID), 40..65)].into_iter().collect(), - ) - .await?; - - let collator_elastic = network.get_node("collator-elastic")?; - - log::info!("Checking block production"); - assert!(collator_elastic - .wait_metric_with_timeout(BEST_BLOCK_METRIC, |b| b >= 40.0, 225u64) - .await - .is_ok()); + assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 40..65)]).await?; // We want to make sure that none of the consensus hook checks fail, even if the chain makes // progress. If below log line occurred 1 or more times then test failed. 
- log::info!("Ensuring none of the consensus hook checks fail at {}", collator_elastic.name()); - let result = collator_elastic + log::info!("Ensuring none of the consensus hook checks fail at {}", collator.name()); + let result = collator .wait_log_line_count_with_timeout( "set_validation_data inherent needs to be present in every block", false, @@ -85,7 +50,9 @@ async fn elastic_scaling_pov_recovery() -> Result<(), anyhow::Error> { ) .await?; - assert!(result.success(), "Consensus hook failed at {}: {:?}", collator_elastic.name(), result); + if !result.success() { + return Err(anyhow!("Consensus hook failed at {}: {:?}", collator.name(), result)); + } // Wait (up to 10 seconds) until pattern occurs more than 35 times let options = LogLineCountOptions { @@ -105,7 +72,9 @@ async fn elastic_scaling_pov_recovery() -> Result<(), anyhow::Error> { ) .await?; - assert!(result.success(), "Failed importing blocks using PoV recovery by {name}: {result:?}"); + if !result.success() { + return Err(anyhow!("Failed importing blocks using PoV recovery by {name}: {result:?}")); + } log::info!("Test finished successfully"); Ok(()) @@ -126,7 +95,7 @@ async fn build_network_config() -> Result { // - parachain nodes // - recovery-target // - full node - // - collator-elastic + // - collator // - collator which is the only one producing blocks NetworkConfigBuilder::new() .with_relaychain(|r| { @@ -173,7 +142,6 @@ async fn build_network_config() -> Result { .with_parachain(|p| { p.with_id(PARA_ID) .with_chain("elastic-scaling") - .with_registration_strategy(RegistrationStrategy::Manual) .with_default_command("test-parachain") .with_default_image(images.cumulus.as_str()) .with_default_resources(|resources| { @@ -185,26 +153,25 @@ async fn build_network_config() -> Result { .with_limit_cpu(2) .with_limit_memory("4G") }) - .with_collator(|n| - n.with_name("recovery-target") - .validator(false) - .with_args(vec![ + .with_collator(|n| { + 
n.with_name("recovery-target").validator(false).with_args(vec![ ("-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug").into(), ("--disable-block-announcements").into(), ("--in-peers", "0").into(), ("--out-peers", "0").into(), ("--").into(), ("--reserved-only").into(), - ("--reserved-nodes", "{{ZOMBIE:alice:multiaddr}}").into() - ])) - .with_collator(|n| n.with_name("collator-elastic") - .with_args(vec![ + ("--reserved-nodes", "{{ZOMBIE:alice:multiaddr}}").into(), + ]) + }) + .with_collator(|n| { + n.with_name("collator").with_args(vec![ ("-laura=trace,runtime=info,cumulus-consensus=trace,consensus::common=trace,parachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug").into(), ("--disable-block-announcements").into(), ("--force-authoring").into(), - ("--authoring", "slot-based").into() + ("--authoring", "slot-based").into(), ]) - ) + }) }) .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { Ok(val) => global_settings.with_base_dir(val), diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/slot_based_rp_offset.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/slot_based_rp_offset.rs index 131fb45149cb1..916538f1ea698 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/slot_based_rp_offset.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/slot_based_rp_offset.rs @@ -53,7 +53,7 @@ async fn elastic_scaling_slot_based_relay_parent_offset_test() -> Result<(), any .with_chain("relay-parent-offset") .with_default_args(vec![ "--authoring=slot-based".into(), - ("-lparachain=debug,aura=debug,parachain::collator-protocol=debug").into(), + ("-lparachain=debug,aura=debug").into(), ]) .with_collator(|n| n.with_name("collator-rp-offset")) }) @@ -79,7 +79,7 @@ async fn elastic_scaling_slot_based_relay_parent_offset_test() -> Result<(), any assign_cores(relay_node, 
2400, vec![0, 1]).await?; - assert_relay_parent_offset(&relay_client, ¶_client, 2, 45).await?; + assert_relay_parent_offset(&relay_client, ¶_client, 2, 30).await?; log::info!("Test finished successfully"); diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs index 6e2c886db521d..8645b80a31d96 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs @@ -7,22 +7,23 @@ use std::time::Duration; use crate::utils::initialize_network; +use cumulus_test_runtime::{ + elastic_scaling::WASM_BINARY_BLOATY as WASM_ELASTIC_SCALING, + elastic_scaling_12s_slot::WASM_BINARY_BLOATY as WASM_ELASTIC_SCALING_12S_SLOT, +}; use cumulus_zombienet_sdk_helpers::{ - assert_para_throughput, assign_cores, runtime_upgrade, wait_for_upgrade, + assert_para_throughput, assign_cores, create_runtime_upgrade_call, + submit_extrinsic_and_wait_for_finalization_success, wait_for_runtime_upgrade, }; use polkadot_primitives::Id as ParaId; use rstest::rstest; use zombienet_sdk::{ subxt::{OnlineClient, PolkadotConfig}, + subxt_signer::sr25519::dev, NetworkConfig, NetworkConfigBuilder, }; const PARA_ID: u32 = 2000; -const WASM_WITH_ELASTIC_SCALING: &str = - "/tmp/wasm_binary_elastic_scaling.rs.compact.compressed.wasm"; - -const WASM_WITH_ELASTIC_SCALING_12S_SLOT: &str = - "/tmp/wasm_binary_elastic_scaling_12s_slot.rs.compact.compressed.wasm"; // This test ensures that we can upgrade the parachain's runtime to support elastic scaling // and that the parachain produces 3 blocks per slot after the upgrade. 
@@ -50,20 +51,10 @@ async fn elastic_scaling_upgrade_to_3_cores( if async_backing { log::info!("Ensuring parachain makes progress making 6s blocks"); - assert_para_throughput( - &alice_client, - 20, - [(ParaId::from(PARA_ID), 15..21)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 15..21)]).await?; } else { log::info!("Ensuring parachain makes progress making 12s blocks"); - assert_para_throughput( - &alice_client, - 20, - [(ParaId::from(PARA_ID), 7..12)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 7..12)]).await?; } assign_cores(alice, PARA_ID, vec![1, 2]).await?; @@ -75,10 +66,16 @@ async fn elastic_scaling_upgrade_to_3_cores( collator0_client.backend().current_runtime_version().await?.spec_version; log::info!("Current runtime spec version {current_spec_version}"); - let wasm = - if async_backing { WASM_WITH_ELASTIC_SCALING } else { WASM_WITH_ELASTIC_SCALING_12S_SLOT }; + let wasm = if async_backing { + WASM_ELASTIC_SCALING.expect("Wasm runtime not build") + } else { + WASM_ELASTIC_SCALING_12S_SLOT.expect("Wasm runtime not build") + }; - runtime_upgrade(&network, collator0, PARA_ID, wasm).await?; + log::info!("Performing runtime upgrade"); + let call = create_runtime_upgrade_call(wasm); + submit_extrinsic_and_wait_for_finalization_success(&collator0_client, &call, &dev::alice()) + .await?; let collator1 = network.get_node("collator1")?; let collator1_client: OnlineClient = collator1.wait_client().await?; @@ -90,7 +87,7 @@ async fn elastic_scaling_upgrade_to_3_cores( ); tokio::time::timeout( Duration::from_secs(timeout_secs), - wait_for_upgrade(collator1_client, expected_spec_version), + wait_for_runtime_upgrade(&collator1_client), ) .await .expect("Timeout waiting for runtime upgrade")?; @@ -103,12 +100,7 @@ async fn elastic_scaling_upgrade_to_3_cores( ); log::info!("Ensure elastic scaling works, 3 blocks should be produced in each 6s slot"); 
- assert_para_throughput( - &alice_client, - 20, - [(ParaId::from(PARA_ID), 50..61)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 50..61)]).await?; Ok(()) } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/full_node_catching_up.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/full_node_catching_up.rs index 55333ee029c66..1c260926aaeac 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/full_node_catching_up.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/full_node_catching_up.rs @@ -32,12 +32,7 @@ async fn full_node_catching_up() -> Result<(), anyhow::Error> { let relay_client: OnlineClient = relay_alice.wait_client().await?; log::info!("Ensuring parachain making progress"); - assert_para_throughput( - &relay_client, - 20, - [(ParaId::from(PARA_ID), 2..40)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 2..40)]).await?; for (name, timeout_secs) in [("dave", 250u64), ("eve", 250u64)] { log::info!("Ensuring {name} reports expected block height"); diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/migrate_solo.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/migrate_solo.rs index 21bfc5b520a98..b4d9cdf422838 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/migrate_solo.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/migrate_solo.rs @@ -49,12 +49,7 @@ async fn migrate_solo_to_para() -> Result<(), anyhow::Error> { let alice_client: OnlineClient = alice.wait_client().await?; log::info!("Ensuring parachain making progress"); - assert_para_throughput( - &alice_client, - 20, - [(ParaId::from(PARA_ID), 2..40)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 2..40)]).await?; let dave = network.get_node("dave")?; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_recovery.rs 
b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_recovery.rs index 2d10cfbb038fc..059670eacbe7c 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_recovery.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_recovery.rs @@ -54,12 +54,7 @@ async fn pov_recovery() -> Result<(), anyhow::Error> { assert_para_is_registered(&validator_client, ParaId::from(PARA_ID), 30).await?; log::info!("Ensuring parachain making progress"); - assert_para_throughput( - &validator_client, - 20, - [(ParaId::from(PARA_ID), 2..20)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&validator_client, 20, [(ParaId::from(PARA_ID), 2..20)]).await?; for (name, timeout_secs) in [("bob", 600u64)] { log::info!("Checking block production for {name} within {timeout_secs}s"); diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/rpc_collator_build_blocks.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/rpc_collator_build_blocks.rs index 04fd3206a671b..819e99f68f15d 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/rpc_collator_build_blocks.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/rpc_collator_build_blocks.rs @@ -31,12 +31,7 @@ async fn rpc_collator_builds_blocks() -> Result<(), anyhow::Error> { let alice_client: OnlineClient = alice.wait_client().await?; log::info!("Ensuring parachain making progress"); - assert_para_throughput( - &alice_client, - 20, - [(ParaId::from(PARA_ID), 2..40)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 2..40)]).await?; let dave = network.get_node("dave")?; let eve = network.get_node("eve")?; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs index 324f4f23c4672..f8a5123475b4d 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs @@ -1,23 
+1,21 @@ // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 +use crate::utils::initialize_network; use anyhow::anyhow; +use cumulus_test_runtime::wasm_spec_version_incremented::WASM_BINARY_BLOATY as WASM_RUNTIME_UPGRADE; +use cumulus_zombienet_sdk_helpers::{ + create_runtime_upgrade_call, submit_extrinsic_and_wait_for_finalization_success, + wait_for_runtime_upgrade, +}; use std::time::Duration; - -use crate::utils::initialize_network; - -use cumulus_zombienet_sdk_helpers::{assert_para_throughput, wait_for_upgrade}; -use polkadot_primitives::Id as ParaId; -use zombienet_configuration::types::AssetLocation; use zombienet_sdk::{ subxt::{OnlineClient, PolkadotConfig}, - tx_helper::{ChainUpgrade, RuntimeUpgradeOptions}, + subxt_signer::sr25519::dev, NetworkConfig, NetworkConfigBuilder, }; const PARA_ID: u32 = 2000; -const WASM_WITH_SPEC_VERSION_INCREMENTED: &str = - "/tmp/wasm_binary_spec_version_incremented.rs.compact.compressed.wasm"; // This tests makes sure that it is possible to upgrade parachain's runtime // and parachain produces blocks after such upgrade. 
@@ -31,17 +29,6 @@ async fn runtime_upgrade() -> Result<(), anyhow::Error> { let config = build_network_config().await?; let network = initialize_network(config).await?; - let alice = network.get_node("alice")?; - let alice_client: OnlineClient = alice.wait_client().await?; - - log::info!("Ensuring parachain making progress"); - assert_para_throughput( - &alice_client, - 20, - [(ParaId::from(PARA_ID), 2..40)].into_iter().collect(), - ) - .await?; - let timeout_secs: u64 = 250; let charlie = network.get_node("charlie")?; let charlie_client: OnlineClient = charlie.wait_client().await?; @@ -51,13 +38,9 @@ async fn runtime_upgrade() -> Result<(), anyhow::Error> { log::info!("Current runtime spec version {current_spec_version}"); log::info!("Performing runtime upgrade"); - network - .parachain(PARA_ID) - .unwrap() - .perform_runtime_upgrade( - charlie, - RuntimeUpgradeOptions::new(AssetLocation::from(WASM_WITH_SPEC_VERSION_INCREMENTED)), - ) + + let call = create_runtime_upgrade_call(WASM_RUNTIME_UPGRADE.expect("Wasm runtime not build")); + submit_extrinsic_and_wait_for_finalization_success(&charlie_client, &call, &dev::alice()) .await?; let dave = network.get_node("dave")?; @@ -68,12 +51,9 @@ async fn runtime_upgrade() -> Result<(), anyhow::Error> { "Waiting (up to {timeout_secs}s) for parachain runtime upgrade to version {}", expected_spec_version ); - tokio::time::timeout( - Duration::from_secs(timeout_secs), - wait_for_upgrade(dave_client, expected_spec_version), - ) - .await - .expect("Timeout waiting for runtime upgrade")?; + tokio::time::timeout(Duration::from_secs(timeout_secs), wait_for_runtime_upgrade(&dave_client)) + .await + .expect("Timeout waiting for runtime upgrade")?; let spec_version_from_charlie = charlie_client.backend().current_runtime_version().await?.spec_version; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/sync_blocks.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/sync_blocks.rs index 4f978f9c872ef..4bdcc996228f6 100644 --- 
a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/sync_blocks.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/sync_blocks.rs @@ -28,12 +28,7 @@ async fn sync_blocks_from_tip_without_connected_collator() -> Result<(), anyhow: let relay_client: OnlineClient = relay_alice.wait_client().await?; log::info!("Ensuring parachain making progress"); - assert_para_throughput( - &relay_client, - 10, - [(ParaId::from(PARA_ID), 5..11)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 5..11)]).await?; let para_ferdie = network.get_node("ferdie")?; let para_eve = network.get_node("eve")?; From 49546835f410a0b7aa2852b582bf2007d3c1583c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 28 Nov 2025 21:14:35 +0100 Subject: [PATCH 203/312] Ensure last block has `maybe_last = true` --- .../src/validate_block/implementation.rs | 102 +++++++++++++----- .../src/validate_block/tests.rs | 55 +++++++++- cumulus/test/client/src/block_builder.rs | 31 ++++++ 3 files changed, 162 insertions(+), 26 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index f6a240b65fb07..ce5d4fe77fc98 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -21,7 +21,7 @@ use alloc::vec::Vec; use codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain::{BlockNumber as RNumber, Hash as RHash, UMPSignal, UMP_SEPARATOR}, - ClaimQueueOffset, CoreSelector, ParachainBlockData, PersistedValidationData, + ClaimQueueOffset, CoreSelector, CumulusDigestItem, ParachainBlockData, PersistedValidationData, }; use frame_support::{ traits::{ExecuteBlock, Get, IsSubType}, @@ -31,8 +31,9 @@ use polkadot_parachain_primitives::primitives::{HeadData, ValidationResult}; use sp_core::storage::{well_known_keys, 
ChildInfo, StateVersion}; use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::{hashing::blake2_128, KillStorageResult}; -use sp_runtime::traits::{ - Block as BlockT, ExtrinsicCall, Hash as HashT, HashingFor, Header as HeaderT, LazyBlock, +use sp_runtime::{ + traits::{Block as BlockT, ExtrinsicCall, Hash as HashT, HashingFor, Header as HeaderT, LazyBlock}, + DigestItem, }; use sp_state_machine::OverlayedChanges; use sp_trie::{HashDBT, ProofSizeProvider, EMPTY_PREFIX}; @@ -83,7 +84,7 @@ where B::Extrinsic: ExtrinsicCall, ::Call: IsSubType>, { - // sp_runtime::runtime_logger::RuntimeLogger::init(); + sp_runtime::runtime_logger::RuntimeLogger::init(); let _guard = ( // Replace storage calls with our own implementations @@ -140,26 +141,7 @@ where let (blocks, proof) = block_data.into_inner(); - assert_eq!( - *blocks - .first() - .expect("BlockData should have at least one block") - .header() - .parent_hash(), - parent_header.hash(), - "Parachain head needs to be the parent of the first block" - ); - - blocks.iter().fold(parent_header.hash(), |p, b| { - assert_eq!( - p, - *b.header().parent_hash(), - "Not a valid chain of blocks :(; {:?} not a parent of {:?}?", - array_bytes::bytes2hex("0x", p.as_ref()), - array_bytes::bytes2hex("0x", b.header().parent_hash().as_ref()), - ); - b.header().hash() - }); + validate_blocks::(&blocks, &parent_header); let mut processed_downward_messages = 0; let mut upward_messages = BoundedVec::default(); @@ -384,6 +366,78 @@ fn validate_validation_data( ); } +/// Validates that the given blocks form a valid chain and have consistent BundleInfo. 
+fn validate_blocks(blocks: &[B::LazyBlock], parent_header: &B::Header) { + let num_blocks = blocks.len(); + + // Check first block's parent matches the given parent_header + assert_eq!( + *blocks + .first() + .expect("BlockData should have at least one block") + .header() + .parent_hash(), + parent_header.hash(), + "Parachain head needs to be the parent of the first block" + ); + + let mut first_block_has_bundle_info: Option = None; + + blocks.iter().enumerate().fold(parent_header.hash(), |expected_parent, (block_index, block)| { + // Check chain validity + assert_eq!( + expected_parent, + *block.header().parent_hash(), + "Not a valid chain of blocks :(; {:?} not a parent of {:?}?", + array_bytes::bytes2hex("0x", expected_parent.as_ref()), + array_bytes::bytes2hex("0x", block.header().parent_hash().as_ref()), + ); + + // Validate BundleInfo consistency + let bundle_info = CumulusDigestItem::find_bundle_info(block.header().digest()); + match (first_block_has_bundle_info, &bundle_info) { + (None, info) => { + first_block_has_bundle_info = Some(info.is_some()); + }, + (Some(true), None) => { + panic!("All blocks must have BundleInfo if the first block has it"); + }, + (Some(false), Some(_)) => { + panic!("No block should have BundleInfo if the first block doesn't have it"); + }, + _ => {}, + } + + if let Some(ref info) = bundle_info { + assert_eq!( + info.index as usize, + block_index, + "BundleInfo index mismatch: expected {}, got {}", + block_index, + info.index + ); + + if block_index + 1 == num_blocks { + let has_use_full_core = + CumulusDigestItem::contains_use_full_core(block.header().digest()); + let has_runtime_upgrade = block + .header() + .digest() + .logs + .iter() + .any(|d| matches!(d, DigestItem::RuntimeEnvironmentUpdated)); + + assert!( + info.maybe_last || has_use_full_core || has_runtime_upgrade, + "Last block in PoV must have maybe_last=true, UseFullCore digest, or RuntimeEnvironmentUpdated digest" + ); + } + } + + block.header().hash() + }); +} + 
/// Build a seed from the head data of the parachain block. /// /// Uses both the relay parent storage root and the hash of the blocks diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 0e62f9da445f6..3653731b05d36 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -16,7 +16,9 @@ use crate::{validate_block::MemoryOptimizedValidationParams, *}; use codec::{Decode, DecodeAll, Encode}; -use cumulus_primitives_core::{relay_chain, ParachainBlockData, PersistedValidationData}; +use cumulus_primitives_core::{ + relay_chain, BundleInfo, ParachainBlockData, PersistedValidationData, +}; use cumulus_test_client::{ generate_extrinsic, generate_extrinsic_with_pair, runtime::{ @@ -166,6 +168,7 @@ fn build_multiple_blocks_with_witness( mut sproof_builder: RelayStateSproofBuilder, num_blocks: u32, extra_extrinsics: impl Fn(u32) -> Vec, + pre_digests: impl Fn(u32) -> Vec, ) -> TestBlockData { let parent_head_root = *parent_head.state_root(); sproof_builder.para_id = test_runtime::PARACHAIN_ID.into(); @@ -203,12 +206,13 @@ fn build_multiple_blocks_with_witness( mut block_builder, persisted_validation_data: p_v_data, proof_recorder, - } = client.init_block_builder_with_ignored_nodes( + } = client.init_block_builder_with_ignored_nodes_and_pre_digests( parent_head.hash(), Some(validation_data.clone()), sproof_builder.clone(), timestamp, ignored_nodes.clone(), + (pre_digests)(i), ); persisted_validation_data = Some(p_v_data); @@ -302,6 +306,7 @@ fn validate_multiple_blocks_work() { Some(i), )] }, + |_| Vec::new(), ); assert!(block.proof().encoded_size() < 3 * 1024 * 1024); @@ -600,6 +605,7 @@ fn state_changes_in_multiple_blocks_are_applied_in_exact_order() { Some(i), )] }, + |_| Vec::new(), ); // 3. Validate the PoV. 
@@ -666,6 +672,7 @@ fn ensure_we_only_like_blockchains() { Default::default(), 4, |_| Default::default(), + |_| Vec::new(), ); // Reference some non existing parent. @@ -749,6 +756,7 @@ fn rejects_multiple_blocks_per_pov_when_applying_runtime_upgrade() { proof_builder, 4, |_| Vec::new(), + |_| Vec::new(), ); // 3. Validate the PoV. @@ -775,3 +783,46 @@ fn rejects_multiple_blocks_per_pov_when_applying_runtime_upgrade() { .contains("only one block per PoV is allowed")); } } + +#[test] +fn validate_block_rejects_incomplete_bundle() { + use sp_tracing::capture_test_logs; + + let (client, parent_head) = create_elastic_scaling_test_client(); + + // Build 2 blocks with BundleInfo + let TestBlockData { block, validation_data } = build_multiple_blocks_with_witness( + &client, + parent_head.clone(), + Default::default(), + 2, + |_| Vec::new(), + |i| vec![BundleInfo { index: i as u8, maybe_last: i == 1 }.to_digest_item()], + ); + + // Validation with only first block should fail (incomplete bundle) + let first_block_only = + ParachainBlockData::new(vec![block.blocks()[0].clone()], block.proof().clone()); + let log_capture = capture_test_logs!({ + call_validate_block_elastic_scaling( + parent_head.clone(), + first_block_only, + validation_data.relay_parent_storage_root, + ) + .unwrap_err(); + }); + assert!( + log_capture.contains("Last block in PoV must have maybe_last=true"), + "Expected log about missing maybe_last, got: {log_capture}" + ); + + // Validation with both blocks should succeed + let header = block.blocks().last().unwrap().header().clone(); + let res_header = call_validate_block_elastic_scaling( + parent_head, + block, + validation_data.relay_parent_storage_root, + ) + .expect("Calls `validate_block`"); + assert_eq!(header, res_header); +} diff --git a/cumulus/test/client/src/block_builder.rs b/cumulus/test/client/src/block_builder.rs index c9ae642fb95cb..17e4d026f6646 100644 --- a/cumulus/test/client/src/block_builder.rs +++ 
b/cumulus/test/client/src/block_builder.rs @@ -77,6 +77,17 @@ pub trait InitBlockBuilder { ignored_nodes: ProofRecorderIgnoredNodes, ) -> BlockBuilderAndSupportData<'_>; + /// Init a specific block builder with ignored nodes and pre-digests. + fn init_block_builder_with_ignored_nodes_and_pre_digests( + &self, + at: Hash, + validation_data: Option>, + relay_sproof_builder: RelayStateSproofBuilder, + timestamp: u64, + ignored_nodes: ProofRecorderIgnoredNodes, + pre_digests: Vec, + ) -> BlockBuilderAndSupportData<'_>; + /// Init a specific block builder using the given pre-digests. /// /// Same as [`InitBlockBuilder::init_block_builder`] besides that it takes vector of @@ -258,6 +269,26 @@ impl InitBlockBuilder for Client { ) } + fn init_block_builder_with_ignored_nodes_and_pre_digests( + &self, + at: Hash, + validation_data: Option>, + relay_sproof_builder: RelayStateSproofBuilder, + timestamp: u64, + ignored_nodes: ProofRecorderIgnoredNodes, + pre_digests: Vec, + ) -> BlockBuilderAndSupportData<'_> { + init_block_builder( + self, + at, + validation_data, + relay_sproof_builder, + Some(timestamp), + Some(ignored_nodes), + Some(pre_digests), + ) + } + fn init_block_builder_with_timestamp( &self, at: Hash, From 3afdc1907b2d53d2d876f2b0a0a498fe7f932740 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 28 Nov 2025 22:06:10 +0100 Subject: [PATCH 204/312] Apply suggestion from @bkchr --- .../tests/zombie_ci/elastic_scaling/slot_based_rp_offset.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/slot_based_rp_offset.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/slot_based_rp_offset.rs index 916538f1ea698..307d5242681d0 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/slot_based_rp_offset.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/slot_based_rp_offset.rs @@ -79,7 +79,7 @@ async fn 
elastic_scaling_slot_based_relay_parent_offset_test() -> Result<(), any assign_cores(relay_node, 2400, vec![0, 1]).await?; - assert_relay_parent_offset(&relay_client, ¶_client, 2, 30).await?; + assert_relay_parent_offset(&relay_client, ¶_client, 2, 45).await?; log::info!("Test finished successfully"); From ef0d57a2b20832bd7471a2d0bf5f0edf59f17a55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 28 Nov 2025 22:57:10 +0100 Subject: [PATCH 205/312] Fixes --- .../zombienet-sdk-helpers/src/lib.rs | 106 ++++++++---------- .../zombie_ci/elastic_scaling/pov_recovery.rs | 1 + 2 files changed, 45 insertions(+), 62 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index c7a6c0daa9812..6fe05dc243d23 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -12,12 +12,11 @@ use tokio::{ join, time::{sleep, Duration}, }; -use zombienet_configuration::types::AssetLocation; use zombienet_sdk::{ subxt::{ self, blocks::Block, - config::{polkadot::PolkadotExtrinsicParamsBuilder, Config}, + config::{polkadot::PolkadotExtrinsicParamsBuilder, substrate::DigestItem, Config}, dynamic::Value, events::Events, ext::scale_value::value, @@ -25,8 +24,7 @@ use zombienet_sdk::{ utils::H256, OnlineClient, PolkadotConfig, }, - tx_helper::{ChainUpgrade, RuntimeUpgradeOptions}, - LocalFileSystem, Network, NetworkNode, + NetworkNode, }; // Maximum number of blocks to wait for a session change. @@ -488,52 +486,6 @@ pub async fn assert_para_is_registered( Err(anyhow!("No more blocks to check")) } -/// Creates a runtime upgrade call using `sudo` and `set_code`. -pub fn create_runtime_upgrade_call(wasm: &[u8]) -> DynamicPayload { - zombienet_sdk::subxt::tx::dynamic( - "Sudo", - "sudo_unchecked_weight", - vec![ - value! { - System(set_code { code: Value::from_bytes(wasm) }) - }, - value! 
{ - { - ref_time: 1u64, - proof_size: 1u64 - } - }, - ], - ) -} - -/// Waits for a runtime upgrade to complete. -pub async fn wait_for_runtime_upgrade( - client: &OnlineClient, -) -> Result<(), anyhow::Error> { - let updater = client.updater(); - let mut update_stream = updater.runtime_updates().await?; - - if let Some(Ok(update)) = update_stream.next().await { - let version = update.runtime_version().spec_version; - log::info!("Runtime upgraded to spec version {version}"); - } - Ok(()) -} - -pub async fn runtime_upgrade( - network: &Network, - node: &NetworkNode, - para_id: u32, - wasm_path: &str, -) -> Result<(), anyhow::Error> { - log::info!("Performing runtime upgrade for parachain {}, wasm: {}", para_id, wasm_path); - let para = network.parachain(para_id).unwrap(); - - para.perform_runtime_upgrade(node, RuntimeUpgradeOptions::new(AssetLocation::from(wasm_path))) - .await -} - /// Assigns the given `cores` to the given `para_id`. /// /// Zombienet by default adds extra core for each registered parachain additionally to the one @@ -585,19 +537,49 @@ pub async fn assign_cores( Ok(()) } -pub async fn wait_for_upgrade( - client: OnlineClient, - expected_version: u32, -) -> Result<(), anyhow::Error> { - let updater = client.updater(); - let mut update_stream = updater.runtime_updates().await?; +/// Creates a runtime upgrade call using `sudo` and `set_code`. +pub fn create_runtime_upgrade_call(wasm: &[u8]) -> DynamicPayload { + zombienet_sdk::subxt::tx::dynamic( + "Sudo", + "sudo_unchecked_weight", + vec![ + value! { + System(set_code { code: Value::from_bytes(wasm) }) + }, + value! { + { + ref_time: 1u64, + proof_size: 1u64 + } + }, + ], + ) +} - while let Some(Ok(update)) = update_stream.next().await { - let version = update.runtime_version().spec_version; - log::info!("Update runtime spec version {version}"); - if version == expected_version { - break; +/// Wait until a runtime upgrade has happened. 
+/// +/// This checks all finalized blocks until it finds a block that sets the +/// `RuntimeEnvironmentUpdated` digest. +/// +/// Returns the hash of the block at which the runtime upgrade was applied. +pub async fn wait_for_runtime_upgrade( + client: &OnlineClient, +) -> Result { + let mut finalized_blocks = client.blocks().subscribe_finalized().await?; + + while let Some(Ok(block)) = finalized_blocks.next().await { + if block + .header() + .digest + .logs + .iter() + .any(|d| matches!(d, DigestItem::RuntimeEnvironmentUpdated)) + { + log::info!("Runtime upgraded in block {:?}", block.hash()); + + return Ok(block.hash()) } } - Ok(()) + + Err(anyhow!("Did not find a runtime upgrade")) } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs index efdf218ec5eab..53691492b949f 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs @@ -166,6 +166,7 @@ async fn build_network_config() -> Result { }) .with_collator(|n| { n.with_name("collator").with_args(vec![ + ("--reserved-nodes", "{{ZOMBIE:alice:multiaddr}}").into(), ("-laura=trace,runtime=info,cumulus-consensus=trace,consensus::common=trace,parachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug").into(), ("--disable-block-announcements").into(), ("--force-authoring").into(), From d204912ee96904e4155eaddef3de967f1c185502 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 28 Nov 2025 23:23:35 +0100 Subject: [PATCH 206/312] More fixes --- .../zombie_ci/elastic_scaling/pov_recovery.rs | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs 
b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs index 53691492b949f..aa918dfe8e569 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs @@ -29,8 +29,19 @@ async fn elastic_scaling_pov_recovery() -> Result<(), anyhow::Error> { let config = build_network_config().await?; let network = initialize_network(config).await?; - let alice = network.get_node("alice")?; let collator = network.get_node("collator")?; + collator.pause().await?; + + let recovery_target = network.get_node("recovery-target")?; + + // Wait for the node to be ready. We have the collator in between paused, this ensures that it + // doesn't produce any blocks in between. This is important as the recovery node needs to be up + // to observe the candidates on the relay chain, to recover them. + recovery_target.wait_until_is_up(120u64).await?; + + collator.resume().await?; + + let alice = network.get_node("alice")?; let relay_client: OnlineClient = alice.wait_client().await?; @@ -61,10 +72,8 @@ async fn elastic_scaling_pov_recovery() -> Result<(), anyhow::Error> { wait_until_timeout_elapses: false, }; - let name = "recovery-target"; - log::info!("Ensuring blocks are imported using PoV recovery by {name}"); - let result = network - .get_node(name)? 
+ log::info!("Ensuring blocks are imported using PoV recovery by {}", recovery_target.name()); + let result = recovery_target .wait_log_line_count_with_timeout( "Importing blocks retrieved using pov_recovery", false, @@ -73,7 +82,10 @@ async fn elastic_scaling_pov_recovery() -> Result<(), anyhow::Error> { .await?; if !result.success() { - return Err(anyhow!("Failed importing blocks using PoV recovery by {name}: {result:?}")); + return Err(anyhow!( + "Failed importing blocks using PoV recovery by {}: {result:?}", + recovery_target.name() + )); } log::info!("Test finished successfully"); From fc02b180a0854ff63389374e4c22b9e5dc4ea60f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 28 Nov 2025 23:27:14 +0100 Subject: [PATCH 207/312] Fix docs --- cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 6fe05dc243d23..3791fef6bdd07 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -497,12 +497,12 @@ pub async fn assert_para_is_registered( /// Genesis patch: /// ```json /// "configuration": { -/// "config": { -/// "scheduler_params": { -/// "num_cores": 2, -/// } -/// } -/// } +/// "config": { +/// "scheduler_params": { +/// "num_cores": 2, +/// } +/// } +/// } /// ``` /// /// Runs the relay chain with `2` cores and we also add two parachains. 
From 68f4d2276180f937fa1b8190cd7de08d4153ed90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 28 Nov 2025 23:27:31 +0100 Subject: [PATCH 208/312] More fixes --- .../zombie_ci/block_bundling/pov_recovery.rs | 25 ++++++++++++++----- 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs index cd48f70f2d57d..9f57e36b2f670 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs @@ -29,8 +29,19 @@ async fn block_bundling_pov_recovery() -> Result<(), anyhow::Error> { let config = build_network_config().await?; let network = initialize_network(config).await?; - let alice = network.get_node("alice")?; let collator = network.get_node("collator")?; + collator.pause().await?; + + let recovery_target = network.get_node("recovery-target")?; + + // Wait for the node to be ready. We have the collator in between paused, this ensures that it + // doesn't produce any blocks in between. This is important as the recovery node needs to be up + // to observe the candidates on the relay chain, to recover them. + recovery_target.wait_until_is_up(120u64).await?; + + collator.resume().await?; + + let alice = network.get_node("alice")?; let relay_client: OnlineClient = alice.wait_client().await?; @@ -61,10 +72,8 @@ async fn block_bundling_pov_recovery() -> Result<(), anyhow::Error> { wait_until_timeout_elapses: false, }; - let name = "recovery-target"; - log::info!("Ensuring blocks are imported using PoV recovery by {name}"); - let result = network - .get_node(name)? 
+	log::info!("Ensuring blocks are imported using PoV recovery by {}", recovery_target.name());
+	let result = recovery_target
 		.wait_log_line_count_with_timeout(
 			"Importing blocks retrieved using pov_recovery",
 			false,
@@ -73,7 +82,10 @@
 	.await?;
 
 	if !result.success() {
-		return Err(anyhow!("Failed importing blocks using PoV recovery by {name}: {result:?}"))
+		return Err(anyhow!(
+			"Failed importing blocks using PoV recovery by {}: {result:?}",
+			recovery_target.name()
+		))
 	}
 
 	log::info!("Test finished successfully");
@@ -167,6 +179,7 @@ async fn build_network_config() -> Result {
 			]))
 			.with_collator(|n| n.with_name("collator")
 				.with_args(vec![
+					("--reserved-nodes", "{{ZOMBIE:alice:multiaddr}}").into(),
 					("-laura=trace,runtime=info,cumulus-consensus=trace,consensus::common=trace,parachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug").into(),
 					("--disable-block-announcements").into(),
 					("--force-authoring").into(),

From f91dd0124d1d011882e1ada37bbf73f498d5458c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Fri, 28 Nov 2025 23:29:18 +0100
Subject: [PATCH 209/312] Remove copying from CI files

---
 .github/workflows/build-publish-images.yml |  3 ---
 .github/workflows/zombienet_cumulus.yml    | 11 -----------
 2 files changed, 14 deletions(-)

diff --git a/.github/workflows/build-publish-images.yml b/.github/workflows/build-publish-images.yml
index 4ac45af18051d..78cde52196cd2 100644
--- a/.github/workflows/build-publish-images.yml
+++ b/.github/workflows/build-publish-images.yml
@@ -153,9 +153,6 @@ jobs:
           mkdir -p ./artifacts
           mv ./target/release/test-parachain ./artifacts/.
           mkdir -p ./artifacts/zombienet
- mv ./target/release/wbuild/cumulus-test-runtime/wasm_binary_elastic_scaling.rs.compact.compressed.wasm ./artifacts/zombienet/. - mv ./target/release/wbuild/cumulus-test-runtime/wasm_binary_elastic_scaling_12s_slot.rs.compact.compressed.wasm ./artifacts/zombienet/. - name: tar run: tar -cvf artifacts.tar artifacts diff --git a/.github/workflows/zombienet_cumulus.yml b/.github/workflows/zombienet_cumulus.yml index 1bc05e398502a..173b8989c8903 100644 --- a/.github/workflows/zombienet_cumulus.yml +++ b/.github/workflows/zombienet_cumulus.yml @@ -87,17 +87,6 @@ jobs: github-token: ${{ secrets.GITHUB_TOKEN }} run-id: ${{ needs.preflight.outputs.BUILD_RUN_ID }} - - name: provide_wasm_binary - if: ${{ matrix.test.needs-wasm-binary }} - run: | - tar -xvf artifacts.tar - ls -ltr artifacts/* - cp ./artifacts/zombienet/wasm_binary_spec_version_incremented.rs.compact.compressed.wasm /tmp/ - cp ./artifacts/zombienet/wasm_binary_elastic_scaling.rs.compact.compressed.wasm /tmp/ - cp ./artifacts/zombienet/wasm_binary_elastic_scaling_12s_slot.rs.compact.compressed.wasm /tmp/ - ls -ltr /tmp - rm -rf artifacts - - name: zombienet_test timeout-minutes: 60 uses: ./.github/actions/zombienet-sdk From a6cf45d0ca37fa19ca6cde53686cfb51de9a545b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 29 Nov 2025 00:08:08 +0100 Subject: [PATCH 210/312] More log fixes --- cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 3791fef6bdd07..5ab6860433731 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -42,12 +42,12 @@ const WAIT_MAX_BLOCKS_FOR_SESSION: u32 = 50; /// Genesis patch: /// ```json /// "configuration": { -/// "config": { -/// "scheduler_params": { -/// "num_cores": 2, -/// } -/// } -/// } +/// "config": { 
+/// "scheduler_params": { +/// "num_cores": 2, +/// } +/// } +/// } /// ``` /// /// Runs the relay chain with `2` cores and we also add two parachains. From 0d4373dbc23f0d5c9ed4172644d4e4ea6596fc33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 29 Nov 2025 00:14:51 +0100 Subject: [PATCH 211/312] Switch to builder pattern --- .../consensus/aura/src/collators/mod.rs | 8 +- .../aura/src/equivocation_import_queue.rs | 8 +- cumulus/client/consensus/common/src/tests.rs | 33 ++- .../src/validate_block/tests.rs | 28 +- cumulus/test/client/src/block_builder.rs | 260 +++++++----------- .../test/service/benches/validate_block.rs | 9 +- .../service/benches/validate_block_glutton.rs | 16 +- 7 files changed, 160 insertions(+), 202 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index d9848891258ad..30e32ebe5e60d 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -328,7 +328,7 @@ mod tests { use cumulus_relay_chain_interface::PHash; use cumulus_test_client::{ runtime::{Block, Hash}, - Client, DefaultTestClientBuilderExt, InitBlockBuilder, TestClientBuilder, + BuildBlockBuilder, Client, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; @@ -367,7 +367,11 @@ mod tests { async fn build_and_import_block(client: &Client, included: Hash) -> Block { let sproof = sproof_with_parent_by_hash(client, included); - let block_builder = client.init_block_builder(None, sproof).block_builder; + let block_builder = client + .init_block_builder_builder() + .with_relay_sproof_builder(sproof) + .build() + .block_builder; let block = block_builder.build().unwrap().block; diff --git a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs index 
100687eb6b538..9f5ef7c1bff32 100644 --- a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs +++ b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs @@ -306,7 +306,7 @@ mod test { use super::*; use codec::Encode; use cumulus_test_client::{ - runtime::Block, seal_block, Client, InitBlockBuilder, TestClientBuilder, + runtime::Block, seal_block, BuildBlockBuilder, Client, TestClientBuilder, TestClientBuilderExt, }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; @@ -344,7 +344,11 @@ mod test { ..Default::default() }; - let block_builder = client.init_block_builder(Some(validation_data), sproof); + let block_builder = client + .init_block_builder_builder() + .with_validation_data(validation_data) + .with_relay_sproof_builder(sproof) + .build(); let block = block_builder.block_builder.build().unwrap(); let mut blocks = Vec::new(); diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index ff1c8ec56508b..1fe73da8b25ea 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -31,7 +31,7 @@ use cumulus_relay_chain_interface::{ }; use cumulus_test_client::{ runtime::{Block, Hash, Header}, - Backend, Client, InitBlockBuilder, TestClientBuilder, TestClientBuilderExt, + Backend, BuildBlockBuilder, Client, TestClientBuilder, TestClientBuilderExt, }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use futures::{channel::mpsc, executor::block_on, select, FutureExt, Stream, StreamExt}; @@ -314,19 +314,22 @@ fn sproof_with_parent(parent: HeadData) -> RelayStateSproofBuilder { x } -fn build_block( +fn build_block( builder: &B, sproof: RelayStateSproofBuilder, at: Option, timestamp: Option, relay_parent: Option, ) -> Block { - let cumulus_test_client::BlockBuilderAndSupportData { block_builder, .. 
} = match at { - Some(at) => match timestamp { - Some(ts) => builder.init_block_builder_with_timestamp(at, None, sproof, ts), - None => builder.init_block_builder_at(at, None, sproof), - }, - None => builder.init_block_builder(None, sproof), + let cumulus_test_client::BlockBuilderAndSupportData { block_builder, .. } = { + let mut bb = builder.init_block_builder_builder().with_relay_sproof_builder(sproof); + if let Some(at) = at { + bb = bb.at(at); + } + if let Some(ts) = timestamp { + bb = bb.with_timestamp(ts); + } + bb.build() }; let mut block = block_builder.build().unwrap().block; @@ -560,7 +563,12 @@ async fn follow_finalized_does_not_stop_on_unknown_block() { let unknown_block = { let sproof = sproof_with_parent_by_hash(&client, block.hash()); - let block_builder = client.init_block_builder_at(block.hash(), None, sproof).block_builder; + let block_builder = client + .init_block_builder_builder() + .at(block.hash()) + .with_relay_sproof_builder(sproof) + .build() + .block_builder; block_builder.build().unwrap().block }; @@ -615,7 +623,12 @@ async fn follow_new_best_sets_best_after_it_is_imported() { let unknown_block = { let sproof = sproof_with_parent_by_hash(&client, block.hash()); - let block_builder = client.init_block_builder_at(block.hash(), None, sproof).block_builder; + let block_builder = client + .init_block_builder_builder() + .at(block.hash()) + .with_relay_sproof_builder(sproof) + .build() + .block_builder; block_builder.build().unwrap().block }; diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 3653731b05d36..48ecfefc80a9e 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -25,8 +25,8 @@ use cumulus_test_client::{ self as test_runtime, Block, Hash, Header, SudoCall, SystemCall, TestPalletCall, UncheckedExtrinsic, WASM_BINARY, }, - seal_block, transfer, BlockData, 
BlockOrigin, BuildParachainBlockData, Client, - DefaultTestClientBuilderExt, HeadData, InitBlockBuilder, + seal_block, transfer, BlockData, BlockOrigin, BuildBlockBuilder, BuildParachainBlockData, + Client, DefaultTestClientBuilderExt, HeadData, Sr25519Keyring::{Alice, Bob, Charlie}, TestClientBuilder, TestClientBuilderExt, ValidationParams, }; @@ -151,7 +151,12 @@ fn build_block_with_witness( mut block_builder, persisted_validation_data, .. - } = client.init_block_builder_with_pre_digests(Some(validation_data), sproof_builder, pre_digests); + } = client + .init_block_builder_builder() + .with_validation_data(validation_data) + .with_relay_sproof_builder(sproof_builder) + .with_pre_digests(pre_digests) + .build(); extra_extrinsics.into_iter().for_each(|e| block_builder.push(e).unwrap()); @@ -206,14 +211,15 @@ fn build_multiple_blocks_with_witness( mut block_builder, persisted_validation_data: p_v_data, proof_recorder, - } = client.init_block_builder_with_ignored_nodes_and_pre_digests( - parent_head.hash(), - Some(validation_data.clone()), - sproof_builder.clone(), - timestamp, - ignored_nodes.clone(), - (pre_digests)(i), - ); + } = client + .init_block_builder_builder() + .at(parent_head.hash()) + .with_validation_data(validation_data.clone()) + .with_relay_sproof_builder(sproof_builder.clone()) + .with_timestamp(timestamp) + .with_ignored_nodes(ignored_nodes.clone()) + .with_pre_digests((pre_digests)(i)) + .build(); persisted_validation_data = Some(p_v_data); diff --git a/cumulus/test/client/src/block_builder.rs b/cumulus/test/client/src/block_builder.rs index 17e4d026f6646..69370bbeb2cac 100644 --- a/cumulus/test/client/src/block_builder.rs +++ b/cumulus/test/client/src/block_builder.rs @@ -21,7 +21,6 @@ use cumulus_primitives_parachain_inherent::{ParachainInherentData, INHERENT_IDEN use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use cumulus_test_runtime::{Block, GetLastTimestamp, Hash, Header}; use polkadot_primitives::{BlockNumber as 
PBlockNumber, Hash as PHash}; -use sc_block_builder::BlockBuilderBuilder; use sp_api::{ProofRecorder, ProofRecorderIgnoredNodes, ProvideRuntimeApi}; use sp_consensus_aura::{AuraApi, Slot}; use sp_externalities::Extensions; @@ -35,82 +34,96 @@ pub struct BlockBuilderAndSupportData<'a> { pub proof_recorder: ProofRecorder, } -/// An extension for the Cumulus test client to init a block builder. -pub trait InitBlockBuilder { - /// Init a specific block builder that works for the test runtime. - /// - /// This will automatically create and push the inherents for you to make the block - /// valid for the test runtime. - /// - /// You can use the relay chain state sproof builder to arrange required relay chain state or - /// just use a default one. The relay chain slot in the storage proof - /// will be adjusted to align with the parachain slot to pass validation. - /// - /// Returns the block builder and validation data for further usage. - fn init_block_builder( - &self, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - ) -> BlockBuilderAndSupportData<'_>; - - /// Init a specific block builder at a specific block that works for the test runtime. - /// - /// Same as [`InitBlockBuilder::init_block_builder`] besides that it takes a - /// [`type@Hash`] to say which should be the parent block of the block that is being build. - fn init_block_builder_at( - &self, - at: Hash, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - ) -> BlockBuilderAndSupportData<'_>; - - /// Init a specific block builder at a specific block that works for the test runtime. - /// - /// Same as [`InitBlockBuilder::init_block_builder_with_timestamp`] besides that it takes - /// `ignored_nodes` that instruct the proof recorder to not record these nodes. 
- fn init_block_builder_with_ignored_nodes( - &self, - at: Hash, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - timestamp: u64, - ignored_nodes: ProofRecorderIgnoredNodes, - ) -> BlockBuilderAndSupportData<'_>; - - /// Init a specific block builder with ignored nodes and pre-digests. - fn init_block_builder_with_ignored_nodes_and_pre_digests( - &self, - at: Hash, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - timestamp: u64, - ignored_nodes: ProofRecorderIgnoredNodes, - pre_digests: Vec, - ) -> BlockBuilderAndSupportData<'_>; - - /// Init a specific block builder using the given pre-digests. +/// Builder for creating a block builder with customizable parameters. +pub struct BlockBuilderBuilder<'a> { + client: &'a Client, + at: Option, + validation_data: Option>, + relay_sproof_builder: RelayStateSproofBuilder, + timestamp: Option, + ignored_nodes: Option>, + pre_digests: Vec, +} + +impl<'a> BlockBuilderBuilder<'a> { + fn new(client: &'a Client) -> Self { + Self { + client, + at: None, + validation_data: None, + relay_sproof_builder: Default::default(), + timestamp: None, + ignored_nodes: None, + pre_digests: Vec::new(), + } + } + + /// Set the parent block hash for the block builder. + pub fn at(mut self, at: Hash) -> Self { + self.at = Some(at); + self + } + + /// Set the validation data for the block builder. + pub fn with_validation_data( + mut self, + validation_data: PersistedValidationData, + ) -> Self { + self.validation_data = Some(validation_data); + self + } + + /// Set the relay state proof builder for the block builder. + pub fn with_relay_sproof_builder(mut self, relay_sproof_builder: RelayStateSproofBuilder) -> Self { + self.relay_sproof_builder = relay_sproof_builder; + self + } + + /// Set the timestamp for the block builder. + pub fn with_timestamp(mut self, timestamp: u64) -> Self { + self.timestamp = Some(timestamp); + self + } + + /// Set the ignored nodes for the proof recorder. 
+ pub fn with_ignored_nodes(mut self, ignored_nodes: ProofRecorderIgnoredNodes) -> Self { + self.ignored_nodes = Some(ignored_nodes); + self + } + + /// Set the pre-digest items for the block builder. + pub fn with_pre_digests(mut self, pre_digests: Vec) -> Self { + self.pre_digests = pre_digests; + self + } + + /// Build the block builder with the configured parameters. + pub fn build(self) -> BlockBuilderAndSupportData<'a> { + let at = self.at.unwrap_or_else(|| self.client.chain_info().best_hash); + init_block_builder( + self.client, + at, + self.validation_data, + self.relay_sproof_builder, + self.timestamp, + self.ignored_nodes, + Some(self.pre_digests), + ) + } +} + +/// An extension for the Cumulus test client to build a block builder. +pub trait BuildBlockBuilder { + /// Initialize a block builder builder that can be configured and built. /// - /// Same as [`InitBlockBuilder::init_block_builder`] besides that it takes vector of - /// [`DigestItem`]'s that are passed as pre-digest to the block builder. - fn init_block_builder_with_pre_digests( - &self, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - pre_digests: Vec, - ) -> BlockBuilderAndSupportData<'_>; - - /// Init a specific block builder that works for the test runtime. + /// This returns a builder that can be configured with various options like + /// parent block hash, validation data, relay state proof builder, timestamp, + /// ignored nodes, and pre-digests. Call `.build()` on the builder to create + /// the actual block builder. /// - /// Same as [`InitBlockBuilder::init_block_builder`] besides that it takes a - /// [`type@Hash`] to say which should be the parent block of the block that is being build and - /// it will use the given `timestamp` as input for the timestamp inherent. 
- fn init_block_builder_with_timestamp( - &self, - at: Hash, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - timestamp: u64, - ) -> BlockBuilderAndSupportData<'_>; + /// The builder will automatically create and push the inherents for you to make + /// the block valid for the test runtime. + fn init_block_builder_builder(&self) -> BlockBuilderBuilder<'_>; } fn init_block_builder( @@ -164,7 +177,7 @@ fn init_block_builder( let mut extra_extensions = Extensions::default(); extra_extensions.register(ProofSizeExt::new(proof_recorder.clone())); - let mut block_builder = BlockBuilderBuilder::new(client) + let mut block_builder = sc_block_builder::BlockBuilderBuilder::new(client) .on_parent_block(at) .fetch_parent_block_number(client) .unwrap() @@ -213,98 +226,9 @@ fn init_block_builder( } } -impl InitBlockBuilder for Client { - fn init_block_builder( - &self, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - ) -> BlockBuilderAndSupportData<'_> { - let chain_info = self.chain_info(); - self.init_block_builder_at(chain_info.best_hash, validation_data, relay_sproof_builder) - } - - fn init_block_builder_with_pre_digests( - &self, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - pre_digests: Vec, - ) -> BlockBuilderAndSupportData<'_> { - let chain_info = self.chain_info(); - init_block_builder( - self, - chain_info.best_hash, - validation_data, - relay_sproof_builder, - None, - None, - Some(pre_digests), - ) - } - - fn init_block_builder_at( - &self, - at: Hash, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - ) -> BlockBuilderAndSupportData<'_> { - init_block_builder(self, at, validation_data, relay_sproof_builder, None, None, None) - } - - fn init_block_builder_with_ignored_nodes( - &self, - at: Hash, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - timestamp: u64, - ignored_nodes: ProofRecorderIgnoredNodes, - ) -> 
BlockBuilderAndSupportData<'_> { - init_block_builder( - self, - at, - validation_data, - relay_sproof_builder, - Some(timestamp), - Some(ignored_nodes), - None, - ) - } - - fn init_block_builder_with_ignored_nodes_and_pre_digests( - &self, - at: Hash, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - timestamp: u64, - ignored_nodes: ProofRecorderIgnoredNodes, - pre_digests: Vec, - ) -> BlockBuilderAndSupportData<'_> { - init_block_builder( - self, - at, - validation_data, - relay_sproof_builder, - Some(timestamp), - Some(ignored_nodes), - Some(pre_digests), - ) - } - - fn init_block_builder_with_timestamp( - &self, - at: Hash, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - timestamp: u64, - ) -> BlockBuilderAndSupportData<'_> { - init_block_builder( - self, - at, - validation_data, - relay_sproof_builder, - Some(timestamp), - None, - None, - ) +impl BuildBlockBuilder for Client { + fn init_block_builder_builder(&self) -> BlockBuilderBuilder<'_> { + BlockBuilderBuilder::new(self) } } diff --git a/cumulus/test/service/benches/validate_block.rs b/cumulus/test/service/benches/validate_block.rs index ecfc824b571fa..bec5de746a451 100644 --- a/cumulus/test/service/benches/validate_block.rs +++ b/cumulus/test/service/benches/validate_block.rs @@ -22,7 +22,7 @@ use cumulus_primitives_core::{ relay_chain::AccountId, ParaId, PersistedValidationData, ValidationParams, }; use cumulus_test_client::{ - generate_extrinsic_with_pair, BuildParachainBlockData, InitBlockBuilder, TestClientBuilder, + generate_extrinsic_with_pair, BuildBlockBuilder, BuildParachainBlockData, TestClientBuilder, ValidationResult, }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; @@ -111,8 +111,11 @@ fn benchmark_block_validation(c: &mut Criterion) { ..Default::default() }; - let cumulus_test_client::BlockBuilderAndSupportData { mut block_builder, .. 
} = - client.init_block_builder(Some(validation_data), sproof_builder.clone()); + let cumulus_test_client::BlockBuilderAndSupportData { mut block_builder, .. } = client + .init_block_builder_builder() + .with_validation_data(validation_data) + .with_relay_sproof_builder(sproof_builder.clone()) + .build(); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); diff --git a/cumulus/test/service/benches/validate_block_glutton.rs b/cumulus/test/service/benches/validate_block_glutton.rs index 06ad739965146..b18c8eb376f08 100644 --- a/cumulus/test/service/benches/validate_block_glutton.rs +++ b/cumulus/test/service/benches/validate_block_glutton.rs @@ -20,8 +20,8 @@ use core::time::Duration; use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use cumulus_primitives_core::{relay_chain::AccountId, PersistedValidationData, ValidationParams}; use cumulus_test_client::{ - generate_extrinsic_with_pair, BlockBuilderAndSupportData, BuildParachainBlockData, Client, - InitBlockBuilder, ParachainBlockData, TestClientBuilder, ValidationResult, + generate_extrinsic_with_pair, BlockBuilderAndSupportData, BuildBlockBuilder, + BuildParachainBlockData, Client, ParachainBlockData, TestClientBuilder, ValidationResult, }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use cumulus_test_runtime::{Block, GluttonCall, Header, SudoCall}; @@ -88,8 +88,10 @@ fn benchmark_block_validation(c: &mut Criterion) { parent_head: parent_header.encode().into(), ..Default::default() }; - let BlockBuilderAndSupportData { block_builder, .. } = - client.init_block_builder(Some(validation_data), Default::default()); + let BlockBuilderAndSupportData { block_builder, .. 
} = client + .init_block_builder_builder() + .with_validation_data(validation_data) + .build(); let parachain_block = block_builder.build_parachain_block(*parent_header.state_root()); let proof_size_in_kb = parachain_block.proof().encoded_size() as f64 / 1024f64; @@ -198,8 +200,10 @@ fn set_glutton_parameters( ); extrinsics.push(set_storage); - let BlockBuilderAndSupportData { mut block_builder, .. } = - client.init_block_builder(Some(validation_data), Default::default()); + let BlockBuilderAndSupportData { mut block_builder, .. } = client + .init_block_builder_builder() + .with_validation_data(validation_data) + .build(); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); From d6ffc3d09e68e29aef617a2fb4506ff88f189b81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 29 Nov 2025 12:04:34 +0100 Subject: [PATCH 212/312] Fix test --- .../pallets/parachain-system/src/validate_block/tests.rs | 7 +++++-- cumulus/test/client/src/block_builder.rs | 5 ++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 48ecfefc80a9e..ad2fd42e91896 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -40,6 +40,7 @@ use sp_runtime::{ traits::{BlakeTwo256, Block as BlockT, Header as HeaderT}, DigestItem, }; +use sp_tracing::capture_test_logs; use sp_trie::{proof_size_extension::ProofSizeExt, recorder::IgnoredNodes}; use std::{env, process::Command}; @@ -792,7 +793,8 @@ fn rejects_multiple_blocks_per_pov_when_applying_runtime_upgrade() { #[test] fn validate_block_rejects_incomplete_bundle() { - use sp_tracing::capture_test_logs; + // Required to have the global logging enabled, so we can capture it below. 
+ sp_tracing::try_init_simple(); let (client, parent_head) = create_elastic_scaling_test_client(); @@ -819,7 +821,8 @@ fn validate_block_rejects_incomplete_bundle() { }); assert!( log_capture.contains("Last block in PoV must have maybe_last=true"), - "Expected log about missing maybe_last, got: {log_capture}" + "Expected log about missing maybe_last, got: {}", + log_capture.get_logs() ); // Validation with both blocks should succeed diff --git a/cumulus/test/client/src/block_builder.rs b/cumulus/test/client/src/block_builder.rs index 69370bbeb2cac..106d38d17564c 100644 --- a/cumulus/test/client/src/block_builder.rs +++ b/cumulus/test/client/src/block_builder.rs @@ -74,7 +74,10 @@ impl<'a> BlockBuilderBuilder<'a> { } /// Set the relay state proof builder for the block builder. - pub fn with_relay_sproof_builder(mut self, relay_sproof_builder: RelayStateSproofBuilder) -> Self { + pub fn with_relay_sproof_builder( + mut self, + relay_sproof_builder: RelayStateSproofBuilder, + ) -> Self { self.relay_sproof_builder = relay_sproof_builder; self } From 7f45fec675492c144b2b110c12037193ade095a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 29 Nov 2025 22:49:40 +0100 Subject: [PATCH 213/312] Fix --- cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 5ab6860433731..48b1100b69817 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -53,7 +53,7 @@ const WAIT_MAX_BLOCKS_FOR_SESSION: u32 = 50; /// Runs the relay chain with `2` cores and we also add two parachains. 
/// To assign these extra `2` cores, the call would look like this: /// -/// ```rust +/// ```ignore /// create_assign_core_call(&[(0, 2400), (1, 2400)]) /// ``` /// @@ -508,7 +508,7 @@ pub async fn assert_para_is_registered( /// Runs the relay chain with `2` cores and we also add two parachains. /// To assign these extra `2` cores, the call would look like this: /// -/// ```rust +/// ```ignore /// assign_core(&relay_node, PARA_ID, vec![0, 1]) /// ``` /// From 9e00026ca6e7adf5ce3f35a334fc4dd571b6c43e Mon Sep 17 00:00:00 2001 From: "cmd[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 30 Nov 2025 09:20:26 +0000 Subject: [PATCH 214/312] Update from github-actions[bot] running command 'fmt' --- .../parachain-system/src/validate_block/implementation.rs | 4 +++- cumulus/test/runtime/Cargo.toml | 4 ++-- .../zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs | 3 +-- substrate/client/consensus/manual-seal/Cargo.toml | 2 +- substrate/primitives/trie/Cargo.toml | 2 +- templates/parachain/node/Cargo.toml | 2 +- 6 files changed, 9 insertions(+), 8 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index ce5d4fe77fc98..14fd852613f1c 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -32,7 +32,9 @@ use sp_core::storage::{well_known_keys, ChildInfo, StateVersion}; use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::{hashing::blake2_128, KillStorageResult}; use sp_runtime::{ - traits::{Block as BlockT, ExtrinsicCall, Hash as HashT, HashingFor, Header as HeaderT, LazyBlock}, + traits::{ + Block as BlockT, ExtrinsicCall, Hash as HashT, HashingFor, Header as HeaderT, LazyBlock, + }, DigestItem, }; use sp_state_machine::OverlayedChanges; diff --git a/cumulus/test/runtime/Cargo.toml 
b/cumulus/test/runtime/Cargo.toml index 90c10c0736a6d..a0b0944e8ba1e 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -47,9 +47,9 @@ sp-version = { workspace = true } cumulus-pallet-aura-ext = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-weight-reclaim = { workspace = true } -cumulus-primitives-storage-weight-reclaim = { workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } parachain-info = { workspace = true } [build-dependencies] @@ -95,7 +95,7 @@ std = [ "sp-transaction-pool/std", "sp-version/std", "substrate-wasm-builder", - "tracing/std" + "tracing/std", ] increment-spec-version = [] # A runtime which expects to build behind the relay chain tip. diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs index 989bfa7245da6..f8a5123475b4d 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/runtime_upgrade.rs @@ -39,8 +39,7 @@ async fn runtime_upgrade() -> Result<(), anyhow::Error> { log::info!("Performing runtime upgrade"); - let call = - create_runtime_upgrade_call(WASM_RUNTIME_UPGRADE.expect("Wasm runtime not build")); + let call = create_runtime_upgrade_call(WASM_RUNTIME_UPGRADE.expect("Wasm runtime not build")); submit_extrinsic_and_wait_for_finalization_success(&charlie_client, &call, &dev::alice()) .await?; diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml index c06cf8db740a8..78d69fea97f9b 100644 --- a/substrate/client/consensus/manual-seal/Cargo.toml +++ b/substrate/client/consensus/manual-seal/Cargo.toml @@ -43,8 +43,8 @@ sp-externalities = { workspace = true, default-features = true } sp-inherents = { workspace = 
true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-trie = { workspace = true, default-features = true } sp-timestamp = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } thiserror = { workspace = true } [dev-dependencies] diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index 0a5e8c119d085..00f29de25dee4 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -22,7 +22,7 @@ harness = false [dependencies] ahash = { optional = true, workspace = true } -codec = { features = [ "derive" ], workspace = true } +codec = { features = ["derive"], workspace = true } foldhash = { workspace = true } hash-db = { workspace = true } hashbrown = { workspace = true } diff --git a/templates/parachain/node/Cargo.toml b/templates/parachain/node/Cargo.toml index 93d3a9b883f47..ef8ca6605ba39 100644 --- a/templates/parachain/node/Cargo.toml +++ b/templates/parachain/node/Cargo.toml @@ -14,8 +14,8 @@ build = "build.rs" workspace = true [dependencies] -codec = { workspace = true, default-features = true } clap = { features = ["derive"], workspace = true } +codec = { workspace = true, default-features = true } color-print = { workspace = true } docify = { workspace = true } futures = { workspace = true } From d932d48c82f330b2ce7a5dbe48851f079be01e68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sun, 30 Nov 2025 10:41:19 +0100 Subject: [PATCH 215/312] Fix feature propagation --- cumulus/pallets/parachain-system/Cargo.toml | 1 + cumulus/test/client/Cargo.toml | 1 + 2 files changed, 2 insertions(+) diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 50c262ad8c733..27ef0f9e14062 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml 
@@ -119,6 +119,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-primitives/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index 476c7abe46732..d1041f264e0b1 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -52,6 +52,7 @@ cumulus-test-service = { workspace = true } [features] runtime-benchmarks = [ + "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-weight-reclaim/runtime-benchmarks", "cumulus-primitives-core/runtime-benchmarks", "cumulus-test-service/runtime-benchmarks", From 98d81b3220085959c63246721f3c5b4e1647dd4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sun, 30 Nov 2025 10:50:02 +0100 Subject: [PATCH 216/312] Fixes --- Cargo.lock | 1 + cumulus/pallets/parachain-system/Cargo.toml | 2 ++ cumulus/test/runtime/Cargo.toml | 1 + substrate/primitives/block-builder/Cargo.toml | 8 +++++++- umbrella/Cargo.toml | 7 ++++++- umbrella/src/lib.rs | 4 ++++ 6 files changed, 21 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bbe82a9ed7658..499c3cc32de44 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16547,6 +16547,7 @@ dependencies = [ "cumulus-client-network", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", + "cumulus-client-proof-size-recording", "cumulus-client-service", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 27ef0f9e14062..2d5d54def9f6f 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -109,6 +109,7 @@ std = [ "trie-db/std", "xcm-builder/std", "xcm/std", + "frame-executive/std" ] 
runtime-benchmarks = [ @@ -132,4 +133,5 @@ try-runtime = [ "pallet-message-queue/try-runtime", "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", + "frame-executive/try-runtime" ] diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index a0b0944e8ba1e..5ca6927df9f43 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -96,6 +96,7 @@ std = [ "sp-version/std", "substrate-wasm-builder", "tracing/std", + "cumulus-primitives-storage-weight-reclaim/std" ] increment-spec-version = [] # A runtime which expects to build behind the relay chain tip. diff --git a/substrate/primitives/block-builder/Cargo.toml b/substrate/primitives/block-builder/Cargo.toml index 95c5e2fc80726..47367e287dfd8 100644 --- a/substrate/primitives/block-builder/Cargo.toml +++ b/substrate/primitives/block-builder/Cargo.toml @@ -24,4 +24,10 @@ sp-runtime = { workspace = true } [features] default = ["std"] -std = ["sp-api/std", "sp-inherents/std", "sp-runtime/std"] +std = [ + "sp-api/std", + "sp-inherents/std", + "sp-runtime/std", + "codec/std", + "scale-info/std" +] diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index b05d15f0cf0b5..adcc651e83f18 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -866,6 +866,7 @@ node = [ "cumulus-client-network", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", + "cumulus-client-proof-size-recording", "cumulus-client-service", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", @@ -2225,7 +2226,6 @@ default-features = false optional = true path = "../cumulus/client/consensus/common" - [dependencies.cumulus-client-consensus-relay-chain] default-features = false optional = true @@ -2246,6 +2246,11 @@ default-features = false optional = true path = "../cumulus/client/pov-recovery" +[dependencies.cumulus-client-proof-size-recording] +default-features = false +optional = true +path = "../cumulus/client/proof-size-recording" + 
[dependencies.cumulus-client-service] default-features = false optional = true diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index 125939c2edf76..860212e6d8195 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -109,6 +109,10 @@ pub use cumulus_client_parachain_inherent; #[cfg(feature = "cumulus-client-pov-recovery")] pub use cumulus_client_pov_recovery; +/// Storage proof size recording utilities. +#[cfg(feature = "cumulus-client-proof-size-recording")] +pub use cumulus_client_proof_size_recording; + /// Common functions used to assemble the components of a parachain node. #[cfg(feature = "cumulus-client-service")] pub use cumulus_client_service; From 6afc27544f869fd2afa58134567780e1d048560f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 1 Dec 2025 17:24:13 +0100 Subject: [PATCH 217/312] Remove merge leftover --- .../lib/src/nodes/manual_seal.rs | 294 ------------------ 1 file changed, 294 deletions(-) delete mode 100644 cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs b/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs deleted file mode 100644 index 440dbf8763cdb..0000000000000 --- a/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::common::{ - rpc::BuildRpcExtensions as BuildRpcExtensionsT, - spec::{BaseNodeSpec, BuildImportQueue, ClientBlockImport, NodeSpec as NodeSpecT}, - types::{Hash, ParachainBlockImport, ParachainClient}, -}; -use codec::Encode; -use cumulus_client_parachain_inherent::{MockValidationDataInherentDataProvider, MockXcmConfig}; -use cumulus_client_service::ParachainTracingExecuteBlock; -use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::CollectCollationInfo; -use futures::FutureExt; -use polkadot_primitives::UpgradeGoAhead; -use sc_client_api::Backend; -use sc_consensus::{DefaultImportQueue, LongestChain}; -use sc_consensus_manual_seal::rpc::{ManualSeal, ManualSealApiServer}; -use sc_network::NetworkBackend; -use sc_service::{Configuration, PartialComponents, TaskManager}; -use sc_telemetry::TelemetryHandle; -use sc_transaction_pool_api::OffchainTransactionPoolFactory; -use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_runtime::traits::Header; -use std::{marker::PhantomData, sync::Arc}; - -pub struct ManualSealNode(PhantomData); - -impl - BuildImportQueue< - NodeSpec::Block, - NodeSpec::RuntimeApi, - Arc>, - > for ManualSealNode -{ - fn build_import_queue( - client: Arc>, - _block_import: ParachainBlockImport< - NodeSpec::Block, - Arc>, - >, - config: &Configuration, - _telemetry_handle: Option, - task_manager: &TaskManager, - ) -> sc_service::error::Result> { - Ok(sc_consensus_manual_seal::import_queue( - Box::new(client.clone()), - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - )) - } -} - -impl BaseNodeSpec for ManualSealNode { - type Block = NodeSpec::Block; - type RuntimeApi = NodeSpec::RuntimeApi; - type BuildImportQueue = Self; - type InitBlockImport = ClientBlockImport; -} - -impl ManualSealNode { - pub fn new() -> Self { - Self(Default::default()) - } - - pub fn start_node( - &self, - 
mut config: Configuration, - block_time: u64, - ) -> sc_service::error::Result - where - Net: NetworkBackend, - { - let PartialComponents { - client, - backend, - mut task_manager, - import_queue, - keystore_container, - select_chain: _, - transaction_pool, - other: (_, mut telemetry, _, _), - } = Self::new_partial(&config)?; - let select_chain = LongestChain::new(backend.clone()); - - let para_id = - Self::parachain_id(&client, &config).ok_or("Failed to retrieve the parachain id")?; - - // Since this is a dev node, prevent it from connecting to peers. - config.network.default_peers_set.in_peers = 0; - config.network.default_peers_set.out_peers = 0; - let net_config = sc_network::config::FullNetworkConfiguration::<_, _, Net>::new( - &config.network, - config.prometheus_config.as_ref().map(|cfg| cfg.registry.clone()), - ); - let metrics = Net::register_notification_metrics( - config.prometheus_config.as_ref().map(|cfg| &cfg.registry), - ); - - let (network, system_rpc_tx, tx_handler_controller, sync_service) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - net_config, - block_announce_validator_builder: None, - warp_sync_config: None, - block_relay: None, - metrics, - })?; - - if config.offchain_worker.enabled { - let offchain_workers = - sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { - runtime_api_provider: client.clone(), - keystore: Some(keystore_container.keystore()), - offchain_db: backend.offchain_storage(), - transaction_pool: Some(OffchainTransactionPoolFactory::new( - transaction_pool.clone(), - )), - network_provider: Arc::new(network.clone()), - is_validator: config.role.is_authority(), - enable_http_requests: true, - custom_extensions: move |_| vec![], - })?; - task_manager.spawn_handle().spawn( - "offchain-workers-runner", - "offchain-work", - 
offchain_workers.run(client.clone(), task_manager.spawn_handle()).boxed(), - ); - } - - let proposer = sc_basic_authorship::ProposerFactory::new( - task_manager.spawn_handle(), - client.clone(), - transaction_pool.clone(), - None, - None, - ); - - let (manual_seal_sink, manual_seal_stream) = futures::channel::mpsc::channel(1024); - let mut manual_seal_sink_clone = manual_seal_sink.clone(); - task_manager - .spawn_essential_handle() - .spawn("block_authoring", None, async move { - loop { - futures_timer::Delay::new(std::time::Duration::from_millis(block_time)).await; - manual_seal_sink_clone - .try_send(sc_consensus_manual_seal::EngineCommand::SealNewBlock { - create_empty: true, - finalize: true, - parent_hash: None, - sender: None, - }) - .unwrap(); - } - }); - - let client_for_cidp = client.clone(); - let params = sc_consensus_manual_seal::ManualSealParams { - block_import: client.clone(), - env: proposer, - client: client.clone(), - pool: transaction_pool.clone(), - select_chain, - commands_stream: Box::pin(manual_seal_stream), - consensus_data_provider: None, - create_inherent_data_providers: move |block: Hash, ()| { - let current_para_head = client_for_cidp - .header(block) - .expect("Header lookup should succeed") - .expect("Header passed in as parent should be present in backend."); - - let should_send_go_ahead = client_for_cidp - .runtime_api() - .collect_collation_info(block, ¤t_para_head) - .map(|info| info.new_validation_code.is_some()) - .unwrap_or_default(); - - // The API version is relevant here because the constraints in the runtime changed - // in https://github.com/paritytech/polkadot-sdk/pull/6825. In general, the logic - // here assumes that we are using the aura-ext consensushook in the parachain - // runtime. 
- let requires_relay_progress = client_for_cidp - .runtime_api() - .has_api_with::, _>( - block, - |version| version > 1, - ) - .ok() - .unwrap_or_default(); - - let current_para_block_head = - Some(polkadot_primitives::HeadData(current_para_head.encode())); - let client_for_xcm = client_for_cidp.clone(); - async move { - use sp_runtime::traits::UniqueSaturatedInto; - - let mocked_parachain = MockValidationDataInherentDataProvider { - // When using manual seal we start from block 0, and it's very unlikely to - // reach a block number > u32::MAX. - current_para_block: UniqueSaturatedInto::::unique_saturated_into( - *current_para_head.number(), - ), - para_id, - current_para_block_head, - relay_offset: 0, - relay_blocks_per_para_block: requires_relay_progress - .then(|| 1) - .unwrap_or_default(), - para_blocks_per_relay_epoch: 10, - relay_randomness_config: (), - xcm_config: MockXcmConfig::new(&*client_for_xcm, block, Default::default()), - raw_downward_messages: vec![], - raw_horizontal_messages: vec![], - additional_key_values: None, - upgrade_go_ahead: should_send_go_ahead.then(|| { - log::info!( - "Detected pending validation code, sending go-ahead signal." - ); - UpgradeGoAhead::GoAhead - }), - }; - Ok(( - // This is intentional, as the runtime that we expect to run against this - // will never receive the aura-related inherents/digests, and providing - // real timestamps would cause aura <> timestamp checking to fail. 
- sp_timestamp::InherentDataProvider::new(sp_timestamp::Timestamp::new(0)), - mocked_parachain, - )) - } - }, - }; - let authorship_future = sc_consensus_manual_seal::run_manual_seal(params); - task_manager.spawn_essential_handle().spawn_blocking( - "manual-seal", - None, - authorship_future, - ); - let rpc_extensions_builder = { - let client = client.clone(); - let transaction_pool = transaction_pool.clone(); - let backend_for_rpc = backend.clone(); - - Box::new(move |_| { - let mut module = NodeSpec::BuildRpcExtensions::build_rpc_extensions( - client.clone(), - backend_for_rpc.clone(), - transaction_pool.clone(), - None, - )?; - module - .merge(ManualSeal::new(manual_seal_sink.clone()).into_rpc()) - .map_err(|e| sc_service::Error::Application(e.into()))?; - Ok(module) - }) - }; - - let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { - network, - client: client.clone(), - keystore: keystore_container.keystore(), - task_manager: &mut task_manager, - transaction_pool: transaction_pool.clone(), - rpc_builder: rpc_extensions_builder, - backend, - system_rpc_tx, - tx_handler_controller, - sync_service, - config, - telemetry: telemetry.as_mut(), - tracing_execute_block: Some(Arc::new(ParachainTracingExecuteBlock::new( - client.clone(), - ))), - })?; - - Ok(task_manager) - } -} From 4792e10418e5fb529c1eddc9ada49923a5db64ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 1 Dec 2025 17:26:42 +0100 Subject: [PATCH 218/312] Make clippy happy --- cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs | 6 +++--- polkadot/node/service/src/fake_runtime_api.rs | 1 - .../frame/benchmarking-cli/src/overhead/fake_runtime_api.rs | 1 - 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index c67e97f44afc6..5165d1c3bae6f 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ 
b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -294,7 +294,7 @@ pub async fn assert_para_blocks_throughput( .subscribe_finalized() .await? .try_filter(|b| { - futures::future::ready(find_core_info(b).map_or(false, |info| { + futures::future::ready(find_core_info(b).is_ok_and(|info| { expected_candidates_per_relay_block.contains(&(info.number_of_cores.0 as usize)) })) }) @@ -377,8 +377,8 @@ pub async fn assert_para_blocks_throughput( .filter_map(|i| { finalized_parachain_blocks.iter().rev().find_map(|p| { (BlakeTwo256::hash_of(p.header()) == i.descriptor.para_head()).then(|| { - find_core_info(&p) - .and_then(|c| find_relay_block_identifier(&p).map(|rbi| (c, rbi))) + find_core_info(p) + .and_then(|c| find_relay_block_identifier(p).map(|rbi| (c, rbi))) }) }) }) diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index 7b10d52d1f123..f43940c8474f2 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -103,7 +103,6 @@ sp_api::impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { unimplemented!() } - } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/fake_runtime_api.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/fake_runtime_api.rs index dde8cc7e07088..933a5d13fc51b 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/fake_runtime_api.rs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/fake_runtime_api.rs @@ -84,7 +84,6 @@ sp_api::impl_runtime_apis! 
{ fn check_inherents(_: ::LazyBlock, _: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { unimplemented!() } - } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { From 45c8673191e36d026cde277b5390b1ddf49d882f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 1 Dec 2025 22:58:25 +0100 Subject: [PATCH 219/312] Fix tests --- .../parachain-system/src/block_weight/mock.rs | 2 +- cumulus/test/runtime/src/lib.rs | 2 +- .../zombie_ci/block_bundling/pov_recovery.rs | 2 +- .../block_bundling/three_cores_glutton.rs | 8 ++++---- .../zombie_ci/elastic_scaling/pov_recovery.rs | 2 +- .../elastic_scaling/upgrade_to_3_cores.rs | 18 ++++++------------ .../elastic_scaling/slot_based_12cores.rs | 7 +------ .../tests/elastic_scaling/slot_based_3cores.rs | 5 ++--- .../functional/approval_voting_coalescing.rs | 6 ++---- .../tests/functional/dispute_old_finalized.rs | 9 ++------- .../spam_statement_distribution_requests.rs | 6 ++++-- .../tests/functional/validator_disabling.rs | 8 ++------ 12 files changed, 27 insertions(+), 48 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index ac1a9bcdd4845..7dba292f55de3 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -156,7 +156,7 @@ pub mod test_pallet { Ok(()) } - #[pallet::weight((_weight.clone(), DispatchClass::Normal))] + #[pallet::weight((*_weight, DispatchClass::Normal))] pub fn use_weight(_: OriginFor, _weight: Weight) -> DispatchResult { Ok(()) } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 4942a5aa45084..ee841f3e6ce40 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -666,7 +666,7 @@ impl_runtime_apis! { } // "Elastic scaling" should run with the fallback method. 
- #[cfg(not(feature = "elastic-scaling"))] + #[cfg(any(not(feature = "elastic-scaling"), feature = "std"))] impl cumulus_primitives_core::TargetBlockRate for Runtime { fn target_block_rate() -> u32 { BLOCK_PROCESSING_VELOCITY diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs index 9f57e36b2f670..70709ee8e724b 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs @@ -179,7 +179,7 @@ async fn build_network_config() -> Result { ])) .with_collator(|n| n.with_name("collator") .with_args(vec![ - ("--reserved-nodes", "{{ZOMBIE:alice:multiaddr}}").into() + ("--reserved-nodes", "{{ZOMBIE:alice:multiaddr}}").into(), ("-laura=trace,runtime=info,cumulus-consensus=trace,consensus::common=trace,parachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug").into(), ("--disable-block-announcements").into(), ("--force-authoring").into(), diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs index ecf22dcdbc75a..6668968bd10cf 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs @@ -66,13 +66,13 @@ async fn block_bundling_three_cores_glutton() -> Result<(), anyhow::Error> { log::info!("Test finished successfully - 72 blocks produced with 3 cores and glutton"); assign_cores(relay_node, PARA_ID, vec![2, 3]).await?; - assert_para_throughput(&relay_client, 15, [(ParaId::from(PARA_ID), 39..46)]).await?; - assert_finality_lag(¶_node_elastic.wait_client().await?, 20).await?; + assert_para_throughput(&relay_client, 15, [(ParaId::from(PARA_ID), 39..46)], 
[]).await?; + assert_finality_lag(¶_node.wait_client().await?, 20).await?; assign_cores(relay_node, PARA_ID, vec![4, 5, 6]).await?; - assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 52..61)]).await?; - assert_finality_lag(¶_node_elastic.wait_client().await?, 30).await?; + assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 52..61)], []).await?; + assert_finality_lag(¶_node.wait_client().await?, 30).await?; log::info!("Test finished successfully"); Ok(()) } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs index 5ce69095aff89..d26b09ae90794 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs @@ -48,7 +48,7 @@ async fn elastic_scaling_pov_recovery() -> Result<(), anyhow::Error> { assign_cores(alice, PARA_ID, vec![0, 1]).await?; log::info!("Ensuring parachain making progress"); - assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 40..65)]).await?; + assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 40..65)], []).await?; // We want to make sure that none of the consensus hook checks fail, even if the chain makes // progress. If below log line occurred 1 or more times then test failed. diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs index 579f6b23f1ffb..27ce974cc34c2 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs @@ -1,16 +1,8 @@ // Copyright (C) Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 -use anyhow::anyhow; -use cumulus_test_runtime::{ - elastic_scaling::WASM_BINARY as WASM_WITH_ELASTIC_SCALING, - elastic_scaling_12s_slot::WASM_BINARY as WASM_WITH_ELASTIC_SCALING_12S_SLOT, -}; -use serde_json::json; -use std::time::Duration; - use crate::utils::initialize_network; - +use anyhow::anyhow; use cumulus_test_runtime::{ elastic_scaling::WASM_BINARY_BLOATY as WASM_ELASTIC_SCALING, elastic_scaling_12s_slot::WASM_BINARY_BLOATY as WASM_ELASTIC_SCALING_12S_SLOT, @@ -21,6 +13,8 @@ use cumulus_zombienet_sdk_helpers::{ }; use polkadot_primitives::Id as ParaId; use rstest::rstest; +use serde_json::json; +use std::time::Duration; use zombienet_sdk::{ subxt::{OnlineClient, PolkadotConfig}, subxt_signer::sr25519::dev, @@ -55,10 +49,10 @@ async fn elastic_scaling_upgrade_to_3_cores( if async_backing { log::info!("Ensuring parachain makes progress making 6s blocks"); - assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 15..21)]).await?; + assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 15..21)], []).await?; } else { log::info!("Ensuring parachain makes progress making 12s blocks"); - assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 7..12)]).await?; + assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 7..12)], []).await?; } assign_cores(alice, PARA_ID, vec![1, 2]).await?; @@ -104,7 +98,7 @@ async fn elastic_scaling_upgrade_to_3_cores( ); log::info!("Ensure elastic scaling works, 3 blocks should be produced in each 6s slot"); - assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 50..61)]).await?; + assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 50..61)], []).await?; Ok(()) } diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_12cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_12cores.rs index 9cfcab0b77150..f96c09eaae311 100644 --- 
a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_12cores.rs +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_12cores.rs @@ -106,12 +106,7 @@ async fn slot_based_12cores_test() -> Result<(), anyhow::Error> { // change will be counted. // Since the calculated backed candidate count is theoretical and the CI tests are observed to // occasionally fail, let's apply 15% tolerance to the expected range: 170 - 15% = 144 - assert_para_throughput( - &relay_client, - 15, - [(ParaId::from(2300), 153..181)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&relay_client, 15, [(ParaId::from(2300), 153..181)], []).await?; // Expect that `collator-5` claims at least 3 slots during this run. let result = para_node diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs index 4f53125a13c09..ddfce62db34f0 100644 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs @@ -115,9 +115,8 @@ async fn slot_based_3cores_test() -> Result<(), anyhow::Error> { assert_para_throughput( &relay_client, 15, - [(ParaId::from(2100), 35..46), (ParaId::from(2200), 35..46)] - .into_iter() - .collect(), + [(ParaId::from(2100), 35..46), (ParaId::from(2200), 35..46)], + [], ) .await?; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/approval_voting_coalescing.rs b/polkadot/zombienet-sdk-tests/tests/functional/approval_voting_coalescing.rs index 1402f32eb2d4e..30cde7ebff29d 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/approval_voting_coalescing.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/approval_voting_coalescing.rs @@ -4,7 +4,6 @@ // Test that checks approval voting coalescing does not lag finality. 
use anyhow::anyhow; - use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_para_throughput}; use polkadot_primitives::Id as ParaId; use serde_json::json; @@ -88,9 +87,8 @@ async fn approval_voting_coalescing_test() -> Result<(), anyhow::Error> { (ParaId::from(2005), 11..35), (ParaId::from(2006), 11..35), (ParaId::from(2007), 11..35), - ] - .into_iter() - .collect(), + ], + [], ) .await?; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/dispute_old_finalized.rs b/polkadot/zombienet-sdk-tests/tests/functional/dispute_old_finalized.rs index 8d8049debfb74..c9c87807c31f8 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/dispute_old_finalized.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/dispute_old_finalized.rs @@ -18,7 +18,6 @@ // concluded. use anyhow::anyhow; - use cumulus_zombienet_sdk_helpers::assert_para_throughput; use serde_json::json; use tokio::time::Duration; @@ -108,12 +107,8 @@ async fn dispute_old_finalized() -> Result<(), anyhow::Error> { let malus = network.get_node("malus")?; log::info!("Waiting for parablocks to be produced"); - assert_para_throughput( - &relay_client, - 20, - [(polkadot_primitives::Id::from(2000), 10..30)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&relay_client, 20, [(polkadot_primitives::Id::from(2000), 10..30)], []) + .await?; let result = malus .wait_log_line_count_with_timeout( diff --git a/polkadot/zombienet-sdk-tests/tests/functional/spam_statement_distribution_requests.rs b/polkadot/zombienet-sdk-tests/tests/functional/spam_statement_distribution_requests.rs index 0e346f658d38b..318784917e38b 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/spam_statement_distribution_requests.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/spam_statement_distribution_requests.rs @@ -116,7 +116,8 @@ async fn spam_statement_distribution_requests_test() -> Result<(), anyhow::Error assert_para_throughput( &relay_client, 2, - [(ParaId::from(2000), 2..3), 
(ParaId::from(2001), 2..3)].into_iter().collect(), + [(ParaId::from(2000), 2..3), (ParaId::from(2001), 2..3)], + [], ) .await?; @@ -134,7 +135,8 @@ async fn spam_statement_distribution_requests_test() -> Result<(), anyhow::Error assert_para_throughput( &relay_client, 10, - [(ParaId::from(2000), 9..11), (ParaId::from(2001), 9..11)].into_iter().collect(), + [(ParaId::from(2000), 9..11), (ParaId::from(2001), 9..11)], + [], ) .await?; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/validator_disabling.rs b/polkadot/zombienet-sdk-tests/tests/functional/validator_disabling.rs index f72a2e30b1ba2..6f9d81b9a2c6c 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/validator_disabling.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/validator_disabling.rs @@ -87,12 +87,8 @@ async fn validator_disabling_test() -> Result<(), anyhow::Error> { log::info!("Waiting for parablocks to be produced"); let honest_validator = network.get_node("honest-validator-0")?; let relay_client: OnlineClient = honest_validator.wait_client().await?; - assert_para_throughput( - &relay_client, - 20, - [(polkadot_primitives::Id::from(1000), 10..30)].into_iter().collect(), - ) - .await?; + assert_para_throughput(&relay_client, 20, [(polkadot_primitives::Id::from(1000), 10..30)], []) + .await?; log::info!("Wait for a dispute to be initialized."); let mut best_blocks = relay_client.blocks().subscribe_best().await?; From d5418b3cd1bd0b0991216bcaa190784e696d8814 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 2 Dec 2025 12:06:33 +0100 Subject: [PATCH 220/312] Fix docs --- cumulus/test/runtime/src/test_pallet.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index bd46bd798ae83..25e0e22721b07 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -17,8 +17,9 @@ /// A special pallet that exposes 
dispatchables that are only useful for testing. pub use pallet::*; -/// Some key that we set in genesis and only read in [`TestOnRuntimeUpgrade`] to ensure that -/// [`OnRuntimeUpgrade`] works as expected. +/// Some key that we set in genesis and only read in +/// [`SingleBlockMigrations`](crate::SingleBlockMigrations) to ensure that +/// [`OnRuntimeUpgrade`](frame_support::traits::OnRuntimeUpgrade) works as expected. pub const TEST_RUNTIME_UPGRADE_KEY: &[u8] = b"+test_runtime_upgrade_key+"; #[frame_support::pallet(dev_mode)] From 4501d8f1b033d0c9121b32e5d8e48d001413598d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 2 Dec 2025 16:03:40 +0100 Subject: [PATCH 221/312] Ensure we only build on top of blocks that are last in the core --- .../consensus/aura/src/collators/lookahead.rs | 1 + .../consensus/aura/src/collators/mod.rs | 25 ++- .../slot_based/block_builder_task.rs | 22 ++- .../consensus/common/src/parent_search.rs | 2 +- .../src/validate_block/implementation.rs | 16 +- cumulus/primitives/core/src/lib.rs | 22 +++ .../zombienet-sdk-helpers/src/lib.rs | 184 +----------------- 7 files changed, 68 insertions(+), 204 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 49c74cb6a9ad2..1c0b5ab3c2390 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -315,6 +315,7 @@ where params.para_id, &*params.para_backend, ¶ms.relay_client, + |_| true, ) .await { diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 30e32ebe5e60d..b05b7aeee35a4 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -268,13 +268,19 @@ where } /// Use [`cumulus_client_consensus_common::find_potential_parents`] to find parachain blocks that -/// we can build 
on. Once a list of potential parents is retrieved, return the last one of the -/// longest chain. +/// we can build on. +/// +/// Once a list of potential parents is retrieved, return the last one of the +/// longest chain that passes `filter_parent`. If no parent matches the filter `included_block` is +/// returned. +/// +/// Returns `(included_block, parent)`. async fn find_parent( relay_parent: RelayHash, para_id: ParaId, para_backend: &impl sc_client_api::Backend, relay_client: &impl RelayChainInterface, + filter_parent: impl Fn(&Block::Header) -> bool, ) -> Option<(::Header, consensus_common::PotentialParent)> where Block: BlockT, @@ -297,7 +303,7 @@ where ) .await; - let potential_parents = match potential_parents { + let mut potential_parents = match potential_parents { Err(e) => { tracing::error!( target: crate::LOG_TARGET, @@ -311,11 +317,14 @@ where Ok(x) => x, }; - let included_block = potential_parents.iter().find(|x| x.depth == 0)?.header.clone(); - potential_parents - .into_iter() - .max_by_key(|a| a.depth) - .map(|parent| (included_block, parent)) + potential_parents.sort_by_key(|p| p.depth); + + let included_block = potential_parents.iter().find(|x| x.depth == 0)?.clone(); + + match potential_parents.into_iter().rev().find(|parent| filter_parent(&parent.header)) { + Some(res) => Some((included_block.header, res)), + None => Some((included_block.header.clone(), included_block)), + } } #[cfg(test)] diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 6e1055fe2863c..0434a613c8993 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -244,9 +244,25 @@ where let relay_parent = rp_data.relay_parent().hash(); let relay_parent_header = rp_data.relay_parent().clone(); - let Some((included_header, initial_parent)) = - 
crate::collators::find_parent(relay_parent, para_id, &*para_backend, &relay_client) - .await + let Some((included_header, initial_parent)) = crate::collators::find_parent( + relay_parent, + para_id, + &*para_backend, + &relay_client, + |parent| { + // We never want to build on any "middle block" that isn't the last block in a + // core. + match CumulusDigestItem::is_last_block_in_core(parent.digest()) { + Some(res) => res, + None => { + // When the digest item doesn't exist, we are running in compatibility + // mode and all parents are valid. + true + }, + } + }, + ) + .await else { continue }; diff --git a/cumulus/client/consensus/common/src/parent_search.rs b/cumulus/client/consensus/common/src/parent_search.rs index b037fe0dc9583..9b465c7c3ed03 100644 --- a/cumulus/client/consensus/common/src/parent_search.rs +++ b/cumulus/client/consensus/common/src/parent_search.rs @@ -51,7 +51,7 @@ pub struct ParentSearchParams { } /// A potential parent block returned from [`find_potential_parents`] -#[derive(PartialEq)] +#[derive(PartialEq, Clone)] pub struct PotentialParent { /// The hash of the block. 
pub hash: B::Hash, diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 14fd852613f1c..9fe62814d28b2 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -419,19 +419,9 @@ fn validate_blocks(blocks: &[B::LazyBlock], parent_header: &B::Header info.index ); - if block_index + 1 == num_blocks { - let has_use_full_core = - CumulusDigestItem::contains_use_full_core(block.header().digest()); - let has_runtime_upgrade = block - .header() - .digest() - .logs - .iter() - .any(|d| matches!(d, DigestItem::RuntimeEnvironmentUpdated)); - - assert!( - info.maybe_last || has_use_full_core || has_runtime_upgrade, - "Last block in PoV must have maybe_last=true, UseFullCore digest, or RuntimeEnvironmentUpdated digest" + if block_index + 1 == num_blocks && !CumulusDigestItem::is_last_block_in_core(block.header().digest()).unwrap_or(true) { + panic!( + "Last block in PoV must have maybe_last=true, `UseFullCore` digest, or `RuntimeEnvironmentUpdated` digest" ); } } diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 13ca4ba200903..59ce75144280b 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -428,6 +428,28 @@ impl CumulusDigestItem { }) .unwrap_or_default() } + + /// Returns `true` if the given `digest` is from a block that is the last block in a core. + /// + /// Checks the following conditions: + /// + /// - Is [`BundleInfo::maybe_last`] set to true? + /// - Or is [`Self::UseFullCore`] digest present? + /// - Or is [`DigestItem::RuntimeEnvironmentUpdated`] digest present? + /// + /// If any of these conditions is `true`, this function will return `true`. 
+ /// + /// Returns `None` if the `BundleInfo` digest is not present, which is interpreted as the + /// associated block is not using block bundling. + pub fn is_last_block_in_core(digest: &Digest) -> Option { + let bundle_info = Self::find_bundle_info(digest)?; + + Some( + bundle_info.maybe_last || + Self::contains_use_full_core(digest) || + digest.logs.iter().any(|l| matches!(l, DigestItem::RuntimeEnvironmentUpdated)), + ) + } } /// diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 5165d1c3bae6f..fcf329c52fe84 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -4,9 +4,8 @@ use anyhow::anyhow; use codec::{Decode, Encode}; use cumulus_primitives_core::{BundleInfo, CoreInfo, CumulusDigestItem, RelayBlockIdentifier}; -use futures::{pin_mut, select, stream::StreamExt, TryStreamExt}; +use futures::stream::StreamExt; use polkadot_primitives::{BlakeTwo256, CandidateReceiptV2, HashT, Id as ParaId}; -use sp_runtime::traits::Zero; use std::{cmp::max, collections::HashMap, ops::Range, sync::Arc}; use tokio::{ join, @@ -15,9 +14,8 @@ use tokio::{ use zombienet_sdk::{ subxt::{ self, - backend::legacy::LegacyRpcMethods, blocks::Block, - config::{polkadot::PolkadotExtrinsicParamsBuilder, substrate::DigestItem, Header}, + config::{polkadot::PolkadotExtrinsicParamsBuilder, substrate::DigestItem}, dynamic::Value, events::Events, ext::scale_value::value, @@ -114,8 +112,9 @@ async fn is_session_change( // Helper function for asserting the throughput of parachains, after the first session change. // -// The throughput is measured as total number of backed candidates in a window of relay chain -// blocks. Relay chain blocks with session changes are generally ignored. +// The throughput is measured as total number of backed candidates in a window of `stop_after` relay +// chain blocks. 
Relay chain blocks with session changes are generally ignored, but it is ensured +// that no blocks are build on top of these relay blocks. pub async fn assert_para_throughput( relay_client: &OnlineClient, stop_after: u32, @@ -258,179 +257,6 @@ fn find_relay_block_identifier( .ok_or_else(|| anyhow!("Failed to find `RelayBlockIdentifier` digest")) } -/// Find the `CandidateIncluded` events for the given `para_id`. -async fn find_candidate_included_events( - para_id: ParaId, - block: &Block>, -) -> Result>, anyhow::Error> { - let events = block.events().await?; - - find_event_and_decode_fields::>( - &events, - "ParaInclusion", - "CandidateIncluded", - ) - .map(|events| events.into_iter().filter(|e| e.descriptor.para_id() == para_id).collect()) -} - -/// Assert that `stop_after` parachain blocks are included via `expected_relay_blocks`. -/// -/// It waits for `stop_after` parachain blocks to be finalized. Then it ensures that these parachain -/// blocks are included on the relay chain using the given number of `expected_relay_blocks`. -pub async fn assert_para_blocks_throughput( - para_id: ParaId, - para_client: &OnlineClient, - stop_after: usize, - relay_rpc_client: &LegacyRpcMethods, - relay_client: &OnlineClient, - expected_relay_blocks: Range, - expected_candidates_per_relay_block: Range, -) -> Result<(), anyhow::Error> { - // Wait for the first session, block production on the parachain will start after that. - wait_for_first_session_change(&mut relay_client.blocks().subscribe_best().await?).await?; - - para_client - .blocks() - .subscribe_finalized() - .await? 
- .try_filter(|b| { - futures::future::ready(find_core_info(b).is_ok_and(|info| { - expected_candidates_per_relay_block.contains(&(info.number_of_cores.0 as usize)) - })) - }) - .next() - .await - .transpose()?; - - let finalized_stream = para_client.blocks().subscribe_finalized().await?.fuse(); - let finalized_relay_blocks = relay_client.blocks().subscribe_finalized().await?.fuse(); - let start_relay_block = relay_client - .blocks() - .subscribe_best() - .await? - .next() - .await - .ok_or_else(|| anyhow!("Could not get a best block from the relay chain"))??; - - let mut finalized_parachain_blocks = Vec::new(); - - pin_mut!(finalized_stream); - pin_mut!(finalized_relay_blocks); - - let last_finalized_relay_block = loop { - select! { - finalized = finalized_stream.select_next_some() => { - let finalized = finalized?; - if !finalized.number().is_zero() && finalized_parachain_blocks.len() < stop_after { - finalized_parachain_blocks.push(finalized); - } - }, - finalized = finalized_relay_blocks.select_next_some() => { - let finalized = finalized?; - let num_relay_chain_blocks = finalized.number().saturating_sub(start_relay_block.number()); - - // If we have recorded enough parachain blocks - if finalized_parachain_blocks.len() >= stop_after { - break finalized - } - - // `start_relay_block` maybe not being finalized at the beginning, but we just - // need some good estimation to ensure the tests ends at some point if there is some issue. - if num_relay_chain_blocks >= expected_relay_blocks.end { - return Err(anyhow!("Already processed more relay chain blocks ({num_relay_chain_blocks}) \ - than allowed in the range ({expected_relay_blocks:?}).")) - } - }, - complete => { panic!("Both streams should not finish"); } - } - }; - - // The number of cores occupied by the parachain candidates, ignoring session changes. - let mut occupied_relay_chain_blocks = 0; - // Did we found the first candidate matching one of our expected parachain blocks? 
- let mut found_first_candidate = false; - let mut current_relay_header = last_finalized_relay_block.header().clone(); - loop { - if current_relay_header.number().is_zero() { - return Err(anyhow!( - "Reached relay genesis block without finding all parachain blocks?" - )); - } - - let block = relay_rpc_client - .chain_get_block(Some(current_relay_header.hash_with(relay_client.hasher()))) - .await? - .ok_or_else(|| { - anyhow!( - "Could not fetch relay block: {:?}", - current_relay_header.hash_with(relay_client.hasher()) - ) - })? - .block; - - let block = relay_client.blocks().at(block.header.hash_with(relay_client.hasher())).await?; - - let included_events = find_candidate_included_events(para_id, &block).await?; - - let included_parachain_block_identifiers = included_events - .iter() - .filter_map(|i| { - finalized_parachain_blocks.iter().rev().find_map(|p| { - (BlakeTwo256::hash_of(p.header()) == i.descriptor.para_head()).then(|| { - find_core_info(p) - .and_then(|c| find_relay_block_identifier(p).map(|rbi| (c, rbi))) - }) - }) - }) - .collect::, _>>()?; - - finalized_parachain_blocks.retain(|b| { - let core_info = find_core_info(b).unwrap(); - let rbi = find_relay_block_identifier(b).unwrap(); - - !included_parachain_block_identifiers.contains(&(core_info, rbi)) - }); - - if !is_session_change(&block).await? 
{ - found_first_candidate |= !included_parachain_block_identifiers.is_empty(); - - if found_first_candidate { - occupied_relay_chain_blocks += 1; - } - - if !included_parachain_block_identifiers.is_empty() && - !expected_candidates_per_relay_block - .contains(&included_parachain_block_identifiers.len()) - { - return Err(anyhow!( - "{} candidates did not match the expected {expected_candidates_per_relay_block:?} \ - candidates per relay chain block", included_parachain_block_identifiers.len() - )) - } - } - - if finalized_parachain_blocks.is_empty() { - break - } - - current_relay_header = relay_rpc_client - .chain_get_header(Some(current_relay_header.parent_hash)) - .await? - .ok_or_else(|| { - anyhow!( - "Could not fetch relay chain header: {:?}", - current_relay_header.parent_hash - ) - })?; - } - - if !expected_relay_blocks.contains(&occupied_relay_chain_blocks) { - return Err(anyhow!("{occupied_relay_chain_blocks} did not match the expected {expected_candidates_per_relay_block:?} relay chain blocks")) - } - - Ok(()) -} - /// Wait for the first block with a session change. /// /// The session change is detected by inspecting the events in the block. 
From 984517ea12e738dd4627009f9c37a9c6d1bd91e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 2 Dec 2025 16:57:24 +0100 Subject: [PATCH 222/312] Make asset hub rococo use 500ms --- .../assets/asset-hub-rococo/src/lib.rs | 2 +- cumulus/zombienet/examples/README.md | 20 ++++++++++++ cumulus/zombienet/examples/run.sh | 32 +++++++++++++++++++ 3 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 cumulus/zombienet/examples/README.md create mode 100755 cumulus/zombienet/examples/run.sh diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index bde01f335b19c..9789ee4b9eddc 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -2071,7 +2071,7 @@ impl_runtime_apis! { impl cumulus_primitives_core::TargetBlockRate for Runtime { fn target_block_rate() -> u32 { - 1 + BLOCK_PROCESSING_VELOCITY } } diff --git a/cumulus/zombienet/examples/README.md b/cumulus/zombienet/examples/README.md new file mode 100644 index 0000000000000..84e7a05dfb19d --- /dev/null +++ b/cumulus/zombienet/examples/README.md @@ -0,0 +1,20 @@ +# Zombienet Examples + +## Prerequisites + +Install the zombienet CLI: + +```bash +cargo install zombie-cli +``` + +## Usage + +```bash +./run.sh +``` + +The script will: +1. Build `polkadot`, `polkadot-prepare-worker`, `polkadot-execute-worker`, and `polkadot-parachain` in release mode +2. Add the release directory to `PATH` +3. 
Spawn the network using `zombie-cli` diff --git a/cumulus/zombienet/examples/run.sh b/cumulus/zombienet/examples/run.sh new file mode 100755 index 0000000000000..8e25815dc6b2d --- /dev/null +++ b/cumulus/zombienet/examples/run.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +set -e + +if [ -z "$1" ]; then + echo "Usage: $0 " + echo "Available networks:" + ls -1 "$(dirname "$0")"/*.toml + exit 1 +fi + +NETWORK_FILE="$1" +SCRIPT_DIR="$(dirname "$0")" + +# Resolve to absolute path if relative +if [[ ! "$NETWORK_FILE" = /* ]]; then + if [ -f "$SCRIPT_DIR/$NETWORK_FILE" ]; then + NETWORK_FILE="$SCRIPT_DIR/$NETWORK_FILE" + fi +fi + +if [ ! -f "$NETWORK_FILE" ]; then + echo "Error: Network file '$NETWORK_FILE' not found" + exit 1 +fi + +cargo build --release -p polkadot --bin polkadot-prepare-worker --bin polkadot-execute-worker --bin polkadot -p polkadot-parachain-bin --bin polkadot-parachain + +RELEASE_DIR=$(dirname "$(cargo locate-project --workspace --message-format plain)")/target/release + +export PATH=$RELEASE_DIR:$PATH + +zombie-cli spawn --provider native "$NETWORK_FILE" From a587616653a9e2d6283ff845ba549cf8a2ba5578 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 2 Dec 2025 17:11:44 +0100 Subject: [PATCH 223/312] Please the clippy lord --- .../src/collators/slot_based/block_builder_task.rs | 11 +++-------- .../tests/zombie_ci/block_bundling/runtime_upgrade.rs | 6 +++--- .../zombie_ci/block_bundling/three_cores_glutton.rs | 2 +- 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 0434a613c8993..63d10b177f48f 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -252,14 +252,9 @@ where |parent| { // We never want to build on any "middle block" that isn't 
the last block in a // core. - match CumulusDigestItem::is_last_block_in_core(parent.digest()) { - Some(res) => res, - None => { - // When the digest item doesn't exist, we are running in compatibility - // mode and all parents are valid. - true - }, - } + // When the digest item doesn't exist, we are running in compatibility + // mode and all parents are valid. + CumulusDigestItem::is_last_block_in_core(parent.digest()).unwrap_or(true) }, ) .await diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs index 85f7177eb0931..e74a34cc785fd 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs @@ -70,11 +70,11 @@ async fn block_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { // Let's create our own fake runtime upgrade where we just bump the `spec_version`. // On chain nothing will change, as we only change the runtime version stored inside the wasm // file. - let blob = sc_executor_common::runtime_blob::RuntimeBlob::uncompress_if_needed(&runtime_wasm)?; + let blob = sc_executor_common::runtime_blob::RuntimeBlob::uncompress_if_needed(runtime_wasm)?; let mut version = sc_executor::read_embedded_version(&blob)? 
.ok_or_else(|| anyhow!("No runtime version found?"))?; version.spec_version += 1; - let runtime_wasm = sp_version::embed::embed_runtime_version(&runtime_wasm, version)?; + let runtime_wasm = sp_version::embed::embed_runtime_version(runtime_wasm, version)?; log::info!("Runtime size validation passed: {} bytes", runtime_wasm.len()); @@ -90,7 +90,7 @@ async fn block_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { let alice = dev::alice(); // Assign cores 0 and 1 to start with 3 cores total (core 2 is assigned by Zombienet) - assign_cores(&relay_node, PARA_ID, vec![0, 1]).await?; + assign_cores(relay_node, PARA_ID, vec![0, 1]).await?; log::info!("3 cores total assigned to the parachain"); diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs index 6668968bd10cf..22d6756f4c45d 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs @@ -49,7 +49,7 @@ async fn block_bundling_three_cores_glutton() -> Result<(), anyhow::Error> { let relay_client: OnlineClient = relay_node.wait_client().await?; // Assign cores 0 and 1 to start with 3 cores total (core 2 is assigned by Zombienet) - assign_cores(&relay_node, PARA_ID, vec![0, 1]).await?; + assign_cores(relay_node, PARA_ID, vec![0, 1]).await?; // Wait for the parachain to produce 72 blocks with 3 cores and glutton active // With 3 cores, we expect roughly 3x throughput compared to single core From 443dd5f93a63d9aa255ea60b0425efb722cba887 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 2 Dec 2025 21:20:57 +0100 Subject: [PATCH 224/312] More warnings.. 
--- .../parachain-system/src/validate_block/implementation.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 9fe62814d28b2..be2f9990723ff 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -31,11 +31,8 @@ use polkadot_parachain_primitives::primitives::{HeadData, ValidationResult}; use sp_core::storage::{well_known_keys, ChildInfo, StateVersion}; use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::{hashing::blake2_128, KillStorageResult}; -use sp_runtime::{ - traits::{ - Block as BlockT, ExtrinsicCall, Hash as HashT, HashingFor, Header as HeaderT, LazyBlock, - }, - DigestItem, +use sp_runtime::traits::{ + Block as BlockT, ExtrinsicCall, Hash as HashT, HashingFor, Header as HeaderT, LazyBlock, }; use sp_state_machine::OverlayedChanges; use sp_trie::{HashDBT, ProofSizeProvider, EMPTY_PREFIX}; From 4aad17fffa5882f2beff0e9d293d3c69225d5d5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 3 Dec 2025 15:13:17 +0100 Subject: [PATCH 225/312] Fix test --- .../src/block_weight/transaction_extension.rs | 30 ++++++++++++------- .../src/validate_block/tests.rs | 9 +++--- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index 017468130caf5..088e77ed45cae 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -46,8 +46,13 @@ use sp_runtime::{ /// 1. Only the first block of a core is allowed to change its block weight. /// /// 2. 
Any `inherent` or any transaction up to `MAX_TRANSACTION_TO_CONSIDER` requires more block -/// weight than the target block weight. Target block weight is the max weight for the respective -/// extrinsic class. +/// weight than the target extrinsic weight. Target extrinsic weight is the max weight for the +/// respective extrinsic class. The priority to determine the target e weight is the following, we +/// start checking if +/// [`WeightsPerClass::max_extrinsic`](frame_system::limits::WeightsPerClass::max_extrinsic) is +/// set, after this +/// [`WeightsPerClass::max_total`](frame_system::limits::WeightsPerClass::max_total) and if both +/// of these are `None` we fall back to the actual target block weight. /// /// Because the node is tracking the wall clock time while building a block to abort block /// production if it takes too long, we do not allow any block to change the block weight. The node @@ -161,13 +166,18 @@ where // // All of this is only important for extrinsics that will enable the `PotentialFullCore` mode. let block_weights = inside_pre_validate::using(&mut true, || Config::BlockWeights::get()); - let target_weight = block_weights - .get(info.class) - .max_total - .unwrap_or_else(|| - MaxParachainBlockWeight::::target_block_weight_with_digest(&digest) - .saturating_sub(block_weights.base_block) - ); + let class_weights = block_weights.get(info.class); + let target_block_weight = + MaxParachainBlockWeight::::target_block_weight_with_digest(&digest) + .saturating_sub(block_weights.base_block); + + // `max_extrinsic` determines the maximum weight allowed for one transaction. + // If that isn't set, we fall back to `max_total` which represents the total allowed weight for + // this dispatch class. If all previous weights are `None`, we fall back to the target block weight. 
+ let target_weight = class_weights + .max_extrinsic + .or(class_weights.max_total) + .unwrap_or(target_block_weight); // Protection against a misconfiguration as this should be detected by the pre-inherent hook. if block_weight_over_limit { @@ -214,7 +224,7 @@ where "Enabling `PotentialFullCore` mode for extrinsic", ); - *mode = Some(BlockWeightMode::::potential_full_core ( + *mode = Some(BlockWeightMode::::potential_full_core( // While applying inherents `extrinsic_index` and `first_transaction_index` will be `None`. // When the first transaction is applied, we want to store the index. first_transaction_index.or(transaction_index), diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index ad2fd42e91896..10a80cedaaecb 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -17,7 +17,10 @@ use crate::{validate_block::MemoryOptimizedValidationParams, *}; use codec::{Decode, DecodeAll, Encode}; use cumulus_primitives_core::{ - relay_chain, BundleInfo, ParachainBlockData, PersistedValidationData, + relay_chain, + relay_chain::{UMPSignal, UMP_SEPARATOR}, + BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, ParachainBlockData, + PersistedValidationData, }; use cumulus_test_client::{ generate_extrinsic, generate_extrinsic_with_pair, @@ -628,10 +631,6 @@ fn state_changes_in_multiple_blocks_are_applied_in_exact_order() { #[test] fn validate_block_handles_ump_signal() { - use cumulus_primitives_core::{ - relay_chain::{UMPSignal, UMP_SEPARATOR}, - ClaimQueueOffset, CoreInfo, CoreSelector, - }; sp_tracing::try_init_simple(); let (client, parent_head) = create_elastic_scaling_test_client(); From 836b0915d6a862fd6f468e53d104adbde705e936 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 3 Dec 2025 15:13:17 +0100 Subject: [PATCH 226/312] Fix test --- 
.../src/block_weight/transaction_extension.rs | 30 ++++++++++++------- .../src/validate_block/tests.rs | 6 +--- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index 017468130caf5..088e77ed45cae 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -46,8 +46,13 @@ use sp_runtime::{ /// 1. Only the first block of a core is allowed to change its block weight. /// /// 2. Any `inherent` or any transaction up to `MAX_TRANSACTION_TO_CONSIDER` requires more block -/// weight than the target block weight. Target block weight is the max weight for the respective -/// extrinsic class. +/// weight than the target extrinsic weight. Target extrinsic weight is the max weight for the +/// respective extrinsic class. The priority to determine the target e weight is the following, we +/// start checking if +/// [`WeightsPerClass::max_extrinsic`](frame_system::limits::WeightsPerClass::max_extrinsic) is +/// set, after this +/// [`WeightsPerClass::max_total`](frame_system::limits::WeightsPerClass::max_total) and if both +/// of these are `None` we fall back to the actual target block weight. /// /// Because the node is tracking the wall clock time while building a block to abort block /// production if it takes too long, we do not allow any block to change the block weight. The node @@ -161,13 +166,18 @@ where // // All of this is only important for extrinsics that will enable the `PotentialFullCore` mode. 
let block_weights = inside_pre_validate::using(&mut true, || Config::BlockWeights::get()); - let target_weight = block_weights - .get(info.class) - .max_total - .unwrap_or_else(|| - MaxParachainBlockWeight::::target_block_weight_with_digest(&digest) - .saturating_sub(block_weights.base_block) - ); + let class_weights = block_weights.get(info.class); + let target_block_weight = + MaxParachainBlockWeight::::target_block_weight_with_digest(&digest) + .saturating_sub(block_weights.base_block); + + // `max_extrinsic` determines the maximum weight allowed for one transaction. + // If that isn't set, we fall back to `max_total` which represents the total allowed weight for + // this dispatch class. If all previous weights are `None`, we fall back to the target block weight. + let target_weight = class_weights + .max_extrinsic + .or(class_weights.max_total) + .unwrap_or(target_block_weight); // Protection against a misconfiguration as this should be detected by the pre-inherent hook. if block_weight_over_limit { @@ -214,7 +224,7 @@ where "Enabling `PotentialFullCore` mode for extrinsic", ); - *mode = Some(BlockWeightMode::::potential_full_core ( + *mode = Some(BlockWeightMode::::potential_full_core( // While applying inherents `extrinsic_index` and `first_transaction_index` will be `None`. // When the first transaction is applied, we want to store the index. 
first_transaction_index.or(transaction_index), diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 2c68fb63fa4df..aea995ce7394a 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -16,7 +16,7 @@ use crate::{validate_block::MemoryOptimizedValidationParams, *}; use codec::{Decode, DecodeAll, Encode}; -use cumulus_primitives_core::{relay_chain, ParachainBlockData, PersistedValidationData}; +use cumulus_primitives_core::{relay_chain, ParachainBlockData, PersistedValidationData, relay_chain::{UMPSignal, UMP_SEPARATOR}, ClaimQueueOffset, CoreInfo, CoreSelector, }; use cumulus_test_client::{ generate_extrinsic, generate_extrinsic_with_pair, runtime::{ @@ -615,10 +615,6 @@ fn state_changes_in_multiple_blocks_are_applied_in_exact_order() { #[test] fn validate_block_handles_ump_signal() { - use cumulus_primitives_core::{ - relay_chain::{UMPSignal, UMP_SEPARATOR}, - ClaimQueueOffset, CoreInfo, CoreSelector, - }; sp_tracing::try_init_simple(); let (client, parent_head) = create_elastic_scaling_test_client(); From 915f6ae80ca370e80310fbc52d6b90b2accf9379 Mon Sep 17 00:00:00 2001 From: "cmd[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 4 Dec 2025 12:49:41 +0000 Subject: [PATCH 227/312] Update from github-actions[bot] running command 'fmt' --- .../src/block_weight/transaction_extension.rs | 4 ++-- .../pallets/parachain-system/src/validate_block/tests.rs | 6 +++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index 088e77ed45cae..757e323d5a4fa 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs 
@@ -47,8 +47,8 @@ use sp_runtime::{ /// /// 2. Any `inherent` or any transaction up to `MAX_TRANSACTION_TO_CONSIDER` requires more block /// weight than the target extrinsic weight. Target extrinsic weight is the max weight for the -/// respective extrinsic class. The priority to determine the target e weight is the following, we -/// start checking if +/// respective extrinsic class. The priority to determine the target e weight is the following, +/// we start checking if /// [`WeightsPerClass::max_extrinsic`](frame_system::limits::WeightsPerClass::max_extrinsic) is /// set, after this /// [`WeightsPerClass::max_total`](frame_system::limits::WeightsPerClass::max_total) and if both diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index aea995ce7394a..78e8f3926d173 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -16,7 +16,11 @@ use crate::{validate_block::MemoryOptimizedValidationParams, *}; use codec::{Decode, DecodeAll, Encode}; -use cumulus_primitives_core::{relay_chain, ParachainBlockData, PersistedValidationData, relay_chain::{UMPSignal, UMP_SEPARATOR}, ClaimQueueOffset, CoreInfo, CoreSelector, }; +use cumulus_primitives_core::{ + relay_chain, + relay_chain::{UMPSignal, UMP_SEPARATOR}, + ClaimQueueOffset, CoreInfo, CoreSelector, ParachainBlockData, PersistedValidationData, +}; use cumulus_test_client::{ generate_extrinsic, generate_extrinsic_with_pair, runtime::{ From c0e0c53aefeb980762b1110b6525253b7cbceec6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 4 Dec 2025 15:10:25 +0100 Subject: [PATCH 228/312] FMT --- .../src/block_weight/transaction_extension.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs 
b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index 088e77ed45cae..757e323d5a4fa 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -47,8 +47,8 @@ use sp_runtime::{ /// /// 2. Any `inherent` or any transaction up to `MAX_TRANSACTION_TO_CONSIDER` requires more block /// weight than the target extrinsic weight. Target extrinsic weight is the max weight for the -/// respective extrinsic class. The priority to determine the target e weight is the following, we -/// start checking if +/// respective extrinsic class. The priority to determine the target e weight is the following, +/// we start checking if /// [`WeightsPerClass::max_extrinsic`](frame_system::limits::WeightsPerClass::max_extrinsic) is /// set, after this /// [`WeightsPerClass::max_total`](frame_system::limits::WeightsPerClass::max_total) and if both From 7d8f73826f5da65a7242c287340f267e43b2ac93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 4 Dec 2025 22:55:21 +0100 Subject: [PATCH 229/312] Apply suggestion from @bkchr --- polkadot/zombienet-sdk-tests/tests/parachains/weights.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/zombienet-sdk-tests/tests/parachains/weights.rs b/polkadot/zombienet-sdk-tests/tests/parachains/weights.rs index 54502c1c3a991..66689bb993fd9 100644 --- a/polkadot/zombienet-sdk-tests/tests/parachains/weights.rs +++ b/polkadot/zombienet-sdk-tests/tests/parachains/weights.rs @@ -306,7 +306,7 @@ async fn instantiate_params( // Make sure we have enough gas and multiply by 4, since without it the calls fail not enough // gas. 
- Ok((dry_run.gas_required.ref_time * 4, dry_run.gas_required.proof_size * 4, deposit * 4)) + Ok((dry_run.weight_required.ref_time * 4, dry_run.weight_required.proof_size * 4, deposit * 4)) } async fn call_params( From c0871cdc247e5cacd36df0106e8227ce980836a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 4 Dec 2025 22:55:51 +0100 Subject: [PATCH 230/312] Apply suggestion from @bkchr --- polkadot/zombienet-sdk-tests/tests/parachains/weights.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/zombienet-sdk-tests/tests/parachains/weights.rs b/polkadot/zombienet-sdk-tests/tests/parachains/weights.rs index 66689bb993fd9..78b739dcce6e9 100644 --- a/polkadot/zombienet-sdk-tests/tests/parachains/weights.rs +++ b/polkadot/zombienet-sdk-tests/tests/parachains/weights.rs @@ -323,7 +323,7 @@ async fn call_params( StorageDeposit::Refund(_) => 0, }; - Ok((dry_run.gas_required.ref_time, dry_run.gas_required.proof_size, deposit)) + Ok((dry_run.weight_required.ref_time, dry_run.weight_required.proof_size, deposit)) } async fn call_contract( From f9752f70e50ca81fd3cebfe5dcf1ea0d6c53c08f Mon Sep 17 00:00:00 2001 From: "cmd[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 5 Dec 2025 15:03:01 +0000 Subject: [PATCH 231/312] Update from github-actions[bot] running command 'fmt' --- cumulus/pallets/parachain-system/Cargo.toml | 4 ++-- cumulus/test/runtime/Cargo.toml | 2 +- substrate/primitives/block-builder/Cargo.toml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 2d5d54def9f6f..26bcaf3199caa 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -88,6 +88,7 @@ std = [ "cumulus-primitives-proof-size-hostfunction/std", "environmental/std", "frame-benchmarking/std", + "frame-executive/std", "frame-support/std", "frame-system/std", 
"log/std", @@ -109,7 +110,6 @@ std = [ "trie-db/std", "xcm-builder/std", "xcm/std", - "frame-executive/std" ] runtime-benchmarks = [ @@ -128,10 +128,10 @@ runtime-benchmarks = [ ] try-runtime = [ + "frame-executive/try-runtime", "frame-support/try-runtime", "frame-system/try-runtime", "pallet-message-queue/try-runtime", "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", - "frame-executive/try-runtime" ] diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index 5ca6927df9f43..425989eab1488 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -64,6 +64,7 @@ std = [ "cumulus-pallet-weight-reclaim/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "frame-executive/std", "frame-support/std", "frame-system-rpc-runtime-api/std", @@ -96,7 +97,6 @@ std = [ "sp-version/std", "substrate-wasm-builder", "tracing/std", - "cumulus-primitives-storage-weight-reclaim/std" ] increment-spec-version = [] # A runtime which expects to build behind the relay chain tip. 
diff --git a/substrate/primitives/block-builder/Cargo.toml b/substrate/primitives/block-builder/Cargo.toml index 47367e287dfd8..72bb691f2dd86 100644 --- a/substrate/primitives/block-builder/Cargo.toml +++ b/substrate/primitives/block-builder/Cargo.toml @@ -25,9 +25,9 @@ sp-runtime = { workspace = true } [features] default = ["std"] std = [ + "codec/std", + "scale-info/std", "sp-api/std", "sp-inherents/std", "sp-runtime/std", - "codec/std", - "scale-info/std" ] From db7362cdab1a07e7c4cb8b0847d2e6c5ce64d8e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 9 Dec 2025 22:52:08 +0100 Subject: [PATCH 232/312] Only send UMP signal in last block of a bundle --- cumulus/pallets/parachain-system/src/lib.rs | 13 +- .../src/validate_block/tests.rs | 130 +++++++++++++++++- cumulus/test/runtime/src/test_pallet.rs | 8 ++ 3 files changed, 145 insertions(+), 6 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index e2f23f8dfb61a..449a53ba87a67 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -361,9 +361,9 @@ pub mod pallet { UpwardMessages::::put(&up[..num as usize]); *up = up.split_off(num as usize); - if let Some(core_info) = - CumulusDigestItem::find_core_info(&frame_system::Pallet::::digest()) - { + let digest = frame_system::Pallet::::digest(); + + if let Some(core_info) = CumulusDigestItem::find_core_info(&digest) { PendingUpwardSignals::::append( UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset) .encode(), @@ -375,8 +375,11 @@ pub mod pallet { PreviousCoreCount::::put(Compact(1u16)); } - // Send the pending UMP signals. - Self::send_ump_signals(); + // Only send UMP signals on the last block of a bundle. + // For single-block PoVs (no BundleInfo), always send signals. 
+ if CumulusDigestItem::is_last_block_in_core(&digest).unwrap_or(true) { + Self::send_ump_signals(); + } // If the total size of the pending messages is less than the threshold, // we decrease the fee factor, since the queue is less congested. diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 10a80cedaaecb..327de025e6a56 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -19,7 +19,7 @@ use codec::{Decode, DecodeAll, Encode}; use cumulus_primitives_core::{ relay_chain, relay_chain::{UMPSignal, UMP_SEPARATOR}, - BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, ParachainBlockData, + BundleInfo, ClaimQueueOffset, CollectCollationInfo, CoreInfo, CoreSelector, ParachainBlockData, PersistedValidationData, }; use cumulus_test_client::{ @@ -834,3 +834,131 @@ fn validate_block_rejects_incomplete_bundle() { .expect("Calls `validate_block`"); assert_eq!(header, res_header); } + +#[test] +fn only_send_ump_signal_on_last_block_in_bundle() { + sp_tracing::try_init_simple(); + + let (client, parent_head) = create_elastic_scaling_test_client(); + + // Build 4 blocks with BundleInfo and CoreInfo on all blocks + let TestBlockData { block, .. 
} = build_multiple_blocks_with_witness( + &client, + parent_head.clone(), + Default::default(), + 4, + |_| Vec::new(), + |i| { + vec![ + BundleInfo { index: i as u8, maybe_last: i == 3 }.to_digest_item(), + CumulusDigestItem::CoreInfo(CoreInfo { + selector: CoreSelector(0), + claim_queue_offset: ClaimQueueOffset(0), + number_of_cores: 1.into(), + }) + .to_digest_item(), + ] + }, + ); + + let blocks = block.blocks(); + + // Check CollectCollationInfo for each block + for (i, b) in blocks.iter().enumerate() { + let is_last = i == blocks.len() - 1; + let block_hash = b.header().hash(); + + let collation_info = client + .runtime_api() + .collect_collation_info(block_hash, b.header()) + .expect("Failed to collect collation info"); + + let has_separator = collation_info.upward_messages.contains(&UMP_SEPARATOR); + + if is_last { + assert!( + has_separator, + "Block {} (last) should have UMP_SEPARATOR, got: {:?}", + i, + collation_info.upward_messages + ); + } else { + assert!( + !has_separator, + "Block {} should NOT have UMP_SEPARATOR, got: {:?}", + i, + collation_info.upward_messages + ); + } + } +} + +#[test] +fn validate_block_accepts_single_block_with_use_full_core() { + sp_tracing::try_init_simple(); + + let (client, parent_head) = create_elastic_scaling_test_client(); + + // Build a single block with BundleInfo (maybe_last=false) and UseFullCore set via extrinsic + // UseFullCore should make validation succeed even without maybe_last=true + let TestBlockData { block, validation_data } = build_block_with_witness( + &client, + vec![generate_extrinsic(&client, Alice, TestPalletCall::set_use_full_core {})], + parent_head.clone(), + Default::default(), + vec![BundleInfo { index: 0, maybe_last: false }.to_digest_item()], + ); + + // Validation should succeed because UseFullCore marks it as last block + let header = block.blocks()[0].header().clone(); + let res_header = call_validate_block_elastic_scaling( + parent_head, + block, + 
validation_data.relay_parent_storage_root, + ) + .expect("Calls `validate_block`"); + assert_eq!(header, res_header); +} + +#[test] +fn only_send_ump_signal_on_single_block_with_use_full_core() { + sp_tracing::try_init_simple(); + + let (client, parent_head) = create_elastic_scaling_test_client(); + + // Build a single block with BundleInfo (maybe_last=false), CoreInfo, and UseFullCore set via + // extrinsic. UseFullCore makes this block the last block in the core. + let TestBlockData { block, .. } = build_multiple_blocks_with_witness( + &client, + parent_head.clone(), + Default::default(), + 1, + |_| vec![generate_extrinsic(&client, Alice, TestPalletCall::set_use_full_core {})], + |_| { + vec![ + BundleInfo { index: 0, maybe_last: false }.to_digest_item(), + CumulusDigestItem::CoreInfo(CoreInfo { + selector: CoreSelector(0), + claim_queue_offset: ClaimQueueOffset(0), + number_of_cores: 1.into(), + }) + .to_digest_item(), + ] + }, + ); + + let b = &block.blocks()[0]; + let block_hash = b.header().hash(); + + let collation_info = client + .runtime_api() + .collect_collation_info(block_hash, b.header()) + .expect("Failed to collect collation info"); + + // Block with UseFullCore should have UMP_SEPARATOR (it's the last block) + assert!( + collation_info.upward_messages.contains(&UMP_SEPARATOR), + "Single block with UseFullCore should have UMP_SEPARATOR, got: {:?}", + collation_info.upward_messages + ); +} diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index 25e0e22721b07..e28557fc12612 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -214,6 +214,14 @@ pub mod pallet { Ok(()) } + + /// Deposits the `UseFullCore` digest item to signal that this block should use the full + /// core. 
+ #[pallet::weight(0)] + pub fn set_use_full_core(_: OriginFor) -> DispatchResult { + frame_system::Pallet::::deposit_log(CumulusDigestItem::UseFullCore.to_digest_item()); + Ok(()) + } } #[pallet::inherent] From ee19d58f9d50edcdc055700c91009a10f55ec57c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 10 Dec 2025 17:32:29 +0100 Subject: [PATCH 233/312] Ensure we support uneven number of blocks --- .../parachain-system/src/block_weight/mod.rs | 30 +++++++++++-------- .../src/block_weight/tests.rs | 11 +++++++ 2 files changed, 28 insertions(+), 13 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index 39c97cf9975a3..43d7e051822a5 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -203,29 +203,33 @@ impl> let number_of_cores = CumulusDigestItem::find_core_info(&digest).map_or_else( || PreviousCoreCount::::get().map_or(1, |pc| pc.0), |ci| ci.number_of_cores.0, - ) as u32; + ) as u64; - let target_blocks = TargetBlockRate::get(); + let target_blocks = TargetBlockRate::get() as u64; // Ensure we have at least one core and valid target blocks if number_of_cores == 0 || target_blocks == 0 { return FULL_CORE_WEIGHT; } + let blocks_per_core = target_blocks.div_ceil(number_of_cores); + + let ref_time_per_block = MAX_REF_TIME_PER_CORE_NS / blocks_per_core; + // At maximum we want to allow `6s` of ref time, because we don't want to overload nodes // that are running with standard hardware. These nodes need to be able to import all the // blocks in `6s`. 
- let total_ref_time = (number_of_cores as u64) - .saturating_mul(MAX_REF_TIME_PER_CORE_NS) - .min(WEIGHT_REF_TIME_PER_SECOND * 6); - let ref_time_per_block = total_ref_time - .saturating_div(target_blocks as u64) - .min(MAX_REF_TIME_PER_CORE_NS); - - let total_pov_size = (number_of_cores as u64).saturating_mul(MAX_POV_SIZE as u64); - // Each block at max gets one core. - let proof_size_per_block = - total_pov_size.saturating_div(target_blocks as u64).min(MAX_POV_SIZE as u64); + let total_ref_time = ref_time_per_block * target_blocks; + let ref_time_per_block = if total_ref_time > 6 * WEIGHT_REF_TIME_PER_SECOND { + ref_time_per_block - + (total_ref_time - 6 * WEIGHT_REF_TIME_PER_SECOND).div_ceil(target_blocks) + } else { + ref_time_per_block + }; + + // PoV size we can use as much as we can get from the cores, but at maximum it is one block + // per core. Or in other words, one block can not span across multiple cores. + let proof_size_per_block = MAX_POV_SIZE as u64 / blocks_per_core; Weight::from_parts(ref_time_per_block, proof_size_per_block) } diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 353dcdf53a338..aa3c8ba6b9ba1 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -106,6 +106,17 @@ fn test_zero_cores() { }); } +#[test] +fn test_uneven_number_of_blocks_on_even_number_of_cores() { + TestExtBuilder::new().number_of_cores(2).build().execute_with(|| { + let weight = MaxParachainBlockWeight::>::get(); + + // Each block should get half of a core. 
+ assert_eq!(weight.ref_time(), WEIGHT_REF_TIME_PER_SECOND); + assert_eq!(weight.proof_size(), MAX_POV_SIZE as u64 / 2); + }); +} + #[test] fn test_zero_target_blocks() { TestExtBuilder::new().number_of_cores(2).build().execute_with(|| { From 001c7de0e00ad8f2828cdff391353feb8198c41c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 11 Dec 2025 08:54:48 +0100 Subject: [PATCH 234/312] Make clippy happy --- cumulus/pallets/parachain-system/src/block_weight/mock.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index ac1a9bcdd4845..7dba292f55de3 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -156,7 +156,7 @@ pub mod test_pallet { Ok(()) } - #[pallet::weight((_weight.clone(), DispatchClass::Normal))] + #[pallet::weight((*_weight, DispatchClass::Normal))] pub fn use_weight(_: OriginFor, _weight: Weight) -> DispatchResult { Ok(()) } From 4afef298ed70927a9ca8d74e60824214d7d271f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 11 Dec 2025 09:20:30 +0100 Subject: [PATCH 235/312] Rename `BundleInfo` to `BlockBundleInfo` --- .../slot_based/block_builder_task.rs | 4 ++-- .../src/collators/slot_based/block_import.rs | 8 +++---- .../parachain-system/src/benchmarking.rs | 8 +++---- .../parachain-system/src/block_weight/mock.rs | 6 ++--- .../parachain-system/src/block_weight/mod.rs | 6 ++--- .../src/block_weight/tests.rs | 11 +++++----- .../src/block_weight/transaction_extension.rs | 2 +- cumulus/pallets/parachain-system/src/lib.rs | 2 +- .../src/validate_block/implementation.rs | 12 +++++----- .../src/validate_block/tests.rs | 19 ++++++++-------- cumulus/primitives/core/src/lib.rs | 22 +++++++++---------- cumulus/test/runtime/src/test_pallet.rs | 2 +- .../zombienet-sdk-helpers/src/lib.rs | 16 
+++++++------- 13 files changed, 60 insertions(+), 58 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 63d10b177f48f..423a70f8240e8 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -34,7 +34,7 @@ use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockIm use cumulus_client_proof_size_recording::prepare_proof_size_recording_transaction; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; use cumulus_primitives_core::{ - BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, + BlockBundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, PersistedValidationData, RelayParentOffsetApi, TargetBlockRate, }; use cumulus_relay_chain_interface::RelayChainInterface; @@ -626,7 +626,7 @@ where slot_claim, additional_pre_digest: vec![ CumulusDigestItem::CoreInfo(core_info.clone()).to_digest_item(), - CumulusDigestItem::BundleInfo(BundleInfo { + CumulusDigestItem::BlockBundleInfo(BlockBundleInfo { index: block_index as u8, maybe_last: is_last_block_in_core, }) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs index 3ba6f08d946cd..b63e1ebd13514 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -18,7 +18,7 @@ use crate::LOG_TARGET; use codec::{Codec, Decode, Encode}; use cumulus_client_proof_size_recording::prepare_proof_size_recording_transaction; -use cumulus_primitives_core::{BundleInfo, CoreInfo, CumulusDigestItem, RelayBlockIdentifier}; +use cumulus_primitives_core::{BlockBundleInfo, CoreInfo, CumulusDigestItem, 
RelayBlockIdentifier}; use sc_client_api::{ backend::AuxStore, client::{AuxDataOperations, FinalityNotification, PreCommitActions}, @@ -116,7 +116,7 @@ impl SlotBasedBlockImport Option> where @@ -124,7 +124,7 @@ impl SlotBasedBlockImport SlotBasedBlockImport::set_extrinsic_index(1); frame_system::Pallet::::deposit_log( - BundleInfo { index: 0, maybe_last: false }.to_digest_item(), + BlockBundleInfo { index: 0, maybe_last: false }.to_digest_item(), ); frame_system::Pallet::::deposit_log( CoreInfo { @@ -157,7 +157,7 @@ mod benchmarks { frame_system::Pallet::::set_extrinsic_index(1); frame_system::Pallet::::deposit_log( - BundleInfo { index: 0, maybe_last: false }.to_digest_item(), + BlockBundleInfo { index: 0, maybe_last: false }.to_digest_item(), ); frame_system::Pallet::::deposit_log( CoreInfo { @@ -219,7 +219,7 @@ mod benchmarks { frame_system::Pallet::::set_extrinsic_index(1); frame_system::Pallet::::deposit_log( - BundleInfo { index: 0, maybe_last: false }.to_digest_item(), + BlockBundleInfo { index: 0, maybe_last: false }.to_digest_item(), ); frame_system::Pallet::::deposit_log( CoreInfo { diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index 7dba292f55de3..b38ee17e15796 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -18,7 +18,7 @@ use super::{transaction_extension::DynamicMaxBlockWeight, *}; use crate::{self as parachain_system, MessagingStateSnapshot, PreviousCoreCount}; use codec::Compact; use cumulus_primitives_core::{ - BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, + BlockBundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, }; use frame_support::{ construct_runtime, derive_impl, @@ -405,8 +405,8 @@ impl TestExtBuilder { // Add bundle info if specified if let Some(bundle_index) = self.bundle_index { let bundle_info = - BundleInfo { index: 
bundle_index, maybe_last: self.bundle_maybe_last }; - let digest = CumulusDigestItem::BundleInfo(bundle_info).to_digest_item(); + BlockBundleInfo { index: bundle_index, maybe_last: self.bundle_maybe_last }; + let digest = CumulusDigestItem::BlockBundleInfo(bundle_info).to_digest_item(); frame_system::Pallet::::deposit_log(digest); } diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index 39c97cf9975a3..1a91156c592b2 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -292,14 +292,14 @@ fn is_first_block_in_core() -> Option { /// Is this the first block in a core? (takes digest as parameter) /// -/// Returns `None` if the [`CumulusDigestItem::BundleInfo`] digest is not set. +/// Returns `None` if the [`CumulusDigestItem::BlockBundleInfo`] digest is not set. fn is_first_block_in_core_with_digest(digest: &Digest) -> Option { - CumulusDigestItem::find_bundle_info(digest).map(|bi| bi.index == 0) + CumulusDigestItem::find_block_bundle_info(digest).map(|bi| bi.index == 0) } /// Is the `BlockWeight` already above the target block weight? /// -/// Returns `None` if the [`CumulusDigestItem::BundleInfo`] digest is not set. +/// Returns `None` if the [`CumulusDigestItem::BlockBundleInfo`] digest is not set. 
fn block_weight_over_target_block_weight>() -> bool { let target_block_weight = MaxParachainBlockWeight::::target_block_weight(); diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index 353dcdf53a338..7006a65ff9ee3 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -18,7 +18,7 @@ use super::{mock::*, transaction_extension::DynamicMaxBlockWeight, *}; use assert_matches::assert_matches; use codec::Compact; use cumulus_primitives_core::{ - BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, + BlockBundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, }; use frame_support::{ assert_ok, @@ -177,16 +177,17 @@ fn test_is_first_block_in_core_functions() { assert!(super::is_first_block_in_core_with_digest(&empty_digest).is_none()); // Test with bundle info index = 0 - should return true - let bundle_info_first = BundleInfo { index: 0, maybe_last: false }; - let digest_item_first = CumulusDigestItem::BundleInfo(bundle_info_first).to_digest_item(); + let bundle_info_first = BlockBundleInfo { index: 0, maybe_last: false }; + let digest_item_first = + CumulusDigestItem::BlockBundleInfo(bundle_info_first).to_digest_item(); let mut digest_first = Digest::default(); digest_first.push(digest_item_first); assert!(super::is_first_block_in_core_with_digest(&digest_first).unwrap()); // Test with bundle info index > 0 - should return false - let bundle_info_not_first = BundleInfo { index: 5, maybe_last: true }; + let bundle_info_not_first = BlockBundleInfo { index: 5, maybe_last: true }; let digest_item_not_first = - CumulusDigestItem::BundleInfo(bundle_info_not_first).to_digest_item(); + CumulusDigestItem::BlockBundleInfo(bundle_info_not_first).to_digest_item(); let mut digest_not_first = Digest::default(); digest_not_first.push(digest_item_not_first); 
assert!(!super::is_first_block_in_core_with_digest(&digest_not_first).unwrap()); diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index 757e323d5a4fa..051730c3c5a27 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -212,7 +212,7 @@ where let class_allowed = if ALLOW_NORMAL { true } else { info.class == DispatchClass::Operational } || info.class == DispatchClass::Mandatory; - // If the `BundleInfo` digest is not set (function returns `None`), it means we are in some offchain + // If the `BlockBundleInfo` digest is not set (function returns `None`), it means we are in some offchain // call like `validate_block`. In this case we assume this is the first block, otherwise these big // transactions will never be able to enter the tx pool. let is_first_block = is_first_block_in_core_with_digest(&digest).unwrap_or(true); diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 449a53ba87a67..3122c57a8b5a6 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -376,7 +376,7 @@ pub mod pallet { } // Only send UMP signals on the last block of a bundle. - // For single-block PoVs (no BundleInfo), always send signals. + // For single-block PoVs (no BlockBundleInfo), always send signals. 
if CumulusDigestItem::is_last_block_in_core(&digest).unwrap_or(true) { Self::send_ump_signals(); } diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index be2f9990723ff..6d6bed4598259 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -365,7 +365,7 @@ fn validate_validation_data( ); } -/// Validates that the given blocks form a valid chain and have consistent BundleInfo. +/// Validates that the given blocks form a valid chain and have consistent BlockBundleInfo. fn validate_blocks(blocks: &[B::LazyBlock], parent_header: &B::Header) { let num_blocks = blocks.len(); @@ -392,17 +392,17 @@ fn validate_blocks(blocks: &[B::LazyBlock], parent_header: &B::Header array_bytes::bytes2hex("0x", block.header().parent_hash().as_ref()), ); - // Validate BundleInfo consistency - let bundle_info = CumulusDigestItem::find_bundle_info(block.header().digest()); + // Validate BlockBundleInfo consistency + let bundle_info = CumulusDigestItem::find_block_bundle_info(block.header().digest()); match (first_block_has_bundle_info, &bundle_info) { (None, info) => { first_block_has_bundle_info = Some(info.is_some()); }, (Some(true), None) => { - panic!("All blocks must have BundleInfo if the first block has it"); + panic!("All blocks must have BlockBundleInfo if the first block has it"); }, (Some(false), Some(_)) => { - panic!("No block should have BundleInfo if the first block doesn't have it"); + panic!("No block should have BlockBundleInfo if the first block doesn't have it"); }, _ => {}, } @@ -411,7 +411,7 @@ fn validate_blocks(blocks: &[B::LazyBlock], parent_header: &B::Header assert_eq!( info.index as usize, block_index, - "BundleInfo index mismatch: expected {}, got {}", + "BlockBundleInfo index mismatch: expected {}, got {}", block_index, info.index ); diff --git 
a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 327de025e6a56..db3caba697a78 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -19,7 +19,8 @@ use codec::{Decode, DecodeAll, Encode}; use cumulus_primitives_core::{ relay_chain, relay_chain::{UMPSignal, UMP_SEPARATOR}, - BundleInfo, ClaimQueueOffset, CollectCollationInfo, CoreInfo, CoreSelector, ParachainBlockData, + BlockBundleInfo, ClaimQueueOffset, CollectCollationInfo, CoreInfo, CoreSelector, + ParachainBlockData, PersistedValidationData, }; use cumulus_test_client::{ @@ -797,14 +798,14 @@ fn validate_block_rejects_incomplete_bundle() { let (client, parent_head) = create_elastic_scaling_test_client(); - // Build 2 blocks with BundleInfo + // Build 2 blocks with BlockBundleInfo let TestBlockData { block, validation_data } = build_multiple_blocks_with_witness( &client, parent_head.clone(), Default::default(), 2, |_| Vec::new(), - |i| vec![BundleInfo { index: i as u8, maybe_last: i == 1 }.to_digest_item()], + |i| vec![BlockBundleInfo { index: i as u8, maybe_last: i == 1 }.to_digest_item()], ); // Validation with only first block should fail (incomplete bundle) @@ -841,7 +842,7 @@ fn only_send_ump_signal_on_last_block_in_bundle() { let (client, parent_head) = create_elastic_scaling_test_client(); - // Build 4 blocks with BundleInfo and CoreInfo on all blocks + // Build 4 blocks with BlockBundleInfo and CoreInfo on all blocks let TestBlockData { block, .. 
} = build_multiple_blocks_with_witness( &client, parent_head.clone(), @@ -850,7 +851,7 @@ fn only_send_ump_signal_on_last_block_in_bundle() { |_| Vec::new(), |i| { vec![ - BundleInfo { index: i as u8, maybe_last: i == 3 }.to_digest_item(), + BlockBundleInfo { index: i as u8, maybe_last: i == 3 }.to_digest_item(), CumulusDigestItem::CoreInfo(CoreInfo { selector: CoreSelector(0), claim_queue_offset: ClaimQueueOffset(0), @@ -899,14 +900,14 @@ fn validate_block_accepts_single_block_with_use_full_core() { let (client, parent_head) = create_elastic_scaling_test_client(); - // Build a single block with BundleInfo (maybe_last=false) and UseFullCore set via extrinsic + // Build a single block with BlockBundleInfo (maybe_last=false) and UseFullCore set via extrinsic // UseFullCore should make validation succeed even without maybe_last=true let TestBlockData { block, validation_data } = build_block_with_witness( &client, vec![generate_extrinsic(&client, Alice, TestPalletCall::set_use_full_core {})], parent_head.clone(), Default::default(), - vec![BundleInfo { index: 0, maybe_last: false }.to_digest_item()], + vec![BlockBundleInfo { index: 0, maybe_last: false }.to_digest_item()], ); // Validation should succeed because UseFullCore marks it as last block @@ -926,7 +927,7 @@ fn only_send_ump_signal_on_single_block_with_use_full_core() { let (client, parent_head) = create_elastic_scaling_test_client(); - // Build a single block with BundleInfo (maybe_last=false), CoreInfo, and UseFullCore set via + // Build a single block with BlockBundleInfo (maybe_last=false), CoreInfo, and UseFullCore set via // extrinsic. UseFullCore makes this block the last block in the core. let TestBlockData { block, .. 
} = build_multiple_blocks_with_witness( &client, @@ -936,7 +937,7 @@ fn only_send_ump_signal_on_single_block_with_use_full_core() { |_| vec![generate_extrinsic(&client, Alice, TestPalletCall::set_use_full_core {})], |_| { vec![ - BundleInfo { index: 0, maybe_last: false }.to_digest_item(), + BlockBundleInfo { index: 0, maybe_last: false }.to_digest_item(), CumulusDigestItem::CoreInfo(CoreInfo { selector: CoreSelector(0), claim_queue_offset: ClaimQueueOffset(0), diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 59ce75144280b..1d8e31ee90e19 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -248,7 +248,7 @@ impl CoreInfo { /// Information about a block that is part of a PoV bundle. #[derive(Clone, Debug, Decode, Encode, PartialEq)] -pub struct BundleInfo { +pub struct BlockBundleInfo { /// The index of the block in the bundle. pub index: u8, /// Is this the last block in the bundle from the point of view of the node? @@ -259,11 +259,11 @@ pub struct BundleInfo { pub maybe_last: bool, } -impl BundleInfo { - /// Puts this into a [`CumulusDigestItem::BundleInfo`] and then encodes it as a Substrate +impl BlockBundleInfo { + /// Puts this into a [`CumulusDigestItem::BlockBundleInfo`] and then encodes it as a Substrate /// [`DigestItem`]. pub fn to_digest_item(&self) -> DigestItem { - CumulusDigestItem::BundleInfo(self.clone()).to_digest_item() + CumulusDigestItem::BlockBundleInfo(self.clone()).to_digest_item() } } @@ -299,7 +299,7 @@ pub enum CumulusDigestItem { CoreInfo(CoreInfo), /// A digest item providing information about the position of the block in the bundle. #[codec(index = 2)] - BundleInfo(BundleInfo), + BlockBundleInfo(BlockBundleInfo), /// A digest item informing the node that this block should be put alone onto a core. /// /// In other words, the core should not be shared with other blocks. 
@@ -395,11 +395,11 @@ impl CumulusDigestItem { }) } - /// Returns the [`BundleInfo`] from the given `digest`. - pub fn find_bundle_info(digest: &Digest) -> Option { + /// Returns the [`BlockBundleInfo`] from the given `digest`. + pub fn find_block_bundle_info(digest: &Digest) -> Option { digest.convert_first(|d| match d { DigestItem::PreRuntime(id, val) if id == &CUMULUS_CONSENSUS_ID => { - let Ok(CumulusDigestItem::BundleInfo(bundle_info)) = + let Ok(CumulusDigestItem::BlockBundleInfo(bundle_info)) = CumulusDigestItem::decode_all(&mut &val[..]) else { return None @@ -433,16 +433,16 @@ impl CumulusDigestItem { /// /// Checks the following conditions: /// - /// - Is [`BundleInfo::maybe_last`] set to true? + /// - Is [`BlockBundleInfo::maybe_last`] set to true? /// - Or is [`Self::UseFullCore`] digest present? /// - Or is [`DigestItem::RuntimeEnvironmentUpdated`] digest present? /// /// If any of these conditions is `true`, this function will return `true`. /// - /// Returns `None` if the `BundleInfo` digest is not present, which is interpreted as the + /// Returns `None` if the `BlockBundleInfo` digest is not present, which is interpreted as the /// associated block is not using block bundling. 
pub fn is_last_block_in_core(digest: &Digest) -> Option { - let bundle_info = Self::find_bundle_info(digest)?; + let bundle_info = Self::find_block_bundle_info(digest)?; Some( bundle_info.maybe_last || diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index e28557fc12612..b793faeb14884 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -321,7 +321,7 @@ pub mod pallet { if { let digest = frame_system::Pallet::::digest(); - CumulusDigestItem::find_bundle_info(&digest) + CumulusDigestItem::find_block_bundle_info(&digest) // Default being `true` to support `validate_transaction` .map_or(true, |bi| { // Either we want that the transaction goes into the first block diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 36562ccefd918..36c2c32c1628d 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -3,7 +3,7 @@ use anyhow::anyhow; use codec::{Decode, Encode}; -use cumulus_primitives_core::{BundleInfo, CoreInfo, CumulusDigestItem, RelayBlockIdentifier}; +use cumulus_primitives_core::{BlockBundleInfo, CoreInfo, CumulusDigestItem, RelayBlockIdentifier}; use futures::stream::StreamExt; use polkadot_primitives::{BlakeTwo256, CandidateReceiptV2, HashT, Id as ParaId}; use std::{cmp::max, collections::HashMap, ops::Range, sync::Arc}; @@ -529,16 +529,16 @@ pub async fn assert_para_is_registered( Err(anyhow!("No more blocks to check")) } -/// Returns [`BundleInfo`] for the given parachain block. -fn find_bundle_info( +/// Returns [`BlockBundleInfo`] for the given parachain block. 
+fn find_block_bundle_info( block: &Block>, -) -> Result { +) -> Result { let substrate_digest = sp_runtime::generic::Digest::decode(&mut &block.header().digest.encode()[..]) .expect("`subxt::Digest` and `substrate::Digest` should encode and decode; qed"); - CumulusDigestItem::find_bundle_info(&substrate_digest) - .ok_or_else(|| anyhow!("Failed to find `BundleInfo` digest")) + CumulusDigestItem::find_block_bundle_info(&substrate_digest) + .ok_or_else(|| anyhow!("Failed to find `BlockBundleInfo` digest")) } /// Validates that the given block is a "special" block in the core. @@ -625,7 +625,7 @@ pub async fn ensure_is_only_block_in_core( let mut next_first_bundle_block = None; while let Some(mut block) = best_block_stream.next().await.transpose()? { while block.number() > start_block.number() { - if find_bundle_info(&block)?.index == 0 { + if find_block_bundle_info(&block)?.index == 0 { next_first_bundle_block = Some(block.hash()); } @@ -657,7 +657,7 @@ pub async fn ensure_is_last_block_in_core( let blocks = para_client.blocks(); let block = blocks.at(block_to_check).await?; - let bundle_info = find_bundle_info(&block)?; + let bundle_info = find_block_bundle_info(&block)?; // Above we ensure it is the last block in the core and now we want to ensure it isn't the first // block. 
From 5e33c506e4bc01abb16664e9a2406876f5de0f76 Mon Sep 17 00:00:00 2001 From: "cmd[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 11 Dec 2025 13:08:19 +0000 Subject: [PATCH 236/312] Update from github-actions[bot] running command 'fmt' --- .../src/validate_block/tests.rs | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index db3caba697a78..98ffde1d5112e 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -20,8 +20,7 @@ use cumulus_primitives_core::{ relay_chain, relay_chain::{UMPSignal, UMP_SEPARATOR}, BlockBundleInfo, ClaimQueueOffset, CollectCollationInfo, CoreInfo, CoreSelector, - ParachainBlockData, - PersistedValidationData, + ParachainBlockData, PersistedValidationData, }; use cumulus_test_client::{ generate_extrinsic, generate_extrinsic_with_pair, @@ -880,15 +879,13 @@ fn only_send_ump_signal_on_last_block_in_bundle() { assert!( has_separator, "Block {} (last) should have UMP_SEPARATOR, got: {:?}", - i, - collation_info.upward_messages + i, collation_info.upward_messages ); } else { assert!( !has_separator, "Block {} should NOT have UMP_SEPARATOR, got: {:?}", - i, - collation_info.upward_messages + i, collation_info.upward_messages ); } } @@ -900,8 +897,8 @@ fn validate_block_accepts_single_block_with_use_full_core() { let (client, parent_head) = create_elastic_scaling_test_client(); - // Build a single block with BlockBundleInfo (maybe_last=false) and UseFullCore set via extrinsic - // UseFullCore should make validation succeed even without maybe_last=true + // Build a single block with BlockBundleInfo (maybe_last=false) and UseFullCore set via + // extrinsic UseFullCore should make validation succeed even without maybe_last=true let TestBlockData { block, validation_data } = 
build_block_with_witness( &client, vec![generate_extrinsic(&client, Alice, TestPalletCall::set_use_full_core {})], @@ -927,8 +924,8 @@ fn only_send_ump_signal_on_single_block_with_use_full_core() { let (client, parent_head) = create_elastic_scaling_test_client(); - // Build a single block with BlockBundleInfo (maybe_last=false), CoreInfo, and UseFullCore set via - // extrinsic. UseFullCore makes this block the last block in the core. + // Build a single block with BlockBundleInfo (maybe_last=false), CoreInfo, and UseFullCore set + // via extrinsic. UseFullCore makes this block the last block in the core. let TestBlockData { block, .. } = build_multiple_blocks_with_witness( &client, parent_head.clone(), From ea5c5d989a70949fe67332a0bc3ab6f46535e708 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 15 Dec 2025 14:13:40 +0100 Subject: [PATCH 237/312] Ensure we check the block weight after executing a transaction --- .../src/block_weight/tests.rs | 60 ++++++++++++++++++- .../src/block_weight/transaction_extension.rs | 9 ++- 2 files changed, 64 insertions(+), 5 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index aa3c8ba6b9ba1..be27cb89049c9 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -35,9 +35,10 @@ use sp_runtime::{ Digest, }; -type TxExtension = DynamicMaxBlockWeight, ConstU32<4>>; +type TxExtension = + DynamicMaxBlockWeight, ConstU32>; type TxExtensionOnlyOperational = - DynamicMaxBlockWeight, ConstU32<4>, 10, false>; + DynamicMaxBlockWeight, ConstU32, 10, false>; type MaximumBlockWeight = MaxParachainBlockWeight>; #[test] @@ -1069,3 +1070,58 @@ fn on_poll_uses_correct_weight() { Executive::finalize_block(); }); } + +// This test ensures when a transaction enables `PotentialFullCore` in `pre-dispatch`, but in post +// dispatch the transaction has a 
lower weight we don't go back to `FractionalCore` if the total +// block weight is above the target block weight. +#[test] +fn post_dispatch_is_taking_block_weight_into_account() { + TestExtBuilder::new() + .number_of_cores(2) + .first_block_in_core(true) + .build() + .execute_with(|| { + initialize_block_finished(); + System::set_extrinsic_index(1); + + let target_weight = MaximumBlockWeight::target_block_weight(); + + let sixty_percent = Weight::from_parts( + target_weight.ref_time() * 60 / 100, + target_weight.proof_size() * 60 / 100, + ); + + // Simulate on_initialize using 60% of target weight + register_weight(sixty_percent, DispatchClass::Mandatory); + + let info = DispatchInfo { + call_weight: target_weight, + class: DispatchClass::Normal, + ..Default::default() + }; + + assert_ok!(TxExtension::validate_and_prepare( + TxExtension::new(Default::default()), + SystemOrigin::Signed(0).into(), + &CALL, + &info, + 100, + 0, + )); + + assert_matches!( + crate::BlockWeightMode::::get(), + Some(BlockWeightMode::PotentialFullCore { .. }) + ); + + // Post-dispatch with actual_weight = 60% of target + let mut post_info = PostDispatchInfo { + actual_weight: Some(sixty_percent), + pays_fee: Default::default(), + }; + + assert_ok!(TxExtension::post_dispatch((), &info, &mut post_info, 0, &Ok(()))); + + assert!(has_use_full_core_digest()); + }); +} diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index 757e323d5a4fa..bb850aeec30b9 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -321,8 +321,7 @@ where Config::WeightInfo::block_weight_tx_extension_stays_fraction_of_core(), ) }, - // Now we need to check if the transaction required more weight than a fraction of a - // core block. 
+ // Now we check if the transaction required more weight than the target weight. BlockWeightMode::::PotentialFullCore { first_transaction_index, target_weight, @@ -331,7 +330,11 @@ where let block_weight = frame_system::BlockWeight::::get(); let extrinsic_class_weight = block_weight.get(info.class); - if extrinsic_class_weight.any_gt(*target_weight) { + // The transaction weight after execution may not be above the target weight, + // but the full block weight may now be above the target weight. + if extrinsic_class_weight.any_gt(*target_weight) || + block_weight_over_target_block_weight::() + { log::trace!( target: LOG_TARGET, "Extrinsic class weight {extrinsic_class_weight:?} above target weight {target_weight:?}, enabling `FullCore` mode." From 87238ec033ea1865d94d25a35c7d18a4214c48ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 17 Dec 2025 11:58:26 +0100 Subject: [PATCH 238/312] Change derive macro --- cumulus/test/runtime/src/test_pallet.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index b793faeb14884..945d986769daf 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -34,6 +34,7 @@ pub mod pallet { pallet_prelude::*, traits::IsSubType, weights::constants::WEIGHT_REF_TIME_PER_SECOND, + DebugNoBound, }; use frame_system::pallet_prelude::*; use sp_runtime::traits::{Dispatchable, Implication, TransactionExtension}; @@ -277,13 +278,13 @@ pub mod pallet { } #[derive( + DebugNoBound, Encode, Decode, CloneNoBound, EqNoBound, PartialEqNoBound, TypeInfo, - RuntimeDebugNoBound, DecodeWithMemTracking, )] #[scale_info(skip_type_params(T))] From a92ca4305f2aa384141fdbe37c70b789b8077135 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 6 Feb 2026 17:43:37 +0100 Subject: [PATCH 239/312] Review feedback --- .../parachain-system/src/block_weight/mod.rs | 33 
+++++++++-------- .../src/block_weight/pre_inherents_hook.rs | 4 +- .../src/block_weight/transaction_extension.rs | 37 ++++++++----------- cumulus/primitives/core/src/lib.rs | 8 +++- 4 files changed, 42 insertions(+), 40 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index 43d7e051822a5..52c124495d8e5 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -16,7 +16,7 @@ //! Provides functionality to dynamically calculate the block weight for a parachain. //! -//! With block bundling, parachains are relative free to choose whatever block interval they want. +//! With block bundling, parachains are relatively free to choose whatever block interval they want. //! The block interval is the time between individual blocks. The available resources per block (max //! block weight) depend on the number of cores allocated to the parachain on the relay chain. Each //! relay chain cores provides an execution time of `2s` and a storage size of `10MiB`. Depending on @@ -47,7 +47,7 @@ #![doc = docify::embed!("src/block_weight/mock.rs", pre_inherents_setup)] //! # Weight per context //! -//! Depending on the context, [`MaxParachainBlockWeight`] may returns a different max weight. The +//! Depending on the context, [`MaxParachainBlockWeight`] may return a different max weight. The //! max weight is only allowed to change in the first block of a core. Otherwise, all blocks need to //! follow the target block weight determined based on the number of cores and the target block //! rate. 
In the case of a first block, the following contexts may allow to access the full core @@ -73,7 +73,7 @@ use frame_support::{ CloneNoBound, DebugNoBound, }; use frame_system::pallet_prelude::BlockNumberFor; -use polkadot_primitives::MAX_POV_SIZE; +use polkadot_primitives::{executor_params::DEFAULT_BACKING_EXECUTION_TIMEOUT, MAX_POV_SIZE}; use scale_info::TypeInfo; use sp_core::Get; use sp_runtime::Digest; @@ -90,12 +90,16 @@ pub use transaction_extension::DynamicMaxBlockWeight; const LOG_TARGET: &str = "runtime::parachain-system::block-weight"; /// Maximum ref time per core -const MAX_REF_TIME_PER_CORE_NS: u64 = 2 * WEIGHT_REF_TIME_PER_SECOND; +const MAX_REF_TIME_PER_CORE_NS: u64 = + DEFAULT_BACKING_EXECUTION_TIMEOUT.as_secs() * WEIGHT_REF_TIME_PER_SECOND; /// The available weight per core on the relay chain. pub(crate) const FULL_CORE_WEIGHT: Weight = Weight::from_parts(MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); // Is set to `true` when we are currently inside of `pre_validate_extrinsic`. +// +// Forces `MaxParachainBlockWeight::get()` to return fractional weight, enabling detection of +// transactions that exceed the fractional target limit. environmental::environmental!(inside_pre_validate: bool); /// The current block weight mode. @@ -214,18 +218,13 @@ impl> let blocks_per_core = target_blocks.div_ceil(number_of_cores); - let ref_time_per_block = MAX_REF_TIME_PER_CORE_NS / blocks_per_core; - // At maximum we want to allow `6s` of ref time, because we don't want to overload nodes // that are running with standard hardware. These nodes need to be able to import all the // blocks in `6s`. 
- let total_ref_time = ref_time_per_block * target_blocks; - let ref_time_per_block = if total_ref_time > 6 * WEIGHT_REF_TIME_PER_SECOND { - ref_time_per_block - - (total_ref_time - 6 * WEIGHT_REF_TIME_PER_SECOND).div_ceil(target_blocks) - } else { - ref_time_per_block - }; + let ref_time_per_block = core::cmp::min( + MAX_REF_TIME_PER_CORE_NS / blocks_per_core, // Core allocation limit + (6 * WEIGHT_REF_TIME_PER_SECOND) / target_blocks, // Full node import limit + ); // PoV size we can use as much as we can get from the cores, but at maximum it is one block // per core. Or in other words, one block can not span across multiple cores. @@ -252,8 +251,12 @@ impl> Get // Check if we are inside `pre_validate_extrinsic` of the transaction extension. // // When `pre_validate_extrinsic` calls this code, it is interested to know the - // `target_block_weight` which is then used to calculate the weight for each dispatch class. - // If `FullCore` mode is already enabled, the target weight is not important anymore. + // fractional `target_block_weight` which is then used to calculate the weight for each + // dispatch class. Fractional weight is returned to detect transactions exceeding the + // fractional target, enabling proper transition to `PotentialFullCore` mode. + // + // If `FullCore` mode is already enabled, the fractional target weight is not important + // anymore. 
let in_pre_validate = inside_pre_validate::with(|v| *v).unwrap_or(false); match crate::BlockWeightMode::::get().filter(|m| !m.is_stale()) { diff --git a/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs b/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs index 4f9074a30eb32..6103bb03c6cc8 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs @@ -65,7 +65,7 @@ where if !is_first_block_in_core { log::error!( target: LOG_TARGET, - "Inherent block logic took longer than the target block weight, THIS IS A BUG!!!", + "Block initialization logic took longer than the target block weight, THIS IS A BUG!!!", ); // We are already above the allowed maximum and do not want to accept any more @@ -77,7 +77,7 @@ where } else { log::debug!( target: LOG_TARGET, - "Inherent block logic took longer than the target block weight, going to use the full core", + "Block initialization logic took longer than the target block weight, going to use the full core", ); } diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index bb850aeec30b9..11fe9411c28af 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -161,7 +161,7 @@ where // If `BlockWeights` is configured correctly, it will internally call `MaxParachainBlockWeight::get()` // and by setting this variable to `true`, we tell it the context. This is important as we want to get - // the `target_block_weight` and not the full core weight. Otherwise, we will here get a too huge weight + // the fractional `target_block_weight` and not the full core weight. 
Otherwise, we will here get a too huge weight // and do not set the `PotentialFullCore` weight, leading to `CheckWeight` rejecting the extrinsic. // // All of this is only important for extrinsics that will enable the `PotentialFullCore` mode. @@ -209,11 +209,10 @@ where .any_gt(target_weight) { // When `ALLOW_NORMAL` is `true`, we want to allow all classes of transactions. Inherents are always allowed. - let class_allowed = if ALLOW_NORMAL { true } else { info.class == DispatchClass::Operational } - || info.class == DispatchClass::Mandatory; + let class_allowed = ALLOW_NORMAL || matches!(info.class, DispatchClass::Operational | DispatchClass::Mandatory); // If the `BundleInfo` digest is not set (function returns `None`), it means we are in some offchain - // call like `validate_block`. In this case we assume this is the first block, otherwise these big + // call like `validate_transaction`. In this case we assume this is the first block, otherwise these big // transactions will never be able to enter the tx pool. let is_first_block = is_first_block_in_core_with_digest(&digest).unwrap_or(true); @@ -238,18 +237,18 @@ where return Err(InvalidTransaction::ExhaustsResources) } - } else if is_potential { - log::trace!( - target: LOG_TARGET, - "Resetting back to `FractionOfCore`" - ); - *mode = - Some(BlockWeightMode::::fraction_of_core(first_transaction_index.or(transaction_index))); } else { - log::trace!( - target: LOG_TARGET, - "Not changing block weight mode" - ); + if is_potential { + log::trace!( + target: LOG_TARGET, + "Resetting back to `FractionOfCore`" + ); + } else { + log::trace!( + target: LOG_TARGET, + "Not changing block weight mode" + ); + } *mode = Some(BlockWeightMode::::fraction_of_core(first_transaction_index.or(transaction_index))); @@ -276,12 +275,8 @@ where .saturating_sub(Config::WeightInfo::block_weight_tx_extension_full_core()), BlockWeightMode::::FractionOfCore { .. 
} => {
            let digest = frame_system::Pallet::::digest();
-            let target_block_weight =
-                MaxParachainBlockWeight::::target_block_weight_with_digest(&digest);
-
-            let is_above_limit = frame_system::Pallet::::remaining_block_weight()
-                .consumed()
-                .any_gt(target_block_weight);
+            let is_above_limit =
+                block_weight_over_target_block_weight::();
 
             // If we are above the limit, it means the transaction used more weight than
             // what it had announced, which should not happen.
diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs
index 13ca4ba200903..602ab1f2dcaa8 100644
--- a/cumulus/primitives/core/src/lib.rs
+++ b/cumulus/primitives/core/src/lib.rs
@@ -253,8 +253,8 @@ pub struct BundleInfo {
 	pub index: u8,
 	/// Is this the last block in the bundle from the point of view of the node?
 	///
-	/// It is possible that at `index` zero the runtime outputs the
-	/// [`CumulusDigestItem::UseFullCore`] that informs the node to use an entire for one block
+	/// It is possible that the runtime outputs the
+	/// [`CumulusDigestItem::UseFullCore`] to inform the node to use an entire core for one block
 	/// only.
 	pub maybe_last: bool,
 }
@@ -303,6 +303,10 @@ pub enum CumulusDigestItem {
 	/// A digest item informing the node that this block should be put alone onto a core.
 	///
 	/// In other words, the core should not be shared with other blocks.
+	///
+	/// Under certain conditions (mainly runtime misconfigurations) the digest is still set when
+	/// there are multiple blocks per core. This is done to communicate to the collator that block
+	/// production for this core should be stopped.
#[codec(index = 3)] UseFullCore, } From 3dbfc10badc330771063829520b6fc08b0c17764 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 10 Feb 2026 12:51:22 +0100 Subject: [PATCH 240/312] Unify checks --- .../collators/slot_based/block_builder_task.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 0da3755829b38..62ad0601b619d 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -537,21 +537,21 @@ where let mut parent_header = pov_parent_header.clone(); for block_index in 0..blocks_per_core { - // TODO: Remove when transaction streaming is implemented + // TODO: With transaction streaming we do not need to skip anything any more and can just + // set `is_last`. + + // If we have more than 3 blocks in total, aka a block time which is less than 2s, we are + // going to skip the last block. Otherwise, when running with 3 blocks, we are just + // adjusting the authoring duration below. + let skip_last_block_in_slot = total_number_of_blocks > 3 && is_last_block_in_core; // We require that the next node has imported our last block before it can start building // the next block. To ensure that the next node is able to do so, we are skipping the last // block in the parachain slot. In the future this can be removed again. let is_last_block_in_core = block_index + 1 == blocks_per_core || // This branch here is for the case when we are going to skip the last block. - (block_index + 2 == blocks_per_core && blocks_per_core > 1); + (block_index + 2 == blocks_per_core && skip_last_block_in_slot); - // If we have more than 3 blocks in total, aka a block time which is less than 2s, we are - // going to skip the last block. 
Otherwise, when running with 3 blocks, we are just - // adjusting the authoring duration below. - if block_index + 1 == blocks_per_core && - total_number_of_blocks > 3 && - is_last_core_in_parachain_slot - { + if block_index + 1 == blocks_per_core && skip_last_block_in_slot { tracing::debug!( target: LOG_TARGET, "Skipping block production so that the next node is able to import all blocks before its slot." From 45b0da9ef9cd0a9f52301e47f51ccfbc951509bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 10 Feb 2026 16:50:06 +0100 Subject: [PATCH 241/312] Distribute the blocks correctly over the cores --- .../slot_based/block_builder_task.rs | 29 ++++++++++++++++--- .../client/proof-size-recording/src/lib.rs | 5 ++-- .../src/block_weight/pre_inherents_hook.rs | 2 +- .../src/block_weight/transaction_extension.rs | 5 ++-- .../polkadot-omni-node/lib/src/nodes/aura.rs | 3 +- cumulus/primitives/core/src/lib.rs | 9 +++--- cumulus/test/runtime/src/test_pallet.rs | 7 +++-- .../tests/zombie_ci/block_bundling/basic.rs | 8 ++--- .../zombie_ci/block_bundling/pov_recovery.rs | 4 +-- .../zombie_ci/block_bundling/tracing_block.rs | 5 ++-- substrate/primitives/trie/src/recorder.rs | 2 +- 11 files changed, 53 insertions(+), 26 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 62ad0601b619d..751d20ffde32f 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -382,11 +382,27 @@ where }, }; - let blocks_per_core = (number_of_blocks / cores.total_cores()).max(1); + // In total we want to have at max `number_of_blocks` cores to use. 
+ cores.truncate_cores(number_of_blocks); + let raw_blocks_per_core = (number_of_blocks / cores.total_cores()).max(1); + let mut left_over_blocks = number_of_blocks % cores.total_cores(); + let blocks_per_cores = (0..cores.total_cores()) + .into_iter() + .map(|_| { + // We distribute the left over blocks across the cores. + raw_blocks_per_core + + if left_over_blocks > 1 { + left_over_blocks -= 1; + 1 + } else { + 0 + } + }) + .collect::>(); tracing::debug!( target: crate::LOG_TARGET, - %blocks_per_core, + ?blocks_per_cores, core_indices = ?cores.core_indices(), "Core configuration", ); @@ -395,7 +411,7 @@ where let mut pov_parent_hash = initial_parent.hash; let block_time = Duration::from_secs(6) / number_of_blocks; - loop { + for blocks_per_core in blocks_per_cores { let time_for_core = slot_time.time_left() / cores.cores_left(); match build_collation_for_core(BuildCollationParams { @@ -543,7 +559,7 @@ where // If we have more than 3 blocks in total, aka a block time which is less than 2s, we are // going to skip the last block. Otherwise, when running with 3 blocks, we are just // adjusting the authoring duration below. - let skip_last_block_in_slot = total_number_of_blocks > 3 && is_last_block_in_core; + let skip_last_block_in_slot = total_number_of_blocks > 3 && is_last_core_in_parachain_slot; // We require that the next node has imported our last block before it can start building // the next block. To ensure that the next node is able to do so, we are skipping the last // block in the parachain slot. In the future this can be removed again. @@ -888,6 +904,11 @@ impl Cores { self.core_indices.len() as u32 } + /// Truncate `cores` to `max_cores`. + pub fn truncate_cores(&mut self, max_cores: u32) { + self.core_indices.truncate(max_cores as usize); + } + /// Returns the number of cores left. 
fn cores_left(&self) -> u32 { self.total_cores() - self.selector.0 as u32 diff --git a/cumulus/client/proof-size-recording/src/lib.rs b/cumulus/client/proof-size-recording/src/lib.rs index 1fcd2fe1d8425..6a71c0dc2ea43 100644 --- a/cumulus/client/proof-size-recording/src/lib.rs +++ b/cumulus/client/proof-size-recording/src/lib.rs @@ -71,8 +71,9 @@ pub fn load_proof_size_recording( match version { None => Ok(None), - Some(PROOF_SIZE_RECORDING_CURRENT_VERSION) => - load_decode(backend, proof_size_recording_key(block_hash).as_slice()), + Some(PROOF_SIZE_RECORDING_CURRENT_VERSION) => { + load_decode(backend, proof_size_recording_key(block_hash).as_slice()) + }, Some(other) => Err(ClientError::Backend(format!( "Unsupported proof size recording DB version: {:?}", other diff --git a/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs b/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs index 6103bb03c6cc8..4cb4af9b32530 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/pre_inherents_hook.rs @@ -57,7 +57,7 @@ where crate::BlockWeightMode::::put(new_mode); - return + return; } let is_first_block_in_core = is_first_block_in_core::().unwrap_or(false); diff --git a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs index 11fe9411c28af..f896b041f9697 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/transaction_extension.rs @@ -270,9 +270,10 @@ where match mode { // If the previous mode was already `FullCore`, we are fine. - BlockWeightMode::::FullCore { .. } => + BlockWeightMode::::FullCore { .. 
} => { Config::WeightInfo::block_weight_tx_extension_max_weight() - .saturating_sub(Config::WeightInfo::block_weight_tx_extension_full_core()), + .saturating_sub(Config::WeightInfo::block_weight_tx_extension_full_core()) + }, BlockWeightMode::::FractionOfCore { .. } => { let digest = frame_system::Pallet::::digest(); let is_above_limit = diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs index 1b5ea9bdf33fa..943c1b6cdb1bc 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs @@ -49,7 +49,8 @@ use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; use cumulus_client_parachain_inherent::MockValidationDataInherentDataProvider; use cumulus_client_service::CollatorSybilResistance; use cumulus_primitives_core::{ - relay_chain::ValidationCode, CollectCollationInfo, GetParachainInfo, ParaId, TargetBlockRate, RelayParentOffsetApi, + relay_chain::ValidationCode, CollectCollationInfo, GetParachainInfo, ParaId, + RelayParentOffsetApi, TargetBlockRate, }; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; use futures::{prelude::*, FutureExt}; diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 6dcbcc39d4797..1d9bf4ffe197e 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -317,8 +317,9 @@ impl CumulusDigestItem { let encoded = self.encode(); match self { - Self::RelayParent(_) | Self::UseFullCore => - DigestItem::Consensus(CUMULUS_CONSENSUS_ID, encoded), + Self::RelayParent(_) | Self::UseFullCore => { + DigestItem::Consensus(CUMULUS_CONSENSUS_ID, encoded) + }, _ => DigestItem::PreRuntime(CUMULUS_CONSENSUS_ID, encoded), } } @@ -406,7 +407,7 @@ impl CumulusDigestItem { let Ok(CumulusDigestItem::BlockBundleInfo(bundle_info)) = CumulusDigestItem::decode_all(&mut &val[..]) else { - return None + return None; }; Some(bundle_info) @@ -423,7 
+424,7 @@ impl CumulusDigestItem { let Ok(CumulusDigestItem::UseFullCore) = CumulusDigestItem::decode_all(&mut &val[..]) else { - return None + return None; }; Some(true) diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index 945d986769daf..4f936a0eadf7f 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -81,7 +81,7 @@ pub mod pallet { tracing::info!("Consuming 1s of weight :)"); // We have enough capacity, consume the flag and register the weight ScheduleWeightRegistration::::kill(); - return weight_to_register + return weight_to_register; } } @@ -318,7 +318,7 @@ pub mod pallet { ) -> ValidateResult { if let Some(call) = call.is_sub_type() { match call { - Call::use_more_weight_than_announced { must_be_first_block_in_core } => + Call::use_more_weight_than_announced { must_be_first_block_in_core } => { if { let digest = frame_system::Pallet::::digest(); @@ -344,7 +344,8 @@ pub mod pallet { Err(TransactionValidityError::Invalid( InvalidTransaction::ExhaustsResources, )) - }, + } + }, _ => Ok((Default::default(), (), origin)), } } else { diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs index f91b0308ab8d6..9100ef127bb86 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs @@ -92,14 +92,14 @@ async fn block_bundling_basic() -> Result<(), anyhow::Error> { let (Some(full_best), Some(best)) = join!(full_best_blocks.next(), collator_best_blocks.next()) else { - return Err(anyhow!("Failed to get a best block from the full node and the collator")) + return Err(anyhow!("Failed to get a best block from the full node and the collator")); }; let diff = full_best?.number().abs_diff(best?.number()); if diff > 12 { return Err(anyhow!( "Best block difference between full 
node and collator of {diff} is too big!" - )) + )); } log::info!("Test finished successfully"); @@ -118,12 +118,12 @@ fn wait_for_block_and_restart_node(node: NetworkNode) -> JoinHandle= 13 { log::info!("Full node has imported block `13`, going to restart it"); - return node.restart(None).await + return node.restart(None).await; } } }) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs index 5fdea17a3258a..50490b735b268 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs @@ -60,7 +60,7 @@ async fn block_bundling_pov_recovery() -> Result<(), anyhow::Error> { .await?; if !result.success() { - return Err(anyhow!("Consensus hook failed at {}: {:?}", collator.name(), result)) + return Err(anyhow!("Consensus hook failed at {}: {:?}", collator.name(), result)); } // Wait (up to 10 seconds) until pattern occurs more than 35 times @@ -83,7 +83,7 @@ async fn block_bundling_pov_recovery() -> Result<(), anyhow::Error> { return Err(anyhow!( "Failed importing blocks using PoV recovery by {}: {result:?}", recovery_target.name() - )) + )); } log::info!("Test finished successfully"); diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs index b04ebff0e6cc4..81cbbd049a5ed 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs @@ -85,8 +85,9 @@ async fn block_bundling_tracing_block() -> Result<(), anyhow::Error> { // Decode and verify the BlockTrace is successful match trace_result { - TraceBlockResponse::TraceError(error) => - Err(anyhow!("Block tracing failed: {}", error.error)), + 
TraceBlockResponse::TraceError(error) => { + Err(anyhow!("Block tracing failed: {}", error.error)) + }, TraceBlockResponse::BlockTrace(_) => { log::info!("✅ Block trace successful!"); Ok(()) diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index 62c7b389b0f43..f3b4617bd56bd 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -450,7 +450,7 @@ impl<'a, H: Hasher> trie_db::TrieRecorder for TrieRecorder<'a, H> { ?hash, "Ignoring node", ); - return + return; } inner.accessed_nodes.entry(hash).or_insert_with(|| { From 33a709678b1484f679c4135ef5a1bded2c536a20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 23 Feb 2026 21:54:31 +0100 Subject: [PATCH 242/312] Split up `can_build_upon` and include the check in the slot based block production --- .../consensus/aura/src/collators/lookahead.rs | 53 +++++++------ .../consensus/aura/src/collators/mod.rs | 58 ++++++++------ .../slot_based/block_builder_task.rs | 77 ++++++++++++++++-- .../src/collators/slot_based/slot_timer.rs | 79 ------------------- 4 files changed, 131 insertions(+), 136 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 7e4e8ee3e28b2..c28379ff624ff 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -325,24 +325,7 @@ where let para_client = &*params.para_client; let keystore = ¶ms.keystore; - let can_build_upon = |block_hash| { - let (slot_now, relay_slot, timestamp) = get_parachain_slot::<_, _, P::Public>( - para_client, - block_hash, - &relay_parent_header, - params.relay_chain_slot_duration, - )?; - - Some(super::can_build_upon::<_, _, P>( - slot_now, - relay_slot, - timestamp, - block_hash, - included_block.hash(), - para_client, - &keystore, - )) - }; + let included_block_hash = 
included_block.hash(); // Build in a loop until not allowed. Note that the authorities can change // at any block, so we need to re-claim our slot every time. @@ -371,14 +354,36 @@ where // This needs to change to support elastic scaling, but for continuously // scheduled chains this ensures that the backlog will grow steadily. for n_built in 0..2 { - let slot_claim = match can_build_upon(parent_hash) { - Some(fut) => match fut.await { - None => break, - Some(c) => c, - }, - None => break, + let Some((slot_now, relay_slot, timestamp)) = + get_parachain_slot::<_, _, P::Public>( + para_client, + parent_hash, + &relay_parent_header, + params.relay_chain_slot_duration, + ) + else { + break; }; + let Some(slot_claim) = + super::claim_slot::<_, _, P>(slot_now, timestamp, parent_hash, para_client, &keystore) + .await + else { + break; + }; + + if !super::can_build_upon::<_, _>( + parent_hash, + included_block_hash, + relay_slot, + slot_now, + para_client, + ) + .await + { + break; + } + tracing::debug!( target: crate::LOG_TARGET, ?relay_parent, diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index a81b73650c217..c238a09b75347 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -211,20 +211,17 @@ async fn claim_queue_at( } } -// Checks if we own the slot at the given block and whether there -// is space in the unincluded segment. -async fn can_build_upon( +// Checks if we own the slot at the given block. 
+async fn claim_slot( para_slot: Slot, - relay_slot: Slot, timestamp: Timestamp, parent_hash: Block::Hash, - included_block: Block::Hash, client: &Client, keystore: &KeystorePtr, ) -> Option> where Client: ProvideRuntimeApi, - Client::Api: AuraApi + AuraUnincludedSegmentApi + ApiExt, + Client::Api: AuraApi + ApiExt, P: Pair, P::Public: Codec, P::Signature: Codec, @@ -232,27 +229,46 @@ where let runtime_api = client.runtime_api(); let authorities = runtime_api.authorities(parent_hash).ok()?; let author_pub = aura_internal::claim_slot::

(para_slot, &authorities, keystore).await?; + Some(SlotClaim::unchecked::

(author_pub, para_slot, timestamp)) +} +// Checks if there is space in the unincluded segment. +async fn can_build_upon( + parent_hash: Block::Hash, + included_block: Block::Hash, + relay_slot: Slot, + para_slot: Slot, + client: &Client, +) -> bool +where + Client: ProvideRuntimeApi, + Client::Api: AuraUnincludedSegmentApi + ApiExt, +{ // This function is typically called when we want to build block N. At that point, the // unincluded segment in the runtime is unaware of the hash of block N-1. If the unincluded // segment in the runtime is full, but block N-1 is the included block, the unincluded segment // should have length 0 and we can build. Since the hash is not available to the runtime // however, we need this extra check here. if parent_hash == included_block { - return Some(SlotClaim::unchecked::

(author_pub, para_slot, timestamp)); + return true; } - let api_version = runtime_api + let runtime_api = client.runtime_api(); + let api_version = match runtime_api .api_version::>(parent_hash) .ok() - .flatten()?; + .flatten() + { + Some(v) => v, + None => return false, + }; let slot = if api_version > 1 { relay_slot } else { para_slot }; runtime_api .can_build_upon(parent_hash, included_block, slot) - .ok()? - .then(|| SlotClaim::unchecked::

(author_pub, para_slot, timestamp)) + .ok() + .unwrap_or(false) } /// Use [`cumulus_client_consensus_common::find_potential_parents`] to find parachain blocks that @@ -317,7 +333,7 @@ where #[cfg(test)] mod tests { use super::*; - use crate::collators::{can_build_upon, BackingGroupConnectionHelper}; + use crate::collators::BackingGroupConnectionHelper; use codec::Encode; use cumulus_primitives_aura::Slot; use cumulus_primitives_core::BlockT; @@ -404,12 +420,10 @@ mod tests { let mut last_hash = genesis_hash; // Fill up the unincluded segment tracker in the runtime. - while can_build_upon::<_, _, sp_consensus_aura::sr25519::AuthorityPair>( - Slot::from(u64::MAX), + while claim_slot::<_, _, sp_consensus_aura::sr25519::AuthorityPair>( Slot::from(u64::MAX), Timestamp::default(), last_hash, - genesis_hash, &*client, &keystore, ) @@ -422,17 +436,9 @@ mod tests { // Blocks were built with the genesis hash set as included block. // We call `can_build_upon` with the last built block as the included block. 
- let result = can_build_upon::<_, _, sp_consensus_aura::sr25519::AuthorityPair>( - Slot::from(u64::MAX), - Slot::from(u64::MAX), - Timestamp::default(), - last_hash, - last_hash, - &*client, - &keystore, - ) - .await; - assert!(result.is_some()); + let result = + can_build_upon::<_, _>(last_hash, last_hash, Slot::from(u64::MAX), Slot::from(u64::MAX), &*client).await; + assert!(result); } /// Helper to create a mock overseer handle and message recorder diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 751d20ffde32f..37daac6ed9bbe 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -46,7 +46,7 @@ use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; use sc_consensus::BlockImport; use sc_consensus_aura::SlotDuration; use sc_network_types::PeerId; -use sp_api::{ProofRecorder, ProvideRuntimeApi, StorageProof}; +use sp_api::{ApiExt, ProofRecorder, ProvideRuntimeApi, StorageProof}; use sp_application_crypto::AppPublic; use sp_block_builder::BlockBuilder; use sp_blockchain::HeaderBackend; @@ -300,12 +300,10 @@ where connection_helper.update::

(slot_info.slot, &authorities).await; } - let Some(slot_claim) = crate::collators::can_build_upon::<_, _, P>( + let Some(slot_claim) = crate::collators::claim_slot::<_, _, P>( slot_info.slot, - relay_slot, slot_info.timestamp, initial_parent.hash, - included_header_hash, &*para_client, &keystore, ) @@ -437,6 +435,10 @@ where collator_peer_id, relay_parent_data: rp_data.clone(), total_number_of_blocks: number_of_blocks, + included_header_hash, + relay_slot, + para_slot: slot_info.slot, + para_client: &*para_client, }) .await { @@ -458,7 +460,18 @@ where } /// Parameters for [`build_collation_for_core`]. -struct BuildCollationParams<'a, Block: BlockT, P: Pair, RelayClient, BI, CIDP, Proposer, CS, CHP> { +struct BuildCollationParams< + 'a, + Block: BlockT, + P: Pair, + RelayClient, + BI, + CIDP, + Proposer, + CS, + CHP, + Client, +> { pov_parent_header: Block::Header, pov_parent_hash: Block::Hash, relay_parent_header: &'a RelayHeader, @@ -481,12 +494,26 @@ struct BuildCollationParams<'a, Block: BlockT, P: Pair, RelayClient, BI, CIDP, P collator_peer_id: PeerId, relay_parent_data: RelayParentData, total_number_of_blocks: u32, + included_header_hash: Block::Hash, + relay_slot: cumulus_primitives_aura::Slot, + para_slot: cumulus_primitives_aura::Slot, + para_client: &'a Client, } /// Build a collation for one core. /// /// One collation can be composed of multiple blocks. 
-async fn build_collation_for_core( +async fn build_collation_for_core< + Block: BlockT, + P, + RelayClient, + BI, + CIDP, + Proposer, + CS, + CHP, + Client, +>( BuildCollationParams { pov_parent_header, pov_parent_hash, @@ -509,7 +536,11 @@ async fn build_collation_for_core, + included_header_hash, + relay_slot, + para_slot, + para_client, + }: BuildCollationParams<'_, Block, P, RelayClient, BI, CIDP, Proposer, CS, CHP, Client>, ) -> Result, ()> where RelayClient: RelayChainInterface + 'static, @@ -522,6 +553,8 @@ where Proposer: Environment + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, CHP: consensus_common::ValidationCodeHashProvider + Send + Sync + 'static, + Client: ProvideRuntimeApi, + Client::Api: AuraUnincludedSegmentApi + ApiExt, { let core_start = Instant::now(); @@ -553,6 +586,25 @@ where let mut parent_header = pov_parent_header.clone(); for block_index in 0..blocks_per_core { + // Check if we can build the next block + if !crate::collators::can_build_upon::( + parent_hash, + included_header_hash, + relay_slot, + para_slot, + para_client, + ) + .await + { + tracing::debug!( + target: LOG_TARGET, + ?parent_hash, + ?included_header_hash, + "Cannot build next block due to unincluded segment constraints" + ); + break; + } + // TODO: With transaction streaming we do not need to skip anything any more and can just // set `is_last`. 
@@ -726,6 +778,17 @@ where } } + if blocks.is_empty() { + tracing::debug!( + target: LOG_TARGET, + ?core_index, + relay_parent = ?relay_parent_hash, + "Did not build any blocks, returning" + ); + + return Ok(None); + } + let proof = StorageProof::merge(proofs); tracing::trace!( diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs index 13173b6ce6303..92420b5c42b7e 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs @@ -101,87 +101,8 @@ fn duration_now() -> Duration { }) } -<<<<<<< HEAD /// Returns the duration until the next block production slot and the timestamp at this slot. fn time_until_next_slot( -||||||| 9972470602d -/// Adjust the authoring duration. -fn adjust_authoring_duration( - mut authoring_duration: Duration, - next_block: (Duration, Slot), - next_slot_change: (Duration, Slot), - different_authors: bool, -) -> Option { - let (duration, next_block_slot) = next_block; - let (duration_until_next_slot, next_slot) = next_slot_change; - - // The authoring of blocks must stop 1 second before the slot ends. - let duration_until_deadline = - duration_until_next_slot.saturating_sub(BLOCK_PRODUCTION_ADJUSTMENT_MS); - tracing::debug!( - target: LOG_TARGET, - ?authoring_duration, - ?duration, - ?next_block_slot, - ?duration_until_next_slot, - ?next_slot, - ?duration_until_deadline, - ?different_authors, - "Adjusting authoring duration for slot.", - ); - - // Ensure no blocks are produced in the last second of the slot, - // regardless of authoring duration. - if duration_until_deadline == Duration::ZERO { - if different_authors { - tracing::warn!( - target: LOG_TARGET, - ?duration_until_next_slot, - ?next_slot, - "Not enough time left in the slot to adjust authoring duration. Skipping block production for the slot." 
- ); - - return None; - } - - // If authors are the same, we can still attempt producing the block - // considering the next block duration. - return Some(authoring_duration.min(duration)); - } - - // Clamp the authoring duration to fit into the slot deadline only if authors are different. - // For most cases, the deadline is farther in the future than the authoring duration. - if different_authors && authoring_duration >= duration_until_deadline { - authoring_duration = duration_until_deadline; - - // Ensure we are not going below the minimum interval within a reasonable threshold. - // For 12 cores, we might have a scenario where the last 3 blocks are skipped: - // - Block 10: next slot change in 1.493s: - // - After adjusting the deadline: 1.493s - 1s = 0.493s the block could be produced - // without issues. - // - Block 11: next slot change in 0.993s - skipped by the deadline - // - Block 12: next slot change in 0.493s - skipped by the deadline - if authoring_duration < - BLOCK_PRODUCTION_MINIMUM_INTERVAL_MS.saturating_sub(BLOCK_PRODUCTION_THRESHOLD_MS) - { - tracing::debug!( - target: LOG_TARGET, - ?authoring_duration, - ?next_slot, - "Authoring duration is below minimum. Skipping block production for the slot." - ); - return None; - } - } - - // The `duration` intends to slightly adjust when then block production - // attempt happens. This goes slightly below the `BLOCK_PRODUCTION_MINIMUM_INTERVAL_MS` - // threshold. - Some(authoring_duration.min(duration)) -} - -/// Returns the duration until the next block production should be attempted. 
-fn time_until_next_attempt( now: Duration, block_production_interval: Duration, offset: Duration, From 9dd5c92487a054f687c420cc801f98119125fa35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 24 Feb 2026 22:04:05 +0100 Subject: [PATCH 243/312] Fixes etc --- .../consensus/aura/src/collators/lookahead.rs | 25 +++++++++++-------- .../consensus/aura/src/collators/mod.rs | 10 ++++++-- .../slot_based/block_builder_task.rs | 20 +++++++-------- .../zombienet-sdk-helpers/src/lib.rs | 7 +++--- .../tests/zombie_ci/block_bundling/basic.rs | 10 +++++--- .../full_core_usage_scenarios.rs | 6 +++-- .../zombie_ci/block_bundling/pov_recovery.rs | 6 ++--- .../block_bundling/relay_parent_offset.rs | 10 +++++--- .../block_bundling/runtime_upgrade.rs | 6 +++-- .../block_bundling/three_cores_glutton.rs | 2 +- .../zombie_ci/block_bundling/tracing_block.rs | 10 +++++--- .../elastic_scaling/asset_hub_westend.rs | 4 +-- .../tests/zombie_ci/elastic_scaling/mod.rs | 1 - .../elastic_scaling/upgrade_to_3_cores.rs | 3 +-- .../collators_reputation_persistence.rs | 1 + 15 files changed, 70 insertions(+), 51 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 449a38b2ba99a..255f323e5e931 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -367,20 +367,23 @@ where // This needs to change to support elastic scaling, but for continuously // scheduled chains this ensures that the backlog will grow steadily. 
for n_built in 0..2u32 { - let Some((slot_now, relay_slot, timestamp)) = - get_parachain_slot::<_, _, P::Public>( - para_client, - parent_hash, - &relay_parent_header, - params.relay_chain_slot_duration, - ) - else { + let Some((slot_now, relay_slot, timestamp)) = get_parachain_slot::<_, _, P::Public>( + para_client, + parent_hash, + &relay_parent_header, + params.relay_chain_slot_duration, + ) else { break; }; - let Some(slot_claim) = - super::claim_slot::<_, _, P>(slot_now, timestamp, parent_hash, para_client, &keystore) - .await + let Some(slot_claim) = super::claim_slot::<_, _, P>( + slot_now, + timestamp, + parent_hash, + para_client, + &keystore, + ) + .await else { break; }; diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 750d7c6196522..b52ccc24302ba 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -431,8 +431,14 @@ mod tests { // Blocks were built with the genesis hash set as included block. // We call `can_build_upon` with the last built block as the included block. 
- let result = - can_build_upon::<_, _>(last_hash, last_hash, Slot::from(u64::MAX), Slot::from(u64::MAX), &*client).await; + let result = can_build_upon::<_, _>( + last_hash, + last_hash, + Slot::from(u64::MAX), + Slot::from(u64::MAX), + &*client, + ) + .await; assert!(result); } diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 7cb7fba82eca8..6e872f2419803 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -234,6 +234,7 @@ where continue; }; + // Use the slot calculated from relay parent let Some(para_slot) = adjust_para_to_relay_parent_slot( rp_data.relay_parent(), relay_chain_slot_duration, @@ -242,9 +243,6 @@ where continue; }; - // Use the slot calculated from relay parent - let slot_info = para_slot; - let relay_parent = rp_data.relay_parent().hash(); let relay_parent_header = rp_data.relay_parent().clone(); @@ -307,12 +305,12 @@ where let included_header_hash = included_header.hash(); if let Ok(authorities) = para_client.runtime_api().authorities(initial_parent_hash) { - connection_helper.update::

(slot_info.slot, &authorities).await; + connection_helper.update::

(para_slot.slot, &authorities).await; } let Some(slot_claim) = crate::collators::claim_slot::<_, _, P>( - slot_info.slot, - slot_info.timestamp, + para_slot.slot, + para_slot.timestamp, initial_parent_hash, &*para_client, &keystore, @@ -327,7 +325,7 @@ where included_hash = ?included_header_hash, included_num = %included_header.number(), initial_parent = ?initial_parent_hash, - slot = ?slot_info.slot, + slot = ?para_slot.slot, "Not eligible to claim slot." ); continue; @@ -342,7 +340,7 @@ where included_hash = ?included_header_hash, included_num = %included_header.number(), initial_parent = ?initial_parent_hash, - slot = ?slot_info.slot, + slot = ?para_slot.slot, "Claiming slot." ); @@ -447,7 +445,7 @@ where total_number_of_blocks: number_of_blocks, included_header_hash, relay_slot, - para_slot: slot_info.slot, + para_slot: para_slot.slot, para_client: &*para_client, }) .await @@ -564,7 +562,9 @@ where CS: CollatorServiceInterface + Send + Sync + 'static, CHP: consensus_common::ValidationCodeHashProvider + Send + Sync + 'static, Client: ProvideRuntimeApi, - Client::Api: AuraUnincludedSegmentApi + ApiExt + cumulus_primitives_core::KeyToIncludeInRelayProof, + Client::Api: AuraUnincludedSegmentApi + + ApiExt + + cumulus_primitives_core::KeyToIncludeInRelayProof, { let core_start = Instant::now(); diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index c3bf193d052fe..85aa0833e0751 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -132,11 +132,12 @@ pub async fn assert_para_throughput( ); for (para_id, expected_candidate_range) in expected_candidate_ranges { - let receipts = candidate_count + let actual = candidate_count .get(¶_id) - .ok_or_else(|| anyhow!("ParaId {} did not have any backed candidates", para_id))?; + .ok_or_else(|| anyhow!("ParaId {} did not have any backed candidates", para_id))? 
+ .len() as u32; - if !expected_candidate_range.contains(&(receipts.len() as u32)) { + if !expected_candidate_range.contains(&actual) { return Err(anyhow!( "Candidate count {actual} not within range {expected_candidate_range:?}" )); diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs index 9100ef127bb86..76e3cc6581f26 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs @@ -156,10 +156,12 @@ async fn build_network_config() -> Result { } } })) - // Have to set a `with_node` outside of the loop below, so that `r` has the right - // type. - .with_node(|node| node.with_name("validator-0")); - (1..9).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) + // Have to set a `with_validator` outside of the loop below, so that `r` has the + // right type. 
+ .with_validator(|node| node.with_name("validator-0")); + (1..9).fold(r, |acc, i| { + acc.with_validator(|node| node.with_name(&format!("validator-{i}"))) + }) }) .with_parachain(|p| { p.with_id(PARA_ID) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs index d395dd553748b..6eccfbda631da 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs @@ -223,8 +223,10 @@ async fn build_network_config() -> Result { } } })) - .with_node(|node| node.with_name("validator-0")); - (1..9).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) + .with_validator(|node| node.with_name("validator-0")); + (1..9).fold(r, |acc, i| { + acc.with_validator(|node| node.with_name(&format!("validator-{i}"))) + }) }) .with_parachain(|p| { p.with_id(PARA_ID) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs index 50490b735b268..369a824c2c4fb 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs @@ -135,12 +135,12 @@ async fn build_network_config() -> Result { } } })) - // Have to set a `with_node` outside of the loop below, so that `r` has the right + // Have to set a `with_validator` outside of the loop below, so that `r` has the right // type. 
- .with_node(|node| node.with_name("alice").with_args(vec![])); + .with_validator(|node| node.with_name("alice").with_args(vec![])); (0..4).fold(r, |acc, i| { - acc.with_node(|node| { + acc.with_validator(|node| { node.with_name(&format!("validator-{i}")).with_args(vec![ ("-lruntime=debug,parachain=trace").into(), ("--reserved-only").into(), diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/relay_parent_offset.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/relay_parent_offset.rs index 4013bbf6b05c2..073fda9389e4f 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/relay_parent_offset.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/relay_parent_offset.rs @@ -41,11 +41,13 @@ async fn block_bundling_relay_parent_offset() -> Result<(), anyhow::Error> { } } })) - // Have to set a `with_node` outside of the loop below, so that `r` has the right - // type. - .with_node(|node| node.with_name("validator-0")); + // Have to set a `with_validator` outside of the loop below, so that `r` has the + // right type. 
+ .with_validator(|node| node.with_name("validator-0")); - (1..6).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) + (1..6).fold(r, |acc, i| { + acc.with_validator(|node| node.with_name(&format!("validator-{i}"))) + }) }) .with_parachain(|p| { p.with_id(2400) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs index 841846c57fb2e..1756787777a4d 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs @@ -169,8 +169,10 @@ async fn build_network_config() -> Result { } } })) - .with_node(|node| node.with_name("validator-0")); - (1..9).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) + .with_validator(|node| node.with_name("validator-0")); + (1..9).fold(r, |acc, i| { + acc.with_validator(|node| node.with_name(&format!("validator-{i}"))) + }) }) .with_parachain(|p| { p.with_id(PARA_ID) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs index fef0e87a02507..c569a95f5fd02 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs @@ -90,7 +90,7 @@ async fn build_network_config() -> Result { } } })) - .with_node(|node| node.with_name("validator-0")); + .with_validator(|node| node.with_name("validator-0")); (1..9).fold(r, |acc, i| { acc.with_validator(|node| node.with_name(&format!("validator-{i}"))) }) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs index 
81cbbd049a5ed..baf1e719d1a57 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs @@ -122,10 +122,12 @@ async fn build_network_config() -> Result { } } })) - // Have to set a `with_node` outside of the loop below, so that `r` has the right - // type. - .with_node(|node| node.with_name("validator-0")); - (1..9).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) + // Have to set a `with_validator` outside of the loop below, so that `r` has the + // right type. + .with_validator(|node| node.with_name("validator-0")); + (1..9).fold(r, |acc, i| { + acc.with_validator(|node| node.with_name(&format!("validator-{i}"))) + }) }) .with_parachain(|p| { p.with_id(PARA_ID) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/asset_hub_westend.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/asset_hub_westend.rs index ea18fb310b322..f05f604941f3d 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/asset_hub_westend.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/asset_hub_westend.rs @@ -86,14 +86,14 @@ async fn elastic_scaling_asset_hub_westend() -> Result<(), anyhow::Error> { assign_cores(&relay_client, PARA_ID, vec![0]).await?; - assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 3..18)]).await?; + assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 3..18)], []).await?; // 1 core is assigned by default, we are assigning 2 more cores: 0 and 1. 
assign_cores(&relay_client, PARA_ID, vec![1]).await?; log::info!("Ensure elastic scaling works, 3 blocks should be produced in each 6s slot"); - assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 50..61)]).await?; + assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 50..61)], []).await?; log::info!("Test finished successfully."); diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/mod.rs index 48f5506b3ef05..6bc71464fdcbc 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/mod.rs @@ -16,7 +16,6 @@ // limitations under the License. mod asset_hub_westend; -mod multiple_blocks_per_slot; mod pov_recovery; mod slot_based_authoring; mod slot_based_rp_offset; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs index 0302afe425892..865df2914fd68 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs @@ -1,6 +1,7 @@ // Copyright (C) Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 +use crate::utils::initialize_network; use anyhow::anyhow; use cumulus_test_runtime::{ elastic_scaling::WASM_BINARY_BLOATY as WASM_ELASTIC_SCALING, @@ -32,8 +33,6 @@ const PARA_ID: u32 = 2000; async fn elastic_scaling_upgrade_to_3_cores( #[case] async_backing: bool, ) -> Result<(), anyhow::Error> { - use crate::utils::initialize_network; - let _ = env_logger::try_init_from_env( env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), ); diff --git a/polkadot/zombienet-sdk-tests/tests/functional/collators_reputation_persistence.rs b/polkadot/zombienet-sdk-tests/tests/functional/collators_reputation_persistence.rs index 6c15fd385389e..43e1504bad0a3 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/collators_reputation_persistence.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/collators_reputation_persistence.rs @@ -187,6 +187,7 @@ async fn comprehensive_reputation_persistence_test() -> Result<(), anyhow::Error &relay_client, 5, [(ParaId::from(PARA_ID_1), 3..7), (ParaId::from(PARA_ID_2), 3..7)], + [], ) .await?; From 7b2363c086ab3e0ee0675822b394f924d8868a6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 25 Feb 2026 12:17:46 +0100 Subject: [PATCH 244/312] Fix the `pov_recovery` tests --- .../aura/src/collators/slot_based/block_builder_task.rs | 2 +- .../tests/zombie_ci/block_bundling/pov_recovery.rs | 3 +-- .../tests/zombie_ci/elastic_scaling/pov_recovery.rs | 4 ++-- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 6e872f2419803..6070546af5951 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -397,7 +397,7 @@ where .map(|_| { // We distribute the left over blocks across 
the cores. raw_blocks_per_core + - if left_over_blocks > 1 { + if left_over_blocks > 0 { left_over_blocks -= 1; 1 } else { diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs index 369a824c2c4fb..3a3009a5179d3 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs @@ -163,9 +163,8 @@ async fn build_network_config() -> Result { .with_limit_cpu(2) .with_limit_memory("4G") }) - .with_collator(|n| + .with_fullnode(|n| n.with_name("recovery-target") - .validator(false) .with_args(vec![ ("-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug").into(), ("--disable-block-announcements").into(), diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs index 53238e110c540..de562bc7928cf 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs @@ -165,8 +165,8 @@ async fn build_network_config() -> Result { .with_limit_cpu(2) .with_limit_memory("4G") }) - .with_collator(|n| { - n.with_name("recovery-target").validator(false).with_args(vec![ + .with_fullnode(|n| { + n.with_name("recovery-target").with_args(vec![ ("-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug").into(), ("--disable-block-announcements").into(), ("--in-peers", "0").into(), From 583ed3573800d0242c4b6dae110333e8990a4ccb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 26 Feb 2026 22:12:31 +0100 Subject: [PATCH 245/312] Adds the block bundling zombienet tests --- .../zombienet_cumulus_tests.yml | 
43 +++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/.github/zombienet-tests/zombienet_cumulus_tests.yml b/.github/zombienet-tests/zombienet_cumulus_tests.yml index a991d4930450d..2cbf7636f0260 100644 --- a/.github/zombienet-tests/zombienet_cumulus_tests.yml +++ b/.github/zombienet-tests/zombienet_cumulus_tests.yml @@ -87,3 +87,46 @@ cumulus-image: "test-parachain" use-zombienet-sdk: true needs-wasm-binary: true + +- job-name: "zombienet-cumulus-0016-block_bundling_basic" + test-filter: "zombie_ci::block_bundling::basic::block_bundling_basic" + runner-type: "default" + cumulus-image: "test-parachain" + use-zombienet-sdk: true + +- job-name: "zombienet-cumulus-0017-block_bundling_pov_recovery" + test-filter: "zombie_ci::block_bundling::pov_recovery::block_bundling_pov_recovery" + runner-type: "default" + cumulus-image: "test-parachain" + use-zombienet-sdk: true + +- job-name: "zombienet-cumulus-0018-block_bundling_full_core_usage_scenarios" + test-filter: "zombie_ci::block_bundling::full_core_usage_scenarios::block_bundling_full_core_usage_scenarios" + runner-type: "default" + cumulus-image: "test-parachain" + use-zombienet-sdk: true + +- job-name: "zombienet-cumulus-0019-block_bundling_tracing_block" + test-filter: "zombie_ci::block_bundling::tracing_block::block_bundling_tracing_block" + runner-type: "default" + cumulus-image: "test-parachain" + use-zombienet-sdk: true + +- job-name: "zombienet-cumulus-0020-block_bundling_relay_parent_offset" + test-filter: "zombie_ci::block_bundling::relay_parent_offset::block_bundling_relay_parent_offset" + runner-type: "default" + cumulus-image: "test-parachain" + use-zombienet-sdk: true + +- job-name: "zombienet-cumulus-0021-block_bundling_runtime_upgrade" + test-filter: "zombie_ci::block_bundling::runtime_upgrade::block_bundling_runtime_upgrade" + runner-type: "default" + cumulus-image: "test-parachain" + use-zombienet-sdk: true + needs-wasm-binary: true + +- job-name: 
"zombienet-cumulus-0022-block_bundling_three_cores_glutton" + test-filter: "zombie_ci::block_bundling::three_cores_glutton::block_bundling_three_cores_glutton" + runner-type: "default" + cumulus-image: "test-parachain" + use-zombienet-sdk: true \ No newline at end of file From 8e173280404496be1ae63e7bf4a28afc5ea7962f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 9 Mar 2026 22:41:32 +0100 Subject: [PATCH 246/312] Comments --- cumulus/client/collator/src/service.rs | 2 +- .../aura/src/collators/slot_based/block_builder_task.rs | 2 +- .../aura/src/collators/slot_based/block_import.rs | 7 +++++-- .../aura/src/collators/slot_based/collation_task.rs | 7 ++++++- .../consensus/aura/src/collators/slot_based/slot_timer.rs | 6 +++--- .../tests/zombie_ci/elastic_scaling/pov_recovery.rs | 1 - polkadot/node/subsystem-util/src/runtime/mod.rs | 6 +++--- substrate/client/tracing/src/block/mod.rs | 2 +- substrate/frame/support/src/traits/hooks.rs | 8 ++++---- 9 files changed, 24 insertions(+), 17 deletions(-) diff --git a/cumulus/client/collator/src/service.rs b/cumulus/client/collator/src/service.rs index 156aea58a67c1..2c004089d66c3 100644 --- a/cumulus/client/collator/src/service.rs +++ b/cumulus/client/collator/src/service.rs @@ -285,7 +285,7 @@ where found_separator = true; None } else if found_separator { - if upward_message_signals.iter().all(|s| *s != m) { + if !upward_message_signals.contains(&m) { upward_message_signals.push(m); } None diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 3feeb17353514..8d156aa624527 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -419,7 +419,7 @@ where let mut pov_parent_header = initial_parent_header; let mut pov_parent_hash = initial_parent_hash; - let block_time 
= Duration::from_secs(6) / number_of_blocks; + let block_time = relay_chain_slot_duration / number_of_blocks; for blocks_per_core in blocks_per_cores { let time_for_core = slot_time.time_left() / cores.cores_left(); diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs index a045768c4d5e1..bffb4dcc993e4 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -62,7 +62,7 @@ fn load_ignored_nodes( match backend.get_aux(&ignored_nodes_key(block_hash))? { None => Ok(None), Some(t) => ProofRecorderIgnoredNodes::::decode(&mut &t[..]).map(Some).map_err(|e| { - ClientError::Backend(format!("Nodes to ignore DB is corrupted. Decode error: {}", e)) + ClientError::Backend(format!("Ignored nodes DB: decode error: {}", e)) }), } } @@ -201,7 +201,7 @@ impl SlotBasedBlockImport)?; let storage_proof = @@ -218,6 +218,9 @@ impl SlotBasedBlockImport::from_storage_proof::< HashingFor, >(&storage_proof)); diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs index 2d4d055df07ae..772e33000662b 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs @@ -162,7 +162,12 @@ async fn handle_collation_message>(), + "Submitting collation for core.", + ); overseer_handle .send_msg( diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs index 465524c2f223e..b0ff80e6c56b7 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs @@ -28,12 +28,12 @@ pub(crate) struct SlotInfo { pub slot: 
Slot, } -/// Information about a slot timing, including the slot duration and exact start timestamp. +/// Information about a slot timing, including the relay chain slot duration and exact start timestamp. #[derive(Debug, Clone)] pub(crate) struct SlotTime { - /// The slot duration used for this timing + /// The relay chain slot duration used for this timing slot_duration: Duration, - /// The exact timestamp when this slot started + /// The exact timestamp when this relay chain slot started slot_start_timestamp: Timestamp, /// Time offset to apply when calculating time remaining time_offset: Duration, diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs index de562bc7928cf..220ba829912ae 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs @@ -108,7 +108,6 @@ async fn build_network_config() -> Result { // - full node // - collator // - collator which is the only one producing blocks - NetworkConfigBuilder::new() .with_relaychain(|r| { let r = r diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 8de7eeebf840f..3508251dde0cd 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -537,14 +537,14 @@ impl ClaimQueueSnapshot { self.0.iter() } - /// Find cores for the given `para_id` at the given `claim_queue_offset`. + /// Find the earliest cores for the given `para_id` starting from the given `claim_queue_offset`. /// /// It is not guaranteed that at the given `claim_queue_offset` cores are available for /// the `para_id`. Thus, the claim queue offset for the core indices is returned as well. 
pub fn find_cores( &self, para_id: ParaId, - claim_queue_offset: u32, + claim_queue_offset: u8, ) -> Option<(Vec, ClaimQueueOffset)> { let mut offset_to_cores = BTreeMap::>::new(); @@ -558,7 +558,7 @@ impl ClaimQueueSnapshot { }); offset_to_cores.into_iter().find_map(|(offset, cores)| { - if (offset as u32) >= claim_queue_offset { + if offset >= claim_queue_offset as usize { Some((cores, ClaimQueueOffset(offset as u8))) } else { None diff --git a/substrate/client/tracing/src/block/mod.rs b/substrate/client/tracing/src/block/mod.rs index 64062fd9b399c..ea6e01b87e6d2 100644 --- a/substrate/client/tracing/src/block/mod.rs +++ b/substrate/client/tracing/src/block/mod.rs @@ -53,7 +53,7 @@ const TRACE_TARGET: &str = "block_trace"; const REQUIRED_EVENT_FIELD: &str = "method"; /// Something that can execute a block in a tracing context. -pub trait TracingExecuteBlock: Send + Sync + Send + Sync { +pub trait TracingExecuteBlock: Send + Sync { /// Execute the given `block`. /// /// The `block` is prepared to be executed right away, this means that any `Seal` was already diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs index b4fc657f37d2e..d74aba5c6c95e 100644 --- a/substrate/frame/support/src/traits/hooks.rs +++ b/substrate/frame/support/src/traits/hooks.rs @@ -128,13 +128,13 @@ impl_for_tuples_attr! 
{ fn on_idle(n: BlockNumber, remaining_weight: Weight) -> Weight { let on_idle_functions: &[fn(BlockNumber, Weight) -> Weight] = &[for_tuples!( #( Tuple::on_idle ),* )]; + if on_idle_functions.is_empty() { + return Weight::zero(); + } + let mut weight = Weight::zero(); let len = on_idle_functions.len(); - if len == 0 { - return Weight::zero() - } - let start_index = n % (len as u32).into(); let start_index = start_index.try_into().ok().expect( "`start_index % len` always fits into `usize`, because `len` can be in maximum `usize::MAX`; qed" From 1c8302e0abe200be86324b56889aebd1f47bc75f Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 16 Mar 2026 13:22:00 +0100 Subject: [PATCH 247/312] Reintroduce slot-based handle --- .../src/collators/slot_based/block_import.rs | 53 +++++++++++++++++-- .../collators/slot_based/collation_task.rs | 10 +++- .../aura/src/collators/slot_based/mod.rs | 10 ++-- .../polkadot-omni-node/lib/src/nodes/aura.rs | 12 +++-- cumulus/test/service/src/lib.rs | 9 +++- 5 files changed, 78 insertions(+), 16 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs index bffb4dcc993e4..94491cb7088b3 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -19,15 +19,17 @@ use crate::LOG_TARGET; use codec::{Codec, Decode, Encode}; use cumulus_client_proof_size_recording::prepare_proof_size_recording_transaction; use cumulus_primitives_core::{BlockBundleInfo, CoreInfo, CumulusDigestItem, RelayBlockIdentifier}; +use futures::{stream::FusedStream, StreamExt}; use sc_client_api::{ backend::AuxStore, client::{AuxDataOperations, FinalityNotification, PreCommitActions}, HeaderBackend, }; use sc_consensus::{BlockImport, StateAction}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_api::{ 
ApiExt, CallApiAt, CallContext, Core, ProofRecorder, ProofRecorderIgnoredNodes, - ProvideRuntimeApi, + ProvideRuntimeApi, StorageProof, }; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sp_consensus::BlockOrigin; @@ -67,6 +69,29 @@ fn load_ignored_nodes( } } +/// Handle for receiving the block and the storage proof from the [`SlotBasedBlockImport`]. +/// +/// This handle should be passed to [`Params`](super::Params) or can also be dropped if the node is +/// not running as collator. +pub struct SlotBasedBlockImportHandle { + receiver: TracingUnboundedReceiver<(Block, StorageProof)>, +} + +impl SlotBasedBlockImportHandle { + /// Returns the next item. + /// + /// The future will never return when the internal channel is closed. + pub async fn next(&mut self) -> (Block, StorageProof) { + loop { + if self.receiver.is_terminated() { + futures::pending!() + } else if let Some(res) = self.receiver.next().await { + return res + } + } + } +} + /// Register the clean up method for cleaning ignored nodes from blocks on which no further blocks /// will be imported. fn register_ignored_nodes_cleanup(client: Arc) @@ -94,18 +119,31 @@ where pub struct SlotBasedBlockImport { inner: BI, client: Arc, - _phantom: PhantomData<(AuthorityId, Block)>, + sender: TracingUnboundedSender<(Block, StorageProof)>, + _phantom: PhantomData, } impl SlotBasedBlockImport { /// Create a new instance. - pub fn new(inner: BI, client: Arc) -> Self + /// + /// The returned [`SlotBasedBlockImportHandle`] needs to be passed to the + /// [`Params`](super::Params), so that this block import instance can communicate with the + /// collation task. If the node is not running as a collator, just dropping the handle is fine. 
+ pub fn new( + inner: BI, + client: Arc, + ) -> (Self, SlotBasedBlockImportHandle) where Client: PreCommitActions, { + let (sender, receiver) = tracing_unbounded("SlotBasedBlockImportChannel", 1000); + register_ignored_nodes_cleanup(client.clone()); - Self { client, inner, _phantom: PhantomData } + ( + Self { sender, client, inner, _phantom: PhantomData }, + SlotBasedBlockImportHandle { receiver }, + ) } /// Get the [`ProofRecorderIgnoredNodes`] for `parent`. @@ -261,7 +299,12 @@ impl Clone for SlotBasedBlockImport { fn clone(&self) -> Self { - Self { inner: self.inner.clone(), client: self.client.clone(), _phantom: PhantomData } + Self { + inner: self.inner.clone(), + client: self.client.clone(), + sender: self.sender.clone(), + _phantom: PhantomData, + } } } diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs index 772e33000662b..b5682bf9c5760 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs @@ -51,6 +51,8 @@ pub struct Params { pub collator_service: CS, /// Receiver channel for communication with the block builder task. pub collator_receiver: TracingUnboundedReceiver>, + /// The handle from the special slot based block import. + pub block_import_handle: super::SlotBasedBlockImportHandle, /// When set, the collator will export every produced `POV` to this folder. pub export_pov: Option, } @@ -69,6 +71,7 @@ pub async fn run_collation_task( reinitialize, collator_service, mut collator_receiver, + mut block_import_handle, export_pov, }: Params, ) where @@ -98,6 +101,11 @@ pub async fn run_collation_task( handle_collation_message(message, &collator_service, &mut overseer_handle,relay_client.clone(),export_pov.clone()).await; }, + block_import_msg = block_import_handle.next().fuse() => { + // TODO: Implement me. 
+ // Issue: https://github.com/paritytech/polkadot-sdk/issues/6495 + let _ = block_import_msg; + } } } } @@ -165,7 +173,7 @@ async fn handle_collation_message>(), + block_numbers = ?block_data.blocks().iter().map(|b| *b.header().number()).collect::>(), "Submitting collation for core.", ); diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index 5a16f8d44435d..d7fd0c7d72129 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -67,7 +67,7 @@ //! 2. Submission to the collation-generation subsystem use self::{block_builder_task::run_block_builder, collation_task::run_collation_task}; -pub use block_import::SlotBasedBlockImport; +pub use block_import::{SlotBasedBlockImport, SlotBasedBlockImportHandle}; use codec::Codec; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; @@ -107,7 +107,7 @@ mod slot_timer; mod tests; /// Parameters for [`run`]. -pub struct Params { +pub struct Params { /// Inherent data providers. Only non-consensus inherent data should be provided, i.e. /// the timestamp, slot, and paras inherents should be omitted, as they are set by this /// collator. @@ -139,6 +139,8 @@ pub struct Params, /// Spawner for spawning futures. 
pub spawner: Spawner, /// Slot duration of the relay chain @@ -152,7 +154,7 @@ pub struct Params( - params: Params, + params: Params, ) where Block: BlockT, Client: ProvideRuntimeApi @@ -199,6 +201,7 @@ pub fn run( params_with_export: SlotBasedParams< + Block, ParachainBlockImport< Block, SlotBasedBlockImport< @@ -576,7 +577,7 @@ impl, RuntimeApi, AuraId> ParachainClient, ::Public, >, - (), + SlotBasedBlockImportHandle, > for StartSlotBasedAuraConsensus where RuntimeApi: ConstructNodeRuntimeApi>, @@ -609,7 +610,7 @@ where announce_block: Arc>) + Send + Sync>, backend: Arc>, node_extra_args: NodeExtraArgs, - _: (), + block_import_handle: SlotBasedBlockImportHandle, ) -> Result<(), Error> { let proposer = sc_basic_authorship::ProposerFactory::new( task_manager.spawn_handle(), @@ -665,6 +666,7 @@ where collator_service, reinitialize: false, slot_offset: Duration::from_secs(1), + block_import_handle, spawner: task_manager.spawn_essential_handle(), export_pov: node_extra_args.export_pov, max_pov_percentage: node_extra_args.max_pov_percentage, @@ -691,12 +693,12 @@ where ParachainClient, ::Public, >; - type BlockImportAuxiliaryData = (); + type BlockImportAuxiliaryData = SlotBasedBlockImportHandle; fn init_block_import( client: Arc>, ) -> sc_service::error::Result<(Self::BlockImport, Self::BlockImportAuxiliaryData)> { - Ok((SlotBasedBlockImport::new(client.clone(), client), ())) + Ok(SlotBasedBlockImport::new(client.clone(), client)) } } diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 3afe9bb35d1ca..f779c19c038af 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -27,7 +27,10 @@ use cumulus_client_collator::service::CollatorService; use cumulus_client_consensus_aura::{ collators::{ lookahead::{self as aura, Params as AuraParams}, - slot_based::{self as slot_based, Params as SlotBasedParams, SlotBasedBlockImport}, + slot_based::{ + self as slot_based, Params as SlotBasedParams, SlotBasedBlockImport, + 
SlotBasedBlockImportHandle, + }, }, ImportQueueParams, }; @@ -200,7 +203,8 @@ pub fn new_partial( )?; let client = Arc::new(client); - let block_import = SlotBasedBlockImport::new(client.clone(), client.clone()); + let (block_import, block_import_handle) = + SlotBasedBlockImport::new(client.clone(), client.clone()); let block_import = ParachainBlockImport::new(block_import, backend.clone()); let transaction_pool = Arc::from( @@ -470,6 +474,7 @@ where collator_service, reinitialize: false, slot_offset: Duration::from_secs(1), + block_import_handle, spawner: task_manager.spawn_essential_handle(), export_pov: None, max_pov_percentage: None, From bc6564d94e6fcc39266cbe20752a24d1427a54aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 24 Mar 2026 21:16:29 +0100 Subject: [PATCH 248/312] Improvements --- Cargo.lock | 2 +- cumulus/client/collator/src/service.rs | 41 ++++---- .../src/collators/slot_based/block_import.rs | 88 ++++++++++-------- .../src/collators/slot_based/slot_timer.rs | 35 ++++--- .../client/proof-size-recording/Cargo.toml | 1 + .../client/proof-size-recording/src/lib.rs | 6 +- cumulus/client/service/src/lib.rs | 23 ++--- .../src/validate_block/implementation.rs | 93 ++++++++++--------- .../polkadot-omni-node/lib/src/nodes/aura.rs | 14 +-- cumulus/test/service/src/lib.rs | 9 +- substrate/primitives/block-builder/Cargo.toml | 2 - .../trie/src/proof_size_extension.rs | 6 ++ substrate/primitives/trie/src/recorder.rs | 9 -- 13 files changed, 163 insertions(+), 166 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fa97b27966388..e8ee9384719e4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4504,6 +4504,7 @@ dependencies = [ "sc-client-api 28.0.0", "sp-blockchain 28.0.0", "sp-runtime 31.0.1", + "sp-trie 29.0.0", ] [[package]] @@ -23366,7 +23367,6 @@ name = "sp-block-builder" version = "26.0.0" dependencies = [ "parity-scale-codec", - "scale-info", "sp-api 26.0.0", "sp-inherents 26.0.0", "sp-runtime 31.0.1", diff --git 
a/cumulus/client/collator/src/service.rs b/cumulus/client/collator/src/service.rs index 2c004089d66c3..1920822ac469c 100644 --- a/cumulus/client/collator/src/service.rs +++ b/cumulus/client/collator/src/service.rs @@ -36,7 +36,7 @@ use polkadot_node_primitives::{ use codec::Encode; use futures::channel::oneshot; use parking_lot::Mutex; -use std::sync::Arc; +use std::{collections::HashSet, sync::Arc}; /// The logging target. const LOG_TARGET: &str = "cumulus-collator"; @@ -114,6 +114,11 @@ where RA: ProvideRuntimeApi, RA::Api: CollectCollationInfo, { + fn split_at_separator(messages: Vec>) -> (Vec>, Vec>) { + let mut parts = messages.splitn(2, |m: &Vec| m.is_empty()); + (parts.next().unwrap_or(&[]).to_vec(), parts.next().unwrap_or(&[]).to_vec()) + } + /// Create a new instance. pub fn new( block_status: Arc, @@ -243,7 +248,7 @@ where let mut api_version = 0; let mut upward_messages = Vec::new(); - let mut upward_message_signals = Vec::>::with_capacity(4); + let mut upward_message_signals = HashSet::>::with_capacity(4); let mut horizontal_messages = Vec::new(); let mut new_validation_code = None; let mut processed_downward_messages = 0; @@ -264,8 +269,6 @@ where .ok() .flatten()?; - // Workaround for: https://github.com/paritytech/polkadot-sdk/issues/64 - // // We are always using the `api_version` of the parent block. The `api_version` can only // change with a runtime upgrade and this is when we want to observe the old // `api_version`. Because this old `api_version` is the one used to validate this @@ -278,24 +281,20 @@ where .ok() .flatten()?; - let mut found_separator = false; - upward_messages.extend(collation_info.upward_messages.into_iter().filter_map(|m| { - // Filter out the `UMP_SEPARATOR` and the `UMPSignals`. 
- if m == UMP_SEPARATOR { - found_separator = true; - None - } else if found_separator { - if !upward_message_signals.contains(&m) { - upward_message_signals.push(m); - } - None - } else { - // No signal or separator - Some(m) - } - })); + let (messages, signals) = Self::split_at_separator(collation_info.upward_messages); + + upward_messages.extend(messages); + upward_message_signals.extend(signals.into_iter()); horizontal_messages.extend(collation_info.horizontal_messages); - new_validation_code = new_validation_code.take().or(collation_info.new_validation_code); + if let Some(new_code) = collation_info.new_validation_code { + if new_validation_code.replace(new_code).is_some() { + tracing::warn!( + target: LOG_TARGET, + block = ?block.hash(), + "Overwriting validation code from an earlier block in the bundle.", + ); + } + } processed_downward_messages += collation_info.processed_downward_messages; hrmp_watermark = Some(collation_info.hrmp_watermark); head_data = Some(collation_info.head_data); diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs index 94491cb7088b3..641a419df3594 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -16,7 +16,7 @@ // along with Cumulus. If not, see . 
use crate::LOG_TARGET; -use codec::{Codec, Decode, Encode}; +use codec::{Decode, Encode}; use cumulus_client_proof_size_recording::prepare_proof_size_recording_transaction; use cumulus_primitives_core::{BlockBundleInfo, CoreInfo, CumulusDigestItem, RelayBlockIdentifier}; use futures::{stream::FusedStream, StreamExt}; @@ -33,10 +33,9 @@ use sp_api::{ }; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sp_consensus::BlockOrigin; -use sp_consensus_aura::AuraApi; use sp_runtime::traits::{Block as BlockT, HashingFor, Header as _}; use sp_trie::proof_size_extension::{ProofSizeExt, RecordingProofSizeProvider}; -use std::{marker::PhantomData, sync::Arc}; +use std::sync::Arc; /// The aux storage key used to store the ignored nodes for the given block hash. fn ignored_nodes_key(block_hash: H) -> Vec { @@ -63,9 +62,9 @@ fn load_ignored_nodes( ) -> ClientResult>> { match backend.get_aux(&ignored_nodes_key(block_hash))? { None => Ok(None), - Some(t) => ProofRecorderIgnoredNodes::::decode(&mut &t[..]).map(Some).map_err(|e| { - ClientError::Backend(format!("Ignored nodes DB: decode error: {}", e)) - }), + Some(t) => ProofRecorderIgnoredNodes::::decode(&mut &t[..]) + .map(Some) + .map_err(|e| ClientError::Backend(format!("Failed to decode ignored nodes: {}", e))), } } @@ -86,7 +85,7 @@ impl SlotBasedBlockImportHandle { if self.receiver.is_terminated() { futures::pending!() } else if let Some(res) = self.receiver.next().await { - return res + return res; } } } @@ -106,9 +105,25 @@ where // Delete the ignored nodes for all stale blocks. .map(|b| (ignored_nodes_key(b.hash), None)) // We can not delete the ignored nodes for the finalized block, because blocks can still - // be imported on top of this block. As blocks are only finalized as bundles on the - // relay chain, we should never need them, but better safe than sorry :) - .chain(std::iter::once((ignored_nodes_key(*notification.header.parent_hash()), None))) + // be imported on top of this block. 
However, once multiple blocks are finalized at + // once, blocks on the route to the finalized parent can no longer become parents + // either. + .chain( + notification + .tree_route + .iter() + .copied() + .map(|hash| (ignored_nodes_key(hash), None)), + ) + // Include the old last finalized block as well. + .chain( + notification + .tree_route + .first() + .copied() + .into_iter() + .map(|hash| (ignored_nodes_key(hash), None)), + ) .collect() }; @@ -116,23 +131,19 @@ where } /// Special block import for the slot based collator. -pub struct SlotBasedBlockImport { +pub struct SlotBasedBlockImport { inner: BI, client: Arc, sender: TracingUnboundedSender<(Block, StorageProof)>, - _phantom: PhantomData, } -impl SlotBasedBlockImport { +impl SlotBasedBlockImport { /// Create a new instance. /// /// The returned [`SlotBasedBlockImportHandle`] needs to be passed to the /// [`Params`](super::Params), so that this block import instance can communicate with the /// collation task. If the node is not running as a collator, just dropping the handle is fine. 
- pub fn new( - inner: BI, - client: Arc, - ) -> (Self, SlotBasedBlockImportHandle) + pub fn new(inner: BI, client: Arc) -> (Self, SlotBasedBlockImportHandle) where Client: PreCommitActions, { @@ -140,10 +151,7 @@ impl SlotBasedBlockImport SlotBasedBlockImport, @@ -207,17 +219,22 @@ impl SlotBasedBlockImport + AuraApi, - AuthorityId: Codec + Send + Sync + std::fmt::Debug, + Client::Api: Core, { let core_info = CumulusDigestItem::find_core_info(params.header.digest()); let bundle_info = CumulusDigestItem::find_block_bundle_info(params.header.digest()); let relay_block_identifier = CumulusDigestItem::find_relay_block_identifier(params.header.digest()); - let (Some(core_info), Some(bundle_info), Some(relay_block_identifier)) = - (core_info, bundle_info, relay_block_identifier) - else { + let Some(core_info) = core_info else { + return Err(sp_consensus::Error::ClientImport("Missing `CoreInfo` digest".into())); + }; + let Some(relay_block_identifier) = relay_block_identifier else { + return Err(sp_consensus::Error::ClientImport( + "Missing `RelayBlockIdentifier` digest".into(), + )); + }; + let Some(bundle_info) = bundle_info else { return Ok(()); }; @@ -232,6 +249,8 @@ impl SlotBasedBlockImport SlotBasedBlockImport Clone - for SlotBasedBlockImport -{ +impl Clone for SlotBasedBlockImport { fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - client: self.client.clone(), - sender: self.sender.clone(), - _phantom: PhantomData, - } + Self { inner: self.inner.clone(), client: self.client.clone(), sender: self.sender.clone() } } } #[async_trait::async_trait] -impl BlockImport - for SlotBasedBlockImport +impl BlockImport for SlotBasedBlockImport where Block: BlockT, BI: BlockImport + Send + Sync, @@ -318,8 +329,7 @@ where Client: ProvideRuntimeApi + CallApiAt + AuxStore + HeaderBackend + Send + Sync, Client::StateBackend: Send, - Client::Api: Core + AuraApi, - AuthorityId: Codec + Send + Sync + std::fmt::Debug, + Client::Api: Core, { type Error = 
sp_consensus::Error; diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs index b0ff80e6c56b7..ac039786f796e 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs @@ -28,11 +28,12 @@ pub(crate) struct SlotInfo { pub slot: Slot, } -/// Information about a slot timing, including the relay chain slot duration and exact start timestamp. +/// Information about a slot timing, including the relay chain slot duration and exact start +/// timestamp. #[derive(Debug, Clone)] pub(crate) struct SlotTime { /// The relay chain slot duration used for this timing - slot_duration: Duration, + relay_slot_duration: Duration, /// The exact timestamp when this relay chain slot started slot_start_timestamp: Timestamp, /// Time offset to apply when calculating time remaining @@ -42,11 +43,11 @@ pub(crate) struct SlotTime { impl SlotTime { /// Create a new SlotTime pub fn new( - slot_duration: Duration, + relay_slot_duration: Duration, slot_start_timestamp: Timestamp, time_offset: Duration, ) -> Self { - Self { slot_duration, slot_start_timestamp, time_offset } + Self { relay_slot_duration, slot_start_timestamp, time_offset } } /// Get the time remaining in this slot @@ -58,7 +59,7 @@ impl SlotTime { fn time_left_internal(&self, now: Duration) -> Duration { let now = now.saturating_sub(self.time_offset); let slot_end_time_millis = - self.slot_start_timestamp.as_millis() + self.slot_duration.as_millis() as u64; + self.slot_start_timestamp.as_millis() + self.relay_slot_duration.as_millis() as u64; let slot_end_time = Duration::from_millis(slot_end_time_millis); slot_end_time.saturating_sub(now) @@ -68,7 +69,7 @@ impl SlotTime { pub fn is_parachain_slot_ending(&self, parachain_slot_duration: Duration) -> bool { let now = duration_now().saturating_sub(self.time_offset); let 
next_relay_slot_start_time = - self.slot_start_timestamp.as_duration() + self.slot_duration; + self.slot_start_timestamp.as_duration() + self.relay_slot_duration; // Calculate current parachain slot let current_parachain_slot = now.as_millis() / parachain_slot_duration.as_millis(); @@ -130,7 +131,7 @@ impl SlotTimer { // Calculate the current slot using the relay chain slot duration let relay_slot_duration_for_slot = SlotDuration::from(self.relay_slot_duration); - let mut current_slot = Slot::from_timestamp(timestamp, relay_slot_duration_for_slot); + let mut next_slot = Slot::from_timestamp(timestamp, relay_slot_duration_for_slot); // Calculate the actual slot start timestamp (may be different if we're catching up) let mut slot_start_timestamp = timestamp; @@ -138,16 +139,16 @@ impl SlotTimer { match self.last_reported_slot { // If we already reported a slot, we don't want to skip a slot. But we also don't want // to go through all the slots if a node was halted for some reason. - Some(ls) if ls + 1 < current_slot && current_slot <= ls + 3 => { - current_slot = ls + 1u64; + Some(ls) if ls + 1 < next_slot && next_slot <= ls + 3 => { + next_slot = ls + 1u64; // Calculate the timestamp for the adjusted slot slot_start_timestamp = - current_slot.timestamp(relay_slot_duration_for_slot).ok_or(())?; + next_slot.timestamp(relay_slot_duration_for_slot).ok_or(())?; // Don't sleep since we're catching up tracing::debug!( target: LOG_TARGET, last_slot = ?ls, - current_slot = ?current_slot, + next_slot = ?next_slot, "Catching up on skipped slot." ); }, @@ -158,21 +159,25 @@ impl SlotTimer { "Feeling sleepy 😴" ); - // Sleep based on relay chain timing - tokio::time::sleep(time_until_next_attempt).await; + // Wake up slightly before the next slot to avoid noisy "catching up" logs caused by + // scheduler jitter right at the slot boundary. 
+ tokio::time::sleep( + time_until_next_attempt.saturating_sub(Duration::from_millis(2)), + ) + .await; }, } tracing::debug!( target: LOG_TARGET, relay_slot_duration = ?self.relay_slot_duration, - ?current_slot, + ?next_slot, ?slot_start_timestamp, "New block production slot." ); // Update internal slot tracking - self.last_reported_slot = Some(current_slot); + self.last_reported_slot = Some(next_slot); Ok(SlotTime::new(self.relay_slot_duration, slot_start_timestamp, self.time_offset)) } diff --git a/cumulus/client/proof-size-recording/Cargo.toml b/cumulus/client/proof-size-recording/Cargo.toml index 520be6ab0a509..1b3f206bbbef2 100644 --- a/cumulus/client/proof-size-recording/Cargo.toml +++ b/cumulus/client/proof-size-recording/Cargo.toml @@ -16,3 +16,4 @@ codec = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } diff --git a/cumulus/client/proof-size-recording/src/lib.rs b/cumulus/client/proof-size-recording/src/lib.rs index 6a71c0dc2ea43..1aabad4f65026 100644 --- a/cumulus/client/proof-size-recording/src/lib.rs +++ b/cumulus/client/proof-size-recording/src/lib.rs @@ -24,6 +24,7 @@ use sc_client_api::{ }; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sp_runtime::traits::Block as BlockT; +use sp_trie::proof_size_extension::RecordedProofSizeEstimations; use std::sync::Arc; const PROOF_SIZE_RECORDING_VERSION: &[u8] = b"cumulus_proof_size_recording_version"; @@ -66,13 +67,14 @@ pub fn prepare_proof_size_recording_transaction( pub fn load_proof_size_recording( backend: &B, block_hash: H, -) -> ClientResult>> { +) -> ClientResult> { let version = load_decode::<_, u32>(backend, PROOF_SIZE_RECORDING_VERSION)?; match version { None => Ok(None), Some(PROOF_SIZE_RECORDING_CURRENT_VERSION) => { - load_decode(backend, 
proof_size_recording_key(block_hash).as_slice()) + load_decode::<_, Vec>(backend, proof_size_recording_key(block_hash).as_slice()) + .map(|recordings| recordings.map(Into::into)) }, Some(other) => Err(ClientError::Backend(format!( "Unsupported proof size recording DB version: {:?}", diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index ff2a1310167a5..e7cfe3b70d227 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -58,9 +58,7 @@ use sp_runtime::{ traits::{Block as BlockT, BlockIdTo, Header}, SaturatedConversion, Saturating, }; -use sp_trie::proof_size_extension::{ - ProofSizeExt, RecordedProofSizeEstimations, ReplayProofSizeProvider, -}; +use sp_trie::proof_size_extension::{ProofSizeExt, ReplayProofSizeProvider}; use std::{ sync::Arc, time::{Duration, Instant}, @@ -632,20 +630,11 @@ where let mut runtime_api = self.client.runtime_api(); let storage_proof_recorder = ProofRecorder::::default(); - // Try to load proof size recordings for this block - match load_proof_size_recording(&*self.client, orig_hash)? 
{ - Some(recordings) => { - let recorded = RecordedProofSizeEstimations( - recordings.into_iter().map(|x| x as usize).collect(), - ); - let replay_provider = ReplayProofSizeProvider::from_recorded(recorded); - runtime_api.register_extension(ProofSizeExt::new(replay_provider)); - }, - None => { - // No recordings found or error loading, fall back to default recorder - runtime_api.register_extension(ProofSizeExt::new(storage_proof_recorder.clone())); - }, - } + let proof_size_ext = load_proof_size_recording(&*self.client, orig_hash)?.map_or_else( + || ProofSizeExt::new(storage_proof_recorder.clone()), + |recordings| ProofSizeExt::new(ReplayProofSizeProvider::from(recordings)), + ); + runtime_api.register_extension(proof_size_ext); runtime_api.record_proof_with_recorder(storage_proof_recorder); diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 28c6b05064fab..3bfaa32dc10f1 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -146,7 +146,7 @@ where let (blocks, proof) = block_data.into_inner(); - validate_blocks::(&blocks, &parent_header); + verify_blocks_form_chain::(&blocks, &parent_header); let mut processed_downward_messages = 0; let mut upward_messages = BoundedVec::default(); @@ -379,8 +379,7 @@ fn validate_validation_data( ); } -/// Validates that the given blocks form a valid chain and have consistent BlockBundleInfo. 
-fn validate_blocks(blocks: &[B::LazyBlock], parent_header: &B::Header) { +fn verify_blocks_form_chain(blocks: &[B::LazyBlock], parent_header: &B::Header) { let num_blocks = blocks.len(); // Check first block's parent matches the given parent_header @@ -396,55 +395,59 @@ fn validate_blocks(blocks: &[B::LazyBlock], parent_header: &B::Header let mut first_block_has_bundle_info: Option = None; - blocks.iter().enumerate().fold(parent_header.hash(), |expected_parent, (block_index, block)| { - // Check chain validity - assert_eq!( - expected_parent, - *block.header().parent_hash(), - "Not a valid chain of blocks :(; {:?} not a parent of {:?}?", - array_bytes::bytes2hex("0x", expected_parent.as_ref()), - array_bytes::bytes2hex("0x", block.header().parent_hash().as_ref()), - ); + blocks.iter().enumerate().fold( + parent_header.hash(), + |expected_parent, (block_index, block)| { + // Check chain validity + assert_eq!( + expected_parent, + *block.header().parent_hash(), + "Not a valid chain of blocks :(; {:?} not a parent of {:?}?", + array_bytes::bytes2hex("0x", expected_parent.as_ref()), + array_bytes::bytes2hex("0x", block.header().parent_hash().as_ref()), + ); - let encoded_header_size = block.header().encoded_size(); - assert!( - encoded_header_size <= MAX_HEAD_DATA_SIZE as usize, - "Header size {encoded_header_size} exceeds MAX_HEAD_DATA_SIZE {MAX_HEAD_DATA_SIZE}", - ); + let encoded_header_size = block.header().encoded_size(); + assert!( + encoded_header_size <= MAX_HEAD_DATA_SIZE as usize, + "Header size {encoded_header_size} exceeds MAX_HEAD_DATA_SIZE {MAX_HEAD_DATA_SIZE}", + ); - // Validate BlockBundleInfo consistency - let bundle_info = CumulusDigestItem::find_block_bundle_info(block.header().digest()); - match (first_block_has_bundle_info, &bundle_info) { - (None, info) => { - first_block_has_bundle_info = Some(info.is_some()); - }, - (Some(true), None) => { - panic!("All blocks must have BlockBundleInfo if the first block has it"); - }, - (Some(false), Some(_)) 
=> { - panic!("No block should have BlockBundleInfo if the first block doesn't have it"); - }, - _ => {}, - } + // Validate BlockBundleInfo consistency + let bundle_info = CumulusDigestItem::find_block_bundle_info(block.header().digest()); + match (first_block_has_bundle_info, &bundle_info) { + (None, info) => { + first_block_has_bundle_info = Some(info.is_some()); + }, + (Some(true), None) => { + panic!("All blocks in a bundled PoV must include `BlockBundleInfo`"); + }, + (Some(false), Some(_)) => { + panic!("A PoV without `BlockBundleInfo` may only contain a single block"); + }, + _ => {}, + } - if let Some(ref info) = bundle_info { - assert_eq!( - info.index as usize, - block_index, - "BlockBundleInfo index mismatch: expected {}, got {}", - block_index, - info.index - ); + if let Some(ref info) = bundle_info { + assert_eq!( + info.index as usize, block_index, + "BlockBundleInfo index mismatch: expected {}, got {}", + block_index, info.index + ); - if block_index + 1 == num_blocks && !CumulusDigestItem::is_last_block_in_core(block.header().digest()).unwrap_or(true) { - panic!( - "Last block in PoV must have maybe_last=true, `UseFullCore` digest, or `RuntimeEnvironmentUpdated` digest" + if block_index + 1 == num_blocks && + !CumulusDigestItem::is_last_block_in_core(block.header().digest()) + .unwrap_or(true) + { + panic!( + "Last block in PoV must include the digest that marks it as the last block in the core" ); + } } - } - block.header().hash() - }); + block.header().hash() + }, + ); } /// Build a seed from the head data of the parachain block. 
diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs index 434f89a7586ce..3940b4f9903b0 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs @@ -70,7 +70,7 @@ use sc_transaction_pool::TransactionPoolHandle; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_consensus::Environment; -use sp_core::{traits::SpawnEssentialNamed, Pair}; +use sp_core::traits::SpawnEssentialNamed; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::{ @@ -78,7 +78,7 @@ use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT, UniqueSaturatedInto}, }; use sp_transaction_storage_proof::runtime_api::TransactionStorageApi; -use std::{fmt::Debug, marker::PhantomData, ops::Sub, sync::Arc, time::Duration}; +use std::{marker::PhantomData, ops::Sub, sync::Arc, time::Duration}; struct Verifier { client: Arc, @@ -493,7 +493,7 @@ where + substrate_frame_rpc_system::AccountNonceApi + TargetBlockRate + GetParachainInfo, - AuraId: AuraIdT + Sync + Debug + Send, + AuraId: AuraIdT + Sync + Send, ::Pair: Send + Sync, { if extra_args.authoring_policy == AuthoringPolicy::SlotBased { @@ -525,7 +525,7 @@ impl, RuntimeApi, AuraId> where RuntimeApi: ConstructNodeRuntimeApi>, RuntimeApi::RuntimeApi: AuraRuntimeApi + TargetBlockRate, - AuraId: AuraIdT + Sync + Debug + Send, + AuraId: AuraIdT + Sync + Send, ::Pair: Send + Sync, { #[docify::export_content] @@ -538,7 +538,6 @@ where Block, Arc>, ParachainClient, - ::Public, >, >, CIDP, @@ -575,14 +574,13 @@ impl, RuntimeApi, AuraId> Block, Arc>, ParachainClient, - ::Public, >, SlotBasedBlockImportHandle, > for StartSlotBasedAuraConsensus where RuntimeApi: ConstructNodeRuntimeApi>, RuntimeApi::RuntimeApi: AuraRuntimeApi + TargetBlockRate, - AuraId: AuraIdT + Sync + Debug + Send, + AuraId: AuraIdT + Sync + Send, ::Pair: Send + Sync, { fn 
start_consensus( @@ -593,7 +591,6 @@ where Block, Arc>, ParachainClient, - ::Public, >, >, prometheus_registry: Option<&Registry>, @@ -691,7 +688,6 @@ where Block, Arc>, ParachainClient, - ::Public, >; type BlockImportAuxiliaryData = SlotBasedBlockImportHandle; diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index a85e018884b87..cbff93aa9d5b2 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -37,7 +37,7 @@ use cumulus_client_consensus_aura::{ use prometheus::Registry; use runtime::AccountId; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; -use sp_consensus_aura::sr25519::{AuthorityId, AuthorityPair}; +use sp_consensus_aura::sr25519::AuthorityPair; use std::{ collections::HashSet, future::Future, @@ -112,11 +112,8 @@ pub type Client = TFullClient; /// The block-import type being used by the test service. -pub type ParachainBlockImport = TParachainBlockImport< - Block, - SlotBasedBlockImport, Client, AuthorityId>, - Backend, ->; +pub type ParachainBlockImport = + TParachainBlockImport, Client>, Backend>; /// Transaction pool type used by the test service pub type TransactionPool = Arc>; diff --git a/substrate/primitives/block-builder/Cargo.toml b/substrate/primitives/block-builder/Cargo.toml index 72bb691f2dd86..1266626e2daa4 100644 --- a/substrate/primitives/block-builder/Cargo.toml +++ b/substrate/primitives/block-builder/Cargo.toml @@ -17,7 +17,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-api = { workspace = true } sp-inherents = { workspace = true } sp-runtime = { workspace = true } @@ -26,7 +25,6 @@ sp-runtime = { workspace = true } default = ["std"] std = [ "codec/std", - "scale-info/std", "sp-api/std", "sp-inherents/std", "sp-runtime/std", diff --git a/substrate/primitives/trie/src/proof_size_extension.rs 
b/substrate/primitives/trie/src/proof_size_extension.rs index 49d3036c4add0..36f7396a81d1f 100644 --- a/substrate/primitives/trie/src/proof_size_extension.rs +++ b/substrate/primitives/trie/src/proof_size_extension.rs @@ -62,6 +62,12 @@ impl ProofSizeExt { /// need to be replayed in the exact same order. pub struct RecordedProofSizeEstimations(pub VecDeque); +impl From> for RecordedProofSizeEstimations { + fn from(recordings: Vec) -> Self { + Self(recordings.into_iter().map(|x| x as usize).collect()) + } +} + /// Inner structure of [`RecordingProofSizeProvider`]. struct RecordingProofSizeProviderInner { inner: Box, diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index f3b4617bd56bd..eaa8028975cd2 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -444,15 +444,6 @@ impl<'a, H: Hasher> trie_db::TrieRecorder for TrieRecorder<'a, H> { "Recording node", ); - if inner.ignored_nodes.is_ignored(&hash) { - tracing::trace!( - target: LOG_TARGET, - ?hash, - "Ignoring node", - ); - return; - } - inner.accessed_nodes.entry(hash).or_insert_with(|| { let node = encoded_node.into_owned(); From 502fc13d0cb675410997578b12eebfe1648aad73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 24 Mar 2026 21:53:59 +0100 Subject: [PATCH 249/312] Fix issue --- .../src/collators/slot_based/block_builder_task.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 8d156aa624527..dee186be193e6 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -697,8 +697,14 @@ where time_left_for_block }; - // The time we will use to build the actual block. 
- let authoring_duration = block_time.min(adjusted_time_left); + // The first block on a core gets the full remaining core time so that the runtime's + // `FullCore` weight mode can actually be utilized. Subsequent blocks are capped at + // `block_time` because they only carry fractional weight. + let authoring_duration = if block_index == 0 { + slot_time_for_core.saturating_sub(core_start.elapsed()) + } else { + block_time.min(adjusted_time_left) + }; tracing::trace!( target: LOG_TARGET, From 5e4446aa8f6a455fcf6fa29fbbcbb47a263d18af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 24 Mar 2026 22:04:25 +0100 Subject: [PATCH 250/312] Revert --- .../aura/src/collators/slot_based/block_import.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs index 641a419df3594..4506d2fc2af02 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -226,15 +226,9 @@ impl SlotBasedBlockImport { let relay_block_identifier = CumulusDigestItem::find_relay_block_identifier(params.header.digest()); - let Some(core_info) = core_info else { - return Err(sp_consensus::Error::ClientImport("Missing `CoreInfo` digest".into())); - }; - let Some(relay_block_identifier) = relay_block_identifier else { - return Err(sp_consensus::Error::ClientImport( - "Missing `RelayBlockIdentifier` digest".into(), - )); - }; - let Some(bundle_info) = bundle_info else { + let (Some(core_info), Some(bundle_info), Some(relay_block_identifier)) = + (core_info, bundle_info, relay_block_identifier) + else { return Ok(()); }; From aff9e604fa7197e169095fa18044e61a709f7893 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 24 Mar 2026 22:17:48 +0100 Subject: [PATCH 251/312] Review --- 
.../aura/src/collators/slot_based/block_builder_task.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index dee186be193e6..54f7199928313 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -687,7 +687,7 @@ where // adjust the authoring duration on the last block. // // TODO: Remove when transaction streaming is implemented - let adjusted_time_left = if is_last_block_in_core && + let adjusted_time_left = if is_last_block_in_core && is_last_core_in_parachain_slot && blocks_per_core == 1 && total_number_of_blocks <= 3 && total_number_of_blocks >= 2 From 49f27b378dacdbe6912b2cd501b7aca7aa7ed432 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 24 Mar 2026 22:21:46 +0100 Subject: [PATCH 252/312] Fix issue --- .../consensus/aura/src/collators/slot_based/slot_timer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs index ac039786f796e..4d3998b39673e 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs @@ -241,7 +241,7 @@ mod tests { #[case] expected_duration: u128, ) { let slot_time = SlotTime { - slot_duration: Duration::from_millis(para_slot_millis), + relay_slot_duration: Duration::from_millis(para_slot_millis), time_offset: Duration::from_millis(offset_millis), slot_start_timestamp: Timestamp::new( Duration::from_millis(para_slot_millis).as_millis() as u64 * *last_reported_slot, From d58c2fa3945244f4fc2e5841d078ab68c9dd8eaa Mon Sep 17 00:00:00 2001 From: "cmd[bot]" 
<41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 24 Mar 2026 21:25:29 +0000 Subject: [PATCH 253/312] Update from github-actions[bot] running command 'fmt' --- .../aura/src/collators/slot_based/block_builder_task.rs | 3 ++- cumulus/test/runtime/src/test_pallet.rs | 3 +-- polkadot/node/subsystem-util/src/runtime/mod.rs | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 54f7199928313..4a81a3398e7e5 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -687,7 +687,8 @@ where // adjust the authoring duration on the last block. // // TODO: Remove when transaction streaming is implemented - let adjusted_time_left = if is_last_block_in_core && is_last_core_in_parachain_slot && + let adjusted_time_left = if is_last_block_in_core && + is_last_core_in_parachain_slot && blocks_per_core == 1 && total_number_of_blocks <= 3 && total_number_of_blocks >= 2 diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index 4cd5789afc658..845c8098948a7 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -41,9 +41,8 @@ pub fn relay_alice_account_key() -> alloc::vec::Vec { pub mod pallet { use crate::test_pallet::TEST_RUNTIME_UPGRADE_KEY; use alloc::{vec, vec::Vec}; - use cumulus_primitives_core::CumulusDigestItem; + use cumulus_primitives_core::{CumulusDigestItem, ParaId, XcmpMessageSource}; use cumulus_primitives_storage_weight_reclaim::get_proof_size; - use cumulus_primitives_core::{ParaId, XcmpMessageSource}; use frame_support::{ dispatch::DispatchInfo, inherent::{InherentData, InherentIdentifier, ProvideInherent}, diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs 
b/polkadot/node/subsystem-util/src/runtime/mod.rs index 3508251dde0cd..671248884d554 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -537,7 +537,8 @@ impl ClaimQueueSnapshot { self.0.iter() } - /// Find the earliest cores for the given `para_id` starting from the given `claim_queue_offset`. + /// Find the earliest cores for the given `para_id` starting from the given + /// `claim_queue_offset`. /// /// It is not guaranteed that at the given `claim_queue_offset` cores are available for /// the `para_id`. Thus, the claim queue offset for the core indices is returned as well. From 66163670b6f4381d7c293b4c55368385d06da319 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 25 Mar 2026 11:21:54 +0100 Subject: [PATCH 254/312] Fix CI tests --- .../chunk_fetching_network_compatibility.rs | 2 +- .../collators_reputation_persistence.rs | 4 +- .../coretime_collation_fetching_fairness.rs | 1 + .../functional/dispute_freshly_finalized.rs | 2 +- .../tests/functional/parachains_disputes.rs | 2 +- .../parachains_disputes_garbage_candidate.rs | 2 +- .../functional/parachains_max_tranche0.rs | 1 + .../tests/functional/parachains_pvf.rs | 2 +- .../functional/systematic_chunk_recovery.rs | 2 +- .../tests/smoke/coretime_smoke.rs | 4 +- .../tests/smoke/parachains_smoke.rs | 2 +- .../tests/smoke/precompile_pvf_smoke.rs | 2 +- prdoc/pr_10477.prdoc | 53 +++++++++++++++++++ 13 files changed, 69 insertions(+), 10 deletions(-) create mode 100644 prdoc/pr_10477.prdoc diff --git a/polkadot/zombienet-sdk-tests/tests/functional/chunk_fetching_network_compatibility.rs b/polkadot/zombienet-sdk-tests/tests/functional/chunk_fetching_network_compatibility.rs index 945f96fb711a7..545fbb8eb9d1f 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/chunk_fetching_network_compatibility.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/chunk_fetching_network_compatibility.rs @@ -48,7 +48,7 @@ async fn 
chunk_fetching_network_compatibility_test() -> Result<(), anyhow::Error let relay_client = validator_nodes[0].wait_client().await?; log::info!("Checking parachain block production (all paras registered at genesis)"); let para_throughput: [(ParaId, Range); 2] = PARAS.map(|id| (ParaId::from(id), 2..6)); - assert_para_throughput(&relay_client, 5, para_throughput).await?; + assert_para_throughput(&relay_client, 5, para_throughput, []).await?; log::info!("All parachains producing blocks"); log::info!("Ensure approval checking works."); diff --git a/polkadot/zombienet-sdk-tests/tests/functional/collators_reputation_persistence.rs b/polkadot/zombienet-sdk-tests/tests/functional/collators_reputation_persistence.rs index 2d852a2a176a7..3f892e0225bbe 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/collators_reputation_persistence.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/collators_reputation_persistence.rs @@ -145,6 +145,7 @@ async fn comprehensive_reputation_persistence_test() -> Result<(), anyhow::Error &validator0_client, 10, [(ParaId::from(PARA_ID_1), 8..11), (ParaId::from(PARA_ID_2), 8..11)], + [], ) .await?; @@ -327,7 +328,8 @@ async fn comprehensive_reputation_persistence_test() -> Result<(), anyhow::Error // Verify para 2000 continues normal operation log::info!("Verifying para {} continues normal operation", PARA_ID_1); - assert_para_throughput(&validator0_client_after, 5, [(ParaId::from(PARA_ID_1), 3..7)]).await?; + assert_para_throughput(&validator0_client_after, 5, [(ParaId::from(PARA_ID_1), 3..7)], []) + .await?; log::info!("Phase 3 passed: Pruning successfully removed deregistered parachain"); Ok(()) diff --git a/polkadot/zombienet-sdk-tests/tests/functional/coretime_collation_fetching_fairness.rs b/polkadot/zombienet-sdk-tests/tests/functional/coretime_collation_fetching_fairness.rs index 49c4731375740..624879f1d39b7 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/coretime_collation_fetching_fairness.rs +++ 
b/polkadot/zombienet-sdk-tests/tests/functional/coretime_collation_fetching_fairness.rs @@ -125,6 +125,7 @@ async fn coretime_collation_fetching_fairness_test() -> Result<(), anyhow::Error &relay_client, 12, [(ParaId::from(2000), 6..10), (ParaId::from(2001), 2..5)], + [], ) .await?; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/dispute_freshly_finalized.rs b/polkadot/zombienet-sdk-tests/tests/functional/dispute_freshly_finalized.rs index 757038cb03cf9..d32192d83f18c 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/dispute_freshly_finalized.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/dispute_freshly_finalized.rs @@ -62,7 +62,7 @@ async fn dispute_freshly_finalized_test() -> Result<(), anyhow::Error> { // Ensure parachain made progress log::info!("Waiting for parachain {} to produce blocks", PARA_ID); - assert_para_throughput(&relay_client, 5, [(ParaId::from(PARA_ID), 2..6)]).await?; + assert_para_throughput(&relay_client, 5, [(ParaId::from(PARA_ID), 2..6)], []).await?; log::info!("Parachain {} is producing blocks", PARA_ID); // Ensure that malus is already attempting to dispute diff --git a/polkadot/zombienet-sdk-tests/tests/functional/parachains_disputes.rs b/polkadot/zombienet-sdk-tests/tests/functional/parachains_disputes.rs index cff78fdfa99e8..36341a0e1ae05 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/parachains_disputes.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/parachains_disputes.rs @@ -50,7 +50,7 @@ async fn parachains_disputes_test() -> Result<(), anyhow::Error> { // Check that all parachains produce at least 5 blocks within 1 session and 5 blocks (RC) log::info!("Checking parachain block production (all paras registered at genesis)"); let para_throughput: [(ParaId, Range); 4] = PARAS.map(|id| (ParaId::from(id), 2..6)); - assert_para_throughput(&relay_client, 5, para_throughput).await?; + assert_para_throughput(&relay_client, 5, para_throughput, []).await?; log::info!("All parachains producing 
blocks"); // Check if disputes are initiated and concluded. diff --git a/polkadot/zombienet-sdk-tests/tests/functional/parachains_disputes_garbage_candidate.rs b/polkadot/zombienet-sdk-tests/tests/functional/parachains_disputes_garbage_candidate.rs index 7e27c9255aeff..ab9f5773ee860 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/parachains_disputes_garbage_candidate.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/parachains_disputes_garbage_candidate.rs @@ -52,7 +52,7 @@ async fn parachains_disputes_garbage_candidate_test() -> Result<(), anyhow::Erro // Check that all parachains produce at least 5 blocks within 1 session and 5 blocks (RC) log::info!("Checking parachain block production (all paras registered at genesis)"); let para_throughput: [(ParaId, Range); 3] = PARAS.map(|id| (ParaId::from(id), 2..6)); - assert_para_throughput(&relay_client, 5, para_throughput).await?; + assert_para_throughput(&relay_client, 5, para_throughput, []).await?; log::info!("All parachains producing blocks"); log::info!("Check there is an offence report after dispute conclusion."); diff --git a/polkadot/zombienet-sdk-tests/tests/functional/parachains_max_tranche0.rs b/polkadot/zombienet-sdk-tests/tests/functional/parachains_max_tranche0.rs index 955fb5825abc7..7d18650bee3f3 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/parachains_max_tranche0.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/parachains_max_tranche0.rs @@ -70,6 +70,7 @@ async fn parachains_max_tranche0_test() -> Result<(), anyhow::Error> { (ParaId::from(2003u32), 5..100), (ParaId::from(2004u32), 5..100), ], + [], ) .await?; log::info!("All parachains producing blocks"); diff --git a/polkadot/zombienet-sdk-tests/tests/functional/parachains_pvf.rs b/polkadot/zombienet-sdk-tests/tests/functional/parachains_pvf.rs index 7692afd90c7f7..466a700cda263 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/parachains_pvf.rs +++ 
b/polkadot/zombienet-sdk-tests/tests/functional/parachains_pvf.rs @@ -49,7 +49,7 @@ async fn parachains_pvf_preparation_and_execution_test() -> Result<(), anyhow::E // Using 60 relay blocks as window (~180 seconds with 3s block time) log::info!("Checking parachain block production"); let para_throughput: [(ParaId, Range); 8] = PARAS.map(|id| (ParaId::from(id), 5..61)); - assert_para_throughput(&relay_client, 60, para_throughput).await?; + assert_para_throughput(&relay_client, 60, para_throughput, []).await?; log::info!("All parachains producing blocks"); relay_node diff --git a/polkadot/zombienet-sdk-tests/tests/functional/systematic_chunk_recovery.rs b/polkadot/zombienet-sdk-tests/tests/functional/systematic_chunk_recovery.rs index d0f4b04e08b99..e26687b2b636e 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/systematic_chunk_recovery.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/systematic_chunk_recovery.rs @@ -61,7 +61,7 @@ async fn systematic_chunk_recovery_test() -> Result<(), anyhow::Error> { // Check that all parachains produce at least 5 blocks within 1 session and 5 blocks (RC) log::info!("Checking parachain block production (all paras registered at genesis)"); let para_throughput: [(ParaId, Range); 2] = PARAS.map(|id| (ParaId::from(id), 2..6)); - assert_para_throughput(&alice_client, 5, para_throughput).await?; + assert_para_throughput(&alice_client, 5, para_throughput, []).await?; log::info!("All parachains producing blocks"); let mut validator_nodes = vec![]; diff --git a/polkadot/zombienet-sdk-tests/tests/smoke/coretime_smoke.rs b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_smoke.rs index 8740a3a940912..092b1e74e9998 100644 --- a/polkadot/zombienet-sdk-tests/tests/smoke/coretime_smoke.rs +++ b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_smoke.rs @@ -83,7 +83,8 @@ async fn coretime_smoke_test() -> Result<(), anyhow::Error> { // Wait for coretime chain to produce blocks log::info!("Waiting for coretime chain to produce 
blocks"); - assert_para_throughput(&alice_client, 30, [(ParaId::from(CORETIME_PARA_ID), 5..31)]).await?; + assert_para_throughput(&alice_client, 30, [(ParaId::from(CORETIME_PARA_ID), 5..31)], []) + .await?; log::info!("Coretime chain is producing blocks"); // Configure broker chain @@ -103,6 +104,7 @@ async fn coretime_smoke_test() -> Result<(), anyhow::Error> { &alice_client, 30, [(ParaId::from(CORETIME_PARA_ID), 5..31), (ParaId::from(TEST_PARA_ID), 5..31)], + [], ) .await?; log::info!("Parachain {} is producing blocks", TEST_PARA_ID); diff --git a/polkadot/zombienet-sdk-tests/tests/smoke/parachains_smoke.rs b/polkadot/zombienet-sdk-tests/tests/smoke/parachains_smoke.rs index 0dfcee7c48de2..5e898952a5535 100644 --- a/polkadot/zombienet-sdk-tests/tests/smoke/parachains_smoke.rs +++ b/polkadot/zombienet-sdk-tests/tests/smoke/parachains_smoke.rs @@ -40,7 +40,7 @@ async fn parachains_smoke_test() -> Result<(), anyhow::Error> { // Check parachain produces at least 5 blocks (60 seconds) // Using 10 relay blocks as measurement window log::info!("Checking parachain {} is producing blocks", PARA_ID); - assert_para_throughput(&alice_client, 5, [(ParaId::from(PARA_ID), 2..6)]).await?; + assert_para_throughput(&alice_client, 5, [(ParaId::from(PARA_ID), 2..6)], []).await?; log::info!("Parachain {} is producing blocks successfully", PARA_ID); log::info!("Test finished successfully"); diff --git a/polkadot/zombienet-sdk-tests/tests/smoke/precompile_pvf_smoke.rs b/polkadot/zombienet-sdk-tests/tests/smoke/precompile_pvf_smoke.rs index da406b7bfc8f8..9ea54daf9f3ce 100644 --- a/polkadot/zombienet-sdk-tests/tests/smoke/precompile_pvf_smoke.rs +++ b/polkadot/zombienet-sdk-tests/tests/smoke/precompile_pvf_smoke.rs @@ -124,7 +124,7 @@ async fn precompile_pvf_smoke_test() -> Result<(), anyhow::Error> { // Wait for parachain to produce blocks log::info!("Waiting for parachain {} to be registered and produce blocks", PARA_ID); - assert_para_throughput(&relay_client, 20, 
[(ParaId::from(PARA_ID), 5..21)]).await?; + assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 5..21)], []).await?; log::info!("Parachain {} is producing blocks", PARA_ID); // Check Dave didn't prepare PVF diff --git a/prdoc/pr_10477.prdoc b/prdoc/pr_10477.prdoc new file mode 100644 index 0000000000000..a90c3e01e2242 --- /dev/null +++ b/prdoc/pr_10477.prdoc @@ -0,0 +1,53 @@ +title: "Block Bundling Node Side" +doc: +- audience: Node Dev + description: | + Implements the node-side logic for block bundling (aka 500ms blocks) in parachains. + The main changes are in the slot-based collator: instead of building one block per core, + blocks are built as requested and distributed over the available cores. + +crates: +- name: frame-support + bump: patch +- name: sp-trie + bump: patch +- name: frame-system + bump: patch +- name: sc-block-builder + bump: major +- name: sp-block-builder + bump: major +- name: sc-consensus + bump: patch +- name: sp-consensus-slots + bump: patch +- name: cumulus-primitives-core + bump: major +- name: cumulus-pallet-parachain-system + bump: major +- name: pallet-glutton + bump: patch +- name: cumulus-client-collator + bump: major +- name: cumulus-client-consensus-common + bump: major +- name: polkadot-node-subsystem-util + bump: patch +- name: sc-basic-authorship + bump: patch +- name: cumulus-client-consensus-aura + bump: major +- name: cumulus-client-proof-size-recording + bump: patch +- name: cumulus-client-service + bump: patch +- name: polkadot-omni-node-lib + bump: patch +- name: testnet-parachains-constants + bump: patch +- name: asset-hub-rococo-runtime + bump: patch +- name: coretime-westend-runtime + bump: patch +- name: penpal-runtime + bump: patch From 108b394a06519a6b1abc6bb9111dab5c124435bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 25 Mar 2026 15:46:16 +0100 Subject: [PATCH 255/312] Fix tests --- cumulus/client/consensus/aura/src/collators/mod.rs | 12 +++++------- 
.../parachain-system/src/validate_block/tests.rs | 6 ++++-- .../tests/functional/coretime_shared_core.rs | 2 +- polkadot/zombienet-sdk-tests/tests/misc/paritydb.rs | 2 +- prdoc/pr_10477.prdoc | 2 ++ 5 files changed, 13 insertions(+), 11 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index fd6519f115f12..e36b43f50af0f 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -353,7 +353,6 @@ mod tests { use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sp_consensus::BlockOrigin; use sp_keystore::{Keystore, KeystorePtr}; - use sp_timestamp::Timestamp; use std::sync::{Arc, Mutex}; async fn import_block>( @@ -417,21 +416,20 @@ mod tests { async fn test_can_build_upon() { sp_tracing::try_init_simple(); - let (client, keystore) = set_up_components(6); + let (client, _keystore) = set_up_components(6); let genesis_hash = client.chain_info().genesis_hash; let mut last_hash = genesis_hash; // Fill up the unincluded segment tracker in the runtime. 
- while claim_slot::<_, _, sp_consensus_aura::sr25519::AuthorityPair>( - Slot::from(u64::MAX), - Timestamp::default(), + while can_build_upon::<_, _>( last_hash, + genesis_hash, + Slot::from(u64::MAX), + Slot::from(u64::MAX), &*client, - &keystore, ) .await - .is_some() { let block = build_and_import_block(&client, genesis_hash).await; last_hash = block.header().hash(); diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 2c756a0cf5dcc..7d495840d1b30 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -855,8 +855,10 @@ fn validate_block_rejects_incomplete_bundle() { .unwrap_err(); }); assert!( - log_capture.contains("Last block in PoV must have maybe_last=true"), - "Expected log about missing maybe_last, got: {}", + log_capture.contains( + "Last block in PoV must include the digest that marks it as the last block in the core" + ), + "Expected log about missing last block digest, got: {}", log_capture.get_logs() ); diff --git a/polkadot/zombienet-sdk-tests/tests/functional/coretime_shared_core.rs b/polkadot/zombienet-sdk-tests/tests/functional/coretime_shared_core.rs index 56194d5857150..0ce63b831674d 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/coretime_shared_core.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/coretime_shared_core.rs @@ -113,7 +113,7 @@ async fn coretime_shared_core_test() -> Result<(), anyhow::Error> { // time=6s. 4 paras share 1 core → slot every 24s, ~2 para blocks/slot (async backing). 
log::info!("Checking parachain block production"); let para_throughput: [(ParaId, Range); 4] = PARAS.map(|id| (ParaId::from(id), 5..15)); - assert_para_throughput(&relay_client, 40, para_throughput).await?; + assert_para_throughput(&relay_client, 40, para_throughput, []).await?; log::info!("All parachains producing blocks"); log::info!("Test finished successfully"); diff --git a/polkadot/zombienet-sdk-tests/tests/misc/paritydb.rs b/polkadot/zombienet-sdk-tests/tests/misc/paritydb.rs index 2258f985d5d3a..49c1f853760e1 100644 --- a/polkadot/zombienet-sdk-tests/tests/misc/paritydb.rs +++ b/polkadot/zombienet-sdk-tests/tests/misc/paritydb.rs @@ -55,7 +55,7 @@ async fn paritydb_test() -> Result<(), anyhow::Error> { // Check that all parachains produce at least 5 blocks within 1 session and 5 blocks (RC) log::info!("Checking parachain block production (all paras registered at genesis)"); let para_throughput: [(ParaId, Range); 10] = PARAS.map(|id| (ParaId::from(id), 2..6)); - assert_para_throughput(&relay_client, 5, para_throughput).await?; + assert_para_throughput(&relay_client, 5, para_throughput, []).await?; log::info!("All parachains producing blocks"); log::info!("Check lag - approval / dispute conclusion."); diff --git a/prdoc/pr_10477.prdoc b/prdoc/pr_10477.prdoc index a90c3e01e2242..bb8e8c5067f95 100644 --- a/prdoc/pr_10477.prdoc +++ b/prdoc/pr_10477.prdoc @@ -51,3 +51,5 @@ crates: bump: patch - name: penpal-runtime bump: patch +- name: polkadot-sdk + bump: patch From 5a7ca5766c489fcc02158a70eed87f34aa0a3586 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 25 Mar 2026 17:07:08 +0100 Subject: [PATCH 256/312] Fix doc link --- cumulus/client/consensus/common/src/parent_search.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/client/consensus/common/src/parent_search.rs b/cumulus/client/consensus/common/src/parent_search.rs index fbd54eb05834e..2914498eaed9f 100644 --- 
a/cumulus/client/consensus/common/src/parent_search.rs +++ b/cumulus/client/consensus/common/src/parent_search.rs @@ -44,7 +44,7 @@ pub struct ParentSearchParams { pub ancestry_lookback: usize, } -/// A potential parent block returned from [`find_potential_parents`] +/// A potential parent block returned from [`find_parent_for_building`] #[derive(PartialEq, Clone)] pub struct ParentSearchResult { /// The header of the included block (confirmed on relay chain). From 6b514c46150a38c66a05807da50431c0af4142c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 25 Mar 2026 22:29:23 +0100 Subject: [PATCH 257/312] Fix failing tests --- cumulus/client/collator/src/service.rs | 10 +++++++--- prdoc/pr_10477.prdoc | 8 ++++---- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/cumulus/client/collator/src/service.rs b/cumulus/client/collator/src/service.rs index 1920822ac469c..2e81fba8775f9 100644 --- a/cumulus/client/collator/src/service.rs +++ b/cumulus/client/collator/src/service.rs @@ -36,7 +36,7 @@ use polkadot_node_primitives::{ use codec::Encode; use futures::channel::oneshot; use parking_lot::Mutex; -use std::{collections::HashSet, sync::Arc}; +use std::sync::Arc; /// The logging target. 
const LOG_TARGET: &str = "cumulus-collator"; @@ -248,7 +248,7 @@ where let mut api_version = 0; let mut upward_messages = Vec::new(); - let mut upward_message_signals = HashSet::>::with_capacity(4); + let mut upward_message_signals = Vec::>::with_capacity(4); let mut horizontal_messages = Vec::new(); let mut new_validation_code = None; let mut processed_downward_messages = 0; @@ -284,7 +284,11 @@ where let (messages, signals) = Self::split_at_separator(collation_info.upward_messages); upward_messages.extend(messages); - upward_message_signals.extend(signals.into_iter()); + signals.into_iter().for_each(|s| { + if upward_message_signals.iter().all(|existing| *existing != s) { + upward_message_signals.push(s); + } + }); horizontal_messages.extend(collation_info.horizontal_messages); if let Some(new_code) = collation_info.new_validation_code { if new_validation_code.replace(new_code).is_some() { diff --git a/prdoc/pr_10477.prdoc b/prdoc/pr_10477.prdoc index bb8e8c5067f95..733ec4a6d4cbd 100644 --- a/prdoc/pr_10477.prdoc +++ b/prdoc/pr_10477.prdoc @@ -10,7 +10,7 @@ crates: - name: frame-support bump: patch - name: sp-trie - bump: patch + bump: minor - name: frame-system bump: patch - name: sc-block-builder @@ -20,7 +20,7 @@ crates: - name: sc-consensus bump: patch - name: sp-consensus-slots - bump: patch + bump: minor - name: cumulus-primitives-core bump: major - name: cumulus-pallet-parachain-system @@ -32,7 +32,7 @@ crates: - name: cumulus-client-consensus-common bump: major - name: polkadot-node-subsystem-util - bump: patch + bump: minor - name: sc-basic-authorship bump: patch - name: cumulus-client-consensus-aura @@ -52,4 +52,4 @@ crates: - name: penpal-runtime bump: patch - name: polkadot-sdk - bump: patch + bump: minor From de96d25defd155836beb7aa413fc67c53ad14b5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 30 Mar 2026 09:29:42 +0200 Subject: [PATCH 258/312] Ensure we honor the HRMP rules --- 
cumulus/pallets/parachain-system/src/lib.rs | 17 +- cumulus/pallets/parachain-system/src/mock.rs | 7 +- .../src/validate_block/implementation.rs | 2 + .../src/validate_block/tests.rs | 155 ++++++++++++++++++ cumulus/pallets/xcmp-queue/src/lib.rs | 23 ++- cumulus/pallets/xcmp-queue/src/tests.rs | 32 ++-- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 8 +- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 4 +- .../parachains/runtimes/test-utils/src/lib.rs | 2 +- cumulus/primitives/core/src/lib.rs | 14 +- cumulus/test/runtime/src/test_pallet.rs | 14 +- 11 files changed, 235 insertions(+), 43 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index ab02743e4ad98..942b5d21cbb00 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -125,6 +125,8 @@ pub struct PoVMessages { pub ump_msg_count: u32, /// Cumulative count of HRMP outbound messages sent in this PoV. pub hrmp_outbound_count: u32, + /// Recipients already used for HRMP outbound messages in this PoV. + pub hrmp_outbound_recipients: Vec, } /// Something that can check the associated relay block number. @@ -462,12 +464,17 @@ pub mod pallet { // Note: this internally calls the `GetChannelInfo` implementation for this // pallet, which draws on the `RelevantMessagingState`. That in turn has // been adjusted above to reflect the correct limits in all channels. 
- let outbound_messages = - T::OutboundXcmpMessageSource::take_outbound_messages(maximum_channels) - .into_iter() - .map(|(recipient, data)| OutboundHrmpMessage { recipient, data }) - .collect::>(); + let outbound_messages = T::OutboundXcmpMessageSource::take_outbound_messages( + maximum_channels, + &pov_tracker.hrmp_outbound_recipients, + ) + .into_iter() + .map(|(recipient, data)| OutboundHrmpMessage { recipient, data }) + .collect::>(); + pov_tracker + .hrmp_outbound_recipients + .extend(outbound_messages.iter().map(|m| m.recipient)); pov_tracker.hrmp_outbound_count = pov_tracker.hrmp_outbound_count.saturating_add(outbound_messages.len() as u32); PoVMessagesTracker::::put(pov_tracker); diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index 15acd522d8ff0..e4dcc3e7e564a 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -150,8 +150,11 @@ pub fn send_message(dest: ParaId, message: Vec) { } impl XcmpMessageSource for FromThreadLocal { - fn take_outbound_messages(maximum_channels: usize) -> Vec<(ParaId, Vec)> { - let mut ids = std::collections::BTreeSet::::new(); + fn take_outbound_messages( + maximum_channels: usize, + excluded_recipients: &[ParaId], + ) -> Vec<(ParaId, Vec)> { + let mut ids = std::collections::BTreeSet::::from_iter(excluded_recipients.iter().copied()); let mut taken_messages = 0; let mut taken_bytes = 0; let mut result = Vec::new(); diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 3bfaa32dc10f1..61a387203ab3b 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -351,6 +351,8 @@ where .expect("UMPSignals does not fit in UMPMessages"); } + horizontal_messages.sort_by(|a, b| a.recipient.cmp(&b.recipient)); + 
ValidationResult { head_data: head_data.expect("HeadData not set"), new_validation_code: new_validation_code.map(Into::into), diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 7d495840d1b30..bf0b41749ca18 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -1106,6 +1106,161 @@ fn validate_block_with_max_hrmp_messages_and_4_blocks_per_pov() { assert_eq!(result.horizontal_messages.len(), max_per_candidate as usize); } +#[test] +fn validate_block_hrmp_messages_sorted_across_blocks_in_bundle() { + sp_tracing::try_init_simple(); + + let blocks_per_pov = 2; + let recipient_a = ParaId::from(200); + let recipient_b = ParaId::from(300); + let (client, parent_head) = create_elastic_scaling_test_client(); + + let mut sproof_builder = + RelayStateSproofBuilder { current_slot: 1.into(), ..Default::default() }; + sproof_builder.host_config.hrmp_max_message_num_per_candidate = 10; + sproof_builder.para_id = ParaId::from(100); + + for recipient in [recipient_a, recipient_b] { + let channel = sproof_builder.upsert_outbound_channel(recipient); + channel.max_capacity = blocks_per_pov; + channel.max_total_size = blocks_per_pov * 10 * 256; + channel.max_message_size = 256; + } + + let TestBlockData { block, validation_data } = build_multiple_blocks_with_witness( + &client, + parent_head.clone(), + sproof_builder, + blocks_per_pov, + |i| { + // Block 0 sends to recipient_b (300), block 1 sends to recipient_a (200). + // Naive concatenation would produce [300, 200] which violates the + // strictly-ascending-by-recipient requirement enforced by the relay chain. 
+ let recipient = if i == 0 { recipient_b } else { recipient_a }; + vec![generate_extrinsic_with_pair( + &client, + Charlie.into(), + TestPalletCall::queue_hrmp_messages { n: 1, recipient }, + Some(i), + )] + }, + |i| { + vec![BlockBundleInfo { index: i as u8, maybe_last: i as u32 + 1 == blocks_per_pov } + .to_digest_item()] + }, + ); + + let result = call_validate_block_validation_result( + test_runtime::elastic_scaling_500ms::WASM_BINARY + .expect("You need to build the WASM binaries to run the tests!"), + parent_head, + block, + validation_data.relay_parent_storage_root, + ) + .expect("Calls `validate_block`"); + + assert_eq!(result.horizontal_messages.len(), 2); + + // The relay chain requires strictly ascending recipient order and at most one message + // per recipient (see `hrmp::Pallet::check_outbound_hrmp`). + assert!( + result.horizontal_messages[0].recipient < result.horizontal_messages[1].recipient, + "HRMP messages must be strictly sorted by recipient, got {:?} before {:?}", + result.horizontal_messages[0].recipient, + result.horizontal_messages[1].recipient, + ); +} + +#[test] +fn validate_block_hrmp_duplicate_recipient_across_blocks_in_bundle() { + sp_tracing::try_init_simple(); + + let blocks_per_pov = 2; + let recipient = ParaId::from(300); + let (client, parent_head) = create_elastic_scaling_test_client(); + + let mut sproof_builder = + RelayStateSproofBuilder { current_slot: 1.into(), ..Default::default() }; + sproof_builder.host_config.hrmp_max_message_num_per_candidate = 10; + sproof_builder.para_id = ParaId::from(100); + + let channel = sproof_builder.upsert_outbound_channel(recipient); + channel.max_capacity = 10; + channel.max_total_size = 10 * 256; + channel.max_message_size = 256; + + // PoV 1: Two blocks both queue HRMP messages to the same recipient. + // Only one message per recipient is allowed per candidate, so the first PoV + // should contain exactly 1 HRMP message. The second message stays pending. 
+ let TestBlockData { block: pov1_block, validation_data: pov1_vdata } = + build_multiple_blocks_with_witness( + &client, + parent_head.clone(), + sproof_builder.clone(), + blocks_per_pov, + |i| { + vec![generate_extrinsic_with_pair( + &client, + Charlie.into(), + TestPalletCall::queue_hrmp_messages { n: 1, recipient }, + Some(i), + )] + }, + |i| { + vec![BlockBundleInfo { index: i as u8, maybe_last: i as u32 + 1 == blocks_per_pov } + .to_digest_item()] + }, + ); + + let pov1_result = call_validate_block_validation_result( + test_runtime::elastic_scaling_500ms::WASM_BINARY + .expect("You need to build the WASM binaries to run the tests!"), + parent_head, + pov1_block.clone(), + pov1_vdata.relay_parent_storage_root, + ) + .expect("Calls `validate_block` for PoV 1"); + + assert_eq!( + pov1_result.horizontal_messages.len(), + 1, + "PoV 1: expected 1 HRMP message, got {} (duplicate recipient)", + pov1_result.horizontal_messages.len(), + ); + + // PoV 2: A single block with no new HRMP extrinsics. The pending message from PoV 1 + // should now be sent. 
+ let pov2_parent_head = pov1_block.blocks().last().unwrap().header().clone(); + sproof_builder.current_slot = 2.into(); + sproof_builder.included_para_head = Some(HeadData(pov2_parent_head.encode())); + + let TestBlockData { block: pov2_block, validation_data: pov2_vdata } = + build_multiple_blocks_with_witness( + &client, + pov2_parent_head.clone(), + sproof_builder, + 1, + |_| vec![], + |_| vec![], + ); + + let pov2_result = call_validate_block_validation_result( + test_runtime::elastic_scaling_500ms::WASM_BINARY + .expect("You need to build the WASM binaries to run the tests!"), + pov2_parent_head, + pov2_block, + pov2_vdata.relay_parent_storage_root, + ) + .expect("Calls `validate_block` for PoV 2"); + + assert_eq!( + pov2_result.horizontal_messages.len(), + 1, + "PoV 2: expected 1 HRMP message (the pending one from PoV 1), got {}", + pov2_result.horizontal_messages.len(), + ); +} + #[test] fn validate_block_with_ump_size_constraint_and_4_blocks_per_pov() { sp_tracing::try_init_simple(); diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index 2ad3af5c37950..49c24d081e200 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -1082,7 +1082,10 @@ impl XcmpMessageHandler for Pallet { } impl XcmpMessageSource for Pallet { - fn take_outbound_messages(maximum_channels: usize) -> Vec<(ParaId, Vec)> { + fn take_outbound_messages( + maximum_channels: usize, + excluded_recipients: &[ParaId], + ) -> Vec<(ParaId, Vec)> { let mut statuses = >::get().into_inner(); let old_statuses_len = statuses.len(); let max_message_count = statuses.len().min(maximum_channels); @@ -1098,6 +1101,17 @@ impl XcmpMessageSource for Pallet { flags, } = status; + if excluded_recipients.contains(para_id) { + return true; + } + + // This is a hard limit from the host config; not even signals can bypass it. 
+ if result.len() == max_message_count { + // We check this condition in the beginning of the loop so that we don't include + // a message where the limit is 0. + return true; + } + let (max_size_now, max_size_ever) = match T::ChannelInfo::get_channel_status(*para_id) { ChannelStatus::Closed => { // This means that there is no such channel anymore. Nothing to be done but @@ -1114,13 +1128,6 @@ impl XcmpMessageSource for Pallet { ChannelStatus::Ready(max_size_now, max_size_ever) => (max_size_now, max_size_ever), }; - // This is a hard limit from the host config; not even signals can bypass it. - if result.len() == max_message_count { - // We check this condition in the beginning of the loop so that we don't include - // a message where the limit is 0. - return true; - } - let page = 'page_fetch: { if *signals_exist { let page = >::get(*para_id); diff --git a/cumulus/pallets/xcmp-queue/src/tests.rs b/cumulus/pallets/xcmp-queue/src/tests.rs index b9da912434329..a3923f8cd244f 100644 --- a/cumulus/pallets/xcmp-queue/src/tests.rs +++ b/cumulus/pallets/xcmp-queue/src/tests.rs @@ -720,7 +720,7 @@ fn send_xcm_nested_works() { new_test_ext().execute_with(|| { assert_ok!(send_xcm::(dest.into(), good.clone())); assert_eq!( - XcmpQueue::take_outbound_messages(usize::MAX), + XcmpQueue::take_outbound_messages(usize::MAX, &[]), vec![( HRMP_PARA_ID.into(), (XcmpMessageFormat::ConcatenatedVersionedXcm, VersionedXcm::from(good.clone())) @@ -733,7 +733,7 @@ fn send_xcm_nested_works() { let bad = Xcm(vec![SetAppendix(good)]); new_test_ext().execute_with(|| { assert_err!(send_xcm::(dest.into(), bad), SendError::ExceedsMaxMessageSize); - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty()); }); } @@ -766,7 +766,7 @@ fn hrmp_signals_are_prioritized() { }, ); - let taken = XcmpQueue::take_outbound_messages(130); + let taken = XcmpQueue::take_outbound_messages(130, &[]); assert_eq!(taken, vec![]); // 
Enqueue some messages @@ -783,14 +783,14 @@ fn hrmp_signals_are_prioritized() { } hypothetically!({ - let taken = XcmpQueue::take_outbound_messages(usize::MAX); + let taken = XcmpQueue::take_outbound_messages(usize::MAX, &[]); assert_eq!(taken, vec![(sibling_para_id.into(), expected_msg,)]); }); // But a signal gets prioritized instead of the messages: assert_ok!(XcmpQueue::send_signal(sibling_para_id.into(), ChannelSignal::Suspend)); - let taken = XcmpQueue::take_outbound_messages(130); + let taken = XcmpQueue::take_outbound_messages(130, &[]); assert_eq!( taken, vec![( @@ -1030,13 +1030,13 @@ fn xcmp_queue_send_xcm_works() { ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(sibling_para_id); // check empty outbound queue - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty()); // now send works assert_ok!(send_xcm::(dest, msg)); // check outbound queue contains message/page for sibling_para_id - assert!(XcmpQueue::take_outbound_messages(usize::MAX) + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]) .iter() .any(|(para_id, _)| para_id == &sibling_para_id)); }) @@ -1074,7 +1074,7 @@ fn xcmp_queue_send_too_big_xcm_fails() { assert_eq!(encoded_message_size, max_message_size as usize - versioned_size); // check empty outbound queue - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty()); // Message is too big because after adding the VersionedXcm enum, it would reach // `max_message_size` Then, adding the format, which is the worst case scenario in which a @@ -1082,7 +1082,7 @@ fn xcmp_queue_send_too_big_xcm_fails() { assert_eq!(send_xcm::(dest, message), Err(SendError::Transport("TooBig")),); // outbound queue is still empty - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty()); }); } 
@@ -1099,7 +1099,7 @@ fn concatenated_opaque_version_xcm_negotiation_works() { // If there is a message in the queue, the notification is not sent assert_ok!(send_xcm::(dest.clone(), msg.clone())); assert_eq!( - XcmpQueue::take_outbound_messages(usize::MAX), + XcmpQueue::take_outbound_messages(usize::MAX, &[]), vec![( sibling_para_id, [ConcatenatedVersionedXcm.encode(), VersionedXcm::V5(msg.clone()).encode()] @@ -1109,12 +1109,12 @@ fn concatenated_opaque_version_xcm_negotiation_works() { // The queue is empty. The notification should be sent. assert_eq!( - XcmpQueue::take_outbound_messages(usize::MAX), + XcmpQueue::take_outbound_messages(usize::MAX, &[]), vec![(sibling_para_id, ConcatenatedOpaqueVersionedXcm.encode())] ); // The notification should not be sent again - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty()); // The recipient parachain still uses the `ConcatenatedVersionedXcm`. let page = generate_mock_xcm_page(0, 1, XcmEncoding::Simple); @@ -1122,7 +1122,7 @@ fn concatenated_opaque_version_xcm_negotiation_works() { // The next message is still sent using the `ConcatenatedVersionedXcm` format. assert_ok!(send_xcm::(dest.clone(), msg.clone())); assert_eq!( - XcmpQueue::take_outbound_messages(usize::MAX), + XcmpQueue::take_outbound_messages(usize::MAX, &[]), vec![( sibling_para_id, [ConcatenatedVersionedXcm.encode(), VersionedXcm::V5(msg.clone()).encode()] @@ -1136,7 +1136,7 @@ fn concatenated_opaque_version_xcm_negotiation_works() { // The next message is sent using the `ConcatenatedOpaqueVersionedXcm` format. 
assert_ok!(send_xcm::(dest, msg.clone())); assert_eq!( - XcmpQueue::take_outbound_messages(usize::MAX), + XcmpQueue::take_outbound_messages(usize::MAX, &[]), vec![( sibling_para_id, [ConcatenatedOpaqueVersionedXcm.encode(), VersionedXcm::V5(msg).encode().encode()] @@ -1215,10 +1215,10 @@ fn verify_fee_factor_increase_and_decrease() { // Fee factor only decreases in `take_outbound_messages` for _ in 0..5 { // We take 5 100 byte pages - XcmpQueue::take_outbound_messages(1); + XcmpQueue::take_outbound_messages(1, &[]); } assert!(DeliveryFeeFactor::::get(sibling_para_id) < FixedU128::from_float(1.72)); - XcmpQueue::take_outbound_messages(1); + XcmpQueue::take_outbound_messages(1, &[]); assert!(DeliveryFeeFactor::::get(sibling_para_id) < FixedU128::from_float(1.63)); }); } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 8731840e0bcc7..8715e7f71241f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -1379,7 +1379,7 @@ impl_runtime_apis! { params: MessageProofParams>, ) -> (bridge_to_westend_config::FromWestendBridgeHubMessagesProof, Weight) { use cumulus_primitives_core::XcmpMessageSource; - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty()); ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); let universal_source = bridge_to_westend_config::open_bridge_for_benchmarks::< Runtime, @@ -1410,7 +1410,7 @@ impl_runtime_apis! { fn is_message_successfully_dispatched(_nonce: bp_messages::MessageNonce) -> bool { use cumulus_primitives_core::XcmpMessageSource; - !XcmpQueue::take_outbound_messages(usize::MAX).is_empty() + !XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty() } } @@ -1424,7 +1424,7 @@ impl_runtime_apis! 
{ params: MessageProofParams>, ) -> (bridge_to_bulletin_config::FromRococoBulletinMessagesProof, Weight) { use cumulus_primitives_core::XcmpMessageSource; - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty()); ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); let universal_source = bridge_to_bulletin_config::open_bridge_for_benchmarks::< Runtime, @@ -1455,7 +1455,7 @@ impl_runtime_apis! { fn is_message_successfully_dispatched(_nonce: bp_messages::MessageNonce) -> bool { use cumulus_primitives_core::XcmpMessageSource; - !XcmpQueue::take_outbound_messages(usize::MAX).is_empty() + !XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty() } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 0583130a3f025..8a814df9c687e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -1339,7 +1339,7 @@ impl_runtime_apis! { params: MessageProofParams>, ) -> (bridge_to_rococo_config::FromRococoBridgeHubMessagesProof, Weight) { use cumulus_primitives_core::XcmpMessageSource; - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty()); ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); let universal_source = bridge_to_rococo_config::open_bridge_for_benchmarks::< Runtime, @@ -1370,7 +1370,7 @@ impl_runtime_apis! 
{ fn is_message_successfully_dispatched(_nonce: bp_messages::MessageNonce) -> bool { use cumulus_primitives_core::XcmpMessageSource; - !XcmpQueue::take_outbound_messages(usize::MAX).is_empty() + !XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty() } } diff --git a/cumulus/parachains/runtimes/test-utils/src/lib.rs b/cumulus/parachains/runtimes/test-utils/src/lib.rs index 189025f2ee9f4..abbaead9ea862 100644 --- a/cumulus/parachains/runtimes/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/test-utils/src/lib.rs @@ -763,7 +763,7 @@ impl { pub fn take_xcm(sent_to_para_id: ParaId) -> Option> { - match HrmpChannelSource::take_outbound_messages(10)[..] { + match HrmpChannelSource::take_outbound_messages(10, &[])[..] { [(para_id, ref mut xcm_message_data)] if para_id.eq(&sent_to_para_id.into()) => { let mut xcm_message_data = &xcm_message_data[..]; // decode diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 3860e353bce25..dc2e6c6f0b166 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -194,12 +194,20 @@ pub enum ChannelStatus { /// A means of figuring out what outbound XCMP messages should be being sent. pub trait XcmpMessageSource { - /// Take a single XCMP message from the queue for the given `dest`, if one exists. - fn take_outbound_messages(maximum_channels: usize) -> Vec<(ParaId, Vec)>; + /// Take outbound XCMP messages from the queue. + /// + /// `excluded_recipients` contains para IDs that must be skipped. 
+ fn take_outbound_messages( + maximum_channels: usize, + excluded_recipients: &[ParaId], + ) -> Vec<(ParaId, Vec)>; } impl XcmpMessageSource for () { - fn take_outbound_messages(_maximum_channels: usize) -> Vec<(ParaId, Vec)> { + fn take_outbound_messages( + _maximum_channels: usize, + _excluded_recipients: &[ParaId], + ) -> Vec<(ParaId, Vec)> { Vec::new() } } diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index 845c8098948a7..48dc9c0c5fa64 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -75,10 +75,20 @@ pub mod pallet { impl XcmpMessageSource for Pallet { fn take_outbound_messages( maximum_channels: usize, + excluded_recipients: &[ParaId], ) -> alloc::vec::Vec<(ParaId, alloc::vec::Vec)> { PendingOutboundHrmpMessages::::mutate(|messages| { - let to_take = messages.len().min(maximum_channels); - messages.drain(..to_take).collect() + let mut taken = 0; + let mut result = alloc::vec::Vec::new(); + messages.retain(|(recipient, data)| { + if taken >= maximum_channels || excluded_recipients.contains(recipient) { + return true; + } + taken += 1; + result.push((*recipient, data.clone())); + false + }); + result }) } } From 1710b51cbacaa6015e8a83fbd3bc94cbb1d97cfd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 30 Mar 2026 12:22:48 +0200 Subject: [PATCH 259/312] Fix tests --- cumulus/pallets/parachain-system/src/mock.rs | 3 +- .../src/validate_block/tests.rs | 28 ++++++++++--------- cumulus/test/runtime/src/test_pallet.rs | 25 +++++++++++++++-- 3 files changed, 39 insertions(+), 17 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index e4dcc3e7e564a..be722a838aa1a 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -154,7 +154,8 @@ impl XcmpMessageSource for FromThreadLocal { maximum_channels: usize, 
excluded_recipients: &[ParaId], ) -> Vec<(ParaId, Vec)> { - let mut ids = std::collections::BTreeSet::::from_iter(excluded_recipients.iter().copied()); + let mut ids = + std::collections::BTreeSet::::from_iter(excluded_recipients.iter().copied()); let mut taken_messages = 0; let mut taken_bytes = 0; let mut result = Vec::new(); diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index bf0b41749ca18..fcb8b831f9072 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -1057,8 +1057,9 @@ fn validate_block_with_max_hrmp_messages_and_4_blocks_per_pov() { sp_tracing::try_init_simple(); let blocks_per_pov = 4; - let max_per_candidate = 100; - let recipient = ParaId::from(300); + let msgs_per_block: u32 = 25; + let max_per_candidate = msgs_per_block * blocks_per_pov; + let first_recipient = 300u32; let (client, parent_head) = create_elastic_scaling_test_client(); let mut sproof_builder = @@ -1066,10 +1067,12 @@ fn validate_block_with_max_hrmp_messages_and_4_blocks_per_pov() { sproof_builder.host_config.hrmp_max_message_num_per_candidate = max_per_candidate; sproof_builder.para_id = ParaId::from(100); - let channel = sproof_builder.upsert_outbound_channel(recipient); - channel.max_capacity = blocks_per_pov; - channel.max_total_size = blocks_per_pov * max_per_candidate * 256; - channel.max_message_size = 256; + for i in 0..max_per_candidate { + let channel = sproof_builder.upsert_outbound_channel(ParaId::from(first_recipient + i)); + channel.max_capacity = blocks_per_pov; + channel.max_total_size = blocks_per_pov * max_per_candidate * 256; + channel.max_message_size = 256; + } let TestBlockData { block, validation_data } = build_multiple_blocks_with_witness( &client, @@ -1077,10 +1080,14 @@ fn validate_block_with_max_hrmp_messages_and_4_blocks_per_pov() { sproof_builder, blocks_per_pov, |i| { + let 
block_first_recipient = ParaId::from(first_recipient + i * msgs_per_block); vec![generate_extrinsic_with_pair( &client, Charlie.into(), - TestPalletCall::queue_hrmp_messages { n: max_per_candidate, recipient }, + TestPalletCall::queue_hrmp_messages_to_n_recipients { + n: msgs_per_block, + first_recipient: block_first_recipient, + }, Some(i), )] }, @@ -1189,9 +1196,6 @@ fn validate_block_hrmp_duplicate_recipient_across_blocks_in_bundle() { channel.max_total_size = 10 * 256; channel.max_message_size = 256; - // PoV 1: Two blocks both queue HRMP messages to the same recipient. - // Only one message per recipient is allowed per candidate, so the first PoV - // should contain exactly 1 HRMP message. The second message stays pending. let TestBlockData { block: pov1_block, validation_data: pov1_vdata } = build_multiple_blocks_with_witness( &client, @@ -1224,12 +1228,10 @@ fn validate_block_hrmp_duplicate_recipient_across_blocks_in_bundle() { assert_eq!( pov1_result.horizontal_messages.len(), 1, - "PoV 1: expected 1 HRMP message, got {} (duplicate recipient)", + "PoV 1: expected 1 HRMP message, got {}", pov1_result.horizontal_messages.len(), ); - // PoV 2: A single block with no new HRMP extrinsics. The pending message from PoV 1 - // should now be sent. 
let pov2_parent_head = pov1_block.blocks().last().unwrap().header().clone(); sproof_builder.current_slot = 2.into(); sproof_builder.included_para_head = Some(HeadData(pov2_parent_head.encode())); diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index 48dc9c0c5fa64..f7cfd65929f89 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -78,13 +78,16 @@ pub mod pallet { excluded_recipients: &[ParaId], ) -> alloc::vec::Vec<(ParaId, alloc::vec::Vec)> { PendingOutboundHrmpMessages::::mutate(|messages| { - let mut taken = 0; + let mut taken_recipients = alloc::vec::Vec::new(); let mut result = alloc::vec::Vec::new(); messages.retain(|(recipient, data)| { - if taken >= maximum_channels || excluded_recipients.contains(recipient) { + if result.len() >= maximum_channels || + excluded_recipients.contains(recipient) || + taken_recipients.contains(recipient) + { return true; } - taken += 1; + taken_recipients.push(*recipient); result.push((*recipient, data.clone())); false }); @@ -238,6 +241,22 @@ pub mod pallet { Ok(()) } + /// Queues one HRMP message each to `n` consecutive recipients starting from + /// `first_recipient`. + #[pallet::weight(0)] + pub fn queue_hrmp_messages_to_n_recipients( + _: OriginFor, + n: u32, + first_recipient: ParaId, + ) -> DispatchResult { + PendingOutboundHrmpMessages::::mutate(|messages| { + for i in 0..n { + messages.push((ParaId::from(u32::from(first_recipient) + i), vec![i as u8])); + } + }); + Ok(()) + } + /// Schedule a 1 second weight registration in the next `on_initialize`. 
#[pallet::weight(0)] pub fn schedule_weight_registration(_: OriginFor) -> DispatchResult { From 481a84caec795b2aeb09bb8a61467148eeb81968 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 30 Mar 2026 14:04:52 +0200 Subject: [PATCH 260/312] Fix issues --- .../zombienet-sdk-helpers/src/lib.rs | 37 ++++++++++++++----- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 59e7a89c889db..602a36caee315 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -75,17 +75,13 @@ pub async fn assert_para_throughput( expected_number_of_blocks: impl Into, Range)>>, ) -> Result<(), anyhow::Error> { let ranges = expected_candidate_ranges.into(); + let expected_number_of_blocks = expected_number_of_blocks.into(); let valid_para_ids: Vec = ranges.keys().cloned().collect(); - assert_para_throughput_with(relay_client, stop_after, ranges, |receipt| { - let para_id = receipt.descriptor.para_id(); - if !valid_para_ids.contains(¶_id) { - return Err(anyhow!("Invalid ParaId detected: {}", para_id)); - } + let candidate_count = + collect_para_throughput(relay_client, stop_after, ranges, |_| Ok(true)).await?; - Ok(true) - }) - .await + assert_expected_number_of_blocks(candidate_count, expected_number_of_blocks).await } /// Like [`assert_para_throughput`], but accepts a closure to validate each backed candidate @@ -103,6 +99,20 @@ pub async fn assert_para_throughput_with( expected_candidate_ranges: impl Into>>, validate: F, ) -> Result<(), anyhow::Error> +where + F: Fn(&CandidateReceiptV2) -> Result, +{ + collect_para_throughput(relay_client, stop_after, expected_candidate_ranges, validate) + .await + .map(|_| ()) +} + +async fn collect_para_throughput( + relay_client: &OnlineClient, + stop_after: u32, + expected_candidate_ranges: impl Into>>, + validate: F, +) -> Result>>, 
anyhow::Error> where F: Fn(&CandidateReceiptV2) -> Result, { @@ -111,7 +121,6 @@ where let mut current_block_count = 0; let expected_candidate_ranges = expected_candidate_ranges.into(); - let expected_number_of_blocks = expected_number_of_blocks.into(); let valid_para_ids: Vec = expected_candidate_ranges.keys().cloned().collect(); log::info!( @@ -180,6 +189,13 @@ where } } + Ok(candidate_count) +} + +async fn assert_expected_number_of_blocks( + candidate_count: HashMap>>, + expected_number_of_blocks: HashMap, Range)>, +) -> Result<(), anyhow::Error> { for (para_id, (para_client, expected_number_of_blocks)) in expected_number_of_blocks { let receipts = candidate_count .get(¶_id) @@ -196,7 +212,8 @@ where let mut core_info = None; loop { - let block = para_client.blocks().at(next_para_block_hash).await?; + let block: Block> = + para_client.blocks().at(next_para_block_hash).await?; // Genesis block is not part of a candidate :) if block.number() == 0 { From 10c449c4964ebb10608bdc9b67ed9c4fed17eedc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 30 Mar 2026 14:20:20 +0200 Subject: [PATCH 261/312] Fix warning --- cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 602a36caee315..79a2f8b7d2575 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -76,7 +76,6 @@ pub async fn assert_para_throughput( ) -> Result<(), anyhow::Error> { let ranges = expected_candidate_ranges.into(); let expected_number_of_blocks = expected_number_of_blocks.into(); - let valid_para_ids: Vec = ranges.keys().cloned().collect(); let candidate_count = collect_para_throughput(relay_client, stop_after, ranges, |_| Ok(true)).await?; @@ -226,6 +225,7 @@ async fn assert_expected_number_of_blocks( // If the core changes or the relay 
identifier, we found all blocks for the // candidate. if *relay_identifier.get_or_insert(ri.clone()) != ri || + *core_info.get_or_insert(ci.clone()) != ci { break; From c3ae59c609e3d9a374b0da14bc2320c1a57f9c54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 30 Mar 2026 18:29:21 +0200 Subject: [PATCH 262/312] Use correct number of cores --- .../tests/zombie_ci/block_bundling/pov_recovery.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs index 3a3009a5179d3..690304f97f01a 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs @@ -126,7 +126,7 @@ async fn build_network_config() -> Result { "configuration": { "config": { "scheduler_params": { - "num_cores": 2, + "num_cores": 3, "max_validators_per_core": 1 }, "approval_voting_params": { From 0f4e3405acd1f612b48c39681760406077a28e41 Mon Sep 17 00:00:00 2001 From: "cmd[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 30 Mar 2026 16:38:26 +0000 Subject: [PATCH 263/312] Update from github-actions[bot] running command 'fmt' --- cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 79a2f8b7d2575..4fde0d20d7d76 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -225,7 +225,6 @@ async fn assert_expected_number_of_blocks( // If the core changes or the relay identifier, we found all blocks for the // candidate. 
if *relay_identifier.get_or_insert(ri.clone()) != ri || - *core_info.get_or_insert(ci.clone()) != ci { break; From 69a04a42989b318e2d93743adae3339be770b6da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 30 Mar 2026 23:48:57 +0200 Subject: [PATCH 264/312] Use large runners --- .github/zombienet-tests/zombienet_cumulus_tests.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/zombienet-tests/zombienet_cumulus_tests.yml b/.github/zombienet-tests/zombienet_cumulus_tests.yml index 79aad15bc20ab..9953bf807258d 100644 --- a/.github/zombienet-tests/zombienet_cumulus_tests.yml +++ b/.github/zombienet-tests/zombienet_cumulus_tests.yml @@ -17,7 +17,7 @@ - job-name: "zombienet-cumulus-0004-runtime_upgrade" test-filter: "zombie_ci::runtime_upgrade::runtime_upgrade" - runner-type: "default" + runner-type: "large" cumulus-image: "test-parachain" needs-wasm-binary: true @@ -76,21 +76,21 @@ - job-name: "zombienet-cumulus-0014-elastic_scaling_upgrade_to_3_cores" test-filter: "zombie_ci::elastic_scaling::upgrade_to_3_cores::elastic_scaling_upgrade_to_3_cores" - runner-type: "default" + runner-type: "large" cumulus-image: "test-parachain" needs-wasm-binary: true - job-name: "zombienet-cumulus-0015-parachain-runtime-upgrade" test-filter: "zombie_ci::parachain_runtime_upgrade_slot_duration_18s::parachain_runtime_upgrade_slot_duration_18s" - runner-type: "default" + runner-type: "large" cumulus-image: "test-parachain" needs-wasm-binary: true - job-name: "zombienet-cumulus-0016-block_bundling_basic" test-filter: "zombie_ci::block_bundling::basic::block_bundling_basic" - runner-type: "default" + runner-type: "large" cumulus-image: "test-parachain" use-zombienet-sdk: true @@ -102,7 +102,7 @@ - job-name: "zombienet-cumulus-0018-block_bundling_full_core_usage_scenarios" test-filter: "zombie_ci::block_bundling::full_core_usage_scenarios::block_bundling_full_core_usage_scenarios" - runner-type: "default" + runner-type: "large" 
cumulus-image: "test-parachain" use-zombienet-sdk: true @@ -120,7 +120,7 @@ - job-name: "zombienet-cumulus-0021-block_bundling_runtime_upgrade" test-filter: "zombie_ci::block_bundling::runtime_upgrade::block_bundling_runtime_upgrade" - runner-type: "default" + runner-type: "large" cumulus-image: "test-parachain" use-zombienet-sdk: true needs-wasm-binary: true From 429d45a32f9bfd8070f847debee882d24e2cb856 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 31 Mar 2026 13:32:21 +0200 Subject: [PATCH 265/312] Increase the max code size --- .../tests/zombie_ci/block_bundling/runtime_upgrade.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs index 1756787777a4d..82b04cc474593 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs @@ -162,6 +162,7 @@ async fn build_network_config() -> Result { .with_genesis_overrides(json!({ "configuration": { "config": { + "max_code_size": 5242880, "scheduler_params": { "num_cores": 3, "max_validators_per_core": 1 From a90e789e7491bcfde34f21bd25e3145aa14c2536 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 1 Apr 2026 11:45:03 +0200 Subject: [PATCH 266/312] Apply suggestion from @bkchr --- .../parachain-system/src/validate_block/implementation.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 61a387203ab3b..014515d5920e4 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -85,8 +85,6 @@ where B::Extrinsic: ExtrinsicCall, ::Call: 
IsSubType>, { - sp_runtime::runtime_logger::RuntimeLogger::init(); - let _guard = ( // Replace storage calls with our own implementations sp_io::storage::host_read.replace_implementation(host_storage_read), From 2091af910809488748fb691109b4a30ca86d16b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 1 Apr 2026 12:30:01 +0200 Subject: [PATCH 267/312] Remove unused `find_cores` method --- .../node/subsystem-util/src/runtime/mod.rs | 122 ------------------ 1 file changed, 122 deletions(-) diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 671248884d554..0cc26f154b969 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -537,35 +537,6 @@ impl ClaimQueueSnapshot { self.0.iter() } - /// Find the earliest cores for the given `para_id` starting from the given - /// `claim_queue_offset`. - /// - /// It is not guaranteed that at the given `claim_queue_offset` cores are available for - /// the `para_id`. Thus, the claim queue offset for the core indices is returned as well. - pub fn find_cores( - &self, - para_id: ParaId, - claim_queue_offset: u8, - ) -> Option<(Vec, ClaimQueueOffset)> { - let mut offset_to_cores = BTreeMap::>::new(); - - self.0.iter().for_each(|(core_index, ids)| { - ids.iter() - .enumerate() - .filter_map(|(i, id)| (*id == para_id).then(|| i)) - .for_each(|offset| { - offset_to_cores.entry(offset).or_default().push(*core_index); - }); - }); - - offset_to_cores.into_iter().find_map(|(offset, cores)| { - if offset >= claim_queue_offset as usize { - Some((cores, ClaimQueueOffset(offset as u8))) - } else { - None - } - }) - } /// Get all claimed cores for the given `para_id` at the specified depth. 
pub fn iter_claims_at_depth_for_para( &self, @@ -648,96 +619,3 @@ pub async fn fetch_validation_code_bomb_limit( res } } - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn find_cores_works() { - let claim_queue = ClaimQueueSnapshot(BTreeMap::from_iter( - [ - ( - CoreIndex(0), - VecDeque::from_iter([ParaId::from(1), ParaId::from(2), ParaId::from(1)]), - ), - ( - CoreIndex(1), - VecDeque::from_iter([ParaId::from(1), ParaId::from(1), ParaId::from(2)]), - ), - ( - CoreIndex(2), - VecDeque::from_iter([ParaId::from(1), ParaId::from(2), ParaId::from(3)]), - ), - ( - CoreIndex(3), - VecDeque::from_iter([ParaId::from(2), ParaId::from(1), ParaId::from(3)]), - ), - ] - .into_iter(), - )); - - // Test finding cores for para_id 1 at offset 0 - let (cores, actual_offset) = claim_queue.find_cores(1u32.into(), 0).unwrap(); - assert_eq!(cores.len(), 3); - assert!(cores.contains(&CoreIndex(0))); - assert!(cores.contains(&CoreIndex(1))); - assert!(cores.contains(&CoreIndex(2))); - assert_eq!(actual_offset, ClaimQueueOffset(0)); - - // Test finding cores for para_id 1 at offset 1 - let (cores, actual_offset) = claim_queue.find_cores(1u32.into(), 1).unwrap(); - assert_eq!(cores.len(), 2); - assert!(cores.contains(&CoreIndex(1))); - assert!(cores.contains(&CoreIndex(3))); - assert_eq!(actual_offset, ClaimQueueOffset(1)); - - // Test finding cores for para_id 1 at offset 2 - let (cores, actual_offset) = claim_queue.find_cores(1u32.into(), 2).unwrap(); - assert_eq!(cores.len(), 1); - assert!(cores.contains(&CoreIndex(0))); - assert_eq!(actual_offset, ClaimQueueOffset(2)); - - // Test finding cores for para_id 1 at offset 3 (no cores at this offset) - assert_eq!(claim_queue.find_cores(1u32.into(), 3), None); - - // Test finding cores for para_id 2 at offset 0 - let (cores, actual_offset) = claim_queue.find_cores(2u32.into(), 0).unwrap(); - assert_eq!(cores.len(), 1); - assert!(cores.contains(&CoreIndex(3))); - assert_eq!(actual_offset, ClaimQueueOffset(0)); - - // Test finding 
cores for para_id 2 at offset 1 - let (cores, actual_offset) = claim_queue.find_cores(2u32.into(), 1).unwrap(); - assert_eq!(cores.len(), 2); - assert!(cores.contains(&CoreIndex(0))); - assert!(cores.contains(&CoreIndex(2))); - assert_eq!(actual_offset, ClaimQueueOffset(1)); - - // Test finding cores for para_id 2 at offset 2 - let (cores, actual_offset) = claim_queue.find_cores(2u32.into(), 2).unwrap(); - assert_eq!(cores.len(), 1); - assert!(cores.contains(&CoreIndex(1))); - assert_eq!(actual_offset, ClaimQueueOffset(2)); - - // Test finding cores for para_id 3 at offset 0 (should find at offset 2) - let (cores, actual_offset) = claim_queue.find_cores(3u32.into(), 0).unwrap(); - assert_eq!(cores.len(), 2); - assert!(cores.contains(&CoreIndex(2))); - assert!(cores.contains(&CoreIndex(3))); - assert_eq!(actual_offset, ClaimQueueOffset(2)); - - // Test finding cores for para_id 3 at offset 2 - let (cores, actual_offset) = claim_queue.find_cores(3u32.into(), 2).unwrap(); - assert_eq!(cores.len(), 2); - assert!(cores.contains(&CoreIndex(2))); - assert!(cores.contains(&CoreIndex(3))); - assert_eq!(actual_offset, ClaimQueueOffset(2)); - - // Test finding cores for para_id 3 at offset 3 (no cores at this offset) - assert_eq!(claim_queue.find_cores(3u32.into(), 3), None); - - // Test finding cores for non-existent para_id - assert_eq!(claim_queue.find_cores(99u32.into(), 0), None); - } -} From c9e954fd35422d093394d92d36a06df2a9497f52 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 1 Apr 2026 14:56:33 +0200 Subject: [PATCH 268/312] Apply michals proposal --- .../slot_based/block_builder_task.rs | 359 ++++++++++++++++-- 1 file changed, 326 insertions(+), 33 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 07d101d9713a4..1513f1004219e 100644 --- 
a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -619,21 +619,15 @@ where break; } - // TODO: With transaction streaming we do not need to skip anything any more and can just - // set `is_last`. - - // If we have more than 3 blocks in total, aka a block time which is less than 2s, we are - // going to skip the last block. Otherwise, when running with 3 blocks, we are just - // adjusting the authoring duration below. - let skip_last_block_in_slot = total_number_of_blocks > 3 && is_last_core_in_parachain_slot; - // We require that the next node has imported our last block before it can start building - // the next block. To ensure that the next node is able to do so, we are skipping the last - // block in the parachain slot. In the future this can be removed again. - let is_last_block_in_core = block_index + 1 == blocks_per_core || - // This branch here is for the case when we are going to skip the last block. - (block_index + 2 == blocks_per_core && skip_last_block_in_slot); - - if block_index + 1 == blocks_per_core && skip_last_block_in_slot { + // Create schedule for this block to determine timing decisions + let schedule = BlockProductionSchedule::new( + block_index, + blocks_per_core, + total_number_of_blocks, + is_last_core_in_parachain_slot, + ); + + if schedule.should_skip_production() { tracing::debug!( target: LOG_TARGET, "Skipping block production so that the next node is able to import all blocks before its slot." @@ -683,28 +677,13 @@ where let time_left_for_block = slot_time_for_core.saturating_sub(core_start.elapsed()) / (blocks_per_core - block_index) as u32; - // For the special case of 3 blocks on 3 cores or 2 blocks on 2 cores, we are going to - // adjust the authoring duration on the last block. 
- // - // TODO: Remove when transaction streaming is implemented - let adjusted_time_left = if is_last_block_in_core && - is_last_core_in_parachain_slot && - blocks_per_core == 1 && - total_number_of_blocks <= 3 && - total_number_of_blocks >= 2 - { - time_left_for_block / 2 - } else { - time_left_for_block - }; - // The first block on a core gets the full remaining core time so that the runtime's // `FullCore` weight mode can actually be utilized. Subsequent blocks are capped at // `block_time` because they only carry fractional weight. let authoring_duration = if block_index == 0 { slot_time_for_core.saturating_sub(core_start.elapsed()) } else { - block_time.min(adjusted_time_left) + schedule.authoring_duration(time_left_for_block, block_time) }; tracing::trace!( @@ -721,7 +700,7 @@ where CumulusDigestItem::CoreInfo(core_info.clone()).to_digest_item(), CumulusDigestItem::BlockBundleInfo(BlockBundleInfo { index: block_index as u8, - maybe_last: is_last_block_in_core, + maybe_last: schedule.is_effective_last_block(), }) .to_digest_item(), ], @@ -797,7 +776,7 @@ where .checked_sub(block_production_start.elapsed()) // Let's not sleep for the last block here, to send out the collation as early as // possible. - .filter(|_| !is_last_block_in_core) + .filter(|_| !schedule.is_effective_last_block()) { tokio::time::sleep(sleep).await; } @@ -1008,6 +987,122 @@ impl Cores { } } +/// The three block production modes based on total block rate. +/// +/// These modes exist because without transaction streaming, the next author +/// must sequentially import all blocks before building their own. Each mode +/// uses a different strategy to provide import buffer time. +// TODO: Once transaction streaming is implemented, this can be removed. +#[derive(Debug, Clone, Copy)] +enum BlockProductionMode { + /// 0-1 blocks per slot - no special handling needed. + /// The next author has plenty of time to import. 
+ Normal, + + /// 2-3 blocks per slot (~2-3s block time) - reduce authoring time. + Legacy { + /// Time adjustment factor of last block authoring time. + time_factor: f32, + }, + + /// >3 blocks per slot (<2s block time) - skip last block. + /// + /// Block time is too fast for time reduction alone, so we skip + /// producing the last block in each parachain slot entirely. + Bundling, +} + +impl BlockProductionMode { + /// Determine the appropriate mode based on total blocks per relay slot. + fn from_total_blocks(total_blocks: u32) -> Self { + match total_blocks { + 0..=1 => Self::Normal, + 2..=3 => Self::Legacy { time_factor: 0.5 }, + _ => Self::Bundling, + } + } + + /// Whether this mode skips the last block (vs adjusting time). + fn skips_last_block(&self) -> bool { + matches!(self, Self::Bundling) + } +} + +/// Policy object that determines block production timing decisions. +/// +/// Encapsulates the complex timing logic for block production, making decisions +/// about when to skip blocks, how long to spend authoring, and when to sleep. +#[derive(Debug, Clone, Copy)] +struct BlockProductionSchedule { + mode: BlockProductionMode, + block_index: u32, + blocks_per_core: u32, + is_last_core_in_parachain_slot: bool, +} + +impl BlockProductionSchedule { + fn new( + block_index: u32, + blocks_per_core: u32, + total_blocks: u32, + is_last_core_in_parachain_slot: bool, + ) -> Self { + Self { + mode: BlockProductionMode::from_total_blocks(total_blocks), + block_index, + blocks_per_core, + is_last_core_in_parachain_slot, + } + } + + /// Whether this is the actual last block index in the core. + fn is_actual_last_block(&self) -> bool { + self.block_index + 1 == self.blocks_per_core + } + + /// Whether this is the second-to-last block index. + fn is_second_to_last(&self) -> bool { + self.block_index + 2 == self.blocks_per_core + } + + /// Whether to skip producing this block entirely. 
+ /// + /// In Bundling mode, we skip the last block in the parachain slot + /// to give the next author time to import all previous blocks. + fn should_skip_production(&self) -> bool { + self.mode.skips_last_block() && + self.is_actual_last_block() && + self.is_last_core_in_parachain_slot + } + + /// Whether this is effectively the last block we'll produce for this core. + /// + /// Used for `BundleInfo { maybe_last }` - validators need to know which + /// block might be final. Also used for sleep decisions - we don't sleep + /// after the last or second-to-last block to speed up the final stretch. + /// + /// The second-to-last block is always included because: + /// 1. In Bundling mode on the last core, we skip the actual last block + /// 2. Even when not skipping, avoiding sleep on the last two blocks speeds things up + fn is_effective_last_block(&self) -> bool { + self.is_actual_last_block() || self.is_second_to_last() + } + + /// Compute the authoring duration given available time. + fn authoring_duration(&self, time_left: Duration, block_time: Duration) -> Duration { + let adjusted = match &self.mode { + BlockProductionMode::Legacy { time_factor } + if self.is_effective_last_block() && self.blocks_per_core == 1 => + { + time_left.mul_f32(*time_factor) + }, + _ => time_left, + }; + + block_time.min(adjusted) + } +} + /// Determine the cores for the given `para_id`. /// /// Takes into account the `parent` core to find the next available cores. 
@@ -1036,3 +1131,201 @@ pub async fn determine_cores( }) }) } + +#[cfg(test)] +mod block_production_schedule_tests { + use super::*; + + mod mode_tests { + use super::*; + + #[test] + fn mode_selection_from_total_blocks() { + // 0-1 blocks = Normal + assert!(matches!( + BlockProductionMode::from_total_blocks(0), + BlockProductionMode::Normal + )); + assert!(matches!( + BlockProductionMode::from_total_blocks(1), + BlockProductionMode::Normal + )); + + // 2-3 blocks = Medium with half time + assert!(matches!( + BlockProductionMode::from_total_blocks(2), + BlockProductionMode::Legacy { time_factor: 0.5 } + )); + assert!(matches!( + BlockProductionMode::from_total_blocks(3), + BlockProductionMode::Legacy { time_factor: 0.5 } + )); + + // >3 blocks = Fast + assert!(matches!( + BlockProductionMode::from_total_blocks(4), + BlockProductionMode::Bundling + )); + assert!(matches!( + BlockProductionMode::from_total_blocks(12), + BlockProductionMode::Bundling + )); + } + + #[test] + fn mode_behavior_flags() { + assert!(!BlockProductionMode::Normal.skips_last_block()); + + let medium = BlockProductionMode::Legacy { time_factor: 0.5 }; + assert!(!medium.skips_last_block()); + + assert!(BlockProductionMode::Bundling.skips_last_block()); + } + } + + mod schedule_tests { + use super::*; + + // fn new( + // block_index: u32, + // blocks_per_core: u32, + // total_blocks: u32, + // is_last_core_in_parachain_slot: bool, + // ) + + #[test] + fn skip_production_only_in_fast_mode_last_core_last_block() { + // Should skip: Fast mode, last core, last block + assert!(BlockProductionSchedule::new(0, 1, 4, true).should_skip_production()); + + // Should NOT skip: not last core in parachain slot + assert!(!BlockProductionSchedule::new(0, 1, 4, false).should_skip_production()); + + // Should NOT skip: Medium mode (uses time adjustment instead) + assert!(!BlockProductionSchedule::new(0, 1, 3, true).should_skip_production()); + + // Should NOT skip: not last block in core + 
assert!(!BlockProductionSchedule::new(0, 2, 4, true).should_skip_production()); + + // Should skip: Fast mode, last core, last block + assert!(BlockProductionSchedule::new(3, 4, 12, true).should_skip_production()); + // Should skip: Fast mode, last core, second to last block + assert!(!BlockProductionSchedule::new(2, 4, 12, true).should_skip_production()); + + // Should NOT skip: Fast mode, not last core, last block + assert!(!BlockProductionSchedule::new(3, 4, 12, false).should_skip_production()); + assert!(!BlockProductionSchedule::new(2, 4, 12, false).should_skip_production()); + } + + #[test] + fn effective_last_block_includes_second_to_last() { + // block_index 2 is second-to-last (2+2 == 4), always effective last + let schedule = BlockProductionSchedule::new(2, 4, 12, true); + assert!(schedule.is_effective_last_block()); + assert!(!schedule.is_actual_last_block()); + assert!(schedule.is_second_to_last()); + + // Same config but not last core - second-to-last is STILL effective last + // (original logic doesn't gate on is_last_core_in_parachain_slot) + let schedule = BlockProductionSchedule::new(2, 4, 12, false); + assert!(schedule.is_effective_last_block()); + + let schedule = BlockProductionSchedule::new(3, 4, 12, false); + assert!(schedule.is_effective_last_block()); + + // First block is not effective last + let schedule = BlockProductionSchedule::new(0, 4, 12, true); + assert!(!schedule.is_effective_last_block()); + + // With only 1 block per core, there's no second-to-last + let schedule = BlockProductionSchedule::new(0, 1, 3, true); + assert!(schedule.is_effective_last_block()); // actual last + assert!(!schedule.is_second_to_last()); + } + + #[test] + fn authoring_duration_halved_in_medium_mode() { + let time_left = Duration::from_millis(2000); + let block_time = Duration::from_millis(3000); + + // Medium mode, last block, 1 block per core -> halved + let schedule = BlockProductionSchedule::new(0, 1, 2, true); + assert_eq!( + 
schedule.authoring_duration(time_left, block_time), + Duration::from_millis(1000) // halved, capped by time_left/2 + ); + + // Medium mode but NOT last block -> full time + let schedule = BlockProductionSchedule::new(0, 2, 2, true); + assert_eq!( + schedule.authoring_duration(time_left, block_time), + Duration::from_millis(2000) // full time_left (< block_time) + ); + + // Fast mode -> no time adjustment (uses skip instead) + let schedule = BlockProductionSchedule::new(0, 1, 4, true); + assert_eq!( + schedule.authoring_duration(time_left, block_time), + Duration::from_millis(2000) + ); + } + + /// This test verifies that the new schedule logic matches the original inline logic + /// for various block/core configurations. + #[test] + fn schedule_matches_original_logic() { + // Test various configurations to ensure schedule matches original behavior + let test_cases = [ + // (block_index, blocks_per_core, total_blocks, is_last_core) + (0, 1, 1, false), // Normal: 1 block, not last core + (0, 1, 1, true), // Normal: 1 block, last core + (0, 1, 2, true), // Medium: 2 blocks, last core + (0, 1, 3, true), // Medium: 3 blocks, last core + (0, 1, 4, true), // Fast: 4 blocks, last core (should skip) + (0, 1, 4, false), // Fast: 4 blocks, not last core + (0, 2, 6, true), // Fast: 6 blocks, 2 per core, block 0 + (1, 2, 6, true), // Fast: 6 blocks, 2 per core, block 1 (last) + (0, 4, 12, true), // Fast: 12 blocks, 4 per core, block 0 + (2, 4, 12, true), // Fast: 12 blocks, 4 per core, block 2 (second-to-last) + (3, 4, 12, true), // Fast: 12 blocks, 4 per core, block 3 (last, should skip) + ]; + + for (block_index, blocks_per_core, total_blocks, is_last_core) in test_cases { + let schedule = BlockProductionSchedule::new( + block_index, + blocks_per_core, + total_blocks, + is_last_core, + ); + + // Original is_last_block_in_core logic + let original_is_last = block_index + 1 == blocks_per_core || + (block_index + 2 == blocks_per_core && blocks_per_core > 1); + + // Original skip 
logic + let original_skip = + block_index + 1 == blocks_per_core && total_blocks > 3 && is_last_core; + + assert_eq!( + schedule.is_effective_last_block(), + original_is_last, + "is_effective_last_block mismatch for ({}, {}, {}, {})", + block_index, + blocks_per_core, + total_blocks, + is_last_core + ); + + assert_eq!( + schedule.should_skip_production(), + original_skip, + "should_skip_production mismatch for ({}, {}, {}, {})", + block_index, + blocks_per_core, + total_blocks, + is_last_core + ); + } + } + } +} From 3c26670d030051a9154311aec59b0bb48151d977 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 1 Apr 2026 23:55:22 +0200 Subject: [PATCH 269/312] FIx imports --- polkadot/node/subsystem-util/src/runtime/mod.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 0cc26f154b969..62e664d881d46 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -31,11 +31,11 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::UnpinHandle; use polkadot_primitives::{ - node_features::FeatureIndex, slashing, CandidateEvent, CandidateHash, ClaimQueueOffset, - CoreIndex, CoreState, EncodeAs, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, - Id as ParaId, IndexedVec, NodeFeatures, OccupiedCore, ScrapedOnChainVotes, SessionIndex, - SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, DEFAULT_SCHEDULING_LOOKAHEAD, + node_features::FeatureIndex, slashing, CandidateEvent, CandidateHash, CoreIndex, CoreState, + EncodeAs, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, IndexedVec, + NodeFeatures, OccupiedCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, + SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorId, + 
ValidatorIndex, DEFAULT_SCHEDULING_LOOKAHEAD, }; use std::collections::{BTreeMap, VecDeque}; From 2994b1c25feeda90a5b9edbc2bc51eb88b8efa50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 2 Apr 2026 00:10:42 +0200 Subject: [PATCH 270/312] Use correct Cargo.lock --- Cargo.lock | 100 ++++++++++++++++++++++++++--------------------------- 1 file changed, 50 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a936e1fd6e51e..bcf86fe5ab55a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1131,9 +1131,9 @@ dependencies = [ [[package]] name = "ark-models-ext" -version = "0.5.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff772c552d00e9c092eab0608632342c553abbf6bca984008b55100a9a78a3a6" +checksum = "6294fd6ddc4996910adf2a9d3b56e3aa6a1f605ea315952169d2ddebc304dc4c" dependencies = [ "ark-ec 0.5.0", "ark-ff 0.5.0", @@ -1902,15 +1902,15 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.5.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" +checksum = "c96bf972d85afc50bf5ab8fe2d54d1586b4e0b46c97c50a0c9e71e2f7bcd812a" dependencies = [ - "async-lock 2.8.0", "async-task", "concurrent-queue", - "fastrand 1.9.0", - "futures-lite 1.13.0", + "fastrand 2.3.0", + "futures-lite 2.3.0", + "pin-project-lite", "slab", ] @@ -1920,7 +1920,7 @@ version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebcd09b382f40fcd159c2d695175b2ae620ffa5f3bd6f664131efff4e8b9e04a" dependencies = [ - "async-lock 3.4.0", + "async-lock", "blocking", "futures-lite 2.3.0", ] @@ -1931,7 +1931,7 @@ version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d6baa8f0178795da0e71bc42c9e5d13261aac7ee549853162e66a241ba17964" dependencies = [ - "async-lock 3.4.0", + "async-lock", "cfg-if", "concurrent-queue", 
"futures-io", @@ -1944,15 +1944,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "async-lock" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" -dependencies = [ - "event-listener 2.5.3", -] - [[package]] name = "async-lock" version = "3.4.0" @@ -1983,7 +1974,7 @@ checksum = "63255f1dc2381611000436537bbedfe83183faa303a5a0edaf191edef06526bb" dependencies = [ "async-channel 2.3.0", "async-io", - "async-lock 3.4.0", + "async-lock", "async-signal", "async-task", "blocking", @@ -2001,7 +1992,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfb3634b73397aa844481f814fad23bbf07fdb0eabec10f2eb95e58944b1ec32" dependencies = [ "async-io", - "async-lock 3.4.0", + "async-lock", "atomic-waker", "cfg-if", "futures-core", @@ -2507,17 +2498,15 @@ dependencies = [ [[package]] name = "blocking" -version = "1.3.1" +version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" +checksum = "e83f8d02be6967315521be875afa792a316e28d57b5a2d401897e2a7921b7f21" dependencies = [ - "async-channel 1.9.0", - "async-lock 2.8.0", + "async-channel 2.3.0", "async-task", - "atomic-waker", - "fastrand 1.9.0", - "futures-lite 1.13.0", - "log", + "futures-io", + "futures-lite 2.3.0", + "piper", ] [[package]] @@ -6284,13 +6273,13 @@ dependencies = [ [[package]] name = "ed25519-zebra" -version = "4.2.0" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775765289f7c6336c18d3d66127527820dd45ffd9eb3b6b8ee4708590e6c20f5" +checksum = "0017d969298eec91e3db7a2985a8cab4df6341d86e6f3a6f5878b13fb7846bc9" dependencies = [ "curve25519-dalek", "ed25519", - "hashbrown 0.16.1", + "hashbrown 0.15.3", "pkcs8", "rand_core 0.6.4", "sha2 0.10.9", @@ -9101,9 +9090,9 @@ checksum = 
"d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2" [[package]] name = "iri-string" -version = "0.7.12" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25e659a4bb38e810ebc252e53b5814ff908a8c58c2a9ce2fae1bbec24cbf4e20" +checksum = "d8e7418f59cc01c88316161279a7f665217ae316b388e58a0d10e29f54f1e5eb" dependencies = [ "memchr", "serde", @@ -9279,9 +9268,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.94" +version = "0.3.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e04e2ef80ce82e13552136fabeef8a5ed1f985a96805761cbb9a2c34e7664d9" +checksum = "cc4c90f45aa2e6eacbe8645f77fdea542ac97a494bcd117a67df9ff4d611f995" dependencies = [ "once_cell", "wasm-bindgen", @@ -15565,6 +15554,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c835479a4443ded371d6c535cbfd8d31ad92c5d23ae9770a61bc155e4992a3c1" +dependencies = [ + "atomic-waker", + "fastrand 2.3.0", + "futures-io", +] + [[package]] name = "pkcs1" version = "0.7.5" @@ -22990,7 +22990,7 @@ dependencies = [ "async-executor", "async-fs", "async-io", - "async-lock 3.4.0", + "async-lock", "async-net", "async-process", "blocking", @@ -23004,7 +23004,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "966e72d77a3b2171bb7461d0cb91f43670c63558c62d7cf42809cae6c8b6b818" dependencies = [ "arrayvec 0.7.6", - "async-lock 3.4.0", + "async-lock", "atomic-take", "base64 0.22.1", "bip39", @@ -23058,7 +23058,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e16e5723359f0048bf64bfdfba64e5732a56847d42c4fd3fe56f18280c813413" dependencies = [ "arrayvec 0.7.6", - "async-lock 3.4.0", + "async-lock", "atomic-take", "base64 0.22.1", "bip39", 
@@ -23112,7 +23112,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "724ab10d6485cccb4bab080ce436c0b361295274aec7847d7ba84ab1a79a5132" dependencies = [ "arrayvec 0.7.6", - "async-lock 3.4.0", + "async-lock", "atomic-take", "base64 0.22.1", "bip39", @@ -23166,7 +23166,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a33b06891f687909632ce6a4e3fd7677b24df930365af3d0bcb078310129f3f" dependencies = [ "async-channel 2.3.0", - "async-lock 3.4.0", + "async-lock", "base64 0.22.1", "blake2-rfc", "bs58", @@ -23202,7 +23202,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bba9e591716567d704a8252feeb2f1261a286e1e2cbdd4e49e9197c34a14e2" dependencies = [ "async-channel 2.3.0", - "async-lock 3.4.0", + "async-lock", "base64 0.22.1", "blake2-rfc", "bs58", @@ -23238,7 +23238,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8b4d4971f06f2471f4e57a662dbe8047fa0cc020957764a6211f3fad371f7bd" dependencies = [ "async-channel 2.3.0", - "async-lock 3.4.0", + "async-lock", "base64 0.22.1", "blake2-rfc", "bs58", @@ -28614,9 +28614,9 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.117" +version = "0.2.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0551fc1bb415591e3372d0bc4780db7e587d84e2a7e79da121051c5c4b89d0b0" +checksum = "6523d69017b7633e396a89c5efab138161ed5aafcbc8d3e5c5a42ae38f50495a" dependencies = [ "cfg-if", "once_cell", @@ -28641,9 +28641,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.117" +version = "0.2.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fbdf9a35adf44786aecd5ff89b4563a90325f9da0923236f6104e603c7e86be" +checksum = "4e3a6c758eb2f701ed3d052ff5737f5bfe6614326ea7f3bbac7156192dc32e67" dependencies = [ "quote 1.0.40", "wasm-bindgen-macro-support", @@ -28651,9 
+28651,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.117" +version = "0.2.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dca9693ef2bab6d4e6707234500350d8dad079eb508dca05530c85dc3a529ff2" +checksum = "921de2737904886b52bcbb237301552d05969a6f9c40d261eb0533c8b055fedf" dependencies = [ "bumpalo", "proc-macro2 1.0.95", @@ -28664,9 +28664,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.117" +version = "0.2.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39129a682a6d2d841b6c429d0c51e5cb0ed1a03829d8b3d1e69a011e62cb3d3b" +checksum = "a93e946af942b58934c604527337bad9ae33ba1d5c6900bbb41c2c07c2364a93" dependencies = [ "unicode-ident", ] From 7a10a32cd866b3645274969521d6a5ed724ed215 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 3 Apr 2026 08:52:30 +0200 Subject: [PATCH 271/312] Abort the bundle when reaching unincluded segment constraints --- .../aura/src/collators/slot_based/block_builder_task.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 1513f1004219e..5a3af537784f6 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -614,9 +614,10 @@ where target: LOG_TARGET, ?parent_hash, ?included_header_hash, - "Cannot build next block due to unincluded segment constraints" + "Cannot build next block due to unincluded segment constraints, skipping entire bundle. Will continue at the next slot." 
); - break; + + return Ok(None); } // Create schedule for this block to determine timing decisions From ea52367441f18f2106fdb85c393695be46aa0647 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 3 Apr 2026 09:14:31 +0200 Subject: [PATCH 272/312] Use correct block for pruning --- .../src/collators/slot_based/block_import.rs | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs index 4506d2fc2af02..613fd80099cb8 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -95,10 +95,20 @@ impl SlotBasedBlockImportHandle { /// will be imported. fn register_ignored_nodes_cleanup(client: Arc) where - C: PreCommitActions, + C: PreCommitActions + HeaderBackend + 'static, Block: BlockT, { + let client_for_closure = client.clone(); let on_finality = move |notification: &FinalityNotification| -> AuxDataOperations { + // The old finalized block is the parent of the first block in the tree route, + // or the parent of the finalized block if the tree route is empty. + let old_finalized_hash = notification + .tree_route + .first() + .and_then(|hash| client_for_closure.header(*hash).ok().flatten()) + .map(|h| *h.parent_hash()) + .unwrap_or_else(|| *notification.header.parent_hash()); + notification .stale_blocks .iter() @@ -115,15 +125,7 @@ where .copied() .map(|hash| (ignored_nodes_key(hash), None)), ) - // Include the old last finalized block as well. - .chain( - notification - .tree_route - .first() - .copied() - .into_iter() - .map(|hash| (ignored_nodes_key(hash), None)), - ) + .chain(std::iter::once((ignored_nodes_key(old_finalized_hash), None))) .collect() }; @@ -145,7 +147,7 @@ impl SlotBasedBlockImport { /// collation task. 
If the node is not running as a collator, just dropping the handle is fine. pub fn new(inner: BI, client: Arc) -> (Self, SlotBasedBlockImportHandle) where - Client: PreCommitActions, + Client: PreCommitActions + HeaderBackend + 'static, { let (sender, receiver) = tracing_unbounded("SlotBasedBlockImportChannel", 1000); From 5cf845c86147fabd58bb73a8af613ad4dae9145b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 3 Apr 2026 09:32:15 +0200 Subject: [PATCH 273/312] Apply renamings --- .../slot_based/block_builder_task.rs | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 5a3af537784f6..f9c85eb150cb6 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -988,20 +988,20 @@ impl Cores { } } -/// The three block production modes based on total block rate. +/// Slot handover adjustment strategy based on total block rate. /// -/// These modes exist because without transaction streaming, the next author -/// must sequentially import all blocks before building their own. Each mode +/// These adjustments exist because without transaction streaming, the next author +/// must sequentially import all blocks before building their own. Each variant /// uses a different strategy to provide import buffer time. // TODO: Once transaction streaming is implemented, this can be removed. #[derive(Debug, Clone, Copy)] -enum BlockProductionMode { - /// 0-1 blocks per slot - no special handling needed. +enum SlotHandoverAdjustment { + /// 0-1 blocks per slot - no adjustment needed. /// The next author has plenty of time to import. - Normal, + None, - /// 2-3 blocks per slot (~2-3s block time) - reduce authoring time. 
- Legacy { + /// 2-3 blocks per slot (~2-3s block time) - shorten authoring time. + Shorten { /// Time adjustment factor of last block authoring time. time_factor: f32, }, @@ -1010,22 +1010,22 @@ enum BlockProductionMode { /// /// Block time is too fast for time reduction alone, so we skip /// producing the last block in each parachain slot entirely. - Bundling, + Skip, } -impl BlockProductionMode { - /// Determine the appropriate mode based on total blocks per relay slot. +impl SlotHandoverAdjustment { + /// Determine the appropriate adjustment based on total blocks per relay slot. fn from_total_blocks(total_blocks: u32) -> Self { match total_blocks { - 0..=1 => Self::Normal, - 2..=3 => Self::Legacy { time_factor: 0.5 }, - _ => Self::Bundling, + 0..=1 => Self::None, + 2..=3 => Self::Shorten { time_factor: 0.5 }, + _ => Self::Skip, } } - /// Whether this mode skips the last block (vs adjusting time). + /// Whether this adjustment skips the last block (vs adjusting time). fn skips_last_block(&self) -> bool { - matches!(self, Self::Bundling) + matches!(self, Self::Skip) } } @@ -1035,7 +1035,7 @@ impl BlockProductionMode { /// about when to skip blocks, how long to spend authoring, and when to sleep. 
#[derive(Debug, Clone, Copy)] struct BlockProductionSchedule { - mode: BlockProductionMode, + mode: SlotHandoverAdjustment, block_index: u32, blocks_per_core: u32, is_last_core_in_parachain_slot: bool, @@ -1049,7 +1049,7 @@ impl BlockProductionSchedule { is_last_core_in_parachain_slot: bool, ) -> Self { Self { - mode: BlockProductionMode::from_total_blocks(total_blocks), + mode: SlotHandoverAdjustment::from_total_blocks(total_blocks), block_index, blocks_per_core, is_last_core_in_parachain_slot, @@ -1142,45 +1142,45 @@ mod block_production_schedule_tests { #[test] fn mode_selection_from_total_blocks() { - // 0-1 blocks = Normal + // 0-1 blocks = None assert!(matches!( - BlockProductionMode::from_total_blocks(0), - BlockProductionMode::Normal + SlotHandoverAdjustment::from_total_blocks(0), + SlotHandoverAdjustment::None )); assert!(matches!( - BlockProductionMode::from_total_blocks(1), - BlockProductionMode::Normal + SlotHandoverAdjustment::from_total_blocks(1), + SlotHandoverAdjustment::None )); - // 2-3 blocks = Medium with half time + // 2-3 blocks = Shorten with half time assert!(matches!( - BlockProductionMode::from_total_blocks(2), - BlockProductionMode::Legacy { time_factor: 0.5 } + SlotHandoverAdjustment::from_total_blocks(2), + SlotHandoverAdjustment::Shorten { time_factor: 0.5 } )); assert!(matches!( - BlockProductionMode::from_total_blocks(3), - BlockProductionMode::Legacy { time_factor: 0.5 } + SlotHandoverAdjustment::from_total_blocks(3), + SlotHandoverAdjustment::Shorten { time_factor: 0.5 } )); - // >3 blocks = Fast + // >3 blocks = Skip assert!(matches!( - BlockProductionMode::from_total_blocks(4), - BlockProductionMode::Bundling + SlotHandoverAdjustment::from_total_blocks(4), + SlotHandoverAdjustment::Skip )); assert!(matches!( - BlockProductionMode::from_total_blocks(12), - BlockProductionMode::Bundling + SlotHandoverAdjustment::from_total_blocks(12), + SlotHandoverAdjustment::Skip )); } #[test] fn mode_behavior_flags() { - 
assert!(!BlockProductionMode::Normal.skips_last_block()); + assert!(!SlotHandoverAdjustment::None.skips_last_block()); - let medium = BlockProductionMode::Legacy { time_factor: 0.5 }; - assert!(!medium.skips_last_block()); + let shorten = SlotHandoverAdjustment::Shorten { time_factor: 0.5 }; + assert!(!shorten.skips_last_block()); - assert!(BlockProductionMode::Bundling.skips_last_block()); + assert!(SlotHandoverAdjustment::Skip.skips_last_block()); } } From 8f4fb19d969695b5ac52651072a6918e3679b3eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 3 Apr 2026 09:32:27 +0200 Subject: [PATCH 274/312] Fix authoring duration calculation --- .../aura/src/collators/slot_based/block_builder_task.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index f9c85eb150cb6..411201083da76 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -1092,8 +1092,8 @@ impl BlockProductionSchedule { /// Compute the authoring duration given available time. 
fn authoring_duration(&self, time_left: Duration, block_time: Duration) -> Duration { let adjusted = match &self.mode { - BlockProductionMode::Legacy { time_factor } - if self.is_effective_last_block() && self.blocks_per_core == 1 => + SlotHandoverAdjustment::Shorten { time_factor } + if self.is_last_core_in_parachain_slot => { time_left.mul_f32(*time_factor) }, From bfc5403b6d9172d72b94a6d21efe69a241acaeb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 3 Apr 2026 09:39:34 +0200 Subject: [PATCH 275/312] Shorten should only be used when having one block per core --- .../slot_based/block_builder_task.rs | 32 +++++++++---------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 411201083da76..2887edcec4a57 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -1014,11 +1014,11 @@ enum SlotHandoverAdjustment { } impl SlotHandoverAdjustment { - /// Determine the appropriate adjustment based on total blocks per relay slot. - fn from_total_blocks(total_blocks: u32) -> Self { + /// Determine the appropriate adjustment based on total blocks per relay slot and blocks per core. 
+ fn from_total_blocks(total_blocks: u32, blocks_per_core: u32) -> Self { match total_blocks { 0..=1 => Self::None, - 2..=3 => Self::Shorten { time_factor: 0.5 }, + 2..=3 if blocks_per_core == 1 => Self::Shorten { time_factor: 0.5 }, _ => Self::Skip, } } @@ -1049,7 +1049,7 @@ impl BlockProductionSchedule { is_last_core_in_parachain_slot: bool, ) -> Self { Self { - mode: SlotHandoverAdjustment::from_total_blocks(total_blocks), + mode: SlotHandoverAdjustment::from_total_blocks(total_blocks, blocks_per_core), block_index, blocks_per_core, is_last_core_in_parachain_slot, @@ -1144,31 +1144,36 @@ mod block_production_schedule_tests { fn mode_selection_from_total_blocks() { // 0-1 blocks = None assert!(matches!( - SlotHandoverAdjustment::from_total_blocks(0), + SlotHandoverAdjustment::from_total_blocks(0, 1), SlotHandoverAdjustment::None )); assert!(matches!( - SlotHandoverAdjustment::from_total_blocks(1), + SlotHandoverAdjustment::from_total_blocks(1, 1), SlotHandoverAdjustment::None )); // 2-3 blocks = Shorten with half time assert!(matches!( - SlotHandoverAdjustment::from_total_blocks(2), + SlotHandoverAdjustment::from_total_blocks(2, 1), SlotHandoverAdjustment::Shorten { time_factor: 0.5 } )); assert!(matches!( - SlotHandoverAdjustment::from_total_blocks(3), + SlotHandoverAdjustment::from_total_blocks(3, 1), SlotHandoverAdjustment::Shorten { time_factor: 0.5 } )); + assert!(matches!( + SlotHandoverAdjustment::from_total_blocks(3, 2), + SlotHandoverAdjustment::Skip + )); + // >3 blocks = Skip assert!(matches!( - SlotHandoverAdjustment::from_total_blocks(4), + SlotHandoverAdjustment::from_total_blocks(4, 2), SlotHandoverAdjustment::Skip )); assert!(matches!( - SlotHandoverAdjustment::from_total_blocks(12), + SlotHandoverAdjustment::from_total_blocks(12, 4), SlotHandoverAdjustment::Skip )); } @@ -1187,13 +1192,6 @@ mod block_production_schedule_tests { mod schedule_tests { use super::*; - // fn new( - // block_index: u32, - // blocks_per_core: u32, - // total_blocks: 
u32, - // is_last_core_in_parachain_slot: bool, - // ) - #[test] fn skip_production_only_in_fast_mode_last_core_last_block() { // Should skip: Fast mode, last core, last block From 628838e5b739aa44e60e23dbe7e0faea038e7fc3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 3 Apr 2026 09:44:13 +0200 Subject: [PATCH 276/312] Rename --- .../src/collators/slot_based/block_builder_task.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 2887edcec4a57..d422fe71b2317 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -1014,7 +1014,8 @@ enum SlotHandoverAdjustment { } impl SlotHandoverAdjustment { - /// Determine the appropriate adjustment based on total blocks per relay slot and blocks per core. + /// Determine the appropriate adjustment based on total blocks per relay slot and blocks per + /// core. fn from_total_blocks(total_blocks: u32, blocks_per_core: u32) -> Self { match total_blocks { 0..=1 => Self::None, @@ -1057,7 +1058,7 @@ impl BlockProductionSchedule { } /// Whether this is the actual last block index in the core. - fn is_actual_last_block(&self) -> bool { + fn is_last_block_in_core(&self) -> bool { self.block_index + 1 == self.blocks_per_core } @@ -1072,7 +1073,7 @@ impl BlockProductionSchedule { /// to give the next author time to import all previous blocks. fn should_skip_production(&self) -> bool { self.mode.skips_last_block() && - self.is_actual_last_block() && + self.is_last_block_in_core() && self.is_last_core_in_parachain_slot } @@ -1086,7 +1087,7 @@ impl BlockProductionSchedule { /// 1. In Bundling mode on the last core, we skip the actual last block /// 2. 
Even when not skipping, avoiding sleep on the last two blocks speeds things up fn is_effective_last_block(&self) -> bool { - self.is_actual_last_block() || self.is_second_to_last() + self.is_last_block_in_core() || self.is_second_to_last() } /// Compute the authoring duration given available time. @@ -1221,7 +1222,7 @@ mod block_production_schedule_tests { // block_index 2 is second-to-last (2+2 == 4), always effective last let schedule = BlockProductionSchedule::new(2, 4, 12, true); assert!(schedule.is_effective_last_block()); - assert!(!schedule.is_actual_last_block()); + assert!(!schedule.is_last_block_in_core()); assert!(schedule.is_second_to_last()); // Same config but not last core - second-to-last is STILL effective last From e3564ed852b215515dc9b949938a02aa68facc7b Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 7 Apr 2026 20:06:25 +0200 Subject: [PATCH 277/312] Resolve conflict, fix empty lines --- .../zombienet-tests/zombienet_cumulus_tests.yml | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/.github/zombienet-tests/zombienet_cumulus_tests.yml b/.github/zombienet-tests/zombienet_cumulus_tests.yml index 4bad24a3a28be..251313e0c3463 100644 --- a/.github/zombienet-tests/zombienet_cumulus_tests.yml +++ b/.github/zombienet-tests/zombienet_cumulus_tests.yml @@ -3,13 +3,11 @@ runner-type: "default" cumulus-image: "test-parachain" - - job-name: "zombienet-cumulus-0002-pov_recovery" test-filter: "zombie_ci::pov_recovery::pov_recovery" runner-type: "default" cumulus-image: "test-parachain" - - job-name: "zombienet-cumulus-0003-full_node_catching_up" test-filter: "zombie_ci::full_node_catching_up::full_node_catching_up" runner-type: "default" @@ -21,31 +19,26 @@ cumulus-image: "test-parachain" needs-wasm-binary: true - - job-name: "zombienet-cumulus-0005-migrate_solo_to_para" test-filter: "zombie_ci::migrate_solo::migrate_solo_to_para" runner-type: "default" cumulus-image: "test-parachain" - - job-name: 
"zombienet-cumulus-0006-rpc_collator_builds_blocks" test-filter: "zombie_ci::rpc_collator_build_blocks::rpc_collator_builds_blocks" runner-type: "large" cumulus-image: "test-parachain" - - job-name: "zombienet-cumulus-0007-full_node_warp_sync" test-filter: "zombie_ci::full_node_warp_sync::full_node_warp_sync" runner-type: "large" cumulus-image: "test-parachain" - - job-name: "zombienet-cumulus-0008-elastic_authoring" test-filter: "zombie_ci::elastic_scaling::slot_based_authoring::elastic_scaling_slot_based_authoring" runner-type: "default" cumulus-image: "test-parachain" - # Disabled, occasionally fails # See https://github.com/paritytech/polkadot-sdk/issues/8986 - job-name: "zombienet-cumulus-0009-elastic_scaling_pov_recovery" @@ -53,7 +46,6 @@ runner-type: "default" cumulus-image: "test-parachain" - # Disabled, occasionally fails. # See https://github.com/paritytech/polkadot-sdk/issues/8999 - job-name: "zombienet-cumulus-0010-elastic_scaling_multiple_block_per_slot" @@ -61,34 +53,30 @@ runner-type: "default" cumulus-image: "test-parachain" - - job-name: "zombienet-cumulus-0011-dht-bootnodes" test-filter: "zombie_ci::bootnodes::dht_bootnodes_test" runner-type: "default" cumulus-image: "polkadot-parachain-debug" - - job-name: "zombienet-cumulus-0013-elastic_scaling_slot_based_rp_offset" test-filter: "zombie_ci::elastic_scaling::slot_based_rp_offset::elastic_scaling_slot_based_relay_parent_offset_test" runner-type: "default" cumulus-image: "test-parachain" - - job-name: "zombienet-cumulus-0014-elastic_scaling_upgrade_to_3_cores" test-filter: "zombie_ci::elastic_scaling::upgrade_to_3_cores::elastic_scaling_upgrade_to_3_cores" runner-type: "large" cumulus-image: "test-parachain" - + use-zombienet-sdk: true needs-wasm-binary: true - job-name: "zombienet-cumulus-0015-parachain-runtime-upgrade" test-filter: "zombie_ci::parachain_runtime_upgrade_slot_duration_18s::parachain_runtime_upgrade_slot_duration_18s" runner-type: "large" cumulus-image: "test-parachain" - + 
use-zombienet-sdk: true needs-wasm-binary: true -<<<<<<< HEAD - job-name: "zombienet-cumulus-0016-block_bundling_basic" test-filter: "zombie_ci::block_bundling::basic::block_bundling_basic" runner-type: "large" From 63d2bef28b32ec6c1ec005ebe3661f95c381a5b8 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 7 Apr 2026 21:00:30 +0200 Subject: [PATCH 278/312] Make 2 cores 2 blocks shorten the second block --- .../src/collators/slot_based/block_builder_task.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index d422fe71b2317..cb2d704f3885e 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -678,10 +678,13 @@ where let time_left_for_block = slot_time_for_core.saturating_sub(core_start.elapsed()) / (blocks_per_core - block_index) as u32; - // The first block on a core gets the full remaining core time so that the runtime's - // `FullCore` weight mode can actually be utilized. Subsequent blocks are capped at - // `block_time` because they only carry fractional weight. - let authoring_duration = if block_index == 0 { + // The first block on a multi-block core gets the full remaining core time so that the + // runtime's `FullCore` weight mode can actually be utilized. Subsequent blocks are + // capped at `block_time` because they only carry fractional weight. + // + // Single-block cores (blocks_per_core == 1) go through schedule.authoring_duration() + // so that slot handover adjustments (e.g., Shorten) are applied on the last core. 
+ let authoring_duration = if block_index == 0 && blocks_per_core > 1 { slot_time_for_core.saturating_sub(core_start.elapsed()) } else { schedule.authoring_duration(time_left_for_block, block_time) From c4b3954f63fbd358cb022286c81e3dfd21d8339e Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 7 Apr 2026 21:23:34 +0200 Subject: [PATCH 279/312] Make 2 blocks 1 core possible --- .../aura/src/collators/slot_based/block_builder_task.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index cb2d704f3885e..4d442ef031e27 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -1022,7 +1022,7 @@ impl SlotHandoverAdjustment { fn from_total_blocks(total_blocks: u32, blocks_per_core: u32) -> Self { match total_blocks { 0..=1 => Self::None, - 2..=3 if blocks_per_core == 1 => Self::Shorten { time_factor: 0.5 }, + 2..=3 if blocks_per_core == 1 || blocks_per_core == total_blocks => Self::Shorten { time_factor: 0.5 }, _ => Self::Skip, } } From 2e364106afc76714982a4c1b6c62b0b9d51076e3 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 7 Apr 2026 21:28:55 +0200 Subject: [PATCH 280/312] Make 2 blocks 1 core possible --- .../aura/src/collators/slot_based/block_builder_task.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 4d442ef031e27..7257d19d11d12 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -1262,7 +1262,7 @@ mod block_production_schedule_tests { let schedule = 
BlockProductionSchedule::new(0, 2, 2, true); assert_eq!( schedule.authoring_duration(time_left, block_time), - Duration::from_millis(2000) // full time_left (< block_time) + Duration::from_millis(1000) // halved ); // Fast mode -> no time adjustment (uses skip instead) From f61cd0fb52604da3ff3d720d64a530935d14b764 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Wed, 8 Apr 2026 13:08:04 +0200 Subject: [PATCH 281/312] maybe_last -> is_last, fix wrong last detection --- .../slot_based/block_builder_task.rs | 49 +++++++++++++++++-- .../parachain-system/src/benchmarking.rs | 6 +-- .../parachain-system/src/block_weight/mock.rs | 6 +-- .../src/block_weight/tests.rs | 4 +- .../src/validate_block/tests.rs | 26 +++++----- cumulus/primitives/core/src/lib.rs | 6 +-- 6 files changed, 70 insertions(+), 27 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 7257d19d11d12..fc6d694baa874 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -704,7 +704,7 @@ where CumulusDigestItem::CoreInfo(core_info.clone()).to_digest_item(), CumulusDigestItem::BlockBundleInfo(BlockBundleInfo { index: block_index as u8, - maybe_last: schedule.is_effective_last_block(), + is_last: schedule.block_ends_bundle(), }) .to_digest_item(), ], @@ -1022,7 +1022,9 @@ impl SlotHandoverAdjustment { fn from_total_blocks(total_blocks: u32, blocks_per_core: u32) -> Self { match total_blocks { 0..=1 => Self::None, - 2..=3 if blocks_per_core == 1 || blocks_per_core == total_blocks => Self::Shorten { time_factor: 0.5 }, + 2..=3 if blocks_per_core == 1 || blocks_per_core == total_blocks => { + Self::Shorten { time_factor: 0.5 } + }, _ => Self::Skip, } } @@ -1082,7 +1084,7 @@ impl BlockProductionSchedule { /// Whether this is effectively the last block we'll 
produce for this core. /// - /// Used for `BundleInfo { maybe_last }` - validators need to know which + /// Used for `BundleInfo { is_last }` - validators need to know which /// block might be final. Also used for sleep decisions - we don't sleep /// after the last or second-to-last block to speed up the final stretch. /// @@ -1093,6 +1095,19 @@ impl BlockProductionSchedule { self.is_last_block_in_core() || self.is_second_to_last() } + /// Whether the node stops block production after this block for this bundle. + /// + /// Returns `true` when: + /// - This is the last block in the core, OR + /// - This is the second-to-last and the actual last will be skipped (Skip mode on the last core + /// of the parachain slot). + fn block_ends_bundle(&self) -> bool { + self.is_last_block_in_core() || + (self.is_second_to_last() && + self.mode.skips_last_block() && + self.is_last_core_in_parachain_slot) + } + /// Compute the authoring duration given available time. fn authoring_duration(&self, time_left: Duration, block_time: Duration) -> Duration { let adjusted = match &self.mode { @@ -1273,6 +1288,34 @@ mod block_production_schedule_tests { ); } + #[test] + fn block_ends_bundle_only_on_true_last_block() { + // 6 blocks per core, Skip mode, last core: + // only the actual last (index 5) and second-to-last (index 4, because last + // will be skipped) should return true. + assert!(!BlockProductionSchedule::new(0, 6, 12, true).block_ends_bundle()); + assert!(!BlockProductionSchedule::new(3, 6, 12, true).block_ends_bundle()); + assert!(BlockProductionSchedule::new(4, 6, 12, true).block_ends_bundle()); + assert!(BlockProductionSchedule::new(5, 6, 12, true).block_ends_bundle()); + + // Same config but NOT last core: second-to-last must NOT end the bundle + // (skip only applies on last core). 
+ assert!(!BlockProductionSchedule::new(4, 6, 12, false).block_ends_bundle()); + assert!(BlockProductionSchedule::new(5, 6, 12, false).block_ends_bundle()); + + // Shorten mode (2 blocks, 1 per core, last core): no skipping, so only the + // actual last block ends the bundle. + assert!(BlockProductionSchedule::new(0, 1, 2, true).block_ends_bundle()); + + // None mode (1 block total): trivially the last. + assert!(BlockProductionSchedule::new(0, 1, 1, true).block_ends_bundle()); + assert!(BlockProductionSchedule::new(0, 1, 1, false).block_ends_bundle()); + + // 2 blocks on 1 core (Shorten mode): only index 1 ends the bundle. + assert!(!BlockProductionSchedule::new(0, 2, 2, true).block_ends_bundle()); + assert!(BlockProductionSchedule::new(1, 2, 2, true).block_ends_bundle()); + } + /// This test verifies that the new schedule logic matches the original inline logic /// for various block/core configurations. #[test] diff --git a/cumulus/pallets/parachain-system/src/benchmarking.rs b/cumulus/pallets/parachain-system/src/benchmarking.rs index fd6195f085125..c8e7d8a4da46b 100644 --- a/cumulus/pallets/parachain-system/src/benchmarking.rs +++ b/cumulus/pallets/parachain-system/src/benchmarking.rs @@ -98,7 +98,7 @@ mod benchmarks { frame_system::Pallet::::set_extrinsic_index(1); frame_system::Pallet::::deposit_log( - BlockBundleInfo { index: 0, maybe_last: false }.to_digest_item(), + BlockBundleInfo { index: 0, is_last: false }.to_digest_item(), ); frame_system::Pallet::::deposit_log( CoreInfo { @@ -157,7 +157,7 @@ mod benchmarks { frame_system::Pallet::::set_extrinsic_index(1); frame_system::Pallet::::deposit_log( - BlockBundleInfo { index: 0, maybe_last: false }.to_digest_item(), + BlockBundleInfo { index: 0, is_last: false }.to_digest_item(), ); frame_system::Pallet::::deposit_log( CoreInfo { @@ -219,7 +219,7 @@ mod benchmarks { frame_system::Pallet::::set_extrinsic_index(1); frame_system::Pallet::::deposit_log( - BlockBundleInfo { index: 0, maybe_last: false 
}.to_digest_item(), + BlockBundleInfo { index: 0, is_last: false }.to_digest_item(), ); frame_system::Pallet::::deposit_log( CoreInfo { diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index 995e0cbc1af17..29d3d22774ad1 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -340,7 +340,7 @@ pub type ExecutiveOnlyOperational = frame_executive::Executive< pub struct TestExtBuilder { num_cores: Option, bundle_index: Option, - bundle_maybe_last: bool, + bundle_is_last: bool, previous_core_count: Option, } @@ -351,7 +351,7 @@ impl Default for TestExtBuilder { Self { num_cores: None, bundle_index: None, - bundle_maybe_last: false, + bundle_is_last: false, previous_core_count: None, } } @@ -406,7 +406,7 @@ impl TestExtBuilder { // Add bundle info if specified if let Some(bundle_index) = self.bundle_index { let bundle_info = - BlockBundleInfo { index: bundle_index, maybe_last: self.bundle_maybe_last }; + BlockBundleInfo { index: bundle_index, is_last: self.bundle_is_last }; let digest = CumulusDigestItem::BlockBundleInfo(bundle_info).to_digest_item(); frame_system::Pallet::::deposit_log(digest); } diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index b6b9ed6b3c139..430eaa9b08669 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -189,7 +189,7 @@ fn test_is_first_block_in_core_functions() { assert!(super::is_first_block_in_core_with_digest(&empty_digest).is_none()); // Test with bundle info index = 0 - should return true - let bundle_info_first = BlockBundleInfo { index: 0, maybe_last: false }; + let bundle_info_first = BlockBundleInfo { index: 0, is_last: false }; let digest_item_first = 
CumulusDigestItem::BlockBundleInfo(bundle_info_first).to_digest_item(); let mut digest_first = Digest::default(); @@ -197,7 +197,7 @@ fn test_is_first_block_in_core_functions() { assert!(super::is_first_block_in_core_with_digest(&digest_first).unwrap()); // Test with bundle info index > 0 - should return false - let bundle_info_not_first = BlockBundleInfo { index: 5, maybe_last: true }; + let bundle_info_not_first = BlockBundleInfo { index: 5, is_last: true }; let digest_item_not_first = CumulusDigestItem::BlockBundleInfo(bundle_info_not_first).to_digest_item(); let mut digest_not_first = Digest::default(); diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index fcb8b831f9072..abf6142f40fd2 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -840,7 +840,7 @@ fn validate_block_rejects_incomplete_bundle() { Default::default(), 2, |_| Vec::new(), - |i| vec![BlockBundleInfo { index: i as u8, maybe_last: i == 1 }.to_digest_item()], + |i| vec![BlockBundleInfo { index: i as u8, is_last: i == 1 }.to_digest_item()], ); // Validation with only first block should fail (incomplete bundle) @@ -888,7 +888,7 @@ fn only_send_ump_signal_on_last_block_in_bundle() { |_| Vec::new(), |i| { vec![ - BlockBundleInfo { index: i as u8, maybe_last: i == 3 }.to_digest_item(), + BlockBundleInfo { index: i as u8, is_last: i == 3 }.to_digest_item(), CumulusDigestItem::CoreInfo(CoreInfo { selector: CoreSelector(0), claim_queue_offset: ClaimQueueOffset(0), @@ -935,14 +935,14 @@ fn validate_block_accepts_single_block_with_use_full_core() { let (client, parent_head) = create_elastic_scaling_test_client(); - // Build a single block with BlockBundleInfo (maybe_last=false) and UseFullCore set via - // extrinsic UseFullCore should make validation succeed even without maybe_last=true + // Build a single block with 
BlockBundleInfo (is_last=false) and UseFullCore set via + // extrinsic UseFullCore should make validation succeed even without is_last=true let TestBlockData { block, validation_data } = build_block_with_witness( &client, vec![generate_extrinsic(&client, Alice, TestPalletCall::set_use_full_core {})], parent_head.clone(), Default::default(), - vec![BlockBundleInfo { index: 0, maybe_last: false }.to_digest_item()], + vec![BlockBundleInfo { index: 0, is_last: false }.to_digest_item()], ); // Validation should succeed because UseFullCore marks it as last block @@ -962,7 +962,7 @@ fn only_send_ump_signal_on_single_block_with_use_full_core() { let (client, parent_head) = create_elastic_scaling_test_client(); - // Build a single block with BlockBundleInfo (maybe_last=false), CoreInfo, and UseFullCore set + // Build a single block with BlockBundleInfo (is_last=false), CoreInfo, and UseFullCore set // via extrinsic. UseFullCore makes this block the last block in the core. let TestBlockData { block, .. 
} = build_multiple_blocks_with_witness( &client, @@ -972,7 +972,7 @@ fn only_send_ump_signal_on_single_block_with_use_full_core() { |_| vec![generate_extrinsic(&client, Alice, TestPalletCall::set_use_full_core {})], |_| { vec![ - BlockBundleInfo { index: 0, maybe_last: false }.to_digest_item(), + BlockBundleInfo { index: 0, is_last: false }.to_digest_item(), CumulusDigestItem::CoreInfo(CoreInfo { selector: CoreSelector(0), claim_queue_offset: ClaimQueueOffset(0), @@ -1030,7 +1030,7 @@ fn validate_block_with_max_ump_messages_and_4_blocks_per_pov() { )] }, |i| { - vec![BlockBundleInfo { index: i as u8, maybe_last: i as u32 + 1 == blocks_per_pov } + vec![BlockBundleInfo { index: i as u8, is_last: i as u32 + 1 == blocks_per_pov } .to_digest_item()] }, ); @@ -1092,7 +1092,7 @@ fn validate_block_with_max_hrmp_messages_and_4_blocks_per_pov() { )] }, |i| { - vec![BlockBundleInfo { index: i as u8, maybe_last: i as u32 + 1 == blocks_per_pov } + vec![BlockBundleInfo { index: i as u8, is_last: i as u32 + 1 == blocks_per_pov } .to_digest_item()] }, ); @@ -1152,7 +1152,7 @@ fn validate_block_hrmp_messages_sorted_across_blocks_in_bundle() { )] }, |i| { - vec![BlockBundleInfo { index: i as u8, maybe_last: i as u32 + 1 == blocks_per_pov } + vec![BlockBundleInfo { index: i as u8, is_last: i as u32 + 1 == blocks_per_pov } .to_digest_item()] }, ); @@ -1211,7 +1211,7 @@ fn validate_block_hrmp_duplicate_recipient_across_blocks_in_bundle() { )] }, |i| { - vec![BlockBundleInfo { index: i as u8, maybe_last: i as u32 + 1 == blocks_per_pov } + vec![BlockBundleInfo { index: i as u8, is_last: i as u32 + 1 == blocks_per_pov } .to_digest_item()] }, ); @@ -1294,7 +1294,7 @@ fn validate_block_with_ump_size_constraint_and_4_blocks_per_pov() { )] }, |i| { - vec![BlockBundleInfo { index: i as u8, maybe_last: i as u32 + 1 == blocks_per_pov } + vec![BlockBundleInfo { index: i as u8, is_last: i as u32 + 1 == blocks_per_pov } .to_digest_item()] }, ); @@ -1348,7 +1348,7 @@ fn 
validate_block_with_ump_capacity_constraint_and_4_blocks_per_pov() { )] }, |i| { - vec![BlockBundleInfo { index: i as u8, maybe_last: i as u32 + 1 == blocks_per_pov } + vec![BlockBundleInfo { index: i as u8, is_last: i as u32 + 1 == blocks_per_pov } .to_digest_item()] }, ); diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index dc2e6c6f0b166..b47764b61367c 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -264,7 +264,7 @@ pub struct BlockBundleInfo { /// It is possible that the runtime outputs the /// [`CumulusDigestItem::UseFullCore`] to inform the node to use an entire for one block /// only. - pub maybe_last: bool, + pub is_last: bool, } impl BlockBundleInfo { @@ -446,7 +446,7 @@ impl CumulusDigestItem { /// /// Checks the following conditions: /// - /// - Is [`BlockBundleInfo::maybe_last`] set to true? + /// - Is [`BlockBundleInfo::is_last`] set to true? /// - Or is [`Self::UseFullCore`] digest present? /// - Or is [`DigestItem::RuntimeEnvironmentUpdated`] digest present? 
/// @@ -458,7 +458,7 @@ impl CumulusDigestItem { let bundle_info = Self::find_block_bundle_info(digest)?; Some( - bundle_info.maybe_last || + bundle_info.is_last || Self::contains_use_full_core(digest) || digest.logs.iter().any(|l| matches!(l, DigestItem::RuntimeEnvironmentUpdated)), ) From 7f64812f73df8a814512fcc39a7e0ff798c43960 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 9 Apr 2026 12:38:08 +0200 Subject: [PATCH 282/312] Perform additional check --- .../src/validate_block/implementation.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 014515d5920e4..baa2c7b942267 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -435,6 +435,15 @@ fn verify_blocks_form_chain(blocks: &[B::LazyBlock], parent_header: & block_index, info.index ); + if block_index + 1 < num_blocks { + assert!( + !info.is_last, + "Intermediate block at index {} has `is_last` set, \ + but more blocks follow in the PoV", + block_index + ); + } + if block_index + 1 == num_blocks && !CumulusDigestItem::is_last_block_in_core(block.header().digest()) .unwrap_or(true) From 8025fbd5ab24a6d40523c56db27ed5225fc25f86 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 9 Apr 2026 12:43:04 +0200 Subject: [PATCH 283/312] Remove outdated comment --- .../parachain-system/src/validate_block/implementation.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index baa2c7b942267..77389c8bec44d 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -186,8 +186,6 @@ where 
.with_recorder(execute_recorder.clone()) .build(); - // We let all blocks contribute to the same overlay. Data written by a previous block will - // be directly accessible without going to the db. let mut overlay = OverlayedChanges::default(); parent_header = block.header().clone(); From ff2e7305eb1d75f34f6570172a4ee8cd2b994b6f Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 9 Apr 2026 14:44:28 +0200 Subject: [PATCH 284/312] Additional paranoid check for multi-block in validate_block --- .../parachain-system/src/validate_block/implementation.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 77389c8bec44d..9d86f6932a48f 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -420,7 +420,7 @@ fn verify_blocks_form_chain(blocks: &[B::LazyBlock], parent_header: & (Some(true), None) => { panic!("All blocks in a bundled PoV must include `BlockBundleInfo`"); }, - (Some(false), Some(_)) => { + (Some(false), _) => { panic!("A PoV without `BlockBundleInfo` may only contain a single block"); }, _ => {}, From d1dacd0a5d69459722758aebb6a13b2b3350296b Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 9 Apr 2026 16:35:55 +0200 Subject: [PATCH 285/312] Review comments --- cumulus/client/collator/src/service.rs | 2 +- .../client/consensus/aura/src/collators/slot_based/mod.rs | 4 +++- .../parachain-system/src/validate_block/implementation.rs | 5 +++-- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/cumulus/client/collator/src/service.rs b/cumulus/client/collator/src/service.rs index 2e81fba8775f9..45d831b220103 100644 --- a/cumulus/client/collator/src/service.rs +++ b/cumulus/client/collator/src/service.rs @@ -285,7 +285,7 @@ where upward_messages.extend(messages); 
signals.into_iter().for_each(|s| { - if upward_message_signals.iter().all(|existing| *existing != s) { + if !upward_message_signals.contains(&s) { upward_message_signals.push(s); } }); diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index c0d1c97245613..1b1b524a0a2f7 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -43,7 +43,9 @@ //! //! - Parachain slot duration //! - Number of assigned parachain cores -//! - Parachain runtime configuration +//! - The `target_block_rate` runtime API, which determines how many blocks to produce per relay +//! chain slot. When this API is unavailable, the block builder falls back to one block per +//! core. When the target exceeds the number of cores, multiple blocks are bundled per core. //! //! ## Timing Examples //! diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 9d86f6932a48f..a5593be2a8e81 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -435,8 +435,9 @@ fn verify_blocks_form_chain(blocks: &[B::LazyBlock], parent_header: & if block_index + 1 < num_blocks { assert!( - !info.is_last, - "Intermediate block at index {} has `is_last` set, \ + !CumulusDigestItem::is_last_block_in_core(block.header().digest()) + .unwrap_or(false), + "Intermediate block at index {} is marked as last block in core, \ but more blocks follow in the PoV", block_index ); From fe36370f5dda58b3f94c9003b9d849cf932b3efd Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 9 Apr 2026 17:13:18 +0200 Subject: [PATCH 286/312] Review nits --- .../aura/src/collators/slot_based/block_builder_task.rs | 4 ++-- 
.../consensus/aura/src/collators/slot_based/block_import.rs | 4 ++-- .../consensus/aura/src/collators/slot_based/collation_task.rs | 1 + cumulus/client/proof-size-recording/src/lib.rs | 4 ++-- .../parachains/runtimes/coretime/coretime-westend/src/lib.rs | 1 - 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index b8847b4636025..e6d685a861a06 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -32,7 +32,7 @@ use crate::{ use codec::{Codec, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; -use cumulus_client_proof_size_recording::prepare_proof_size_recording_transaction; +use cumulus_client_proof_size_recording::prepare_proof_size_recording_aux_data; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; use cumulus_primitives_core::{ BlockBundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, @@ -755,7 +755,7 @@ where .collect::>(); if !recorded_sizes.is_empty() { - prepare_proof_size_recording_transaction(parent_hash, recorded_sizes).for_each( + prepare_proof_size_recording_aux_data(parent_hash, recorded_sizes).for_each( |(k, v)| { import_block.auxiliary.push((k, Some(v))); }, diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs index 613fd80099cb8..759694fad0445 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -17,7 +17,7 @@ use crate::LOG_TARGET; use codec::{Decode, Encode}; -use 
cumulus_client_proof_size_recording::prepare_proof_size_recording_transaction; +use cumulus_client_proof_size_recording::prepare_proof_size_recording_aux_data; use cumulus_primitives_core::{BlockBundleInfo, CoreInfo, CumulusDigestItem, RelayBlockIdentifier}; use futures::{stream::FusedStream, StreamExt}; use sc_client_api::{ @@ -296,7 +296,7 @@ impl SlotBasedBlockImport { .collect::>(); if !recorded_sizes.is_empty() { - prepare_proof_size_recording_transaction(block_hash, recorded_sizes).for_each( + prepare_proof_size_recording_aux_data(block_hash, recorded_sizes).for_each( |(k, v)| { params.auxiliary.push((k, Some(v))); }, diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs index 5cc60433a01c9..a77fb2b89531a 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs @@ -164,6 +164,7 @@ async fn handle_collation_message>(), "Compressed PoV size: {}kb", pov.block_data.0.len() as f64 / 1024f64, ); diff --git a/cumulus/client/proof-size-recording/src/lib.rs b/cumulus/client/proof-size-recording/src/lib.rs index 1aabad4f65026..a8c09f227ab1b 100644 --- a/cumulus/client/proof-size-recording/src/lib.rs +++ b/cumulus/client/proof-size-recording/src/lib.rs @@ -49,10 +49,10 @@ where } } -/// Prepare a transaction to write the proof size recordings to the aux storage. +/// Prepare aux storage key-value pairs for persisting proof size recordings. /// /// Returns the key-value pairs that need to be written to the aux storage. 
-pub fn prepare_proof_size_recording_transaction( +pub fn prepare_proof_size_recording_aux_data( block_hash: H, recordings: Vec, ) -> impl Iterator, Vec)> { diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index f39d60c85fe17..35b9172bc470f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -823,7 +823,6 @@ impl_runtime_apis! { ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } - } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { From 5c6c9457dc0eaaac63965918f13ac7754358698b Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 9 Apr 2026 17:19:17 +0200 Subject: [PATCH 287/312] simplify blocks_per_cores (#11708) --- .../src/collators/slot_based/block_builder_task.rs | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index e6d685a861a06..27a42a70126be 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -418,18 +418,11 @@ where // In total we want to have at max `number_of_blocks` cores to use. cores.truncate_cores(number_of_blocks); let raw_blocks_per_core = (number_of_blocks / cores.total_cores()).max(1); - let mut left_over_blocks = number_of_blocks % cores.total_cores(); + let left_over_blocks = number_of_blocks % cores.total_cores(); let blocks_per_cores = (0..cores.total_cores()) - .into_iter() - .map(|_| { + .map(|i| { // We distribute the left over blocks across the cores. 
- raw_blocks_per_core + - if left_over_blocks > 0 { - left_over_blocks -= 1; - 1 - } else { - 0 - } + raw_blocks_per_core + u32::from(i < left_over_blocks) }) .collect::>(); From 560cf062e8cde3c0dc8eef0d5ed84bbaa04dcfb3 Mon Sep 17 00:00:00 2001 From: "cmd[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 9 Apr 2026 15:22:30 +0000 Subject: [PATCH 288/312] Update from github-actions[bot] running command 'fmt' --- .../aura/src/collators/slot_based/block_builder_task.rs | 3 +-- .../aura/src/collators/slot_based/block_import.rs | 8 +++----- .../client/consensus/aura/src/collators/slot_based/mod.rs | 4 ++-- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 27a42a70126be..0b03947983fa1 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -24,8 +24,7 @@ use crate::{ relay_chain_data_cache::RelayChainDataCache, slot_timer::{SlotInfo, SlotTimer}, }, - RelayHash, - BackingGroupConnectionHelper, RelayParentData, + BackingGroupConnectionHelper, RelayHash, RelayParentData, }, LOG_TARGET, }; diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs index 759694fad0445..236e3fb784efc 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -296,11 +296,9 @@ impl SlotBasedBlockImport { .collect::>(); if !recorded_sizes.is_empty() { - prepare_proof_size_recording_aux_data(block_hash, recorded_sizes).for_each( - |(k, v)| { - params.auxiliary.push((k, Some(v))); - }, - ); + prepare_proof_size_recording_aux_data(block_hash, recorded_sizes).for_each(|(k, v)| { + 
params.auxiliary.push((k, Some(v))); + }); } params.state_action = diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index 1b1b524a0a2f7..38eb099e7532d 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -44,8 +44,8 @@ //! - Parachain slot duration //! - Number of assigned parachain cores //! - The `target_block_rate` runtime API, which determines how many blocks to produce per relay -//! chain slot. When this API is unavailable, the block builder falls back to one block per -//! core. When the target exceeds the number of cores, multiple blocks are bundled per core. +//! chain slot. When this API is unavailable, the block builder falls back to one block per core. +//! When the target exceeds the number of cores, multiple blocks are bundled per core. //! //! ## Timing Examples //! From 9181fd916ed24156c858c33cf6313b8a8e2715a5 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 9 Apr 2026 19:00:02 +0200 Subject: [PATCH 289/312] Remove duplicate condition --- substrate/frame/support/src/traits/hooks.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs index c98b0f23e81d2..07ad76244c715 100644 --- a/substrate/frame/support/src/traits/hooks.rs +++ b/substrate/frame/support/src/traits/hooks.rs @@ -128,9 +128,6 @@ impl_for_tuples_attr! 
{ fn on_idle(n: BlockNumber, remaining_weight: Weight) -> Weight { let on_idle_functions: &[fn(BlockNumber, Weight) -> Weight] = &[for_tuples!( #( Tuple::on_idle ),* )]; - if on_idle_functions.is_empty() { - return Weight::zero(); - } let mut weight = Weight::zero(); let len = on_idle_functions.len(); From ba558af551efc4c81b2adc874b5dbb4143f69c07 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 10 Apr 2026 15:18:48 +0200 Subject: [PATCH 290/312] Fix validate_block tests --- .../src/validate_block/tests.rs | 38 +++++++++++++------ prdoc/pr_10477.prdoc | 12 +++++- 2 files changed, 37 insertions(+), 13 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index abf6142f40fd2..6a451cc8718f6 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -301,7 +301,7 @@ fn validate_block_works() { fn validate_multiple_blocks_work() { sp_tracing::try_init_simple(); - let blocks_per_pov = 4; + let blocks_per_pov = 4u32; let (client, parent_head) = create_elastic_scaling_test_client(); let TestBlockData { block, validation_data } = build_multiple_blocks_with_witness( &client, @@ -316,7 +316,10 @@ fn validate_multiple_blocks_work() { Some(i), )] }, - |_| Vec::new(), + |i| { + vec![BlockBundleInfo { index: i as u8, is_last: i + 1 == blocks_per_pov } + .to_digest_item()] + }, ); assert!(block.proof().encoded_size() < 3 * 1024 * 1024); @@ -569,7 +572,7 @@ fn validate_block_works_with_child_tries() { fn state_changes_in_multiple_blocks_are_applied_in_exact_order() { sp_tracing::try_init_simple(); - let blocks_per_pov = 12; + let blocks_per_pov = 12u32; let (client, genesis_head) = create_elastic_scaling_test_client(); // 1. Build the initial block that stores values in the map. 
@@ -615,7 +618,10 @@ fn state_changes_in_multiple_blocks_are_applied_in_exact_order() { Some(i), )] }, - |_| Vec::new(), + |i| { + vec![BlockBundleInfo { index: i as u8, is_last: i + 1 == blocks_per_pov } + .to_digest_item()] + }, ); // 3. Validate the PoV. @@ -672,13 +678,17 @@ fn ensure_we_only_like_blockchains() { if env::var("RUN_TEST").is_ok() { let (client, parent_head) = create_elastic_scaling_test_client(); + let num_blocks = 4u32; let TestBlockData { mut block, validation_data } = build_multiple_blocks_with_witness( &client, parent_head.clone(), Default::default(), - 4, + num_blocks, |_| Default::default(), - |_| Vec::new(), + |i| { + vec![BlockBundleInfo { index: i as u8, is_last: i + 1 == num_blocks } + .to_digest_item()] + }, ); // Reference some non existing parent. @@ -704,7 +714,9 @@ fn ensure_we_only_like_blockchains() { } #[test] -fn rejects_multiple_blocks_per_pov_when_applying_runtime_upgrade() { +fn rejects_blocks_in_bundle_after_block_marked_as_last() { + // Note: This test also covers the case where a runtime upgrade contains following blocks. + // A block with a runtime upgrade is considered last in bundle. sp_tracing::try_init_simple(); if env::var("RUN_TEST").is_ok() { @@ -755,14 +767,18 @@ fn rejects_multiple_blocks_per_pov_when_applying_runtime_upgrade() { proof_builder.host_config.max_code_size = code_len * 2; // 2. Build a PoV that consists of multiple blocks. + let num_blocks = 4u32; let TestBlockData { block: pov_block_data, validation_data: pov_validation_data } = build_multiple_blocks_with_witness( &client, initial_block_header.clone(), // Start building PoV from the initial block's header proof_builder, - 4, - |_| Vec::new(), + num_blocks, |_| Vec::new(), + |i| { + vec![BlockBundleInfo { index: i as u8, is_last: i + 1 == num_blocks } + .to_digest_item()] + }, ); // 3. Validate the PoV. 
@@ -775,7 +791,7 @@ fn rejects_multiple_blocks_per_pov_when_applying_runtime_upgrade() { } else { let output = Command::new(env::current_exe().unwrap()) .args([ - "rejects_multiple_blocks_per_pov_when_applying_runtime_upgrade", + "rejects_blocks_in_bundle_after_block_marked_as_last", "--", "--nocapture", ]) @@ -786,7 +802,7 @@ fn rejects_multiple_blocks_per_pov_when_applying_runtime_upgrade() { assert!(output.status.success()); assert!(dbg!(String::from_utf8(output.stderr).unwrap()) - .contains("only one block per PoV is allowed")); + .contains("is marked as last block in core, but more blocks follow in the PoV")); } } diff --git a/prdoc/pr_10477.prdoc b/prdoc/pr_10477.prdoc index 733ec4a6d4cbd..0fe85f988362f 100644 --- a/prdoc/pr_10477.prdoc +++ b/prdoc/pr_10477.prdoc @@ -18,7 +18,7 @@ crates: - name: sp-block-builder bump: major - name: sc-consensus - bump: patch + bump: minor - name: sp-consensus-slots bump: minor - name: cumulus-primitives-core @@ -40,7 +40,7 @@ crates: - name: cumulus-client-proof-size-recording bump: patch - name: cumulus-client-service - bump: patch + bump: minor - name: polkadot-omni-node-lib bump: patch - name: testnet-parachains-constants @@ -51,5 +51,13 @@ crates: bump: patch - name: penpal-runtime bump: patch +- name: cumulus-pallet-xcmp-queue + bump: patch +- name: parachains-runtimes-test-utils + bump: patch +- name: bridge-hub-rococo-runtime + bump: patch +- name: bridge-hub-westend-runtime + bump: patch - name: polkadot-sdk bump: minor From 26aad9b75d485f790e09d71df3e9810812cfd47e Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 10 Apr 2026 22:07:17 +0200 Subject: [PATCH 291/312] warp sync fix and little test (#11725) - Small fix for warp-sync / gap-sync. 
- some logs for block import, - ci zombie test added --- .../zombienet_cumulus_tests.yml | 6 + .../src/collators/slot_based/block_import.rs | 19 +- .../tests/zombie_ci/block_bundling/mod.rs | 1 + .../zombie_ci/block_bundling/warp_sync.rs | 183 ++++++++++++++++++ .../consensus/common/src/block_import.rs | 11 ++ 5 files changed, 219 insertions(+), 1 deletion(-) create mode 100644 cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs diff --git a/.github/zombienet-tests/zombienet_cumulus_tests.yml b/.github/zombienet-tests/zombienet_cumulus_tests.yml index 251313e0c3463..c4dd4ee32e3e0 100644 --- a/.github/zombienet-tests/zombienet_cumulus_tests.yml +++ b/.github/zombienet-tests/zombienet_cumulus_tests.yml @@ -120,6 +120,12 @@ cumulus-image: "test-parachain" use-zombienet-sdk: true +- job-name: "zombienet-cumulus-0023-block_bundling_warp_sync" + test-filter: "zombie_ci::block_bundling::warp_sync::warp_sync_with_bundled_blocks" + runner-type: "large" + cumulus-image: "test-parachain" + use-zombienet-sdk: true + - job-name: "zombienet-cumulus-0016-statement_store_basic_propagation" test-filter: "zombie_ci::statement_store::integration::statement_store_basic_propagation" runner-type: "default" diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs index 236e3fb784efc..832f3179436f1 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -231,6 +231,11 @@ impl SlotBasedBlockImport { let (Some(core_info), Some(bundle_info), Some(relay_block_identifier)) = (core_info, bundle_info, relay_block_identifier) else { + tracing::debug!( + target: LOG_TARGET, + number = ?params.header.number(), + "no bundle digests, skipping execute_block_and_collect_storage_proof", + ); return Ok(()); }; @@ -253,6 +258,15 @@ impl SlotBasedBlockImport { let block = 
Block::new(params.header.clone(), params.body.clone().unwrap_or_default()); + tracing::debug!( + target: LOG_TARGET, + ?parent_hash, + number = ?params.header.number(), + ?core_info, + ?bundle_info, + "execute_block_and_collect_storage_proof: calling runtime_api.execute_block", + ); + runtime_api .execute_block(parent_hash, block.into()) .map_err(|e| Box::new(e) as Box<_>)?; @@ -338,7 +352,10 @@ where &self, mut params: sc_consensus::BlockImportParams, ) -> Result { - if params.origin != BlockOrigin::Own { + if !(params.origin == BlockOrigin::Own || + params.with_state() || + params.state_action.skip_execution_checks()) + { self.execute_block_and_collect_storage_proof(&mut params)?; } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs index 5bbdeb4d02a60..d468fefed19e8 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs @@ -17,6 +17,7 @@ mod basic; mod full_core_usage_scenarios; +mod warp_sync; mod pov_recovery; mod relay_parent_offset; mod runtime_upgrade; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs new file mode 100644 index 0000000000000..c0ed0861143fb --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs @@ -0,0 +1,183 @@ +// This file is part of Cumulus. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::utils::{initialize_network, BEST_BLOCK_METRIC}; +use anyhow::anyhow; +use cumulus_zombienet_sdk_helpers::{assert_para_throughput, assign_cores}; +use polkadot_primitives::Id as ParaId; +use serde_json::json; +use std::time::Duration; +use zombienet_orchestrator::network::node::LogLineCountOptions; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + AddCollatorOptions, NetworkConfig, NetworkConfigBuilder, +}; + +const PARA_ID: u32 = 2400; + +/// Warp-sync regression test for block bundling. +/// +/// Verifies that a fresh full node can warp-sync a chain that already has bundled blocks +/// (with BundleInfo/CoreInfo digests). +/// +/// When a fresh node joins, it warp-syncs the relay chain (jumping to a finalized target +/// with `StateAction::ApplyChanges`), then backfills the gap (blocks #1..#target) via +/// gap sync with `StateAction::Skip`. +/// +/// `SlotBasedBlockImport::import_block` must respect both `StateAction::Skip` and +/// `ApplyChanges`, and not attempt to call `execute_block_and_collect_storage_proof` +/// for these blocks, since the parent state is unavailable. +/// +/// If the guard is wrong, the full node fails to import blocks and never catches up. 
+#[tokio::test(flavor = "multi_thread")] +async fn warp_sync_with_bundled_blocks() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + log::info!("Spawning network without full node"); + let config = build_network_config().await?; + let mut network = initialize_network(config).await?; + + let relay_node = network.get_node("validator-0")?; + let relay_client: OnlineClient = relay_node.wait_client().await?; + + // Assign 2 extra cores (zombienet auto-assigns 1), for 3 total. + assign_cores(&relay_client, PARA_ID, vec![0, 1]).await?; + + // Wait for steady-state bundled block production. + log::info!("Waiting for steady-state block production"); + assert_para_throughput(&relay_client, 6, [(ParaId::from(PARA_ID), 12..19)], []).await?; + + // Query collator's current best block to set a sync target. + let target_block = network + .get_node("collator-0")? + .reports(BEST_BLOCK_METRIC) + .await? as u64; + log::info!("Full node sync target: #{target_block}"); + + // Add a fresh full node that will warp-sync to the already-running chain. + log::info!("Adding fresh full node with warp sync"); + let col_opts = AddCollatorOptions { + is_validator: false, + args: vec![ + ("--sync=warp").try_into()?, + ("-lsync=debug,parachain=debug,sync::cumulus=debug,aura=trace").try_into()?, + ("--relay-chain-rpc-urls", "{{ZOMBIE:validator-0:ws_uri}}").try_into()?, + ], + ..Default::default() + }; + network.add_collator("para-full-node", col_opts, PARA_ID).await?; + + let full_node = network.get_node("para-full-node")?; + + // Wait for the full node to sync and catch up. + // If the bug is present, the node fails to import bundled blocks and never advances. 
+ log::info!("Waiting for full node best block to reach #{target_block}"); + full_node + .wait_metric_with_timeout(BEST_BLOCK_METRIC, |b| b >= target_block as f64, 120u64) + .await?; + log::info!("Full node synced past #{target_block}"); + + // Verify the full node actually used warp sync (not full sync). + log::info!("Verifying warp sync was used"); + let option_1_line = LogLineCountOptions::new(|n| n == 1, Duration::from_secs(5), false); + let result = full_node + .wait_log_line_count_with_timeout( + r"\[Parachain\] Warp sync is complete", + false, + option_1_line, + ) + .await?; + if !result.success() { + return Err(anyhow!("Full node did not complete parachain warp sync")); + } + + // Make sure the full node keeps progressing on live blocks after the initial sync. + // Wait for it to advance 24 blocks beyond the collator's current best. + let collator_best = network + .get_node("collator-0")? + .reports(BEST_BLOCK_METRIC) + .await? as u64; + let live_target = (collator_best + 24) as f64; + log::info!("Collator best: #{collator_best}, waiting for full node to reach #{live_target}"); + + full_node + .wait_metric_with_timeout(BEST_BLOCK_METRIC, |b| b >= live_target, 120u64) + .await?; + + log::info!("Test finished successfully"); + Ok(()) +} + +async fn build_network_config() -> Result { + let images = zombienet_sdk::environment::get_images_from_env(); + log::info!("Using images: {images:?}"); + + NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=trace").into()]) + .with_default_resources(|resources| { + resources.with_request_cpu(4).with_request_memory("4G") + }) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "num_cores": 3, + "max_validators_per_core": 1 + } + } + } + })) + .with_validator(|node| node.with_name("validator-0")); + (1..5).fold(r, 
|acc, i| { + acc.with_validator(|node| node.with_name(&format!("validator-{i}"))) + }) + }) + .with_parachain(|p| { + p.with_id(PARA_ID) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("block-bundling") + .with_default_args(vec![ + ("--authoring").into(), + ("slot-based").into(), + ("-lparachain=trace,aura=trace,sync::cumulus=trace,consensus::common::parent_search=debug,runtime::parachain-system=debug").into(), + ]) + .with_genesis_overrides(json!({ + "testPallet": { + "enableBigValueMove": true + } + })) + .with_collator(|n| n.with_name("collator-0")) + .with_collator(|n| n.with_name("collator-1")) + }) + .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { + Ok(val) => global_settings.with_base_dir(val), + _ => global_settings, + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + }) +} diff --git a/substrate/client/consensus/common/src/block_import.rs b/substrate/client/consensus/common/src/block_import.rs index 3e319a30165a9..076a7151cb8de 100644 --- a/substrate/client/consensus/common/src/block_import.rs +++ b/substrate/client/consensus/common/src/block_import.rs @@ -153,6 +153,17 @@ pub enum StateAction { Skip, } +impl std::fmt::Debug for StateAction { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Self::ApplyChanges(_) => fmt.write_str("ApplyChanges(..)"), + Self::Execute => fmt.write_str("Execute"), + Self::ExecuteIfPossible => fmt.write_str("ExecuteIfPossible"), + Self::Skip => fmt.write_str("Skip"), + } + } +} + impl StateAction { /// Check if execution checks that require runtime calls should be skipped. 
pub fn skip_execution_checks(&self) -> bool { From 6ed90fb65abb2a2e6e9f62d34a18122f37eeab06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 13 Apr 2026 14:56:14 +0200 Subject: [PATCH 292/312] Small improvements --- .../src/validate_block/implementation.rs | 26 +++++++------------ 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index a5593be2a8e81..dabfa9ae724e4 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -176,9 +176,8 @@ where ) .build(); - // We use the same recorder when executing all blocks. So, each node only contributes once - // to the total size of the storage proof. This recorder should only be used for - // `execute_block`. + // Each node only contributes once to the total size of the storage proof. So, we keep track + // of them inside `seen_nodes` to always return the correct proof size. let mut execute_recorder = SizeOnlyRecorderProvider::with_seen_nodes(seen_nodes.clone()); // `backend` with the `execute_recorder`. As the `execute_recorder`, this should only be // used for `execute_block`. 
@@ -429,27 +428,22 @@ fn verify_blocks_form_chain(blocks: &[B::LazyBlock], parent_header: & if let Some(ref info) = bundle_info { assert_eq!( info.index as usize, block_index, - "BlockBundleInfo index mismatch: expected {}, got {}", - block_index, info.index + "BlockBundleInfo index mismatch: expected {block_index}, got {}", + info.index ); if block_index + 1 < num_blocks { assert!( - !CumulusDigestItem::is_last_block_in_core(block.header().digest()) - .unwrap_or(false), - "Intermediate block at index {} is marked as last block in core, \ + !CumulusDigestItem::is_last_block_in_core(block.header().digest()).unwrap_or(false), + "Intermediate block at index {block_index} is marked as last block in core, \ but more blocks follow in the PoV", - block_index ); - } - - if block_index + 1 == num_blocks && - !CumulusDigestItem::is_last_block_in_core(block.header().digest()) - .unwrap_or(true) + } else if !CumulusDigestItem::is_last_block_in_core(block.header().digest()) + .unwrap_or(true) { panic!( - "Last block in PoV must include the digest that marks it as the last block in the core" - ); + "Last block in PoV must include the digest that marks it as the last block in the core" + ); } } From 9bf54b6ab087ea3cffe0a24e7bc07a3da2fe0186 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 14 Apr 2026 15:03:09 +0200 Subject: [PATCH 293/312] Do not process upward messages anymore on the node --- cumulus/client/collator/src/service.rs | 6 +----- .../parachain-system/src/validate_block/implementation.rs | 4 +--- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/cumulus/client/collator/src/service.rs b/cumulus/client/collator/src/service.rs index 45d831b220103..578f39061bed4 100644 --- a/cumulus/client/collator/src/service.rs +++ b/cumulus/client/collator/src/service.rs @@ -284,11 +284,7 @@ where let (messages, signals) = Self::split_at_separator(collation_info.upward_messages); upward_messages.extend(messages); - signals.into_iter().for_each(|s| 
{ - if !upward_message_signals.contains(&s) { - upward_message_signals.push(s); - } - }); + upward_message_signals.extend(signals); horizontal_messages.extend(collation_info.horizontal_messages); if let Some(new_code) = collation_info.new_validation_code { if new_validation_code.replace(new_code).is_some() { diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index dabfa9ae724e4..277c8c4f92579 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -250,9 +250,7 @@ where found_separator = true; None } else if found_separator { - if upward_message_signals.iter().all(|s| *s != m) { - upward_message_signals.push(m); - } + upward_message_signals.push(m); None } else { // No signal or separator From 559dfece9d6d5d6aa6dabfc580303f47f96a216f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 14 Apr 2026 15:06:36 +0200 Subject: [PATCH 294/312] FMT --- bridges/primitives/runtime/src/lib.rs | 6 +++--- .../parachain-system/src/validate_block/tests.rs | 6 +----- .../tests/zombie_ci/block_bundling/mod.rs | 2 +- .../tests/zombie_ci/block_bundling/warp_sync.rs | 10 ++-------- 4 files changed, 7 insertions(+), 17 deletions(-) diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs index 8d9bbd306115b..7971b92cfe6ab 100644 --- a/bridges/primitives/runtime/src/lib.rs +++ b/bridges/primitives/runtime/src/lib.rs @@ -34,9 +34,9 @@ use sp_runtime::{ use sp_std::{ops::RangeInclusive, vec, vec::Vec}; pub use chain::{ - AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain, EncodedOrDecodedCall, HashOf, - HasherOf, HeaderOf, NonceOf, Parachain, ParachainIdOf, SignatureOf, TransactionEraOf, - UnderlyingChainOf, UnderlyingChainProvider, __private, + __private, AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, 
Chain, EncodedOrDecodedCall, + HashOf, HasherOf, HeaderOf, NonceOf, Parachain, ParachainIdOf, SignatureOf, TransactionEraOf, + UnderlyingChainOf, UnderlyingChainProvider, }; pub use frame_support::storage::storage_prefix as storage_value_final_key; use num_traits::{CheckedAdd, CheckedSub, One, SaturatingAdd, Zero}; diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 6a451cc8718f6..491676651e42e 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -790,11 +790,7 @@ fn rejects_blocks_in_bundle_after_block_marked_as_last() { .unwrap_err(); } else { let output = Command::new(env::current_exe().unwrap()) - .args([ - "rejects_blocks_in_bundle_after_block_marked_as_last", - "--", - "--nocapture", - ]) + .args(["rejects_blocks_in_bundle_after_block_marked_as_last", "--", "--nocapture"]) .env("RUN_TEST", "1") .output() .expect("Runs the test"); diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs index d468fefed19e8..e48bc0f8a9c5b 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs @@ -17,9 +17,9 @@ mod basic; mod full_core_usage_scenarios; -mod warp_sync; mod pov_recovery; mod relay_parent_offset; mod runtime_upgrade; mod three_cores_glutton; mod tracing_block; +mod warp_sync; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs index c0ed0861143fb..64ff7fdbd4c9c 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs @@ -64,10 +64,7 @@ async fn 
warp_sync_with_bundled_blocks() -> Result<(), anyhow::Error> { assert_para_throughput(&relay_client, 6, [(ParaId::from(PARA_ID), 12..19)], []).await?; // Query collator's current best block to set a sync target. - let target_block = network - .get_node("collator-0")? - .reports(BEST_BLOCK_METRIC) - .await? as u64; + let target_block = network.get_node("collator-0")?.reports(BEST_BLOCK_METRIC).await? as u64; log::info!("Full node sync target: #{target_block}"); // Add a fresh full node that will warp-sync to the already-running chain. @@ -109,10 +106,7 @@ async fn warp_sync_with_bundled_blocks() -> Result<(), anyhow::Error> { // Make sure the full node keeps progressing on live blocks after the initial sync. // Wait for it to advance 24 blocks beyond the collator's current best. - let collator_best = network - .get_node("collator-0")? - .reports(BEST_BLOCK_METRIC) - .await? as u64; + let collator_best = network.get_node("collator-0")?.reports(BEST_BLOCK_METRIC).await? as u64; let live_target = (collator_best + 24) as f64; log::info!("Collator best: #{collator_best}, waiting for full node to reach #{live_target}"); From a0522c0e2a182c736192bdd2fe4b3f0c1d67ced1 Mon Sep 17 00:00:00 2001 From: "cmd[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 14 Apr 2026 13:14:14 +0000 Subject: [PATCH 295/312] Update from github-actions[bot] running command 'fmt' --- bridges/primitives/runtime/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs index 7971b92cfe6ab..8d9bbd306115b 100644 --- a/bridges/primitives/runtime/src/lib.rs +++ b/bridges/primitives/runtime/src/lib.rs @@ -34,9 +34,9 @@ use sp_runtime::{ use sp_std::{ops::RangeInclusive, vec, vec::Vec}; pub use chain::{ - __private, AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain, EncodedOrDecodedCall, - HashOf, HasherOf, HeaderOf, NonceOf, Parachain, ParachainIdOf, SignatureOf, 
TransactionEraOf, - UnderlyingChainOf, UnderlyingChainProvider, + AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain, EncodedOrDecodedCall, HashOf, + HasherOf, HeaderOf, NonceOf, Parachain, ParachainIdOf, SignatureOf, TransactionEraOf, + UnderlyingChainOf, UnderlyingChainProvider, __private, }; pub use frame_support::storage::storage_prefix as storage_value_final_key; use num_traits::{CheckedAdd, CheckedSub, One, SaturatingAdd, Zero}; From 83125fd70dfab929ec585bee7c48d9ca3af98237 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 15 Apr 2026 09:13:36 +0200 Subject: [PATCH 296/312] Small improvements --- cumulus/client/consensus/aura/src/collators/lookahead.rs | 5 +---- cumulus/client/consensus/aura/src/collators/mod.rs | 7 +++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 811051fd43608..34e35d9d93ec4 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -287,10 +287,7 @@ where let Some(core_index) = claim_queue_at(relay_parent, &mut params.relay_client) .await - .iter_claims_at_depth(0) - .find_map( - |(core, para_id)| if para_id == params.para_id { Some(core) } else { None }, - ) + .iter_claims_at_depth_for_para(0, params.para_id) else { tracing::trace!( target: crate::LOG_TARGET, diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index dd33e938462ea..8ffb6fabb5eae 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -257,13 +257,12 @@ where } let runtime_api = client.runtime_api(); - let api_version = match runtime_api + let Some(api_version) = runtime_api .api_version::>(parent_hash) .ok() .flatten() - { - Some(v) => v, - None => return false, + else { + return false; }; 
let slot = if api_version > 1 { relay_slot } else { para_slot }; From db509a8c4811ac973e20bff1e4261bfcb41a3129 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 15 Apr 2026 10:15:13 +0200 Subject: [PATCH 297/312] Ensure we only send the signals at the end of a bundle/pov --- cumulus/pallets/parachain-system/src/lib.rs | 49 ++++++++++++--------- 1 file changed, 28 insertions(+), 21 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 942b5d21cbb00..d06e085d7de02 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -34,9 +34,9 @@ use codec::{Decode, DecodeLimit, Encode}; use core::cmp; use cumulus_primitives_core::{ relay_chain::{self, UMPSignal, UMP_SEPARATOR}, - AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, CumulusDigestItem, - GetChannelInfo, ListChannelInfos, MessageSendError, OutboundHrmpMessage, ParaId, - PersistedValidationData, UpwardMessage, UpwardMessageSender, XcmpMessageHandler, + AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, CoreInfo, + CumulusDigestItem, GetChannelInfo, ListChannelInfos, MessageSendError, OutboundHrmpMessage, + ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender, XcmpMessageHandler, XcmpMessageSource, }; use cumulus_primitives_parachain_inherent::{v0, MessageQueueChain, ParachainInherentData}; @@ -412,22 +412,15 @@ pub mod pallet { let digest = frame_system::Pallet::::digest(); - if let Some(core_info) = CumulusDigestItem::find_core_info(&digest) { - PendingUpwardSignals::::append( - UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset) - .encode(), - ); - - PreviousCoreCount::::put(core_info.number_of_cores); - } else { - // Without the digest, we assume that it is `1`. 
- PreviousCoreCount::::put(Compact(1u16)); - } + let core_info = CumulusDigestItem::find_core_info(&digest); + PreviousCoreCount::::put( + core_info.as_ref().map_or(Compact(1u16), |ci| ci.number_of_cores), + ); - // Only send UMP signals on the last block of a bundle. + // Only send UMP signals on the last block of a PoV. // For single-block PoVs (no BlockBundleInfo), always send signals. if CumulusDigestItem::is_last_block_in_core(&digest).unwrap_or(true) { - Self::send_ump_signals(); + Self::send_ump_signals(core_info); } // If the total size of the pending messages is less than the threshold, @@ -779,9 +772,7 @@ pub mod pallet { ::on_validation_data(&vfp); if let Some(collator_peer_id) = collator_peer_id { - PendingUpwardSignals::::append( - UMPSignal::ApprovedPeer(collator_peer_id).encode(), - ); + PendingApprovedPeer::::put(collator_peer_id); } total_weight.saturating_accrue(Self::enqueue_inbound_downward_messages( @@ -1028,6 +1019,11 @@ pub mod pallet { #[pallet::storage] pub type PendingUpwardSignals = StorageValue<_, Vec, ValueQuery>; + /// The approved peer id to be sent as a UMP signal on the last block of the PoV. + #[pallet::storage] + pub type PendingApprovedPeer = + StorageValue<_, relay_chain::ApprovedPeerId, OptionQuery>; + /// The factor to multiply the base delivery fee by for UMP. 
#[pallet::storage] pub type UpwardDeliveryFeeFactor = @@ -1670,8 +1666,19 @@ impl Pallet { } /// Send the pending ump signals - fn send_ump_signals() { - let ump_signals = PendingUpwardSignals::::take(); + fn send_ump_signals(core_info: Option) { + let mut ump_signals = PendingUpwardSignals::::take(); + + if let Some(core_info) = core_info { + ump_signals.push( + UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset).encode(), + ); + } + + if let Some(approved_peer) = PendingApprovedPeer::::take() { + ump_signals.push(UMPSignal::ApprovedPeer(approved_peer).encode()); + } + if !ump_signals.is_empty() { UpwardMessages::::append(UMP_SEPARATOR); ump_signals.into_iter().for_each(|s| UpwardMessages::::append(s)); From 2d58c06ceb027564955d0494e707acaa065777c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 15 Apr 2026 10:56:14 +0200 Subject: [PATCH 298/312] Inflate the wasm manually --- Cargo.lock | 2 + cumulus/zombienet/zombienet-sdk/Cargo.toml | 2 + .../block_bundling/runtime_upgrade.rs | 69 +++++++++++++------ 3 files changed, 52 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2bfd055db51b5..d197e668422c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5594,6 +5594,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", + "parity-wasm", "polkadot-primitives", "rstest", "sc-executor 0.32.0", @@ -5606,6 +5607,7 @@ dependencies = [ "sp-consensus-slots", "sp-core 28.0.0", "sp-keyring", + "sp-maybe-compressed-blob 11.0.0", "sp-rpc", "sp-runtime 31.0.1", "sp-statement-store", diff --git a/cumulus/zombienet/zombienet-sdk/Cargo.toml b/cumulus/zombienet/zombienet-sdk/Cargo.toml index 4ef43bdbee0a6..69808b7aadddc 100644 --- a/cumulus/zombienet/zombienet-sdk/Cargo.toml +++ b/cumulus/zombienet/zombienet-sdk/Cargo.toml @@ -26,6 +26,8 @@ sc-statement-store = { workspace = true, default-features = true, features = ["t sp-keyring = { workspace = true, default-features = true } sp-core = { workspace = true, 
default-features = true } sp-version = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +parity-wasm = { workspace = true } sc-executor = { workspace = true, default-features = true } sc-executor-common = { workspace = true, default-features = true } frame-support = { workspace = true } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs index 82b04cc474593..944e115b1765e 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs @@ -17,7 +17,7 @@ use anyhow::anyhow; use cumulus_primitives_core::relay_chain::MAX_POV_SIZE; -use cumulus_test_runtime::block_bundling::WASM_BINARY_BLOATY as WASM_RUNTIME_BINARY; +use cumulus_test_runtime::block_bundling::WASM_BINARY; use cumulus_zombienet_sdk_helpers::{ assign_cores, ensure_is_only_block_in_core, submit_extrinsic_and_wait_for_finalization_success, submit_unsigned_extrinsic_and_wait_for_finalization_success, wait_for_runtime_upgrade, @@ -55,26 +55,12 @@ async fn block_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), ); - // Validate runtime size requirement - let runtime_wasm = - WASM_RUNTIME_BINARY.ok_or_else(|| anyhow!("WASM runtime upgrade binary not available"))?; + let compressed_wasm = + WASM_BINARY.ok_or_else(|| anyhow!("WASM runtime binary not available"))?; - if runtime_wasm.len() <= MIN_RUNTIME_SIZE_BYTES { - return Err(anyhow!( - "Runtime size {} bytes is below minimum required {} bytes (2.5MiB)", - runtime_wasm.len(), - MIN_RUNTIME_SIZE_BYTES - )); - } - - // Let's create our own fake runtime upgrade where we just bump the `spec_version`. 
- // On chain nothing will change, as we only change the runtime version stored inside the wasm - // file. - let blob = sc_executor_common::runtime_blob::RuntimeBlob::uncompress_if_needed(runtime_wasm)?; - let mut version = sc_executor::read_embedded_version(&blob)? - .ok_or_else(|| anyhow!("No runtime version found?"))?; - version.spec_version += 1; - let runtime_wasm = sp_version::embed::embed_runtime_version(runtime_wasm, version)?; + // Decompress and inflate with a custom wasm section containing pseudo-random data until + // the compressed size exceeds `MIN_RUNTIME_SIZE_BYTES`. + let runtime_wasm = inflate_runtime_wasm(compressed_wasm, MIN_RUNTIME_SIZE_BYTES)?; log::info!("Runtime size validation passed: {} bytes", runtime_wasm.len()); @@ -146,6 +132,48 @@ fn create_sudo_call(inner_call: DynamicPayload) -> DynamicPayload { zombienet_sdk::subxt::tx::dynamic("Sudo", "sudo", vec![inner_call.into_value()]) } +/// Decompress the WASM binary and pad with a custom section containing pseudo-random data +/// until the compressed size exceeds `min_compressed_size`. +fn inflate_runtime_wasm( + compressed_wasm: &[u8], + min_compressed_size: usize, +) -> Result, anyhow::Error> { + let mut wasm = sp_maybe_compressed_blob::decompress(compressed_wasm, 50 * 1024 * 1024) + .map_err(|e| anyhow!("Decompression failed: {:?}", e))? 
+ .into_owned(); + + let mut rng_state: u64 = 0xdeadbeef; + let chunk_size = 256 * 1024; + loop { + let padding: Vec = (0..chunk_size) + .map(|_| { + // xorshift64 + rng_state ^= rng_state << 13; + rng_state ^= rng_state >> 7; + rng_state ^= rng_state << 17; + rng_state as u8 + }) + .collect(); + + let mut module: parity_wasm::elements::Module = + parity_wasm::deserialize_buffer(&wasm).map_err(|e| anyhow!("wasm parse: {e:?}"))?; + module.set_custom_section("padding", padding); + wasm = parity_wasm::serialize(module).map_err(|e| anyhow!("wasm serialize: {e:?}"))?; + + let compressed = sp_maybe_compressed_blob::compress(&wasm, 50 * 1024 * 1024) + .ok_or_else(|| anyhow!("Compression failed"))?; + log::info!( + "Inflated WASM: uncompressed={} bytes, compressed={} bytes (target={})", + wasm.len(), + compressed.len(), + min_compressed_size, + ); + if compressed.len() >= min_compressed_size { + return Ok(wasm); + } + } +} + async fn build_network_config() -> Result { let images = zombienet_sdk::environment::get_images_from_env(); log::info!("Using images: {images:?}"); @@ -162,7 +190,6 @@ async fn build_network_config() -> Result { .with_genesis_overrides(json!({ "configuration": { "config": { - "max_code_size": 5242880, "scheduler_params": { "num_cores": 3, "max_validators_per_core": 1 From d4457ccdcec7873ea3a21677a43545db7409664c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 15 Apr 2026 12:27:08 +0200 Subject: [PATCH 299/312] Fixes --- cumulus/client/consensus/aura/src/collators/lookahead.rs | 1 + cumulus/pallets/parachain-system/src/lib.rs | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 34e35d9d93ec4..e8756cdacc076 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -288,6 +288,7 @@ where let Some(core_index) = 
claim_queue_at(relay_parent, &mut params.relay_client) .await .iter_claims_at_depth_for_para(0, params.para_id) + .next() else { tracing::trace!( target: crate::LOG_TARGET, diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index d06e085d7de02..83ac8a0044b4e 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -771,8 +771,9 @@ pub mod pallet { ::on_validation_data(&vfp); - if let Some(collator_peer_id) = collator_peer_id { - PendingApprovedPeer::::put(collator_peer_id); + match collator_peer_id { + Some(peer_id) => PendingApprovedPeer::::put(peer_id), + None => PendingApprovedPeer::::kill(), } total_weight.saturating_accrue(Self::enqueue_inbound_downward_messages( From 9ea8f91998ac2986160bc2434e78a0834afb3733 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 15 Apr 2026 14:37:24 +0200 Subject: [PATCH 300/312] I love fixing fixes --- .../tests/zombie_ci/block_bundling/runtime_upgrade.rs | 2 +- .../tests/zombie_ci/block_bundling/warp_sync.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs index 944e115b1765e..c2e467429b50f 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs @@ -160,7 +160,7 @@ fn inflate_runtime_wasm( module.set_custom_section("padding", padding); wasm = parity_wasm::serialize(module).map_err(|e| anyhow!("wasm serialize: {e:?}"))?; - let compressed = sp_maybe_compressed_blob::compress(&wasm, 50 * 1024 * 1024) + let compressed = sp_maybe_compressed_blob::compress_weakly(&wasm, 50 * 1024 * 1024) .ok_or_else(|| anyhow!("Compression failed"))?; log::info!( "Inflated WASM: uncompressed={} bytes, 
compressed={} bytes (target={})", diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs index 64ff7fdbd4c9c..309d7e22b2c03 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs @@ -72,9 +72,9 @@ async fn warp_sync_with_bundled_blocks() -> Result<(), anyhow::Error> { let col_opts = AddCollatorOptions { is_validator: false, args: vec![ - ("--sync=warp").try_into()?, - ("-lsync=debug,parachain=debug,sync::cumulus=debug,aura=trace").try_into()?, - ("--relay-chain-rpc-urls", "{{ZOMBIE:validator-0:ws_uri}}").try_into()?, + ("--sync=warp").into(), + ("-lsync=debug,parachain=debug,sync::cumulus=debug,aura=trace").into(), + ("--relay-chain-rpc-urls", "{{ZOMBIE:validator-0:ws_uri}}").into(), ], ..Default::default() }; From e929d33998b2365683a758e5e04a1828aaa5cfe8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 15 Apr 2026 22:38:11 +0200 Subject: [PATCH 301/312] Fix tests --- cumulus/pallets/parachain-system/src/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/pallets/parachain-system/src/tests.rs b/cumulus/pallets/parachain-system/src/tests.rs index eb295f5d78fef..4d739ab46cf6f 100755 --- a/cumulus/pallets/parachain-system/src/tests.rs +++ b/cumulus/pallets/parachain-system/src/tests.rs @@ -1790,9 +1790,9 @@ fn ump_signals_are_sent_correctly() { vec![ b"Test".to_vec(), UMP_SEPARATOR, + UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset).encode(), UMPSignal::ApprovedPeer(ApprovedPeerId::try_from(b"12345".to_vec()).unwrap()) .encode(), - UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset).encode(), ], ), ]); From a941955d82f4fc63a3e3664b52e4871cf0497861 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 15 Apr 2026 22:41:18
+0200 Subject: [PATCH 302/312] Do not loop into eternity --- .../block_bundling/runtime_upgrade.rs | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs index c2e467429b50f..589f03126a5c6 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs @@ -143,21 +143,20 @@ fn inflate_runtime_wasm( .into_owned(); let mut rng_state: u64 = 0xdeadbeef; + let mut padding = Vec::new(); let chunk_size = 256 * 1024; loop { - let padding: Vec = (0..chunk_size) - .map(|_| { - // xorshift64 - rng_state ^= rng_state << 13; - rng_state ^= rng_state >> 7; - rng_state ^= rng_state << 17; - rng_state as u8 - }) - .collect(); + padding.extend((0..chunk_size).map(|_| { + // xorshift64 + rng_state ^= rng_state << 13; + rng_state ^= rng_state >> 7; + rng_state ^= rng_state << 17; + rng_state as u8 + })); let mut module: parity_wasm::elements::Module = parity_wasm::deserialize_buffer(&wasm).map_err(|e| anyhow!("wasm parse: {e:?}"))?; - module.set_custom_section("padding", padding); + module.set_custom_section("padding", padding.clone()); wasm = parity_wasm::serialize(module).map_err(|e| anyhow!("wasm serialize: {e:?}"))?; let compressed = sp_maybe_compressed_blob::compress_weakly(&wasm, 50 * 1024 * 1024) From 18751bb4c7cf467a50600e87d95b759c794a1042 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 16 Apr 2026 10:37:51 +0200 Subject: [PATCH 303/312] Use the compressed wasm --- .../tests/zombie_ci/block_bundling/runtime_upgrade.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs 
b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs index 589f03126a5c6..5cc779aa6149d 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs @@ -168,7 +168,7 @@ fn inflate_runtime_wasm( min_compressed_size, ); if compressed.len() >= min_compressed_size { - return Ok(wasm); + return Ok(compressed); } } } From 508bc40c3d93f55ba281d16d293d29c04d6dbff2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 16 Apr 2026 12:03:15 +0200 Subject: [PATCH 304/312] Find the first block that fulfills the filter --- .../consensus/aura/src/collators/mod.rs | 27 +++++++++++++++---- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 8ffb6fabb5eae..f5c7fc535aca2 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -35,10 +35,12 @@ use polkadot_primitives::{ Hash as RelayHash, Id as ParaId, OccupiedCoreAssumption, ValidationCodeHash, DEFAULT_SCHEDULING_LOOKAHEAD, }; +use sc_client_api::HeaderBackend; use sc_consensus_aura::{standalone as aura_internal, AuraApi}; use sp_api::{ApiExt, ProvideRuntimeApi, RuntimeApiInfo}; use sp_core::Pair; use sp_keystore::KeystorePtr; +use sp_runtime::traits::Header; use sp_timestamp::Timestamp; pub mod basic; @@ -276,8 +278,8 @@ where /// Use [`cumulus_client_consensus_common::find_parent_for_building`] to find the best parachain /// block to build on. /// -/// If the best parent does not pass `filter_parent`, falls back to building on the included -/// block. +/// If the best parent does not pass `filter_parent`, walks backwards through ancestors +/// until finding one that does, or reaching the included block. 
async fn find_parent( relay_parent: RelayHash, para_id: ParaId, @@ -324,9 +326,24 @@ where }, }; - // If the best parent doesn't pass the filter, fall back to the included block. - if !filter_parent(&result.best_parent_header) { - result.best_parent_header = result.included_header.clone(); + // If the best parent doesn't pass the filter (e.g. it's a middle block in a bundle), + // walk backwards towards the included block until we find one that does. + // This avoids falling all the way back to the included block when there are valid + // last-in-core ancestors closer to the chain tip. + while !filter_parent(&result.best_parent_header) { + let parent_hash = *result.best_parent_header.parent_hash(); + match para_backend.blockchain().header(parent_hash) { + Ok(Some(header)) => { + result.best_parent_header = header; + if parent_hash == result.included_header.hash() { + break; + } + }, + _ => { + result.best_parent_header = result.included_header.clone(); + break; + }, + } } Some(result) From 95d6fb237d21b8540c7671e27eda4bd9f85d3a22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 16 Apr 2026 14:17:05 +0200 Subject: [PATCH 305/312] Start counting after having seen the first candidates --- .../zombienet-sdk-helpers/src/lib.rs | 44 +++++++++++++++++-- 1 file changed, 41 insertions(+), 3 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 7546ef144f7e7..8eecb5767d6d1 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -86,8 +86,9 @@ async fn is_session_change( // Helper function for asserting the throughput of parachains, after the first session change. // // The throughput is measured as total number of backed candidates in a window of `stop_after` relay -// chain blocks. 
Relay chain blocks with session changes are generally ignored, but it is ensured -// that no blocks are build on top of these relay blocks. +// chain blocks. The counting window starts from the relay chain block after the first one that +// contains a backed candidate for a tracked para. Relay chain blocks with session changes are +// generally ignored, but it is ensured that no blocks are build on top of these relay blocks. pub async fn assert_para_throughput( relay_client: &OnlineClient, stop_after: u32, @@ -149,9 +150,46 @@ where // Wait for the first session, block production on the parachain will start after that. wait_for_first_session_change(&mut blocks_sub).await?; log::info!( - "First session change detected. Counting {stop_after} finalized relay chain blocks." + "First session change detected. Waiting for backed candidates from all tracked paras before counting." ); + // Skip relay chain blocks until every tracked para has had at least one backed candidate. + // This avoids counting the initial warm-up period where the backing pipeline (PVF + // compilation, first collation) hasn't reached steady state yet. + let mut paras_seen = std::collections::HashSet::new(); + loop { + let block = blocks_sub + .next() + .await + .ok_or_else(|| anyhow!("Block stream ended while waiting for first candidate"))??; + + if is_session_change(&block).await? { + continue; + } + + let events = block.events().await?; + let receipts = find_event_and_decode_fields::>( + &events, + "ParaInclusion", + "CandidateBacked", + )?; + + for receipt in &receipts { + let para_id = receipt.descriptor.para_id(); + if valid_para_ids.contains(¶_id) { + paras_seen.insert(para_id); + } + } + + if paras_seen.len() == valid_para_ids.len() { + log::info!( + "All tracked paras have produced candidates by relay block {}. 
Counting {stop_after} blocks from the next one.", + block.number() + ); + break; + } + } + while let Some(block) = blocks_sub.next().await { let block = block?; log::debug!("Finalized relay chain block {}", block.number()); From 539bad0c5ca05496b17647e76e1e4b09ac603cf3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 16 Apr 2026 14:18:45 +0200 Subject: [PATCH 306/312] Let's use less compute with glutton --- .../block_bundling/three_cores_glutton.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs index c569a95f5fd02..4df102cb9e3de 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs @@ -27,9 +27,9 @@ use zombienet_sdk::{ const PARA_ID: u32 = 2400; -/// A test that ensures that PoV bundling works with 3 cores and glutton consuming 80% ref time. +/// A test that ensures that PoV bundling works with 3 cores and glutton consuming 10% ref time. /// -/// This test starts with 3 cores assigned and configures glutton to use 80% of ref time, +/// This test starts with 3 cores assigned and configures glutton to use 10% of ref time, /// then validates that the parachain produces 72 blocks. 
#[tokio::test(flavor = "multi_thread")] async fn block_bundling_three_cores_glutton() -> Result<(), anyhow::Error> { @@ -106,12 +106,12 @@ async fn build_network_config() -> Result { ("-lparachain=debug,aura=trace,runtime=trace").into(), ]) .with_genesis_overrides(json!({ - "glutton": { - "compute": "200000000", // 20% ref time consumption - "storage": "0", // No storage consumption - "trashDataCount": 5000, // Initialize with some trash data - "blockLength": "0" // No block length consumption - } + "glutton": { + "compute": "0.1", + "storage": "0", + "trashDataCount": 5000, + "blockLength": "0" + } })) .with_collator(|n| n.with_name("collator-0")) .with_collator(|n| n.with_name("collator-1")) From 114aef66b3af8531e179393d07b0303484781512 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 16 Apr 2026 15:46:13 +0200 Subject: [PATCH 307/312] Improve error output --- cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 8eecb5767d6d1..d2d60a8a1839b 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -236,12 +236,12 @@ where for (para_id, expected_candidate_range) in expected_candidate_ranges { let actual = candidate_count .get(¶_id) - .ok_or_else(|| anyhow!("ParaId {} did not have any backed candidates", para_id))? + .ok_or_else(|| anyhow!("ParaId {para_id} did not have any backed candidates"))? 
.len() as u32; if !expected_candidate_range.contains(&actual) { return Err(anyhow!( - "Candidate count {actual} not within range {expected_candidate_range:?}" + "ParaId {para_id}: candidate count {actual} not within expected range {expected_candidate_range:?}" )); } } From f01c4a66864b1afc91e05fea6963f36d4d812cf7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 16 Apr 2026 21:05:54 +0200 Subject: [PATCH 308/312] Bump the spec_version --- .../tests/zombie_ci/block_bundling/runtime_upgrade.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs index 5cc779aa6149d..b33d42f5fb9c7 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs @@ -142,6 +142,15 @@ fn inflate_runtime_wasm( .map_err(|e| anyhow!("Decompression failed: {:?}", e))? .into_owned(); + // Bump the `spec_version` so that `apply_authorized_upgrade`'s version check passes. + // On chain nothing will change, as we only change the runtime version stored inside the wasm + // file. + let blob = sc_executor_common::runtime_blob::RuntimeBlob::new(&wasm)?; + let mut version = sc_executor::read_embedded_version(&blob)? 
+ .ok_or_else(|| anyhow!("No runtime version found?"))?; + version.spec_version += 1; + wasm = sp_version::embed::embed_runtime_version(&wasm, version)?; + let mut rng_state: u64 = 0xdeadbeef; let mut padding = Vec::new(); let chunk_size = 256 * 1024; From 72ceb956626a7c57ce92cdb98b14c42cc2b748c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 17 Apr 2026 10:29:28 +0200 Subject: [PATCH 309/312] Ensure we also sort on the node side --- cumulus/client/collator/src/service.rs | 4 ++ cumulus/test/runtime/src/test_pallet.rs | 28 ++++++++++ .../tests/zombie_ci/block_bundling/basic.rs | 51 +++++++++++++++++-- 3 files changed, 79 insertions(+), 4 deletions(-) diff --git a/cumulus/client/collator/src/service.rs b/cumulus/client/collator/src/service.rs index 578f39061bed4..38755a706774a 100644 --- a/cumulus/client/collator/src/service.rs +++ b/cumulus/client/collator/src/service.rs @@ -286,6 +286,7 @@ where upward_messages.extend(messages); upward_message_signals.extend(signals); horizontal_messages.extend(collation_info.horizontal_messages); + if let Some(new_code) = collation_info.new_validation_code { if new_validation_code.replace(new_code).is_some() { tracing::warn!( @@ -300,6 +301,9 @@ where head_data = Some(collation_info.head_data); } + // Sort by recipient as required by the relay chain rules. + horizontal_messages.sort_by(|a, b| a.recipient.cmp(&b.recipient)); + let block_data = ParachainBlockData::::new(blocks, compact_proof); let pov = polkadot_node_primitives::maybe_compress_pov(PoV { diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index f7cfd65929f89..0d324dc2904ea 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -96,6 +96,13 @@ pub mod pallet { } } + /// When active, `on_initialize` queues one HRMP message per block, alternating + /// between `HRMP_RECIPIENT_HIGH` (odd blocks) and `HRMP_RECIPIENT_LOW` (even blocks). 
+ /// This produces descending recipient order across consecutive blocks in a bundle, + /// exercising the HRMP message sorting in the collation path. + #[pallet::storage] + pub type HrmpSendingActive = StorageValue<_, bool, ValueQuery>; + /// Flag to indicate if a 1s weight should be registered in the next `on_initialize`. #[pallet::storage] pub type ScheduleWeightRegistration = StorageValue<_, bool, ValueQuery>; @@ -113,9 +120,24 @@ pub mod pallet { pub type BigValueMove = StorageMap<_, Twox64Concat, BlockNumberFor, Vec, OptionQuery>; + pub const HRMP_RECIPIENT_LOW: u32 = 2500; + pub const HRMP_RECIPIENT_HIGH: u32 = 2600; + #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(n: BlockNumberFor) -> Weight { + if HrmpSendingActive::::get() { + let block_num: u32 = n.try_into().unwrap_or(0); + let recipient = if block_num % 2 == 1 { + ParaId::from(HRMP_RECIPIENT_HIGH) + } else { + ParaId::from(HRMP_RECIPIENT_LOW) + }; + PendingOutboundHrmpMessages::::mutate(|messages| { + messages.push((recipient, vec![block_num as u8])); + }); + } + if ScheduleWeightRegistration::::get() { let weight_to_register = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 0); @@ -346,6 +368,8 @@ pub mod pallet { pub _config: core::marker::PhantomData, /// Controls if the `BigValueMove` logic is enabled. pub enable_big_value_move: bool, + /// Activate HRMP sending with descending recipients from genesis. 
+ pub enable_hrmp_sending: bool, } #[pallet::genesis_build] @@ -356,6 +380,10 @@ pub mod pallet { if self.enable_big_value_move { BigValueMove::::insert(BlockNumberFor::::from(0u32), vec![0u8; 4 * 1024]); } + + if self.enable_hrmp_sending { + HrmpSendingActive::::set(true); + } } } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs index 76e3cc6581f26..c8be4c5642a8c 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs @@ -17,12 +17,17 @@ use crate::utils::initialize_network; use anyhow::anyhow; -use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_para_throughput, assign_cores}; +use cumulus_test_runtime::test_pallet::{HRMP_RECIPIENT_HIGH, HRMP_RECIPIENT_LOW}; +use cumulus_zombienet_sdk_helpers::{ + assert_finality_lag, assert_para_throughput, assign_cores, + submit_extrinsic_and_wait_for_finalization_success, +}; use polkadot_primitives::Id as ParaId; use serde_json::json; use tokio::{join, spawn, task::JoinHandle}; use zombienet_sdk::{ - subxt::{OnlineClient, PolkadotConfig}, + subxt::{ext::scale_value::value, OnlineClient, PolkadotConfig}, + subxt_signer::sr25519::dev, NetworkConfig, NetworkConfigBuilder, NetworkNode, }; @@ -50,6 +55,25 @@ async fn block_bundling_basic() -> Result<(), anyhow::Error> { let para_client = para_node.wait_client().await?; let relay_client: OnlineClient = relay_node.wait_client().await?; + + for recipient in [HRMP_RECIPIENT_LOW, HRMP_RECIPIENT_HIGH] { + let call = zombienet_sdk::subxt::tx::dynamic( + "Sudo", + "sudo", + vec![value! 
{ + Hrmp(force_open_hrmp_channel { + sender: PARA_ID, + recipient: recipient, + max_capacity: 1000u32, + max_message_size: 1024u32 + }) + }], + ); + submit_extrinsic_and_wait_for_finalization_success(&relay_client, &call, &dev::alice()) + .await?; + } + log::info!("HRMP channels opened to {HRMP_RECIPIENT_LOW} and {HRMP_RECIPIENT_HIGH}"); + assert_para_throughput( &relay_client, 6, @@ -152,7 +176,11 @@ async fn build_network_config() -> Result { "scheduler_params": { "num_cores": 7, "max_validators_per_core": 1 - } + }, + "hrmp_channel_max_capacity": 1000, + "hrmp_channel_max_message_size": 1024, + "hrmp_max_message_num_per_candidate": 100, + "hrmp_max_parachain_outbound_channels": 10 } } })) @@ -175,7 +203,8 @@ async fn build_network_config() -> Result { ]) .with_genesis_overrides(json!({ "testPallet": { - "enableBigValueMove": true + "enableBigValueMove": true, + "enableHrmpSending": true } })) .with_collator(|n| n.with_name("collator-0")) @@ -183,6 +212,20 @@ async fn build_network_config() -> Result { .with_collator(|n| n.with_name("collator-2")) .with_collator(|n| n.with_name("para-full-node").validator(false)) }) + .with_parachain(|p| { + p.with_id(HRMP_RECIPIENT_LOW) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("sync-backing") + .with_collator(|n| n.with_name("hrmp-recipient-low")) + }) + .with_parachain(|p| { + p.with_id(HRMP_RECIPIENT_HIGH) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("async-backing") + .with_collator(|n| n.with_name("hrmp-recipient-high")) + }) .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { Ok(val) => global_settings.with_base_dir(val), _ => global_settings, From 63be8bf0da0dd2a887df7bdc00eb7ad6e8f06e7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 17 Apr 2026 10:44:57 +0200 Subject: [PATCH 310/312] Test this --- 
.../zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs index 309d7e22b2c03..d61a4dffe9192 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs @@ -136,14 +136,14 @@ async fn build_network_config() -> Result { "configuration": { "config": { "scheduler_params": { - "num_cores": 3, + "num_cores": 2, "max_validators_per_core": 1 } } } })) .with_validator(|node| node.with_name("validator-0")); - (1..5).fold(r, |acc, i| { + (1..9).fold(r, |acc, i| { acc.with_validator(|node| node.with_name(&format!("validator-{i}"))) }) }) From 7c5fd34fba6bcd907645ed80d431f2e04e6f8585 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 17 Apr 2026 13:41:54 +0200 Subject: [PATCH 311/312] Close channels directly before doing the other checks --- cumulus/pallets/xcmp-queue/src/lib.rs | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index 49c24d081e200..4f891cee075bf 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -1101,17 +1101,6 @@ impl XcmpMessageSource for Pallet { flags, } = status; - if excluded_recipients.contains(para_id) { - return true; - } - - // This is a hard limit from the host config; not even signals can bypass it. - if result.len() == max_message_count { - // We check this condition in the beginning of the loop so that we don't include - // a message where the limit is 0. 
- return true; - } - let (max_size_now, max_size_ever) = match T::ChannelInfo::get_channel_status(*para_id) { ChannelStatus::Closed => { // This means that there is no such channel anymore. Nothing to be done but @@ -1128,6 +1117,18 @@ impl XcmpMessageSource for Pallet { ChannelStatus::Ready(max_size_now, max_size_ever) => (max_size_now, max_size_ever), }; + // Check if we should omit the recipient. + if excluded_recipients.contains(para_id) { + return true; + } + + // This is a hard limit from the host config; not even signals can bypass it. + if result.len() == max_message_count { + // We check this condition in the beginning of the loop so that we don't include + // a message where the limit is 0. + return true; + } + let page = 'page_fetch: { if *signals_exist { let page = >::get(*para_id); From 922e2c2ee3bfe09ccee256489df3aecb9259fff0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 17 Apr 2026 16:27:19 +0200 Subject: [PATCH 312/312] Wait for finality --- cumulus/zombienet/zombienet-sdk/tests/utils.rs | 1 + .../tests/zombie_ci/block_bundling/warp_sync.rs | 14 ++++++++------ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/cumulus/zombienet/zombienet-sdk/tests/utils.rs b/cumulus/zombienet/zombienet-sdk/tests/utils.rs index 70ecb56b099ae..ca9bc679f9f3f 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/utils.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/utils.rs @@ -4,6 +4,7 @@ use zombienet_sdk::{LocalFileSystem, Network, NetworkConfig}; pub const BEST_BLOCK_METRIC: &str = "block_height{status=\"best\"}"; +pub const FINALIZED_BLOCK_METRIC: &str = "block_height{status=\"finalized\"}"; pub async fn initialize_network( config: NetworkConfig, diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs index d61a4dffe9192..908bc959659a7 100644 --- 
a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs @@ -15,10 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::utils::{initialize_network, BEST_BLOCK_METRIC}; +use crate::utils::{initialize_network, BEST_BLOCK_METRIC, FINALIZED_BLOCK_METRIC}; use anyhow::anyhow; -use cumulus_zombienet_sdk_helpers::{assert_para_throughput, assign_cores}; -use polkadot_primitives::Id as ParaId; +use cumulus_zombienet_sdk_helpers::assign_cores; use serde_json::json; use std::time::Duration; use zombienet_orchestrator::network::node::LogLineCountOptions; @@ -59,9 +58,12 @@ async fn warp_sync_with_bundled_blocks() -> Result<(), anyhow::Error> { // Assign 2 extra cores (zombienet auto-assigns 1), for 3 total. assign_cores(&relay_client, PARA_ID, vec![0, 1]).await?; - // Wait for steady-state bundled block production. - log::info!("Waiting for steady-state block production"); - assert_para_throughput(&relay_client, 6, [(ParaId::from(PARA_ID), 12..19)], []).await?; + // Wait for steady-state bundled block production: collator finalizes parachain block #72. + log::info!("Waiting for collator to finalize parachain block #72"); + network + .get_node("collator-0")? + .wait_metric_with_timeout(FINALIZED_BLOCK_METRIC, |b| b >= 72.0, 200u64) + .await?; // Query collator's current best block to set a sync target. let target_block = network.get_node("collator-0")?.reports(BEST_BLOCK_METRIC).await? as u64;