,
P: Pair,
P::Public: Codec,
P::Signature: Codec,
@@ -235,36 +234,58 @@ where
runtime_api.set_call_context(sp_core::traits::CallContext::Onchain { import: false });
let authorities = runtime_api.authorities(parent_hash).ok()?;
let author_pub = aura_internal::claim_slot::(para_slot, &authorities, keystore).await?;
+ Some(SlotClaim::unchecked::
(author_pub, para_slot, timestamp))
+}
+// Checks if there is space in the unincluded segment.
+async fn can_build_upon(
+ parent_hash: Block::Hash,
+ included_block: Block::Hash,
+ relay_slot: Slot,
+ para_slot: Slot,
+ client: &Client,
+) -> bool
+where
+ Client: ProvideRuntimeApi,
+ Client::Api: AuraUnincludedSegmentApi + ApiExt,
+{
// This function is typically called when we want to build block N. At that point, the
// unincluded segment in the runtime is unaware of the hash of block N-1. If the unincluded
// segment in the runtime is full, but block N-1 is the included block, the unincluded segment
// should have length 0 and we can build. Since the hash is not available to the runtime
// however, we need this extra check here.
if parent_hash == included_block {
- return Some(SlotClaim::unchecked::(author_pub, para_slot, timestamp));
+ return true;
}
- let api_version = runtime_api
+ let runtime_api = client.runtime_api();
+ let Some(api_version) = runtime_api
.api_version::>(parent_hash)
.ok()
- .flatten()?;
+ .flatten()
+ else {
+ return false;
+ };
let slot = if api_version > 1 { relay_slot } else { para_slot };
runtime_api
.can_build_upon(parent_hash, included_block, slot)
- .ok()?
- .then(|| SlotClaim::unchecked::(author_pub, para_slot, timestamp))
+ .ok()
+ .unwrap_or(false)
}
/// Use [`cumulus_client_consensus_common::find_parent_for_building`] to find the best parachain
/// block to build on.
+///
+/// If the best parent does not pass `filter_parent`, walks backwards through ancestors
+/// until finding one that does, or reaching the included block.
async fn find_parent(
relay_parent: RelayHash,
para_id: ParaId,
para_backend: &impl sc_client_api::Backend,
relay_client: &impl RelayChainInterface,
+ filter_parent: impl Fn(&Block::Header) -> bool,
) -> Option>
where
Block: BlockT,
@@ -278,21 +299,21 @@ where
.saturating_sub(1) as usize,
};
- match cumulus_client_consensus_common::find_parent_for_building::(
+ let mut result = match cumulus_client_consensus_common::find_parent_for_building::(
parent_search_params,
para_backend,
relay_client,
)
.await
{
- Ok(Some(result)) => Some(result),
+ Ok(Some(result)) => result,
Ok(None) => {
tracing::warn!(
target: crate::LOG_TARGET,
?relay_parent,
"Could not find parent to build upon.",
);
- None
+ return None;
},
Err(e) => {
tracing::error!(
@@ -301,22 +322,44 @@ where
err = ?e,
"Could not find parent to build upon"
);
- None
+ return None;
},
+ };
+
+ // If the best parent doesn't pass the filter (e.g. it's a middle block in a bundle),
+ // walk backwards towards the included block until we find one that does.
+ // This avoids falling all the way back to the included block when there are valid
+ // last-in-core ancestors closer to the chain tip.
+ while !filter_parent(&result.best_parent_header) {
+ let parent_hash = *result.best_parent_header.parent_hash();
+ match para_backend.blockchain().header(parent_hash) {
+ Ok(Some(header)) => {
+ result.best_parent_header = header;
+ if parent_hash == result.included_header.hash() {
+ break;
+ }
+ },
+ _ => {
+ result.best_parent_header = result.included_header.clone();
+ break;
+ },
+ }
}
+
+ Some(result)
}
#[cfg(test)]
mod tests {
use super::*;
- use crate::collators::{can_build_upon, BackingGroupConnectionHelper};
+ use crate::collators::BackingGroupConnectionHelper;
use codec::Encode;
use cumulus_primitives_aura::Slot;
use cumulus_primitives_core::BlockT;
use cumulus_relay_chain_interface::PHash;
use cumulus_test_client::{
runtime::{Block, Hash},
- Client, DefaultTestClientBuilderExt, InitBlockBuilder, TestClientBuilder,
+ BuildBlockBuilder, Client, DefaultTestClientBuilderExt, TestClientBuilder,
TestClientBuilderExt,
};
use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder;
@@ -326,7 +369,6 @@ mod tests {
use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy};
use sp_consensus::BlockOrigin;
use sp_keystore::{Keystore, KeystorePtr};
- use sp_timestamp::Timestamp;
use std::sync::{Arc, Mutex};
async fn import_block>(
@@ -355,7 +397,11 @@ mod tests {
async fn build_and_import_block(client: &Client, included: Hash) -> Block {
let sproof = sproof_with_parent_by_hash(client, included);
- let block_builder = client.init_block_builder(None, sproof).block_builder;
+ let block_builder = client
+ .init_block_builder_builder()
+ .with_relay_sproof_builder(sproof)
+ .build()
+ .block_builder;
let block = block_builder.build().unwrap().block;
@@ -384,23 +430,22 @@ mod tests {
/// we are ensuring on the node side that we are are always able to build on the included block.
#[tokio::test]
async fn test_can_build_upon() {
- let (client, keystore) = set_up_components(6);
+ sp_tracing::try_init_simple();
+
+ let (client, _keystore) = set_up_components(6);
let genesis_hash = client.chain_info().genesis_hash;
let mut last_hash = genesis_hash;
// Fill up the unincluded segment tracker in the runtime.
- while can_build_upon::<_, _, sp_consensus_aura::sr25519::AuthorityPair>(
- Slot::from(u64::MAX),
- Slot::from(u64::MAX),
- Timestamp::default(),
+ while can_build_upon::<_, _>(
last_hash,
genesis_hash,
+ Slot::from(u64::MAX),
+ Slot::from(u64::MAX),
&*client,
- &keystore,
)
.await
- .is_some()
{
let block = build_and_import_block(&client, genesis_hash).await;
last_hash = block.header().hash();
@@ -408,17 +453,15 @@ mod tests {
// Blocks were built with the genesis hash set as included block.
// We call `can_build_upon` with the last built block as the included block.
- let result = can_build_upon::<_, _, sp_consensus_aura::sr25519::AuthorityPair>(
- Slot::from(u64::MAX),
- Slot::from(u64::MAX),
- Timestamp::default(),
+ let result = can_build_upon::<_, _>(
last_hash,
last_hash,
+ Slot::from(u64::MAX),
+ Slot::from(u64::MAX),
&*client,
- &keystore,
)
.await;
- assert!(result.is_some());
+ assert!(result);
}
/// Helper to create a mock overseer handle and message recorder
@@ -656,7 +699,7 @@ mod tests {
/// (both top-level and child trie keys) should be included in the relay chain state proof.
///
/// Falls back to an empty request if the runtime API call fails or is not implemented.
-fn get_relay_proof_request(
+pub(crate) fn get_relay_proof_request(
client: &Client,
parent_hash: Block::Hash,
) -> RelayProofRequest
@@ -676,6 +719,7 @@ where
}
/// Holds a relay parent and its descendants.
+#[derive(Clone)]
pub struct RelayParentData {
/// The relay parent block header
relay_parent: RelayHeader,
diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
index 248167e6d995c..7f6b4b4142ccc 100644
--- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
+++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
@@ -15,27 +15,27 @@
// You should have received a copy of the GNU General Public License
// along with Cumulus. If not, see .
-use codec::{Codec, Encode};
-
use super::CollatorMessage;
use crate::{
- collator::{self as collator_util, BuildBlockAndImportParams},
+ collator::{self as collator_util, BuildBlockAndImportParams, Collator, SlotClaim},
collators::{
check_validation_code_or_log,
slot_based::{
- relay_chain_data_cache::{RelayChainData, RelayChainDataCache},
+ relay_chain_data_cache::RelayChainDataCache,
slot_timer::{SlotInfo, SlotTimer},
},
- BackingGroupConnectionHelper, RelayParentData,
+ BackingGroupConnectionHelper, RelayHash, RelayParentData,
},
LOG_TARGET,
};
+use codec::{Codec, Encode};
use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface;
use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker};
+use cumulus_client_proof_size_recording::prepare_proof_size_recording_aux_data;
use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot};
use cumulus_primitives_core::{
- extract_relay_parent, rpsr_digest, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem,
- KeyToIncludeInRelayProof, PersistedValidationData, RelayParentOffsetApi,
+ BlockBundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem,
+ PersistedValidationData, RelayParentOffsetApi, TargetBlockRate,
};
use cumulus_relay_chain_interface::RelayChainInterface;
use futures::prelude::*;
@@ -44,19 +44,29 @@ use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider};
use sc_consensus::BlockImport;
use sc_consensus_aura::SlotDuration;
use sc_network_types::PeerId;
-use sp_api::{ApiExt, ProvideRuntimeApi};
+use sp_api::{ApiExt, ProofRecorder, ProvideRuntimeApi, StorageProof};
use sp_application_crypto::AppPublic;
+use sp_block_builder::BlockBuilder;
use sp_blockchain::HeaderBackend;
use sp_consensus::Environment;
use sp_consensus_aura::AuraApi;
use sp_core::crypto::Pair;
+use sp_externalities::Extensions;
use sp_inherents::CreateInherentDataProviders;
use sp_keystore::KeystorePtr;
use sp_runtime::{
- traits::{Block as BlockT, Header as HeaderT, Member, Zero},
+ traits::{Block as BlockT, HashingFor, Header as HeaderT, Member},
Saturating,
};
-use std::{collections::VecDeque, sync::Arc, time::Duration};
+use sp_trie::{
+ proof_size_extension::{ProofSizeExt, RecordingProofSizeProvider},
+ recorder::IgnoredNodes,
+};
+use std::{
+ collections::VecDeque,
+ sync::Arc,
+ time::{Duration, Instant},
+};
/// Parameters for [`run_block_builder`].
pub struct BuilderTaskParams<
@@ -94,8 +104,6 @@ pub struct BuilderTaskParams<
pub proposer: Proposer,
/// The generic collator service used to plug into this consensus engine.
pub collator_service: CS,
- /// The amount of time to spend authoring each block.
- pub authoring_duration: Duration,
/// Channel to send built blocks to the collation task.
pub collator_sender: sc_utils::mpsc::TracingUnboundedSender>,
/// Slot duration of the relay chain.
@@ -131,7 +139,9 @@ where
Client::Api: AuraApi
+ RelayParentOffsetApi
+ AuraUnincludedSegmentApi
- + KeyToIncludeInRelayProof,
+ + TargetBlockRate
+ + BlockBuilder
+ + cumulus_primitives_core::KeyToIncludeInRelayProof,
Backend: sc_client_api::Backend + 'static,
RelayClient: RelayChainInterface + Clone + 'static,
CIDP: CreateInherentDataProviders + 'static,
@@ -139,7 +149,7 @@ where
BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static,
Proposer: Environment + Send + Sync + 'static,
CS: CollatorServiceInterface + Send + Sync + 'static,
- CHP: consensus_common::ValidationCodeHashProvider + Send + 'static,
+ CHP: consensus_common::ValidationCodeHashProvider + Send + Sync + 'static,
P: Pair + Send + Sync + 'static,
P::Public: AppPublic + Member + Codec,
P::Signature: TryFrom> + Member + Codec,
@@ -158,18 +168,13 @@ where
collator_service,
collator_sender,
code_hash_provider,
- authoring_duration,
relay_chain_slot_duration,
para_backend,
slot_offset,
max_pov_percentage,
} = params;
- let mut slot_timer = SlotTimer::<_, _, P>::new_with_offset(
- para_client.clone(),
- slot_offset,
- relay_chain_slot_duration,
- );
+ let mut slot_timer = SlotTimer::new_with_offset(slot_offset, relay_chain_slot_duration);
let mut collator = {
let params = collator_util::Params {
@@ -210,7 +215,7 @@ where
loop {
// We wait here until the next slot arrives.
- if slot_timer.wait_until_next_slot().await.is_err() {
+ let Ok(slot_time) = slot_timer.wait_until_next_slot().await else {
tracing::error!(target: LOG_TARGET, "Unable to wait for next slot.");
return;
};
@@ -251,6 +256,7 @@ where
continue;
};
+ // Use the slot calculated from the relay parent.
let Some(para_slot) = adjust_para_to_relay_parent_slot(
rp_data.relay_parent(),
relay_chain_slot_duration,
@@ -262,72 +268,53 @@ where
let relay_parent = rp_data.relay_parent().hash();
let relay_parent_header = rp_data.relay_parent().clone();
- let Some(parent_search_result) =
- crate::collators::find_parent(relay_parent, para_id, &*para_backend, &relay_client)
- .await
+ let Some(parent_search_result) = crate::collators::find_parent(
+ relay_parent,
+ para_id,
+ &*para_backend,
+ &relay_client,
+ |parent| {
+ // We never want to build on any "middle block" that isn't the last block in a
+ // core.
+ // When the digest item doesn't exist, we are running in compatibility
+ // mode and all parents are valid.
+ CumulusDigestItem::is_last_block_in_core(parent.digest()).unwrap_or(true)
+ },
+ )
+ .await
else {
continue;
};
- let parent_hash = parent_search_result.best_parent_header.hash();
let included_header = parent_search_result.included_header;
- let parent_header = &parent_search_result.best_parent_header;
- // Distance from included block to best parent (unincluded segment length).
+ let initial_parent_hash = parent_search_result.best_parent_header.hash();
+ let initial_parent_header = parent_search_result.best_parent_header;
let unincluded_segment_len =
- parent_header.number().saturating_sub(*included_header.number());
-
- // Retrieve the core.
- let core = match determine_core(
- &mut relay_chain_data_cache,
- &relay_parent_header,
- para_id,
- parent_header,
- relay_parent_offset,
- )
- .await
- {
- Err(()) => {
- tracing::debug!(
- target: LOG_TARGET,
- ?relay_parent,
- "Failed to determine core"
- );
+ initial_parent_header.number().saturating_sub(*included_header.number());
- continue;
- },
- Ok(Some(cores)) => {
- tracing::debug!(
- target: LOG_TARGET,
- ?relay_parent,
- core_selector = ?cores.selector,
- claim_queue_offset = ?cores.claim_queue_offset,
- "Going to claim core",
- );
-
- cores
- },
- Ok(None) => {
- tracing::debug!(
- target: LOG_TARGET,
- ?relay_parent,
- "No core scheduled"
- );
-
- continue;
- },
- };
-
- let Ok(RelayChainData { max_pov_size, last_claimed_core_selector, .. }) =
- relay_chain_data_cache.get_mut_relay_chain_data(relay_parent).await
+ let Ok(max_pov_size) = relay_chain_data_cache
+ .get_mut_relay_chain_data(relay_parent)
+ .await
+ .map(|d| d.max_pov_size)
else {
continue;
};
- slot_timer.update_scheduling(core.total_cores().into());
+ let allowed_pov_size = if let Some(max_pov_percentage) = max_pov_percentage {
+ max_pov_size * max_pov_percentage / 100
+ } else {
+ // Set the block limit to 85% of the maximum PoV size.
+ //
+ // Once https://github.com/paritytech/polkadot-sdk/issues/6020 issue is
+ // fixed, this should be removed.
+ max_pov_size * 85 / 100
+ } as usize;
// We mainly call this to inform users at genesis if there is a mismatch with the
// on-chain data.
- collator.collator_service().check_block_status(parent_hash, parent_header);
+ collator
+ .collator_service()
+ .check_block_status(initial_parent_hash, &initial_parent_header);
let Ok(relay_slot) =
sc_consensus_babe::find_pre_digest::(&relay_parent_header)
@@ -343,167 +330,518 @@ where
let mut runtime_api = para_client.runtime_api();
runtime_api
.set_call_context(sp_core::traits::CallContext::Onchain { import: false });
- if let Ok(authorities) = runtime_api.authorities(parent_hash) {
+ if let Ok(authorities) = runtime_api.authorities(initial_parent_hash) {
connection_helper.update::(para_slot.slot, &authorities).await;
}
}
- let slot_claim = match crate::collators::can_build_upon::<_, _, P>(
+ let Some(slot_claim) = crate::collators::claim_slot::<_, _, P>(
para_slot.slot,
- relay_slot,
para_slot.timestamp,
- parent_hash,
- included_header_hash,
+ initial_parent_hash,
&*para_client,
&keystore,
)
.await
- {
- Some(slot) => slot,
- None => {
- tracing::debug!(
- target: crate::LOG_TARGET,
- ?unincluded_segment_len,
- relay_parent = ?relay_parent,
- relay_parent_num = %relay_parent_header.number(),
- included_hash = ?included_header_hash,
- included_num = %included_header.number(),
- parent = ?parent_hash,
- slot = ?para_slot.slot,
- "Not building block."
- );
- continue;
- },
+ else {
+ tracing::debug!(
+ target: crate::LOG_TARGET,
+ ?unincluded_segment_len,
+ relay_parent = ?relay_parent,
+ relay_parent_num = %relay_parent_header.number(),
+ included_hash = ?included_header_hash,
+ included_num = %included_header.number(),
+ initial_parent = ?initial_parent_hash,
+ slot = ?para_slot.slot,
+ "Not eligible to claim slot."
+ );
+ continue;
};
tracing::debug!(
target: crate::LOG_TARGET,
?unincluded_segment_len,
- relay_parent = %relay_parent,
+ relay_parent = ?relay_parent,
relay_parent_num = %relay_parent_header.number(),
relay_parent_offset,
- included_hash = %included_header_hash,
+ included_hash = ?included_header_hash,
included_num = %included_header.number(),
- parent = %parent_hash,
+ initial_parent = ?initial_parent_hash,
slot = ?para_slot.slot,
- "Building block."
+ "Claiming slot."
);
- let validation_data = PersistedValidationData {
- parent_head: parent_header.encode().into(),
- relay_parent_number: *relay_parent_header.number(),
- relay_parent_storage_root: *relay_parent_header.state_root(),
- max_pov_size: *max_pov_size,
- };
-
- let relay_proof_request =
- super::super::get_relay_proof_request(&*para_client, parent_hash);
-
- let (parachain_inherent_data, other_inherent_data) = match collator
- .create_inherent_data_with_rp_offset(
- relay_parent,
- &validation_data,
- parent_hash,
- slot_claim.timestamp(),
- Some(rp_data),
- relay_proof_request,
- collator_peer_id,
- )
- .await
+ let mut cores = match determine_cores(
+ &mut relay_chain_data_cache,
+ &relay_parent_header,
+ para_id,
+ relay_parent_offset,
+ )
+ .await
{
- Err(err) => {
- tracing::error!(target: crate::LOG_TARGET, ?err);
- break;
+ Ok(Some(core)) => core,
+ Ok(None) => {
+ tracing::debug!(
+ target: crate::LOG_TARGET,
+ relay_parent = ?relay_parent,
+ "No cores scheduled."
+ );
+ continue;
},
- Ok(x) => x,
- };
+ Err(()) => {
+ tracing::error!(
+ target: crate::LOG_TARGET,
+ relay_parent = ?relay_parent,
+ "Failed to determine cores."
+ );
- let validation_code_hash = match code_hash_provider.code_hash_at(parent_hash) {
- None => {
- tracing::error!(target: crate::LOG_TARGET, ?parent_hash, "Could not fetch validation code hash");
break;
},
- Some(v) => v,
};
- check_validation_code_or_log(
- &validation_code_hash,
- para_id,
- &relay_client,
- relay_parent,
+ let number_of_blocks =
+ match para_client.runtime_api().target_block_rate(initial_parent_hash) {
+ Ok(interval) => interval,
+ Err(error) => {
+ tracing::debug!(
+ target: crate::LOG_TARGET,
+ block = ?initial_parent_hash,
+ ?error,
+ "Failed to fetch `slot_schedule`, assuming one block per core"
+ );
+
+ // For backwards compatibility, we use the number of cores as the number of blocks.
+ cores.total_cores()
+ },
+ };
+
+ // In total we want to have at max `number_of_blocks` cores to use.
+ cores.truncate_cores(number_of_blocks);
+ let raw_blocks_per_core = (number_of_blocks / cores.total_cores()).max(1);
+ let left_over_blocks = number_of_blocks % cores.total_cores();
+ let blocks_per_cores = (0..cores.total_cores())
+ .map(|i| {
+ // We distribute the left over blocks across the cores.
+ raw_blocks_per_core + u32::from(i < left_over_blocks)
+ })
+ .collect::>();
+
+ tracing::debug!(
+ target: crate::LOG_TARGET,
+ ?blocks_per_cores,
+ core_indices = ?cores.core_indices(),
+ "Core configuration",
+ );
+
+ let mut pov_parent_header = initial_parent_header;
+ let mut pov_parent_hash = initial_parent_hash;
+ let block_time = relay_chain_slot_duration / number_of_blocks;
+
+ for blocks_per_core in blocks_per_cores {
+ let time_for_core = slot_time.time_left() / cores.cores_left();
+
+ match build_collation_for_core(BuildCollationParams {
+ pov_parent_header,
+ pov_parent_hash,
+ relay_parent_header: &relay_parent_header,
+ relay_parent_hash: relay_parent,
+ max_pov_size,
+ para_id,
+ relay_client: &relay_client,
+ code_hash_provider: &code_hash_provider,
+ slot_claim: &slot_claim,
+ collator_sender: &collator_sender,
+ collator: &mut collator,
+ allowed_pov_size,
+ core_info: cores.core_info(),
+ core_index: cores.core_index(),
+ block_time,
+ blocks_per_core,
+ time_for_core,
+ is_last_core_in_parachain_slot: cores.is_last_core() &&
+ slot_time.is_parachain_slot_ending(para_slot_duration.as_duration()),
+ collator_peer_id,
+ relay_parent_data: rp_data.clone(),
+ total_number_of_blocks: number_of_blocks,
+ included_header_hash,
+ relay_slot,
+ para_slot: para_slot.slot,
+ para_client: &*para_client,
+ })
+ .await
+ {
+ Ok(Some(header)) => {
+ pov_parent_header = header;
+ pov_parent_hash = pov_parent_header.hash();
+ },
+ // Let's wait for the next slot
+ Ok(None) => break,
+ Err(()) => return,
+ }
+
+ if !cores.advance() {
+ break;
+ }
+ }
+ }
+ }
+}
+
+/// Parameters for [`build_collation_for_core`].
+struct BuildCollationParams<
+ 'a,
+ Block: BlockT,
+ P: Pair,
+ RelayClient,
+ BI,
+ CIDP,
+ Proposer,
+ CS,
+ CHP,
+ Client,
+> {
+ pov_parent_header: Block::Header,
+ pov_parent_hash: Block::Hash,
+ relay_parent_header: &'a RelayHeader,
+ relay_parent_hash: RelayHash,
+ max_pov_size: u32,
+ para_id: ParaId,
+ relay_client: &'a RelayClient,
+ code_hash_provider: &'a CHP,
+ slot_claim: &'a SlotClaim,
+ collator_sender: &'a sc_utils::mpsc::TracingUnboundedSender>,
+ collator: &'a mut Collator,
+ allowed_pov_size: usize,
+ core_info: CoreInfo,
+ core_index: CoreIndex,
+ block_time: Duration,
+ blocks_per_core: u32,
+ /// Time allocated for the core.
+ time_for_core: Duration,
+ is_last_core_in_parachain_slot: bool,
+ collator_peer_id: PeerId,
+ relay_parent_data: RelayParentData,
+ total_number_of_blocks: u32,
+ included_header_hash: Block::Hash,
+ relay_slot: cumulus_primitives_aura::Slot,
+ para_slot: cumulus_primitives_aura::Slot,
+ para_client: &'a Client,
+}
+
+/// Build a collation for one core.
+///
+/// One collation can be composed of multiple blocks.
+async fn build_collation_for_core<
+ Block: BlockT,
+ P,
+ RelayClient,
+ BI,
+ CIDP,
+ Proposer,
+ CS,
+ CHP,
+ Client,
+>(
+ BuildCollationParams {
+ pov_parent_header,
+ pov_parent_hash,
+ relay_parent_header,
+ relay_parent_hash,
+ max_pov_size,
+ para_id,
+ relay_client,
+ code_hash_provider,
+ slot_claim,
+ collator_sender,
+ collator,
+ allowed_pov_size,
+ core_info,
+ core_index,
+ block_time,
+ blocks_per_core,
+ time_for_core: slot_time_for_core,
+ is_last_core_in_parachain_slot,
+ collator_peer_id,
+ relay_parent_data,
+ total_number_of_blocks,
+ included_header_hash,
+ relay_slot,
+ para_slot,
+ para_client,
+ }: BuildCollationParams<'_, Block, P, RelayClient, BI, CIDP, Proposer, CS, CHP, Client>,
+) -> Result