diff --git a/Cargo.lock b/Cargo.lock index 6ee5718427628..61e43246e429c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4634,6 +4634,7 @@ dependencies = [ "cumulus-relay-chain-minimal-node", "cumulus-relay-chain-streams", "futures", + "polkadot-overseer", "polkadot-primitives", "prometheus", "sc-client-api", @@ -15297,6 +15298,7 @@ dependencies = [ "fatality", "futures", "futures-timer", + "itertools 0.11.0", "parity-scale-codec", "polkadot-node-network-protocol", "polkadot-node-primitives", diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 55835ef4dcb8b..86f8eddbb4a9c 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -45,7 +45,11 @@ use polkadot_node_subsystem::messages::CollationGenerationMessage; use polkadot_overseer::Handle as OverseerHandle; use polkadot_primitives::{CollatorPair, Id as ParaId, OccupiedCoreAssumption}; -use crate::{collator as collator_util, collators::claim_queue_at, export_pov_to_path}; +use crate::{ + collator as collator_util, + collators::{claim_queue_at, BackingGroupConnectionHelper}, + export_pov_to_path, +}; use futures::prelude::*; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; use sc_consensus::BlockImport; @@ -122,7 +126,7 @@ where Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, - P: Pair, + P: Pair + Send + Sync + 'static, P::Public: AppPublic + Member + Codec, P::Signature: TryFrom> + Member + Codec, { @@ -174,7 +178,7 @@ where Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, - P: Pair, + P: Pair + Send + Sync + 'static, P::Public: AppPublic + Member + Codec, P::Signature: TryFrom> + Member + Codec, { 
@@ -215,6 +219,13 @@ where collator_util::Collator::::new(params) }; + let mut connection_helper: BackingGroupConnectionHelper = + BackingGroupConnectionHelper::new( + params.para_client.clone(), + params.keystore.clone(), + params.overseer_handle.clone(), + ); + while let Some(relay_parent_header) = import_notifications.next().await { let relay_parent = relay_parent_header.hash(); @@ -290,14 +301,17 @@ where relay_chain_slot_duration = ?params.relay_chain_slot_duration, "Adjusted relay-chain slot to parachain slot" ); - Some(super::can_build_upon::<_, _, P>( + Some(( slot_now, - relay_slot, - timestamp, - block_hash, - included_block.hash(), - para_client, - &keystore, + super::can_build_upon::<_, _, P>( + slot_now, + relay_slot, + timestamp, + block_hash, + included_block.hash(), + para_client, + &keystore, + ), )) }; @@ -316,8 +330,11 @@ where // scheduled chains this ensures that the backlog will grow steadily. for n_built in 0..2 { let slot_claim = match can_build_upon(parent_hash) { - Some(fut) => match fut.await { - None => break, + Some((current_slot, fut)) => match fut.await { + None => { + connection_helper.update::(current_slot, parent_hash).await; + break + }, Some(c) => c, }, None => break, diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 0310f2944dc58..1a945236b392c 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -26,8 +26,8 @@ use codec::Codec; use cumulus_client_consensus_common::{self as consensus_common, ParentSearchParams}; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; use cumulus_primitives_core::{relay_chain::Header as RelayHeader, BlockT}; -use cumulus_relay_chain_interface::RelayChainInterface; -use polkadot_node_subsystem::messages::RuntimeApiRequest; +use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; +use 
polkadot_node_subsystem::messages::{CollatorProtocolMessage, RuntimeApiRequest}; use polkadot_node_subsystem_util::runtime::ClaimQueueSnapshot; use polkadot_primitives::{ Hash as RelayHash, Id as ParaId, OccupiedCoreAssumption, ValidationCodeHash, @@ -56,6 +56,69 @@ pub mod slot_based; // sanity check. const PARENT_SEARCH_DEPTH: usize = 40; +// Helper to pre-connect to the backing group we got assigned to and keep the connection +// open until backing group changes or own slot ends. +struct BackingGroupConnectionHelper { + client: std::sync::Arc, + keystore: sp_keystore::KeystorePtr, + overseer_handle: OverseerHandle, + our_slot: Option, +} + +impl BackingGroupConnectionHelper { + pub fn new( + client: std::sync::Arc, + keystore: sp_keystore::KeystorePtr, + overseer_handle: OverseerHandle, + ) -> Self { + Self { client, keystore, overseer_handle, our_slot: None } + } + + async fn send_subsystem_message(&mut self, message: CollatorProtocolMessage) { + self.overseer_handle.send_msg(message, "BackingGroupConnectionHelper").await; + } + + /// Update the current slot and initiate connections to backing groups if needed. + pub async fn update(&mut self, current_slot: Slot, best_block: Block::Hash) + where + Block: sp_runtime::traits::Block, + Client: + sc_client_api::HeaderBackend + Send + Sync + ProvideRuntimeApi + 'static, + Client::Api: AuraApi, + P: sp_core::Pair + Send + Sync, + P::Public: Codec, + { + if Some(current_slot) <= self.our_slot { + // Current slot or next slot is ours. + // We already sent pre-connect message, no need to proceed further. + return + } + + let Some(authorities) = self.client.runtime_api().authorities(best_block).ok() else { + return + }; + + let next_slot = current_slot + 1; + let next_slot_is_ours = + aura_internal::claim_slot::

(next_slot, &authorities, &self.keystore) + .await + .is_some(); + + if next_slot_is_ours { + // Next slot is ours, send connect message. + tracing::debug!(target: crate::LOG_TARGET, "Our slot {} is next, connecting to backing groups", next_slot); + self.send_subsystem_message(CollatorProtocolMessage::ConnectToBackingGroups) + .await; + self.our_slot = Some(next_slot); + } else if self.our_slot.take().is_some() { + // Next slot is not ours, send disconnect only if we had a slot before. + tracing::debug!(target: crate::LOG_TARGET, "Current slot = {}, disconnecting from backing groups", current_slot); + self.send_subsystem_message(CollatorProtocolMessage::DisconnectFromBackingGroups) + .await; + } + } +} + /// Check the `local_validation_code_hash` against the validation code hash in the relay chain /// state. /// @@ -266,7 +329,8 @@ where #[cfg(test)] mod tests { - use crate::collators::can_build_upon; + use super::*; + use crate::collators::{can_build_upon, BackingGroupConnectionHelper}; use codec::Encode; use cumulus_primitives_aura::Slot; use cumulus_primitives_core::BlockT; @@ -277,12 +341,14 @@ mod tests { TestClientBuilderExt, }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; + use futures::StreamExt; + use polkadot_overseer::{Event, Handle}; use polkadot_primitives::HeadData; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sp_consensus::BlockOrigin; use sp_keystore::{Keystore, KeystorePtr}; use sp_timestamp::Timestamp; - use std::sync::Arc; + use std::sync::{Arc, Mutex}; async fn import_block>( importer: &I, @@ -319,9 +385,9 @@ mod tests { block } - fn set_up_components() -> (Arc, KeystorePtr) { + fn set_up_components(num_authorities: usize) -> (Arc, KeystorePtr) { let keystore = Arc::new(sp_keystore::testing::MemoryKeystore::new()) as Arc<_>; - for key in sp_keyring::Sr25519Keyring::iter() { + for key in sp_keyring::Sr25519Keyring::iter().take(num_authorities) { Keystore::sr25519_generate_new( &*keystore, 
sp_application_crypto::key_types::AURA, @@ -339,7 +405,7 @@ mod tests { /// we are ensuring on the node side that we are are always able to build on the included block. #[tokio::test] async fn test_can_build_upon() { - let (client, keystore) = set_up_components(); + let (client, keystore) = set_up_components(6); let genesis_hash = client.chain_info().genesis_hash; let mut last_hash = genesis_hash; @@ -375,6 +441,222 @@ mod tests { .await; assert!(result.is_some()); } + + /// Helper to create a mock overseer handle and message recorder + fn create_overseer_handle() -> (OverseerHandle, Arc>>) { + let messages = Arc::new(Mutex::new(Vec::new())); + let messages_clone = messages.clone(); + + let (tx, mut rx) = polkadot_node_subsystem_util::metered::channel(100); + + // Spawn a task to receive and record overseer messages + tokio::spawn(async move { + while let Some(event) = rx.next().await { + if let Event::MsgToSubsystem { msg, .. } = event { + if let polkadot_node_subsystem::AllMessages::CollatorProtocol(cp_msg) = msg { + messages_clone.lock().unwrap().push(cp_msg); + } + } + } + }); + + (Handle::new(tx), messages) + } + + #[tokio::test] + async fn preconnect_when_next_slot_is_ours() { + let (client, keystore) = set_up_components(6); + let genesis_hash = client.chain_info().genesis_hash; + let (overseer_handle, messages_recorder) = create_overseer_handle(); + + let mut helper = BackingGroupConnectionHelper::new(client, keystore, overseer_handle); + + // Update with slot 0, next slot (1) should be ours + helper + .update::(Slot::from(0), genesis_hash) + .await; + + // Give time for message to be processed + tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; + + let messages = messages_recorder.lock().unwrap(); + assert_eq!(messages.len(), 1); + assert!(matches!(messages[0], CollatorProtocolMessage::ConnectToBackingGroups)); + assert_eq!(helper.our_slot, Some(Slot::from(1))); + } + + #[tokio::test] + async fn preconnect_no_duplicate_connect_message() { + 
let (client, keystore) = set_up_components(6); + let genesis_hash = client.chain_info().genesis_hash; + let (overseer_handle, messages_recorder) = create_overseer_handle(); + + let mut helper = BackingGroupConnectionHelper::new(client, keystore, overseer_handle); + + // Update with slot 0, next slot (1) is ours + helper + .update::(Slot::from(0), genesis_hash) + .await; + + // Give time for message to be processed + tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; + assert_eq!(messages_recorder.lock().unwrap().len(), 1); + messages_recorder.lock().unwrap().clear(); + + // Update with slot 0 again - should not send another message + helper + .update::(Slot::from(0), genesis_hash) + .await; + tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; + assert_eq!(messages_recorder.lock().unwrap().len(), 0); + + // Update with slot 1 (our slot) - should not send another message + helper + .update::(Slot::from(1), genesis_hash) + .await; + tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; + assert_eq!(messages_recorder.lock().unwrap().len(), 0); + } + + #[tokio::test] + async fn preconnect_disconnect_when_slot_passes() { + let (client, keystore) = set_up_components(1); + let genesis_hash = client.chain_info().genesis_hash; + let (overseer_handle, messages_recorder) = create_overseer_handle(); + + let mut helper = BackingGroupConnectionHelper::new(client, keystore, overseer_handle); + + // Slot 0 -> Alice, Slot 1 -> Bob, Slot 2 -> Charlie, Slot 3 -> Dave, Slot 4 -> Eve, + // Slot 5 -> Ferdie, Slot 6 -> Alice + + // Update with slot 5, next slot (6) is ours -> should connect + helper + .update::(Slot::from(5), genesis_hash) + .await; + tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; + assert_eq!(helper.our_slot, Some(Slot::from(6))); + messages_recorder.lock().unwrap().clear(); + + // Update with slot 8, next slot (9) is Dave's -> should disconnect + helper + .update::(Slot::from(8), genesis_hash) + .await; + 
tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; + + { + let messages = messages_recorder.lock().unwrap(); + assert_eq!(messages.len(), 1, "Expected exactly one disconnect message"); + assert!(matches!(messages[0], CollatorProtocolMessage::DisconnectFromBackingGroups)); + assert_eq!(helper.our_slot, None); + } + + messages_recorder.lock().unwrap().clear(); + + // Update again with slot 8, next slot (9) is Dave's -> should not send another + // disconnect message + helper + .update::(Slot::from(8), genesis_hash) + .await; + tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; + + let messages = messages_recorder.lock().unwrap(); + assert_eq!(messages.len(), 0, "Expected no messages"); + assert_eq!(helper.our_slot, None); + } + + #[tokio::test] + async fn preconnect_no_disconnect_without_previous_connection() { + let (client, keystore) = set_up_components(1); + let genesis_hash = client.chain_info().genesis_hash; + let (overseer_handle, messages_recorder) = create_overseer_handle(); + + let mut helper = BackingGroupConnectionHelper::new(client, keystore, overseer_handle); + + // Slot 0 -> Alice, Slot 1 -> Bob, Slot 2 -> Charlie, Slot 3 -> Dave, Slot 4 -> Eve, + // Slot 5 -> Ferdie + + // Update with slot 1 (Bob's slot), next slot (2) is Charlie's + // Since we never connected before (our_slot is None), we should not send disconnect + helper + .update::(Slot::from(1), genesis_hash) + .await; + + tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; + // Should not send any message since we never connected + assert_eq!(messages_recorder.lock().unwrap().len(), 0); + assert_eq!(helper.our_slot, None); + } + + #[tokio::test] + async fn preconnect_multiple_cycles() { + let (client, keystore) = set_up_components(1); + let genesis_hash = client.chain_info().genesis_hash; + let (overseer_handle, messages_recorder) = create_overseer_handle(); + + let mut helper = BackingGroupConnectionHelper::new(client, keystore, overseer_handle); 
+ + // Slot 0 -> Alice, Slot 1 -> Bob, Slot 2 -> Charlie, Slot 3 -> Dave, Slot 4 -> Eve, + // Slot 5 -> Ferdie, Slot 6 -> Alice, Slot 7 -> Bob, ... + + // Cycle 1: Connect at slot 5, next slot (6) is ours + helper + .update::(Slot::from(5), genesis_hash) + .await; + tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; + { + let messages = messages_recorder.lock().unwrap(); + assert_eq!(messages.len(), 1); + assert!(matches!(messages[0], CollatorProtocolMessage::ConnectToBackingGroups)); + } + assert_eq!(helper.our_slot, Some(Slot::from(6))); + messages_recorder.lock().unwrap().clear(); + + // Cycle 1: Disconnect at slot 7, next slot (8) is Charlie's + helper + .update::(Slot::from(7), genesis_hash) + .await; + tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; + { + let messages = messages_recorder.lock().unwrap(); + assert_eq!(messages.len(), 1); + assert!(matches!(messages[0], CollatorProtocolMessage::DisconnectFromBackingGroups)); + } + assert_eq!(helper.our_slot, None); + messages_recorder.lock().unwrap().clear(); + + // Cycle 2: Connect again at slot 11, next slot (12) is ours + helper + .update::( + Slot::from(11), + genesis_hash, + ) + .await; + tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; + { + let messages = messages_recorder.lock().unwrap(); + assert_eq!(messages.len(), 1); + assert!(matches!(messages[0], CollatorProtocolMessage::ConnectToBackingGroups)); + } + assert_eq!(helper.our_slot, Some(Slot::from(12))); + } + + #[tokio::test] + async fn preconnect_handles_runtime_api_error() { + let keystore = Arc::new(sp_keystore::testing::MemoryKeystore::new()) as Arc<_>; + let client = Arc::new(TestClientBuilder::new().build()); + let (overseer_handle, messages_recorder) = create_overseer_handle(); + + let mut helper = BackingGroupConnectionHelper::new(client, keystore, overseer_handle); + + let invalid_hash = Hash::default(); + helper + .update::(Slot::from(0), invalid_hash) + .await; + + 
tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; + // Should not send any message if runtime API fails + assert_eq!(messages_recorder.lock().unwrap().len(), 0); + } } /// Holds a relay parent and its descendants. diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index e6360a3c8408e..20ce8b284e4c2 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -26,7 +26,7 @@ use crate::{ relay_chain_data_cache::{RelayChainData, RelayChainDataCache}, slot_timer::{SlotInfo, SlotTimer}, }, - RelayParentData, + BackingGroupConnectionHelper, RelayParentData, }, LOG_TARGET, }; @@ -134,7 +134,7 @@ where Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, - P: Pair, + P: Pair + Send + Sync + 'static, P::Public: AppPublic + Member + Codec, P::Signature: TryFrom> + Member + Codec, { @@ -180,6 +180,16 @@ where let mut relay_chain_data_cache = RelayChainDataCache::new(relay_client.clone(), para_id); + let mut maybe_connection_helper = relay_client + .overseer_handle() + .ok() + .map(|h| BackingGroupConnectionHelper::new(para_client.clone(), keystore.clone(), h.clone())) + .or_else(|| { + tracing::warn!(target: LOG_TARGET, + "Relay chain interface does not provide overseer handle. Backing group pre-connect is disabled."); + None + }); + loop { // We wait here until the next slot arrives. if slot_timer.wait_until_next_slot().await.is_err() { @@ -319,6 +329,9 @@ where slot = ?para_slot.slot, "Not building block." 
); + if let Some(ref mut connection_helper) = maybe_connection_helper { + connection_helper.update::(para_slot.slot, parent_hash).await; + } continue }, }; diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index c939fb8d1275a..2fcb662f88e91 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -172,10 +172,10 @@ pub fn run + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + Clone + 'static, CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, - P: Pair + 'static, + P: Pair + Send + Sync + 'static, P::Public: AppPublic + Member + Codec, P::Signature: TryFrom> + Member + Codec, - Spawner: SpawnNamed, + Spawner: SpawnNamed + Clone + 'static, { let Params { create_inherent_data_providers, diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index c7e953a33d0a6..4120c312deba1 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -40,6 +40,7 @@ sp-transaction-pool = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } # Polkadot +polkadot-overseer = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } # Cumulus diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs index ec4eb36689dcc..086b50c0819f5 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs @@ -441,7 +441,8 @@ where + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + substrate_frame_rpc_system::AccountNonceApi + GetParachainInfo, - AuraId: AuraIdT + Sync, + AuraId: AuraIdT + Sync + Send, + ::Pair: Send + Sync, { if extra_args.authoring_policy == AuthoringPolicy::SlotBased { Box::new(AuraNode::< 
@@ -472,7 +473,8 @@ impl, RuntimeApi, AuraId> where RuntimeApi: ConstructNodeRuntimeApi>, RuntimeApi::RuntimeApi: AuraRuntimeApi, - AuraId: AuraIdT + Sync, + AuraId: AuraIdT + Sync + Send, + ::Pair: Send + Sync, { #[docify::export_content] fn launch_slot_based_collator( @@ -501,7 +503,7 @@ where CHP: cumulus_client_consensus_common::ValidationCodeHashProvider + Send + 'static, Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + Clone + 'static, - Spawner: SpawnNamed, + Spawner: SpawnNamed + Clone + 'static, { slot_based::run::::Pair, _, _, _, _, _, _, _, _, _>( params_with_export, @@ -523,7 +525,8 @@ impl, RuntimeApi, AuraId> where RuntimeApi: ConstructNodeRuntimeApi>, RuntimeApi::RuntimeApi: AuraRuntimeApi, - AuraId: AuraIdT + Sync, + AuraId: AuraIdT + Sync + Send, + ::Pair: Send + Sync, { fn start_consensus( client: Arc>, @@ -654,7 +657,8 @@ impl, RuntimeApi, AuraId> where RuntimeApi: ConstructNodeRuntimeApi>, RuntimeApi::RuntimeApi: AuraRuntimeApi, - AuraId: AuraIdT + Sync, + AuraId: AuraIdT + Sync + Send, + ::Pair: Send + Sync, { fn start_consensus( client: Arc>, diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml index cd9ea08446a1f..92592045c0e44 100644 --- a/polkadot/node/network/collator-protocol/Cargo.toml +++ b/polkadot/node/network/collator-protocol/Cargo.toml @@ -47,6 +47,7 @@ sc-network = { workspace = true, default-features = true } sp-core = { features = ["std"], workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } +itertools = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index 8bd0c3bf2da61..97c95d1b06ad1 100644 --- 
a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -382,6 +382,9 @@ struct State { /// An utility for tracking all collations produced by the collator. collation_tracker: CollationTracker, + + /// Should we be connected to backers ? + connect_to_backers: bool, } impl State { @@ -409,6 +412,7 @@ impl State { advertisement_timeouts: Default::default(), reputation, collation_tracker: Default::default(), + connect_to_backers: false, } } } @@ -437,7 +441,15 @@ async fn distribute_collation( // We should already be connected to the validators, but if we aren't, we will try to connect to // them now. - connect_to_validators(ctx, &state.implicit_view, &state.per_relay_parent, id).await; + update_validator_connections( + ctx, + &state.peer_ids, + &state.implicit_view, + &state.per_relay_parent, + id, + true, + ) + .await; let per_relay_parent = match state.per_relay_parent.get_mut(&candidate_relay_parent) { Some(per_relay_parent) => per_relay_parent, @@ -710,41 +722,63 @@ fn list_of_backing_validators_in_view( backing_validators.into_iter().collect() } -/// Updates a set of connected validators based on their advertisement-bits -/// in a validators buffer. +/// Connect or disconnect to/from all backers at all viable relay parents. #[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)] -async fn connect_to_validators( +async fn update_validator_connections( ctx: &mut Context, + peer_ids: &HashMap>, implicit_view: &Option, per_relay_parent: &HashMap, para_id: ParaId, + connect: bool, ) { - let cores_assigned = has_assigned_cores(implicit_view, per_relay_parent); - // If no cores are assigned to the para, we still need to send a ConnectToValidators request to - // the network bridge passing an empty list of validator ids. Otherwise, it will keep connecting - // to the last requested validators until a new request is issued. 
- let validator_ids = if cores_assigned { - list_of_backing_validators_in_view(implicit_view, per_relay_parent, para_id) + gum::trace!(target: LOG_TARGET, ?para_id, ?connect, "update_validator_connections"); + + // Ignore address resolution failure, will reissue a new request on new collation. + let (failed, _) = oneshot::channel(); + + let msg = if connect { + let cores_assigned = has_assigned_cores(implicit_view, per_relay_parent); + // If no cores are assigned to the para, we still need to send a ConnectToValidators request + // to the network bridge passing an empty list of validator ids. Otherwise, it will keep + // connecting to the last requested validators until a new request is issued. + let validator_ids = if cores_assigned { + list_of_backing_validators_in_view(implicit_view, per_relay_parent, para_id) + } else { + Vec::new() + }; + + gum::trace!( + target: LOG_TARGET, + ?cores_assigned, + "Sending connection request to validators: {:?}", + validator_ids, + ); + NetworkBridgeTxMessage::ConnectToValidators { + validator_ids, + peer_set: PeerSet::Collation, + failed, + } } else { - Vec::new() - }; + if peer_ids.is_empty() { + return + } - gum::trace!( - target: LOG_TARGET, - ?cores_assigned, - "Sending connection request to validators: {:?}", - validator_ids, - ); + gum::trace!( + target: LOG_TARGET, + "Disconnecting from validators: {:?}", + peer_ids.keys(), + ); - // ignore address resolution failure - // will reissue a new request on new collation - let (failed, _) = oneshot::channel(); - ctx.send_message(NetworkBridgeTxMessage::ConnectToValidators { - validator_ids, - peer_set: PeerSet::Collation, - failed, - }) - .await; + // Disconnect from all connected validators on the `Collation` protocol. + NetworkBridgeTxMessage::ConnectToValidators { + validator_ids: vec![], + peer_set: PeerSet::Collation, + failed, + } + }; + + ctx.send_message(msg).await; } /// Advertise collation to the given `peer`. 
@@ -839,6 +873,44 @@ async fn process_msg( use CollatorProtocolMessage::*; match msg { + ConnectToBackingGroups => { + gum::debug!( + target: LOG_TARGET, + "Received ConnectToBackingGroups message." + ); + state.connect_to_backers = true; + + if let Some(para_id) = state.collating_on { + update_validator_connections( + ctx, + &state.peer_ids, + &state.implicit_view, + &state.per_relay_parent, + para_id, + state.connect_to_backers, + ) + .await; + } + }, + DisconnectFromBackingGroups => { + gum::debug!( + target: LOG_TARGET, + "Received DisconnectFromBackingGroups message." + ); + state.connect_to_backers = false; + + if let Some(para_id) = state.collating_on { + update_validator_connections( + ctx, + &state.peer_ids, + &state.implicit_view, + &state.per_relay_parent, + para_id, + state.connect_to_backers, + ) + .await; + } + }, CollateOn(id) => { state.collating_on = Some(id); state.implicit_view = Some(ImplicitView::new(Some(id))); @@ -1312,8 +1384,15 @@ async fn handle_network_msg( handle_our_view_change(ctx, runtime, state, view).await?; // Connect only if we are collating on a para. if let Some(para_id) = state.collating_on { - connect_to_validators(ctx, &state.implicit_view, &state.per_relay_parent, para_id) - .await; + update_validator_connections( + ctx, + &state.peer_ids, + &state.implicit_view, + &state.per_relay_parent, + para_id, + state.connect_to_backers, + ) + .await; } }, PeerMessage(remote, msg) => { @@ -1868,10 +1947,17 @@ async fn run_inner( ); } _ = reconnect_timeout => { - // Connect only if we are collating on a para.
if let Some(para_id) = state.collating_on { - connect_to_validators(&mut ctx, &state.implicit_view, &state.per_relay_parent, para_id).await; + update_validator_connections( + &mut ctx, + &state.peer_ids, + &state.implicit_view, + &state.per_relay_parent, + para_id, + state.connect_to_backers, + ) + .await; } gum::trace!( diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index de14b5bb0389e..6256cd6614f42 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -32,13 +32,14 @@ use sp_core::crypto::Pair; use sp_keyring::Sr25519Keyring; use sp_runtime::traits::AppVerify; +use itertools::Itertools; use polkadot_node_network_protocol::{ peer_set::CollationVersion, request_response::{ v2::{CollationFetchingRequest, CollationFetchingResponse}, IncomingRequest, ReqProtocolNames, }, - view, + view, ObservedRole, }; use polkadot_node_primitives::BlockData; use polkadot_node_subsystem::{ @@ -564,6 +565,7 @@ fn v1_protocol_rejected() { ReputationAggregator::new(|_| true), |mut test_harness| async move { let virtual_overseer = &mut test_harness.virtual_overseer; + overseer_send(virtual_overseer, CollatorProtocolMessage::ConnectToBackingGroups).await; overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) .await; @@ -620,6 +622,9 @@ fn advertise_and_send_collation() { let mut virtual_overseer = test_harness.virtual_overseer; let mut req_v2_cfg = test_harness.req_v2_cfg; + overseer_send(&mut virtual_overseer, CollatorProtocolMessage::ConnectToBackingGroups) + .await; + overseer_send( &mut virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id), @@ -820,6 +825,9 @@ fn delay_reputation_change() { let mut virtual_overseer = test_harness.virtual_overseer; let mut req_v2_cfg = test_harness.req_v2_cfg; + overseer_send(&mut 
virtual_overseer, CollatorProtocolMessage::ConnectToBackingGroups) + .await; + overseer_send( &mut virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id), @@ -973,6 +981,12 @@ fn collators_declare_to_connected_peers() { let peer = test_state.validator_peer_id[0]; let validator_id = test_state.current_group_validator_authority_ids()[0].clone(); + overseer_send( + &mut test_harness.virtual_overseer, + CollatorProtocolMessage::ConnectToBackingGroups, + ) + .await; + overseer_send( &mut test_harness.virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id), @@ -1021,6 +1035,8 @@ fn collations_are_only_advertised_to_validators_with_correct_view() { let peer2 = test_state.current_group_validator_peer_ids()[1]; let validator_id2 = test_state.current_group_validator_authority_ids()[1].clone(); + overseer_send(virtual_overseer, CollatorProtocolMessage::ConnectToBackingGroups).await; + overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) .await; @@ -1096,6 +1112,8 @@ fn collate_on_two_different_relay_chain_blocks() { let peer2 = test_state.current_group_validator_peer_ids()[1]; let validator_id2 = test_state.current_group_validator_authority_ids()[1].clone(); + overseer_send(virtual_overseer, CollatorProtocolMessage::ConnectToBackingGroups).await; + overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) .await; @@ -1186,6 +1204,8 @@ fn validator_reconnect_does_not_advertise_a_second_time() { let peer = test_state.current_group_validator_peer_ids()[0]; let validator_id = test_state.current_group_validator_authority_ids()[0].clone(); + overseer_send(virtual_overseer, CollatorProtocolMessage::ConnectToBackingGroups).await; + overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) .await; @@ -1250,6 +1270,8 @@ fn collators_reject_declare_messages() { let peer = test_state.current_group_validator_peer_ids()[0]; let validator_id = 
test_state.current_group_validator_authority_ids()[0].clone(); + overseer_send(virtual_overseer, CollatorProtocolMessage::ConnectToBackingGroups).await; + overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) .await; @@ -1317,6 +1339,8 @@ where let virtual_overseer = &mut test_harness.virtual_overseer; let req_cfg = &mut test_harness.req_v2_cfg; + overseer_send(virtual_overseer, CollatorProtocolMessage::ConnectToBackingGroups).await; + overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) .await; @@ -1468,6 +1492,9 @@ fn connect_to_group_in_view() { let mut virtual_overseer = test_harness.virtual_overseer; let mut req_cfg = test_harness.req_v2_cfg; + overseer_send(&mut virtual_overseer, CollatorProtocolMessage::ConnectToBackingGroups) + .await; + overseer_send( &mut virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id), @@ -1599,6 +1626,9 @@ fn connect_with_no_cores_assigned() { let mut virtual_overseer = test_harness.virtual_overseer; let req_cfg = test_harness.req_v2_cfg; + overseer_send(&mut virtual_overseer, CollatorProtocolMessage::ConnectToBackingGroups) + .await; + overseer_send( &mut virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id), @@ -1668,3 +1698,261 @@ fn connect_with_no_cores_assigned() { }, ); } + +#[test] +fn no_connection_without_preconnect_message() { + let test_state = TestState::default(); + let local_peer_id = test_state.local_peer_id; + let collator_pair = test_state.collator_pair.clone(); + + test_harness( + local_peer_id, + collator_pair, + ReputationAggregator::new(|_| true), + |test_harness| async move { + let mut virtual_overseer = test_harness.virtual_overseer; + let req_cfg = test_harness.req_v2_cfg; + + // NOTE: We intentionally DO NOT send ConnectToBackingGroups here + // to verify that connections are not made without the pre-connect message. 
+ + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::CollateOn(test_state.para_id), + ) + .await; + + // Update view without expecting any connections (None parameter) + update_view( + None, // No connections should be made + &test_state, + &mut virtual_overseer, + vec![(test_state.relay_parent, 10)], + 1, + ) + .await; + + // Verify that no ConnectToValidators message was sent + // by attempting to receive a message with a short timeout. + let timeout = Duration::from_millis(250); + match overseer_recv_with_timeout(&mut virtual_overseer, timeout).await { + None => { + // Timeout is fine - no messages were sent + }, + Some(msg) => { + // No message expected here + panic!("Unexpected message was sent by subsystem: {:?}", msg); + }, + } + + TestHarness { virtual_overseer, req_v2_cfg: req_cfg } + }, + ); +} + +#[test] +fn distribute_collation_forces_connect() { + let test_state = TestState::default(); + let local_peer_id = test_state.local_peer_id; + let collator_pair = test_state.collator_pair.clone(); + + test_harness( + local_peer_id, + collator_pair, + ReputationAggregator::new(|_| true), + |test_harness| async move { + let mut virtual_overseer = test_harness.virtual_overseer; + let req_cfg = test_harness.req_v2_cfg; + + // NOTE: We intentionally DO NOT send ConnectToBackingGroups here + // to verify that connections are not made without the pre-connect message. + + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::CollateOn(test_state.para_id), + ) + .await; + + // Update view without expecting any connections (None parameter) + update_view( + None, // No connections should be made + &test_state, + &mut virtual_overseer, + vec![(test_state.relay_parent, 10)], + 1, + ) + .await; + + // Verify that no ConnectToValidators message was sent + // by attempting to receive a message with a short timeout. + // We expect timeout here. 
+ let timeout = Duration::from_millis(250); + match overseer_recv_with_timeout(&mut virtual_overseer, timeout).await { + None => { + // Timeout is fine - no messages were sent + }, + Some(msg) => { + // No message expected here + panic!("Unexpected message was sent by subsystem: {:?}", msg); + }, + } + + // Distribute a collation + let _ = distribute_collation( + &mut virtual_overseer, + test_state.current_group_validator_authority_ids(), + &test_state, + test_state.relay_parent, + ) + .await; + + for (val, peer) in test_state + .current_group_validator_authority_ids() + .into_iter() + .zip(test_state.current_group_validator_peer_ids()) + { + connect_peer(&mut virtual_overseer, peer, CollationVersion::V2, Some(val.clone())) + .await; + } + + // Expect a `Declare` message to the first connected validator + expect_declare_msg( + &mut virtual_overseer, + &test_state, + &test_state.current_group_validator_peer_ids()[0], + ) + .await; + + TestHarness { virtual_overseer, req_v2_cfg: req_cfg } + }, + ); +} + +#[test] +fn connect_advertise_disconnect_three_backing_groups() { + // Create a test state with 3 non-empty backing groups + let mut test_state = TestState::default(); + let para_id = test_state.para_id; + + // We have 5 validators total (indices 0-4) + // Group 0: validators [0, 1] + // Group 1: validators [2, 3] + // Group 2: validators [4] + test_state.session_info.validator_groups = vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2), ValidatorIndex(3)], + vec![ValidatorIndex(4)], + vec![], + ] + .into_iter() + .collect(); + + // Assign our para_id to 3 cores (0, 1, 2) which will map to 3 groups + test_state.claim_queue.clear(); + test_state.claim_queue.insert(CoreIndex(0), [para_id].into_iter().collect()); + test_state.claim_queue.insert(CoreIndex(1), [para_id].into_iter().collect()); + test_state.claim_queue.insert(CoreIndex(2), [para_id].into_iter().collect()); + test_state.claim_queue.insert(CoreIndex(3), VecDeque::new()); + + let local_peer_id =
test_state.local_peer_id; + let collator_pair = test_state.collator_pair.clone(); + + test_harness( + local_peer_id, + collator_pair, + ReputationAggregator::new(|_| true), + |test_harness| async move { + let mut virtual_overseer = test_harness.virtual_overseer; + let req_cfg = test_harness.req_v2_cfg; + + // Send the pre-connect message + overseer_send(&mut virtual_overseer, CollatorProtocolMessage::ConnectToBackingGroups) + .await; + + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::CollateOn(test_state.para_id), + ) + .await; + + // Get validators from all 3 backing groups + let mut expected_validators = Vec::new(); + for core_idx in [0, 1, 2] { + let validators = test_state.validator_authority_ids_for_core(CoreIndex(core_idx)); + expected_validators.extend(validators); + } + + // Remove duplicates while preserving order + let mut seen = std::collections::HashSet::new(); + expected_validators.retain(|v| seen.insert(v.clone())); + + // Verify we're connecting to all 5 validators (from 3 groups) + // Group 0 (Core 0): 2 validators + // Group 1 (Core 1): 2 validators + // Group 2 (Core 2): 1 validator + assert_eq!( + expected_validators.len(), + 5, + "Expected 5 unique validators from 3 backing groups" + ); + + // Update view and expect connections to all validators from all 3 backing groups + update_view( + Some(expected_validators.clone()), + &test_state, + &mut virtual_overseer, + vec![(test_state.relay_parent, 10)], + 1, + ) + .await; + + // Generate NetworkBridgeEvent::PeerConnected messages for each expected validator peer + // Use some random peer ids + let validator_peer_ids: Vec<_> = + (0..expected_validators.len()).map(|_| PeerId::random()).sorted().collect(); + + for (auth_id, peer_id) in expected_validators.iter().zip(validator_peer_ids.iter()) { + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate( + NetworkBridgeEvent::PeerConnected( + *peer_id, + ObservedRole::Authority, + 
CollationVersion::V2.into(), + Some(HashSet::from([auth_id.clone()])), + ), + ), + ) + .await; + } + + // Expect a `Declare` message for each connected validator + for peer_id in validator_peer_ids.iter() { + expect_declare_msg(&mut virtual_overseer, &test_state, peer_id).await; + } + + // Send the disconnect message + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::DisconnectFromBackingGroups, + ) + .await; + + // Expect a `ConnectToValidators` with an empty validator set, which disconnects all peers + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ConnectToValidators{ + validator_ids, + peer_set, + failed: _, + }) => { + // We should disconnect from all validators we were connected to + assert_eq!(validator_ids, vec![], "Expected to disconnect from all validators"); + assert_eq!(peer_set, PeerSet::Collation); + } + ); + + TestHarness { virtual_overseer, req_v2_cfg: req_cfg } + }, + ); +} diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index 4f4c3526d7f2f..21e3ab152bb1e 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -341,6 +341,8 @@ fn distribute_collation_from_implicit_view(#[case] validator_sends_view_first: b |mut test_harness| async move { let virtual_overseer = &mut test_harness.virtual_overseer; + overseer_send(virtual_overseer, CollatorProtocolMessage::ConnectToBackingGroups).await; + // Set collating para id. overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) .await; @@ -515,6 +517,8 @@ fn distribute_collation_up_to_limit() { // Grandparent of head `a`.
let head_b = Hash::from_low_u64_be(130); + overseer_send(virtual_overseer, CollatorProtocolMessage::ConnectToBackingGroups).await; + // Set collating para id. overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) .await; @@ -642,6 +646,9 @@ fn send_parent_head_data_for_elastic_scaling() { let head_b = Hash::from_low_u64_be(129); let head_b_num: u32 = 63; + overseer_send(&mut virtual_overseer, CollatorProtocolMessage::ConnectToBackingGroups) + .await; + // Set collating para id. overseer_send( &mut virtual_overseer, @@ -770,6 +777,9 @@ fn advertise_and_send_collation_by_hash() { let head_b = Hash::from_low_u64_be(129); let head_b_num: u32 = 63; + overseer_send(&mut virtual_overseer, CollatorProtocolMessage::ConnectToBackingGroups) + .await; + // Set collating para id. overseer_send( &mut virtual_overseer, diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index 3ed792b18925f..e0a02650cc8cf 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -1701,6 +1701,18 @@ async fn process_msg( let _timer = state.metrics.time_process_msg(); match msg { + ConnectToBackingGroups => { + gum::warn!( + target: LOG_TARGET, + "PreConnectToBackingGroups message is not expected on the validator side of the protocol", + ); + }, + DisconnectFromBackingGroups => { + gum::warn!( + target: LOG_TARGET, + "DisconnectFromBackingGroups message is not expected on the validator side of the protocol", + ); + }, CollateOn(id) => { gum::warn!( target: LOG_TARGET, diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 28d8c0ebf7675..6866979fa8dce 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -262,6 +262,12 @@ pub enum CollatorProtocolMessage { /// /// 
The hash is the relay parent. Seconded(Hash, SignedFullStatement), + /// A message sent by Cumulus consensus engine to the collator protocol to + /// pre-connect to backing groups at all allowed relay parents. + ConnectToBackingGroups, + /// A message sent by Cumulus consensus engine to the collator protocol to + /// disconnect from backing groups. + DisconnectFromBackingGroups, } impl Default for CollatorProtocolMessage { diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_12cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_12cores.rs index cbcb0ebc2dd9d..9cfcab0b77150 100644 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_12cores.rs +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_12cores.rs @@ -4,6 +4,8 @@ // Test that a parachain that uses a single slot-based collator with elastic scaling can use 12 // cores in order to achieve 500ms blocks. +use std::time::Duration; + use anyhow::anyhow; use cumulus_zombienet_sdk_helpers::{ @@ -11,6 +13,7 @@ use cumulus_zombienet_sdk_helpers::{ }; use polkadot_primitives::Id as ParaId; use serde_json::json; +use zombienet_orchestrator::network::node::LogLineCountOptions; use zombienet_sdk::{ subxt::{OnlineClient, PolkadotConfig}, subxt_signer::sr25519::dev, @@ -60,9 +63,14 @@ async fn slot_based_12cores_test() -> Result<(), anyhow::Error> { .with_chain("elastic-scaling-500ms") .with_default_args(vec![ "--authoring=slot-based".into(), - ("-lparachain=debug,aura=debug").into(), + ("-lparachain=debug,aura=debug,parachain::collator-protocol=trace").into(), ]) - .with_collator(|n| n.with_name("collator-elastic")) + .with_collator(|n| n.with_name("collator-0")) + .with_collator(|n| n.with_name("collator-1")) + .with_collator(|n| n.with_name("collator-2")) + .with_collator(|n| n.with_name("collator-3")) + .with_collator(|n| n.with_name("collator-4")) + .with_collator(|n| n.with_name("collator-5")) }) .build() .map_err(|e| { @@ -74,7 +82,7 @@ async fn 
slot_based_12cores_test() -> Result<(), anyhow::Error> { let network = spawn_fn(config).await?; let relay_node = network.get_node("validator-0")?; - let para_node = network.get_node("collator-elastic")?; + let para_node = network.get_node("collator-5")?; let relay_client: OnlineClient = relay_node.wait_client().await?; let alice = dev::alice(); @@ -105,6 +113,26 @@ async fn slot_based_12cores_test() -> Result<(), anyhow::Error> { ) .await?; + // Expect that `collator-5` claims at least 3 slots during this run. + let result = para_node + .wait_log_line_count_with_timeout( + "*Received PreConnectToBackingGroups message*", + true, + LogLineCountOptions::new(|n| n >= 3, Duration::from_secs(120), false), + ) + .await?; + assert!(result.success()); + + // It should disconnect at least 3 times after its slot passes. + let result = para_node + .wait_log_line_count_with_timeout( + "*Received DisconnectFromBackingGroups message*", + true, + LogLineCountOptions::new(|n| n >= 3, Duration::from_secs(120), false), + ) + .await?; + assert!(result.success()); + // Assert the parachain finalized block height is also on par with the number of backed // candidates. assert_finality_lag(&para_node.wait_client().await?, 60).await?; diff --git a/prdoc/pr_9929.prdoc b/prdoc/pr_9929.prdoc new file mode 100644 index 0000000000000..af9dec337b381 --- /dev/null +++ b/prdoc/pr_9929.prdoc @@ -0,0 +1,18 @@ +title: Pre-connect to backers before own slot +doc: +- audience: Node Dev + description: |- + Implements a mechanism to pre-connect to backers, see https://github.com/paritytech/polkadot-sdk/issues/9767#issuecomment-3306292493. + Improves backing group connectivity. + +crates: +- name: cumulus-client-service + bump: minor +- name: polkadot-omni-node-lib + bump: minor +- name: polkadot-collator-protocol + bump: patch +- name: polkadot-node-subsystem-types + bump: major +- name: cumulus-client-consensus-aura + bump: minor