diff --git a/Cargo.lock b/Cargo.lock index 80f8578ff0034..6e5c0dd02aaac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4640,6 +4640,28 @@ dependencies = [ "sp-runtime", ] +[[package]] +name = "cumulus-pallet-subscriber" +version = "0.1.0" +dependencies = [ + "cumulus-pallet-parachain-system", + "cumulus-primitives-core", + "frame-benchmarking", + "frame-support", + "frame-system", + "hash-db", + "parity-scale-codec", + "polkadot-primitives", + "scale-info", + "sp-core 28.0.0", + "sp-io", + "sp-runtime", + "sp-state-machine", + "sp-std 14.0.0", + "sp-trie", + "trie-db", +] + [[package]] name = "cumulus-pallet-weight-reclaim" version = "1.0.0" @@ -4870,6 +4892,7 @@ dependencies = [ "sp-api", "sp-blockchain", "sp-state-machine", + "sp-storage 19.0.0", "sp-version", "thiserror 1.0.65", ] diff --git a/Cargo.toml b/Cargo.toml index 539fa3162ca4f..020251fa0a53c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,6 +85,7 @@ members = [ "cumulus/pallets/parachain-system/proc-macro", "cumulus/pallets/session-benchmarking", "cumulus/pallets/solo-to-para", + "cumulus/pallets/subscriber", "cumulus/pallets/weight-reclaim", "cumulus/pallets/xcm", "cumulus/pallets/xcmp-queue", @@ -748,6 +749,7 @@ cumulus-pallet-aura-ext = { path = "cumulus/pallets/aura-ext", default-features cumulus-pallet-dmp-queue = { default-features = false, path = "cumulus/pallets/dmp-queue" } cumulus-pallet-parachain-system = { path = "cumulus/pallets/parachain-system", default-features = false } cumulus-pallet-parachain-system-proc-macro = { path = "cumulus/pallets/parachain-system/proc-macro", default-features = false } +cumulus-pallet-subscriber = { path = "cumulus/pallets/subscriber", default-features = false } cumulus-pallet-session-benchmarking = { path = "cumulus/pallets/session-benchmarking", default-features = false } cumulus-pallet-solo-to-para = { path = "cumulus/pallets/solo-to-para", default-features = false } cumulus-pallet-weight-reclaim = { path = "cumulus/pallets/weight-reclaim", default-features 
= false } diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index 3999352322e20..2fab2f7e424d9 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -177,6 +177,7 @@ where parent_hash: Block::Hash, timestamp: impl Into>, relay_parent_descendants: Option, + relay_proof_request: cumulus_primitives_core::RelayProofRequest, collator_peer_id: PeerId, ) -> Result<(ParachainInherentData, InherentData), Box> { let paras_inherent_data = ParachainInherentDataProvider::create_at( @@ -188,6 +189,7 @@ where .map(RelayParentData::into_inherent_descendant_list) .unwrap_or_default(), Vec::new(), + relay_proof_request, collator_peer_id, ) .await; @@ -224,6 +226,7 @@ where validation_data: &PersistedValidationData, parent_hash: Block::Hash, timestamp: impl Into>, + relay_proof_request: cumulus_primitives_core::RelayProofRequest, collator_peer_id: PeerId, ) -> Result<(ParachainInherentData, InherentData), Box> { self.create_inherent_data_with_rp_offset( @@ -232,6 +235,7 @@ where parent_hash, timestamp, None, + relay_proof_request, collator_peer_id, ) .await diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs index 1f99e2f6e5cc0..532da7ede18e3 100644 --- a/cumulus/client/consensus/aura/src/collators/basic.rs +++ b/cumulus/client/consensus/aura/src/collators/basic.rs @@ -238,6 +238,7 @@ where &validation_data, parent_hash, claim.timestamp(), + Default::default(), params.collator_peer_id, ) .await diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 303b5268095c5..c33ce1c41d6c9 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -36,7 +36,7 @@ use codec::{Codec, Encode}; use cumulus_client_collator::service::ServiceInterface as 
CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::{CollectCollationInfo, PersistedValidationData}; +use cumulus_primitives_core::{CollectCollationInfo, KeyToIncludeInRelayProofApi, PersistedValidationData}; use cumulus_relay_chain_interface::RelayChainInterface; use sp_consensus::Environment; @@ -164,8 +164,10 @@ where + Send + Sync + 'static, - Client::Api: - AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, + Client::Api: AuraApi + + CollectCollationInfo + + AuraUnincludedSegmentApi + + KeyToIncludeInRelayProofApi, Backend: sc_client_api::Backend + 'static, RClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, @@ -216,8 +218,10 @@ where + Send + Sync + 'static, - Client::Api: - AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, + Client::Api: AuraApi + + CollectCollationInfo + + AuraUnincludedSegmentApi + + KeyToIncludeInRelayProofApi, Backend: sc_client_api::Backend + 'static, RClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, @@ -392,12 +396,15 @@ where // Build and announce collations recursively until // `can_build_upon` fails or building a collation fails. 
+ let relay_proof_request = super::get_relay_proof_request(&*params.para_client, parent_hash); + let (parachain_inherent_data, other_inherent_data) = match collator .create_inherent_data( relay_parent, &validation_data, parent_hash, slot_claim.timestamp(), + relay_proof_request, params.collator_peer_id, ) .await diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index d938dca69282f..0b2981691fc24 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -25,7 +25,9 @@ use crate::collator::SlotClaim; use codec::Codec; use cumulus_client_consensus_common::{self as consensus_common, ParentSearchParams}; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; -use cumulus_primitives_core::{relay_chain::Header as RelayHeader, BlockT}; +use cumulus_primitives_core::{ + relay_chain::Header as RelayHeader, BlockT, KeyToIncludeInRelayProofApi, +}; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; use polkadot_node_subsystem::messages::{CollatorProtocolMessage, RuntimeApiRequest}; use polkadot_node_subsystem_util::runtime::ClaimQueueSnapshot; @@ -662,6 +664,34 @@ mod tests { } } +/// Fetches relay chain storage proof requests from the parachain runtime. +/// +/// Queries the runtime API to determine which relay chain storage keys +/// (both top-level and child trie keys) should be included in the relay chain state proof. +/// +/// Falls back to an empty request if the runtime API call fails or is not implemented. 
+fn get_relay_proof_request( + client: &Client, + parent_hash: Block::Hash, +) -> cumulus_primitives_core::RelayProofRequest +where + Block: BlockT, + Client: ProvideRuntimeApi, + Client::Api: KeyToIncludeInRelayProofApi, +{ + client + .runtime_api() + .keys_to_prove(parent_hash) + .unwrap_or_else(|e| { + tracing::warn!( + target: crate::LOG_TARGET, + error = ?e, + "Failed to fetch relay proof requests from runtime, using empty request" + ); + Default::default() + }) +} + /// Holds a relay parent and its descendants. pub struct RelayParentData { /// The relay parent block header diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 4a58ed81426af..173550d995b63 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -359,7 +359,6 @@ where relay_parent_storage_root: *relay_parent_header.state_root(), max_pov_size: *max_pov_size, }; - let (parachain_inherent_data, other_inherent_data) = match collator .create_inherent_data_with_rp_offset( relay_parent, @@ -367,6 +366,7 @@ where parent_hash, slot_claim.timestamp(), Some(rp_data), + Default::default(), collator_peer_id, ) .await diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs b/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs index e0ba35e558afe..ef4ed09c6dc66 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs @@ -566,6 +566,15 @@ impl RelayChainInterface for TestRelayClient { unimplemented!("Not needed for test") } + async fn prove_child_read( + &self, + _: RelayHash, + _: &cumulus_relay_chain_interface::ChildInfo, + _: &[Vec], + ) -> RelayChainResult { + unimplemented!("Not needed for test") + } + async fn wait_for_block(&self, _: RelayHash) -> 
RelayChainResult<()> { unimplemented!("Not needed for test") } diff --git a/cumulus/client/parachain-inherent/src/lib.rs b/cumulus/client/parachain-inherent/src/lib.rs index 5e994cd472f70..8ea3c4f96c5ae 100644 --- a/cumulus/client/parachain-inherent/src/lib.rs +++ b/cumulus/client/parachain-inherent/src/lib.rs @@ -30,6 +30,8 @@ pub use cumulus_primitives_parachain_inherent::{ParachainInherentData, INHERENT_ use cumulus_relay_chain_interface::RelayChainInterface; pub use mock::{MockValidationDataInherentDataProvider, MockXcmConfig}; use sc_network_types::PeerId; +use sp_state_machine::StorageProof; +use sp_storage::ChildInfo; const LOG_TARGET: &str = "parachain-inherent"; @@ -157,6 +159,84 @@ async fn collect_relay_storage_proof( .ok() } +/// Collect storage proofs for relay chain data. +/// +/// Generates proofs for both top-level relay chain storage and child trie data. +/// Top-level keys are proven directly. Child trie roots are automatically included +/// from their standard storage locations (`:child_storage:default:` + identifier). +/// +/// Returns a merged proof combining all requested data, or `None` if there are no requests. 
+async fn collect_relay_storage_proofs( + relay_chain_interface: &impl RelayChainInterface, + relay_parent: PHash, + relay_proof_request: cumulus_primitives_core::RelayProofRequest, +) -> Option { + use cumulus_primitives_core::RelayStorageKey; + + let cumulus_primitives_core::RelayProofRequest { keys } = relay_proof_request; + + if keys.is_empty() { + return None; + } + + let mut combined_proof: Option = None; + + // Group keys by storage type + let mut top_keys = Vec::new(); + let mut child_keys: std::collections::BTreeMap, Vec>> = + std::collections::BTreeMap::new(); + + for key in keys { + match key { + RelayStorageKey::Top(k) => top_keys.push(k), + RelayStorageKey::Child { storage_key, key } => { + child_keys.entry(storage_key).or_default().push(key); + }, + } + } + + // Collect top-level storage proofs + if !top_keys.is_empty() { + match relay_chain_interface.prove_read(relay_parent, &top_keys).await { + Ok(top_proof) => { + combined_proof = Some(top_proof); + }, + Err(e) => { + tracing::error!( + target: LOG_TARGET, + relay_parent = ?relay_parent, + error = ?e, + "Cannot obtain top-level storage proof from relay chain.", + ); + }, + } + } + + // Collect child trie proofs + for (storage_key, data_keys) in child_keys { + let child_info = ChildInfo::new_default(&storage_key); + match relay_chain_interface.prove_child_read(relay_parent, &child_info, &data_keys).await { + Ok(child_proof) => { + combined_proof = match combined_proof { + None => Some(child_proof), + Some(existing) => Some(StorageProof::merge([existing, child_proof])), + }; + }, + Err(e) => { + tracing::error!( + target: LOG_TARGET, + relay_parent = ?relay_parent, + child_trie_id = ?child_info.storage_key(), + error = ?e, + "Cannot obtain child trie proof from relay chain.", + ); + }, + } + } + + combined_proof +} + pub struct ParachainInherentDataProvider; impl ParachainInherentDataProvider { @@ -170,6 +250,7 @@ impl ParachainInherentDataProvider { para_id: ParaId, relay_parent_descendants: Vec, 
additional_relay_state_keys: Vec>, + relay_proof_request: cumulus_primitives_core::RelayProofRequest, collator_peer_id: PeerId, ) -> Option { let collator_peer_id = ApprovedPeerId::try_from(collator_peer_id.to_bytes()) @@ -188,7 +269,7 @@ impl ParachainInherentDataProvider { .iter() .skip(1) .any(sc_consensus_babe::contains_epoch_change::); - let relay_chain_state = collect_relay_storage_proof( + let mut relay_chain_state = collect_relay_storage_proof( relay_chain_interface, para_id, relay_parent, @@ -198,6 +279,14 @@ impl ParachainInherentDataProvider { ) .await?; + // Collect additional requested storage proofs (top-level and child tries) + if let Some(additional_proofs) = + collect_relay_storage_proofs(relay_chain_interface, relay_parent, relay_proof_request) + .await + { + relay_chain_state = StorageProof::merge([relay_chain_state, additional_proofs]); + } + let downward_messages = relay_chain_interface .retrieve_dmq_contents(para_id, relay_parent) .await diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index b989f81efd5dc..f7b3f810b6015 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -240,6 +240,18 @@ impl RelayChainInterface for RelayChainInProcessInterface { .map_err(RelayChainError::StateMachineError) } + async fn prove_child_read( + &self, + relay_parent: PHash, + child_info: &cumulus_relay_chain_interface::ChildInfo, + child_keys: &[Vec], + ) -> RelayChainResult { + let state_backend = self.backend.state_at(relay_parent, TrieCacheContext::Untrusted)?; + + sp_state_machine::prove_child_read(state_backend, child_info, child_keys) + .map_err(RelayChainError::StateMachineError) + } + /// Wait for a given relay chain block in an async way. 
/// /// The caller needs to pass the hash of a block it waits for and the function will return when diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index be19f99526659..db89a573b3537 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -21,6 +21,7 @@ sc-network = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } sp-version = { workspace = true } async-trait = { workspace = true } diff --git a/cumulus/client/relay-chain-interface/src/lib.rs b/cumulus/client/relay-chain-interface/src/lib.rs index dd03738ed0029..8f87ccc6997b2 100644 --- a/cumulus/client/relay-chain-interface/src/lib.rs +++ b/cumulus/client/relay-chain-interface/src/lib.rs @@ -42,6 +42,7 @@ pub use cumulus_primitives_core::{ }; pub use polkadot_overseer::Handle as OverseerHandle; pub use sp_state_machine::StorageValue; +pub use sp_storage::ChildInfo; pub type RelayChainResult = Result; @@ -213,6 +214,14 @@ pub trait RelayChainInterface: Send + Sync { relevant_keys: &Vec>, ) -> RelayChainResult; + /// Generate a child trie storage read proof. + async fn prove_child_read( + &self, + relay_parent: PHash, + child_info: &ChildInfo, + child_keys: &[Vec], + ) -> RelayChainResult; + /// Returns the validation code hash for the given `para_id` using the given /// `occupied_core_assumption`. 
async fn validation_code_hash( @@ -354,6 +363,15 @@ where (**self).prove_read(relay_parent, relevant_keys).await } + async fn prove_child_read( + &self, + relay_parent: PHash, + child_info: &ChildInfo, + child_keys: &[Vec], + ) -> RelayChainResult { + (**self).prove_child_read(relay_parent, child_info, child_keys).await + } + async fn wait_for_block(&self, hash: PHash) -> RelayChainResult<()> { (**self).wait_for_block(hash).await } diff --git a/cumulus/client/relay-chain-rpc-interface/src/lib.rs b/cumulus/client/relay-chain-rpc-interface/src/lib.rs index 84d22676789cf..9c7732e6e452e 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/lib.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/lib.rs @@ -210,6 +210,20 @@ impl RelayChainInterface for RelayChainRpcInterface { }) } + async fn prove_child_read( + &self, + _relay_parent: RelayHash, + _child_info: &cumulus_relay_chain_interface::ChildInfo, + _child_keys: &[Vec], + ) -> RelayChainResult { + // Not implemented: requires relay chain RPC to expose child trie proof method. + tracing::warn!( + target: "relay-chain-rpc-interface", + "prove_child_read not implemented for RPC interface, returning empty proof" + ); + Ok(StorageProof::empty()) + } + /// Wait for a given relay chain block /// /// The hash of the block to wait for is passed. 
We wait for the block to arrive or return after diff --git a/cumulus/pallets/aura-ext/src/test.rs b/cumulus/pallets/aura-ext/src/test.rs index 7c4c78ab2a5b0..3486e56a5c2e4 100644 --- a/cumulus/pallets/aura-ext/src/test.rs +++ b/cumulus/pallets/aura-ext/src/test.rs @@ -151,6 +151,7 @@ impl cumulus_pallet_parachain_system::Config for Test { type CheckAssociatedRelayNumber = AnyRelayNumber; type ConsensusHook = ExpectParentIncluded; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } fn set_ancestors() { diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 5ff4af131f565..ed55293a88db0 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -85,6 +85,7 @@ use unincluded_segment::{ }; pub use consensus_hook::{ConsensusHook, ExpectParentIncluded}; +pub use relay_state_snapshot::ProcessRelayProofKeys; /// Register the `validate_block` function that is used by parachains to validate blocks on a /// validator. /// @@ -263,6 +264,13 @@ pub mod pallet { /// /// If set to 0, this config has no impact. type RelayParentOffset: Get; + + /// Processor for relay chain proof keys. + /// + /// This allows parachains to process data from the relay chain state proof, + /// including both child trie keys and main trie keys that were requested + /// via `KeyToIncludeInRelayProofApi`. 
+ type RelayProofKeysProcessor: relay_state_snapshot::ProcessRelayProofKeys; } #[pallet::hooks] @@ -701,6 +709,8 @@ pub mod pallet { >::put(relevant_messaging_state.clone()); >::put(host_config); + total_weight.saturating_accrue(T::RelayProofKeysProcessor::process_relay_proof_keys(&relay_state_proof)); + ::on_validation_data(&vfp); if let Some(collator_peer_id) = collator_peer_id { diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index d3c7cef52b637..b361031be2c37 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -99,6 +99,7 @@ impl Config for Test { type ConsensusHook = TestConsensusHook; type WeightInfo = (); type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } std::thread_local! { diff --git a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs index 7138d61edd277..7c6efb5ddf73e 100644 --- a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs +++ b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs @@ -21,11 +21,26 @@ use codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain, AbridgedHostConfiguration, AbridgedHrmpChannel, ParaId, }; +use frame_support::weights::Weight; use scale_info::TypeInfo; use sp_runtime::traits::HashingFor; use sp_state_machine::{Backend, TrieBackend, TrieBackendBuilder}; use sp_trie::{HashDBT, MemoryDB, StorageProof, EMPTY_PREFIX}; +/// Process keys from verified relay chain state proofs. +/// +/// This trait allows processing of relay chain storage data from the verified proof. +pub trait ProcessRelayProofKeys { + /// Process keys from a verified relay state proof. 
+ fn process_relay_proof_keys(verified_proof: &RelayChainStateProof) -> Weight; +} + +impl ProcessRelayProofKeys for () { + fn process_relay_proof_keys(_verified_proof: &RelayChainStateProof) -> Weight { + Weight::zero() + } +} + /// The capacity of the upward message queue of a parachain on the relay chain. // The field order should stay the same as the data can be found in the proof to ensure both are // have the same encoded representation. @@ -383,4 +398,20 @@ impl RelayChainStateProof { { read_optional_entry(&self.trie_backend, key).map_err(Error::ReadOptionalEntry) } + + /// Read a value from a child trie in the relay chain state proof. + /// + /// Returns `Ok(Some(value))` if the key exists in the child trie, + /// `Ok(None)` if the key doesn't exist, + /// or `Err` if there was a proof error. + pub fn read_child_storage( + &self, + child_info: &sp_core::storage::ChildInfo, + key: &[u8], + ) -> Result>, Error> { + use sp_state_machine::Backend; + self.trie_backend + .child_storage(child_info, key) + .map_err(|_| Error::ReadEntry(ReadEntryErr::Proof)) + } } diff --git a/cumulus/pallets/subscriber/Cargo.toml b/cumulus/pallets/subscriber/Cargo.toml new file mode 100644 index 0000000000000..4dc03f3517939 --- /dev/null +++ b/cumulus/pallets/subscriber/Cargo.toml @@ -0,0 +1,65 @@ +[package] +name = "cumulus-pallet-subscriber" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +description = "Subscriber pallet for processing published data from relay chain state proofs" +license = "Apache-2.0" + +[lints] +workspace = true + +[dependencies] +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } + +# Substrate +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } + +# Cumulus +cumulus-pallet-parachain-system = { workspace = true } +cumulus-primitives-core = { workspace = true } + +# Benchmarking 
+frame-benchmarking = { workspace = true } +hash-db = { workspace = true } +polkadot-primitives = { workspace = true } +sp-runtime = { workspace = true } +sp-state-machine = { workspace = true } +sp-trie = { workspace = true } +sp-io = { workspace = true } +trie-db = { workspace = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "cumulus-pallet-parachain-system/std", + "cumulus-primitives-core/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "hash-db/std", + "polkadot-primitives/std", + "scale-info/std", + "sp-core/std", + "sp-runtime/std", + "sp-state-machine/std", + "sp-std/std", + "sp-trie/std", + "trie-db/std", +] +runtime-benchmarks = [ + "cumulus-pallet-parachain-system/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", +] +try-runtime = [ + "cumulus-pallet-parachain-system/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", +] diff --git a/cumulus/pallets/subscriber/src/benchmarking.rs b/cumulus/pallets/subscriber/src/benchmarking.rs new file mode 100644 index 0000000000000..ca0b7c4b98b4a --- /dev/null +++ b/cumulus/pallets/subscriber/src/benchmarking.rs @@ -0,0 +1,131 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Benchmarking setup for cumulus-pallet-subscriber + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use crate::Pallet as Subscriber; +use cumulus_pallet_parachain_system::RelayChainStateProof; +use cumulus_primitives_core::ParaId; +use frame_benchmarking::v2::*; +use frame_support::traits::Get; +use frame_system::RawOrigin; + +/// Create test subscriptions for benchmarking. +fn create_subscriptions(n: u32, keys_per_publisher: u32) -> Vec<(ParaId, Vec>)> { + (0..n) + .map(|i| { + let para_id = ParaId::from(1000 + i); + let keys: Vec> = if keys_per_publisher == 0 { + vec![vec![i as u8], vec![i as u8, i as u8]] + } else { + (0..keys_per_publisher).map(|j| vec![i as u8, j as u8]).collect() + }; + (para_id, keys) + }) + .collect() +} + +/// Create a relay chain state proof for benchmarking with actual child trie data. +fn benchmark_relay_proof(publishers: &[(ParaId, Vec<(Vec, Vec)>)]) -> RelayChainStateProof { + crate::test_util::bench_proof_builder::build_sproof_with_child_data(publishers) +} + +#[benchmarks] +mod benchmarks { + use super::*; + + /// Benchmark collecting publisher roots from the relay state proof. + /// + /// Cost scales with the number of publishers `n`. + #[benchmark] + fn collect_publisher_roots( + n: Linear<1, { T::MaxPublishers::get() }>, + ) { + let subscriptions = create_subscriptions(n, 1); + let publishers: Vec<_> = (0..n) + .map(|i| (ParaId::from(1000 + i), vec![(vec![i as u8], vec![25u8])])) + .collect(); + let proof = benchmark_relay_proof(&publishers); + let roots; + #[block] + { + roots = Subscriber::::collect_publisher_roots(&proof, &subscriptions); + } + assert_eq!(roots.len(), n as usize); + } + + /// Benchmark processing published data from the relay proof. + /// + /// Worst case: all publishers have updated data requiring processing. 
+ /// + /// Parameters: + /// - `n`: Number of publishers with updated data + /// - `k`: Number of keys per publisher + /// - `s`: Total encoded bytes per publisher (max 2KiB) + #[benchmark] + fn process_published_data( + n: Linear<1, { T::MaxPublishers::get() }>, + k: Linear<1, 10>, + s: Linear<1, 2048>, + ) { + let subscriptions = create_subscriptions(n, k); + // SCALE encoding overhead (1-4 bytes) ignored as negligible compared to data benchmark ranges + let value_size_per_key = (s / k.max(1)) as usize; + let publishers: Vec<_> = (0..n) + .map(|i| { + let para_id = ParaId::from(1000 + i); + let child_data: Vec<(Vec, Vec)> = (0..k) + .map(|j| { + let value = vec![25u8; value_size_per_key]; + let encoded_value = value.encode(); + (vec![i as u8, j as u8], encoded_value) + }) + .collect(); + (para_id, child_data) + }) + .collect(); + let proof = benchmark_relay_proof(&publishers); + let current_roots = Subscriber::::collect_publisher_roots(&proof, &subscriptions); + + #[block] + { + let _ = Subscriber::::process_published_data(&proof, ¤t_roots, &subscriptions); + } + assert_eq!(PreviousPublishedDataRoots::::get().len(), n as usize); + } + + #[benchmark] + fn clear_stored_roots() { + let publisher = ParaId::from(1000); + let root = BoundedVec::try_from(vec![0u8; 32]).unwrap(); + PreviousPublishedDataRoots::::mutate(|roots| { + let _ = roots.try_insert(publisher, root); + }); + + #[extrinsic_call] + _(RawOrigin::Root, publisher); + + assert!(!PreviousPublishedDataRoots::::get().contains_key(&publisher)); + } + + impl_benchmark_test_suite! { + Subscriber, + crate::mock::new_test_ext(), + crate::mock::Test + } +} diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs new file mode 100644 index 0000000000000..728a72bc6d79c --- /dev/null +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -0,0 +1,302 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +//! Process child trie data from relay chain state proofs via configurable handler. +//! +//! This pallet is heavily opinionated toward a parachain-to-parachain publish-subscribe model. +//! It assumes ParaId as the identifier for each child trie and is designed specifically for +//! extracting published data from relay chain proofs in a pubsub mechanism. + +extern crate alloc; + +use alloc::{collections::btree_map::BTreeMap, vec::Vec}; +use codec::Decode; +use cumulus_pallet_parachain_system::relay_state_snapshot::{ + ProcessRelayProofKeys, RelayChainStateProof, +}; +use cumulus_primitives_core::ParaId; +use frame_support::{ + defensive, + pallet_prelude::*, + storage::bounded_btree_map::BoundedBTreeMap, + traits::{Get, StorageVersion}, +}; +use sp_std::vec; + +pub use pallet::*; +pub use weights::{WeightInfo, SubstrateWeight}; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +#[cfg(any(test, feature = "runtime-benchmarks"))] +mod test_util; +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; +pub mod weights; + +/// Define subscriptions and handle received data. +pub trait SubscriptionHandler { + /// List of subscriptions as (ParaId, keys) tuples. + /// Returns (subscriptions, weight) where weight is the cost of computing the subscriptions. 
+ fn subscriptions() -> (Vec<(ParaId, Vec>)>, Weight); + + /// Called when subscribed data is updated. + /// Returns the weight consumed by processing the data. + fn on_data_updated(publisher: ParaId, key: Vec, value: Vec) -> Weight; +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_system::pallet_prelude::*; + + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + /// Handler for defining subscriptions and processing received data. + type SubscriptionHandler: SubscriptionHandler; + /// Weight information for extrinsics and operations. + type WeightInfo: WeightInfo; + /// Maximum number of publishers that can be tracked simultaneously. + #[pallet::constant] + type MaxPublishers: Get; + } + + /// Child trie roots from previous block for change detection. + #[pallet::storage] + pub type PreviousPublishedDataRoots = StorageValue< + _, + BoundedBTreeMap>, T::MaxPublishers>, + ValueQuery, + >; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Data was received and processed from a publisher. + DataProcessed { + publisher: ParaId, + key: Vec, + value_size: u32, + }, + /// A stored publisher root was cleared. + PublisherRootCleared { publisher: ParaId }, + } + + #[pallet::error] + pub enum Error { + /// Publisher root not found. + PublisherRootNotFound, + } + + #[pallet::call] + impl Pallet { + /// Clear the stored root for a specific publisher. + /// + /// This forces reprocessing of data from that publisher in the next block. + /// Useful for recovery scenarios or when a specific publisher's data needs to be refreshed. + /// + /// - `origin`: Must be root. + /// - `publisher`: The ParaId of the publisher whose root should be cleared. 
+ #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::clear_stored_roots())] + pub fn clear_stored_roots( + origin: OriginFor, + publisher: ParaId, + ) -> DispatchResult { + ensure_root(origin)?; + + >::try_mutate(|roots| -> DispatchResult { + roots.remove(&publisher).ok_or(Error::::PublisherRootNotFound)?; + Ok(()) + })?; + + Self::deposit_event(Event::PublisherRootCleared { publisher }); + Ok(()) + } + } + + impl Pallet { + /// Build relay proof requests from subscriptions. + /// + /// Returns a `RelayProofRequest` with child trie proof requests for subscribed data. + pub fn get_relay_proof_requests() -> cumulus_primitives_core::RelayProofRequest { + let (subscriptions, _weight) = T::SubscriptionHandler::subscriptions(); + let storage_keys = subscriptions + .into_iter() + .flat_map(|(para_id, data_keys)| { + let storage_key = Self::derive_storage_key(para_id); + data_keys.into_iter().map(move |key| { + cumulus_primitives_core::RelayStorageKey::Child { + storage_key: storage_key.clone(), + key, + } + }) + }) + .collect(); + + cumulus_primitives_core::RelayProofRequest { keys: storage_keys } + } + + /// Derives the child trie storage key for a publisher. + /// + /// Uses the same encoding pattern as the broadcaster pallet: + /// `(b"pubsub", para_id).encode()` to ensure compatibility. 
+ fn derive_storage_key(publisher_para_id: ParaId) -> Vec { + use codec::Encode; + (b"pubsub", publisher_para_id).encode() + } + + fn derive_child_info(publisher_para_id: ParaId) -> sp_core::storage::ChildInfo { + sp_core::storage::ChildInfo::new_default(&Self::derive_storage_key(publisher_para_id)) + } + + pub fn collect_publisher_roots( + relay_state_proof: &RelayChainStateProof, + subscriptions: &[(ParaId, Vec>)], + ) -> BTreeMap> { + subscriptions + .iter() + .take(T::MaxPublishers::get() as usize) + .filter_map(|(publisher_para_id, _keys)| { + let child_info = Self::derive_child_info(*publisher_para_id); + let prefixed_key = child_info.prefixed_storage_key(); + + relay_state_proof + .read_optional_entry::<[u8; 32]>(&prefixed_key) + .ok() + .flatten() + .map(|root_hash| (*publisher_para_id, root_hash.to_vec())) + }) + .collect() + } + + pub fn process_published_data( + relay_state_proof: &RelayChainStateProof, + current_roots: &BTreeMap>, + subscriptions: &[(ParaId, Vec>)], + ) -> (Weight, u32) { + // Load roots from previous block for change detection. + let previous_roots = >::get(); + + // Early exit if no publishers have any data. + if current_roots.is_empty() && previous_roots.is_empty() { + return (T::DbWeight::get().reads(1), 0); + } + + let mut total_handler_weight = Weight::zero(); + let mut total_bytes_decoded = 0u32; + + // Process each subscription. + for (publisher, subscription_keys) in subscriptions { + // Check if publisher has published data in this block. + if let Some(current_root) = current_roots.get(publisher) { + // Detect if child trie root changed since last block. + let should_update = previous_roots + .get(publisher) + .map_or(true, |prev_root| prev_root.as_slice() != current_root.as_slice()); + + // Only process if data changed. + if should_update { + let child_info = Self::derive_child_info(*publisher); + + // Read each subscribed key from relay proof. 
+ for key in subscription_keys.iter() { + match relay_state_proof.read_child_storage(&child_info, key) { + Ok(Some(encoded_value)) => { + let encoded_size = encoded_value.len() as u32; + total_bytes_decoded = total_bytes_decoded.saturating_add(encoded_size); + + match Vec::::decode(&mut &encoded_value[..]) { + Ok(value) => { + let value_size = value.len() as u32; + + // Notify handler of new data. + let handler_weight = T::SubscriptionHandler::on_data_updated( + *publisher, + key.clone(), + value.clone(), + ); + total_handler_weight = total_handler_weight.saturating_add(handler_weight); + + Self::deposit_event(Event::DataProcessed { + publisher: *publisher, + key: key.clone(), + value_size, + }); + }, + Err(_) => { + defensive!("Failed to decode published data value"); + }, + } + }, + Ok(None) => { + // Key not published yet - expected. + }, + Err(_) => { + defensive!("Failed to read child storage from relay chain proof"); + }, + } + } + } + } + } + + // Store current roots for next block's comparison. + let bounded_roots: BoundedBTreeMap>, T::MaxPublishers> = + current_roots + .iter() + .filter_map(|(para_id, root)| { + BoundedVec::try_from(root.clone()).ok().map(|bounded_root| (*para_id, bounded_root)) + }) + .collect::>() + .try_into() + .expect("MaxPublishers limit enforced in collect_publisher_roots; qed"); + >::put(bounded_roots); + + (total_handler_weight, total_bytes_decoded) + } + } + + impl ProcessRelayProofKeys for Pallet { + /// Process child trie data from the relay proof. + /// + /// Note: This implementation only processes child trie keys (pubsub data). + /// Main trie keys in the proof are intentionally ignored. 
+ fn process_relay_proof_keys(verified_proof: &RelayChainStateProof) -> Weight { + let (subscriptions, subscriptions_weight) = T::SubscriptionHandler::subscriptions(); + let num_publishers = subscriptions.len() as u32; + let total_keys = subscriptions.iter().map(|(_, keys)| keys.len() as u32).sum(); + + let current_roots = Self::collect_publisher_roots(verified_proof, &subscriptions); + let (handler_weight, total_bytes_decoded) = Self::process_published_data(verified_proof, ¤t_roots, &subscriptions); + + // Return total weight for all operations + subscriptions_weight + .saturating_add(handler_weight) + .saturating_add(T::WeightInfo::process_proof_excluding_handler(num_publishers, total_keys, total_bytes_decoded)) + } + } +} diff --git a/cumulus/pallets/subscriber/src/mock.rs b/cumulus/pallets/subscriber/src/mock.rs new file mode 100644 index 0000000000000..c7120df75f407 --- /dev/null +++ b/cumulus/pallets/subscriber/src/mock.rs @@ -0,0 +1,56 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +#![cfg(test)] + +use super::*; +use cumulus_primitives_core::ParaId; +use frame_support::{derive_impl, parameter_types}; +use sp_runtime::BuildStorage; + +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test { + System: frame_system, + Subscriber: crate, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type Block = Block; +} + +// Test handler that records calls +parameter_types! 
{ + pub static ReceivedData: Vec<(ParaId, Vec, Vec)> = vec![]; + pub static TestSubscriptions: Vec<(ParaId, Vec>)> = vec![]; +} + +pub struct TestHandler; +impl SubscriptionHandler for TestHandler { + fn subscriptions() -> (Vec<(ParaId, Vec>)>, Weight) { + (TestSubscriptions::get(), Weight::zero()) + } + + fn on_data_updated(publisher: ParaId, key: Vec, value: Vec) -> Weight { + ReceivedData::mutate(|d| d.push((publisher, key, value))); + Weight::zero() + } +} + +parameter_types! { + pub const MaxPublishers: u32 = 100; +} + +impl crate::Config for Test { + type SubscriptionHandler = TestHandler; + type WeightInfo = (); + type MaxPublishers = MaxPublishers; +} + +pub fn new_test_ext() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + t.into() +} diff --git a/cumulus/pallets/subscriber/src/test_util.rs b/cumulus/pallets/subscriber/src/test_util.rs new file mode 100644 index 0000000000000..e1b4a8efdd3a0 --- /dev/null +++ b/cumulus/pallets/subscriber/src/test_util.rs @@ -0,0 +1,161 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +use codec::Encode; +use cumulus_primitives_core::ParaId; + +#[cfg(test)] +mod std_proof_builder { + use super::*; + use cumulus_pallet_parachain_system::RelayChainStateProof; + use sp_runtime::StateVersion; + use sp_state_machine::{Backend, TrieBackendBuilder}; + use sp_trie::{PrefixedMemoryDB, StorageProof}; + + /// Build a relay chain state proof with child trie data for multiple publishers. 
+ pub fn build_sproof_with_child_data( + publishers: &[(ParaId, Vec<(Vec, Vec)>)], + ) -> RelayChainStateProof { + use sp_runtime::traits::HashingFor; + + let (db, root) = PrefixedMemoryDB::>::default_with_root(); + let state_version = StateVersion::default(); + let mut backend = TrieBackendBuilder::new(db, root).build(); + + let mut all_proofs = vec![]; + let mut main_trie_updates = vec![]; + + // Process each publisher + for (publisher_para_id, child_data) in publishers { + let child_info = sp_core::storage::ChildInfo::new_default(&(b"pubsub", *publisher_para_id).encode()); + + // Insert child trie data + let child_kv: Vec<_> = child_data.iter().map(|(k, v)| (k.clone(), Some(v.clone()))).collect(); + backend.insert(vec![(Some(child_info.clone()), child_kv)], state_version); + + // Get child trie root and prepare to insert it in main trie + let child_root = backend.child_storage_root(&child_info, core::iter::empty(), state_version).0; + let prefixed_key = child_info.prefixed_storage_key(); + main_trie_updates.push((prefixed_key.to_vec(), Some(child_root.encode()))); + + // Prove child trie keys + let child_keys: Vec<_> = child_data.iter().map(|(k, _)| k.clone()).collect(); + if !child_keys.is_empty() { + let child_proof = sp_state_machine::prove_child_read_on_trie_backend(&backend, &child_info, child_keys) + .expect("prove child read"); + all_proofs.push(child_proof); + } + } + + // Insert all child roots in main trie + backend.insert(vec![(None, main_trie_updates.clone())], state_version); + let root = *backend.root(); + + // Prove all child roots in main trie + let main_keys: Vec<_> = main_trie_updates.iter().map(|(k, _)| k.clone()).collect(); + let main_proof = sp_state_machine::prove_read_on_trie_backend(&backend, main_keys) + .expect("prove read"); + all_proofs.push(main_proof); + + // Merge all proofs + let proof = StorageProof::merge(all_proofs); + + RelayChainStateProof::new(ParaId::from(100), root, proof).expect("valid proof") + } +} + +#[cfg(test)] +pub 
use std_proof_builder::build_sproof_with_child_data; + +/// no_std-compatible proof builder for benchmarks +#[cfg(feature = "runtime-benchmarks")] +pub mod bench_proof_builder { + use super::*; + use alloc::vec::Vec; + use cumulus_pallet_parachain_system::RelayChainStateProof; + use sp_runtime::traits::BlakeTwo256; + use sp_trie::{trie_types::TrieDBMutBuilderV1, recorder_ext::RecorderExt, LayoutV1, MemoryDB, Recorder, StorageProof, TrieDBBuilder, TrieMut}; + use trie_db::Trie; + + /// Record all trie keys + fn record_all_trie_keys( + db: &DB, + root: &sp_trie::TrieHash, + ) -> Result>, sp_std::boxed::Box>> + where + DB: hash_db::HashDBRef, + { + let mut recorder = Recorder::::new(); + let trie = TrieDBBuilder::::new(db, root).with_recorder(&mut recorder).build(); + for x in trie.iter()? { + let (key, _) = x?; + trie.get(&key)?; + } + Ok(recorder.into_raw_storage_proof()) + } + + /// Build relay chain state proof w/ child trie data + pub fn build_sproof_with_child_data( + publishers: &[(ParaId, Vec<(Vec, Vec)>)], + ) -> RelayChainStateProof { + use polkadot_primitives::Hash as RelayHash; + use sp_trie::empty_trie_root; + + // Build child tries and collect roots + let mut child_roots = alloc::vec::Vec::new(); + let mut all_nodes = alloc::vec::Vec::new(); + + for (publisher_para_id, child_data) in publishers { + use hash_db::{HashDB, EMPTY_PREFIX}; + + let empty_root = empty_trie_root::>(); + let mut child_root = empty_root; + let mut child_mdb = MemoryDB::::new(&[]); + // Insert empty trie node so TrieDBMut can find it + child_mdb.insert(EMPTY_PREFIX, &[0u8]); + + { + let mut child_trie = TrieDBMutBuilderV1::::new(&mut child_mdb, &mut child_root).build(); + for (key, value) in child_data { + child_trie.insert(key, value).expect("insert in bench"); + } + } + + // Collect child trie nodes + let child_nodes = record_all_trie_keys::, _>(&child_mdb, &child_root) + .expect("record child trie"); + all_nodes.extend(child_nodes); + + // Store child root for main trie + let 
child_info = sp_core::storage::ChildInfo::new_default(&(b"pubsub", *publisher_para_id).encode()); + let prefixed_key = child_info.prefixed_storage_key(); + child_roots.push((prefixed_key.to_vec(), child_root.encode())); + } + + // Build main trie w/ child roots + use hash_db::{HashDB, EMPTY_PREFIX}; + + let empty_root = empty_trie_root::>(); + let mut main_root = empty_root; + let mut main_mdb = MemoryDB::::new(&[]); + // Insert empty trie node so TrieDBMut can find it + main_mdb.insert(EMPTY_PREFIX, &[0u8]); + + { + let mut main_trie = TrieDBMutBuilderV1::::new(&mut main_mdb, &mut main_root).build(); + for (key, value) in &child_roots { + main_trie.insert(key, value).expect("insert in bench"); + } + } + + // Collect main trie nodes + let main_nodes = record_all_trie_keys::, _>(&main_mdb, &main_root) + .expect("record main trie"); + all_nodes.extend(main_nodes); + + let proof = StorageProof::new(all_nodes); + let root: RelayHash = main_root.into(); + + RelayChainStateProof::new(ParaId::from(100), root, proof).expect("valid proof") + } +} diff --git a/cumulus/pallets/subscriber/src/tests.rs b/cumulus/pallets/subscriber/src/tests.rs new file mode 100644 index 0000000000000..f4667ec55ca89 --- /dev/null +++ b/cumulus/pallets/subscriber/src/tests.rs @@ -0,0 +1,199 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +#![cfg(test)] + +use super::*; +use crate::{mock::*, test_util::build_sproof_with_child_data}; +use codec::Encode; +use cumulus_primitives_core::ParaId; +use frame_support::assert_ok; + +/// Build a relay chain state proof with child trie data for a single publisher. 
+fn build_test_proof( + publisher_para_id: ParaId, + child_data: Vec<(Vec, Vec)>, +) -> cumulus_pallet_parachain_system::RelayChainStateProof { + build_sproof_with_child_data(&[(publisher_para_id, child_data)]) +} + +#[test] +fn process_relay_proof_keys_with_new_data_calls_handler() { + new_test_ext().execute_with(|| { + ReceivedData::set(vec![]); + let publisher = ParaId::from(1000); + let key = vec![0x12, 0x34]; + let value = vec![0xAA, 0xBB].encode(); + + TestSubscriptions::set(vec![(publisher, vec![key.clone()])]); + + let proof = build_test_proof(publisher, vec![(key.clone(), value.clone())]); + + Pallet::::process_relay_proof_keys(&proof); + + let received = ReceivedData::get(); + assert_eq!(received.len(), 1); + assert_eq!(received[0].0, publisher); + assert_eq!(received[0].1, key); + assert_eq!(received[0].2, Vec::::decode(&mut &value[..]).unwrap()); + }); +} + +#[test] +fn process_empty_subscriptions() { + new_test_ext().execute_with(|| { + ReceivedData::set(vec![]); + TestSubscriptions::set(vec![]); + + let proof = build_test_proof(ParaId::from(1000), vec![]); + + Pallet::::process_relay_proof_keys(&proof); + + assert_eq!(ReceivedData::get().len(), 0); + }); +} + +#[test] +fn root_change_triggers_processing() { + new_test_ext().execute_with(|| { + ReceivedData::set(vec![]); + let publisher = ParaId::from(1000); + let key = vec![0x01]; + let value1 = vec![0x11].encode(); + let value2 = vec![0x22].encode(); + + TestSubscriptions::set(vec![(publisher, vec![key.clone()])]); + + // First block + let proof1 = build_test_proof(publisher, vec![(key.clone(), value1.clone())]); + Pallet::::process_relay_proof_keys(&proof1); + assert_eq!(ReceivedData::get().len(), 1); + + // Second block with different value (root changed) + ReceivedData::set(vec![]); + let proof2 = build_test_proof(publisher, vec![(key.clone(), value2.clone())]); + Pallet::::process_relay_proof_keys(&proof2); + + assert_eq!(ReceivedData::get().len(), 1); + assert_eq!(ReceivedData::get()[0].2, 
Vec::::decode(&mut &value2[..]).unwrap()); + }); +} + +#[test] +fn unchanged_root_skips_processing() { + new_test_ext().execute_with(|| { + ReceivedData::set(vec![]); + let publisher = ParaId::from(1000); + let key = vec![0x01]; + let value = vec![0x11].encode(); + + TestSubscriptions::set(vec![(publisher, vec![key.clone()])]); + + // First block + let proof = build_test_proof(publisher, vec![(key.clone(), value.clone())]); + Pallet::::process_relay_proof_keys(&proof); + assert_eq!(ReceivedData::get().len(), 1); + + // Second block with same data (unchanged root) + ReceivedData::set(vec![]); + let proof2 = build_test_proof(publisher, vec![(key.clone(), value)]); + Pallet::::process_relay_proof_keys(&proof2); + + assert_eq!(ReceivedData::get().len(), 0, "Handler should not be called for unchanged root"); + }); +} + +#[test] +fn clear_stored_roots_extrinsic() { + new_test_ext().execute_with(|| { + let publisher = ParaId::from(1000); + TestSubscriptions::set(vec![(publisher, vec![vec![0x01]])]); + + // Store a root for the publisher + let proof = build_test_proof(publisher, vec![(vec![0x01], vec![0x11].encode())]); + Pallet::::process_relay_proof_keys(&proof); + + // Verify root is stored + assert!(PreviousPublishedDataRoots::::get().contains_key(&publisher)); + + // Clear the publisher's root + assert_ok!(Pallet::::clear_stored_roots( + frame_system::RawOrigin::Root.into(), + publisher + )); + + // Verify the root was cleared + assert!(!PreviousPublishedDataRoots::::get().contains_key(&publisher)); + }); +} + +#[test] +fn clear_stored_roots_only_clears_specified_publisher() { + new_test_ext().execute_with(|| { + let publisher1 = ParaId::from(1000); + let publisher2 = ParaId::from(2000); + + // Manually set up storage with 2 publisher roots + let mut roots = BoundedBTreeMap::new(); + roots.try_insert(publisher1, BoundedVec::try_from(vec![0u8; 32]).unwrap()).unwrap(); + roots.try_insert(publisher2, BoundedVec::try_from(vec![1u8; 32]).unwrap()).unwrap(); + 
PreviousPublishedDataRoots::::put(roots); + + assert_eq!(PreviousPublishedDataRoots::::get().len(), 2); + + // Clear only publisher1's root + assert_ok!(Pallet::::clear_stored_roots( + frame_system::RawOrigin::Root.into(), + publisher1 + )); + + // Publisher1's root should be cleared, but publisher2's should remain + let roots = PreviousPublishedDataRoots::::get(); + assert_eq!(roots.len(), 1); + assert!(!roots.contains_key(&publisher1)); + assert!(roots.contains_key(&publisher2)); + }); +} + +#[test] +fn clear_stored_roots_fails_if_not_found() { + use frame_support::assert_noop; + + new_test_ext().execute_with(|| { + let publisher = ParaId::from(1000); + + // Try to clear root for publisher that doesn't exist + assert_noop!( + Pallet::::clear_stored_roots(frame_system::RawOrigin::Root.into(), publisher), + Error::::PublisherRootNotFound + ); + }); +} + +#[test] +fn data_processed_event_emitted() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let publisher = ParaId::from(1000); + let key = vec![0x12]; + let value = vec![0xAA].encode(); + + TestSubscriptions::set(vec![(publisher, vec![key.clone()])]); + + let proof = build_test_proof(publisher, vec![(key.clone(), value.clone())]); + Pallet::::process_relay_proof_keys(&proof); + + // value_size is the decoded Vec length, not the encoded length + let decoded_len = Vec::::decode(&mut &value[..]).unwrap().len() as u32; + + System::assert_has_event( + Event::DataProcessed { + publisher, + key: key.try_into().unwrap(), + value_size: decoded_len, + } + .into(), + ); + }); +} diff --git a/cumulus/pallets/subscriber/src/weights.rs b/cumulus/pallets/subscriber/src/weights.rs new file mode 100644 index 0000000000000..2a8774f12dce5 --- /dev/null +++ b/cumulus/pallets/subscriber/src/weights.rs @@ -0,0 +1,137 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Generated weights for `cumulus_pallet_subscriber` +//! +//! THESE WEIGHTS WERE GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-12-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `asset-hub-westend`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `asset-hub-westend-dev`, DB CACHE: `1024` + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --pallet +// cumulus-pallet-subscriber +// --chain +// asset-hub-westend-dev +// --output +// cumulus/pallets/subscriber/src/weights.rs +// --template +// substrate/.maintain/frame-weight-template.hbs +// --extrinsic +// + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] +#![allow(dead_code)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `cumulus_pallet_subscriber`. +pub trait WeightInfo { + fn collect_publisher_roots(n: u32) -> Weight; + fn process_published_data(n: u32, k: u32, s: u32) -> Weight; + fn clear_stored_roots() -> Weight; + + /// Weight for processing relay proof excluding handler execution. + /// Benchmarked with no-op handler. Handler weights are added at runtime. 
+ /// + /// Parameters: + /// - `num_publishers`: Number of publishers being processed + /// - `num_keys`: Total number of keys across all publishers + /// - `total_bytes`: Total bytes of data being decoded + fn process_proof_excluding_handler(num_publishers: u32, num_keys: u32, total_bytes: u32) -> Weight { + Self::collect_publisher_roots(num_publishers) + .saturating_add(Self::process_published_data(num_publishers, num_keys, total_bytes)) + } +} + +/// Weights for `cumulus_pallet_subscriber` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// The range of component `n` is `[1, 100]`. + fn collect_publisher_roots(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_000_000 picoseconds. + Weight::from_parts(1_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_289 + .saturating_add(Weight::from_parts(1_853_718, 0).saturating_mul(n.into())) + } + /// Storage: `Subscriber::PreviousPublishedDataRoots` (r:1 w:1) + /// Proof: `Subscriber::PreviousPublishedDataRoots` (`max_values`: Some(1), `max_size`: Some(3702), added: 4197, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 100]`. + /// The range of component `k` is `[1, 10]`. + /// The range of component `s` is `[1, 2048]`. + fn process_published_data(n: u32, k: u32, _s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `5187` + // Minimum execution time: 51_000_000 picoseconds. 
+ Weight::from_parts(51_000_000, 0) + .saturating_add(Weight::from_parts(0, 5187)) + // Standard Error: 448_042 + .saturating_add(Weight::from_parts(33_087_314, 0).saturating_mul(n.into())) + // Standard Error: 4_535_424 + .saturating_add(Weight::from_parts(311_706_924, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Subscriber::PreviousPublishedDataRoots` (r:1 w:1) + /// Proof: `Subscriber::PreviousPublishedDataRoots` (`max_values`: Some(1), `max_size`: Some(3702), added: 4197, mode: `MaxEncodedLen`) + fn clear_stored_roots() -> Weight { + // Proof Size summary in bytes: + // Measured: `206` + // Estimated: `5187` + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(9_000_000, 0) + .saturating_add(Weight::from_parts(0, 5187)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + fn collect_publisher_roots(n: u32) -> Weight { + Weight::from_parts(1_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(Weight::from_parts(1_853_718, 0).saturating_mul(n.into())) + } + + fn process_published_data(n: u32, k: u32, _s: u32) -> Weight { + Weight::from_parts(51_000_000, 0) + .saturating_add(Weight::from_parts(0, 5187)) + .saturating_add(Weight::from_parts(33_087_314, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(311_706_924, 0).saturating_mul(k.into())) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + + fn clear_stored_roots() -> Weight { + Weight::from_parts(9_000_000, 0) + .saturating_add(Weight::from_parts(0, 5187)) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(1)) + } +} diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index 
3be87221c052e..1e32c9003a948 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -106,6 +106,7 @@ impl cumulus_pallet_parachain_system::Config for Test { type CheckAssociatedRelayNumber = AnyRelayNumber; type ConsensusHook = cumulus_pallet_parachain_system::consensus_hook::ExpectParentIncluded; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } parameter_types! { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 85386572af623..42732402b0ede 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -744,6 +744,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs index 3dc3e82a62ff9..f2a13347eaac0 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs @@ -271,4 +271,8 @@ impl XcmWeightInfo for AssetHubRococoXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } + fn publish(_data: &PublishData) -> Weight { + // AssetHubRococo does not currently support Publish operations + Weight::MAX + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index 66ffddf5c8339..5fc891d3c14be 100644 --- 
a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -416,6 +416,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } /// Converts a local signed origin into an XCM location. Forms the basis for local origins diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index ebbc143e4d9cb..f9e7cfe040e50 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -903,6 +903,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs index 27532ac431e7a..ad08905dd60e5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs @@ -302,4 +302,8 @@ impl XcmWeightInfo for AssetHubWestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } + fn publish(_data: &PublishData) -> Weight { + // AssetHubWestend does not currently support Publish operations + Weight::MAX + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index efeca0fede196..159f7517f60cd 100644 --- 
a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -472,6 +472,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } parameter_types! { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index f61813c49a2f2..af62c41ccf6d1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -400,6 +400,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs index 21708ec743821..505da0d89d915 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs @@ -272,4 +272,8 @@ impl XcmWeightInfo for BridgeHubRococoXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } + fn publish(_data: &PublishData) -> Weight { + // BridgeHubRococo does not currently support Publish operations + Weight::MAX + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index 8a661ed53236e..f316e7437736c 100644 --- 
a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -240,6 +240,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } pub type PriceForParentDelivery = diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 65ca1b2a4b49d..50ac7616c4305 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -390,6 +390,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs index 3706bfe22a3c8..8857f83b53a06 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs @@ -272,4 +272,8 @@ impl XcmWeightInfo for BridgeHubWestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } + fn publish(_data: &PublishData) -> Weight { + // BridgeHubWestend does not currently support Publish operations + Weight::MAX + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs index d1b1e78ef8343..0d08e25f911a4 100644 --- 
a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs @@ -251,6 +251,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } pub type PriceForParentDelivery = diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index d1f0577c70497..e7b296c3b64dc 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -423,6 +423,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs index 7c44ce449383f..85e4268db2213 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs @@ -270,4 +270,8 @@ impl XcmWeightInfo for CollectivesWestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } + fn publish(_data: &PublishData) -> Weight { + // CollectivesWestend does not currently support Publish operations + Weight::MAX + } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs index b3a7f2bd9af05..91c67bc8e641c 
100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -258,6 +258,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } /// Converts a local signed origin into an XCM location. Forms the basis for local origins diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index a652f560e06af..7290beaed9e63 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -304,6 +304,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs index 75e0908cb395d..7f21c91ee4a83 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs @@ -269,4 +269,8 @@ impl XcmWeightInfo for CoretimeWestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } + fn publish(_data: &PublishData) -> Weight { + // CoretimeWestend does not currently support Publish operations + Weight::MAX + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs index 391972f24572c..1fe19b7c953b4 
100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs @@ -272,6 +272,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } /// Converts a local signed origin into an XCM location. Forms the basis for local origins diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index ac563cb4912dd..7b45c087d149a 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -190,6 +190,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } parameter_types! 
{ diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs index f32cb211444c2..53dc0c85c2422 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs @@ -98,6 +98,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = (); + type BroadcastHandler = (); } impl cumulus_pallet_xcm::Config for Runtime { diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index ec4cf642d8fef..5daf12f576fad 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -281,6 +281,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs index a7d394b603b2f..f9206b53b1781 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs @@ -268,4 +268,8 @@ impl XcmWeightInfo for PeopleWestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } + fn publish(_data: &PublishData) -> Weight { + // PeopleWestend does not currently support Publish operations + Weight::MAX + } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs 
b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs index e5203f39c8814..c41a07142be97 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs @@ -278,6 +278,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } /// Converts a local signed origin into an XCM location. Forms the basis for local origins diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 01f6dd1700d0d..a97c9d023feb6 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -664,6 +664,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { >; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } impl parachain_info::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index f8a9cdbdf56c8..e92e8c8b49166 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -441,6 +441,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } /// Multiplier used for dedicated `TakeFirstAssetTrader` with `ForeignAssets` instance. 
diff --git a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs index 92df3d950cb0e..f5243379ebad7 100644 --- a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs @@ -366,6 +366,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32; + type RelayProofKeysProcessor = (); } impl pallet_message_queue::Config for Runtime { diff --git a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/xcm_config.rs index c1b83f5dbd74e..4d783f6fe6739 100644 --- a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/xcm_config.rs @@ -165,6 +165,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } /// No local origins on this chain are allowed to dispatch XCM sends/executions. 
diff --git a/cumulus/polkadot-omni-node/lib/src/common/aura.rs b/cumulus/polkadot-omni-node/lib/src/common/aura.rs index 9ca725ff3279a..10a631306b33a 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/aura.rs @@ -18,6 +18,7 @@ use codec::Codec; use cumulus_primitives_aura::AuraUnincludedSegmentApi; +use cumulus_primitives_core::KeyToIncludeInRelayProofApi; use sp_consensus_aura::AuraApi; use sp_runtime::{ app_crypto::{AppCrypto, AppPair, AppSignature, Pair}, @@ -53,6 +54,7 @@ pub trait AuraRuntimeApi: sp_api::ApiExt + AuraApi::Public> + AuraUnincludedSegmentApi + + KeyToIncludeInRelayProofApi + Sized { /// Check if the runtime has the Aura API. @@ -66,5 +68,6 @@ impl AuraRuntimeApi for T wher T: sp_api::ApiExt + AuraApi::Public> + AuraUnincludedSegmentApi + + KeyToIncludeInRelayProofApi { } diff --git a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs index f609f13446354..26bb164874ce5 100644 --- a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs +++ b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs @@ -174,6 +174,13 @@ macro_rules! impl_node_runtime_apis { unimplemented!() } } + + impl cumulus_primitives_core::KeyToIncludeInRelayProofApi<$block> for $runtime { + fn keys_to_prove() -> cumulus_primitives_core::RelayProofRequest { + unimplemented!() + } + } + #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime<$block> for $runtime { fn on_runtime_upgrade( diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 774961b6b7e6b..58dc02fd75279 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -466,6 +466,32 @@ pub struct CollationInfo { pub head_data: HeadData, } +/// A relay chain storage key to be included in the storage proof. 
+#[derive(Clone, Debug, Encode, Decode, TypeInfo, PartialEq, Eq)] +pub enum RelayStorageKey { + /// Top-level relay chain storage key. + Top(Vec), + /// Child trie storage key. + Child { + /// Unprefixed storage key identifying the child trie root location. + /// Prefix `:child_storage:default:` is added when accessing storage. + /// Used to derive `ChildInfo` for reading child trie data. + storage_key: Vec, + /// Key within the child trie. + key: Vec, + }, +} + +/// Request for proving relay chain storage data. +/// +/// Contains a list of storage keys (either top-level or child trie keys) +/// to be included in the relay chain state proof. +#[derive(Clone, Debug, Encode, Decode, TypeInfo, PartialEq, Eq, Default)] +pub struct RelayProofRequest { + /// Storage keys to include in the relay chain state proof. + pub keys: Vec, +} + sp_api::decl_runtime_apis! { /// Runtime api to collect information about a collation. /// @@ -513,4 +539,21 @@ sp_api::decl_runtime_apis! { /// Returns the target number of blocks per relay chain slot. fn target_block_rate() -> u32; } + + /// API for specifying which relay chain storage data to include in storage proofs. + /// + /// This API allows parachains to request both top-level relay chain storage keys + /// and child trie storage keys to be included in the relay chain state proof. + pub trait KeyToIncludeInRelayProofApi { + /// Returns relay chain storage proof requests. + /// + /// The returned `RelayProofRequest` contains a list of storage keys where each key + /// can be either: + /// - `RelayStorageKey::Top`: Top-level relay chain storage key + /// - `RelayStorageKey::Child`: Child trie storage, containing the child trie identifier + /// and the key to prove from that child trie + /// + /// The collator generates proofs for these and includes them in the relay chain state proof. 
+ fn keys_to_prove() -> RelayProofRequest; + } } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 8acbc49a33834..99222191f4c00 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -385,6 +385,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32; + type RelayProofKeysProcessor = (); } impl parachain_info::Config for Runtime {} @@ -641,6 +642,12 @@ impl_runtime_apis! { 1 } } + + impl cumulus_primitives_core::KeyToIncludeInRelayProofApi for Runtime { + fn keys_to_prove() -> cumulus_primitives_core::RelayProofRequest { + Default::default() + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs b/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs new file mode 100644 index 0000000000000..58831cce8515a --- /dev/null +++ b/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs @@ -0,0 +1,108 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +#![cfg(feature = "runtime-benchmarks")] + +use super::{Pallet as Broadcaster, *}; +use frame_benchmarking::v2::*; +use frame_support::traits::fungible::{Inspect as FunInspect, Mutate}; +use polkadot_primitives::Id as ParaId; +use sp_core::hashing::blake2_256; + +type BalanceOf = + <::Currency as FunInspect<::AccountId>>::Balance; + +#[benchmarks] +mod benchmarks { + use super::*; + use alloc::vec; + use frame_system::RawOrigin; + + #[benchmark] + fn register_publisher() { + let caller: T::AccountId = whitelisted_caller(); + let para_id = ParaId::from(2000); + let deposit = T::PublisherDeposit::get(); + + T::Currency::set_balance(&caller, deposit * 2u32.into()); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), para_id); + + assert!(RegisteredPublishers::::contains_key(para_id)); + } + + #[benchmark] + fn force_register_publisher() { + let manager: T::AccountId = whitelisted_caller(); + let para_id = ParaId::from(1000); + let deposit = BalanceOf::::from(0u32); + + #[extrinsic_call] + _(RawOrigin::Root, manager.clone(), deposit, para_id); + + assert!(RegisteredPublishers::::contains_key(para_id)); + } + + #[benchmark] + fn do_cleanup_publisher(k: Linear<1, { T::MaxStoredKeys::get() }>) { + let caller: T::AccountId = whitelisted_caller(); + let para_id = ParaId::from(2000); + let deposit = T::PublisherDeposit::get(); + + T::Currency::set_balance(&caller, deposit * 2u32.into()); + Broadcaster::::register_publisher(RawOrigin::Signed(caller).into(), para_id) + .unwrap(); + + // Calculate max value size to stay within MaxTotalStorageSize and MaxValueLength + // Total size = sum of (32 bytes key + value_len) for all keys + let max_total_size = T::MaxTotalStorageSize::get() as usize; + let max_value_length = T::MaxValueLength::get() as usize; + let key_size = 32usize; + let max_value_size = (max_total_size / k as usize) + .saturating_sub(key_size) + .min(max_value_length) + .max(1); + let value = vec![0u8; max_value_size]; + + // Publish k keys in batches to 
respect MaxPublishItems limit + let max_items = T::MaxPublishItems::get(); + for batch_start in (0..k).step_by(max_items as usize) { + let batch_end = (batch_start + max_items).min(k); + let mut data = Vec::new(); + for i in batch_start..batch_end { + let mut key_data = b"key_".to_vec(); + key_data.extend_from_slice(&i.to_be_bytes()); + let key = blake2_256(&key_data); + data.push((key, value.clone())); + } + Broadcaster::::handle_publish(para_id, data).unwrap(); + } + + #[block] + { + Broadcaster::::do_cleanup_publisher(para_id).unwrap(); + } + + assert!(!PublisherExists::::get(para_id)); + } + + impl_benchmark_test_suite!( + Broadcaster, + crate::mock::new_test_ext(Default::default()), + crate::mock::Test + ); +} diff --git a/polkadot/runtime/parachains/src/broadcaster/mod.rs b/polkadot/runtime/parachains/src/broadcaster/mod.rs new file mode 100644 index 0000000000000..d8edfe5ee9fd5 --- /dev/null +++ b/polkadot/runtime/parachains/src/broadcaster/mod.rs @@ -0,0 +1,732 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Broadcaster pallet for managing parachain data publishing. +//! +//! This pallet provides a publishing mechanism for parachains to efficiently share data +//! through the relay chain storage using child tries per publisher. +//! +//! ## Publisher Registration +//! +//! 
Parachains must register before they can publish data: +//! +//! - System parachains (ID < 2000): Registered via `force_register_publisher` (Root origin) +//! with custom deposit amounts (typically zero). +//! - Public parachains (ID >= 2000): Registered via `register_publisher` requiring a deposit. +//! +//! The deposit is held using the native fungible traits with the `PublisherDeposit` hold reason. +//! +//! ## Storage Organization +//! +//! Each publisher gets a dedicated child trie identified by `(b"pubsub", ParaId)`. The child +//! trie root is stored on-chain and can be included in storage proofs to verify +//! published data. +//! +//! Published data uses: +//! - Keys: 32-byte hashes (fixed size) +//! - Values: Bounded by `MaxValueLength` +//! - Total storage limit: `MaxTotalStorageSize` per publisher +//! +//! The total storage size is calculated as the sum of all (32-byte key + value length) pairs. +//! +//! ## Storage Lifecycle +//! +//! Publishers can deregister to reclaim their deposit and remove their data: +//! +//! 1. Call `cleanup_published_data` to remove all published key-value pairs from the child trie +//! 2. Call `deregister_publisher` to release the deposit and complete deregistration +//! +//! Root can force deregistration with `force_deregister_publisher`, which removes all data +//! and releases the deposit in a single call. 
+ +use alloc::vec::Vec; +use codec::{Decode, Encode}; +use frame_support::{ + pallet_prelude::*, + storage::child::ChildInfo, + traits::{ + defensive_prelude::*, + fungible::{ + hold::{Balanced as FunHoldBalanced, Mutate as FunHoldMutate}, + Inspect as FunInspect, Mutate as FunMutate, + }, + tokens::Precision::Exact, + Get, + }, +}; +use frame_system::{ensure_root, ensure_signed, pallet_prelude::BlockNumberFor}; +use polkadot_primitives::Id as ParaId; +use scale_info::TypeInfo; +use sp_runtime::traits::Zero; + +pub use pallet::*; + +mod traits; +pub use traits::Publish; + +pub mod weights; +pub use weights::WeightInfo; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + +#[cfg(test)] +mod tests; + +/// Information about a registered publisher. +#[derive(Encode, Decode, Clone, PartialEq, Eq, Debug, TypeInfo, MaxEncodedLen)] +pub struct PublisherInfo { + /// The account that registered and manages this publisher. + pub manager: AccountId, + /// The amount held as deposit for registration. + pub deposit: Balance, +} + + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_system::pallet_prelude::*; + + const STORAGE_VERSION: StorageVersion = StorageVersion::new(0); + + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); + + /// Reasons for the pallet placing a hold on funds. + #[pallet::composite_enum] + pub enum HoldReason { + /// The funds are held as deposit for publisher registration. + #[codec(index = 0)] + PublisherDeposit, + } + + type BalanceOf = + <::Currency as FunInspect<::AccountId>>::Balance; + + #[pallet::config] + pub trait Config: frame_system::Config>> { + /// Currency mechanism for managing publisher deposits. + type Currency: FunHoldMutate + + FunMutate + + FunHoldBalanced; + + /// Overarching hold reason. + type RuntimeHoldReason: From; + + /// Weight information for extrinsics and operations. 
+ type WeightInfo: WeightInfo; + + /// Maximum number of items that can be published in a single operation. + /// + /// Must not exceed `xcm::v5::MaxPublishItems`. + #[pallet::constant] + type MaxPublishItems: Get; + + /// Maximum length of a published value in bytes. + /// + /// Must not exceed `xcm::v5::MaxPublishValueLength`. + #[pallet::constant] + type MaxValueLength: Get; + + /// Maximum number of unique keys a publisher can store. + #[pallet::constant] + type MaxStoredKeys: Get; + + /// Maximum total storage size per publisher in bytes. + /// + /// This is the sum of all (32-byte key + value) pairs. + /// Typically set to ~2048 bytes (2 KiB) to limit storage overhead per publisher. + #[pallet::constant] + type MaxTotalStorageSize: Get; + + /// Maximum number of parachains that can register as publishers. + #[pallet::constant] + type MaxPublishers: Get; + + /// The deposit required for a parachain to register as a publisher. + /// + /// System parachains may use `force_register_publisher` with a custom deposit amount. + #[pallet::constant] + type PublisherDeposit: Get>; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Data published by a parachain. + DataPublished { publisher: ParaId, items_count: u32 }, + /// A publisher has been registered. + PublisherRegistered { para_id: ParaId, manager: T::AccountId }, + /// A publisher has been deregistered. + PublisherDeregistered { para_id: ParaId }, + /// Published data has been cleaned up. + DataCleanedUp { para_id: ParaId }, + } + + /// Registered publishers and their deposit information. + /// + /// Parachains must be registered before they can publish data. The registration includes + /// information about the managing account and the deposit held for the registration. + #[pallet::storage] + pub type RegisteredPublishers = StorageMap< + _, + Twox64Concat, + ParaId, + PublisherInfo>, + OptionQuery, + >; + + /// Tracks which parachains have published data. 
+ /// + /// Maps parachain ID to a boolean indicating whether they have a child trie. + /// The actual child trie info is derived deterministically from the ParaId. + #[pallet::storage] + pub type PublisherExists = StorageMap< + _, + Twox64Concat, + ParaId, + bool, + ValueQuery, + >; + + /// Tracks all published keys per parachain. + #[pallet::storage] + pub type PublishedKeys = StorageMap< + _, + Twox64Concat, + ParaId, + BoundedBTreeSet<[u8; 32], T::MaxStoredKeys>, + ValueQuery, + >; + + /// Total storage size in bytes for each publisher. + /// + /// Calculated as the sum of all (32-byte key + value length) pairs. + #[pallet::storage] + pub type TotalStorageSize = StorageMap< + _, + Twox64Concat, + ParaId, + u32, + ValueQuery, + >; + + + #[pallet::error] + pub enum Error { + /// Too many items in a single publish operation. + TooManyPublishItems, + /// Value length exceeds maximum allowed. + ValueTooLong, + /// Too many unique keys stored for this publisher. + TooManyStoredKeys, + /// Total storage size exceeds maximum allowed for this publisher. + TotalStorageSizeExceeded, + /// Maximum number of publishers reached. + TooManyPublishers, + /// Para is not registered as a publisher. + NotRegistered, + /// Para is already registered as a publisher. + AlreadyRegistered, + /// Cannot publish without being registered first. + PublishNotAuthorized, + /// Caller is not authorized to perform this action. + NotAuthorized, + /// Cannot deregister while published data exists. Call cleanup_published_data first. + MustCleanupDataFirst, + /// No published data to cleanup. + NoDataToCleanup, + /// Cannot publish empty data. 
+ EmptyPublish, + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + assert!( + T::MaxPublishItems::get() <= xcm::v5::MaxPublishItems::get(), + "Broadcaster MaxPublishItems exceeds XCM MaxPublishItems upper bound" + ); + assert!( + T::MaxValueLength::get() <= xcm::v5::MaxPublishValueLength::get(), + "Broadcaster MaxValueLength exceeds XCM MaxPublishValueLength upper bound" + ); + } + } + + #[pallet::call] + impl Pallet { + /// Register a parachain as a publisher with the calling account as manager. + /// + /// Requires `PublisherDeposit` to be held from the caller's account. + /// + /// Parameters: + /// - `origin`: Signed origin that will become the publisher manager and pay the deposit. + /// - `para_id`: The parachain to register as a publisher. + /// + /// Errors: + /// - `AlreadyRegistered` + /// - `InsufficientBalance` (from Currency trait) + /// + /// Events: + /// - `PublisherRegistered` + #[pallet::call_index(0)] + #[pallet::weight(::WeightInfo::register_publisher())] + pub fn register_publisher( + origin: OriginFor, + para_id: ParaId, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + Self::do_register_publisher(who, para_id, T::PublisherDeposit::get()) + } + + /// Register a parachain as a publisher with a custom deposit amount. + /// + /// Allows Root to register system parachains with zero or reduced deposits. + /// + /// Parameters: + /// - `origin`: Root origin. + /// - `manager`: Account that will manage the publisher. + /// - `deposit`: Custom deposit amount to hold (typically zero for system parachains). + /// - `para_id`: The parachain to register as a publisher. 
+ /// + /// Errors: + /// - `AlreadyRegistered` + /// - `InsufficientBalance` (from Currency trait if deposit is non-zero) + /// + /// Events: + /// - `PublisherRegistered` + #[pallet::call_index(1)] + #[pallet::weight(::WeightInfo::force_register_publisher())] + pub fn force_register_publisher( + origin: OriginFor, + manager: T::AccountId, + deposit: BalanceOf, + para_id: ParaId, + ) -> DispatchResult { + ensure_root(origin)?; + Self::do_register_publisher(manager, para_id, deposit) + } + + /// Remove all published data for a parachain. + /// + /// Must be called before `deregister_publisher`. Only callable by the publisher manager. + /// + /// Parameters: + /// - `origin`: Signed origin, must be the publisher manager. + /// - `para_id`: The parachain to clean up. + /// + /// Errors: + /// - `NotRegistered` + /// - `NotAuthorized` + /// - `NoDataToCleanup` + /// + /// Events: + /// - `DataCleanedUp` + #[pallet::call_index(2)] + #[pallet::weight( + ::WeightInfo::do_cleanup_publisher(T::MaxStoredKeys::get()) + .saturating_add(T::DbWeight::get().reads(2)) + )] + pub fn cleanup_published_data( + origin: OriginFor, + para_id: ParaId, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + let info = RegisteredPublishers::::get(para_id) + .ok_or(Error::::NotRegistered)?; + + ensure!(who == info.manager, Error::::NotAuthorized); + ensure!(PublisherExists::::get(para_id), Error::::NoDataToCleanup); + + let actual_keys = PublishedKeys::::get(para_id).len() as u32; + Self::do_cleanup_publisher(para_id)?; + + Self::deposit_event(Event::DataCleanedUp { para_id }); + + Ok(Some( + ::WeightInfo::do_cleanup_publisher(actual_keys) + .saturating_add(T::DbWeight::get().reads(2)) + ).into()) + } + + /// Deregister a publisher and release their deposit. + /// + /// All published data must be cleaned up first via `cleanup_published_data`. + /// + /// Parameters: + /// - `origin`: Signed origin, must be the publisher manager. 
+ /// - `para_id`: The parachain to deregister. + /// + /// Errors: + /// - `NotRegistered` + /// - `NotAuthorized` + /// - `MustCleanupDataFirst` + /// + /// Events: + /// - `PublisherDeregistered` + #[pallet::call_index(3)] + #[pallet::weight(T::DbWeight::get().reads_writes(2, 1))] + pub fn deregister_publisher( + origin: OriginFor, + para_id: ParaId, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + + let info = RegisteredPublishers::::get(para_id) + .ok_or(Error::::NotRegistered)?; + + ensure!(who == info.manager, Error::::NotAuthorized); + ensure!(!PublisherExists::::get(para_id), Error::::MustCleanupDataFirst); + + Self::do_deregister(para_id, info)?; + + Self::deposit_event(Event::PublisherDeregistered { para_id }); + Ok(()) + } + + /// Force deregister a publisher, cleaning up data if necessary. + /// + /// Combines cleanup and deregistration in a single call. Only callable by Root. + /// + /// Parameters: + /// - `origin`: Root origin. + /// - `para_id`: The parachain to force deregister. + /// + /// Errors: + /// - `NotRegistered` + /// + /// Events: + /// - `DataCleanedUp` (if data existed) + /// - `PublisherDeregistered` + #[pallet::call_index(4)] + #[pallet::weight( + ::WeightInfo::do_cleanup_publisher(T::MaxStoredKeys::get()) + .saturating_add(T::DbWeight::get().reads_writes(2, 1)) + )] + pub fn force_deregister_publisher( + origin: OriginFor, + para_id: ParaId, + ) -> DispatchResult { + ensure_root(origin)?; + + let info = RegisteredPublishers::::get(para_id) + .ok_or(Error::::NotRegistered)?; + + // Clean up data if it exists + if PublisherExists::::get(para_id) { + Self::do_cleanup_publisher(para_id)?; + Self::deposit_event(Event::DataCleanedUp { para_id }); + } + + Self::do_deregister(para_id, info)?; + + Self::deposit_event(Event::PublisherDeregistered { para_id }); + Ok(()) + } + } + + impl Pallet { + /// Register a publisher, holding the deposit from the manager account. 
+ fn do_register_publisher( + manager: T::AccountId, + para_id: ParaId, + deposit: BalanceOf, + ) -> DispatchResult { + // Check not already registered + ensure!( + !RegisteredPublishers::::contains_key(para_id), + Error::::AlreadyRegistered + ); + + // Enforce MaxPublishers limit at registration time + let current_count = RegisteredPublishers::::iter().count() as u32; + ensure!(current_count < T::MaxPublishers::get(), Error::::TooManyPublishers); + + // Hold the deposit if non-zero + if !deposit.is_zero() { + ::Currency::hold( + &HoldReason::PublisherDeposit.into(), + &manager, + deposit, + )?; + } + + let info = PublisherInfo { manager: manager.clone(), deposit }; + + RegisteredPublishers::::insert(para_id, info); + Self::deposit_event(Event::PublisherRegistered { para_id, manager }); + + Ok(()) + } + + pub(crate) fn do_cleanup_publisher(para_id: ParaId) -> DispatchResult { + let child_info = Self::derive_child_info(para_id); + let published_keys = PublishedKeys::::get(para_id); + + // Remove all key-value pairs from the child trie + for key in published_keys.iter() { + frame_support::storage::child::kill(&child_info, key); + } + + // Clean up tracking storage + PublishedKeys::::remove(para_id); + TotalStorageSize::::remove(para_id); + PublisherExists::::remove(para_id); + + Ok(()) + } + + fn do_deregister( + para_id: ParaId, + info: PublisherInfo>, + ) -> DispatchResult { + // Release deposit if non-zero + if !info.deposit.is_zero() { + let released = ::Currency::release( + &HoldReason::PublisherDeposit.into(), + &info.manager, + info.deposit, + Exact, + )?; + + defensive_assert!( + released == info.deposit, + "deposit should be fully released" + ); + } + + // Remove registration + RegisteredPublishers::::remove(para_id); + + Ok(()) + } + + /// Called by the initializer to note that a new session has started. 
+ pub(crate) fn initializer_on_new_session( + _notification: &crate::initializer::SessionChangeNotification>, + outgoing_paras: &[ParaId], + ) -> Weight { + Self::cleanup_outgoing_publishers(outgoing_paras) + } + + /// Remove all storage for offboarded parachains. + fn cleanup_outgoing_publishers(outgoing: &[ParaId]) -> Weight { + let mut total_weight = Weight::zero(); + for outgoing_para in outgoing { + total_weight = total_weight.saturating_add(Self::cleanup_outgoing_publisher(outgoing_para)); + } + total_weight + } + + /// Remove all relevant storage items for an outgoing parachain. + fn cleanup_outgoing_publisher(outgoing_para: &ParaId) -> Weight { + if let Some(info) = RegisteredPublishers::::get(outgoing_para) { + let weight = if PublisherExists::::get(outgoing_para) { + let published_keys = PublishedKeys::::get(outgoing_para); + let key_count = published_keys.len() as u32; + let _ = Self::do_cleanup_publisher(*outgoing_para); + ::WeightInfo::do_cleanup_publisher(key_count) + } else { + Weight::zero() + }; + + let _ = Self::do_deregister(*outgoing_para, info); + + // Account for reads (RegisteredPublishers, PublisherExists) and writes (deregister) + return weight + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)); + } + T::DbWeight::get().reads(1) // Just the RegisteredPublishers read + } + + /// Processes a publish operation from a parachain. + /// + /// Validates the publisher is registered, checks all bounds, and stores the provided + /// key-value pairs in the publisher's dedicated child trie. Updates the child trie root + /// and published keys tracking. + /// + /// Keys must be 32-byte hashes. 
+	pub fn handle_publish(
+		origin_para_id: ParaId,
+		data: Vec<([u8; 32], Vec<u8>)>,
+	) -> DispatchResult {
+		// Check publisher is registered
+		ensure!(
+			RegisteredPublishers::<T>::contains_key(origin_para_id),
+			Error::<T>::PublishNotAuthorized
+		);
+
+		// Reject empty publishes to avoid wasting execution weight
+		ensure!(!data.is_empty(), Error::<T>::EmptyPublish);
+
+		let items_count = data.len() as u32;
+
+		// Validate input limits first before making any changes
+		ensure!(
+			data.len() <= T::MaxPublishItems::get() as usize,
+			Error::<T>::TooManyPublishItems
+		);
+
+		// Validate all values before creating publisher entry
+		for (_key, value) in &data {
+			ensure!(
+				value.len() <= T::MaxValueLength::get() as usize,
+				Error::<T>::ValueTooLong
+			);
+		}
+
+		let mut published_keys = PublishedKeys::<T>::get(origin_para_id);
+		let current_total_size = TotalStorageSize::<T>::get(origin_para_id);
+
+		// Count new unique keys to prevent exceeding MaxStoredKeys.
+		// NOTE(review): a key repeated within one `data` batch is counted once per
+		// occurrence here (published_keys is not updated inside the loop), which is
+		// conservative for the MaxStoredKeys check — TODO confirm callers dedup.
+		let mut new_keys_count = 0u32;
+		for (key, _) in &data {
+			if !published_keys.contains(key) {
+				new_keys_count += 1;
+			}
+		}
+
+		let current_keys_count = published_keys.len() as u32;
+		ensure!(
+			current_keys_count.saturating_add(new_keys_count) <= T::MaxStoredKeys::get(),
+			Error::<T>::TooManyStoredKeys
+		);
+
+		// Calculate storage delta: each item is 32 bytes (key) + value length
+		let child_info = Self::derive_child_info(origin_para_id);
+		let mut size_delta: i64 = 0;
+
+		for (key, value) in &data {
+			// 32 bytes for the hash key
+			let new_size = 32u32.saturating_add(value.len() as u32);
+
+			// If key already exists, subtract old value size
+			if let Some(old_value) =
+				frame_support::storage::child::get::<Vec<u8>>(&child_info, key)
+			{
+				let old_size = 32u32.saturating_add(old_value.len() as u32);
+				size_delta =
+					size_delta.saturating_add(new_size as i64).saturating_sub(old_size as i64);
+			} else {
+				size_delta = size_delta.saturating_add(new_size as i64);
+			}
+		}
+
+		// Calculate new total size
+		let new_total_size = if size_delta >= 0 {
+			current_total_size.saturating_add(size_delta as u32)
+		} else {
+			current_total_size.saturating_sub((-size_delta) as u32)
+		};
+
+		// Ensure we don't exceed the total storage limit
+		ensure!(
+			new_total_size <= T::MaxTotalStorageSize::get(),
+			Error::<T>::TotalStorageSizeExceeded
+		);
+
+		// Get or create child trie for this publisher
+		if !PublisherExists::<T>::contains_key(origin_para_id) {
+			PublisherExists::<T>::insert(origin_para_id, true);
+		}
+
+		// Write to child trie and track keys for enumeration
+		for (key, value) in data {
+			frame_support::storage::child::put(&child_info, &key, &value);
+			published_keys.try_insert(key).defensive_ok();
+		}
+
+		PublishedKeys::<T>::insert(origin_para_id, published_keys);
+		TotalStorageSize::<T>::insert(origin_para_id, new_total_size);
+
+		Self::deposit_event(Event::DataPublished { publisher: origin_para_id, items_count });
+
+		Ok(())
+	}
+
+	/// Returns the child trie root hash for a specific publisher.
+	///
+	/// The root can be included in storage proofs to verify published data.
+	/// Returns `None` when the publisher has never published (no child trie).
+	pub fn get_publisher_child_root(para_id: ParaId) -> Option<Vec<u8>> {
+		PublisherExists::<T>::get(para_id).then(|| {
+			let child_info = Self::derive_child_info(para_id);
+			frame_support::storage::child::root(&child_info, sp_runtime::StateVersion::V1)
+		})
+	}
+
+	/// Derives a deterministic child trie identifier from a parachain ID.
+	///
+	/// The child trie identifier is `(b"pubsub", para_id)` encoded.
+	pub fn derive_child_info(para_id: ParaId) -> ChildInfo {
+		ChildInfo::new_default(&(b"pubsub", para_id).encode())
+	}
+
+	/// Retrieves a value from a publisher's child trie.
+	///
+	/// Returns `None` if the publisher doesn't exist or the key is not found.
+	pub fn get_published_value(para_id: ParaId, key: &[u8]) -> Option<Vec<u8>> {
+		PublisherExists::<T>::get(para_id).then(|| {
+			let child_info = Self::derive_child_info(para_id);
+			frame_support::storage::child::get(&child_info, key)
+		})?
+	}
+
+	/// Returns all published data for a parachain.
+ /// + /// Iterates over all tracked keys for the publisher and retrieves their values from the + /// child trie. + pub fn get_all_published_data(para_id: ParaId) -> Vec<([u8; 32], Vec)> { + if !PublisherExists::::get(para_id) { + return Vec::new(); + } + + let child_info = Self::derive_child_info(para_id); + let published_keys = PublishedKeys::::get(para_id); + + published_keys + .into_iter() + .filter_map(|key| { + frame_support::storage::child::get(&child_info, &key) + .map(|value| (key, value)) + }) + .collect() + } + + /// Returns a list of all parachains that have published data. + pub fn get_all_publishers() -> Vec { + PublisherExists::::iter_keys().collect() + } + } +} + +// Implement Publish trait +impl Publish for Pallet { + fn publish_data(publisher: ParaId, data: Vec<([u8; 32], Vec)>) -> DispatchResult { + Self::handle_publish(publisher, data) + } +} + +// Implement OnNewSessionOutgoing for cleanup of offboarded parachains +impl crate::initializer::OnNewSessionOutgoing> for Pallet { + fn on_new_session_outgoing( + notification: &crate::initializer::SessionChangeNotification>, + outgoing_paras: &[ParaId], + ) { + let _ = Self::initializer_on_new_session(notification, outgoing_paras); + } +} \ No newline at end of file diff --git a/polkadot/runtime/parachains/src/broadcaster/tests.rs b/polkadot/runtime/parachains/src/broadcaster/tests.rs new file mode 100644 index 0000000000000..8396a9d3e98d9 --- /dev/null +++ b/polkadot/runtime/parachains/src/broadcaster/tests.rs @@ -0,0 +1,760 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use super::*; +use crate::mock::{new_test_ext, Balances, Broadcaster, RuntimeOrigin, Test}; +use frame_support::{ + assert_err, assert_ok, + traits::fungible::{hold::Inspect as HoldInspect, Inspect}, +}; +use polkadot_primitives::Id as ParaId; +use sp_core::hashing::blake2_256; + +const ALICE: u64 = 1; +const BOB: u64 = 2; + +fn setup_account(who: u64, balance: u128) { + let _ = Balances::mint_into(&who, balance); +} + +// Helper to create hash keys from strings for tests +fn hash_key(data: &[u8]) -> [u8; 32] { + blake2_256(data) +} + +fn register_test_publisher(para_id: ParaId) { + setup_account(ALICE, 10000); + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); +} + +#[test] +fn register_publisher_works() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 1000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + + let info = RegisteredPublishers::::get(para_id).unwrap(); + assert_eq!(info.manager, ALICE); + assert_eq!(info.deposit, 100); + + assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 100); + assert_eq!(Balances::balance(&ALICE), 900); + }); +} + +#[test] +fn force_register_system_chain_works() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(1000); // System chain + setup_account(ALICE, 1000); + + assert_ok!(Broadcaster::force_register_publisher( + RuntimeOrigin::root(), + ALICE, + 0, + para_id + )); + + let info = RegisteredPublishers::::get(para_id).unwrap(); + assert_eq!(info.manager, ALICE); + 
assert_eq!(info.deposit, 0); + + assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 0); + assert_eq!(Balances::balance(&ALICE), 1000); + }); +} + +#[test] +fn force_register_with_custom_deposit_works() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(BOB, 1000); + + assert_ok!(Broadcaster::force_register_publisher( + RuntimeOrigin::root(), + BOB, + 500, + para_id + )); + + let info = RegisteredPublishers::::get(para_id).unwrap(); + assert_eq!(info.manager, BOB); + assert_eq!(info.deposit, 500); + + assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &BOB), 500); + assert_eq!(Balances::balance(&BOB), 500); + }); +} + +#[test] +fn cannot_register_twice() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 1000); + setup_account(BOB, 1000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + + assert_err!( + Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id), + Error::::AlreadyRegistered + ); + + assert_err!( + Broadcaster::register_publisher(RuntimeOrigin::signed(BOB), para_id), + Error::::AlreadyRegistered + ); + + assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 100); + assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &BOB), 0); + }); +} + +#[test] +fn force_register_requires_root() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(1000); + setup_account(ALICE, 1000); + + assert_err!( + Broadcaster::force_register_publisher(RuntimeOrigin::signed(ALICE), ALICE, 0, para_id), + sp_runtime::DispatchError::BadOrigin + ); + + assert!(!RegisteredPublishers::::contains_key(para_id)); + }); +} + +#[test] +fn register_publisher_requires_sufficient_balance() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + 
setup_account(ALICE, 50); // Less than required deposit + + let result = Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id); + assert!(result.is_err()); + + assert!(!RegisteredPublishers::::contains_key(para_id)); + }); +} + +#[test] +fn publish_requires_registration() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + let data = vec![(hash_key(b"key"), b"value".to_vec())]; + + assert_err!( + Broadcaster::handle_publish(para_id, data), + Error::::PublishNotAuthorized + ); + + assert!(!PublisherExists::::get(para_id)); + }); +} + +#[test] +fn registered_publisher_can_publish() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 1000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + + let data = vec![(hash_key(b"key"), b"value".to_vec())]; + assert_ok!(Broadcaster::handle_publish(para_id, data)); + + assert_eq!(Broadcaster::get_published_value(para_id, &hash_key(b"key")), Some(b"value".to_vec())); + }); +} + +#[test] +fn publish_store_retrieve_and_update_data() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 1000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + + assert!(!PublisherExists::::get(para_id)); + assert!(Broadcaster::get_publisher_child_root(para_id).is_none()); + + let initial_data = + vec![(hash_key(b"key1"), b"value1".to_vec()), (hash_key(b"key2"), b"value2".to_vec())]; + Broadcaster::handle_publish(para_id, initial_data.clone()).unwrap(); + + assert!(PublisherExists::::get(para_id)); + let root_after_initial = Broadcaster::get_publisher_child_root(para_id); + assert!(root_after_initial.is_some()); + assert!(!root_after_initial.as_ref().unwrap().is_empty()); + + assert_eq!(Broadcaster::get_published_value(para_id, &hash_key(b"key1")), Some(b"value1".to_vec())); + 
assert_eq!(Broadcaster::get_published_value(para_id, &hash_key(b"key2")), Some(b"value2".to_vec())); + assert_eq!(Broadcaster::get_published_value(para_id, &hash_key(b"key3")), None); + + let update_data = vec![ + (hash_key(b"key1"), b"updated_value1".to_vec()), + (hash_key(b"key3"), b"value3".to_vec()), + ]; + Broadcaster::handle_publish(para_id, update_data).unwrap(); + + let root_after_update = Broadcaster::get_publisher_child_root(para_id); + assert!(root_after_update.is_some()); + assert_ne!(root_after_initial.unwrap(), root_after_update.unwrap()); + + assert_eq!( + Broadcaster::get_published_value(para_id, &hash_key(b"key1")), + Some(b"updated_value1".to_vec()) + ); + assert_eq!( + Broadcaster::get_published_value(para_id, &hash_key(b"key2")), + Some(b"value2".to_vec()) // Should remain unchanged + ); + assert_eq!(Broadcaster::get_published_value(para_id, &hash_key(b"key3")), Some(b"value3".to_vec())); + }); +} + +#[test] +fn handle_publish_respects_max_items_limit() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + register_test_publisher(para_id); + + let mut data = Vec::new(); + for i in 0..17 { + data.push((hash_key(&format!("key{}", i).into_bytes()), b"value".to_vec())); + } + + let result = Broadcaster::handle_publish(para_id, data); + assert!(result.is_err()); + }); +} + +#[test] +fn handle_publish_respects_value_length_limit() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + register_test_publisher(para_id); + + let long_value = vec![b'v'; 1025]; + let data = vec![(hash_key(b"key"), long_value)]; + + let result = Broadcaster::handle_publish(para_id, data); + assert!(result.is_err()); + }); +} + +#[test] +fn total_storage_size_limit_enforced() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + register_test_publisher(para_id); + + // Try to publish data that exceeds 2048 bytes total + // Each item is 32 (key) + 1024 (value) = 
1056 bytes
+		// Two items would be 2112 bytes, exceeding the 2048 limit
+		let data1 = vec![(hash_key(b"key1"), vec![b'a'; 1024])];
+		assert_ok!(Broadcaster::handle_publish(para_id, data1));
+
+		// Second item should fail due to total storage size
+		let data2 = vec![(hash_key(b"key2"), vec![b'b'; 1024])];
+		let result = Broadcaster::handle_publish(para_id, data2);
+		assert_err!(result, Error::<Test>::TotalStorageSizeExceeded);
+
+		// But updating the existing key with a smaller value should work
+		let data3 = vec![(hash_key(b"key1"), vec![b'c'; 100])];
+		assert_ok!(Broadcaster::handle_publish(para_id, data3));
+
+		// Now we should have room for more data
+		let data4 = vec![(hash_key(b"key2"), vec![b'd'; 900])];
+		assert_ok!(Broadcaster::handle_publish(para_id, data4));
+	});
+}
+
+#[test]
+fn max_stored_keys_limit_enforced() {
+	new_test_ext(Default::default()).execute_with(|| {
+		let para_id = ParaId::from(2000);
+		register_test_publisher(para_id);
+
+		// Publish 50 small items to test MaxStoredKeys without hitting TotalStorageSize limit
+		// Each item is 32 (key) + 1 (value) = 33 bytes, total ~1650 bytes
+		// Publish in batches of 10 items to respect MaxPublishItems = 10
+		for batch in 0..5 {
+			let mut data = Vec::new();
+			for i in 0..10 {
+				let key_num = batch * 10 + i;
+				if key_num < 50 {
+					data.push((hash_key(&format!("key{}", key_num).into_bytes()), b"v".to_vec()));
+				}
+			}
+			if !data.is_empty() {
+				assert_ok!(Broadcaster::handle_publish(para_id, data));
+			}
+		}
+
+		let published_keys = PublishedKeys::<Test>::get(para_id);
+		assert_eq!(published_keys.len(), 50);
+
+		let result =
+			Broadcaster::handle_publish(para_id, vec![(hash_key(b"new_key"), b"value".to_vec())]);
+		assert_err!(result, Error::<Test>::TooManyStoredKeys);
+
+		let result = Broadcaster::handle_publish(
+			para_id,
+			vec![(hash_key(b"key0"), b"updated_value".to_vec())],
+		);
+		assert_ok!(result);
+
+		assert_eq!(
+			Broadcaster::get_published_value(para_id, &hash_key(b"key0")),
+			Some(b"updated_value".to_vec())
+		);
+	});
+}
+
+#[test]
+fn published_keys_storage_matches_child_trie() {
+	new_test_ext(Default::default()).execute_with(|| {
+		let para_id = ParaId::from(2000);
+		register_test_publisher(para_id);
+
+		// Publish multiple batches to ensure consistency maintained across updates
+		let data1 = vec![
+			(hash_key(b"key1"), b"value1".to_vec()),
+			(hash_key(b"key2"), b"value2".to_vec()),
+		];
+		Broadcaster::handle_publish(para_id, data1).unwrap();
+
+		// Update some keys, add new ones
+		let data2 = vec![
+			(hash_key(b"key1"), b"updated_value1".to_vec()),
+			(hash_key(b"key3"), b"value3".to_vec()),
+		];
+		Broadcaster::handle_publish(para_id, data2).unwrap();
+
+		let tracked_keys = PublishedKeys::<Test>::get(para_id);
+		let actual_data = Broadcaster::get_all_published_data(para_id);
+
+		// Counts must match
+		assert_eq!(tracked_keys.len(), actual_data.len());
+
+		// Every tracked key must exist in child trie
+		for tracked_key in tracked_keys.iter() {
+			assert!(actual_data.iter().any(|(k, _)| k == tracked_key));
+		}
+
+		// Every child trie key must be tracked
+		for (actual_key, _) in actual_data.iter() {
+			assert!(tracked_keys.contains(actual_key));
+		}
+	});
+}
+
+#[test]
+fn multiple_publishers_in_same_block() {
+	new_test_ext(Default::default()).execute_with(|| {
+		let para1 = ParaId::from(2000);
+		let para2 = ParaId::from(2001);
+		let para3 = ParaId::from(2002);
+
+		// Register all publishers
+		register_test_publisher(para1);
+		setup_account(BOB, 10000);
+		assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(BOB), para2));
+		setup_account(3, 10000);
+		assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(3), para3));
+
+		// Multiple parachains publish data in the same block
+		let data1 = vec![(hash_key(b"key1"), b"value1".to_vec())];
+		let data2 = vec![(hash_key(b"key2"), b"value2".to_vec())];
+		let data3 = vec![(hash_key(b"key3"), b"value3".to_vec())];
+
+		Broadcaster::handle_publish(para1, data1).unwrap();
+		Broadcaster::handle_publish(para2, data2).unwrap();
+		Broadcaster::handle_publish(para3, data3).unwrap();
+
+		// Verify all three publishers exist
+		assert!(PublisherExists::<Test>::get(para1));
+		assert!(PublisherExists::<Test>::get(para2));
+		assert!(PublisherExists::<Test>::get(para3));
+
+		// Verify each para's data is independently accessible
+		assert_eq!(
+			Broadcaster::get_published_value(para1, &hash_key(b"key1")),
+			Some(b"value1".to_vec())
+		);
+		assert_eq!(
+			Broadcaster::get_published_value(para2, &hash_key(b"key2")),
+			Some(b"value2".to_vec())
+		);
+		assert_eq!(
+			Broadcaster::get_published_value(para3, &hash_key(b"key3")),
+			Some(b"value3".to_vec())
+		);
+
+		// Verify no cross-contamination
+		assert_eq!(Broadcaster::get_published_value(para1, &hash_key(b"key2")), None);
+		assert_eq!(Broadcaster::get_published_value(para2, &hash_key(b"key3")), None);
+		assert_eq!(Broadcaster::get_published_value(para3, &hash_key(b"key1")), None);
+	});
+}
+
+#[test]
+fn max_publishers_limit_enforced() {
+	new_test_ext(Default::default()).execute_with(|| {
+		// Register and publish for max publishers
+		for i in 0..1000 {
+			let para_id = ParaId::from(2000 + i);
+			setup_account(100 + i as u64, 10000);
+			assert_ok!(Broadcaster::register_publisher(
+				RuntimeOrigin::signed(100 + i as u64),
+				para_id
+			));
+			let data = vec![(hash_key(b"key"), b"value".to_vec())];
+			assert_ok!(Broadcaster::handle_publish(para_id, data));
+		}
+
+		assert_eq!(PublisherExists::<Test>::iter().count(), 1000);
+
+		// Cannot register new publisher when limit reached
+		let new_para = ParaId::from(3000);
+		setup_account(ALICE, 10000);
+
+		// Registration should fail at registration time due to MaxPublishers limit
+		assert_err!(
+			Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), new_para),
+			Error::<Test>::TooManyPublishers
+		);
+
+		// Existing publisher can still update
+		let existing_para = ParaId::from(2000);
+		let update_data = vec![(hash_key(b"key"), b"updated".to_vec())];
+		assert_ok!(Broadcaster::handle_publish(existing_para, update_data));
+		assert_eq!(
+			Broadcaster::get_published_value(existing_para, &hash_key(b"key")),
+			Some(b"updated".to_vec())
+		);
+	});
+}
+
+#[test]
+fn cleanup_published_data_works() {
+	new_test_ext(Default::default()).execute_with(|| {
+		let para_id = ParaId::from(2000);
+		setup_account(ALICE, 10000);
+
+		assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id));
+		let data = vec![
+			(hash_key(b"key1"), b"value1".to_vec()),
+			(hash_key(b"key2"), b"value2".to_vec()),
+		];
+		assert_ok!(Broadcaster::handle_publish(para_id, data));
+
+		assert!(PublisherExists::<Test>::get(para_id));
+		assert_eq!(PublishedKeys::<Test>::get(para_id).len(), 2);
+
+		assert_ok!(Broadcaster::cleanup_published_data(RuntimeOrigin::signed(ALICE), para_id));
+
+		assert!(!PublisherExists::<Test>::get(para_id));
+		assert_eq!(PublishedKeys::<Test>::get(para_id).len(), 0);
+		assert_eq!(Broadcaster::get_published_value(para_id, &hash_key(b"key1")), None);
+		assert_eq!(Broadcaster::get_published_value(para_id, &hash_key(b"key2")), None);
+		assert!(RegisteredPublishers::<Test>::get(para_id).is_some());
+	});
+}
+
+#[test]
+fn cleanup_requires_manager() {
+	new_test_ext(Default::default()).execute_with(|| {
+		let para_id = ParaId::from(2000);
+		setup_account(ALICE, 10000);
+		setup_account(BOB, 10000);
+
+		assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id));
+		assert_ok!(Broadcaster::handle_publish(
+			para_id,
+			vec![(hash_key(b"key"), b"value".to_vec())]
+		));
+
+		assert_err!(
+			Broadcaster::cleanup_published_data(RuntimeOrigin::signed(BOB), para_id),
+			Error::<Test>::NotAuthorized
+		);
+
+		assert!(PublisherExists::<Test>::get(para_id));
+	});
+}
+
+#[test]
+fn cleanup_fails_if_no_data() {
+	new_test_ext(Default::default()).execute_with(|| {
+		let para_id = ParaId::from(2000);
+		setup_account(ALICE, 10000);
+
+		assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id));
+
+		assert_err!(
+			Broadcaster::cleanup_published_data(RuntimeOrigin::signed(ALICE), para_id),
+			Error::<Test>::NoDataToCleanup
+		);
+	});
+}
+
+#[test]
+fn cleanup_fails_if_not_registered() {
+	new_test_ext(Default::default()).execute_with(|| {
+		let para_id = ParaId::from(2000);
+		setup_account(ALICE, 10000);
+
+		assert_err!(
+			Broadcaster::cleanup_published_data(RuntimeOrigin::signed(ALICE), para_id),
+			Error::<Test>::NotRegistered
+		);
+	});
+}
+
+#[test]
+fn deregister_publisher_works() {
+	new_test_ext(Default::default()).execute_with(|| {
+		let para_id = ParaId::from(2000);
+		setup_account(ALICE, 10000);
+
+		assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id));
+
+		assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 100);
+		assert_eq!(Balances::balance(&ALICE), 9900);
+
+		assert_ok!(Broadcaster::deregister_publisher(RuntimeOrigin::signed(ALICE), para_id));
+
+		assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 0);
+		assert_eq!(Balances::balance(&ALICE), 10000);
+		assert!(!RegisteredPublishers::<Test>::contains_key(para_id));
+	});
+}
+
+#[test]
+fn deregister_fails_if_data_exists() {
+	new_test_ext(Default::default()).execute_with(|| {
+		let para_id = ParaId::from(2000);
+		setup_account(ALICE, 10000);
+
+		assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id));
+		assert_ok!(Broadcaster::handle_publish(
+			para_id,
+			vec![(hash_key(b"key"), b"value".to_vec())]
+		));
+
+		assert_err!(
+			Broadcaster::deregister_publisher(RuntimeOrigin::signed(ALICE), para_id),
+			Error::<Test>::MustCleanupDataFirst
+		);
+
+		assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 100);
+	});
+}
+
+#[test]
+fn deregister_requires_manager() {
+	new_test_ext(Default::default()).execute_with(|| {
+		let para_id = ParaId::from(2000);
+		setup_account(ALICE, 10000);
+		setup_account(BOB, 10000);
+
+		assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id));
+
+		assert_err!(
+			Broadcaster::deregister_publisher(RuntimeOrigin::signed(BOB), para_id),
+			Error::<Test>::NotAuthorized
+		);
+	});
+}
+
+#[test]
+fn two_phase_cleanup_and_deregister_works() {
+	new_test_ext(Default::default()).execute_with(|| {
+		let para_id = ParaId::from(2000);
+		setup_account(ALICE, 10000);
+
+		assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id));
+		let data = vec![
+			(hash_key(b"key1"), b"value1".to_vec()),
+			(hash_key(b"key2"), b"value2".to_vec()),
+			(hash_key(b"key3"), b"value3".to_vec()),
+		];
+		assert_ok!(Broadcaster::handle_publish(para_id, data));
+
+		// Phase 1: Cleanup data
+		assert_ok!(Broadcaster::cleanup_published_data(RuntimeOrigin::signed(ALICE), para_id));
+		assert!(!PublisherExists::<Test>::get(para_id));
+		assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 100);
+
+		// Phase 2: Deregister
+		assert_ok!(Broadcaster::deregister_publisher(RuntimeOrigin::signed(ALICE), para_id));
+		assert!(!RegisteredPublishers::<Test>::contains_key(para_id));
+		assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 0);
+		assert_eq!(Balances::balance(&ALICE), 10000);
+	});
+}
+
+#[test]
+fn force_deregister_works() {
+	new_test_ext(Default::default()).execute_with(|| {
+		let para_id = ParaId::from(2000);
+		setup_account(ALICE, 10000);
+
+		assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id));
+		let data = vec![
+			(hash_key(b"key1"), b"value1".to_vec()),
+			(hash_key(b"key2"), b"value2".to_vec()),
+		];
+		assert_ok!(Broadcaster::handle_publish(para_id, data));
+
+		assert_ok!(Broadcaster::force_deregister_publisher(RuntimeOrigin::root(), para_id));
+
+		assert!(!PublisherExists::<Test>::get(para_id));
+		assert!(!RegisteredPublishers::<Test>::contains_key(para_id));
+		assert_eq!(PublishedKeys::<Test>::get(para_id).len(), 0);
+		assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 0);
+		assert_eq!(Balances::balance(&ALICE), 10000);
+	});
+}
+
+#[test]
+fn force_deregister_works_without_data() {
+	new_test_ext(Default::default()).execute_with(|| {
+		let para_id = ParaId::from(2000);
+		setup_account(ALICE, 10000);
+
+		assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id));
+
+		assert_ok!(Broadcaster::force_deregister_publisher(RuntimeOrigin::root(), para_id));
+
+		assert!(!RegisteredPublishers::<Test>::contains_key(para_id));
+		assert_eq!(Balances::balance(&ALICE), 10000);
+	});
+}
+
+#[test]
+fn force_deregister_requires_root() {
+	new_test_ext(Default::default()).execute_with(|| {
+		let para_id = ParaId::from(2000);
+		setup_account(ALICE, 10000);
+
+		assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id));
+		assert_ok!(Broadcaster::handle_publish(
+			para_id,
+			vec![(hash_key(b"key"), b"value".to_vec())]
+		));
+
+		assert_err!(
+			Broadcaster::force_deregister_publisher(RuntimeOrigin::signed(ALICE), para_id),
+			sp_runtime::DispatchError::BadOrigin
+		);
+
+		assert!(PublisherExists::<Test>::get(para_id));
+		assert!(RegisteredPublishers::<Test>::contains_key(para_id));
+	});
+}
+
+#[test]
+fn cleanup_removes_all_keys_from_child_trie() {
+	new_test_ext(Default::default()).execute_with(|| {
+		let para_id = ParaId::from(2000);
+		setup_account(ALICE, 10000);
+
+		assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id));
+
+		// Publish multiple batches to fill up keys
+		for batch in 0..5 {
+			let mut data = Vec::new();
+			for i in 0..10 {
+				let key = format!("key_{}_{}", batch, i);
+				data.push((hash_key(key.as_bytes()), b"value".to_vec()));
+			}
+			assert_ok!(Broadcaster::handle_publish(para_id, data));
+		}
+
+		assert_eq!(PublishedKeys::<Test>::get(para_id).len(), 50);
+
+		assert_ok!(Broadcaster::cleanup_published_data(RuntimeOrigin::signed(ALICE), para_id));
+
+		for batch in 0..5 {
+			for i in 0..10 {
+				let key = format!("key_{}_{}", batch, i);
+				assert_eq!(
+					Broadcaster::get_published_value(para_id, &hash_key(key.as_bytes())),
+					None
+				);
+			}
+		}
+
+		assert_eq!(PublishedKeys::<Test>::get(para_id).len(), 0);
+	});
+}
+
+#[test]
+fn force_deregister_with_zero_deposit() {
+	new_test_ext(Default::default()).execute_with(|| {
+		let para_id = ParaId::from(1000); // System chain
+		setup_account(ALICE, 10000);
+
+		assert_ok!(Broadcaster::force_register_publisher(
+			RuntimeOrigin::root(),
+			ALICE,
+			0,
+			para_id
+		));
+
+		assert_ok!(Broadcaster::handle_publish(
+			para_id,
+			vec![(hash_key(b"key"), b"value".to_vec())]
+		));
+
+		assert_ok!(Broadcaster::force_deregister_publisher(RuntimeOrigin::root(), para_id));
+
+		assert!(!RegisteredPublishers::<Test>::contains_key(para_id));
+		assert_eq!(Balances::balance(&ALICE), 10000); // No deposit change
+	});
+}
+
+#[test]
+fn cleanup_outgoing_publishers_works() {
+	new_test_ext(Default::default()).execute_with(|| {
+		let para_a = ParaId::from(2000);
+		let para_b = ParaId::from(2001);
+		let para_c = ParaId::from(2002);
+
+		setup_account(ALICE, 10000);
+
+		// Register and publish data for A, B, C
+		assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_a));
+		assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_b));
+		assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_c));
+
+		assert_ok!(Broadcaster::handle_publish(para_a, vec![(hash_key(b"key1"), b"value1".to_vec())]));
+		assert_ok!(Broadcaster::handle_publish(para_b, vec![(hash_key(b"key2"), b"value2".to_vec())]));
+		assert_ok!(Broadcaster::handle_publish(para_c, vec![(hash_key(b"key3"), b"value3".to_vec())]));
+
+		let notification = crate::initializer::SessionChangeNotification::default();
+		let outgoing_paras = vec![para_a, para_b];
+		Broadcaster::initializer_on_new_session(&notification, &outgoing_paras);
+
+		// A and B cleaned up
+		assert!(!RegisteredPublishers::<Test>::contains_key(para_a));
+		assert!(!RegisteredPublishers::<Test>::contains_key(para_b));
+		assert!(!PublisherExists::<Test>::get(para_a));
+		assert!(!PublisherExists::<Test>::get(para_b));
+
+		// C unaffected
+		assert!(RegisteredPublishers::<Test>::contains_key(para_c));
+		assert!(PublisherExists::<Test>::get(para_c));
+	});
+}
+
+#[test]
+fn
empty_publish_fails() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + register_test_publisher(para_id); + + // Try to publish empty data + let empty_data: Vec<([u8; 32], Vec)> = vec![]; + + assert_err!( + Broadcaster::handle_publish(para_id, empty_data), + Error::::EmptyPublish + ); + }); +} diff --git a/polkadot/runtime/parachains/src/broadcaster/traits.rs b/polkadot/runtime/parachains/src/broadcaster/traits.rs new file mode 100644 index 0000000000000..3f8046894f034 --- /dev/null +++ b/polkadot/runtime/parachains/src/broadcaster/traits.rs @@ -0,0 +1,32 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Traits for publish operations in the broadcaster pallet. + +use alloc::vec::Vec; +use polkadot_primitives::Id as ParaId; +use sp_runtime::DispatchResult; + +/// Trait for handling publish operations for parachains. +/// +/// This trait provides the interface for parachains to publish key-value data. +/// Keys must be 32-byte hashes. +pub trait Publish { + /// Publish key-value data for a specific parachain. + /// + /// Keys must be 32-byte hashes. 
+ fn publish_data(publisher: ParaId, data: Vec<([u8; 32], Vec)>) -> DispatchResult; +} diff --git a/polkadot/runtime/parachains/src/broadcaster/weights.rs b/polkadot/runtime/parachains/src/broadcaster/weights.rs new file mode 100644 index 0000000000000..3586a929caa4b --- /dev/null +++ b/polkadot/runtime/parachains/src/broadcaster/weights.rs @@ -0,0 +1,44 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `broadcaster` +//! +//! THIS FILE WAS NOT AUTO-GENERATED. PLACEHOLDER WEIGHTS. +//! TODO: Run benchmarks to generate actual weights. + +use frame_support::weights::Weight; + +/// Weight information for broadcaster operations. +pub trait WeightInfo { + fn register_publisher() -> Weight; + fn force_register_publisher() -> Weight; + fn do_cleanup_publisher(k: u32) -> Weight; +} + +/// Placeholder weights (to be replaced with benchmarked values). 
+impl WeightInfo for () { + fn register_publisher() -> Weight { + Weight::zero() + } + + fn force_register_publisher() -> Weight { + Weight::zero() + } + + fn do_cleanup_publisher(_k: u32) -> Weight { + Weight::zero() + } +} diff --git a/polkadot/runtime/parachains/src/initializer.rs b/polkadot/runtime/parachains/src/initializer.rs index 6ee245fb5230c..99d399f62f19c 100644 --- a/polkadot/runtime/parachains/src/initializer.rs +++ b/polkadot/runtime/parachains/src/initializer.rs @@ -70,6 +70,23 @@ impl OnNewSession for () { fn on_new_session(_: &SessionChangeNotification) {} } +/// Handler for session changes with offboarded parachains. +pub trait OnNewSessionOutgoing { + /// Called when a new session starts with parachains being offboarded. + fn on_new_session_outgoing( + notification: &SessionChangeNotification, + outgoing_paras: &[polkadot_primitives::Id], + ); +} + +impl OnNewSessionOutgoing for () { + fn on_new_session_outgoing( + _: &SessionChangeNotification, + _: &[polkadot_primitives::Id], + ) { + } +} + /// Number of validators (not only parachain) in a session. pub type ValidatorSetCount = u32; @@ -134,6 +151,9 @@ pub mod pallet { /// to disable it on the ones that don't support it. Can be removed and replaced by a simple /// bound to `coretime::Config` once all chains support it. type CoretimeOnNewSession: OnNewSession>; + /// Optional handler for outgoing parachains on new session. + /// Use `()` to disable, or configure a pallet that implements `OnNewSessionOutgoing`. + type OnNewSessionOutgoing: OnNewSessionOutgoing>; /// Weight information for extrinsics in this pallet. 
type WeightInfo: WeightInfo; } @@ -282,6 +302,7 @@ impl Pallet { T::SlashingHandler::initializer_on_new_session(session_index); dmp::Pallet::::initializer_on_new_session(¬ification, &outgoing_paras); hrmp::Pallet::::initializer_on_new_session(¬ification, &outgoing_paras); + T::OnNewSessionOutgoing::on_new_session_outgoing(¬ification, &outgoing_paras); T::CoretimeOnNewSession::on_new_session(¬ification); } diff --git a/polkadot/runtime/parachains/src/lib.rs b/polkadot/runtime/parachains/src/lib.rs index 1cd534257d7f9..0c7be5b0f6834 100644 --- a/polkadot/runtime/parachains/src/lib.rs +++ b/polkadot/runtime/parachains/src/lib.rs @@ -24,6 +24,7 @@ #![cfg_attr(not(feature = "std"), no_std)] pub mod assigner_coretime; +pub mod broadcaster; pub mod configuration; pub mod coretime; pub mod disputes; diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index cba63ae7b1b04..cc65d33b7802e 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -17,7 +17,7 @@ //! Mocks for all the traits. use crate::{ - assigner_coretime, configuration, coretime, disputes, dmp, hrmp, + assigner_coretime, broadcaster, configuration, coretime, disputes, dmp, hrmp, inclusion::{self, AggregateMessageOrigin, UmpQueueId}, initializer, on_demand, origin, paras, paras::ParaKind, @@ -74,6 +74,7 @@ frame_support::construct_runtime!( Paras: paras, Configuration: configuration, ParasShared: shared, + Broadcaster: broadcaster, ParaInclusion: inclusion, ParaInherent: paras_inherent, Scheduler: scheduler, @@ -192,6 +193,7 @@ impl crate::initializer::Config for Test { type ForceOrigin = frame_system::EnsureRoot; type WeightInfo = (); type CoretimeOnNewSession = Coretime; + type OnNewSessionOutgoing = (); } impl crate::configuration::Config for Test { @@ -215,6 +217,30 @@ impl crate::shared::Config for Test { type DisabledValidators = MockDisabledValidators; } +parameter_types! 
{ + pub const MaxPublishItems: u32 = 10; + pub const MaxValueLength: u32 = 1024; + pub const MaxStoredKeys: u32 = 50; + pub const MaxTotalStorageSize: u32 = 2048; // 2 KiB + pub const MaxPublishers: u32 = 1000; +} + +parameter_types! { + pub const PublisherDeposit: Balance = 100; +} + +impl crate::broadcaster::Config for Test { + type Currency = Balances; + type RuntimeHoldReason = RuntimeHoldReason; + type WeightInfo = (); + type MaxPublishItems = MaxPublishItems; + type MaxValueLength = MaxValueLength; + type MaxStoredKeys = MaxStoredKeys; + type MaxTotalStorageSize = MaxTotalStorageSize; + type MaxPublishers = MaxPublishers; + type PublisherDeposit = PublisherDeposit; +} + impl origin::Config for Test {} parameter_types! { diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index c715980bcb0af..c1494d8a98b89 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1198,6 +1198,7 @@ impl parachains_initializer::Config for Runtime { type ForceOrigin = EnsureRoot; type WeightInfo = weights::polkadot_runtime_parachains_initializer::WeightInfo; type CoretimeOnNewSession = Coretime; + type OnNewSessionOutgoing = (); } impl parachains_disputes::Config for Runtime { diff --git a/polkadot/runtime/rococo/src/weights/xcm/mod.rs b/polkadot/runtime/rococo/src/weights/xcm/mod.rs index 36d818a87445d..ab9f5deed2ba8 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/mod.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/mod.rs @@ -305,6 +305,10 @@ impl XcmWeightInfo for RococoXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } + fn publish(_data: &PublishData) -> Weight { + // Rococo does not currently support Publish operations + Weight::MAX + } } #[test] diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 87fc99eb32ad7..5b6654438fa62 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ 
b/polkadot/runtime/rococo/src/xcm_config.rs @@ -227,6 +227,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = XcmPallet; + type BroadcastHandler = (); } parameter_types! { diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index b4a368c8d8a19..880c5b039e683 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -591,6 +591,7 @@ impl parachains_initializer::Config for Runtime { type ForceOrigin = frame_system::EnsureRoot; type WeightInfo = (); type CoretimeOnNewSession = Coretime; + type OnNewSessionOutgoing = (); } impl parachains_session_info::Config for Runtime { diff --git a/polkadot/runtime/test-runtime/src/xcm_config.rs b/polkadot/runtime/test-runtime/src/xcm_config.rs index 8d7e351d0d5be..4b43918733c91 100644 --- a/polkadot/runtime/test-runtime/src/xcm_config.rs +++ b/polkadot/runtime/test-runtime/src/xcm_config.rs @@ -158,6 +158,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = (); + type BroadcastHandler = (); } impl pallet_xcm::Config for crate::Runtime { diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 13808c1219346..9f027a116949e 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1575,6 +1575,7 @@ impl parachains_initializer::Config for Runtime { type ForceOrigin = EnsureRoot; type WeightInfo = weights::polkadot_runtime_parachains_initializer::WeightInfo; type CoretimeOnNewSession = Coretime; + type OnNewSessionOutgoing = (); } impl paras_sudo_wrapper::Config for Runtime {} diff --git a/polkadot/runtime/westend/src/weights/xcm/mod.rs b/polkadot/runtime/westend/src/weights/xcm/mod.rs index ba4502e228420..dc00bd4c868e6 100644 --- a/polkadot/runtime/westend/src/weights/xcm/mod.rs +++ 
b/polkadot/runtime/westend/src/weights/xcm/mod.rs @@ -307,6 +307,10 @@ impl XcmWeightInfo for WestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } + fn publish(_data: &PublishData) -> Weight { + // Westend does not currently support Publish operations + Weight::MAX + } } #[test] diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index a758d030de7de..67e838c843cb0 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -236,6 +236,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = XcmPallet; + type BroadcastHandler = (); } parameter_types! { diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs index 9e06550b6b724..dc4c14e2f6432 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs @@ -122,6 +122,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = (); + type BroadcastHandler = (); } impl crate::Config for Test { diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index aefbada7429dd..577bfebe7b334 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -961,6 +961,48 @@ mod benchmarks { Ok(()) } + #[benchmark] + fn publish(n: Linear<1, { MaxPublishItems::get() }>) -> Result<(), BenchmarkError> { + use xcm::latest::MaxPublishValueLength; + + let max_value_len = MaxPublishValueLength::get() as usize; + + // Calculate value size to fit within a conservative 2KB total storage budget + const KEY_SIZE: usize = 
32; + let conservative_total_storage = 2048usize; + let value_size = ((conservative_total_storage / n.max(1) as usize).saturating_sub(KEY_SIZE)) + .min(max_value_len) + .max(1); + + let data_vec: Vec<_> = (0..n) + .map(|i| { + let mut key = [0u8; KEY_SIZE]; + key[0] = i as u8; + ( + key, + BoundedVec::try_from(vec![i as u8; value_size]).unwrap(), + ) + }) + .collect(); + + let data = BoundedVec::try_from(data_vec).unwrap(); + + let origin = T::publish_origin()?; + T::ensure_publisher_registered(&origin)?; + + let mut executor = new_executor::(origin); + + let instruction = Instruction::Publish { data }; + let xcm = Xcm(vec![instruction]); + + #[block] + { + executor.bench_process(xcm)?; + } + + Ok(()) + } + impl_benchmark_test_suite!( Pallet, crate::generic::mock::new_test_ext(), diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs index 6368ca0e9c3f5..16ecd63c608b5 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs @@ -26,7 +26,7 @@ use sp_runtime::traits::TrailingZeroInput; use xcm_builder::{ test_utils::{ AssetsInHolding, TestAssetExchanger, TestAssetLocker, TestAssetTrap, - TestSubscriptionService, TestUniversalAliases, + TestBroadcastHandler, TestSubscriptionService, TestUniversalAliases, }, AliasForeignAccountId32, AllowUnpaidExecutionFrom, EnsureDecodableXcm, FrameTransactionalProcessor, @@ -112,6 +112,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = (); + type BroadcastHandler = TestBroadcastHandler; } parameter_types! 
{ @@ -193,6 +194,15 @@ impl generic::Config for Test { let target: Location = AccountId32 { network: None, id: [0; 32] }.into(); Ok((origin, target)) } + + fn publish_origin() -> Result<Location, BenchmarkError> { + Ok(Parachain(1000).into()) + } + + fn ensure_publisher_registered(_origin: &Location) -> Result<(), BenchmarkError> { + // No registration needed for tests + Ok(()) + } } #[cfg(feature = "runtime-benchmarks")] diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs index d7471b02368fa..3b8c953cbbae2 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs @@ -108,6 +108,23 @@ pub mod pallet { crate_version: <Pallet<T> as frame_support::traits::PalletInfoAccess>::crate_version(), } } + + /// Return a valid origin for `Publish` benchmark. + /// + /// Should return a parachain origin that is allowed by the BroadcastHandler filter. + /// If set to `Err`, benchmarks which rely on publish will be skipped. + fn publish_origin() -> Result<Location, BenchmarkError> { + // Avoid having to set on every runtime that does not want to receive publish. + Err(BenchmarkError::Skip) + } + + /// Ensure the publisher from the given origin is registered. + /// This should register the parachain as a publisher if not already registered. + /// If set to `Err`, benchmarks which rely on publish will be skipped. + fn ensure_publisher_registered(_origin: &Location) -> Result<(), BenchmarkError> { + // Avoid having to set on every runtime that does not want to receive publish. + Err(BenchmarkError::Skip) + } } #[pallet::pallet] diff --git a/polkadot/xcm/pallet-xcm/src/errors.rs b/polkadot/xcm/pallet-xcm/src/errors.rs index 84e544c08748e..6dccb583b9f67 100644 --- a/polkadot/xcm/pallet-xcm/src/errors.rs +++ b/polkadot/xcm/pallet-xcm/src/errors.rs @@ -136,6 +136,9 @@ pub enum ExecutionError { /// Too many assets matched the given asset filter. #[codec(index = 35)] TooManyAssets, + /// Publishing data failed. 
+ #[codec(index = 36)] + PublishFailed, // Errors that happen prior to instructions being executed. These fall outside of the XCM // spec. /// XCM version not able to be handled. @@ -198,6 +201,7 @@ impl From for ExecutionError { XcmError::Unanchored => Self::Unanchored, XcmError::NotDepositable => Self::NotDepositable, XcmError::TooManyAssets => Self::TooManyAssets, + XcmError::PublishFailed => Self::PublishFailed, XcmError::UnhandledXcmVersion => Self::UnhandledXcmVersion, XcmError::WeightLimitReached(_) => Self::WeightLimitReached, XcmError::Barrier => Self::Barrier, diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs index 502200e849405..215cc710b5644 100644 --- a/polkadot/xcm/src/v4/mod.rs +++ b/polkadot/xcm/src/v4/mod.rs @@ -1427,7 +1427,8 @@ impl TryFrom> for Instructi InitiateTransfer { .. } | PayFees { .. } | SetHints { .. } | - ExecuteWithOrigin { .. } => { + ExecuteWithOrigin { .. } | + Publish { .. } => { tracing::debug!(target: "xcm::versions::v5tov4", ?new_instruction, "not supported by v4"); return Err(()); }, diff --git a/polkadot/xcm/src/v5/mod.rs b/polkadot/xcm/src/v5/mod.rs index 0caf7d0c581fe..7d77f5a4e487b 100644 --- a/polkadot/xcm/src/v5/mod.rs +++ b/polkadot/xcm/src/v5/mod.rs @@ -186,8 +186,9 @@ pub mod prelude { InstructionError, InstructionIndex, InteriorLocation, Junction::{self, *}, Junctions::{self, Here}, - Location, MaxAssetTransferFilters, MaybeErrorCode, + Location, MaxAssetTransferFilters, MaxPublishItems, MaybeErrorCode, NetworkId::{self, *}, + PublishData, PublishKey, OriginKind, Outcome, PalletInfo, Parent, ParentThen, PreparedMessage, QueryId, QueryResponseInfo, Reanchorable, Response, Result as XcmResult, SendError, SendResult, SendXcm, Weight, @@ -211,8 +212,18 @@ parameter_types! 
{ pub MaxPalletNameLen: u32 = 48; pub MaxPalletsInfo: u32 = 64; pub MaxAssetTransferFilters: u32 = 6; + pub MaxPublishItems: u32 = 10; + pub MaxPublishValueLength: u32 = 1024; } +/// Key type for published data - a 32-byte hash +pub type PublishKey = [u8; 32]; + +pub type PublishData = BoundedVec< + (PublishKey, BoundedVec), + MaxPublishItems, +>; + #[derive( Clone, Eq, PartialEq, Encode, Decode, DecodeWithMemTracking, Debug, TypeInfo, MaxEncodedLen, )] @@ -1139,6 +1150,26 @@ pub enum Instruction { /// - `hints`: A bounded vector of `ExecutionHint`, specifying the different hints that will /// be activated. SetHints { hints: BoundedVec }, + + /// Publish data to the relay chain for other parachains to access. + /// + /// This instruction allows parachains to publish key-value data pairs to the relay chain + /// which are stored in child tries on the relay chain indexed by the publisher's ParaId. + /// + /// - `data`: The key-value pairs to be published, bounded by MaxPublishItems + /// - Keys: 32-byte hashes + /// - Values: Bounded by MaxPublishValueLength + /// + /// Safety: Origin must be a parachain (Sovereign Account). The relay chain will validate + /// the origin and store data in the appropriate child trie. 
+ /// + /// Kind: *Command* + /// + /// Errors: + /// - NoPermission: If origin is not authorized by the configured filter + /// - BadOrigin: If origin is not a valid parachain + /// - PublishFailed: If the underlying handler fails + Publish { data: PublishData }, } #[derive( @@ -1241,6 +1272,7 @@ impl Instruction { InitiateTransfer { destination, remote_fees, preserve_origin, assets, remote_xcm }, ExecuteWithOrigin { descendant_origin, xcm } => ExecuteWithOrigin { descendant_origin, xcm: xcm.into() }, + Publish { data } => Publish { data }, } } } @@ -1316,6 +1348,7 @@ impl> GetWeight for Instruction { W::initiate_transfer(destination, remote_fees, preserve_origin, assets, remote_xcm), ExecuteWithOrigin { descendant_origin, xcm } => W::execute_with_origin(descendant_origin, xcm), + Publish { data } => W::publish(data), } } } diff --git a/polkadot/xcm/src/v5/traits.rs b/polkadot/xcm/src/v5/traits.rs index ecbf46f84d31b..a157c176fa587 100644 --- a/polkadot/xcm/src/v5/traits.rs +++ b/polkadot/xcm/src/v5/traits.rs @@ -154,6 +154,9 @@ pub enum Error { /// Too many assets matched the given asset filter. #[codec(index = 35)] TooManyAssets, + /// Publishing data failed. + #[codec(index = 36)] + PublishFailed, // Errors that happen prior to instructions being executed. These fall outside of the XCM // spec. 
diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index 32f8f595031ef..15a395d7170d4 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -31,6 +31,8 @@ xcm-executor = { workspace = true } # Polkadot dependencies polkadot-parachain-primitives = { workspace = true } +polkadot-primitives = { workspace = true } +polkadot-runtime-parachains = { workspace = true } [dev-dependencies] pallet-assets = { workspace = true, default-features = true } @@ -71,6 +73,8 @@ std = [ "pallet-asset-conversion/std", "pallet-transaction-payment/std", "polkadot-parachain-primitives/std", + "polkadot-primitives/std", + "polkadot-runtime-parachains/std", "primitive-types/std", "scale-info/std", "sp-arithmetic/std", diff --git a/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs b/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs index 30136b004a480..2eaae0dcee4a2 100644 --- a/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs +++ b/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs @@ -250,6 +250,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); + type BroadcastHandler = (); type XcmRecorder = (); } diff --git a/polkadot/xcm/xcm-builder/src/broadcast_adapter.rs b/polkadot/xcm/xcm-builder/src/broadcast_adapter.rs new file mode 100644 index 0000000000000..4e2c4d50397a6 --- /dev/null +++ b/polkadot/xcm/xcm-builder/src/broadcast_adapter.rs @@ -0,0 +1,176 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Adapters for broadcast/publish operations in XCM. + +use alloc::vec::Vec; +use core::marker::PhantomData; +use frame_support::traits::Contains; +use polkadot_primitives::Id as ParaId; +use polkadot_runtime_parachains::broadcaster::Publish; +use xcm::latest::prelude::XcmError; +use xcm::latest::{Junction, Location, PublishData, Result as XcmResult}; +use xcm_executor::traits::BroadcastHandler; + +/// Configurable broadcast adapter that validates parachain origins. +pub struct ParachainBroadcastAdapter(PhantomData<(Filter, Handler)>); + +impl BroadcastHandler for ParachainBroadcastAdapter +where + Filter: Contains, + Handler: Publish, +{ + fn handle_publish(origin: &Location, data: PublishData) -> XcmResult { + // Check if origin is authorized to publish + if !Filter::contains(origin) { + return Err(XcmError::NoPermission); + } + + // Extract parachain ID from authorized origin + let para_id = match origin.unpack() { + (0, [Junction::Parachain(id)]) => ParaId::from(*id), // Direct parachain + (1, [Junction::Parachain(id), ..]) => ParaId::from(*id), // Sibling parachain + _ => return Err(XcmError::BadOrigin), // Should be caught by filter + }; + + // Call the actual handler + let data_vec: Vec<([u8; 32], Vec)> = data + .into_inner() + .into_iter() + .map(|(k, v)| (k, v.into_inner())) + .collect(); + Handler::publish_data(para_id, data_vec).map_err(|_| XcmError::PublishFailed) + } +} + +/// Allows only direct parachains (parents=0, interior=[Parachain(_)]). 
+pub struct OnlyParachains; +impl Contains for OnlyParachains { + fn contains(origin: &Location) -> bool { + matches!(origin.unpack(), (0, [Junction::Parachain(_)])) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use frame_support::parameter_types; + use polkadot_runtime_parachains::broadcaster::Publish; + use sp_runtime::BoundedVec; + use xcm::latest::prelude::XcmError; + use xcm::latest::{ + Junction, Location, MaxPublishValueLength, PublishData, PublishKey, + }; + + // Mock handler that tracks calls + parameter_types! { + pub static PublishCalls: Vec<(ParaId, Vec<(PublishKey, Vec)>)> = vec![]; + } + + // Helper to create test publish data + fn test_publish_data(items: Vec<([u8; 32], &[u8])>) -> PublishData { + items + .into_iter() + .map(|(k, v)| { + ( + k, + BoundedVec::::try_from(v.to_vec()).unwrap(), + ) + }) + .collect::>() + .try_into() + .unwrap() + } + + struct MockPublishHandler; + impl Publish for MockPublishHandler { + fn publish_data( + publisher: ParaId, + data: Vec<([u8; 32], Vec)>, + ) -> Result<(), sp_runtime::DispatchError> { + let mut calls = PublishCalls::get(); + calls.push((publisher, data)); + PublishCalls::set(calls); + Ok(()) + } + } + + #[test] + fn publish_from_direct_parachain_works() { + PublishCalls::set(vec![]); + let origin = Location::new(0, [Junction::Parachain(1000)]); + let key1 = [1u8; 32]; + let data = test_publish_data(vec![(key1, b"value1")]); + + let result = ParachainBroadcastAdapter::::handle_publish( + &origin, + data.clone(), + ); + + assert!(result.is_ok()); + let calls = PublishCalls::get(); + assert_eq!(calls.len(), 1); + assert_eq!(calls[0].0, ParaId::from(1000)); + assert_eq!(calls[0].1, vec![(key1, b"value1".to_vec())]); + } + + #[test] + fn publish_from_sibling_parachain_fails() { + PublishCalls::set(vec![]); + let origin = Location::new( + 1, + [Junction::Parachain(2000), Junction::AccountId32 { network: None, id: [1; 32] }], + ); + let key1 = [2u8; 32]; + let data = test_publish_data(vec![(key1, 
b"value1")]); + + let result = ParachainBroadcastAdapter::::handle_publish( + &origin, + data.clone(), + ); + + assert!(matches!(result, Err(XcmError::NoPermission))); + assert!(PublishCalls::get().is_empty()); + } + + #[test] + fn publish_from_non_parachain_fails() { + PublishCalls::set(vec![]); + let origin = Location::here(); + let key1 = [3u8; 32]; + let data = test_publish_data(vec![(key1, b"value1")]); + + let result = + ParachainBroadcastAdapter::::handle_publish( + &origin, data, + ); + + assert!(matches!(result, Err(XcmError::NoPermission))); + assert!(PublishCalls::get().is_empty()); + } + + #[test] + fn only_parachains_filter_works() { + // Direct parachain allowed + assert!(OnlyParachains::contains(&Location::new(0, [Junction::Parachain(1000)]))); + + // Sibling parachain not allowed + assert!(!OnlyParachains::contains(&Location::new(1, [Junction::Parachain(1000)]))); + + // Root not allowed + assert!(!OnlyParachains::contains(&Location::here())); + } +} diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index 83fb34bd6569f..d01628f4ea20d 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -49,6 +49,9 @@ pub use barriers::{ TakeWeightCredit, TrailingSetTopicAsId, WithComputedOrigin, }; +mod broadcast_adapter; +pub use broadcast_adapter::{OnlyParachains, ParachainBroadcastAdapter}; + mod controller; pub use controller::{ Controller, ExecuteController, ExecuteControllerWeightInfo, QueryController, diff --git a/polkadot/xcm/xcm-builder/src/test_utils.rs b/polkadot/xcm/xcm-builder/src/test_utils.rs index 90afb2c9a3d3e..ec5b78b8dc2ec 100644 --- a/polkadot/xcm/xcm-builder/src/test_utils.rs +++ b/polkadot/xcm/xcm-builder/src/test_utils.rs @@ -16,13 +16,13 @@ // Shared test utilities and implementations for the XCM Builder. 
-use alloc::vec::Vec; +use alloc::{collections::BTreeMap, vec::Vec}; use frame_support::{ parameter_types, traits::{Contains, CrateVersion, PalletInfoData, PalletsInfoAccess}, }; pub use xcm::latest::{prelude::*, Weight}; -use xcm_executor::traits::{ClaimAssets, DropAssets, VersionChangeNotifier}; +use xcm_executor::traits::{BroadcastHandler, ClaimAssets, DropAssets, VersionChangeNotifier}; pub use xcm_executor::{ traits::{ AssetExchange, AssetLock, ConvertOrigin, Enact, LockError, OnResponse, TransactAsset, @@ -33,6 +33,8 @@ pub use xcm_executor::{ parameter_types! { pub static SubscriptionRequests: Vec<(Location, Option<(QueryId, Weight)>)> = vec![]; pub static MaxAssetsIntoHolding: u32 = 4; + // Maps ParaId => Vec<(key, value)> + pub static PublishedData: BTreeMap)>> = BTreeMap::new(); } pub struct TestSubscriptionService; @@ -62,6 +64,32 @@ impl VersionChangeNotifier for TestSubscriptionService { } } +pub struct TestBroadcastHandler; + +impl BroadcastHandler for TestBroadcastHandler { + fn handle_publish(origin: &Location, data: PublishData) -> XcmResult { + // Extract para_id from origin + let para_id = match origin.unpack() { + (0, [Parachain(id)]) => *id, + (1, [Parachain(id), ..]) => *id, + _ => return Err(XcmError::BadOrigin), + }; + + let mut published = PublishedData::get(); + let data_vec: Vec<([u8; 32], Vec)> = data + .into_inner() + .into_iter() + .map(|(k, v)| (k, v.into_inner())) + .collect(); + + // Merge with existing data for this parachain + published.entry(para_id).or_insert_with(Vec::new).extend(data_vec); + PublishedData::set(published); + + Ok(()) + } +} + parameter_types! 
{ pub static TrappedAssets: Vec<(Location, Assets)> = vec![]; } diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs index b932aaee6fcf8..75758cca65690 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs @@ -771,6 +771,7 @@ impl Config for TestConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = (); + type BroadcastHandler = TestBroadcastHandler; } pub fn fungible_multi_asset(location: Location, amount: u128) -> Asset { diff --git a/polkadot/xcm/xcm-builder/src/tests/mod.rs b/polkadot/xcm/xcm-builder/src/tests/mod.rs index 379baaf5e3767..f314d41d9ec65 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mod.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mod.rs @@ -35,6 +35,7 @@ mod expecting; mod locking; mod origins; mod pay; +mod publish; mod querying; mod routing; mod transacting; diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs index d8f8e15f5eb05..6e231a5baf2d6 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs @@ -246,6 +246,7 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = (); + type BroadcastHandler = (); type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); diff --git a/polkadot/xcm/xcm-builder/src/tests/publish.rs b/polkadot/xcm/xcm-builder/src/tests/publish.rs new file mode 100644 index 0000000000000..3d0933a6f3f03 --- /dev/null +++ b/polkadot/xcm/xcm-builder/src/tests/publish.rs @@ -0,0 +1,158 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Tests for Publish XCM instruction. + +use super::*; +use crate::test_utils::PublishedData; +use sp_runtime::BoundedVec; +use xcm::latest::{MaxPublishValueLength, PublishKey}; + +// Helper to create test publish data +fn test_publish_data(items: Vec<(PublishKey, &[u8])>) -> PublishData { + items + .into_iter() + .map(|(k, v)| { + ( + k, + BoundedVec::::try_from(v.to_vec()).unwrap(), + ) + }) + .collect::>() + .try_into() + .unwrap() +} + +#[test] +fn publish_from_parachain_works() { + // Allow unpaid execution from Parachain(1000) + AllowUnpaidFrom::set(vec![Parachain(1000).into()]); + + let key1 = [1u8; 32]; + let data = test_publish_data(vec![(key1, b"value1")]); + + let message = Xcm::(vec![Publish { data: data.clone() }]); + let mut hash = fake_message_hash(&message); + let weight_limit = Weight::from_parts(10, 10); + + let r = XcmExecutor::::prepare_and_execute( + Parachain(1000), + message, + &mut hash, + weight_limit, + Weight::zero(), + ); + + assert_eq!(r, Outcome::Complete { used: Weight::from_parts(10, 10) }); + + // Verify data was published + let published = PublishedData::get(); + assert_eq!(published.get(&1000).unwrap().len(), 1); + assert_eq!(published.get(&1000).unwrap()[0], (key1, b"value1".to_vec())); +} + +#[test] +fn publish_from_non_parachain_fails() { + // Allow unpaid execution from Parent to test that origin validation happens + 
AllowUnpaidFrom::set(vec![Parent.into()]); + + let key1 = [2u8; 32]; + let data = test_publish_data(vec![(key1, b"value1")]); + + let message = Xcm::(vec![Publish { data }]); + let mut hash = fake_message_hash(&message); + let weight_limit = Weight::from_parts(10, 10); + + // Try from Parent (not a parachain) + let r = XcmExecutor::::prepare_and_execute( + Parent, + message, + &mut hash, + weight_limit, + Weight::zero(), + ); + + assert_eq!( + r, + Outcome::Incomplete { + used: Weight::from_parts(10, 10), + error: InstructionError { index: 0, error: XcmError::BadOrigin }, + } + ); +} + +#[test] +fn publish_without_origin_fails() { + // Allow unpaid execution from Parachain(1000) + AllowUnpaidFrom::set(vec![Parachain(1000).into()]); + + let key1 = [4u8; 32]; + let data = test_publish_data(vec![(key1, b"value1")]); + + let message = Xcm::(vec![ClearOrigin, Publish { data }]); + let mut hash = fake_message_hash(&message); + let weight_limit = Weight::from_parts(20, 20); + + let r = XcmExecutor::::prepare_and_execute( + Parachain(1000), + message, + &mut hash, + weight_limit, + Weight::zero(), + ); + + assert_eq!( + r, + Outcome::Incomplete { + used: Weight::from_parts(20, 20), + error: InstructionError { index: 1, error: XcmError::BadOrigin }, + } + ); +} + +#[test] +fn publish_multiple_items_works() { + // Allow unpaid execution from Parachain(1000) + AllowUnpaidFrom::set(vec![Parachain(1000).into()]); + + let key1 = [5u8; 32]; + let key2 = [6u8; 32]; + let data = test_publish_data(vec![ + (key1, b"value1"), + (key2, b"value2"), + ]); + + let message = Xcm::(vec![Publish { data: data.clone() }]); + let mut hash = fake_message_hash(&message); + let weight_limit = Weight::from_parts(10, 10); + + let r = XcmExecutor::::prepare_and_execute( + Parachain(1000), + message, + &mut hash, + weight_limit, + Weight::zero(), + ); + + assert_eq!(r, Outcome::Complete { used: Weight::from_parts(10, 10) }); + + // Verify all data was published + let published = PublishedData::get(); 
+ let para_data = published.get(&1000).unwrap(); + assert_eq!(para_data.len(), 2); + assert!(para_data.contains(&(key1, b"value1".to_vec()))); + assert!(para_data.contains(&(key2, b"value2".to_vec()))); +} diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs index 7a2eb8cc55adf..f22bce759a70e 100644 --- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs +++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs @@ -192,6 +192,7 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = (); + type BroadcastHandler = (); type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); diff --git a/polkadot/xcm/xcm-builder/tests/scenarios.rs b/polkadot/xcm/xcm-builder/tests/scenarios.rs index c772a49fc8226..4246e4ca848da 100644 --- a/polkadot/xcm/xcm-builder/tests/scenarios.rs +++ b/polkadot/xcm/xcm-builder/tests/scenarios.rs @@ -399,6 +399,7 @@ fn recursive_xcm_execution_fail() { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = (); + type BroadcastHandler = (); type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); diff --git a/polkadot/xcm/xcm-executor/src/config.rs b/polkadot/xcm/xcm-executor/src/config.rs index 60a5ed63f32ee..a3cc889c8edeb 100644 --- a/polkadot/xcm/xcm-executor/src/config.rs +++ b/polkadot/xcm/xcm-executor/src/config.rs @@ -15,10 +15,10 @@ // along with Polkadot. If not, see . 
use crate::traits::{ - AssetExchange, AssetLock, CallDispatcher, ClaimAssets, ConvertOrigin, DropAssets, EventEmitter, - ExportXcm, FeeManager, HandleHrmpChannelAccepted, HandleHrmpChannelClosing, - HandleHrmpNewChannelOpenRequest, OnResponse, ProcessTransaction, RecordXcm, ShouldExecute, - TransactAsset, VersionChangeNotifier, WeightBounds, WeightTrader, + AssetExchange, AssetLock, BroadcastHandler, CallDispatcher, ClaimAssets, ConvertOrigin, + DropAssets, EventEmitter, ExportXcm, FeeManager, HandleHrmpChannelAccepted, + HandleHrmpChannelClosing, HandleHrmpNewChannelOpenRequest, OnResponse, ProcessTransaction, + RecordXcm, ShouldExecute, TransactAsset, VersionChangeNotifier, WeightBounds, WeightTrader, }; use frame_support::{ dispatch::{GetDispatchInfo, Parameter, PostDispatchInfo}, @@ -134,4 +134,6 @@ pub trait Config { type HrmpChannelClosingHandler: HandleHrmpChannelClosing; /// Allows recording the last executed XCM (used by dry-run runtime APIs). type XcmRecorder: RecordXcm; + /// Handler for publish operations on the relay chain. 
+ type BroadcastHandler: BroadcastHandler; } diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index 1c569225ce2b6..3e8778e095038 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -33,11 +33,11 @@ use xcm::latest::{prelude::*, AssetTransferFilter}; pub mod traits; use traits::{ - validate_export, AssetExchange, AssetLock, CallDispatcher, ClaimAssets, ConvertOrigin, - DropAssets, Enact, EventEmitter, ExportXcm, FeeManager, FeeReason, HandleHrmpChannelAccepted, - HandleHrmpChannelClosing, HandleHrmpNewChannelOpenRequest, OnResponse, ProcessTransaction, - Properties, ShouldExecute, TransactAsset, VersionChangeNotifier, WeightBounds, WeightTrader, - XcmAssetTransfers, + validate_export, AssetExchange, AssetLock, BroadcastHandler, CallDispatcher, ClaimAssets, + ConvertOrigin, DropAssets, Enact, EventEmitter, ExportXcm, FeeManager, FeeReason, + HandleHrmpChannelAccepted, HandleHrmpChannelClosing, HandleHrmpNewChannelOpenRequest, + OnResponse, ProcessTransaction, Properties, ShouldExecute, TransactAsset, + VersionChangeNotifier, WeightBounds, WeightTrader, XcmAssetTransfers, }; pub use traits::RecordXcm; @@ -1819,6 +1819,11 @@ impl<Config: config::Config> XcmExecutor<Config> { Config::TransactionalProcessor::process(|| { Config::HrmpChannelClosingHandler::handle(initiator, sender, recipient) }), + Publish { data } => { + let origin = self.origin_ref().ok_or(XcmError::BadOrigin)?; + Config::BroadcastHandler::handle_publish(origin, data)?; + Ok(()) + }, } } diff --git a/polkadot/xcm/xcm-executor/src/tests/mock.rs b/polkadot/xcm/xcm-executor/src/tests/mock.rs index 850629bef8c06..ef8789f8b1713 100644 --- a/polkadot/xcm/xcm-executor/src/tests/mock.rs +++ b/polkadot/xcm/xcm-executor/src/tests/mock.rs @@ -328,4 +328,5 @@ impl Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = (); + type BroadcastHandler = (); } diff --git
a/polkadot/xcm/xcm-executor/src/traits/broadcast_handler.rs b/polkadot/xcm/xcm-executor/src/traits/broadcast_handler.rs new file mode 100644 index 0000000000000..0a2aab7c7ae0e --- /dev/null +++ b/polkadot/xcm/xcm-executor/src/traits/broadcast_handler.rs @@ -0,0 +1,34 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Traits for handling publish operations in XCM. + +use xcm::latest::{Location, PublishData, Result as XcmResult}; + +/// Trait for handling publish operations on the relay chain. +pub trait BroadcastHandler { + /// Handle publish operation from the given origin. + /// Should validate origin authorization and extract necessary data. + fn handle_publish(origin: &Location, data: PublishData) -> XcmResult; +} + +/// Implementation of `BroadcastHandler` for the unit type `()`.
+impl BroadcastHandler for () { + fn handle_publish(_origin: &Location, _data: PublishData) -> XcmResult { + // No-op implementation for unit type + Ok(()) + } +} diff --git a/polkadot/xcm/xcm-executor/src/traits/mod.rs b/polkadot/xcm/xcm-executor/src/traits/mod.rs index 038de83e3fa37..af034e1918859 100644 --- a/polkadot/xcm/xcm-executor/src/traits/mod.rs +++ b/polkadot/xcm/xcm-executor/src/traits/mod.rs @@ -50,6 +50,8 @@ mod hrmp; pub use hrmp::{ HandleHrmpChannelAccepted, HandleHrmpChannelClosing, HandleHrmpNewChannelOpenRequest, }; +mod broadcast_handler; +pub use broadcast_handler::BroadcastHandler; mod event_emitter; mod record_xcm; mod weight;