From d12996837cc832c4feed3687bb1b98c272c05c32 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 5 Jun 2024 15:50:33 +0200 Subject: [PATCH 01/52] Rococo AH: cleanup storage (#4444) Follow up on #4414 to clean up the old storage. --------- Signed-off-by: Oliver Tale-Yazdi --- .../runtimes/assets/asset-hub-rococo/src/lib.rs | 5 +++++ prdoc/pr_4444.prdoc | 10 ++++++++++ 2 files changed, 15 insertions(+) create mode 100644 prdoc/pr_4444.prdoc diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 1fc67ba0c305..2bf09e6a7843 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -992,10 +992,15 @@ pub type Migrations = ( cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5, pallet_collator_selection::migration::v2::MigrationToV2, + frame_support::migrations::RemovePallet, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); +parameter_types! { + pub const StateTrieMigrationName: &'static str = "StateTrieMigration"; +} + /// Migration to initialize storage versions for pallets added after genesis. /// /// This is now done automatically (see ), diff --git a/prdoc/pr_4444.prdoc b/prdoc/pr_4444.prdoc new file mode 100644 index 000000000000..0b6a5715e47f --- /dev/null +++ b/prdoc/pr_4444.prdoc @@ -0,0 +1,10 @@ +title: "Rococo AH: cleanup storage" + +doc: + - audience: Runtime Dev + description: | + Remove old storage that is left over in the Rococo AH storage. 
+ +crates: + - name: asset-hub-rococo-runtime + bump: patch From 0d661eac46a02f3fd7478bb703fe1dce8091891b Mon Sep 17 00:00:00 2001 From: ordian Date: Wed, 5 Jun 2024 15:52:54 +0200 Subject: [PATCH 02/52] statement-distribution: prep for re-enabling (#4431) In preparation for launching re-enabling (https://github.com/paritytech/polkadot-sdk/issues/2418), we need to adjust the disabling strategy of statement-distribution to use the relay parent's state instead of the latest state (union of active leaves). This will also ensure no raciness of getting the latest state vs accepting statements from disabling validators at the cost of being more lenient/potentially accepting more statements from disabled validators. - [x] PRDoc --- .../statement-distribution/src/v2/cluster.rs | 7 - .../statement-distribution/src/v2/mod.rs | 147 +++--- .../src/v2/tests/mod.rs | 2 +- .../src/v2/tests/requests.rs | 455 +++--------------- .../node/backing/statement-distribution.md | 26 +- prdoc/pr_4431.prdoc | 17 + 6 files changed, 144 insertions(+), 510 deletions(-) create mode 100644 prdoc/pr_4431.prdoc diff --git a/polkadot/node/network/statement-distribution/src/v2/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/cluster.rs index c3f45314b246..87b25c785d83 100644 --- a/polkadot/node/network/statement-distribution/src/v2/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/cluster.rs @@ -60,13 +60,6 @@ use polkadot_primitives::{CandidateHash, CompactStatement, Hash, ValidatorIndex} use crate::LOG_TARGET; use std::collections::{HashMap, HashSet}; -#[derive(Hash, PartialEq, Eq)] -struct ValidStatementManifest { - remote: ValidatorIndex, - originator: ValidatorIndex, - candidate_hash: CandidateHash, -} - // A piece of knowledge about a candidate #[derive(Hash, Clone, PartialEq, Eq)] enum Knowledge { diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 
961ec45bdada..73416b193bbe 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -68,7 +68,7 @@ use futures::{ use std::{ collections::{ hash_map::{Entry, HashMap}, - BTreeSet, HashSet, + HashSet, }, time::{Duration, Instant}, }; @@ -156,6 +156,7 @@ struct PerRelayParentState { seconding_limit: usize, session: SessionIndex, groups_per_para: HashMap>, + disabled_validators: HashSet, } impl PerRelayParentState { @@ -166,6 +167,17 @@ impl PerRelayParentState { fn active_validator_state_mut(&mut self) -> Option<&mut ActiveValidatorState> { self.local_validator.as_mut().and_then(|local| local.active.as_mut()) } + + /// Returns `true` if the given validator is disabled in the context of the relay parent. + pub fn is_disabled(&self, validator_index: &ValidatorIndex) -> bool { + self.disabled_validators.contains(validator_index) + } + + /// A convenience function to generate a disabled bitmask for the given backing group. + /// The output bits are set to `true` for validators that are disabled. + pub fn disabled_bitmask(&self, group: &[ValidatorIndex]) -> BitVec { + BitVec::from_iter(group.iter().map(|v| self.is_disabled(v))) + } } // per-relay-parent local validator state. @@ -206,8 +218,6 @@ struct PerSessionState { // getting the topology from the gossip-support subsystem grid_view: Option, local_validator: Option, - // We store the latest state here based on union of leaves. 
- disabled_validators: BTreeSet, } impl PerSessionState { @@ -224,16 +234,7 @@ impl PerSessionState { ) .map(|(_, index)| LocalValidatorIndex::Active(index)); - let disabled_validators = BTreeSet::new(); - - PerSessionState { - session_info, - groups, - authority_lookup, - grid_view: None, - local_validator, - disabled_validators, - } + PerSessionState { session_info, groups, authority_lookup, grid_view: None, local_validator } } fn supply_topology( @@ -269,33 +270,6 @@ impl PerSessionState { fn is_not_validator(&self) -> bool { self.grid_view.is_some() && self.local_validator.is_none() } - - /// A convenience function to generate a disabled bitmask for the given backing group. - /// The output bits are set to `true` for validators that are disabled. - /// Returns `None` if the group index is out of bounds. - pub fn disabled_bitmask(&self, group: GroupIndex) -> Option> { - let group = self.groups.get(group)?; - let mask = BitVec::from_iter(group.iter().map(|v| self.is_disabled(v))); - Some(mask) - } - - /// Returns `true` if the given validator is disabled in the current session. - pub fn is_disabled(&self, validator_index: &ValidatorIndex) -> bool { - self.disabled_validators.contains(validator_index) - } - - /// Extend the list of disabled validators. - pub fn extend_disabled_validators( - &mut self, - disabled: impl IntoIterator, - ) { - self.disabled_validators.extend(disabled); - } - - /// Clear the list of disabled validators. - pub fn clear_disabled_validators(&mut self) { - self.disabled_validators.clear(); - } } pub(crate) struct State { @@ -582,19 +556,16 @@ pub(crate) async fn handle_active_leaves_update( let new_relay_parents = state.implicit_view.all_allowed_relay_parents().cloned().collect::>(); - // We clear the list of disabled validators to reset it properly based on union of leaves. 
- let mut cleared_disabled_validators: BTreeSet = BTreeSet::new(); - for new_relay_parent in new_relay_parents.iter().cloned() { - // Even if we processed this relay parent before, we need to fetch the list of disabled - // validators based on union of active leaves. - let disabled_validators = + let disabled_validators: HashSet<_> = polkadot_node_subsystem_util::vstaging::get_disabled_validators_with_fallback( ctx.sender(), new_relay_parent, ) .await - .map_err(JfyiError::FetchDisabledValidators)?; + .map_err(JfyiError::FetchDisabledValidators)? + .into_iter() + .collect(); let session_index = polkadot_node_subsystem_util::request_session_index_for_child( new_relay_parent, @@ -644,10 +615,6 @@ pub(crate) async fn handle_active_leaves_update( .get_mut(&session_index) .expect("either existed or just inserted; qed"); - if cleared_disabled_validators.insert(session_index) { - per_session.clear_disabled_validators(); - } - if !disabled_validators.is_empty() { gum::debug!( target: LOG_TARGET, @@ -656,8 +623,6 @@ pub(crate) async fn handle_active_leaves_update( ?disabled_validators, "Disabled validators detected" ); - - per_session.extend_disabled_validators(disabled_validators); } if state.per_relay_parent.contains_key(&new_relay_parent) { @@ -723,6 +688,7 @@ pub(crate) async fn handle_active_leaves_update( seconding_limit, session: session_index, groups_per_para, + disabled_validators, }, ); } @@ -1581,6 +1547,17 @@ async fn handle_incoming_statement( }; let session_info = &per_session.session_info; + if per_relay_parent.is_disabled(&statement.unchecked_validator_index()) { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + validator_index = ?statement.unchecked_validator_index(), + "Ignoring a statement from disabled validator." 
+ ); + modify_reputation(reputation, ctx.sender(), peer, COST_DISABLED_VALIDATOR).await; + return + } + let local_validator = match per_relay_parent.local_validator.as_mut() { None => { // we shouldn't be receiving statements unless we're a validator @@ -1614,17 +1591,6 @@ async fn handle_incoming_statement( }, }; - if per_session.is_disabled(&statement.unchecked_validator_index()) { - gum::debug!( - target: LOG_TARGET, - ?relay_parent, - validator_index = ?statement.unchecked_validator_index(), - "Ignoring a statement from disabled validator." - ); - modify_reputation(reputation, ctx.sender(), peer, COST_DISABLED_VALIDATOR).await; - return - } - let (active, cluster_sender_index) = { // This block of code only returns `Some` when both the originator and // the sending peer are in the cluster. @@ -2379,21 +2345,18 @@ async fn handle_incoming_manifest_common<'a, Context>( Some(s) => s, }; - let local_validator = match relay_parent_state.local_validator.as_mut() { - None => { - if per_session.is_not_validator() { - modify_reputation( - reputation, - ctx.sender(), - peer, - COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE, - ) - .await; - } - return None - }, - Some(x) => x, - }; + if relay_parent_state.local_validator.is_none() { + if per_session.is_not_validator() { + modify_reputation( + reputation, + ctx.sender(), + peer, + COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE, + ) + .await; + } + return None + } let Some(expected_groups) = relay_parent_state.groups_per_para.get(¶_id) else { modify_reputation(reputation, ctx.sender(), peer, COST_MALFORMED_MANIFEST).await; @@ -2436,10 +2399,13 @@ async fn handle_incoming_manifest_common<'a, Context>( let claimed_parent_hash = manifest_summary.claimed_parent_hash; // Ignore votes from disabled validators when counting towards the threshold. 
- let disabled_mask = per_session.disabled_bitmask(group_index).unwrap_or_default(); + let group = per_session.groups.get(group_index).unwrap_or(&[]); + let disabled_mask = relay_parent_state.disabled_bitmask(group); manifest_summary.statement_knowledge.mask_seconded(&disabled_mask); manifest_summary.statement_knowledge.mask_valid(&disabled_mask); + let local_validator = relay_parent_state.local_validator.as_mut().expect("checked above; qed"); + let acknowledge = match local_validator.grid_tracker.import_manifest( grid_topology, &per_session.groups, @@ -3018,9 +2984,7 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St } // Add disabled validators to the unwanted mask. - let disabled_mask = per_session - .disabled_bitmask(group_index) - .expect("group existence checked above; qed"); + let disabled_mask = relay_parent_state.disabled_bitmask(group); unwanted_mask.seconded_in_group |= &disabled_mask; unwanted_mask.validated_in_group |= &disabled_mask; @@ -3111,9 +3075,7 @@ pub(crate) async fn handle_response( Some(g) => g, }; - let disabled_mask = per_session - .disabled_bitmask(group_index) - .expect("group_index checked above; qed"); + let disabled_mask = relay_parent_state.disabled_bitmask(group); let res = response.validate_response( &mut state.request_manager, @@ -3258,7 +3220,7 @@ pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) { Some(s) => s, }; - let local_validator = match relay_parent_state.local_validator.as_mut() { + let local_validator = match relay_parent_state.local_validator.as_ref() { None => return, Some(s) => s, }; @@ -3332,16 +3294,15 @@ pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) { // Transform mask with 'OR' semantics into one with 'AND' semantics for the API used // below. 
- let mut and_mask = StatementFilter { + let and_mask = StatementFilter { seconded_in_group: !mask.seconded_in_group.clone(), validated_in_group: !mask.validated_in_group.clone(), }; - // Ignore disabled validators from the latest state when sending the response. - let disabled_mask = - per_session.disabled_bitmask(group_index).expect("group existence checked; qed"); - and_mask.mask_seconded(&disabled_mask); - and_mask.mask_valid(&disabled_mask); + let local_validator = match relay_parent_state.local_validator.as_mut() { + None => return, + Some(s) => s, + }; let mut sent_filter = StatementFilter::blank(group_size); let statements: Vec<_> = relay_parent_state diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index f9a484f47a94..078d556391a3 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -422,7 +422,7 @@ struct TestLeaf { parent_hash: Hash, session: SessionIndex, availability_cores: Vec, - disabled_validators: Vec, + pub disabled_validators: Vec, para_data: Vec<(ParaId, PerParaData)>, minimum_backing_votes: u32, } diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index 38d7a10b8652..4fdfda0dba24 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -22,9 +22,8 @@ use polkadot_node_network_protocol::{ request_response::v2 as request_v2, v2::BackedCandidateManifest, }; use polkadot_primitives_test_helpers::make_candidate; -use sc_network::{ - config::{IncomingRequest as RawIncomingRequest, OutgoingResponse as RawOutgoingResponse}, - ProtocolName, +use sc_network::config::{ + IncomingRequest as RawIncomingRequest, OutgoingResponse as RawOutgoingResponse, }; 
#[test] @@ -1222,392 +1221,8 @@ fn disabled_validators_added_to_unwanted_mask() { }); } -// We send a request to a peer and after receiving the response -// we learn about a validator being disabled. We should filter out -// the statement from the disabled validator when receiving it. #[test] -fn when_validator_disabled_after_sending_the_request() { - let group_size = 3; - let config = TestConfig { - validator_count: 20, - group_size, - local_validator: LocalRole::Validator, - async_backing_params: None, - }; - - let relay_parent = Hash::repeat_byte(1); - let another_relay_parent = Hash::repeat_byte(2); - let peer_disabled_later = PeerId::random(); - let peer_b = PeerId::random(); - - test_harness(config, |state, mut overseer| async move { - let local_validator = state.local.clone().unwrap(); - let local_group_index = local_validator.group_index.unwrap(); - let local_para = ParaId::from(local_group_index.0); - let other_group_validators = state.group_validators(local_group_index, true); - let index_disabled = other_group_validators[0]; - let index_b = other_group_validators[1]; - - let test_leaf = state.make_dummy_leaf_with_disabled_validators(relay_parent, vec![]); - let test_leaf_disabled = state - .make_dummy_leaf_with_disabled_validators(another_relay_parent, vec![index_disabled]); - - let (candidate, pvd) = make_candidate( - relay_parent, - 1, - local_para, - test_leaf.para_data(local_para).head_data.clone(), - vec![4, 5, 6].into(), - Hash::repeat_byte(42).into(), - ); - let candidate_hash = candidate.hash(); - - // peer A is in group, has relay parent in view and disabled later. - // peer B is in group, has relay parent in view. 
- { - connect_peer( - &mut overseer, - peer_disabled_later.clone(), - Some(vec![state.discovery_id(index_disabled)].into_iter().collect()), - ) - .await; - connect_peer( - &mut overseer, - peer_b.clone(), - Some(vec![state.discovery_id(index_b)].into_iter().collect()), - ) - .await; - send_peer_view_change(&mut overseer, peer_disabled_later.clone(), view![relay_parent]) - .await; - send_peer_view_change(&mut overseer, peer_b.clone(), view![relay_parent]).await; - } - - activate_leaf(&mut overseer, &test_leaf, &state, true, vec![]).await; - - let seconded_disabled = state - .sign_statement( - index_disabled, - CompactStatement::Seconded(candidate_hash), - &SigningContext { parent_hash: relay_parent, session_index: 1 }, - ) - .as_unchecked() - .clone(); - - let seconded_b = state - .sign_statement( - index_b, - CompactStatement::Seconded(candidate_hash), - &SigningContext { parent_hash: relay_parent, session_index: 1 }, - ) - .as_unchecked() - .clone(); - { - send_peer_message( - &mut overseer, - peer_b.clone(), - protocol_v2::StatementDistributionMessage::Statement( - relay_parent, - seconded_b.clone(), - ), - ) - .await; - - assert_matches!( - overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) - if p == peer_b && r == BENEFIT_VALID_STATEMENT_FIRST.into() => { } - ); - } - - // Send a request to peer and activate leaf when a validator is disabled; - // mock the response with a statement from disabled validator. 
- { - let statements = vec![seconded_disabled]; - let mask = StatementFilter::blank(group_size); - - assert_matches!( - overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(mut requests, IfDisconnected::ImmediateError)) => { - assert_eq!(requests.len(), 1); - assert_matches!( - requests.pop().unwrap(), - Requests::AttestedCandidateV2(outgoing) => { - assert_eq!(outgoing.peer, Recipient::Peer(peer_b)); - assert_eq!(outgoing.payload.candidate_hash, candidate_hash); - assert_eq!(outgoing.payload.mask, mask); - - activate_leaf(&mut overseer, &test_leaf_disabled, &state, false, vec![]).await; - - let res = AttestedCandidateResponse { - candidate_receipt: candidate, - persisted_validation_data: pvd, - statements, - }; - outgoing.pending_response.send(Ok((res.encode(), ProtocolName::from("")))).unwrap(); - } - ); - } - ); - - assert_matches!( - overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) - if p == peer_b && r == BENEFIT_VALID_RESPONSE.into() => { } - ); - - assert_matches!( - overseer.recv().await, - AllMessages:: NetworkBridgeTx( - NetworkBridgeTxMessage::SendValidationMessage( - peers, - Versioned::V2( - protocol_v2::ValidationProtocol::StatementDistribution( - protocol_v2::StatementDistributionMessage::Statement(hash, statement), - ), - ), - ) - ) => { - assert_eq!(peers, vec![peer_disabled_later]); - assert_eq!(hash, relay_parent); - assert_eq!(statement, seconded_b); - } - ); - answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; - } - - overseer - }); -} - -#[test] -fn no_response_for_grid_request_not_meeting_quorum() { - let validator_count = 6; - let group_size = 3; - let config = TestConfig { - validator_count, - group_size, - local_validator: LocalRole::Validator, - async_backing_params: None, - }; - - let relay_parent = Hash::repeat_byte(1); - let peer_a = PeerId::random(); - let peer_b = PeerId::random(); - let peer_c = 
PeerId::random(); - - test_harness(config, |mut state, mut overseer| async move { - let local_validator = state.local.clone().unwrap(); - let local_group_index = local_validator.group_index.unwrap(); - let local_para = ParaId::from(local_group_index.0); - - let test_leaf = state.make_dummy_leaf_with_min_backing_votes(relay_parent, 2); - - let (candidate, pvd) = make_candidate( - relay_parent, - 1, - local_para, - test_leaf.para_data(local_para).head_data.clone(), - vec![4, 5, 6].into(), - Hash::repeat_byte(42).into(), - ); - let candidate_hash = candidate.hash(); - - let other_group_validators = state.group_validators(local_group_index, true); - let target_group_validators = - state.group_validators((local_group_index.0 + 1).into(), true); - let v_a = other_group_validators[0]; - let v_b = other_group_validators[1]; - let v_c = target_group_validators[0]; - - // peer A is in group, has relay parent in view. - // peer B is in group, has no relay parent in view. - // peer C is not in group, has relay parent in view. - { - connect_peer( - &mut overseer, - peer_a.clone(), - Some(vec![state.discovery_id(v_a)].into_iter().collect()), - ) - .await; - - connect_peer( - &mut overseer, - peer_b.clone(), - Some(vec![state.discovery_id(v_b)].into_iter().collect()), - ) - .await; - - connect_peer( - &mut overseer, - peer_c.clone(), - Some(vec![state.discovery_id(v_c)].into_iter().collect()), - ) - .await; - - send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; - send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; - } - - activate_leaf(&mut overseer, &test_leaf, &state, true, vec![]).await; - - // Send gossip topology. - send_new_topology(&mut overseer, state.make_dummy_topology()).await; - - // Confirm the candidate locally so that we don't send out requests. 
- { - let statement = state - .sign_full_statement( - local_validator.validator_index, - Statement::Seconded(candidate.clone()), - &SigningContext { parent_hash: relay_parent, session_index: 1 }, - pvd.clone(), - ) - .clone(); - - overseer - .send(FromOrchestra::Communication { - msg: StatementDistributionMessage::Share(relay_parent, statement), - }) - .await; - - assert_matches!( - overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] - ); - - answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; - } - - // Send enough statements to make candidate backable, make sure announcements are sent. - - // Send statement from peer A. - { - let statement = state - .sign_statement( - v_a, - CompactStatement::Seconded(candidate_hash), - &SigningContext { parent_hash: relay_parent, session_index: 1 }, - ) - .as_unchecked() - .clone(); - - send_peer_message( - &mut overseer, - peer_a.clone(), - protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), - ) - .await; - - assert_matches!( - overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) - if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST.into() => { } - ); - } - - // Send statement from peer B. 
- let statement_b = state - .sign_statement( - v_b, - CompactStatement::Seconded(candidate_hash), - &SigningContext { parent_hash: relay_parent, session_index: 1 }, - ) - .as_unchecked() - .clone(); - { - send_peer_message( - &mut overseer, - peer_b.clone(), - protocol_v2::StatementDistributionMessage::Statement( - relay_parent, - statement_b.clone(), - ), - ) - .await; - - assert_matches!( - overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) - if p == peer_b && r == BENEFIT_VALID_STATEMENT_FIRST.into() => { } - ); - - assert_matches!( - overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] - ); - } - - // Send Backed notification. - { - overseer - .send(FromOrchestra::Communication { - msg: StatementDistributionMessage::Backed(candidate_hash), - }) - .await; - - assert_matches!( - overseer.recv().await, - AllMessages:: NetworkBridgeTx( - NetworkBridgeTxMessage::SendValidationMessage( - peers, - Versioned::V2( - protocol_v2::ValidationProtocol::StatementDistribution( - protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), - ), - ), - ) - ) => { - assert_eq!(peers, vec![peer_c]); - assert_eq!(manifest, BackedCandidateManifest { - relay_parent, - candidate_hash, - group_index: local_validator.group_index.unwrap(), - para_id: local_para, - parent_head_data_hash: pvd.parent_head.hash(), - statement_knowledge: StatementFilter { - seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], - }, - }); - } - ); - - answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; - } - - let mask = StatementFilter { - seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], - }; - - let relay_2 = Hash::repeat_byte(2); - let disabled_validators = vec![v_a]; - let leaf_2 = 
state.make_dummy_leaf_with_disabled_validators(relay_2, disabled_validators); - activate_leaf(&mut overseer, &leaf_2, &state, false, vec![]).await; - - // Incoming request to local node. Local node should not send the response as v_a is - // disabled and hence the quorum is not reached. - { - let response = state - .send_request( - peer_c, - request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask }, - ) - .await - .await; - - assert!( - response.is_none(), - "We should not send a response as the quorum is not reached yet" - ); - } - - overseer - }); -} - -#[test] -fn disabling_works_from_the_latest_state_not_relay_parent() { +fn disabling_works_from_relay_parent_not_the_latest_state() { let group_size = 3; let config = TestConfig { validator_count: 20, @@ -1642,7 +1257,7 @@ fn disabling_works_from_the_latest_state_not_relay_parent() { ); let candidate_1_hash = candidate_1.hash(); - let (candidate_2, _) = make_candidate( + let (candidate_2, pvd_2) = make_candidate( relay_1, 1, local_para, @@ -1652,6 +1267,16 @@ fn disabling_works_from_the_latest_state_not_relay_parent() { ); let candidate_2_hash = candidate_2.hash(); + let (candidate_3, _) = make_candidate( + relay_2, + 1, + local_para, + leaf_1.para_data(local_para).head_data.clone(), + vec![4, 5, 6, 7].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_3_hash = candidate_3.hash(); + { connect_peer( &mut overseer, @@ -1681,6 +1306,16 @@ fn disabling_works_from_the_latest_state_not_relay_parent() { ) .as_unchecked() .clone(); + + let seconded_3 = state + .sign_statement( + index_disabled, + CompactStatement::Seconded(candidate_3_hash), + &SigningContext { parent_hash: relay_2, session_index: 1 }, + ) + .as_unchecked() + .clone(); + { send_peer_message( &mut overseer, @@ -1733,6 +1368,48 @@ fn disabling_works_from_the_latest_state_not_relay_parent() { ) .await; + assert_matches!( + overseer.recv().await, + 
AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_disabled && r == BENEFIT_VALID_STATEMENT_FIRST.into() => { } + ); + } + + { + handle_sent_request( + &mut overseer, + peer_disabled, + candidate_2_hash, + StatementFilter::blank(group_size), + candidate_2.clone(), + pvd_2.clone(), + vec![seconded_2.clone()], + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_disabled && r == BENEFIT_VALID_STATEMENT.into() => { } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_disabled && r == BENEFIT_VALID_RESPONSE.into() => { } + ); + + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; + } + + { + send_peer_message( + &mut overseer, + peer_disabled.clone(), + protocol_v2::StatementDistributionMessage::Statement(relay_2, seconded_3.clone()), + ) + .await; + assert_matches!( overseer.recv().await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) diff --git a/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md b/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md index e5eb9bd7642c..ce2ff3ca9139 100644 --- a/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md +++ b/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md @@ -130,23 +130,9 @@ accept statements from it. Filtering out of statements from disabled validators on the node side is purely an optimization, as it will be done in the runtime as well. 
-Because we use the state of the active leaves to -check whether a validator is disabled instead of the relay parent, the notion -of being disabled is inherently racy: -- the responder has learned about the disabled validator before the requester -- the receiver has witnessed the disabled validator after sending the request - -We could have sent a manifest to a peer, then received information about -disabling, and then receive a request. This can break an invariant of the grid -mode: -- the response is required to indicate quorum - -Due to the above, there should be no response at all for grid requests when -the backing threshold is no longer met as a result of disabled validators. -In addition to that, we add disabled validators to the request's unwanted -mask. This ensures that the sender will not send statements from disabled -validators (at least from the perspective of the receiver at the moment of the -request). This doesn't fully avoid race conditions, but tries to minimize them. +We use the state of the relay parent to check whether a validator is disabled +to avoid race conditions and ensure that disabling works well in the presense +of re-enabling. ## Messages @@ -211,9 +197,9 @@ We also have a request/response protocol because validators do not eagerly send - Requests are queued up with `RequestManager::get_or_insert`. - Done as needed, when handling incoming manifests/statements. - `RequestManager::dispatch_requests` sends any queued-up requests. - - Calls `RequestManager::next_request` to completion. - - Creates the `OutgoingRequest`, saves the receiver in `RequestManager::pending_responses`. - - Does nothing if we have more responses pending than the limit of parallel requests. + - Calls `RequestManager::next_request` to completion. + - Creates the `OutgoingRequest`, saves the receiver in `RequestManager::pending_responses`. + - Does nothing if we have more responses pending than the limit of parallel requests. 2. 
Peer diff --git a/prdoc/pr_4431.prdoc b/prdoc/pr_4431.prdoc new file mode 100644 index 000000000000..993a7326b9aa --- /dev/null +++ b/prdoc/pr_4431.prdoc @@ -0,0 +1,17 @@ +title: "Statement-Distribution validator disabling changes" + +doc: + - audience: Node Dev + description: | + In preparation for launching re-enabling (#2418), we need to adjust the + disabling strategy of statement-distribution to use the relay parent's + state instead of the latest state (union of active leaves). This will also + ensure no raciness of getting the latest state vs accepting statements from + disabling validators at the cost of being more lenient/potentially accepting + more statements from disabled validators. + +crates: + - name: polkadot-statement-distribution + bump: patch + - name: polkadot + bump: none From d2fd53645654d3b8e12cbf735b67b93078d70113 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 5 Jun 2024 15:54:37 +0200 Subject: [PATCH 03/52] Unify dependency aliases (#4633) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Inherited workspace dependencies cannot be renamed by the crate using them (see [1](https://github.com/rust-lang/cargo/issues/12546), [2](https://stackoverflow.com/questions/76792343/can-inherited-dependencies-in-rust-be-aliased-in-the-cargo-toml-file)). Since we want to use inherited workspace dependencies everywhere, we first need to unify all aliases that we use for a dependency throughout the workspace. The umbrella crate is currently excluded from this procedure, since it should be able to export the crates by their original name without much hassle. For example: one crate may alias `parity-scale-codec` to `codec`, while another crate does not alias it at all. After this change, all crates have to use `codec` as name. The problematic combinations were: - conflicting aliases: most crates aliases as `A` but some use `B`. - missing alias: most of the crates alias a dep but some dont. 
- superfluous alias: most crates dont alias a dep but some do. The script that i used first determines whether most crates opted to alias a dependency or not. From that info it decides whether to use an alias or not. If it decided to use an alias, the most common one is used everywhere. To reproduce, i used [this](https://github.com/ggwpez/substrate-scripts/blob/master/uniform-crate-alias.py) python script in combination with [this](https://github.com/ggwpez/zepter/blob/38ad10585fe98a5a86c1d2369738bc763a77057b/renames.json) error output from Zepter. --------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Bastian Köcher --- bridges/relays/utils/Cargo.toml | 2 +- bridges/relays/utils/src/error.rs | 2 +- bridges/relays/utils/src/metrics.rs | 2 +- bridges/relays/utils/src/relay_loop.rs | 2 +- .../pallets/ethereum-client/Cargo.toml | 4 +- .../ethereum-client/src/benchmarking/mod.rs | 2 +- .../ethereum-client/src/benchmarking/util.rs | 2 +- .../pallets/ethereum-client/src/config/mod.rs | 2 +- .../pallets/ethereum-client/src/functions.rs | 7 +- .../pallets/ethereum-client/src/impls.rs | 2 +- .../pallets/ethereum-client/src/lib.rs | 2 +- .../pallets/ethereum-client/src/mock.rs | 30 +-- .../pallets/ethereum-client/src/tests.rs | 5 +- .../pallets/ethereum-client/src/types.rs | 14 +- .../snowbridge/primitives/router/Cargo.toml | 2 +- cumulus/client/consensus/aura/Cargo.toml | 2 +- .../aura/src/equivocation_import_queue.rs | 2 +- .../client/consensus/aura/src/import_queue.rs | 2 +- cumulus/client/consensus/common/Cargo.toml | 2 +- .../consensus/common/src/import_queue.rs | 2 +- .../client/consensus/relay-chain/Cargo.toml | 2 +- .../consensus/relay-chain/src/import_queue.rs | 2 +- .../client/relay-chain-interface/Cargo.toml | 2 +- .../client/relay-chain-interface/src/lib.rs | 2 +- .../relay-chain-minimal-node/Cargo.toml | 2 +- .../relay-chain-minimal-node/src/lib.rs | 2 +- .../relay-chain-rpc-interface/Cargo.toml | 2 +- .../src/rpc_client.rs | 2 +- 
.../pallets/session-benchmarking/Cargo.toml | 4 +- .../pallets/session-benchmarking/src/inner.rs | 2 +- .../emulated/chains/relays/rococo/Cargo.toml | 4 +- .../chains/relays/rococo/src/genesis.rs | 4 +- .../emulated/chains/relays/westend/Cargo.toml | 4 +- .../chains/relays/westend/src/genesis.rs | 4 +- .../emulated/common/Cargo.toml | 4 +- .../emulated/common/src/lib.rs | 4 +- cumulus/polkadot-parachain/Cargo.toml | 4 +- cumulus/polkadot-parachain/src/rpc.rs | 8 +- cumulus/polkadot-parachain/src/service.rs | 8 +- docs/sdk/Cargo.toml | 2 +- .../src/reference_docs/extrinsic_encoding.rs | 4 +- .../src/reference_docs/signed_extensions.rs | 2 +- polkadot/cli/Cargo.toml | 25 +-- polkadot/cli/src/command.rs | 106 +++++----- polkadot/cli/src/error.rs | 4 +- polkadot/cli/src/lib.rs | 6 +- polkadot/core-primitives/Cargo.toml | 4 +- polkadot/core-primitives/src/lib.rs | 2 +- polkadot/erasure-coding/Cargo.toml | 2 +- polkadot/erasure-coding/fuzzer/Cargo.toml | 2 +- .../erasure-coding/fuzzer/src/reconstruct.rs | 2 +- .../erasure-coding/fuzzer/src/round_trip.rs | 2 +- polkadot/erasure-coding/src/lib.rs | 4 +- polkadot/node/collation-generation/Cargo.toml | 4 +- polkadot/node/collation-generation/src/lib.rs | 2 +- .../node/collation-generation/src/tests.rs | 6 +- polkadot/node/core/approval-voting/Cargo.toml | 4 +- .../approval-voting/src/approval_checking.rs | 2 +- .../src/approval_db/common/mod.rs | 4 +- .../approval-voting/src/approval_db/v1/mod.rs | 2 +- .../src/approval_db/v1/tests.rs | 4 +- .../approval-voting/src/approval_db/v2/mod.rs | 2 +- .../src/approval_db/v2/tests.rs | 4 +- .../approval-voting/src/approval_db/v3/mod.rs | 2 +- .../src/approval_db/v3/tests.rs | 4 +- .../node/core/approval-voting/src/criteria.rs | 2 +- .../node/core/approval-voting/src/import.rs | 2 +- .../node/core/approval-voting/src/tests.rs | 8 +- polkadot/node/core/av-store/Cargo.toml | 6 +- polkadot/node/core/av-store/src/lib.rs | 8 +- polkadot/node/core/av-store/src/tests.rs | 47 +++-- 
polkadot/node/core/backing/Cargo.toml | 6 +- polkadot/node/core/backing/src/error.rs | 2 +- polkadot/node/core/backing/src/lib.rs | 4 +- polkadot/node/core/backing/src/tests/mod.rs | 23 ++- .../src/tests/prospective_parachains.rs | 3 +- .../node/core/bitfield-signing/Cargo.toml | 2 +- .../node/core/bitfield-signing/src/tests.rs | 2 +- .../node/core/candidate-validation/Cargo.toml | 4 +- .../node/core/candidate-validation/src/lib.rs | 2 +- .../core/candidate-validation/src/tests.rs | 64 ++++-- polkadot/node/core/chain-api/Cargo.toml | 2 +- polkadot/node/core/chain-api/src/tests.rs | 2 +- polkadot/node/core/chain-selection/Cargo.toml | 2 +- .../core/chain-selection/src/db_backend/v1.rs | 2 +- polkadot/node/core/chain-selection/src/lib.rs | 2 +- .../node/core/chain-selection/src/tests.rs | 8 +- .../node/core/dispute-coordinator/Cargo.toml | 4 +- .../core/dispute-coordinator/src/db/v1.rs | 6 +- .../core/dispute-coordinator/src/error.rs | 2 +- .../src/participation/queues/tests.rs | 2 +- .../src/participation/tests.rs | 8 +- .../dispute-coordinator/src/scraping/tests.rs | 4 +- .../core/dispute-coordinator/src/tests.rs | 2 +- .../core/prospective-parachains/Cargo.toml | 2 +- .../core/prospective-parachains/src/tests.rs | 6 +- polkadot/node/core/provisioner/Cargo.toml | 2 +- .../disputes/prioritized_selection/tests.rs | 17 +- polkadot/node/core/provisioner/src/tests.rs | 4 +- polkadot/node/core/pvf-checker/Cargo.toml | 2 +- polkadot/node/core/pvf-checker/src/tests.rs | 2 +- polkadot/node/core/pvf/Cargo.toml | 6 +- polkadot/node/core/pvf/common/Cargo.toml | 2 +- polkadot/node/core/pvf/common/src/error.rs | 2 +- polkadot/node/core/pvf/common/src/execute.rs | 2 +- .../core/pvf/common/src/executor_interface.rs | 2 +- polkadot/node/core/pvf/common/src/lib.rs | 2 +- polkadot/node/core/pvf/common/src/prepare.rs | 2 +- polkadot/node/core/pvf/common/src/pvf.rs | 2 +- .../node/core/pvf/common/src/worker/mod.rs | 2 +- .../node/core/pvf/execute-worker/Cargo.toml | 2 +- 
.../node/core/pvf/execute-worker/src/lib.rs | 2 +- .../node/core/pvf/prepare-worker/Cargo.toml | 2 +- .../node/core/pvf/prepare-worker/src/lib.rs | 2 +- .../core/pvf/src/execute/worker_interface.rs | 2 +- .../core/pvf/src/prepare/worker_interface.rs | 2 +- .../node/core/pvf/src/worker_interface.rs | 2 +- polkadot/node/core/pvf/tests/it/adder.rs | 14 +- polkadot/node/core/pvf/tests/it/main.rs | 68 +++++-- polkadot/node/core/pvf/tests/it/process.rs | 14 +- polkadot/node/core/runtime-api/Cargo.toml | 2 +- polkadot/node/core/runtime-api/src/tests.rs | 2 +- polkadot/node/jaeger/Cargo.toml | 2 +- polkadot/node/jaeger/src/spans.rs | 2 +- polkadot/node/malus/Cargo.toml | 2 +- .../src/variants/suggest_garbage_candidate.rs | 4 +- polkadot/node/metrics/Cargo.toml | 6 +- polkadot/node/metrics/src/lib.rs | 2 +- polkadot/node/metrics/src/runtime/mod.rs | 8 +- .../node/metrics/src/runtime/parachain.rs | 2 +- polkadot/node/metrics/src/tests.rs | 2 +- .../approval-distribution/src/tests.rs | 10 +- .../availability-distribution/Cargo.toml | 2 +- .../src/pov_requester/mod.rs | 11 +- .../src/requester/fetch_task/mod.rs | 2 +- .../src/requester/fetch_task/tests.rs | 2 +- .../src/responder.rs | 2 +- .../src/tests/mod.rs | 5 +- .../src/tests/state.rs | 4 +- .../network/availability-recovery/Cargo.toml | 2 +- .../availability-recovery/src/task/mod.rs | 2 +- .../src/task/strategy/mod.rs | 4 +- .../availability-recovery/src/tests.rs | 2 +- polkadot/node/network/bridge/Cargo.toml | 2 +- polkadot/node/network/bridge/src/lib.rs | 2 +- polkadot/node/network/bridge/src/network.rs | 2 +- polkadot/node/network/bridge/src/rx/mod.rs | 2 +- polkadot/node/network/bridge/src/tx/tests.rs | 2 +- .../node/network/collator-protocol/Cargo.toml | 2 +- .../src/collator_side/tests/mod.rs | 8 +- .../src/validator_side/tests/mod.rs | 6 +- .../network/dispute-distribution/Cargo.toml | 2 +- .../dispute-distribution/src/tests/mod.rs | 2 +- .../node/network/gossip-support/src/tests.rs | 6 +- 
polkadot/node/network/protocol/Cargo.toml | 2 +- polkadot/node/network/protocol/src/lib.rs | 8 +- .../src/request_response/incoming/error.rs | 2 +- .../src/request_response/incoming/mod.rs | 2 +- .../protocol/src/request_response/outgoing.rs | 2 +- .../protocol/src/request_response/v1.rs | 2 +- .../protocol/src/request_response/v2.rs | 2 +- .../network/statement-distribution/Cargo.toml | 2 +- .../src/legacy_v1/mod.rs | 2 +- .../src/legacy_v1/tests.rs | 2 +- .../src/v2/tests/mod.rs | 8 +- .../src/v2/tests/requests.rs | 2 +- polkadot/node/overseer/Cargo.toml | 6 +- .../node/overseer/examples/minimal-example.rs | 2 +- polkadot/node/overseer/src/lib.rs | 2 +- polkadot/node/overseer/src/tests.rs | 6 +- polkadot/node/primitives/Cargo.toml | 2 +- polkadot/node/primitives/src/approval.rs | 4 +- .../node/primitives/src/disputes/message.rs | 2 +- polkadot/node/primitives/src/disputes/mod.rs | 2 +- .../node/primitives/src/disputes/status.rs | 2 +- polkadot/node/primitives/src/lib.rs | 2 +- polkadot/node/service/Cargo.toml | 22 +-- polkadot/node/service/src/chain_spec.rs | 14 +- polkadot/node/service/src/fake_runtime_api.rs | 30 +-- polkadot/node/service/src/grandpa_support.rs | 6 +- polkadot/node/service/src/lib.rs | 187 ++++++++++-------- polkadot/node/service/src/overseer.rs | 2 +- .../node/service/src/parachains_db/upgrade.rs | 2 +- .../node/service/src/relay_chain_selection.rs | 4 +- polkadot/node/service/src/tests.rs | 7 +- polkadot/node/subsystem-bench/Cargo.toml | 6 +- .../src/lib/approval/message_generator.rs | 2 +- .../subsystem-bench/src/lib/approval/mod.rs | 2 +- .../src/lib/approval/test_message.rs | 2 +- .../src/lib/availability/mod.rs | 2 +- .../src/lib/availability/test_state.rs | 2 +- .../subsystem-bench/src/lib/mock/av_store.rs | 2 +- .../node/subsystem-bench/src/lib/network.rs | 2 +- .../src/lib/statement/test_state.rs | 2 +- polkadot/node/subsystem-types/Cargo.toml | 2 +- polkadot/node/subsystem-types/src/errors.rs | 2 +- 
polkadot/node/subsystem-util/Cargo.toml | 4 +- .../subsystem-util/src/availability_chunks.rs | 6 +- polkadot/node/subsystem-util/src/lib.rs | 2 +- .../node/subsystem-util/src/runtime/mod.rs | 2 +- polkadot/node/test/client/Cargo.toml | 2 +- .../node/test/client/src/block_builder.rs | 2 +- polkadot/node/test/service/Cargo.toml | 12 +- polkadot/node/test/service/src/chain_spec.rs | 4 +- .../node/zombienet-backchannel/Cargo.toml | 2 +- .../node/zombienet-backchannel/src/lib.rs | 2 +- polkadot/parachain/Cargo.toml | 4 +- polkadot/parachain/src/primitives.rs | 2 +- polkadot/parachain/src/wasm_api.rs | 2 +- polkadot/parachain/test-parachains/Cargo.toml | 8 +- .../test-parachains/adder/Cargo.toml | 6 +- .../test-parachains/adder/collator/Cargo.toml | 2 +- .../test-parachains/adder/collator/src/lib.rs | 2 +- .../test-parachains/adder/src/lib.rs | 2 +- .../adder/src/wasm_validation.rs | 8 +- .../test-parachains/undying/Cargo.toml | 8 +- .../undying/collator/Cargo.toml | 2 +- .../undying/collator/src/lib.rs | 2 +- .../test-parachains/undying/src/lib.rs | 2 +- .../undying/src/wasm_validation.rs | 8 +- polkadot/primitives/Cargo.toml | 22 +-- polkadot/primitives/src/v7/async_backing.rs | 4 +- polkadot/primitives/src/v7/executor_params.rs | 2 +- polkadot/primitives/src/v7/metrics.rs | 2 +- polkadot/primitives/src/v7/mod.rs | 52 +++-- polkadot/primitives/src/v7/signed.rs | 12 +- polkadot/primitives/src/v7/slashing.rs | 2 +- polkadot/primitives/src/vstaging/mod.rs | 4 +- polkadot/rpc/Cargo.toml | 4 +- polkadot/rpc/src/lib.rs | 6 +- polkadot/runtime/common/Cargo.toml | 24 +-- .../common/slot_range_helper/Cargo.toml | 4 +- .../common/slot_range_helper/src/lib.rs | 2 +- .../common/src/assigned_slots/benchmarking.rs | 2 +- .../runtime/common/src/assigned_slots/mod.rs | 15 +- polkadot/runtime/common/src/auctions.rs | 10 +- polkadot/runtime/common/src/claims.rs | 6 +- polkadot/runtime/common/src/crowdloan/mod.rs | 10 +- .../runtime/common/src/identity_migrator.rs | 2 +- 
polkadot/runtime/common/src/impls.rs | 14 +- .../runtime/common/src/integration_tests.rs | 6 +- polkadot/runtime/common/src/lib.rs | 6 +- polkadot/runtime/common/src/mock.rs | 14 +- .../runtime/common/src/paras_registrar/mod.rs | 44 +++-- .../runtime/common/src/paras_sudo_wrapper.rs | 14 +- polkadot/runtime/common/src/purchase.rs | 2 +- polkadot/runtime/common/src/slots/mod.rs | 8 +- polkadot/runtime/common/src/traits.rs | 2 +- polkadot/runtime/common/src/xcm_sender.rs | 8 +- polkadot/runtime/metrics/Cargo.toml | 8 +- .../metrics/src/with_runtime_metrics.rs | 4 +- .../metrics/src/without_runtime_metrics.rs | 2 +- polkadot/runtime/parachains/Cargo.toml | 18 +- .../src/assigner_coretime/mock_helpers.rs | 4 +- .../parachains/src/assigner_coretime/mod.rs | 4 +- .../parachains/src/assigner_coretime/tests.rs | 2 +- .../src/assigner_on_demand/benchmarking.rs | 2 +- .../src/assigner_on_demand/migration.rs | 2 +- .../src/assigner_on_demand/mock_helpers.rs | 2 +- .../parachains/src/assigner_on_demand/mod.rs | 2 +- .../src/assigner_on_demand/tests.rs | 2 +- .../parachains/src/assigner_parachains.rs | 4 +- .../src/assigner_parachains/mock_helpers.rs | 4 +- .../src/assigner_parachains/tests.rs | 2 +- polkadot/runtime/parachains/src/builder.rs | 2 +- .../runtime/parachains/src/configuration.rs | 8 +- .../src/configuration/benchmarking.rs | 2 +- .../src/configuration/migration/v10.rs | 11 +- .../src/configuration/migration/v11.rs | 11 +- .../src/configuration/migration/v12.rs | 11 +- .../src/configuration/migration/v6.rs | 4 +- .../src/configuration/migration/v7.rs | 11 +- .../src/configuration/migration/v8.rs | 9 +- .../src/configuration/migration/v9.rs | 9 +- .../parachains/src/configuration/tests.rs | 2 +- .../parachains/src/coretime/migration.rs | 10 +- .../runtime/parachains/src/coretime/mod.rs | 2 +- polkadot/runtime/parachains/src/disputes.rs | 6 +- .../parachains/src/disputes/migration.rs | 2 +- .../parachains/src/disputes/slashing.rs | 7 +- 
.../src/disputes/slashing/benchmarking.rs | 4 +- .../runtime/parachains/src/disputes/tests.rs | 2 +- polkadot/runtime/parachains/src/dmp.rs | 2 +- polkadot/runtime/parachains/src/dmp/tests.rs | 6 +- polkadot/runtime/parachains/src/hrmp.rs | 8 +- polkadot/runtime/parachains/src/hrmp/tests.rs | 4 +- .../parachains/src/inclusion/migration.rs | 14 +- .../runtime/parachains/src/inclusion/mod.rs | 14 +- .../runtime/parachains/src/inclusion/tests.rs | 16 +- .../runtime/parachains/src/initializer.rs | 4 +- .../src/initializer/benchmarking.rs | 2 +- .../parachains/src/initializer/tests.rs | 4 +- polkadot/runtime/parachains/src/lib.rs | 4 +- polkadot/runtime/parachains/src/metrics.rs | 4 +- polkadot/runtime/parachains/src/mock.rs | 6 +- polkadot/runtime/parachains/src/origin.rs | 2 +- .../parachains/src/paras/benchmarking.rs | 4 +- .../src/paras/benchmarking/pvf_check.rs | 4 +- polkadot/runtime/parachains/src/paras/mod.rs | 10 +- .../runtime/parachains/src/paras/tests.rs | 17 +- .../src/paras_inherent/benchmarking.rs | 2 +- .../parachains/src/paras_inherent/mod.rs | 2 +- .../parachains/src/paras_inherent/tests.rs | 74 +++---- .../parachains/src/paras_inherent/weights.rs | 4 +- .../runtime/parachains/src/reward_points.rs | 2 +- .../parachains/src/runtime_api_impl/v10.rs | 11 +- .../src/runtime_api_impl/vstaging.rs | 2 +- polkadot/runtime/parachains/src/scheduler.rs | 2 +- .../parachains/src/scheduler/common.rs | 2 +- .../parachains/src/scheduler/migration.rs | 2 +- .../runtime/parachains/src/scheduler/tests.rs | 4 +- .../runtime/parachains/src/session_info.rs | 4 +- .../parachains/src/session_info/tests.rs | 4 +- polkadot/runtime/parachains/src/shared.rs | 2 +- .../runtime/parachains/src/shared/tests.rs | 6 +- polkadot/runtime/parachains/src/ump_tests.rs | 4 +- polkadot/runtime/parachains/src/util.rs | 4 +- polkadot/runtime/rococo/Cargo.toml | 62 +++--- polkadot/runtime/rococo/constants/Cargo.toml | 8 +- polkadot/runtime/rococo/constants/src/lib.rs | 12 +- 
.../rococo/src/genesis_config_presets.rs | 25 +-- polkadot/runtime/rococo/src/impls.rs | 6 +- polkadot/runtime/rococo/src/lib.rs | 150 +++++++------- .../weights/runtime_common_assigned_slots.rs | 2 +- .../src/weights/runtime_common_auctions.rs | 2 +- .../src/weights/runtime_common_claims.rs | 2 +- .../src/weights/runtime_common_crowdloan.rs | 2 +- .../runtime_common_identity_migrator.rs | 2 +- .../weights/runtime_common_paras_registrar.rs | 2 +- .../src/weights/runtime_common_slots.rs | 2 +- .../runtime_parachains_assigner_on_demand.rs | 2 +- .../runtime_parachains_configuration.rs | 2 +- .../weights/runtime_parachains_coretime.rs | 4 +- .../weights/runtime_parachains_disputes.rs | 2 +- .../src/weights/runtime_parachains_hrmp.rs | 2 +- .../weights/runtime_parachains_inclusion.rs | 2 +- .../weights/runtime_parachains_initializer.rs | 2 +- .../src/weights/runtime_parachains_paras.rs | 2 +- .../runtime_parachains_paras_inherent.rs | 2 +- polkadot/runtime/rococo/src/xcm_config.rs | 4 +- polkadot/runtime/test-runtime/Cargo.toml | 46 ++--- .../runtime/test-runtime/constants/Cargo.toml | 4 +- .../runtime/test-runtime/constants/src/lib.rs | 6 +- polkadot/runtime/test-runtime/src/lib.rs | 91 ++++----- .../runtime/test-runtime/src/xcm_config.rs | 2 +- polkadot/runtime/westend/Cargo.toml | 58 +++--- polkadot/runtime/westend/constants/Cargo.toml | 8 +- polkadot/runtime/westend/constants/src/lib.rs | 12 +- polkadot/runtime/westend/src/impls.rs | 6 +- polkadot/runtime/westend/src/lib.rs | 148 +++++++------- polkadot/runtime/westend/src/tests.rs | 4 +- .../weights/runtime_common_assigned_slots.rs | 2 +- .../src/weights/runtime_common_auctions.rs | 2 +- .../src/weights/runtime_common_crowdloan.rs | 2 +- .../runtime_common_identity_migrator.rs | 2 +- .../weights/runtime_common_paras_registrar.rs | 2 +- .../src/weights/runtime_common_slots.rs | 2 +- .../runtime_parachains_assigner_on_demand.rs | 2 +- .../runtime_parachains_configuration.rs | 2 +- 
.../weights/runtime_parachains_coretime.rs | 2 +- .../weights/runtime_parachains_disputes.rs | 2 +- .../runtime_parachains_disputes_slashing.rs | 2 +- .../src/weights/runtime_parachains_hrmp.rs | 2 +- .../weights/runtime_parachains_inclusion.rs | 2 +- .../weights/runtime_parachains_initializer.rs | 2 +- .../src/weights/runtime_parachains_paras.rs | 2 +- .../runtime_parachains_paras_inherent.rs | 2 +- polkadot/runtime/westend/src/xcm_config.rs | 2 +- polkadot/statement-table/Cargo.toml | 4 +- polkadot/statement-table/src/generic.rs | 4 +- polkadot/statement-table/src/lib.rs | 2 +- polkadot/xcm/Cargo.toml | 4 +- polkadot/xcm/src/double_encoded.rs | 2 +- polkadot/xcm/src/lib.rs | 2 +- polkadot/xcm/src/v2/junction.rs | 2 +- polkadot/xcm/src/v2/mod.rs | 2 +- polkadot/xcm/src/v2/multiasset.rs | 4 +- polkadot/xcm/src/v2/multilocation.rs | 4 +- polkadot/xcm/src/v2/traits.rs | 4 +- polkadot/xcm/src/v3/junction.rs | 2 +- polkadot/xcm/src/v3/junctions.rs | 2 +- polkadot/xcm/src/v3/mod.rs | 6 +- polkadot/xcm/src/v3/multiasset.rs | 4 +- polkadot/xcm/src/v3/multilocation.rs | 4 +- polkadot/xcm/src/v3/traits.rs | 4 +- polkadot/xcm/src/v4/asset.rs | 6 +- polkadot/xcm/src/v4/junction.rs | 2 +- polkadot/xcm/src/v4/junctions.rs | 2 +- polkadot/xcm/src/v4/location.rs | 4 +- polkadot/xcm/src/v4/mod.rs | 6 +- polkadot/xcm/src/v4/traits.rs | 4 +- polkadot/xcm/xcm-builder/Cargo.toml | 8 +- .../xcm/xcm-builder/src/currency_adapter.rs | 2 +- .../xcm-builder/src/location_conversion.rs | 4 +- .../xcm-builder/src/process_xcm_message.rs | 4 +- polkadot/xcm/xcm-builder/src/routing.rs | 2 +- polkadot/xcm/xcm-builder/src/tests/mock.rs | 2 +- .../xcm/xcm-builder/src/tests/pay/mock.rs | 2 +- .../xcm/xcm-builder/src/universal_exports.rs | 2 +- polkadot/xcm/xcm-builder/src/weight.rs | 2 +- polkadot/xcm/xcm-builder/tests/mock/mod.rs | 2 +- polkadot/xcm/xcm-executor/Cargo.toml | 4 +- polkadot/xcm/xcm-executor/src/lib.rs | 2 +- .../xcm-executor/src/traits/on_response.rs | 2 +- prdoc/pr_4633.prdoc | 8 + 
substrate/client/cli/Cargo.toml | 2 +- .../client/cli/src/commands/chain_info_cmd.rs | 2 +- substrate/client/cli/src/error.rs | 2 +- substrate/client/consensus/beefy/Cargo.toml | 2 +- .../incoming_requests_handler.rs | 2 +- .../outgoing_requests_engine.rs | 2 +- substrate/client/consensus/beefy/src/lib.rs | 2 +- .../client/consensus/beefy/src/metrics.rs | 4 +- substrate/client/consensus/grandpa/Cargo.toml | 2 +- .../client/consensus/grandpa/rpc/Cargo.toml | 2 +- .../client/consensus/grandpa/rpc/src/lib.rs | 2 +- .../consensus/grandpa/rpc/src/notification.rs | 2 +- .../consensus/grandpa/src/authorities.rs | 6 +- .../consensus/grandpa/src/aux_schema.rs | 2 +- .../grandpa/src/communication/gossip.rs | 2 +- .../grandpa/src/communication/mod.rs | 2 +- .../grandpa/src/communication/tests.rs | 2 +- .../consensus/grandpa/src/environment.rs | 8 +- .../consensus/grandpa/src/finality_proof.rs | 2 +- .../client/consensus/grandpa/src/import.rs | 2 +- .../consensus/grandpa/src/justification.rs | 2 +- substrate/client/consensus/grandpa/src/lib.rs | 2 +- .../consensus/grandpa/src/warp_proof.rs | 6 +- substrate/client/service/test/Cargo.toml | 2 +- .../client/service/test/src/client/mod.rs | 2 +- substrate/frame/Cargo.toml | 4 +- substrate/frame/contracts/uapi/Cargo.toml | 4 +- substrate/frame/contracts/uapi/src/flags.rs | 2 +- .../solution-type/Cargo.toml | 2 +- substrate/frame/grandpa/Cargo.toml | 2 +- substrate/frame/grandpa/src/mock.rs | 2 +- substrate/frame/sassafras/Cargo.toml | 4 +- substrate/frame/sassafras/src/lib.rs | 2 +- substrate/frame/src/lib.rs | 4 +- .../support/test/compile_pass/Cargo.toml | 8 +- .../support/test/compile_pass/src/lib.rs | 8 +- .../primitives/consensus/grandpa/Cargo.toml | 4 +- .../primitives/consensus/grandpa/src/lib.rs | 76 ++++--- .../primitives/consensus/sassafras/Cargo.toml | 4 +- .../consensus/sassafras/src/digests.rs | 2 +- .../primitives/consensus/sassafras/src/lib.rs | 2 +- .../consensus/sassafras/src/ticket.rs | 2 +- 
.../primitives/consensus/sassafras/src/vrf.rs | 2 +- templates/minimal/runtime/Cargo.toml | 4 +- templates/parachain/node/Cargo.toml | 2 +- templates/parachain/node/src/service.rs | 2 +- 451 files changed, 1657 insertions(+), 1472 deletions(-) create mode 100644 prdoc/pr_4633.prdoc diff --git a/bridges/relays/utils/Cargo.toml b/bridges/relays/utils/Cargo.toml index 1264f582983f..4765730a0b4f 100644 --- a/bridges/relays/utils/Cargo.toml +++ b/bridges/relays/utils/Cargo.toml @@ -36,4 +36,4 @@ bp-runtime = { path = "../../primitives/runtime" } # Substrate dependencies sp-runtime = { path = "../../../substrate/primitives/runtime" } -substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } diff --git a/bridges/relays/utils/src/error.rs b/bridges/relays/utils/src/error.rs index 26f1d0cacefd..48c02bb9bd7a 100644 --- a/bridges/relays/utils/src/error.rs +++ b/bridges/relays/utils/src/error.rs @@ -42,5 +42,5 @@ pub enum Error { ExposingMetricsInvalidHost(String, AddrParseError), /// Prometheus error. 
#[error("{0}")] - Prometheus(#[from] substrate_prometheus_endpoint::prometheus::Error), + Prometheus(#[from] prometheus_endpoint::prometheus::Error), } diff --git a/bridges/relays/utils/src/metrics.rs b/bridges/relays/utils/src/metrics.rs index 2e6c8236da45..4c946651b058 100644 --- a/bridges/relays/utils/src/metrics.rs +++ b/bridges/relays/utils/src/metrics.rs @@ -16,7 +16,7 @@ pub use float_json_value::FloatJsonValueMetric; pub use global::GlobalMetrics; -pub use substrate_prometheus_endpoint::{ +pub use prometheus_endpoint::{ prometheus::core::{Atomic, Collector}, register, Counter, CounterVec, Gauge, GaugeVec, Opts, PrometheusError, Registry, F64, I64, U64, }; diff --git a/bridges/relays/utils/src/relay_loop.rs b/bridges/relays/utils/src/relay_loop.rs index 7105190a4583..7f84fdc98f3c 100644 --- a/bridges/relays/utils/src/relay_loop.rs +++ b/bridges/relays/utils/src/relay_loop.rs @@ -21,8 +21,8 @@ use crate::{ }; use async_trait::async_trait; +use prometheus_endpoint::{init_prometheus, Registry}; use std::{fmt::Debug, future::Future, net::SocketAddr, time::Duration}; -use substrate_prometheus_endpoint::{init_prometheus, Registry}; /// Default pause between reconnect attempts. 
pub const RECONNECT_DELAY: Duration = Duration::from_secs(10); diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml index e60934e34740..cab2b06b0931 100644 --- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml @@ -33,7 +33,7 @@ sp-io = { path = "../../../../substrate/primitives/io", default-features = false snowbridge-core = { path = "../../primitives/core", default-features = false } snowbridge-ethereum = { path = "../../primitives/ethereum", default-features = false } snowbridge-pallet-ethereum-client-fixtures = { path = "fixtures", default-features = false, optional = true } -primitives = { package = "snowbridge-beacon-primitives", path = "../../primitives/beacon", default-features = false } +snowbridge-beacon-primitives = { path = "../../primitives/beacon", default-features = false } static_assertions = { version = "1.1.0", default-features = false } pallet-timestamp = { path = "../../../../substrate/frame/timestamp", default-features = false, optional = true } @@ -62,9 +62,9 @@ std = [ "frame-system/std", "log/std", "pallet-timestamp/std", - "primitives/std", "scale-info/std", "serde", + "snowbridge-beacon-primitives/std", "snowbridge-core/std", "snowbridge-ethereum/std", "snowbridge-pallet-ethereum-client-fixtures/std", diff --git a/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/mod.rs b/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/mod.rs index 4b8796b628d7..12aa3f4ca1fa 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/mod.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/mod.rs @@ -9,7 +9,7 @@ use frame_system::RawOrigin; use snowbridge_pallet_ethereum_client_fixtures::*; -use primitives::{ +use snowbridge_beacon_primitives::{ fast_aggregate_verify, prepare_aggregate_pubkey, prepare_aggregate_signature, verify_merkle_branch, }; diff --git 
a/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/util.rs b/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/util.rs index 7e5ded6e1f0d..95e16d9fd434 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/util.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/util.rs @@ -4,7 +4,7 @@ use crate::{ decompress_sync_committee_bits, Config, CurrentSyncCommittee, Pallet as EthereumBeaconClient, Update, ValidatorsRoot, Vec, }; -use primitives::PublicKeyPrepared; +use snowbridge_beacon_primitives::PublicKeyPrepared; use sp_core::H256; pub fn participant_pubkeys( diff --git a/bridges/snowbridge/pallets/ethereum-client/src/config/mod.rs b/bridges/snowbridge/pallets/ethereum-client/src/config/mod.rs index ba3ea47b94f9..1ab1f67d6397 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/config/mod.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/config/mod.rs @@ -1,6 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2023 Snowfork -use primitives::merkle_proof::{generalized_index_length, subtree_index}; +use snowbridge_beacon_primitives::merkle_proof::{generalized_index_length, subtree_index}; use static_assertions::const_assert; /// Generalized Indices diff --git a/bridges/snowbridge/pallets/ethereum-client/src/functions.rs b/bridges/snowbridge/pallets/ethereum-client/src/functions.rs index 751e63c7f86a..781ae6c67638 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/functions.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/functions.rs @@ -10,9 +10,10 @@ use crate::config::{ pub fn decompress_sync_committee_bits( input: [u8; SYNC_COMMITTEE_BITS_SIZE], ) -> [u8; SYNC_COMMITTEE_SIZE] { - primitives::decompress_sync_committee_bits::( - input, - ) + snowbridge_beacon_primitives::decompress_sync_committee_bits::< + SYNC_COMMITTEE_SIZE, + SYNC_COMMITTEE_BITS_SIZE, + >(input) } /// Compute the sync committee period in which a slot is contained. 
diff --git a/bridges/snowbridge/pallets/ethereum-client/src/impls.rs b/bridges/snowbridge/pallets/ethereum-client/src/impls.rs index f600b1f67e29..2def6f58ba30 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/impls.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/impls.rs @@ -2,7 +2,7 @@ // SPDX-FileCopyrightText: 2023 Snowfork use super::*; use frame_support::ensure; -use primitives::ExecutionProof; +use snowbridge_beacon_primitives::ExecutionProof; use snowbridge_core::inbound::{ VerificationError::{self, *}, diff --git a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs index 6a5972ca7a14..6894977c21f4 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs @@ -36,7 +36,7 @@ use frame_support::{ dispatch::DispatchResult, pallet_prelude::OptionQuery, traits::Get, transactional, }; use frame_system::ensure_signed; -use primitives::{ +use snowbridge_beacon_primitives::{ fast_aggregate_verify, verify_merkle_branch, verify_receipt_proof, BeaconHeader, BlsError, CompactBeaconState, ForkData, ForkVersion, ForkVersions, PublicKeyPrepared, SigningData, }; diff --git a/bridges/snowbridge/pallets/ethereum-client/src/mock.rs b/bridges/snowbridge/pallets/ethereum-client/src/mock.rs index bd6144ebd8f9..96298d4fa896 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/mock.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/mock.rs @@ -4,7 +4,7 @@ use crate as ethereum_beacon_client; use crate::config; use frame_support::{derive_impl, dispatch::DispatchResult, parameter_types}; use pallet_timestamp; -use primitives::{Fork, ForkVersions}; +use snowbridge_beacon_primitives::{Fork, ForkVersions}; use snowbridge_core::inbound::{Log, Proof}; use sp_std::default::Default; use std::{fs::File, path::PathBuf}; @@ -21,32 +21,40 @@ where serde_json::from_reader(File::open(filepath).unwrap()) } -pub fn 
load_execution_proof_fixture() -> primitives::ExecutionProof { +pub fn load_execution_proof_fixture() -> snowbridge_beacon_primitives::ExecutionProof { load_fixture("execution-proof.json".to_string()).unwrap() } pub fn load_checkpoint_update_fixture( -) -> primitives::CheckpointUpdate<{ config::SYNC_COMMITTEE_SIZE }> { +) -> snowbridge_beacon_primitives::CheckpointUpdate<{ config::SYNC_COMMITTEE_SIZE }> { load_fixture("initial-checkpoint.json".to_string()).unwrap() } -pub fn load_sync_committee_update_fixture( -) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { +pub fn load_sync_committee_update_fixture() -> snowbridge_beacon_primitives::Update< + { config::SYNC_COMMITTEE_SIZE }, + { config::SYNC_COMMITTEE_BITS_SIZE }, +> { load_fixture("sync-committee-update.json".to_string()).unwrap() } -pub fn load_finalized_header_update_fixture( -) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { +pub fn load_finalized_header_update_fixture() -> snowbridge_beacon_primitives::Update< + { config::SYNC_COMMITTEE_SIZE }, + { config::SYNC_COMMITTEE_BITS_SIZE }, +> { load_fixture("finalized-header-update.json".to_string()).unwrap() } -pub fn load_next_sync_committee_update_fixture( -) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { +pub fn load_next_sync_committee_update_fixture() -> snowbridge_beacon_primitives::Update< + { config::SYNC_COMMITTEE_SIZE }, + { config::SYNC_COMMITTEE_BITS_SIZE }, +> { load_fixture("next-sync-committee-update.json".to_string()).unwrap() } -pub fn load_next_finalized_header_update_fixture( -) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { +pub fn load_next_finalized_header_update_fixture() -> snowbridge_beacon_primitives::Update< + { config::SYNC_COMMITTEE_SIZE }, + { config::SYNC_COMMITTEE_BITS_SIZE }, +> { 
load_fixture("next-finalized-header-update.json".to_string()).unwrap() } diff --git a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs index da762dc2fd80..c16743b75ea4 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs @@ -17,7 +17,7 @@ pub use crate::mock::*; use crate::config::{EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SLOTS_PER_EPOCH, SLOTS_PER_HISTORICAL_ROOT}; use frame_support::{assert_err, assert_noop, assert_ok}; use hex_literal::hex; -use primitives::{ +use snowbridge_beacon_primitives::{ types::deneb, Fork, ForkVersions, NextSyncCommitteeUpdate, VersionedExecutionPayloadHeader, }; use snowbridge_core::inbound::{VerificationError, Verifier}; @@ -171,7 +171,8 @@ pub fn sync_committee_participation_is_supermajority() { let bits = hex!("bffffffff7f1ffdfcfeffeffbfdffffbfffffdffffefefffdffff7f7ffff77fffdf7bff77ffdf7fffafffffff77fefffeff7effffffff5f7fedfffdfb6ddff7b" ); - let participation = primitives::decompress_sync_committee_bits::<512, 64>(bits); + let participation = + snowbridge_beacon_primitives::decompress_sync_committee_bits::<512, 64>(bits); assert_ok!(EthereumBeaconClient::sync_committee_participation_is_supermajority(&participation)); } diff --git a/bridges/snowbridge/pallets/ethereum-client/src/types.rs b/bridges/snowbridge/pallets/ethereum-client/src/types.rs index 92b9f77f739b..a670e691612e 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/types.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/types.rs @@ -8,14 +8,14 @@ use frame_support::storage::types::OptionQuery; use snowbridge_core::RingBufferMapImpl; // Specialize types based on configured sync committee size -pub type SyncCommittee = primitives::SyncCommittee; -pub type SyncCommitteePrepared = primitives::SyncCommitteePrepared; -pub type SyncAggregate = primitives::SyncAggregate; -pub type CheckpointUpdate = primitives::CheckpointUpdate; 
-pub type Update = primitives::Update; -pub type NextSyncCommitteeUpdate = primitives::NextSyncCommitteeUpdate; +pub type SyncCommittee = snowbridge_beacon_primitives::SyncCommittee; +pub type SyncCommitteePrepared = snowbridge_beacon_primitives::SyncCommitteePrepared; +pub type SyncAggregate = snowbridge_beacon_primitives::SyncAggregate; +pub type CheckpointUpdate = snowbridge_beacon_primitives::CheckpointUpdate; +pub type Update = snowbridge_beacon_primitives::Update; +pub type NextSyncCommitteeUpdate = snowbridge_beacon_primitives::NextSyncCommitteeUpdate; -pub use primitives::{AncestryProof, ExecutionProof}; +pub use snowbridge_beacon_primitives::{AncestryProof, ExecutionProof}; /// FinalizedState ring buffer implementation pub type FinalizedBeaconStateBuffer = RingBufferMapImpl< diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml index 1d3fc43909df..ec0888dd41b0 100644 --- a/bridges/snowbridge/primitives/router/Cargo.toml +++ b/bridges/snowbridge/primitives/router/Cargo.toml @@ -30,7 +30,7 @@ snowbridge-core = { path = "../core", default-features = false } hex-literal = { version = "0.4.1" } [dev-dependencies] -hex = { package = "rustc-hex", version = "2.1.0" } +rustc-hex = { version = "2.1.0" } [features] default = ["std"] diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index 547137b73064..fad30e59e869 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -35,7 +35,7 @@ sp-keystore = { path = "../../../../substrate/primitives/keystore" } sp-runtime = { path = "../../../../substrate/primitives/runtime" } sp-timestamp = { path = "../../../../substrate/primitives/timestamp" } sp-state-machine = { path = "../../../../substrate/primitives/state-machine" } -substrate-prometheus-endpoint = { path = "../../../../substrate/utils/prometheus" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", 
path = "../../../../substrate/utils/prometheus" } # Cumulus cumulus-client-consensus-common = { path = "../common" } diff --git a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs index c3b601123b56..be554bdcfc79 100644 --- a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs +++ b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs @@ -224,7 +224,7 @@ pub fn fully_verifying_import_queue( block_import: I, create_inherent_data_providers: CIDP, spawner: &impl sp_core::traits::SpawnEssentialNamed, - registry: Option<&substrate_prometheus_endpoint::Registry>, + registry: Option<&prometheus_endpoint::Registry>, telemetry: Option, ) -> BasicQueue where diff --git a/cumulus/client/consensus/aura/src/import_queue.rs b/cumulus/client/consensus/aura/src/import_queue.rs index 2611eaf532f8..cbbfbe8d2223 100644 --- a/cumulus/client/consensus/aura/src/import_queue.rs +++ b/cumulus/client/consensus/aura/src/import_queue.rs @@ -18,6 +18,7 @@ use codec::Codec; use cumulus_client_consensus_common::ParachainBlockImportMarker; +use prometheus_endpoint::Registry; use sc_client_api::{backend::AuxStore, BlockOf, UsageProvider}; use sc_consensus::{import_queue::DefaultImportQueue, BlockImport}; use sc_consensus_aura::{AuraVerifier, CompatibilityMode}; @@ -32,7 +33,6 @@ use sp_core::crypto::Pair; use sp_inherents::CreateInherentDataProviders; use sp_runtime::traits::Block as BlockT; use std::{fmt::Debug, sync::Arc}; -use substrate_prometheus_endpoint::Registry; /// Parameters for [`import_queue`]. 
pub struct ImportQueueParams<'a, I, C, CIDP, S> { diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml index 3a7c6b57d6d9..d369304e2e33 100644 --- a/cumulus/client/consensus/common/Cargo.toml +++ b/cumulus/client/consensus/common/Cargo.toml @@ -28,7 +28,7 @@ sp-core = { path = "../../../../substrate/primitives/core" } sp-runtime = { path = "../../../../substrate/primitives/runtime" } sp-timestamp = { path = "../../../../substrate/primitives/timestamp" } sp-trie = { path = "../../../../substrate/primitives/trie" } -substrate-prometheus-endpoint = { path = "../../../../substrate/utils/prometheus" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../../substrate/utils/prometheus" } # Polkadot polkadot-primitives = { path = "../../../../polkadot/primitives" } diff --git a/cumulus/client/consensus/common/src/import_queue.rs b/cumulus/client/consensus/common/src/import_queue.rs index 311a2b7ad8cf..8024b7695a28 100644 --- a/cumulus/client/consensus/common/src/import_queue.rs +++ b/cumulus/client/consensus/common/src/import_queue.rs @@ -63,7 +63,7 @@ impl Verifier for VerifyNothing { pub fn verify_nothing_import_queue( block_import: I, spawner: &impl sp_core::traits::SpawnEssentialNamed, - registry: Option<&substrate_prometheus_endpoint::Registry>, + registry: Option<&prometheus_endpoint::Registry>, ) -> BasicQueue where I: BlockImport diff --git a/cumulus/client/consensus/relay-chain/Cargo.toml b/cumulus/client/consensus/relay-chain/Cargo.toml index cb32b9804576..7c3a901db6c3 100644 --- a/cumulus/client/consensus/relay-chain/Cargo.toml +++ b/cumulus/client/consensus/relay-chain/Cargo.toml @@ -24,7 +24,7 @@ sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } sp-core = { path = "../../../../substrate/primitives/core" } sp-inherents = { path = "../../../../substrate/primitives/inherents" } sp-runtime = { path = "../../../../substrate/primitives/runtime" } 
-substrate-prometheus-endpoint = { path = "../../../../substrate/utils/prometheus" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../../substrate/utils/prometheus" } # Cumulus cumulus-client-consensus-common = { path = "../common" } diff --git a/cumulus/client/consensus/relay-chain/src/import_queue.rs b/cumulus/client/consensus/relay-chain/src/import_queue.rs index f44f44093243..1b521e79d482 100644 --- a/cumulus/client/consensus/relay-chain/src/import_queue.rs +++ b/cumulus/client/consensus/relay-chain/src/import_queue.rs @@ -114,7 +114,7 @@ pub fn import_queue( block_import: I, create_inherent_data_providers: CIDP, spawner: &impl sp_core::traits::SpawnEssentialNamed, - registry: Option<&substrate_prometheus_endpoint::Registry>, + registry: Option<&prometheus_endpoint::Registry>, ) -> ClientResult> where I: BlockImport diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index 5962c68bba7a..5d612cdc0eef 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -23,4 +23,4 @@ futures = "0.3.28" async-trait = "0.1.79" thiserror = { workspace = true } jsonrpsee-core = "0.22" -parity-scale-codec = "3.6.12" +codec = { package = "parity-scale-codec", version = "3.6.12" } diff --git a/cumulus/client/relay-chain-interface/src/lib.rs b/cumulus/client/relay-chain-interface/src/lib.rs index bb93e6a168c8..7c7796b468c0 100644 --- a/cumulus/client/relay-chain-interface/src/lib.rs +++ b/cumulus/client/relay-chain-interface/src/lib.rs @@ -22,8 +22,8 @@ use sc_client_api::StorageProof; use futures::Stream; use async_trait::async_trait; +use codec::Error as CodecError; use jsonrpsee_core::ClientError as JsonRpcError; -use parity_scale_codec::Error as CodecError; use sp_api::ApiError; use cumulus_primitives_core::relay_chain::BlockId; diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml 
b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 88673a8f08fe..0b541092a3de 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -32,7 +32,7 @@ sc-network = { path = "../../../substrate/client/network" } sc-network-common = { path = "../../../substrate/client/network/common" } sc-service = { path = "../../../substrate/client/service" } sc-client-api = { path = "../../../substrate/client/api" } -substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } sc-tracing = { path = "../../../substrate/client/tracing" } sc-utils = { path = "../../../substrate/client/utils" } sp-api = { path = "../../../substrate/primitives/api" } diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index 699393e2d48a..9101b8154aa7 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -190,7 +190,7 @@ async fn new_minimal_relay_chain(pallet_session::Pallet); pub trait Config: pallet_session::Config {} diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml index 7ac65b0ee1de..113036b4c00e 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml @@ -16,8 +16,8 @@ workspace = true sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } sp-authority-discovery = { path = "../../../../../../../substrate/primitives/authority-discovery", default-features = false } sp-consensus-babe = { path = "../../../../../../../substrate/primitives/consensus/babe", default-features = false } 
-beefy-primitives = { package = "sp-consensus-beefy", path = "../../../../../../../substrate/primitives/consensus/beefy" } -grandpa = { package = "sc-consensus-grandpa", path = "../../../../../../../substrate/client/consensus/grandpa", default-features = false } +sp-consensus-beefy = { path = "../../../../../../../substrate/primitives/consensus/beefy" } +sc-consensus-grandpa = { path = "../../../../../../../substrate/client/consensus/grandpa", default-features = false } # Polkadot polkadot-primitives = { path = "../../../../../../../polkadot/primitives", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs index 55437645b052..074a1de5e185 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs @@ -14,10 +14,10 @@ // limitations under the License. 
// Substrate -use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; -use grandpa::AuthorityId as GrandpaId; +use sc_consensus_grandpa::AuthorityId as GrandpaId; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; +use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId; use sp_core::{sr25519, storage::Storage}; // Polkadot diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml index e4688a1c9f02..b952477c47a7 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml @@ -17,8 +17,8 @@ sp-core = { path = "../../../../../../../substrate/primitives/core", default-fea sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } sp-authority-discovery = { path = "../../../../../../../substrate/primitives/authority-discovery", default-features = false } sp-consensus-babe = { path = "../../../../../../../substrate/primitives/consensus/babe", default-features = false } -beefy-primitives = { package = "sp-consensus-beefy", path = "../../../../../../../substrate/primitives/consensus/beefy" } -grandpa = { package = "sc-consensus-grandpa", path = "../../../../../../../substrate/client/consensus/grandpa", default-features = false } +sp-consensus-beefy = { path = "../../../../../../../substrate/primitives/consensus/beefy" } +sc-consensus-grandpa = { path = "../../../../../../../substrate/client/consensus/grandpa", default-features = false } pallet-staking = { path = "../../../../../../../substrate/frame/staking", default-features = false } # Polkadot diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs index 
700b80e63f6c..b9f12932b84e 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs @@ -14,10 +14,10 @@ // limitations under the License. // Substrate -use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; -use grandpa::AuthorityId as GrandpaId; +use sc_consensus_grandpa::AuthorityId as GrandpaId; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; +use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId; use sp_core::storage::Storage; use sp_runtime::Perbill; diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml index b010d2a29638..d9ec81323230 100644 --- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml @@ -14,8 +14,8 @@ codec = { package = "parity-scale-codec", version = "3.6.12", default-features = paste = "1.0.14" # Substrate -beefy-primitives = { package = "sp-consensus-beefy", path = "../../../../../substrate/primitives/consensus/beefy" } -grandpa = { package = "sc-consensus-grandpa", path = "../../../../../substrate/client/consensus/grandpa" } +sp-consensus-beefy = { path = "../../../../../substrate/primitives/consensus/beefy" } +sc-consensus-grandpa = { path = "../../../../../substrate/client/consensus/grandpa" } sp-authority-discovery = { path = "../../../../../substrate/primitives/authority-discovery" } sp-runtime = { path = "../../../../../substrate/primitives/runtime" } frame-support = { path = "../../../../../substrate/frame/support" } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index cbde0642f1a2..4a9d3b3a5aaf 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs 
+++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -20,11 +20,11 @@ pub mod xcm_helpers; pub use xcm_emulator; // Substrate -use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; use frame_support::parameter_types; -use grandpa::AuthorityId as GrandpaId; +use sc_consensus_grandpa::AuthorityId as GrandpaId; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; +use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId; use sp_core::{sr25519, storage::Storage, Pair, Public}; use sp_runtime::{ traits::{AccountIdConversion, IdentifyAccount, Verify}, diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index def7d95fd566..639b8b3d4dcf 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -86,9 +86,9 @@ sp-inherents = { path = "../../substrate/primitives/inherents" } sp-api = { path = "../../substrate/primitives/api" } sp-consensus-aura = { path = "../../substrate/primitives/consensus/aura" } sc-sysinfo = { path = "../../substrate/client/sysinfo" } -substrate-prometheus-endpoint = { path = "../../substrate/utils/prometheus" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../substrate/utils/prometheus" } sc-transaction-pool-api = { path = "../../substrate/client/transaction-pool/api" } -frame-rpc-system = { package = "substrate-frame-rpc-system", path = "../../substrate/utils/frame/rpc/system" } +substrate-frame-rpc-system = { path = "../../substrate/utils/frame/rpc/system" } pallet-transaction-payment-rpc = { path = "../../substrate/frame/transaction-payment/rpc" } substrate-state-trie-migration-rpc = { path = "../../substrate/utils/frame/rpc/state-trie-migration-rpc" } diff --git a/cumulus/polkadot-parachain/src/rpc.rs b/cumulus/polkadot-parachain/src/rpc.rs index caee14e55522..7437bb1f4b93 100644 --- a/cumulus/polkadot-parachain/src/rpc.rs +++ 
b/cumulus/polkadot-parachain/src/rpc.rs @@ -54,15 +54,15 @@ where + Send + Sync + 'static, - C::Api: frame_rpc_system::AccountNonceApi, + C::Api: substrate_frame_rpc_system::AccountNonceApi, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BlockBuilder, P: TransactionPool + Sync + Send + 'static, B: sc_client_api::Backend + Send + Sync + 'static, B::State: sc_client_api::backend::StateBackend>, { - use frame_rpc_system::{System, SystemApiServer}; use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; + use substrate_frame_rpc_system::{System, SystemApiServer}; use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer}; let mut module = RpcExtension::new(()); @@ -88,14 +88,14 @@ where + Send + Sync + 'static, - C::Api: frame_rpc_system::AccountNonceApi, + C::Api: substrate_frame_rpc_system::AccountNonceApi, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BlockBuilder, P: TransactionPool + Sync + Send + 'static, { - use frame_rpc_system::{System, SystemApiServer}; use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; use sc_rpc::dev::{Dev, DevApiServer}; + use substrate_frame_rpc_system::{System, SystemApiServer}; let mut module = RpcExtension::new(()); let FullDeps { client, pool, deny_unsafe } = deps; diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 12eda3e8a9cb..19ad75e384ce 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -43,6 +43,7 @@ pub use parachains_common::{AccountId, AuraId, Balance, Block, Hash, Header, Non use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; use futures::{lock::Mutex, prelude::*}; +use prometheus_endpoint::Registry; use sc_consensus::{ import_queue::{BasicQueue, Verifier as VerifierT}, BlockImportParams, ImportQueue, @@ -61,7 +62,6 @@ use sp_runtime::{ 
traits::{Block as BlockT, Header as HeaderT}, }; use std::{marker::PhantomData, sync::Arc, time::Duration}; -use substrate_prometheus_endpoint::Registry; use polkadot_primitives::CollatorPair; @@ -209,7 +209,7 @@ where + sp_block_builder::BlockBuilder + cumulus_primitives_core::CollectCollationInfo + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + frame_rpc_system::AccountNonceApi, + + substrate_frame_rpc_system::AccountNonceApi, RB: Fn( DenyUnsafe, Arc>, @@ -471,7 +471,7 @@ where RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue + sp_block_builder::BlockBuilder + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + frame_rpc_system::AccountNonceApi, + + substrate_frame_rpc_system::AccountNonceApi, { let deps = rpc::FullDeps { client, pool, deny_unsafe }; @@ -736,7 +736,7 @@ where + cumulus_primitives_core::CollectCollationInfo + sp_consensus_aura::AuraApi::Pair as Pair>::Public> + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + frame_rpc_system::AccountNonceApi + + substrate_frame_rpc_system::AccountNonceApi + cumulus_primitives_aura::AuraUnincludedSegmentApi, <::Pair as Pair>::Signature: TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index a0953896356d..b0671623f48d 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -15,7 +15,7 @@ workspace = true [dependencies] # Needed for all FRAME-based code -parity-scale-codec = { version = "3.6.12", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.6.0", default-features = false } frame = { package = "polkadot-sdk-frame", path = "../../substrate/frame", features = [ "experimental", diff --git a/docs/sdk/src/reference_docs/extrinsic_encoding.rs b/docs/sdk/src/reference_docs/extrinsic_encoding.rs index 8c8568a228fa..31ce92c67e98 100644 --- 
a/docs/sdk/src/reference_docs/extrinsic_encoding.rs +++ b/docs/sdk/src/reference_docs/extrinsic_encoding.rs @@ -191,7 +191,7 @@ #[docify::export] pub mod call_data { - use parity_scale_codec::{Decode, Encode}; + use codec::{Decode, Encode}; // The outer enum composes calls within // different pallets together. We have two @@ -224,7 +224,7 @@ pub mod call_data { pub mod encoding_example { use super::call_data::{Call, PalletACall}; use crate::reference_docs::signed_extensions::signed_extensions_example; - use parity_scale_codec::Encode; + use codec::Encode; use sp_core::crypto::AccountId32; use sp_keyring::sr25519::Keyring; use sp_runtime::{ diff --git a/docs/sdk/src/reference_docs/signed_extensions.rs b/docs/sdk/src/reference_docs/signed_extensions.rs index 28b1426536bc..43a6bcc14c5d 100644 --- a/docs/sdk/src/reference_docs/signed_extensions.rs +++ b/docs/sdk/src/reference_docs/signed_extensions.rs @@ -8,7 +8,7 @@ #[docify::export] pub mod signed_extensions_example { - use parity_scale_codec::{Decode, Encode}; + use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::traits::SignedExtension; diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index 719d00490a9d..1917dcd579c4 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -23,10 +23,10 @@ clap = { version = "4.5.3", features = ["derive"], optional = true } log = { workspace = true, default-features = true } thiserror = { workspace = true } futures = "0.3.30" -pyro = { package = "pyroscope", version = "0.5.3", optional = true } +pyroscope = { version = "0.5.3", optional = true } pyroscope_pprofrs = { version = "0.2", optional = true } -service = { package = "polkadot-service", path = "../node/service", default-features = false, optional = true } +polkadot-service = { path = "../node/service", default-features = false, optional = true } sp-core = { path = "../../substrate/primitives/core" } sp-io = { path = "../../substrate/primitives/io" } @@ -48,7 +48,8 @@ 
substrate-build-script-utils = { path = "../../substrate/utils/build-script-util [features] default = ["cli", "db", "full-node"] -db = ["service/db"] +db = ["polkadot-service/db"] +service = ["dep:polkadot-service"] cli = [ "clap", "frame-benchmarking-cli", @@ -60,24 +61,24 @@ cli = [ runtime-benchmarks = [ "frame-benchmarking-cli?/runtime-benchmarks", "polkadot-node-metrics/runtime-benchmarks", + "polkadot-service?/runtime-benchmarks", "sc-service?/runtime-benchmarks", - "service/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] -full-node = ["service/full-node"] +full-node = ["polkadot-service/full-node"] try-runtime = [ - "service/try-runtime", + "polkadot-service?/try-runtime", "sp-runtime/try-runtime", ] -fast-runtime = ["service/fast-runtime"] -pyroscope = ["pyro", "pyroscope_pprofrs"] +fast-runtime = ["polkadot-service/fast-runtime"] +pyroscope = ["dep:pyroscope", "pyroscope_pprofrs"] # Configure the native runtimes to use. -westend-native = ["service/westend-native"] -rococo-native = ["service/rococo-native"] +westend-native = ["polkadot-service/westend-native"] +rococo-native = ["polkadot-service/rococo-native"] -malus = ["full-node", "service/malus"] +malus = ["full-node", "polkadot-service/malus"] runtime-metrics = [ "polkadot-node-metrics/runtime-metrics", - "service/runtime-metrics", + "polkadot-service/runtime-metrics", ] diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index f5ee538e8cec..b89054b4dc32 100644 --- a/polkadot/cli/src/command.rs +++ b/polkadot/cli/src/command.rs @@ -18,17 +18,17 @@ use crate::cli::{Cli, Subcommand, NODE_VERSION}; use frame_benchmarking_cli::{BenchmarkCmd, ExtrinsicFactory, SUBSTRATE_REFERENCE_HARDWARE}; use futures::future::TryFutureExt; use log::info; -use sc_cli::SubstrateCli; -use service::{ +use polkadot_service::{ self, benchmarking::{benchmark_inherent_data, RemarkBuilder, TransferKeepAliveBuilder}, HeaderBackend, IdentifyVariant, }; +use sc_cli::SubstrateCli; use 
sp_core::crypto::Ss58AddressFormatRegistry; use sp_keyring::Sr25519Keyring; use std::net::ToSocketAddrs; -pub use crate::{error::Error, service::BlockId}; +pub use crate::error::Error; #[cfg(feature = "hostperfcheck")] pub use polkadot_performance_test::PerfCheckError; #[cfg(feature = "pyroscope")] @@ -85,55 +85,55 @@ impl SubstrateCli for Cli { id }; Ok(match id { - "kusama" => Box::new(service::chain_spec::kusama_config()?), + "kusama" => Box::new(polkadot_service::chain_spec::kusama_config()?), name if name.starts_with("kusama-") && !name.ends_with(".json") => Err(format!("`{name}` is not supported anymore as the kusama native runtime no longer part of the node."))?, - "polkadot" => Box::new(service::chain_spec::polkadot_config()?), + "polkadot" => Box::new(polkadot_service::chain_spec::polkadot_config()?), name if name.starts_with("polkadot-") && !name.ends_with(".json") => Err(format!("`{name}` is not supported anymore as the polkadot native runtime no longer part of the node."))?, - "paseo" => Box::new(service::chain_spec::paseo_config()?), - "rococo" => Box::new(service::chain_spec::rococo_config()?), + "paseo" => Box::new(polkadot_service::chain_spec::paseo_config()?), + "rococo" => Box::new(polkadot_service::chain_spec::rococo_config()?), #[cfg(feature = "rococo-native")] - "dev" | "rococo-dev" => Box::new(service::chain_spec::rococo_development_config()?), + "dev" | "rococo-dev" => Box::new(polkadot_service::chain_spec::rococo_development_config()?), #[cfg(feature = "rococo-native")] - "rococo-local" => Box::new(service::chain_spec::rococo_local_testnet_config()?), + "rococo-local" => Box::new(polkadot_service::chain_spec::rococo_local_testnet_config()?), #[cfg(feature = "rococo-native")] - "rococo-staging" => Box::new(service::chain_spec::rococo_staging_testnet_config()?), + "rococo-staging" => Box::new(polkadot_service::chain_spec::rococo_staging_testnet_config()?), #[cfg(not(feature = "rococo-native"))] name if name.starts_with("rococo-") && 
!name.ends_with(".json") || name == "dev" => Err(format!("`{}` only supported with `rococo-native` feature enabled.", name))?, - "westend" => Box::new(service::chain_spec::westend_config()?), + "westend" => Box::new(polkadot_service::chain_spec::westend_config()?), #[cfg(feature = "westend-native")] - "westend-dev" => Box::new(service::chain_spec::westend_development_config()?), + "westend-dev" => Box::new(polkadot_service::chain_spec::westend_development_config()?), #[cfg(feature = "westend-native")] - "westend-local" => Box::new(service::chain_spec::westend_local_testnet_config()?), + "westend-local" => Box::new(polkadot_service::chain_spec::westend_local_testnet_config()?), #[cfg(feature = "westend-native")] - "westend-staging" => Box::new(service::chain_spec::westend_staging_testnet_config()?), + "westend-staging" => Box::new(polkadot_service::chain_spec::westend_staging_testnet_config()?), #[cfg(not(feature = "westend-native"))] name if name.starts_with("westend-") && !name.ends_with(".json") => Err(format!("`{}` only supported with `westend-native` feature enabled.", name))?, - "wococo" => Box::new(service::chain_spec::wococo_config()?), + "wococo" => Box::new(polkadot_service::chain_spec::wococo_config()?), #[cfg(feature = "rococo-native")] - "wococo-dev" => Box::new(service::chain_spec::wococo_development_config()?), + "wococo-dev" => Box::new(polkadot_service::chain_spec::wococo_development_config()?), #[cfg(feature = "rococo-native")] - "wococo-local" => Box::new(service::chain_spec::wococo_local_testnet_config()?), + "wococo-local" => Box::new(polkadot_service::chain_spec::wococo_local_testnet_config()?), #[cfg(not(feature = "rococo-native"))] name if name.starts_with("wococo-") => Err(format!("`{}` only supported with `rococo-native` feature enabled.", name))?, #[cfg(feature = "rococo-native")] - "versi-dev" => Box::new(service::chain_spec::versi_development_config()?), + "versi-dev" => 
Box::new(polkadot_service::chain_spec::versi_development_config()?), #[cfg(feature = "rococo-native")] - "versi-local" => Box::new(service::chain_spec::versi_local_testnet_config()?), + "versi-local" => Box::new(polkadot_service::chain_spec::versi_local_testnet_config()?), #[cfg(feature = "rococo-native")] - "versi-staging" => Box::new(service::chain_spec::versi_staging_testnet_config()?), + "versi-staging" => Box::new(polkadot_service::chain_spec::versi_staging_testnet_config()?), #[cfg(not(feature = "rococo-native"))] name if name.starts_with("versi-") => Err(format!("`{}` only supported with `rococo-native` feature enabled.", name))?, path => { let path = std::path::PathBuf::from(path); - let chain_spec = Box::new(service::GenericChainSpec::from_json_file(path.clone())?) - as Box; + let chain_spec = Box::new(polkadot_service::GenericChainSpec::from_json_file(path.clone())?) + as Box; // When `force_*` is given or the file name starts with the name of one of the known // chains, we use the chain spec for the specific chain. @@ -142,11 +142,11 @@ impl SubstrateCli for Cli { chain_spec.is_wococo() || chain_spec.is_versi() { - Box::new(service::RococoChainSpec::from_json_file(path)?) + Box::new(polkadot_service::RococoChainSpec::from_json_file(path)?) } else if self.run.force_kusama || chain_spec.is_kusama() { - Box::new(service::GenericChainSpec::from_json_file(path)?) + Box::new(polkadot_service::GenericChainSpec::from_json_file(path)?) } else if self.run.force_westend || chain_spec.is_westend() { - Box::new(service::WestendChainSpec::from_json_file(path)?) + Box::new(polkadot_service::WestendChainSpec::from_json_file(path)?) 
} else { chain_spec } @@ -155,7 +155,7 @@ impl SubstrateCli for Cli { } } -fn set_default_ss58_version(spec: &Box) { +fn set_default_ss58_version(spec: &Box) { let ss58_version = if spec.is_kusama() { Ss58AddressFormatRegistry::KusamaAccount } else if spec.is_westend() { @@ -176,7 +176,7 @@ fn set_default_ss58_version(spec: &Box) { #[cfg(feature = "malus")] pub fn run_node( run: Cli, - overseer_gen: impl service::OverseerGen, + overseer_gen: impl polkadot_service::OverseerGen, malus_finality_delay: Option, ) -> Result<()> { run_node_inner(run, overseer_gen, malus_finality_delay, |_logger_builder, _config| {}) @@ -184,7 +184,7 @@ pub fn run_node( fn run_node_inner( cli: Cli, - overseer_gen: impl service::OverseerGen, + overseer_gen: impl polkadot_service::OverseerGen, maybe_malus_finality_delay: Option, logger_hook: F, ) -> Result<()> @@ -235,10 +235,10 @@ where .flatten(); let database_source = config.database.clone(); - let task_manager = service::build_full( + let task_manager = polkadot_service::build_full( config, - service::NewFullParams { - is_parachain_node: service::IsParachainNode::No, + polkadot_service::NewFullParams { + is_parachain_node: polkadot_service::IsParachainNode::No, enable_beefy, force_authoring_backoff: cli.run.force_authoring_backoff, jaeger_agent, @@ -284,7 +284,7 @@ pub fn run() -> Result<()> { .next() .ok_or_else(|| Error::AddressResolutionMissing)?; // The pyroscope agent requires a `http://` prefix, so we just do that. 
- let agent = pyro::PyroscopeAgent::builder( + let agent = pyroscope::PyroscopeAgent::builder( "http://".to_owned() + address.to_string().as_str(), "polkadot".to_owned(), ) @@ -303,7 +303,7 @@ pub fn run() -> Result<()> { match &cli.subcommand { None => run_node_inner( cli, - service::ValidatorOverseerGen, + polkadot_service::ValidatorOverseerGen, None, polkadot_node_metrics::logger_hook(), ), @@ -319,7 +319,7 @@ pub fn run() -> Result<()> { runner.async_run(|mut config| { let (client, _, import_queue, task_manager) = - service::new_chain_ops(&mut config, None)?; + polkadot_service::new_chain_ops(&mut config, None)?; Ok((cmd.run(client, import_queue).map_err(Error::SubstrateCli), task_manager)) }) }, @@ -331,7 +331,8 @@ pub fn run() -> Result<()> { Ok(runner.async_run(|mut config| { let (client, _, _, task_manager) = - service::new_chain_ops(&mut config, None).map_err(Error::PolkadotService)?; + polkadot_service::new_chain_ops(&mut config, None) + .map_err(Error::PolkadotService)?; Ok((cmd.run(client, config.database).map_err(Error::SubstrateCli), task_manager)) })?) }, @@ -342,7 +343,8 @@ pub fn run() -> Result<()> { set_default_ss58_version(chain_spec); Ok(runner.async_run(|mut config| { - let (client, _, _, task_manager) = service::new_chain_ops(&mut config, None)?; + let (client, _, _, task_manager) = + polkadot_service::new_chain_ops(&mut config, None)?; Ok((cmd.run(client, config.chain_spec).map_err(Error::SubstrateCli), task_manager)) })?) }, @@ -354,7 +356,7 @@ pub fn run() -> Result<()> { Ok(runner.async_run(|mut config| { let (client, _, import_queue, task_manager) = - service::new_chain_ops(&mut config, None)?; + polkadot_service::new_chain_ops(&mut config, None)?; Ok((cmd.run(client, import_queue).map_err(Error::SubstrateCli), task_manager)) })?) 
}, @@ -369,15 +371,18 @@ pub fn run() -> Result<()> { set_default_ss58_version(chain_spec); Ok(runner.async_run(|mut config| { - let (client, backend, _, task_manager) = service::new_chain_ops(&mut config, None)?; + let (client, backend, _, task_manager) = + polkadot_service::new_chain_ops(&mut config, None)?; let aux_revert = Box::new(|client, backend, blocks| { - service::revert_backend(client, backend, blocks, config).map_err(|err| { - match err { - service::Error::Blockchain(err) => err.into(), - // Generic application-specific error. - err => sc_cli::Error::Application(err.into()), - } - }) + polkadot_service::revert_backend(client, backend, blocks, config).map_err( + |err| { + match err { + polkadot_service::Error::Blockchain(err) => err.into(), + // Generic application-specific error. + err => sc_cli::Error::Application(err.into()), + } + }, + ) }); Ok(( cmd.run(client, backend, Some(aux_revert)).map_err(Error::SubstrateCli), @@ -400,21 +405,22 @@ pub fn run() -> Result<()> { .into()), #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => runner.sync_run(|mut config| { - let (client, backend, _, _) = service::new_chain_ops(&mut config, None)?; + let (client, backend, _, _) = + polkadot_service::new_chain_ops(&mut config, None)?; let db = backend.expose_db(); let storage = backend.expose_storage(); cmd.run(config, client.clone(), db, storage).map_err(Error::SubstrateCli) }), BenchmarkCmd::Block(cmd) => runner.sync_run(|mut config| { - let (client, _, _, _) = service::new_chain_ops(&mut config, None)?; + let (client, _, _, _) = polkadot_service::new_chain_ops(&mut config, None)?; cmd.run(client.clone()).map_err(Error::SubstrateCli) }), // These commands are very similar and can be handled in nearly the same way. 
BenchmarkCmd::Extrinsic(_) | BenchmarkCmd::Overhead(_) => runner.sync_run(|mut config| { - let (client, _, _, _) = service::new_chain_ops(&mut config, None)?; + let (client, _, _, _) = polkadot_service::new_chain_ops(&mut config, None)?; let header = client.header(client.info().genesis_hash).unwrap().unwrap(); let inherent_data = benchmark_inherent_data(header) .map_err(|e| format!("generating inherent data: {:?}", e))?; @@ -454,7 +460,7 @@ pub fn run() -> Result<()> { if cfg!(feature = "runtime-benchmarks") { runner.sync_run(|config| { - cmd.run_with_spec::, ()>( + cmd.run_with_spec::, ()>( Some(config.chain_spec), ) .map_err(|e| Error::SubstrateCli(e)) @@ -481,7 +487,7 @@ pub fn run() -> Result<()> { Some(Subcommand::Key(cmd)) => Ok(cmd.run(&cli)?), Some(Subcommand::ChainInfo(cmd)) => { let runner = cli.create_runner(cmd)?; - Ok(runner.sync_run(|config| cmd.run::(&config))?) + Ok(runner.sync_run(|config| cmd.run::(&config))?) }, }?; diff --git a/polkadot/cli/src/error.rs b/polkadot/cli/src/error.rs index 219289796522..1fcd2ca04bb0 100644 --- a/polkadot/cli/src/error.rs +++ b/polkadot/cli/src/error.rs @@ -17,7 +17,7 @@ #[derive(thiserror::Error, Debug)] pub enum Error { #[error(transparent)] - PolkadotService(#[from] service::Error), + PolkadotService(#[from] polkadot_service::Error), #[error(transparent)] SubstrateCli(#[from] sc_cli::Error), @@ -34,7 +34,7 @@ pub enum Error { #[cfg(feature = "pyroscope")] #[error("Failed to connect to pyroscope agent")] - PyroscopeError(#[from] pyro::error::PyroscopeError), + PyroscopeError(#[from] pyroscope::error::PyroscopeError), #[error("Failed to resolve provided URL")] AddressResolutionFailure(#[from] std::io::Error), diff --git a/polkadot/cli/src/lib.rs b/polkadot/cli/src/lib.rs index 4bb0dfb75835..944f8438f20f 100644 --- a/polkadot/cli/src/lib.rs +++ b/polkadot/cli/src/lib.rs @@ -26,10 +26,12 @@ mod command; mod error; #[cfg(feature = "service")] -pub use service::{self, Block, CoreApi, IdentifyVariant, ProvideRuntimeApi, 
TFullClient}; +pub use polkadot_service::{ + self as service, Block, CoreApi, IdentifyVariant, ProvideRuntimeApi, TFullClient, +}; #[cfg(feature = "malus")] -pub use service::overseer::validator_overseer_builder; +pub use polkadot_service::overseer::validator_overseer_builder; #[cfg(feature = "cli")] pub use cli::*; diff --git a/polkadot/core-primitives/Cargo.toml b/polkadot/core-primitives/Cargo.toml index 9794f8286ac3..7d94196fa26d 100644 --- a/polkadot/core-primitives/Cargo.toml +++ b/polkadot/core-primitives/Cargo.toml @@ -14,12 +14,12 @@ sp-core = { path = "../../substrate/primitives/core", default-features = false } sp-std = { path = "../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } [features] default = ["std"] std = [ - "parity-scale-codec/std", + "codec/std", "scale-info/std", "sp-core/std", "sp-runtime/std", diff --git a/polkadot/core-primitives/src/lib.rs b/polkadot/core-primitives/src/lib.rs index a74cdef3ad76..072c045a8c70 100644 --- a/polkadot/core-primitives/src/lib.rs +++ b/polkadot/core-primitives/src/lib.rs @@ -20,7 +20,7 @@ //! //! These core Polkadot types are used by the relay chain and the Parachains. 
-use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ generic, diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml index bf152e03be71..3c14fd95eee3 100644 --- a/polkadot/erasure-coding/Cargo.toml +++ b/polkadot/erasure-coding/Cargo.toml @@ -13,7 +13,7 @@ workspace = true polkadot-primitives = { path = "../primitives" } polkadot-node-primitives = { package = "polkadot-node-primitives", path = "../node/primitives" } novelpoly = { package = "reed-solomon-novelpoly", version = "2.0.0" } -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive", "std"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "std"] } sp-core = { path = "../../substrate/primitives/core" } sp-trie = { path = "../../substrate/primitives/trie" } thiserror = { workspace = true } diff --git a/polkadot/erasure-coding/fuzzer/Cargo.toml b/polkadot/erasure-coding/fuzzer/Cargo.toml index 4e5ef9d229d8..bd254f6d5165 100644 --- a/polkadot/erasure-coding/fuzzer/Cargo.toml +++ b/polkadot/erasure-coding/fuzzer/Cargo.toml @@ -13,7 +13,7 @@ workspace = true polkadot-erasure-coding = { path = ".." 
} honggfuzz = "0.5" polkadot-primitives = { path = "../../primitives" } -primitives = { package = "polkadot-node-primitives", path = "../../node/primitives" } +polkadot-node-primitives = { path = "../../node/primitives" } [[bin]] name = "reconstruct" diff --git a/polkadot/erasure-coding/fuzzer/src/reconstruct.rs b/polkadot/erasure-coding/fuzzer/src/reconstruct.rs index b2f9690a6fd3..6cb5742bc7d1 100644 --- a/polkadot/erasure-coding/fuzzer/src/reconstruct.rs +++ b/polkadot/erasure-coding/fuzzer/src/reconstruct.rs @@ -16,7 +16,7 @@ use honggfuzz::fuzz; use polkadot_erasure_coding::*; -use primitives::AvailableData; +use polkadot_node_primitives::AvailableData; fn main() { loop { diff --git a/polkadot/erasure-coding/fuzzer/src/round_trip.rs b/polkadot/erasure-coding/fuzzer/src/round_trip.rs index 2e38becf651d..627c9724d494 100644 --- a/polkadot/erasure-coding/fuzzer/src/round_trip.rs +++ b/polkadot/erasure-coding/fuzzer/src/round_trip.rs @@ -16,8 +16,8 @@ use honggfuzz::fuzz; use polkadot_erasure_coding::*; +use polkadot_node_primitives::{AvailableData, BlockData, PoV}; use polkadot_primitives::PersistedValidationData; -use primitives::{AvailableData, BlockData, PoV}; use std::sync::Arc; fn main() { diff --git a/polkadot/erasure-coding/src/lib.rs b/polkadot/erasure-coding/src/lib.rs index b354c3dac64c..9ebf5d11d7a7 100644 --- a/polkadot/erasure-coding/src/lib.rs +++ b/polkadot/erasure-coding/src/lib.rs @@ -24,7 +24,7 @@ //! f is the maximum number of faulty validators in the system. //! The data is coded so any f+1 chunks can be used to reconstruct the full data. -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use polkadot_node_primitives::{AvailableData, Proof}; use polkadot_primitives::{BlakeTwo256, Hash as H256, HashT}; use sp_core::Blake2Hasher; @@ -71,7 +71,7 @@ pub enum Error { BadPayload, /// Unable to decode reconstructed bytes. 
#[error("Unable to decode reconstructed payload: {0}")] - Decode(#[source] parity_scale_codec::Error), + Decode(#[source] codec::Error), /// Invalid branch proof. #[error("Invalid branch proof")] InvalidBranchProof, diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml index 0a28c3a830d1..da5d10d79949 100644 --- a/polkadot/node/collation-generation/Cargo.toml +++ b/polkadot/node/collation-generation/Cargo.toml @@ -20,11 +20,11 @@ polkadot-primitives = { path = "../../primitives" } sp-core = { path = "../../../substrate/primitives/core" } sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } thiserror = { workspace = true } -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } -test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" } +polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } assert_matches = "1.4.0" rstest = "0.18.2" sp-keyring = { path = "../../../substrate/primitives/keyring" } diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs index 374f090a2671..0c2f8ee14a58 100644 --- a/polkadot/node/collation-generation/src/lib.rs +++ b/polkadot/node/collation-generation/src/lib.rs @@ -31,8 +31,8 @@ #![deny(missing_docs)] +use codec::Encode; use futures::{channel::oneshot, future::FutureExt, join, select}; -use parity_scale_codec::Encode; use polkadot_node_primitives::{ AvailableData, Collation, CollationGenerationConfig, CollationSecondedSignal, PoV, SubmitCollationParams, diff --git a/polkadot/node/collation-generation/src/tests.rs b/polkadot/node/collation-generation/src/tests.rs 
index 10c391cba25d..0feee79e763c 100644 --- a/polkadot/node/collation-generation/src/tests.rs +++ b/polkadot/node/collation-generation/src/tests.rs @@ -34,15 +34,15 @@ use polkadot_primitives::{ AsyncBackingParams, BlockNumber, CollatorPair, HeadData, PersistedValidationData, ScheduledCore, ValidationCode, }; +use polkadot_primitives_test_helpers::{ + dummy_candidate_descriptor, dummy_hash, dummy_head_data, dummy_validator, make_candidate, +}; use rstest::rstest; use sp_keyring::sr25519::Keyring as Sr25519Keyring; use std::{ collections::{BTreeMap, VecDeque}, pin::Pin, }; -use test_helpers::{ - dummy_candidate_descriptor, dummy_hash, dummy_head_data, dummy_validator, make_candidate, -}; type VirtualOverseer = TestSubsystemContextHandle; diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index 5bf80d59ede9..7da3d7ddd781 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] futures = "0.3.30" futures-timer = "3.0.2" -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } gum = { package = "tracing-gum", path = "../../gum" } bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } schnellru = "0.2.1" @@ -50,7 +50,7 @@ sp-consensus-babe = { path = "../../../../substrate/primitives/consensus/babe" } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } assert_matches = "1.4.0" kvdb-memorydb = "0.13.0" -test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } +polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } log = { workspace = true, default-features = true } env_logger = "0.11" diff --git 
a/polkadot/node/core/approval-voting/src/approval_checking.rs b/polkadot/node/core/approval-voting/src/approval_checking.rs index 693a28800114..8667d3639185 100644 --- a/polkadot/node/core/approval-voting/src/approval_checking.rs +++ b/polkadot/node/core/approval-voting/src/approval_checking.rs @@ -482,9 +482,9 @@ pub fn tranches_to_approve( mod tests { use super::*; use crate::{approval_db, BTreeMap}; - use ::test_helpers::{dummy_candidate_receipt, dummy_hash}; use bitvec::{bitvec, order::Lsb0 as BitOrderLsb0, vec::BitVec}; use polkadot_primitives::GroupIndex; + use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; #[test] fn pending_is_not_approved() { diff --git a/polkadot/node/core/approval-voting/src/approval_db/common/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/common/mod.rs index 249dcf912df5..11266f0b99d8 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/common/mod.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/common/mod.rs @@ -17,7 +17,7 @@ //! Common helper functions for all versions of approval-voting database. use std::sync::Arc; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; use polkadot_node_subsystem_util::database::{DBTransaction, Database}; use polkadot_primitives::{BlockNumber, CandidateHash, CandidateIndex, Hash}; @@ -64,7 +64,7 @@ impl DbBackend { #[derive(Debug, derive_more::From, derive_more::Display)] pub enum Error { Io(std::io::Error), - InvalidDecoding(parity_scale_codec::Error), + InvalidDecoding(codec::Error), InternalError(SubsystemError), } diff --git a/polkadot/node/core/approval-voting/src/approval_db/v1/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/v1/mod.rs index 011d0a559c02..53e9db64f636 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v1/mod.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v1/mod.rs @@ -22,7 +22,7 @@ //! 
its data in the database. Any breaking changes here will still //! require a db migration (check `node/service/src/parachains_db/upgrade.rs`). -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use polkadot_node_primitives::approval::v1::{AssignmentCert, DelayTranche}; use polkadot_primitives::{ BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex, diff --git a/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs index b0966ad01f7b..4c08d22f3ca2 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs @@ -25,7 +25,9 @@ use polkadot_node_subsystem_util::database::Database; use polkadot_primitives::Id as ParaId; use std::{collections::HashMap, sync::Arc}; -use ::test_helpers::{dummy_candidate_receipt, dummy_candidate_receipt_bad_sig, dummy_hash}; +use polkadot_primitives_test_helpers::{ + dummy_candidate_receipt, dummy_candidate_receipt_bad_sig, dummy_hash, +}; const DATA_COL: u32 = 0; diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs index da42fc5be485..cd9256a5d47e 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs @@ -16,7 +16,7 @@ //! Version 2 of the DB schema. 
-use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use polkadot_node_primitives::approval::{v1::DelayTranche, v2::AssignmentCertV2}; use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; use polkadot_node_subsystem_util::database::{DBTransaction, Database}; diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs index 5fa915add416..06a3cc1e306b 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs @@ -34,7 +34,9 @@ use polkadot_primitives::Id as ParaId; use sp_consensus_slots::Slot; use std::{collections::HashMap, sync::Arc}; -use ::test_helpers::{dummy_candidate_receipt, dummy_candidate_receipt_bad_sig, dummy_hash}; +use polkadot_primitives_test_helpers::{ + dummy_candidate_receipt, dummy_candidate_receipt_bad_sig, dummy_hash, +}; const DATA_COL: u32 = 0; diff --git a/polkadot/node/core/approval-voting/src/approval_db/v3/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/v3/mod.rs index 3e4f43021952..7118fb6770fd 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v3/mod.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v3/mod.rs @@ -19,7 +19,7 @@ //! Version 3 modifies the `our_approval` format of `ApprovalEntry` //! 
and adds a new field `pending_signatures` for `BlockEntry` -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use polkadot_node_primitives::approval::v2::CandidateBitfield; use polkadot_node_subsystem::SubsystemResult; use polkadot_node_subsystem_util::database::{DBTransaction, Database}; diff --git a/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs index 7c0cf9d4f7da..d2a1d7d400b1 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs @@ -33,7 +33,9 @@ use polkadot_primitives::Id as ParaId; use sp_consensus_slots::Slot; use std::{collections::HashMap, sync::Arc}; -use ::test_helpers::{dummy_candidate_receipt, dummy_candidate_receipt_bad_sig, dummy_hash}; +use polkadot_primitives_test_helpers::{ + dummy_candidate_receipt, dummy_candidate_receipt_bad_sig, dummy_hash, +}; const DATA_COL: u32 = 0; diff --git a/polkadot/node/core/approval-voting/src/criteria.rs b/polkadot/node/core/approval-voting/src/criteria.rs index 57c0ac272dc5..fb9d281e43bc 100644 --- a/polkadot/node/core/approval-voting/src/criteria.rs +++ b/polkadot/node/core/approval-voting/src/criteria.rs @@ -16,8 +16,8 @@ //! Assignment criteria VRF generation and checking. 
+use codec::{Decode, Encode}; use itertools::Itertools; -use parity_scale_codec::{Decode, Encode}; use polkadot_node_primitives::approval::{ self as approval_types, v1::{AssignmentCert, AssignmentCertKind, DelayTranche, RelayVRFStory}, diff --git a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs index 13b0b1bae1bc..59b6f91c0a82 100644 --- a/polkadot/node/core/approval-voting/src/import.rs +++ b/polkadot/node/core/approval-voting/src/import.rs @@ -609,7 +609,6 @@ pub(crate) mod tests { approval_db::common::{load_block_entry, DbBackend}, RuntimeInfo, RuntimeInfoConfig, MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS, }; - use ::test_helpers::{dummy_candidate_receipt, dummy_hash}; use assert_matches::assert_matches; use polkadot_node_primitives::{ approval::v1::{VrfSignature, VrfTranscript}, @@ -622,6 +621,7 @@ pub(crate) mod tests { node_features::FeatureIndex, ExecutorParams, Id as ParaId, IndexedVec, NodeFeatures, SessionInfo, ValidatorId, ValidatorIndex, }; + use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; use schnellru::{ByLength, LruMap}; pub(crate) use sp_consensus_babe::{ digests::{CompatibleDigestItem, PreDigest, SecondaryVRFPreDigest}, diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index 43af8d476a6b..5cbae7f908fc 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -68,7 +68,7 @@ use super::{ }, }; -use ::test_helpers::{dummy_candidate_receipt, dummy_candidate_receipt_bad_sig}; +use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_candidate_receipt_bad_sig}; const SLOT_DURATION_MILLIS: u64 = 5000; @@ -463,7 +463,8 @@ fn sign_approval_multiple_candidates( .into() } -type VirtualOverseer = test_helpers::TestSubsystemContextHandle; +type VirtualOverseer = + polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; #[derive(Default)] struct 
HarnessConfigBuilder { @@ -552,7 +553,8 @@ fn test_harness>( config; let pool = sp_core::testing::TaskExecutor::new(); - let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool); + let (context, virtual_overseer) = + polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); let keystore = LocalKeystore::in_memory(); let _ = keystore.sr25519_generate_new( diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml index c5b3c382011b..62f7ff0b61e6 100644 --- a/polkadot/node/core/av-store/Cargo.toml +++ b/polkadot/node/core/av-store/Cargo.toml @@ -17,8 +17,8 @@ thiserror = { workspace = true } gum = { package = "tracing-gum", path = "../../gum" } bitvec = "1.0.0" -parity-scale-codec = { version = "3.6.12", features = ["derive"] } -erasure = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } +polkadot-erasure-coding = { path = "../../../erasure-coding" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-overseer = { path = "../../overseer" } @@ -38,4 +38,4 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } sp-keyring = { path = "../../../../substrate/primitives/keyring" } parking_lot = "0.12.1" -test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } +polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } diff --git a/polkadot/node/core/av-store/src/lib.rs b/polkadot/node/core/av-store/src/lib.rs index 59a35a6a45a9..7b245c9e3c52 100644 --- a/polkadot/node/core/av-store/src/lib.rs +++ b/polkadot/node/core/av-store/src/lib.rs @@ -26,6 +26,7 @@ use std::{ time::{Duration, SystemTime, SystemTimeError, UNIX_EPOCH}, }; +use codec::{Decode, Encode, Error as 
CodecError, Input}; use futures::{ channel::{ mpsc::{channel, Receiver as MpscReceiver, Sender as MpscSender}, @@ -34,7 +35,6 @@ use futures::{ future, select, FutureExt, SinkExt, StreamExt, }; use futures_timer::Delay; -use parity_scale_codec::{Decode, Encode, Error as CodecError, Input}; use polkadot_node_subsystem_util::database::{DBTransaction, Database}; use sp_consensus::SyncOracle; @@ -354,7 +354,7 @@ pub enum Error { ChainApi(#[from] ChainApiError), #[error(transparent)] - Erasure(#[from] erasure::Error), + Erasure(#[from] polkadot_erasure_coding::Error), #[error(transparent)] Io(#[from] io::Error), @@ -1321,8 +1321,8 @@ fn store_available_data( // Important note: This check below is critical for consensus and the `backing` subsystem relies // on it to ensure candidate validity. - let chunks = erasure::obtain_chunks_v1(n_validators, &available_data)?; - let branches = erasure::branches(chunks.as_ref()); + let chunks = polkadot_erasure_coding::obtain_chunks_v1(n_validators, &available_data)?; + let branches = polkadot_erasure_coding::branches(chunks.as_ref()); if branches.root() != expected_erasure_root { return Err(Error::InvalidErasureRoot) diff --git a/polkadot/node/core/av-store/src/tests.rs b/polkadot/node/core/av-store/src/tests.rs index e87f7cc3b8d6..04a223730bcd 100644 --- a/polkadot/node/core/av-store/src/tests.rs +++ b/polkadot/node/core/av-store/src/tests.rs @@ -21,7 +21,6 @@ use futures::{channel::oneshot, executor, future, Future}; use util::availability_chunks::availability_chunk_index; use self::test_helpers::mock::new_leaf; -use ::test_helpers::TestCandidateBuilder; use parking_lot::Mutex; use polkadot_node_primitives::{AvailableData, BlockData, PoV, Proof}; use polkadot_node_subsystem::{ @@ -35,6 +34,7 @@ use polkadot_primitives::{ node_features, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, HeadData, Header, PersistedValidationData, ValidatorId, }; +use polkadot_primitives_test_helpers::TestCandidateBuilder; use 
sp_keyring::Sr25519Keyring; mod columns { @@ -45,7 +45,8 @@ mod columns { const TEST_CONFIG: Config = Config { col_data: columns::DATA, col_meta: columns::META }; -type VirtualOverseer = test_helpers::TestSubsystemContextHandle; +type VirtualOverseer = + polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; #[derive(Clone)] struct TestClock { @@ -128,7 +129,8 @@ fn test_harness>( .try_init(); let pool = sp_core::testing::TaskExecutor::new(); - let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); + let (context, virtual_overseer) = + polkadot_node_subsystem_test_helpers::make_subsystem_context(pool.clone()); let subsystem = AvailabilityStoreSubsystem::with_pruning_config_and_clock( store, @@ -485,9 +487,11 @@ fn store_pov_and_queries_work() { validation_data: test_state.persisted_validation_data.clone(), }; - let chunks = erasure::obtain_chunks_v1(n_validators as _, &available_data).unwrap(); + let chunks = + polkadot_erasure_coding::obtain_chunks_v1(n_validators as _, &available_data) + .unwrap(); - let branches = erasure::branches(chunks.as_ref()); + let branches = polkadot_erasure_coding::branches(chunks.as_ref()); let (tx, rx) = oneshot::channel(); let block_msg = AvailabilityStoreMessage::StoreAvailableData { @@ -568,9 +572,11 @@ fn store_pov_and_queries_work() { validation_data: test_state.persisted_validation_data.clone(), }; - let chunks = erasure::obtain_chunks_v1(n_validators as _, &available_data).unwrap(); + let chunks = + polkadot_erasure_coding::obtain_chunks_v1(n_validators as _, &available_data) + .unwrap(); - let branches = erasure::branches(chunks.as_ref()); + let branches = polkadot_erasure_coding::branches(chunks.as_ref()); let core_index = CoreIndex(core_index); let (tx, rx) = oneshot::channel(); @@ -667,8 +673,9 @@ fn query_all_chunks_works() { { let chunks_expected = - erasure::obtain_chunks_v1(n_validators as _, &available_data).unwrap(); - let branches = 
erasure::branches(chunks_expected.as_ref()); + polkadot_erasure_coding::obtain_chunks_v1(n_validators as _, &available_data) + .unwrap(); + let branches = polkadot_erasure_coding::branches(chunks_expected.as_ref()); let (tx, rx) = oneshot::channel(); let block_msg = AvailabilityStoreMessage::StoreAvailableData { candidate_hash: candidate_hash_1, @@ -762,8 +769,9 @@ fn stored_but_not_included_data_is_pruned() { }; let (tx, rx) = oneshot::channel(); - let chunks = erasure::obtain_chunks_v1(n_validators as _, &available_data).unwrap(); - let branches = erasure::branches(chunks.as_ref()); + let chunks = + polkadot_erasure_coding::obtain_chunks_v1(n_validators as _, &available_data).unwrap(); + let branches = polkadot_erasure_coding::branches(chunks.as_ref()); let block_msg = AvailabilityStoreMessage::StoreAvailableData { candidate_hash, @@ -819,8 +827,9 @@ fn stored_data_kept_until_finalized() { let parent = Hash::repeat_byte(2); let block_number = 10; - let chunks = erasure::obtain_chunks_v1(n_validators as _, &available_data).unwrap(); - let branches = erasure::branches(chunks.as_ref()); + let chunks = + polkadot_erasure_coding::obtain_chunks_v1(n_validators as _, &available_data).unwrap(); + let branches = polkadot_erasure_coding::branches(chunks.as_ref()); let (tx, rx) = oneshot::channel(); let block_msg = AvailabilityStoreMessage::StoreAvailableData { @@ -1096,8 +1105,10 @@ fn forkfullness_works() { validation_data: test_state.persisted_validation_data.clone(), }; - let chunks = erasure::obtain_chunks_v1(n_validators as _, &available_data_1).unwrap(); - let branches = erasure::branches(chunks.as_ref()); + let chunks = + polkadot_erasure_coding::obtain_chunks_v1(n_validators as _, &available_data_1) + .unwrap(); + let branches = polkadot_erasure_coding::branches(chunks.as_ref()); let (tx, rx) = oneshot::channel(); let msg = AvailabilityStoreMessage::StoreAvailableData { @@ -1114,8 +1125,10 @@ fn forkfullness_works() { rx.await.unwrap().unwrap(); - let chunks = 
erasure::obtain_chunks_v1(n_validators as _, &available_data_2).unwrap(); - let branches = erasure::branches(chunks.as_ref()); + let chunks = + polkadot_erasure_coding::obtain_chunks_v1(n_validators as _, &available_data_2) + .unwrap(); + let branches = polkadot_erasure_coding::branches(chunks.as_ref()); let (tx, rx) = oneshot::channel(); let msg = AvailabilityStoreMessage::StoreAvailableData { diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index f426f73284e8..ffd6de076889 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -16,8 +16,8 @@ polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } -erasure-coding = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" } -statement-table = { package = "polkadot-statement-table", path = "../../../statement-table" } +polkadot-erasure-coding = { path = "../../../erasure-coding" } +polkadot-statement-table = { path = "../../../statement-table" } bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } gum = { package = "tracing-gum", path = "../../gum" } thiserror = { workspace = true } @@ -34,4 +34,4 @@ futures = { version = "0.3.30", features = ["thread-pool"] } assert_matches = "1.4.0" rstest = "0.18.2" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } +polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } diff --git a/polkadot/node/core/backing/src/error.rs b/polkadot/node/core/backing/src/error.rs index 52684f3fe306..568f71402644 100644 --- a/polkadot/node/core/backing/src/error.rs +++ b/polkadot/node/core/backing/src/error.rs @@ -88,7 +88,7 @@ pub enum 
Error { JoinMultiple(#[source] oneshot::Canceled), #[error("Obtaining erasure chunks failed")] - ObtainErasureChunks(#[from] erasure_coding::Error), + ObtainErasureChunks(#[from] polkadot_erasure_coding::Error), #[error(transparent)] ValidationFailed(#[from] ValidationFailed), diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 2fa8ad29efe5..38e8a93bb048 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -111,8 +111,7 @@ use polkadot_primitives::{ PvfExecKind, SessionIndex, SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, }; -use sp_keystore::KeystorePtr; -use statement_table::{ +use polkadot_statement_table::{ generic::AttestedCandidate as TableAttestedCandidate, v2::{ SignedStatement as TableSignedStatement, Statement as TableStatement, @@ -120,6 +119,7 @@ use statement_table::{ }, Config as TableConfig, Context as TableContextTrait, Table, }; +use sp_keystore::KeystorePtr; use util::{runtime::request_node_features, vstaging::get_disabled_validators_with_fallback}; mod error; diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 00f9e4cd8ff6..bb23c7fbeb24 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -16,10 +16,6 @@ use self::test_helpers::mock::new_leaf; use super::*; -use ::test_helpers::{ - dummy_candidate_receipt_bad_sig, dummy_collator, dummy_collator_signature, - dummy_committed_candidate_receipt, dummy_hash, validator_pubkeys, -}; use assert_matches::assert_matches; use futures::{future, Future}; use polkadot_node_primitives::{BlockData, InvalidCandidate, SignedFullStatement, Statement}; @@ -36,12 +32,16 @@ use polkadot_primitives::{ node_features, CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, PvfExecKind, ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, }; +use 
polkadot_primitives_test_helpers::{ + dummy_candidate_receipt_bad_sig, dummy_collator, dummy_collator_signature, + dummy_committed_candidate_receipt, dummy_hash, validator_pubkeys, +}; +use polkadot_statement_table::v2::Misbehavior; use rstest::rstest; use sp_application_crypto::AppCrypto; use sp_keyring::Sr25519Keyring; use sp_keystore::Keystore; use sp_tracing as _; -use statement_table::v2::Misbehavior; use std::{collections::HashMap, time::Duration}; mod prospective_parachains; @@ -164,7 +164,8 @@ impl Default for TestState { } } -type VirtualOverseer = test_helpers::TestSubsystemContextHandle; +type VirtualOverseer = + polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; fn test_harness>( keystore: KeystorePtr, @@ -172,7 +173,8 @@ fn test_harness>( ) { let pool = sp_core::testing::TaskExecutor::new(); - let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); + let (context, virtual_overseer) = + polkadot_node_subsystem_test_helpers::make_subsystem_context(pool.clone()); let subsystem = async move { if let Err(e) = super::run(context, keystore, Metrics(None)).await { @@ -196,8 +198,9 @@ fn test_harness>( fn make_erasure_root(test: &TestState, pov: PoV, validation_data: PersistedValidationData) -> Hash { let available_data = AvailableData { validation_data, pov: Arc::new(pov) }; - let chunks = erasure_coding::obtain_chunks_v1(test.validators.len(), &available_data).unwrap(); - erasure_coding::branches(&chunks).root() + let chunks = + polkadot_erasure_coding::obtain_chunks_v1(test.validators.len(), &available_data).unwrap(); + polkadot_erasure_coding::branches(&chunks).root() } #[derive(Default, Clone)] @@ -1955,7 +1958,7 @@ fn candidate_backing_reorders_votes() { data[32..36].copy_from_slice(idx.encode().as_slice()); let sig = ValidatorSignature::try_from(data).unwrap(); - statement_table::generic::ValidityAttestation::Implicit(sig) + polkadot_statement_table::generic::ValidityAttestation::Implicit(sig) }; let attested 
= TableAttestedCandidate { diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index 5ef3a3b15285..74490c84eb18 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -1607,7 +1607,8 @@ fn occupied_core_assignment() { let previous_para_id = test_state.chain_ids[1]; // Set the core state to occupied. - let mut candidate_descriptor = ::test_helpers::dummy_candidate_descriptor(Hash::zero()); + let mut candidate_descriptor = + polkadot_primitives_test_helpers::dummy_candidate_descriptor(Hash::zero()); candidate_descriptor.para_id = previous_para_id; test_state.availability_cores[0] = CoreState::Occupied(OccupiedCore { group_responsible: Default::default(), diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml index 0663e0f1b699..335e733987b0 100644 --- a/polkadot/node/core/bitfield-signing/Cargo.toml +++ b/polkadot/node/core/bitfield-signing/Cargo.toml @@ -21,4 +21,4 @@ thiserror = { workspace = true } [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } +polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } diff --git a/polkadot/node/core/bitfield-signing/src/tests.rs b/polkadot/node/core/bitfield-signing/src/tests.rs index 0e61e6086d28..eeaa524d1c63 100644 --- a/polkadot/node/core/bitfield-signing/src/tests.rs +++ b/polkadot/node/core/bitfield-signing/src/tests.rs @@ -18,7 +18,7 @@ use super::*; use futures::{executor::block_on, pin_mut, StreamExt}; use polkadot_node_subsystem::messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest}; use polkadot_primitives::{CandidateHash, OccupiedCore}; -use test_helpers::dummy_candidate_descriptor; +use 
polkadot_primitives_test_helpers::dummy_candidate_descriptor; fn occupied_core(para_id: u32, candidate_hash: CandidateHash) -> CoreState { CoreState::Occupied(OccupiedCore { diff --git a/polkadot/node/core/candidate-validation/Cargo.toml b/polkadot/node/core/candidate-validation/Cargo.toml index e79b3a734b8f..a0b25e6c25f9 100644 --- a/polkadot/node/core/candidate-validation/Cargo.toml +++ b/polkadot/node/core/candidate-validation/Cargo.toml @@ -16,7 +16,7 @@ futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } sp-maybe-compressed-blob = { package = "sp-maybe-compressed-blob", path = "../../../../substrate/primitives/maybe-compressed-blob" } -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } polkadot-primitives = { path = "../../../primitives" } polkadot-parachain-primitives = { path = "../../../parachain" } @@ -35,4 +35,4 @@ futures = { version = "0.3.30", features = ["thread-pool"] } assert_matches = "1.4.0" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } sp-core = { path = "../../../../substrate/primitives/core" } -test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } +polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 08881dad1961..76619bd391f2 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -53,7 +53,7 @@ use polkadot_primitives::{ ValidationCodeHash, }; -use parity_scale_codec::Encode; +use codec::Encode; use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered}; diff --git a/polkadot/node/core/candidate-validation/src/tests.rs 
b/polkadot/node/core/candidate-validation/src/tests.rs index e492d51e239e..491ed7a335d8 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -15,14 +15,13 @@ // along with Polkadot. If not, see . use super::*; -use ::test_helpers::{dummy_hash, make_valid_candidate_descriptor}; use assert_matches::assert_matches; use futures::executor; use polkadot_node_core_pvf::PrepareError; use polkadot_node_subsystem::messages::AllMessages; -use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::reexports::SubsystemContext; use polkadot_primitives::{HeadData, Id as ParaId, UpwardMessage}; +use polkadot_primitives_test_helpers::{dummy_hash, make_valid_candidate_descriptor}; use sp_core::testing::TaskExecutor; use sp_keyring::Sr25519Keyring; @@ -47,8 +46,10 @@ fn correctly_checks_included_assumption() { ); let pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = - test_helpers::make_subsystem_context::(pool.clone()); + let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< + AllMessages, + _, + >(pool.clone()); let (check_fut, check_result) = check_assumption_validation_data( ctx.sender(), @@ -119,8 +120,10 @@ fn correctly_checks_timed_out_assumption() { ); let pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = - test_helpers::make_subsystem_context::(pool.clone()); + let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< + AllMessages, + _, + >(pool.clone()); let (check_fut, check_result) = check_assumption_validation_data( ctx.sender(), @@ -189,8 +192,10 @@ fn check_is_bad_request_if_no_validation_data() { ); let pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = - test_helpers::make_subsystem_context::(pool.clone()); + let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< + AllMessages, + _, + >(pool.clone()); let 
(check_fut, check_result) = check_assumption_validation_data( ctx.sender(), @@ -243,8 +248,10 @@ fn check_is_bad_request_if_no_validation_code() { ); let pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = - test_helpers::make_subsystem_context::(pool.clone()); + let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< + AllMessages, + _, + >(pool.clone()); let (check_fut, check_result) = check_assumption_validation_data( ctx.sender(), @@ -309,8 +316,10 @@ fn check_does_not_match() { ); let pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = - test_helpers::make_subsystem_context::(pool.clone()); + let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< + AllMessages, + _, + >(pool.clone()); let (check_fut, check_result) = check_assumption_validation_data( ctx.sender(), @@ -850,7 +859,10 @@ fn candidate_validation_code_mismatch_is_invalid() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let pool = TaskExecutor::new(); - let (_ctx, _ctx_handle) = test_helpers::make_subsystem_context::(pool.clone()); + let (_ctx, _ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< + AllMessages, + _, + >(pool.clone()); let v = executor::block_on(validate_candidate_exhaustive( MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( @@ -960,7 +972,10 @@ fn code_decompression_failure_is_error() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let pool = TaskExecutor::new(); - let (_ctx, _ctx_handle) = test_helpers::make_subsystem_context::(pool.clone()); + let (_ctx, _ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< + AllMessages, + _, + >(pool.clone()); let v = executor::block_on(validate_candidate_exhaustive( MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), @@ -1012,7 +1027,10 @@ fn 
pov_decompression_failure_is_invalid() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let pool = TaskExecutor::new(); - let (_ctx, _ctx_handle) = test_helpers::make_subsystem_context::(pool.clone()); + let (_ctx, _ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< + AllMessages, + _, + >(pool.clone()); let v = executor::block_on(validate_candidate_exhaustive( MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), @@ -1062,8 +1080,10 @@ fn precheck_works() { let validation_code_hash = validation_code.hash(); let pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = - test_helpers::make_subsystem_context::(pool.clone()); + let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< + AllMessages, + _, + >(pool.clone()); let (check_fut, check_result) = precheck_pvf( ctx.sender(), @@ -1124,8 +1144,10 @@ fn precheck_invalid_pvf_blob_compression() { let validation_code_hash = validation_code.hash(); let pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = - test_helpers::make_subsystem_context::(pool.clone()); + let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< + AllMessages, + _, + >(pool.clone()); let (check_fut, check_result) = precheck_pvf( ctx.sender(), @@ -1183,7 +1205,9 @@ fn precheck_properly_classifies_outcomes() { let pool = TaskExecutor::new(); let (mut ctx, mut ctx_handle) = - test_helpers::make_subsystem_context::(pool.clone()); + polkadot_node_subsystem_test_helpers::make_subsystem_context::( + pool.clone(), + ); let (check_fut, check_result) = precheck_pvf( ctx.sender(), diff --git a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml index bd8531c20784..c58024876b9c 100644 --- a/polkadot/node/core/chain-api/Cargo.toml +++ b/polkadot/node/core/chain-api/Cargo.toml @@ -21,7 +21,7 @@ sc-consensus-babe = { path = 
"../../../../substrate/client/consensus/babe" } [dev-dependencies] futures = { version = "0.3.30", features = ["thread-pool"] } maplit = "1.0.2" -parity-scale-codec = "3.6.12" +codec = { package = "parity-scale-codec", version = "3.6.12" } polkadot-node-primitives = { path = "../../primitives" } polkadot-primitives = { path = "../../../primitives" } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/polkadot/node/core/chain-api/src/tests.rs b/polkadot/node/core/chain-api/src/tests.rs index eae8f6fa4ac5..4e85affc540f 100644 --- a/polkadot/node/core/chain-api/src/tests.rs +++ b/polkadot/node/core/chain-api/src/tests.rs @@ -16,8 +16,8 @@ use super::*; +use codec::Encode; use futures::{channel::oneshot, future::BoxFuture}; -use parity_scale_codec::Encode; use std::collections::BTreeMap; use polkadot_node_primitives::BlockWeight; diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml index b58053b5417e..2aa929653ccc 100644 --- a/polkadot/node/core/chain-selection/Cargo.toml +++ b/polkadot/node/core/chain-selection/Cargo.toml @@ -19,7 +19,7 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } kvdb = "0.13.0" thiserror = { workspace = true } -parity-scale-codec = "3.6.12" +codec = { package = "parity-scale-codec", version = "3.6.12" } [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/polkadot/node/core/chain-selection/src/db_backend/v1.rs b/polkadot/node/core/chain-selection/src/db_backend/v1.rs index 7c7144bb763d..8831b1e3c36c 100644 --- a/polkadot/node/core/chain-selection/src/db_backend/v1.rs +++ b/polkadot/node/core/chain-selection/src/db_backend/v1.rs @@ -40,7 +40,7 @@ use crate::{ use polkadot_node_primitives::BlockWeight; use polkadot_primitives::{BlockNumber, Hash}; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; 
use polkadot_node_subsystem_util::database::{DBTransaction, Database}; use std::sync::Arc; diff --git a/polkadot/node/core/chain-selection/src/lib.rs b/polkadot/node/core/chain-selection/src/lib.rs index 07c245e839bf..6c091b02709b 100644 --- a/polkadot/node/core/chain-selection/src/lib.rs +++ b/polkadot/node/core/chain-selection/src/lib.rs @@ -26,8 +26,8 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::database::Database; use polkadot_primitives::{BlockNumber, ConsensusLog, Hash, Header}; +use codec::Error as CodecError; use futures::{channel::oneshot, future::Either, prelude::*}; -use parity_scale_codec::Error as CodecError; use std::{ sync::Arc, diff --git a/polkadot/node/core/chain-selection/src/tests.rs b/polkadot/node/core/chain-selection/src/tests.rs index 1fe87f04cd58..2b1e1196ede3 100644 --- a/polkadot/node/core/chain-selection/src/tests.rs +++ b/polkadot/node/core/chain-selection/src/tests.rs @@ -30,8 +30,8 @@ use std::{ }; use assert_matches::assert_matches; +use codec::Encode; use futures::channel::oneshot; -use parity_scale_codec::Encode; use parking_lot::Mutex; use sp_core::testing::TaskExecutor; @@ -229,13 +229,15 @@ impl Clock for TestClock { const TEST_STAGNANT_INTERVAL: Duration = Duration::from_millis(20); -type VirtualOverseer = test_helpers::TestSubsystemContextHandle; +type VirtualOverseer = + polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; fn test_harness>( test: impl FnOnce(TestBackend, TestClock, VirtualOverseer) -> T, ) { let pool = TaskExecutor::new(); - let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool); + let (context, virtual_overseer) = + polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); let backend = TestBackend::default(); let clock = TestClock::new(0); diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml index 8bd510697c91..2c08cfa9b1ef 100644 --- 
a/polkadot/node/core/dispute-coordinator/Cargo.toml +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } -parity-scale-codec = "3.6.12" +codec = { package = "parity-scale-codec", version = "3.6.12" } kvdb = "0.13.0" thiserror = { workspace = true } schnellru = "0.2.1" @@ -33,7 +33,7 @@ sp-keyring = { path = "../../../../substrate/primitives/keyring" } sp-core = { path = "../../../../substrate/primitives/core" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } assert_matches = "1.4.0" -test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } +polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } futures-timer = "3.0.2" sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } sp-tracing = { path = "../../../../substrate/primitives/tracing" } diff --git a/polkadot/node/core/dispute-coordinator/src/db/v1.rs b/polkadot/node/core/dispute-coordinator/src/db/v1.rs index 4950765cf510..0101791550ee 100644 --- a/polkadot/node/core/dispute-coordinator/src/db/v1.rs +++ b/polkadot/node/core/dispute-coordinator/src/db/v1.rs @@ -31,7 +31,7 @@ use polkadot_primitives::{ use std::sync::Arc; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use crate::{ backend::{Backend, BackendWriteOp, OverlayedBackend}, @@ -258,7 +258,7 @@ pub enum Error { #[error(transparent)] Io(#[from] std::io::Error), #[error(transparent)] - Codec(#[from] parity_scale_codec::Error), + Codec(#[from] codec::Error), } impl From for crate::error::Error { @@ -375,9 +375,9 @@ fn load_cleaned_votes_watermark( mod tests { use super::*; - use ::test_helpers::{dummy_candidate_receipt, dummy_hash}; use polkadot_node_primitives::DISPUTE_WINDOW; use polkadot_primitives::{Hash, Id as ParaId}; + use 
polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; fn make_db() -> DbBackend { let db = kvdb_memorydb::create(1); diff --git a/polkadot/node/core/dispute-coordinator/src/error.rs b/polkadot/node/core/dispute-coordinator/src/error.rs index cbda3dc1d121..94bc8e9c9497 100644 --- a/polkadot/node/core/dispute-coordinator/src/error.rs +++ b/polkadot/node/core/dispute-coordinator/src/error.rs @@ -21,7 +21,7 @@ use polkadot_node_subsystem::{errors::ChainApiError, SubsystemError}; use polkadot_node_subsystem_util::runtime; use crate::{db, participation, LOG_TARGET}; -use parity_scale_codec::Error as CodecError; +use codec::Error as CodecError; pub type Result = std::result::Result; pub type FatalResult = std::result::Result; diff --git a/polkadot/node/core/dispute-coordinator/src/participation/queues/tests.rs b/polkadot/node/core/dispute-coordinator/src/participation/queues/tests.rs index 63bfc1d7d026..9176d00b2f5c 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/queues/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/queues/tests.rs @@ -15,9 +15,9 @@ // along with Polkadot. If not, see . 
use crate::{metrics::Metrics, ParticipationPriority}; -use ::test_helpers::{dummy_candidate_receipt, dummy_hash}; use assert_matches::assert_matches; use polkadot_primitives::{BlockNumber, Hash}; +use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; use super::{CandidateComparator, ParticipationRequest, QueueError, Queues}; diff --git a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs index 1316508e84cf..a80553828ac6 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs @@ -22,10 +22,7 @@ use std::{sync::Arc, time::Duration}; use sp_core::testing::TaskExecutor; use super::*; -use ::test_helpers::{ - dummy_candidate_commitments, dummy_candidate_receipt_bad_sig, dummy_digest, dummy_hash, -}; -use parity_scale_codec::Encode; +use codec::Encode; use polkadot_node_primitives::{AvailableData, BlockData, InvalidCandidate, PoV}; use polkadot_node_subsystem::{ messages::{ @@ -40,6 +37,9 @@ use polkadot_node_subsystem_test_helpers::{ use polkadot_primitives::{ BlakeTwo256, CandidateCommitments, HashT, Header, PersistedValidationData, ValidationCode, }; +use polkadot_primitives_test_helpers::{ + dummy_candidate_commitments, dummy_candidate_receipt_bad_sig, dummy_digest, dummy_hash, +}; type VirtualOverseer = TestSubsystemContextHandle; diff --git a/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs b/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs index 726dda596d7b..ed2400387ef7 100644 --- a/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs @@ -18,11 +18,10 @@ use std::time::Duration; use assert_matches::assert_matches; +use codec::Encode; use futures::future::join; -use parity_scale_codec::Encode; use sp_core::testing::TaskExecutor; -use ::test_helpers::{dummy_collator, 
dummy_collator_signature, dummy_hash}; use polkadot_node_primitives::DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION; use polkadot_node_subsystem::{ messages::{ @@ -40,6 +39,7 @@ use polkadot_primitives::{ BlakeTwo256, BlockNumber, CandidateDescriptor, CandidateEvent, CandidateReceipt, CoreIndex, GroupIndex, Hash, HashT, HeadData, Id as ParaId, }; +use polkadot_primitives_test_helpers::{dummy_collator, dummy_collator_signature, dummy_hash}; use crate::{scraping::Inclusions, LOG_TARGET}; diff --git a/polkadot/node/core/dispute-coordinator/src/tests.rs b/polkadot/node/core/dispute-coordinator/src/tests.rs index 13cf2df88223..f97a625a9528 100644 --- a/polkadot/node/core/dispute-coordinator/src/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/tests.rs @@ -51,7 +51,6 @@ use sp_core::{sr25519::Pair, testing::TaskExecutor, Pair as PairT}; use sp_keyring::Sr25519Keyring; use sp_keystore::{Keystore, KeystorePtr}; -use ::test_helpers::{dummy_candidate_receipt_bad_sig, dummy_digest, dummy_hash}; use polkadot_node_primitives::{Timestamp, ACTIVE_DURATION_SECS}; use polkadot_node_subsystem::{ messages::{AllMessages, BlockDescription, RuntimeApiMessage, RuntimeApiRequest}, @@ -67,6 +66,7 @@ use polkadot_primitives::{ SessionInfo, SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, }; +use polkadot_primitives_test_helpers::{dummy_candidate_receipt_bad_sig, dummy_digest, dummy_hash}; use crate::{ backend::Backend, diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index 5b4f12a5fbda..f3193153be89 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } -parity-scale-codec = "3.6.12" +codec = { package = "parity-scale-codec", version = "3.6.12" } thiserror = { workspace = true } 
fatality = "0.1.1" bitvec = "1" diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index 4bc473672788..d2fc3cbd3623 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -42,7 +42,8 @@ const ASYNC_BACKING_DISABLED_ERROR: RuntimeApiError = const MAX_POV_SIZE: u32 = 1_000_000; -type VirtualOverseer = test_helpers::TestSubsystemContextHandle; +type VirtualOverseer = + polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; fn dummy_constraints( min_relay_parent_number: BlockNumber, @@ -97,7 +98,8 @@ fn test_harness>( ) -> View { let pool = sp_core::testing::TaskExecutor::new(); - let (mut context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); + let (mut context, virtual_overseer) = + polkadot_node_subsystem_test_helpers::make_subsystem_context(pool.clone()); let mut view = View::new(); let subsystem = async move { diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index d19783212644..a81d22c6f828 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -26,5 +26,5 @@ schnellru = "0.2.1" sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } +polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } rstest = "0.18.2" diff --git a/polkadot/node/core/provisioner/src/disputes/prioritized_selection/tests.rs b/polkadot/node/core/provisioner/src/disputes/prioritized_selection/tests.rs index f6c49e52eeb9..ecb7aac78396 100644 --- 
a/polkadot/node/core/provisioner/src/disputes/prioritized_selection/tests.rs +++ b/polkadot/node/core/provisioner/src/disputes/prioritized_selection/tests.rs @@ -29,7 +29,6 @@ use polkadot_primitives::{ CandidateHash, DisputeState, InvalidDisputeStatementKind, SessionIndex, ValidDisputeStatementKind, ValidatorSignature, }; -use test_helpers; // // Unit tests for various functions @@ -428,7 +427,7 @@ impl TestDisputes { let onchain_votes_count = self.validators_count * 80 / 100; let session_idx = 0; let lf = leaf(); - let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash); + let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Active); self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); @@ -446,7 +445,7 @@ impl TestDisputes { let onchain_votes_count = self.validators_count * 40 / 100; let session_idx = 1; let lf = leaf(); - let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash); + let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Active); self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); @@ -463,7 +462,7 @@ impl TestDisputes { let local_votes_count = self.validators_count * 90 / 100; let session_idx = 2; let lf = leaf(); - let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash); + let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Confirmed); self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); @@ -479,7 +478,7 @@ impl TestDisputes { let onchain_votes_count = self.validators_count * 75 / 100; let session_idx = 3; let lf = leaf(); - let dummy_receipt = 
test_helpers::dummy_candidate_receipt(lf.hash); + let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::ConcludedFor(0)); self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); @@ -495,7 +494,7 @@ impl TestDisputes { let local_votes_count = self.validators_count * 90 / 100; let session_idx = 4; let lf = leaf(); - let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash); + let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::ConcludedFor(0)); self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); @@ -511,7 +510,7 @@ impl TestDisputes { let onchain_votes_count = self.validators_count * 10 / 100; let session_idx = 5; let lf = leaf(); - let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash); + let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Active); self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); @@ -528,7 +527,7 @@ impl TestDisputes { let local_votes_count = self.validators_count * 10 / 100; let session_idx = 6; let lf = leaf(); - let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash); + let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Active); self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); @@ -547,7 +546,7 @@ impl TestDisputes { .map(|idx| { ( ValidatorIndex(idx as u32), - (statement_kind.clone(), test_helpers::dummy_signature()), + (statement_kind.clone(), polkadot_primitives_test_helpers::dummy_signature()), ) }) .collect::>() 
diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs index d463b7f16633..0d3675777cbf 100644 --- a/polkadot/node/core/provisioner/src/tests.rs +++ b/polkadot/node/core/provisioner/src/tests.rs @@ -15,9 +15,9 @@ // along with Polkadot. If not, see . use super::*; -use ::test_helpers::{dummy_candidate_descriptor, dummy_hash}; use bitvec::bitvec; use polkadot_primitives::{OccupiedCore, ScheduledCore}; +use polkadot_primitives_test_helpers::{dummy_candidate_descriptor, dummy_hash}; const MOCK_GROUP_SIZE: usize = 5; @@ -244,7 +244,6 @@ mod select_candidates { super::*, build_occupied_core, common::test_harness, default_bitvec, occupied_core, scheduled_core, MOCK_GROUP_SIZE, }; - use ::test_helpers::{dummy_candidate_descriptor, dummy_hash}; use futures::channel::mpsc; use polkadot_node_subsystem::messages::{ AllMessages, RuntimeApiMessage, @@ -257,6 +256,7 @@ mod select_candidates { use polkadot_primitives::{ BlockNumber, CandidateCommitments, CommittedCandidateReceipt, PersistedValidationData, }; + use polkadot_primitives_test_helpers::{dummy_candidate_descriptor, dummy_hash}; use rstest::rstest; use std::ops::Not; use CoreState::{Free, Scheduled}; diff --git a/polkadot/node/core/pvf-checker/Cargo.toml b/polkadot/node/core/pvf-checker/Cargo.toml index 91b12b868097..6dec407e2d2d 100644 --- a/polkadot/node/core/pvf-checker/Cargo.toml +++ b/polkadot/node/core/pvf-checker/Cargo.toml @@ -28,6 +28,6 @@ sp-runtime = { path = "../../../../substrate/primitives/runtime" } sc-keystore = { path = "../../../../substrate/client/keystore" } sp-keyring = { path = "../../../../substrate/primitives/keyring" } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } +polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } sp-application-crypto = { path = 
"../../../../substrate/primitives/application-crypto" } futures-timer = "3.0.2" diff --git a/polkadot/node/core/pvf-checker/src/tests.rs b/polkadot/node/core/pvf-checker/src/tests.rs index b2365fe53e52..e12a44ddd2af 100644 --- a/polkadot/node/core/pvf-checker/src/tests.rs +++ b/polkadot/node/core/pvf-checker/src/tests.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use ::test_helpers::{dummy_digest, dummy_hash, validator_pubkeys}; use futures::{channel::oneshot, future::BoxFuture, prelude::*}; use polkadot_node_subsystem::{ messages::{ @@ -30,6 +29,7 @@ use polkadot_primitives::{ BlockNumber, Hash, Header, PvfCheckStatement, SessionIndex, ValidationCode, ValidationCodeHash, ValidatorId, }; +use polkadot_primitives_test_helpers::{dummy_digest, dummy_hash, validator_pubkeys}; use sp_application_crypto::AppCrypto; use sp_core::testing::TaskExecutor; use sp_keyring::Sr25519Keyring; diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index ba9954a10668..8aebe0b4c3f0 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -25,7 +25,7 @@ tempfile = "3.3.0" thiserror = { workspace = true } tokio = { version = "1.24.2", features = ["fs", "process"] } -parity-scale-codec = { version = "3.6.12", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } @@ -56,8 +56,8 @@ polkadot-node-core-pvf-common = { path = "common", features = ["test-utils"] } polkadot-node-core-pvf = { path = "", features = ["test-utils"] } rococo-runtime = { path = "../../../runtime/rococo" } -adder = { package = "test-parachain-adder", path = "../../../parachain/test-parachains/adder" } -halt = { package = "test-parachain-halt", path = "../../../parachain/test-parachains/halt" } +test-parachain-adder = { path = "../../../parachain/test-parachains/adder" } 
+test-parachain-halt = { path = "../../../parachain/test-parachains/halt" } [target.'cfg(target_os = "linux")'.dev-dependencies] libc = "0.2.153" diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index 5ad7409cc6c7..491f6cc49642 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -17,7 +17,7 @@ libc = "0.2.152" nix = { version = "0.28.0", features = ["resource", "sched"] } thiserror = { workspace = true } -parity-scale-codec = { version = "3.6.12", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } diff --git a/polkadot/node/core/pvf/common/src/error.rs b/polkadot/node/core/pvf/common/src/error.rs index adeb40c0b195..7ee05448d3c5 100644 --- a/polkadot/node/core/pvf/common/src/error.rs +++ b/polkadot/node/core/pvf/common/src/error.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use crate::prepare::{PrepareSuccess, PrepareWorkerSuccess}; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; pub use sc_executor_common::error::Error as ExecuteError; /// Result of PVF preparation from a worker, with checksum of the compiled PVF and stats of the diff --git a/polkadot/node/core/pvf/common/src/execute.rs b/polkadot/node/core/pvf/common/src/execute.rs index ae6096cacec4..46862f9f80b6 100644 --- a/polkadot/node/core/pvf/common/src/execute.rs +++ b/polkadot/node/core/pvf/common/src/execute.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . 
use crate::error::InternalValidationError; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use polkadot_parachain_primitives::primitives::ValidationResult; use polkadot_primitives::ExecutorParams; use std::time::Duration; diff --git a/polkadot/node/core/pvf/common/src/executor_interface.rs b/polkadot/node/core/pvf/common/src/executor_interface.rs index 252e611db8a4..87491e70c5f2 100644 --- a/polkadot/node/core/pvf/common/src/executor_interface.rs +++ b/polkadot/node/core/pvf/common/src/executor_interface.rs @@ -372,7 +372,7 @@ impl sp_core::traits::ReadRuntimeVersion for ReadRuntimeVersion { .map_err(|e| format!("Failed to read the static section from the PVF blob: {:?}", e))? { Some(version) => { - use parity_scale_codec::Encode; + use codec::Encode; Ok(version.encode()) }, None => Err("runtime version section is not found".to_string()), diff --git a/polkadot/node/core/pvf/common/src/lib.rs b/polkadot/node/core/pvf/common/src/lib.rs index 0cd928201639..30d0aa445281 100644 --- a/polkadot/node/core/pvf/common/src/lib.rs +++ b/polkadot/node/core/pvf/common/src/lib.rs @@ -32,7 +32,7 @@ pub use sp_tracing; const LOG_TARGET: &str = "parachain::pvf-common"; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use std::{ io::{self, Read, Write}, mem, diff --git a/polkadot/node/core/pvf/common/src/prepare.rs b/polkadot/node/core/pvf/common/src/prepare.rs index 28ab682ec136..64e7f3d6bcf4 100644 --- a/polkadot/node/core/pvf/common/src/prepare.rs +++ b/polkadot/node/core/pvf/common/src/prepare.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use std::path::PathBuf; /// Result from prepare worker if successful. 
diff --git a/polkadot/node/core/pvf/common/src/pvf.rs b/polkadot/node/core/pvf/common/src/pvf.rs index 5f248f49b9a3..e2ac36a2406a 100644 --- a/polkadot/node/core/pvf/common/src/pvf.rs +++ b/polkadot/node/core/pvf/common/src/pvf.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use crate::prepare::PrepareJobKind; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use polkadot_parachain_primitives::primitives::ValidationCodeHash; use polkadot_primitives::ExecutorParams; use std::{fmt, sync::Arc, time::Duration}; diff --git a/polkadot/node/core/pvf/common/src/worker/mod.rs b/polkadot/node/core/pvf/common/src/worker/mod.rs index 67e7bece407d..70dcf055a262 100644 --- a/polkadot/node/core/pvf/common/src/worker/mod.rs +++ b/polkadot/node/core/pvf/common/src/worker/mod.rs @@ -21,10 +21,10 @@ pub mod security; use crate::{ framed_recv_blocking, framed_send_blocking, SecurityStatus, WorkerHandshake, LOG_TARGET, }; +use codec::{Decode, Encode}; use cpu_time::ProcessTime; use futures::never::Never; use nix::{errno::Errno, sys::resource::Usage}; -use parity_scale_codec::{Decode, Encode}; use std::{ any::Any, fmt::{self}, diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index ac90fac4d57a..cf5b873e29d7 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -16,7 +16,7 @@ cfg-if = "1.0" nix = { version = "0.28.0", features = ["process", "resource", "sched"] } libc = "0.2.152" -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } polkadot-node-core-pvf-common = { path = "../common" } polkadot-parachain-primitives = { path = "../../../../parachain" } diff --git a/polkadot/node/core/pvf/execute-worker/src/lib.rs b/polkadot/node/core/pvf/execute-worker/src/lib.rs index 
55f5290bd87e..35858ab36cec 100644 --- a/polkadot/node/core/pvf/execute-worker/src/lib.rs +++ b/polkadot/node/core/pvf/execute-worker/src/lib.rs @@ -27,6 +27,7 @@ pub use polkadot_node_core_pvf_common::{ // separate spawned processes. Run with e.g. `RUST_LOG=parachain::pvf-execute-worker=trace`. const LOG_TARGET: &str = "parachain::pvf-execute-worker"; +use codec::{Decode, Encode}; use cpu_time::ProcessTime; use nix::{ errno::Errno, @@ -36,7 +37,6 @@ use nix::{ }, unistd::{ForkResult, Pid}, }; -use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::InternalValidationError, execute::{Handshake, JobError, JobResponse, JobResult, WorkerError, WorkerResponse}, diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index 1850a2048907..f7daa0d7a89c 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -20,7 +20,7 @@ tikv-jemalloc-ctl = { version = "0.5.0", optional = true } tikv-jemallocator = { version = "0.5.0", optional = true } nix = { version = "0.28.0", features = ["process", "resource", "sched"] } -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } polkadot-node-core-pvf-common = { path = "../common" } polkadot-primitives = { path = "../../../../primitives" } diff --git a/polkadot/node/core/pvf/prepare-worker/src/lib.rs b/polkadot/node/core/pvf/prepare-worker/src/lib.rs index d1b218f48ae8..ef33d11720eb 100644 --- a/polkadot/node/core/pvf/prepare-worker/src/lib.rs +++ b/polkadot/node/core/pvf/prepare-worker/src/lib.rs @@ -39,7 +39,7 @@ use polkadot_node_core_pvf_common::{ worker::{pipe2_cloexec, PipeFd, WorkerInfo}, }; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::{PrepareError, 
PrepareWorkerResult}, executor_interface::create_runtime_from_artifact_bytes, diff --git a/polkadot/node/core/pvf/src/execute/worker_interface.rs b/polkadot/node/core/pvf/src/execute/worker_interface.rs index 9dcadfb4c2a7..d15d7c15426e 100644 --- a/polkadot/node/core/pvf/src/execute/worker_interface.rs +++ b/polkadot/node/core/pvf/src/execute/worker_interface.rs @@ -24,9 +24,9 @@ use crate::{ }, LOG_TARGET, }; +use codec::{Decode, Encode}; use futures::FutureExt; use futures_timer::Delay; -use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::InternalValidationError, execute::{Handshake, WorkerError, WorkerResponse}, diff --git a/polkadot/node/core/pvf/src/prepare/worker_interface.rs b/polkadot/node/core/pvf/src/prepare/worker_interface.rs index d64ee1510cad..5c4245d76315 100644 --- a/polkadot/node/core/pvf/src/prepare/worker_interface.rs +++ b/polkadot/node/core/pvf/src/prepare/worker_interface.rs @@ -25,7 +25,7 @@ use crate::{ }, LOG_TARGET, }; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::{PrepareError, PrepareResult, PrepareWorkerResult}, prepare::{PrepareStats, PrepareSuccess, PrepareWorkerSuccess}, diff --git a/polkadot/node/core/pvf/src/worker_interface.rs b/polkadot/node/core/pvf/src/worker_interface.rs index 93fffc806622..e63778d4692f 100644 --- a/polkadot/node/core/pvf/src/worker_interface.rs +++ b/polkadot/node/core/pvf/src/worker_interface.rs @@ -17,9 +17,9 @@ //! Common logic for implementation of worker processes. 
use crate::LOG_TARGET; +use codec::Encode; use futures::FutureExt as _; use futures_timer::Delay; -use parity_scale_codec::Encode; use pin_project::pin_project; use polkadot_node_core_pvf_common::{SecurityStatus, WorkerHandshake}; use rand::Rng; diff --git a/polkadot/node/core/pvf/tests/it/adder.rs b/polkadot/node/core/pvf/tests/it/adder.rs index 9a7ddcb40890..455e8c36c88d 100644 --- a/polkadot/node/core/pvf/tests/it/adder.rs +++ b/polkadot/node/core/pvf/tests/it/adder.rs @@ -17,12 +17,12 @@ //! PVF host integration tests checking the chain production pipeline. use super::TestHost; -use adder::{hash_state, BlockData, HeadData}; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use polkadot_parachain_primitives::primitives::{ BlockData as GenericBlockData, HeadData as GenericHeadData, RelayChainBlockNumber, ValidationParams, }; +use test_parachain_adder::{hash_state, BlockData, HeadData}; #[tokio::test] async fn execute_good_block_on_parent() { @@ -34,7 +34,7 @@ async fn execute_good_block_on_parent() { let ret = host .validate_candidate( - adder::wasm_binary_unwrap(), + test_parachain_adder::wasm_binary_unwrap(), ValidationParams { parent_head: GenericHeadData(parent_head.encode()), block_data: GenericBlockData(block_data.encode()), @@ -68,7 +68,7 @@ async fn execute_good_chain_on_parent() { let ret = host .validate_candidate( - adder::wasm_binary_unwrap(), + test_parachain_adder::wasm_binary_unwrap(), ValidationParams { parent_head: GenericHeadData(parent_head.encode()), block_data: GenericBlockData(block_data.encode()), @@ -104,7 +104,7 @@ async fn execute_bad_block_on_parent() { let _err = host .validate_candidate( - adder::wasm_binary_unwrap(), + test_parachain_adder::wasm_binary_unwrap(), ValidationParams { parent_head: GenericHeadData(parent_head.encode()), block_data: GenericBlockData(block_data.encode()), @@ -126,7 +126,7 @@ async fn stress_spawn() { let block_data = BlockData { state: 0, add: 512 }; let ret = host 
.validate_candidate( - adder::wasm_binary_unwrap(), + test_parachain_adder::wasm_binary_unwrap(), ValidationParams { parent_head: GenericHeadData(parent_head.encode()), block_data: GenericBlockData(block_data.encode()), @@ -163,7 +163,7 @@ async fn execute_can_run_serially() { let block_data = BlockData { state: 0, add: 512 }; let ret = host .validate_candidate( - adder::wasm_binary_unwrap(), + test_parachain_adder::wasm_binary_unwrap(), ValidationParams { parent_head: GenericHeadData(parent_head.encode()), block_data: GenericBlockData(block_data.encode()), diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index 6961b93832ab..d62a1aef2309 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -17,7 +17,7 @@ //! General PVF host integration tests checking the functionality of the PVF host itself. use assert_matches::assert_matches; -use parity_scale_codec::Encode as _; +use codec::Encode as _; #[cfg(all(feature = "ci-only-tests", target_os = "linux"))] use polkadot_node_core_pvf::SecurityStatus; use polkadot_node_core_pvf::{ @@ -163,7 +163,7 @@ async fn execute_job_terminates_on_timeout() { let start = std::time::Instant::now(); let result = host .validate_candidate( - halt::wasm_binary_unwrap(), + test_parachain_halt::wasm_binary_unwrap(), ValidationParams { block_data: BlockData(Vec::new()), parent_head: Default::default(), @@ -190,7 +190,7 @@ async fn ensure_parallel_execution() { // Run some jobs that do not complete, thus timing out. 
let host = TestHost::new().await; let execute_pvf_future_1 = host.validate_candidate( - halt::wasm_binary_unwrap(), + test_parachain_halt::wasm_binary_unwrap(), ValidationParams { block_data: BlockData(Vec::new()), parent_head: Default::default(), @@ -200,7 +200,7 @@ async fn ensure_parallel_execution() { Default::default(), ); let execute_pvf_future_2 = host.validate_candidate( - halt::wasm_binary_unwrap(), + test_parachain_halt::wasm_binary_unwrap(), ValidationParams { block_data: BlockData(Vec::new()), parent_head: Default::default(), @@ -244,7 +244,7 @@ async fn execute_queue_doesnt_stall_if_workers_died() { let start = std::time::Instant::now(); futures::future::join_all((0u8..=8).map(|_| { host.validate_candidate( - halt::wasm_binary_unwrap(), + test_parachain_halt::wasm_binary_unwrap(), ValidationParams { block_data: BlockData(Vec::new()), parent_head: Default::default(), @@ -287,7 +287,7 @@ async fn execute_queue_doesnt_stall_with_varying_executor_params() { let start = std::time::Instant::now(); futures::future::join_all((0u8..6).map(|i| { host.validate_candidate( - halt::wasm_binary_unwrap(), + test_parachain_halt::wasm_binary_unwrap(), ValidationParams { block_data: BlockData(Vec::new()), parent_head: Default::default(), @@ -325,7 +325,10 @@ async fn deleting_prepared_artifact_does_not_dispute() { let host = TestHost::new().await; let cache_dir = host.cache_dir.path(); - let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), Default::default()).await.unwrap(); + let _stats = host + .precheck_pvf(test_parachain_halt::wasm_binary_unwrap(), Default::default()) + .await + .unwrap(); // Manually delete the prepared artifact from disk. The in-memory artifacts table won't change. { @@ -345,7 +348,7 @@ async fn deleting_prepared_artifact_does_not_dispute() { // Try to validate, artifact should get recreated. 
let result = host .validate_candidate( - halt::wasm_binary_unwrap(), + test_parachain_halt::wasm_binary_unwrap(), ValidationParams { block_data: BlockData(Vec::new()), parent_head: Default::default(), @@ -365,7 +368,10 @@ async fn corrupted_prepared_artifact_does_not_dispute() { let host = TestHost::new().await; let cache_dir = host.cache_dir.path(); - let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), Default::default()).await.unwrap(); + let _stats = host + .precheck_pvf(test_parachain_halt::wasm_binary_unwrap(), Default::default()) + .await + .unwrap(); // Manually corrupting the prepared artifact from disk. The in-memory artifacts table won't // change. @@ -395,7 +401,7 @@ async fn corrupted_prepared_artifact_does_not_dispute() { // Try to validate, artifact should get removed because of the corruption. let result = host .validate_candidate( - halt::wasm_binary_unwrap(), + test_parachain_halt::wasm_binary_unwrap(), ValidationParams { block_data: BlockData(Vec::new()), parent_head: Default::default(), @@ -412,7 +418,9 @@ async fn corrupted_prepared_artifact_does_not_dispute() { ); // because of RuntimeConstruction we may retry - host.precheck_pvf(halt::wasm_binary_unwrap(), Default::default()).await.unwrap(); + host.precheck_pvf(test_parachain_halt::wasm_binary_unwrap(), Default::default()) + .await + .unwrap(); // The actual artifact removal is done concurrently // with sending of the result of the execution @@ -437,7 +445,10 @@ async fn cache_cleared_on_startup() { // Don't drop this host, it owns the `TempDir` which gets cleared on drop. let host = TestHost::new().await; - let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), Default::default()).await.unwrap(); + let _stats = host + .precheck_pvf(test_parachain_halt::wasm_binary_unwrap(), Default::default()) + .await + .unwrap(); // The cache dir should contain one artifact and one worker dir. 
let cache_dir = host.cache_dir.path().to_owned(); @@ -461,7 +472,7 @@ async fn prechecking_within_memory_limits() { let host = TestHost::new().await; let result = host .precheck_pvf( - ::adder::wasm_binary_unwrap(), + ::test_parachain_adder::wasm_binary_unwrap(), ExecutorParams::from(&[ExecutorParam::PrecheckingMaxMemory(10 * 1024 * 1024)][..]), ) .await; @@ -480,7 +491,7 @@ async fn prechecking_out_of_memory() { let host = TestHost::new().await; let result = host .precheck_pvf( - ::adder::wasm_binary_unwrap(), + ::test_parachain_adder::wasm_binary_unwrap(), ExecutorParams::from(&[ExecutorParam::PrecheckingMaxMemory(512 * 1024)][..]), ) .await; @@ -497,12 +508,15 @@ async fn prepare_can_run_serially() { .await; let _stats = host - .precheck_pvf(::adder::wasm_binary_unwrap(), Default::default()) + .precheck_pvf(::test_parachain_adder::wasm_binary_unwrap(), Default::default()) .await .unwrap(); // Prepare a different wasm blob to prevent skipping work. - let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), Default::default()).await.unwrap(); + let _stats = host + .precheck_pvf(test_parachain_halt::wasm_binary_unwrap(), Default::default()) + .await + .unwrap(); } // CI machines should be able to enable all the security features. 
@@ -555,7 +569,7 @@ async fn nonexistent_cache_dir() { assert!(host.security_status().await.can_unshare_user_namespace_and_change_root); let _stats = host - .precheck_pvf(::adder::wasm_binary_unwrap(), Default::default()) + .precheck_pvf(::test_parachain_adder::wasm_binary_unwrap(), Default::default()) .await .unwrap(); } @@ -574,7 +588,10 @@ async fn artifact_does_not_reprepare_on_non_meaningful_exec_parameter_change() { let set2 = ExecutorParams::from(&[ExecutorParam::PvfExecTimeout(PvfExecKind::Backing, 2500)][..]); - let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), set1).await.unwrap(); + let _stats = host + .precheck_pvf(test_parachain_halt::wasm_binary_unwrap(), set1) + .await + .unwrap(); let md1 = { let mut cache_dir: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect(); @@ -590,7 +607,10 @@ async fn artifact_does_not_reprepare_on_non_meaningful_exec_parameter_change() { // second attifact will be different tokio::time::sleep(Duration::from_secs(2)).await; - let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), set2).await.unwrap(); + let _stats = host + .precheck_pvf(test_parachain_halt::wasm_binary_unwrap(), set2) + .await + .unwrap(); let md2 = { let mut cache_dir: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect(); @@ -619,12 +639,18 @@ async fn artifact_does_reprepare_on_meaningful_exec_parameter_change() { let set2 = ExecutorParams::from(&[ExecutorParam::PvfPrepTimeout(PvfPrepKind::Prepare, 60000)][..]); - let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), set1).await.unwrap(); + let _stats = host + .precheck_pvf(test_parachain_halt::wasm_binary_unwrap(), set1) + .await + .unwrap(); let cache_dir_contents: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect(); assert_eq!(cache_dir_contents.len(), 2); - let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), set2).await.unwrap(); + let _stats = host + .precheck_pvf(test_parachain_halt::wasm_binary_unwrap(), set2) + .await + .unwrap(); let cache_dir_contents: 
Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect(); assert_eq!(cache_dir_contents.len(), 3); // new artifact has been added diff --git a/polkadot/node/core/pvf/tests/it/process.rs b/polkadot/node/core/pvf/tests/it/process.rs index e989eb874ba9..b8fd2cdce0ce 100644 --- a/polkadot/node/core/pvf/tests/it/process.rs +++ b/polkadot/node/core/pvf/tests/it/process.rs @@ -18,9 +18,8 @@ //! spawned by the host) and job processes (spawned by the workers to securely perform PVF jobs). use super::TestHost; -use adder::{hash_state, BlockData, HeadData}; use assert_matches::assert_matches; -use parity_scale_codec::Encode; +use codec::Encode; use polkadot_node_core_pvf::{ InvalidCandidate, PossiblyInvalidError, PrepareError, ValidationError, }; @@ -30,6 +29,7 @@ use polkadot_parachain_primitives::primitives::{ use procfs::process; use rusty_fork::rusty_fork_test; use std::{future::Future, sync::Arc, time::Duration}; +use test_parachain_adder::{hash_state, BlockData, HeadData}; const PREPARE_PROCESS_NAME: &'static str = "polkadot-prepare-worker"; const EXECUTE_PROCESS_NAME: &'static str = "polkadot-execute-worker"; @@ -127,7 +127,7 @@ rusty_fork_test! { let block_data = BlockData { state: 0, add: 512 }; host .validate_candidate( - adder::wasm_binary_unwrap(), + test_parachain_adder::wasm_binary_unwrap(), ValidationParams { parent_head: GenericHeadData(parent_head.encode()), block_data: GenericBlockData(block_data.encode()), @@ -164,7 +164,7 @@ rusty_fork_test! { fn execute_worker_timeout() { test_wrapper(|host, sid| async move { // Prepare the artifact ahead of time. - let binary = halt::wasm_binary_unwrap(); + let binary = test_parachain_halt::wasm_binary_unwrap(); host.precheck_pvf(binary, Default::default()).await.unwrap(); let (result, _) = futures::join!( @@ -216,7 +216,7 @@ rusty_fork_test! { fn execute_worker_killed_during_job() { test_wrapper(|host, sid| async move { // Prepare the artifact ahead of time. 
- let binary = halt::wasm_binary_unwrap(); + let binary = test_parachain_halt::wasm_binary_unwrap(); host.precheck_pvf(binary, Default::default()).await.unwrap(); let (result, _) = futures::join!( @@ -272,7 +272,7 @@ rusty_fork_test! { fn forked_execute_job_killed_during_job() { test_wrapper(|host, sid| async move { // Prepare the artifact ahead of time. - let binary = halt::wasm_binary_unwrap(); + let binary = test_parachain_halt::wasm_binary_unwrap(); host.precheck_pvf(binary, Default::default()).await.unwrap(); let (result, _) = futures::join!( @@ -340,7 +340,7 @@ rusty_fork_test! { fn ensure_execute_processes_have_correct_num_threads() { test_wrapper(|host, sid| async move { // Prepare the artifact ahead of time. - let binary = halt::wasm_binary_unwrap(); + let binary = test_parachain_halt::wasm_binary_unwrap(); host.precheck_pvf(binary, Default::default()).await.unwrap(); let _ = futures::join!( diff --git a/polkadot/node/core/runtime-api/Cargo.toml b/polkadot/node/core/runtime-api/Cargo.toml index 91f5c35b2794..5524cc705457 100644 --- a/polkadot/node/core/runtime-api/Cargo.toml +++ b/polkadot/node/core/runtime-api/Cargo.toml @@ -29,4 +29,4 @@ async-trait = "0.1.79" futures = { version = "0.3.30", features = ["thread-pool"] } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } polkadot-node-primitives = { path = "../../primitives" } -test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } +polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs index 0113de83c89e..7c382707264f 100644 --- a/polkadot/node/core/runtime-api/src/tests.rs +++ b/polkadot/node/core/runtime-api/src/tests.rs @@ -27,13 +27,13 @@ use polkadot_primitives::{ PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, Slot, ValidationCode, 
ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; +use polkadot_primitives_test_helpers::{dummy_committed_candidate_receipt, dummy_validation_code}; use sp_api::ApiError; use sp_core::testing::TaskExecutor; use std::{ collections::{BTreeMap, HashMap, VecDeque}, sync::{Arc, Mutex}, }; -use test_helpers::{dummy_committed_candidate_receipt, dummy_validation_code}; #[derive(Default)] struct MockSubsystemClient { diff --git a/polkadot/node/jaeger/Cargo.toml b/polkadot/node/jaeger/Cargo.toml index f879f9550d01..18b0c417aaf3 100644 --- a/polkadot/node/jaeger/Cargo.toml +++ b/polkadot/node/jaeger/Cargo.toml @@ -21,4 +21,4 @@ sp-core = { path = "../../../substrate/primitives/core" } thiserror = { workspace = true } tokio = "1.37" log = { workspace = true, default-features = true } -parity-scale-codec = { version = "3.6.12", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } diff --git a/polkadot/node/jaeger/src/spans.rs b/polkadot/node/jaeger/src/spans.rs index fcee8be9a50f..efc1a9f91d19 100644 --- a/polkadot/node/jaeger/src/spans.rs +++ b/polkadot/node/jaeger/src/spans.rs @@ -83,7 +83,7 @@ //! # } //! ``` -use parity_scale_codec::Encode; +use codec::Encode; use polkadot_node_primitives::PoV; use polkadot_primitives::{ BlakeTwo256, CandidateHash, ChunkIndex, Hash, HashT, Id as ParaId, ValidatorIndex, diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml index 750074fa9b3c..fec148f7d381 100644 --- a/polkadot/node/malus/Cargo.toml +++ b/polkadot/node/malus/Cargo.toml @@ -48,7 +48,7 @@ clap = { version = "4.5.3", features = ["derive"] } futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../gum" } -erasure = { package = "polkadot-erasure-coding", path = "../../erasure-coding" } +polkadot-erasure-coding = { path = "../../erasure-coding" } rand = "0.8.5" # Required for worker binaries to build. 
diff --git a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs index 739ed40db362..6921352cdfc2 100644 --- a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs +++ b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs @@ -197,13 +197,13 @@ where let pov_hash = pov.hash(); let erasure_root = { - let chunks = erasure::obtain_chunks_v1( + let chunks = polkadot_erasure_coding::obtain_chunks_v1( n_validators as usize, &malicious_available_data, ) .unwrap(); - let branches = erasure::branches(chunks.as_ref()); + let branches = polkadot_erasure_coding::branches(chunks.as_ref()); branches.root() }; diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml index e3a53cc6df1b..55df8d3daf6d 100644 --- a/polkadot/node/metrics/Cargo.toml +++ b/polkadot/node/metrics/Cargo.toml @@ -19,10 +19,10 @@ metered = { package = "prioritized-metered-channel", version = "0.6.1", default- sc-service = { path = "../../../substrate/client/service" } sc-cli = { path = "../../../substrate/client/cli" } -substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } sc-tracing = { path = "../../../substrate/client/tracing" } codec = { package = "parity-scale-codec", version = "3.6.12" } -primitives = { package = "polkadot-primitives", path = "../../primitives" } +polkadot-primitives = { path = "../../primitives" } bs58 = { version = "0.5.0", features = ["alloc"] } log = { workspace = true, default-features = true } @@ -41,7 +41,7 @@ prometheus-parse = { version = "0.2.2" } default = [] runtime-metrics = [] runtime-benchmarks = [ + "polkadot-primitives/runtime-benchmarks", "polkadot-test-service/runtime-benchmarks", - "primitives/runtime-benchmarks", "sc-service/runtime-benchmarks", ] diff --git a/polkadot/node/metrics/src/lib.rs 
b/polkadot/node/metrics/src/lib.rs index 9cb0f289a580..3445c3de107a 100644 --- a/polkadot/node/metrics/src/lib.rs +++ b/polkadot/node/metrics/src/lib.rs @@ -45,7 +45,7 @@ pub fn logger_hook() -> impl FnOnce(&mut sc_cli::LoggerBuilder, &sc_service::Con /// This module reexports Prometheus types and defines the [`Metrics`](metrics::Metrics) trait. pub mod metrics { /// Reexport Substrate Prometheus types. - pub use substrate_prometheus_endpoint as prometheus; + pub use prometheus_endpoint as prometheus; /// Subsystem- or job-specific Prometheus metrics. /// diff --git a/polkadot/node/metrics/src/runtime/mod.rs b/polkadot/node/metrics/src/runtime/mod.rs index 7cd24b01c117..c5ece849aa3e 100644 --- a/polkadot/node/metrics/src/runtime/mod.rs +++ b/polkadot/node/metrics/src/runtime/mod.rs @@ -28,17 +28,17 @@ #![cfg(feature = "runtime-metrics")] use codec::Decode; -use primitives::{ +use polkadot_primitives::{ metric_definitions::{CounterDefinition, CounterVecDefinition, HistogramDefinition}, RuntimeMetricLabelValues, RuntimeMetricOp, RuntimeMetricUpdate, }; +use prometheus_endpoint::{ + register, Counter, CounterVec, Histogram, HistogramOpts, Opts, PrometheusError, Registry, U64, +}; use std::{ collections::hash_map::HashMap, sync::{Arc, Mutex, MutexGuard}, }; -use substrate_prometheus_endpoint::{ - register, Counter, CounterVec, Histogram, HistogramOpts, Opts, PrometheusError, Registry, U64, -}; mod parachain; /// Holds the registered Prometheus metric collections. diff --git a/polkadot/node/metrics/src/runtime/parachain.rs b/polkadot/node/metrics/src/runtime/parachain.rs index becc7c64d59d..7aecaf5590f1 100644 --- a/polkadot/node/metrics/src/runtime/parachain.rs +++ b/polkadot/node/metrics/src/runtime/parachain.rs @@ -18,7 +18,7 @@ //! All of the metrics have a correspondent runtime metric definition. 
use crate::runtime::RuntimeMetricsProvider; -use primitives::metric_definitions::{ +use polkadot_primitives::metric_definitions::{ PARACHAIN_CREATE_INHERENT_BITFIELDS_SIGNATURE_CHECKS, PARACHAIN_INHERENT_DATA_BITFIELDS_PROCESSED, PARACHAIN_INHERENT_DATA_CANDIDATES_PROCESSED, PARACHAIN_INHERENT_DATA_DISPUTE_SETS_PROCESSED, PARACHAIN_INHERENT_DATA_WEIGHT, diff --git a/polkadot/node/metrics/src/tests.rs b/polkadot/node/metrics/src/tests.rs index 861080228cd8..fde7c3144134 100644 --- a/polkadot/node/metrics/src/tests.rs +++ b/polkadot/node/metrics/src/tests.rs @@ -17,8 +17,8 @@ //! Polkadot runtime metrics integration test. use hyper::{Client, Uri}; +use polkadot_primitives::metric_definitions::PARACHAIN_INHERENT_DATA_BITFIELDS_PROCESSED; use polkadot_test_service::{node_config, run_validator_node, test_prometheus_config}; -use primitives::metric_definitions::PARACHAIN_INHERENT_DATA_BITFIELDS_PROCESSED; use sp_keyring::AccountKeyring::*; use std::collections::HashMap; diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs index 3159fe2ae5e8..5ad034464767 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -36,7 +36,6 @@ use polkadot_node_primitives::approval::{ use polkadot_node_subsystem::messages::{ network_bridge_event, AllMessages, ApprovalCheckError, ReportPeerMessage, }; -use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::{reputation::add_reputation, TimeoutExt as _}; use polkadot_primitives::{AuthorityDiscoveryId, BlakeTwo256, CoreIndex, HashT}; use polkadot_primitives_test_helpers::dummy_signature; @@ -44,7 +43,8 @@ use rand::SeedableRng; use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair; use sp_core::crypto::Pair as PairT; use std::time::Duration; -type VirtualOverseer = test_helpers::TestSubsystemContextHandle; +type VirtualOverseer = + 
polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; fn test_harness>( mut state: State, @@ -56,7 +56,8 @@ fn test_harness>( .try_init(); let pool = sp_core::testing::TaskExecutor::new(); - let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); + let (context, virtual_overseer) = + polkadot_node_subsystem_test_helpers::make_subsystem_context(pool.clone()); let subsystem = ApprovalDistribution::new(Default::default()); { @@ -3657,7 +3658,8 @@ fn batch_test_round(message_count: usize) { let pool = sp_core::testing::TaskExecutor::new(); let mut state = State::default(); - let (mut context, mut virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); + let (mut context, mut virtual_overseer) = + polkadot_node_subsystem_test_helpers::make_subsystem_context(pool.clone()); let subsystem = ApprovalDistribution::new(Default::default()); let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(12345); let mut sender = context.sender().clone(); diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index 01b208421d79..db3a0456d9ad 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } -parity-scale-codec = { version = "3.6.12", features = ["std"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["std"] } polkadot-primitives = { path = "../../../primitives" } polkadot-erasure-coding = { path = "../../../erasure-coding" } polkadot-node-network-protocol = { path = "../protocol" } diff --git a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs index f99002d4188b..6c632fa7efee 100644 --- 
a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs +++ b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs @@ -138,7 +138,7 @@ mod tests { use assert_matches::assert_matches; use futures::{executor, future}; - use parity_scale_codec::Encode; + use codec::Encode; use sc_network::ProtocolName; use sp_core::testing::TaskExecutor; @@ -169,10 +169,11 @@ mod tests { fn test_run(pov_hash: Hash, pov: PoV) { let pool = TaskExecutor::new(); - let (mut context, mut virtual_overseer) = test_helpers::make_subsystem_context::< - AvailabilityDistributionMessage, - TaskExecutor, - >(pool.clone()); + let (mut context, mut virtual_overseer) = + polkadot_node_subsystem_test_helpers::make_subsystem_context::< + AvailabilityDistributionMessage, + TaskExecutor, + >(pool.clone()); let keystore = make_ferdie_keystore(); let mut runtime = polkadot_node_subsystem_util::runtime::RuntimeInfo::new(Some(keystore)); diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs index 7bd36709bc5f..278608cc858d 100644 --- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs +++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs @@ -22,7 +22,7 @@ use futures::{ FutureExt, SinkExt, }; -use parity_scale_codec::Decode; +use codec::Decode; use polkadot_erasure_coding::branch_hash; use polkadot_node_network_protocol::request_response::{ outgoing::{OutgoingRequest, Recipient, RequestError, Requests}, diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs index 25fae37f725a..2cd4bf29a563 100644 --- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs +++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs 
@@ -16,7 +16,7 @@ use std::collections::HashMap; -use parity_scale_codec::Encode; +use codec::Encode; use futures::{ channel::{mpsc, oneshot}, diff --git a/polkadot/node/network/availability-distribution/src/responder.rs b/polkadot/node/network/availability-distribution/src/responder.rs index 2c1885d27727..fb08c4712503 100644 --- a/polkadot/node/network/availability-distribution/src/responder.rs +++ b/polkadot/node/network/availability-distribution/src/responder.rs @@ -20,8 +20,8 @@ use std::sync::Arc; use futures::{channel::oneshot, select, FutureExt}; +use codec::{Decode, Encode}; use fatality::Nested; -use parity_scale_codec::{Decode, Encode}; use polkadot_node_network_protocol::{ request_response::{v1, v2, IncomingRequest, IncomingRequestReceiver, IsRequest}, UnifiedReputationChange as Rep, diff --git a/polkadot/node/network/availability-distribution/src/tests/mod.rs b/polkadot/node/network/availability-distribution/src/tests/mod.rs index b30e11a293c8..3320871bceb5 100644 --- a/polkadot/node/network/availability-distribution/src/tests/mod.rs +++ b/polkadot/node/network/availability-distribution/src/tests/mod.rs @@ -25,8 +25,6 @@ use polkadot_node_network_protocol::request_response::{ use polkadot_primitives::{node_features, Block, CoreState, Hash, NodeFeatures}; use sp_keystore::KeystorePtr; -use polkadot_node_subsystem_test_helpers as test_helpers; - use super::*; mod state; @@ -44,7 +42,8 @@ fn test_harness>( sp_tracing::init_for_tests(); let pool = sp_core::testing::TaskExecutor::new(); - let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); + let (context, virtual_overseer) = + polkadot_node_subsystem_test_helpers::make_subsystem_context(pool.clone()); let (pov_req_receiver, pov_req_cfg) = IncomingRequest::get_config_receiver::< Block, diff --git a/polkadot/node/network/availability-distribution/src/tests/state.rs b/polkadot/node/network/availability-distribution/src/tests/state.rs index ecc3eefbf3da..befbff0a2f27 100644 --- 
a/polkadot/node/network/availability-distribution/src/tests/state.rs +++ b/polkadot/node/network/availability-distribution/src/tests/state.rs @@ -55,7 +55,9 @@ use test_helpers::mock::{make_ferdie_keystore, new_leaf}; use super::mock::{make_session_info, OccupiedCoreBuilder}; use crate::LOG_TARGET; -type VirtualOverseer = test_helpers::TestSubsystemContextHandle; +type VirtualOverseer = polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle< + AvailabilityDistributionMessage, +>; pub struct TestHarness { pub virtual_overseer: VirtualOverseer, pub pov_req_cfg: RequestResponseConfig, diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index 1c2b5f4968ad..1c9c861e6f73 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -25,7 +25,7 @@ polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-node-network-protocol = { path = "../protocol" } -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } sc-network = { path = "../../../../substrate/client/network" } [dev-dependencies] diff --git a/polkadot/node/network/availability-recovery/src/task/mod.rs b/polkadot/node/network/availability-recovery/src/task/mod.rs index 800a82947d6f..0a8b52411afe 100644 --- a/polkadot/node/network/availability-recovery/src/task/mod.rs +++ b/polkadot/node/network/availability-recovery/src/task/mod.rs @@ -30,7 +30,7 @@ pub use self::strategy::{REGULAR_CHUNKS_REQ_RETRY_LIMIT, SYSTEMATIC_CHUNKS_REQ_R use crate::{metrics::Metrics, ErasureTask, PostRecoveryCheck, LOG_TARGET}; -use parity_scale_codec::Encode; +use codec::Encode; use 
polkadot_node_primitives::AvailableData; use polkadot_node_subsystem::{messages::AvailabilityStoreMessage, overseer, RecoveryError}; use polkadot_primitives::{AuthorityDiscoveryId, CandidateHash, Hash}; diff --git a/polkadot/node/network/availability-recovery/src/task/strategy/mod.rs b/polkadot/node/network/availability-recovery/src/task/strategy/mod.rs index fb31ff6aa779..1403277c8a95 100644 --- a/polkadot/node/network/availability-recovery/src/task/strategy/mod.rs +++ b/polkadot/node/network/availability-recovery/src/task/strategy/mod.rs @@ -29,8 +29,8 @@ use crate::{ futures_undead::FuturesUndead, ErasureTask, PostRecoveryCheck, RecoveryParams, LOG_TARGET, }; +use codec::Decode; use futures::{channel::oneshot, SinkExt}; -use parity_scale_codec::Decode; use polkadot_erasure_coding::branch_hash; #[cfg(not(test))] use polkadot_node_network_protocol::request_response::CHUNK_REQUEST_TIMEOUT; @@ -636,11 +636,11 @@ mod tests { use super::*; use crate::{tests::*, Metrics, RecoveryStrategy, RecoveryTask}; use assert_matches::assert_matches; + use codec::Error as DecodingError; use futures::{ channel::mpsc::{self, UnboundedReceiver}, executor, future, Future, FutureExt, StreamExt, }; - use parity_scale_codec::Error as DecodingError; use polkadot_erasure_coding::{recovery_threshold, systematic_recovery_threshold}; use polkadot_node_network_protocol::request_response::Protocol; use polkadot_node_primitives::{BlockData, PoV}; diff --git a/polkadot/node/network/availability-recovery/src/tests.rs b/polkadot/node/network/availability-recovery/src/tests.rs index d0a4a2d8b60e..4fd9ede40ff6 100644 --- a/polkadot/node/network/availability-recovery/src/tests.rs +++ b/polkadot/node/network/availability-recovery/src/tests.rs @@ -24,7 +24,7 @@ use futures::{executor, future}; use futures_timer::Delay; use rstest::rstest; -use parity_scale_codec::Encode; +use codec::Encode; use polkadot_node_network_protocol::request_response::{ self as req_res, v1::{AvailableDataFetchingRequest, 
ChunkResponse}, diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml index b609fb1e0719..cd4e00ee1e4c 100644 --- a/polkadot/node/network/bridge/Cargo.toml +++ b/polkadot/node/network/bridge/Cargo.toml @@ -15,7 +15,7 @@ async-trait = "0.1.79" futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } polkadot-primitives = { path = "../../../primitives" } -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } sc-network = { path = "../../../../substrate/client/network" } sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } polkadot-node-metrics = { path = "../../metrics" } diff --git a/polkadot/node/network/bridge/src/lib.rs b/polkadot/node/network/bridge/src/lib.rs index 0305aaa067cc..0db18bc219a9 100644 --- a/polkadot/node/network/bridge/src/lib.rs +++ b/polkadot/node/network/bridge/src/lib.rs @@ -21,8 +21,8 @@ #![deny(unused_crate_dependencies)] #![warn(missing_docs)] +use codec::{Decode, Encode}; use futures::prelude::*; -use parity_scale_codec::{Decode, Encode}; use parking_lot::Mutex; use sp_consensus::SyncOracle; diff --git a/polkadot/node/network/bridge/src/network.rs b/polkadot/node/network/bridge/src/network.rs index 17d6676b8430..b31359f48a56 100644 --- a/polkadot/node/network/bridge/src/network.rs +++ b/polkadot/node/network/bridge/src/network.rs @@ -22,7 +22,7 @@ use std::{ use async_trait::async_trait; use parking_lot::Mutex; -use parity_scale_codec::Encode; +use codec::Encode; use sc_network::{ config::parse_addr, multiaddr::Multiaddr, service::traits::NetworkService, types::ProtocolName, diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs index 0a4497fc4b5a..84e935366d0c 100644 --- a/polkadot/node/network/bridge/src/rx/mod.rs +++ b/polkadot/node/network/bridge/src/rx/mod.rs 
@@ -20,8 +20,8 @@ use super::*; use always_assert::never; use bytes::Bytes; +use codec::{Decode, DecodeAll}; use net_protocol::filter_by_peer_version; -use parity_scale_codec::{Decode, DecodeAll}; use parking_lot::Mutex; use sc_network::{ diff --git a/polkadot/node/network/bridge/src/tx/tests.rs b/polkadot/node/network/bridge/src/tx/tests.rs index c3cf0f322f68..9265358196db 100644 --- a/polkadot/node/network/bridge/src/tx/tests.rs +++ b/polkadot/node/network/bridge/src/tx/tests.rs @@ -26,7 +26,7 @@ use sc_network::{ IfDisconnected, ObservedRole as SubstrateObservedRole, ProtocolName, ReputationChange, Roles, }; -use parity_scale_codec::DecodeAll; +use codec::DecodeAll; use polkadot_node_network_protocol::{ peer_set::{PeerSetProtocolNames, ValidationVersion}, request_response::{outgoing::Requests, ReqProtocolNames}, diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml index d7291552738d..a56c1c7dfe98 100644 --- a/polkadot/node/network/collator-protocol/Cargo.toml +++ b/polkadot/node/network/collator-protocol/Cargo.toml @@ -38,7 +38,7 @@ sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] } sp-keyring = { path = "../../../../substrate/primitives/keyring" } sc-keystore = { path = "../../../../substrate/client/keystore" } sc-network = { path = "../../../../substrate/client/network" } -parity-scale-codec = { version = "3.6.12", features = ["std"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["std"] } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index 412792bbecfb..a13e99df4ab4 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ 
b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -22,7 +22,7 @@ use assert_matches::assert_matches; use futures::{executor, future, Future}; use futures_timer::Delay; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use sc_network::config::IncomingRequest as RawIncomingRequest; use sp_core::crypto::Pair; @@ -222,7 +222,8 @@ impl TestState { } } -type VirtualOverseer = test_helpers::TestSubsystemContextHandle; +type VirtualOverseer = + polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; struct TestHarness { virtual_overseer: VirtualOverseer, @@ -244,7 +245,8 @@ fn test_harness>( let pool = sp_core::testing::TaskExecutor::new(); - let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); + let (context, virtual_overseer) = + polkadot_node_subsystem_test_helpers::make_subsystem_context(pool.clone()); let genesis_hash = Hash::repeat_byte(0xff); let req_protocol_names = ReqProtocolNames::new(&genesis_hash, None); diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs index 1ba6389212cc..3f4459d8e65d 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -132,7 +132,8 @@ impl Default for TestState { } } -type VirtualOverseer = test_helpers::TestSubsystemContextHandle; +type VirtualOverseer = + polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; struct TestHarness { virtual_overseer: VirtualOverseer, @@ -151,7 +152,8 @@ fn test_harness>( let pool = sp_core::testing::TaskExecutor::new(); - let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); + let (context, virtual_overseer) = + polkadot_node_subsystem_test_helpers::make_subsystem_context(pool.clone()); let keystore = Arc::new(sc_keystore::LocalKeystore::in_memory()); 
Keystore::sr25519_generate_new( diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml index dff285590d97..08713209bb74 100644 --- a/polkadot/node/network/dispute-distribution/Cargo.toml +++ b/polkadot/node/network/dispute-distribution/Cargo.toml @@ -14,7 +14,7 @@ futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } derive_more = "0.99.17" -parity-scale-codec = { version = "3.6.12", features = ["std"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["std"] } polkadot-primitives = { path = "../../../primitives" } polkadot-erasure-coding = { path = "../../../erasure-coding" } polkadot-node-subsystem = { path = "../../subsystem" } diff --git a/polkadot/node/network/dispute-distribution/src/tests/mod.rs b/polkadot/node/network/dispute-distribution/src/tests/mod.rs index 1d0d667f5ccf..60820e62ca2d 100644 --- a/polkadot/node/network/dispute-distribution/src/tests/mod.rs +++ b/polkadot/node/network/dispute-distribution/src/tests/mod.rs @@ -24,13 +24,13 @@ use std::{ }; use assert_matches::assert_matches; +use codec::{Decode, Encode}; use futures::{ channel::oneshot, future::{poll_fn, ready}, pin_mut, Future, }; use futures_timer::Delay; -use parity_scale_codec::{Decode, Encode}; use sc_network::{config::RequestResponseConfig, ProtocolName}; diff --git a/polkadot/node/network/gossip-support/src/tests.rs b/polkadot/node/network/gossip-support/src/tests.rs index cce78df38f30..42197d00e6f3 100644 --- a/polkadot/node/network/gossip-support/src/tests.rs +++ b/polkadot/node/network/gossip-support/src/tests.rs @@ -90,7 +90,8 @@ lazy_static! 
{ ]; } -type VirtualOverseer = test_helpers::TestSubsystemContextHandle; +type VirtualOverseer = + polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; #[derive(Debug, Clone)] struct MockAuthorityDiscovery { @@ -200,7 +201,8 @@ fn test_harness, AD: AuthorityDiscovery>( test_fn: impl FnOnce(VirtualOverseer) -> T, ) -> GossipSupport { let pool = sp_core::testing::TaskExecutor::new(); - let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); + let (context, virtual_overseer) = + polkadot_node_subsystem_test_helpers::make_subsystem_context(pool.clone()); let subsystem = subsystem.run(context); diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index c5015b8c6450..83145ce40130 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -16,7 +16,7 @@ hex = "0.4.3" polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } polkadot-node-jaeger = { path = "../../jaeger" } -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } sc-network = { path = "../../../../substrate/client/network" } sc-network-types = { path = "../../../../substrate/client/network/types" } sc-authority-discovery = { path = "../../../../substrate/client/authority-discovery" } diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs index c38838b1ef98..ca0f8a4e4849 100644 --- a/polkadot/node/network/protocol/src/lib.rs +++ b/polkadot/node/network/protocol/src/lib.rs @@ -19,7 +19,7 @@ #![deny(unused_crate_dependencies)] #![warn(missing_docs)] -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use polkadot_primitives::{BlockNumber, Hash}; use std::{collections::HashMap, fmt}; @@ -462,7 +462,7 @@ 
impl_versioned_try_from!( /// v1 notification protocol types. pub mod v1 { - use parity_scale_codec::{Decode, Encode}; + use codec::{Decode, Encode}; use polkadot_primitives::{ CandidateHash, CandidateIndex, CollatorId, CollatorSignature, CompactStatement, Hash, @@ -621,7 +621,7 @@ pub mod v1 { /// v2 network protocol types. pub mod v2 { use bitvec::{order::Lsb0, slice::BitSlice, vec::BitVec}; - use parity_scale_codec::{Decode, Encode}; + use codec::{Decode, Encode}; use polkadot_primitives::{ CandidateHash, CandidateIndex, CollatorId, CollatorSignature, GroupIndex, Hash, @@ -875,7 +875,7 @@ pub mod v2 { /// Purpose is for changing ApprovalDistributionMessage to /// include more than one assignment and approval in a message. pub mod v3 { - use parity_scale_codec::{Decode, Encode}; + use codec::{Decode, Encode}; use polkadot_node_primitives::approval::v2::{ CandidateBitfield, IndirectAssignmentCertV2, IndirectSignedApprovalVoteV2, diff --git a/polkadot/node/network/protocol/src/request_response/incoming/error.rs b/polkadot/node/network/protocol/src/request_response/incoming/error.rs index 7de9d919058a..d3aa0b7275c1 100644 --- a/polkadot/node/network/protocol/src/request_response/incoming/error.rs +++ b/polkadot/node/network/protocol/src/request_response/incoming/error.rs @@ -18,7 +18,7 @@ use sc_network_types::PeerId; -use parity_scale_codec::Error as DecodingError; +use codec::Error as DecodingError; #[allow(missing_docs)] #[fatality::fatality(splitable)] diff --git a/polkadot/node/network/protocol/src/request_response/incoming/mod.rs b/polkadot/node/network/protocol/src/request_response/incoming/mod.rs index e85390729ee3..9577c690ebdc 100644 --- a/polkadot/node/network/protocol/src/request_response/incoming/mod.rs +++ b/polkadot/node/network/protocol/src/request_response/incoming/mod.rs @@ -18,7 +18,7 @@ use std::marker::PhantomData; use futures::{channel::oneshot, StreamExt}; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use 
sc_network::{config as netconfig, NetworkBackend}; use sc_network_types::PeerId; diff --git a/polkadot/node/network/protocol/src/request_response/outgoing.rs b/polkadot/node/network/protocol/src/request_response/outgoing.rs index f578c4ffded3..27f0f34bf8d4 100644 --- a/polkadot/node/network/protocol/src/request_response/outgoing.rs +++ b/polkadot/node/network/protocol/src/request_response/outgoing.rs @@ -16,8 +16,8 @@ use futures::{channel::oneshot, prelude::Future, FutureExt}; +use codec::{Decode, Encode, Error as DecodingError}; use network::ProtocolName; -use parity_scale_codec::{Decode, Encode, Error as DecodingError}; use sc_network as network; use sc_network_types::PeerId; diff --git a/polkadot/node/network/protocol/src/request_response/v1.rs b/polkadot/node/network/protocol/src/request_response/v1.rs index c503c6e4df03..80721f1884af 100644 --- a/polkadot/node/network/protocol/src/request_response/v1.rs +++ b/polkadot/node/network/protocol/src/request_response/v1.rs @@ -16,7 +16,7 @@ //! Requests and responses as sent over the wire for the individual protocols. -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use polkadot_node_primitives::{ AvailableData, DisputeMessage, ErasureChunk, PoV, Proof, UncheckedDisputeMessage, diff --git a/polkadot/node/network/protocol/src/request_response/v2.rs b/polkadot/node/network/protocol/src/request_response/v2.rs index 7e1a2d989168..ae65b39cd406 100644 --- a/polkadot/node/network/protocol/src/request_response/v2.rs +++ b/polkadot/node/network/protocol/src/request_response/v2.rs @@ -16,7 +16,7 @@ //! Requests and responses as sent over the wire for the individual protocols. 
-use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use polkadot_node_primitives::ErasureChunk; use polkadot_primitives::{ diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml index 65224f9e2be6..b044acd1a86d 100644 --- a/polkadot/node/network/statement-distribution/Cargo.toml +++ b/polkadot/node/network/statement-distribution/Cargo.toml @@ -22,7 +22,7 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-node-network-protocol = { path = "../protocol" } arrayvec = "0.7.4" indexmap = "2.0.0" -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } thiserror = { workspace = true } fatality = "0.1.1" bitvec = "1" diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs index e22883f89376..264333435a00 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs @@ -14,8 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
+use codec::Encode; use net_protocol::{filter_by_peer_version, peer_set::ProtocolVersion}; -use parity_scale_codec::Encode; use polkadot_node_network_protocol::{ self as net_protocol, diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs index d4c5f95034ae..8e6fcbaebbf1 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -20,9 +20,9 @@ use super::*; use crate::{metrics::Metrics, *}; use assert_matches::assert_matches; +use codec::{Decode, Encode}; use futures::executor; use futures_timer::Delay; -use parity_scale_codec::{Decode, Encode}; use polkadot_node_network_protocol::{ grid_topology::{SessionGridTopology, TopologyPeerInfo}, peer_set::ValidationVersion, diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index 078d556391a3..119dc832d13a 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -44,8 +44,8 @@ use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair; use sp_keyring::Sr25519Keyring; use assert_matches::assert_matches; +use codec::Encode; use futures::Future; -use parity_scale_codec::Encode; use rand::{Rng, SeedableRng}; use test_helpers::mock::new_leaf; @@ -55,7 +55,8 @@ mod cluster; mod grid; mod requests; -type VirtualOverseer = test_helpers::TestSubsystemContextHandle; +type VirtualOverseer = + polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; const DEFAULT_ASYNC_BACKING_PARAMETERS: AsyncBackingParams = AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; @@ -371,7 +372,8 @@ fn test_harness>( let test_state = TestState::from_config(config, req_cfg.inbound_queue.unwrap(), &mut rng); - let (context, virtual_overseer) 
= test_helpers::make_subsystem_context(pool.clone()); + let (context, virtual_overseer) = + polkadot_node_subsystem_test_helpers::make_subsystem_context(pool.clone()); let subsystem = async move { let subsystem = crate::StatementDistributionSubsystem { keystore, diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index 4fdfda0dba24..dcb90bacdcde 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -17,7 +17,7 @@ use super::*; use bitvec::order::Lsb0; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use polkadot_node_network_protocol::{ request_response::v2 as request_v2, v2::BackedCandidateManifest, }; diff --git a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml index ef79cfe2f702..e77cead4a756 100644 --- a/polkadot/node/overseer/Cargo.toml +++ b/polkadot/node/overseer/Cargo.toml @@ -10,7 +10,7 @@ description = "System overseer of the Polkadot node" workspace = true [dependencies] -client = { package = "sc-client-api", path = "../../../substrate/client/api" } +sc-client-api = { path = "../../../substrate/client/api" } sp-api = { path = "../../../substrate/primitives/api" } futures = "0.3.30" futures-timer = "3.0.2" @@ -32,8 +32,8 @@ sp-core = { path = "../../../substrate/primitives/core" } futures = { version = "0.3.30", features = ["thread-pool"] } femme = "2.2.1" assert_matches = "1.4.0" -test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" } -node-test-helpers = { package = "polkadot-node-subsystem-test-helpers", path = "../subsystem-test-helpers" } +polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } +polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } [target.'cfg(target_os = "linux")'.dependencies] 
tikv-jemalloc-ctl = "0.5.0" diff --git a/polkadot/node/overseer/examples/minimal-example.rs b/polkadot/node/overseer/examples/minimal-example.rs index 857cdba673db..86a1801a5f2d 100644 --- a/polkadot/node/overseer/examples/minimal-example.rs +++ b/polkadot/node/overseer/examples/minimal-example.rs @@ -23,7 +23,6 @@ use futures_timer::Delay; use orchestra::async_trait; use std::time::Duration; -use ::test_helpers::{dummy_candidate_descriptor, dummy_hash}; use polkadot_node_primitives::{BlockData, PoV}; use polkadot_node_subsystem_types::messages::CandidateValidationMessage; use polkadot_overseer::{ @@ -33,6 +32,7 @@ use polkadot_overseer::{ HeadSupportsParachains, SubsystemError, }; use polkadot_primitives::{CandidateReceipt, Hash, PvfExecKind}; +use polkadot_primitives_test_helpers::{dummy_candidate_descriptor, dummy_hash}; struct AlwaysSupportsParachains; diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs index 167b32a15bc4..24985a99913d 100644 --- a/polkadot/node/overseer/src/lib.rs +++ b/polkadot/node/overseer/src/lib.rs @@ -71,8 +71,8 @@ use std::{ use futures::{channel::oneshot, future::BoxFuture, select, Future, FutureExt, StreamExt}; -use client::{BlockImportNotification, BlockchainEvents, FinalityNotification}; use polkadot_primitives::{Block, BlockNumber, Hash}; +use sc_client_api::{BlockImportNotification, BlockchainEvents, FinalityNotification}; use self::messages::{BitfieldSigningMessage, PvfCheckerMessage}; use polkadot_node_subsystem_types::messages::{ diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs index 87484914ef97..177e3addf368 100644 --- a/polkadot/node/overseer/src/tests.rs +++ b/polkadot/node/overseer/src/tests.rs @@ -18,13 +18,12 @@ use async_trait::async_trait; use futures::{executor, pending, pin_mut, poll, select, stream, FutureExt}; use std::{collections::HashMap, sync::atomic, task::Poll}; -use ::test_helpers::{dummy_candidate_descriptor, dummy_candidate_receipt, 
dummy_hash}; -use node_test_helpers::mock::{dummy_unpin_handle, new_leaf}; use polkadot_node_network_protocol::{PeerId, UnifiedReputationChange}; use polkadot_node_primitives::{ BlockData, CollationGenerationConfig, CollationResult, DisputeMessage, InvalidDisputeVote, PoV, UncheckedDisputeMessage, ValidDisputeVote, }; +use polkadot_node_subsystem_test_helpers::mock::{dummy_unpin_handle, new_leaf}; use polkadot_node_subsystem_types::messages::{ NetworkBridgeEvent, ReportPeerMessage, RuntimeApiRequest, }; @@ -32,6 +31,9 @@ use polkadot_primitives::{ CandidateHash, CandidateReceipt, CollatorPair, Id as ParaId, InvalidDisputeStatementKind, PvfExecKind, SessionIndex, ValidDisputeStatementKind, ValidatorIndex, }; +use polkadot_primitives_test_helpers::{ + dummy_candidate_descriptor, dummy_candidate_receipt, dummy_hash, +}; use crate::{ self as overseer, diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index 526d4e480bb0..0a84e5dae2a5 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -13,7 +13,7 @@ workspace = true bounded-vec = "0.7" futures = "0.3.30" polkadot-primitives = { path = "../../primitives" } -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } sp-core = { path = "../../../substrate/primitives/core" } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto" } sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } diff --git a/polkadot/node/primitives/src/approval.rs b/polkadot/node/primitives/src/approval.rs index b73cb4c717db..66883b33367b 100644 --- a/polkadot/node/primitives/src/approval.rs +++ b/polkadot/node/primitives/src/approval.rs @@ -23,7 +23,7 @@ pub mod v1 { Randomness, Slot, VrfPreOutput, VrfProof, VrfSignature, VrfTranscript, }; - use parity_scale_codec::{Decode, 
Encode}; + use codec::{Decode, Encode}; use polkadot_primitives::{ BlockNumber, CandidateHash, CandidateIndex, CoreIndex, Hash, Header, SessionIndex, ValidatorIndex, ValidatorSignature, @@ -212,7 +212,7 @@ pub mod v1 { /// A list of primitives introduced by v2. pub mod v2 { - use parity_scale_codec::{Decode, Encode}; + use codec::{Decode, Encode}; pub use sp_consensus_babe::{ Randomness, Slot, VrfPreOutput, VrfProof, VrfSignature, VrfTranscript, }; diff --git a/polkadot/node/primitives/src/disputes/message.rs b/polkadot/node/primitives/src/disputes/message.rs index 31fe73a7ba1c..f9dec073bf50 100644 --- a/polkadot/node/primitives/src/disputes/message.rs +++ b/polkadot/node/primitives/src/disputes/message.rs @@ -21,7 +21,7 @@ use thiserror::Error; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use super::{InvalidDisputeVote, SignedDisputeStatement, ValidDisputeVote}; use polkadot_primitives::{ diff --git a/polkadot/node/primitives/src/disputes/mod.rs b/polkadot/node/primitives/src/disputes/mod.rs index 5814ecee44f4..0f08b4733654 100644 --- a/polkadot/node/primitives/src/disputes/mod.rs +++ b/polkadot/node/primitives/src/disputes/mod.rs @@ -19,7 +19,7 @@ use std::collections::{ BTreeMap, BTreeSet, }; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use sp_application_crypto::AppCrypto; use sp_keystore::{Error as KeystoreError, KeystorePtr}; diff --git a/polkadot/node/primitives/src/disputes/status.rs b/polkadot/node/primitives/src/disputes/status.rs index d93c3ec846ce..b9a1c57d53de 100644 --- a/polkadot/node/primitives/src/disputes/status.rs +++ b/polkadot/node/primitives/src/disputes/status.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; /// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS /// reboots. 
diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index 5f007bc8d67d..aded1b8fe734 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -25,8 +25,8 @@ use std::pin::Pin; use bounded_vec::BoundedVec; +use codec::{Decode, Encode, Error as CodecError, Input}; use futures::Future; -use parity_scale_codec::{Decode, Encode, Error as CodecError, Input}; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use polkadot_primitives::{ diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 0dfdf926b1b0..ec5113d2c8a5 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -13,9 +13,9 @@ workspace = true [dependencies] # Substrate Client sc-authority-discovery = { path = "../../../substrate/client/authority-discovery" } -babe = { package = "sc-consensus-babe", path = "../../../substrate/client/consensus/babe" } -beefy = { package = "sc-consensus-beefy", path = "../../../substrate/client/consensus/beefy" } -grandpa = { package = "sc-consensus-grandpa", path = "../../../substrate/client/consensus/grandpa" } +sc-consensus-babe = { path = "../../../substrate/client/consensus/babe" } +sc-consensus-beefy = { path = "../../../substrate/client/consensus/beefy" } +sc-consensus-grandpa = { path = "../../../substrate/client/consensus/grandpa" } mmr-gadget = { path = "../../../substrate/client/merkle-mountain-range" } sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range" } sc-block-builder = { path = "../../../substrate/client/block-builder" } @@ -35,14 +35,14 @@ sc-keystore = { path = "../../../substrate/client/keystore" } sc-basic-authorship = { path = "../../../substrate/client/basic-authorship" } sc-offchain = { path = "../../../substrate/client/offchain" } sc-sysinfo = { path = "../../../substrate/client/sysinfo" } -service = { package = "sc-service", path = "../../../substrate/client/service", 
default-features = false } -telemetry = { package = "sc-telemetry", path = "../../../substrate/client/telemetry" } +sc-service = { path = "../../../substrate/client/service", default-features = false } +sc-telemetry = { path = "../../../substrate/client/telemetry" } # Substrate Primitives sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery" } -consensus_common = { package = "sp-consensus", path = "../../../substrate/primitives/consensus/common" } -beefy-primitives = { package = "sp-consensus-beefy", path = "../../../substrate/primitives/consensus/beefy" } -grandpa_primitives = { package = "sp-consensus-grandpa", path = "../../../substrate/primitives/consensus/grandpa" } +sp-consensus = { path = "../../../substrate/primitives/consensus/common" } +sp-consensus-beefy = { path = "../../../substrate/primitives/consensus/beefy" } +sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" } sp-inherents = { path = "../../../substrate/primitives/inherents" } sp-keyring = { path = "../../../substrate/primitives/keyring" } sp-api = { path = "../../../substrate/primitives/api" } @@ -148,7 +148,7 @@ xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api" } [dev-dependencies] polkadot-test-client = { path = "../test/client" } polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } -test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" } +polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } env_logger = "0.11" assert_matches = "1.5.0" serial_test = "2.0.0" @@ -157,7 +157,7 @@ tempfile = "3.2" [features] default = ["db", "full-node"] -db = ["service/rocksdb"] +db = ["sc-service/rocksdb"] full-node = [ "kvdb-rocksdb", @@ -214,7 +214,7 @@ runtime-benchmarks = [ "polkadot-test-client/runtime-benchmarks", "rococo-runtime?/runtime-benchmarks", "sc-client-db/runtime-benchmarks", - "service/runtime-benchmarks", + 
"sc-service/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "westend-runtime?/runtime-benchmarks", "xcm-fee-payment-runtime-api/runtime-benchmarks", diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index c7019e3f0b22..0358bc300ab0 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -16,13 +16,13 @@ //! Polkadot chain configurations. -use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; -use grandpa::AuthorityId as GrandpaId; #[cfg(feature = "westend-native")] use pallet_staking::Forcing; use polkadot_primitives::{AccountId, AccountPublic, AssignmentId, ValidatorId}; +use sc_consensus_grandpa::AuthorityId as GrandpaId; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; +use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId; #[cfg(feature = "westend-native")] use polkadot_primitives::vstaging::SchedulerParams; @@ -31,13 +31,13 @@ use rococo_runtime as rococo; use sc_chain_spec::ChainSpecExtension; #[cfg(any(feature = "westend-native", feature = "rococo-native"))] use sc_chain_spec::ChainType; +#[cfg(any(feature = "westend-native", feature = "rococo-native"))] +use sc_telemetry::TelemetryEndpoints; use serde::{Deserialize, Serialize}; use sp_core::{sr25519, Pair, Public}; use sp_runtime::traits::IdentifyAccount; #[cfg(feature = "westend-native")] use sp_runtime::Perbill; -#[cfg(any(feature = "westend-native", feature = "rococo-native"))] -use telemetry::TelemetryEndpoints; #[cfg(feature = "westend-native")] use westend_runtime as westend; #[cfg(feature = "westend-native")] @@ -70,11 +70,11 @@ pub struct Extensions { } // Generic chain spec, in case when we don't have the native runtime. -pub type GenericChainSpec = service::GenericChainSpec; +pub type GenericChainSpec = sc_service::GenericChainSpec; /// The `ChainSpec` parameterized for the westend runtime. 
#[cfg(feature = "westend-native")] -pub type WestendChainSpec = service::GenericChainSpec; +pub type WestendChainSpec = sc_service::GenericChainSpec; /// The `ChainSpec` parameterized for the westend runtime. // Dummy chain spec, but that is fine when we don't have the native runtime. @@ -83,7 +83,7 @@ pub type WestendChainSpec = GenericChainSpec; /// The `ChainSpec` parameterized for the rococo runtime. #[cfg(feature = "rococo-native")] -pub type RococoChainSpec = service::GenericChainSpec; +pub type RococoChainSpec = sc_service::GenericChainSpec; /// The `ChainSpec` parameterized for the rococo runtime. // Dummy chain spec, but that is fine when we don't have the native runtime. diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index 34abc76813ff..dd8a0a7e635b 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -19,8 +19,6 @@ //! These are used to provide a type that implements these runtime APIs without requiring to import //! the native runtimes. -use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; -use grandpa_primitives::AuthorityId as GrandpaId; use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; use polkadot_primitives::{ runtime_api, slashing, AccountId, AuthorityDiscoveryId, Balance, Block, BlockNumber, @@ -30,6 +28,8 @@ use polkadot_primitives::{ ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; +use sp_consensus_beefy::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; +use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::OpaqueMetadata; use sp_runtime::{ @@ -232,30 +232,30 @@ sp_api::impl_runtime_apis! 
{ } } - impl beefy_primitives::BeefyApi for Runtime { + impl sp_consensus_beefy::BeefyApi for Runtime { fn beefy_genesis() -> Option { unimplemented!() } - fn validator_set() -> Option> { + fn validator_set() -> Option> { unimplemented!() } fn submit_report_equivocation_unsigned_extrinsic( - _: beefy_primitives::DoubleVotingProof< + _: sp_consensus_beefy::DoubleVotingProof< BlockNumber, BeefyId, BeefySignature, >, - _: beefy_primitives::OpaqueKeyOwnershipProof, + _: sp_consensus_beefy::OpaqueKeyOwnershipProof, ) -> Option<()> { unimplemented!() } fn generate_key_ownership_proof( - _: beefy_primitives::ValidatorSetId, + _: sp_consensus_beefy::ValidatorSetId, _: BeefyId, - ) -> Option { + ) -> Option { unimplemented!() } } @@ -291,29 +291,29 @@ sp_api::impl_runtime_apis! { } } - impl grandpa_primitives::GrandpaApi for Runtime { + impl sp_consensus_grandpa::GrandpaApi for Runtime { fn grandpa_authorities() -> Vec<(GrandpaId, u64)> { unimplemented!() } - fn current_set_id() -> grandpa_primitives::SetId { + fn current_set_id() -> sp_consensus_grandpa::SetId { unimplemented!() } fn submit_report_equivocation_unsigned_extrinsic( - _: grandpa_primitives::EquivocationProof< + _: sp_consensus_grandpa::EquivocationProof< ::Hash, sp_runtime::traits::NumberFor, >, - _: grandpa_primitives::OpaqueKeyOwnershipProof, + _: sp_consensus_grandpa::OpaqueKeyOwnershipProof, ) -> Option<()> { unimplemented!() } fn generate_key_ownership_proof( - _: grandpa_primitives::SetId, - _: grandpa_primitives::AuthorityId, - ) -> Option { + _: sp_consensus_grandpa::SetId, + _: sp_consensus_grandpa::AuthorityId, + ) -> Option { unimplemented!() } } diff --git a/polkadot/node/service/src/grandpa_support.rs b/polkadot/node/service/src/grandpa_support.rs index 729dbfde5c76..c85d5eb32b19 100644 --- a/polkadot/node/service/src/grandpa_support.rs +++ b/polkadot/node/service/src/grandpa_support.rs @@ -64,7 +64,7 @@ where /// w3f validators and randomly selected validators from the latest session (at /// 
#1500988). #[cfg(feature = "full-node")] -pub(crate) fn kusama_hard_forks() -> Vec> { +pub(crate) fn kusama_hard_forks() -> Vec> { use sp_core::crypto::Ss58Codec; use std::str::FromStr; @@ -141,7 +141,7 @@ pub(crate) fn kusama_hard_forks() -> Vec> { .into_iter() .map(|address| { ( - grandpa_primitives::AuthorityId::from_ss58check(address) + sp_consensus_grandpa::AuthorityId::from_ss58check(address) .expect("hard fork authority addresses are static and they should be carefully defined; qed."), 1, ) @@ -154,7 +154,7 @@ pub(crate) fn kusama_hard_forks() -> Vec> { let hash = Hash::from_str(hash) .expect("hard fork hashes are static and they should be carefully defined; qed."); - grandpa::AuthoritySetHardFork { + sc_consensus_grandpa::AuthoritySetHardFork { set_id, block: (hash, number), authorities: authorities.clone(), diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 9ee81f80d66a..b4f63bd2aa06 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -41,7 +41,6 @@ mod tests; #[cfg(feature = "full-node")] use { - grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}, gum::info, polkadot_node_core_approval_voting::{ self as approval_voting_subsystem, Config as ApprovalVotingConfig, @@ -58,6 +57,7 @@ use { request_response::ReqProtocolNames, }, sc_client_api::BlockBackend, + sc_consensus_grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}, sc_transaction_pool_api::OffchainTransactionPoolFactory, sp_core::traits::SpawnNamed, }; @@ -82,15 +82,13 @@ use std::{collections::HashMap, path::PathBuf, sync::Arc, time::Duration}; use prometheus_endpoint::Registry; #[cfg(feature = "full-node")] -use service::KeystoreContainer; -use service::RpcHandlers; -use telemetry::TelemetryWorker; +use sc_service::KeystoreContainer; +use sc_service::RpcHandlers; +use sc_telemetry::TelemetryWorker; #[cfg(feature = "full-node")] -use telemetry::{Telemetry, TelemetryWorkerHandle}; +use 
sc_telemetry::{Telemetry, TelemetryWorkerHandle}; -use beefy_primitives::ecdsa_crypto; pub use chain_spec::{GenericChainSpec, RococoChainSpec, WestendChainSpec}; -pub use consensus_common::{Proposal, SelectChain}; use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE; use mmr_gadget::MmrGadget; use polkadot_node_subsystem_types::DefaultSubsystemClient; @@ -99,12 +97,14 @@ pub use sc_client_api::{Backend, CallExecutor}; pub use sc_consensus::{BlockImport, LongestChain}; pub use sc_executor::NativeExecutionDispatch; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; -pub use service::{ +pub use sc_service::{ config::{DatabaseSource, PrometheusConfig}, ChainSpec, Configuration, Error as SubstrateServiceError, PruningMode, Role, TFullBackend, TFullCallExecutor, TFullClient, TaskManager, TransactionPoolOptions, }; pub use sp_api::{ApiRef, ConstructRuntimeApi, Core as CoreApi, ProvideRuntimeApi}; +pub use sp_consensus::{Proposal, SelectChain}; +use sp_consensus_beefy::ecdsa_crypto; pub use sp_runtime::{ generic, traits::{self as runtime_traits, BlakeTwo256, Block as BlockT, Header as HeaderT, NumberFor}, @@ -118,10 +118,10 @@ pub use {westend_runtime, westend_runtime_constants}; pub use fake_runtime_api::{GetLastTimestamp, RuntimeApi}; #[cfg(feature = "full-node")] -pub type FullBackend = service::TFullBackend; +pub type FullBackend = sc_service::TFullBackend; #[cfg(feature = "full-node")] -pub type FullClient = service::TFullClient< +pub type FullClient = sc_service::TFullClient< Block, RuntimeApi, WasmExecutor<(sp_io::SubstrateHostFunctions, frame_benchmarking::benchmarking::HostFunctions)>, @@ -210,7 +210,7 @@ pub enum Error { Blockchain(#[from] sp_blockchain::Error), #[error(transparent)] - Consensus(#[from] consensus_common::Error), + Consensus(#[from] sp_consensus::Error), #[error("Failed to create an overseer")] Overseer(#[from] polkadot_overseer::SubsystemError), @@ -219,7 +219,7 @@ pub enum Error { Prometheus(#[from] 
prometheus_endpoint::PrometheusError), #[error(transparent)] - Telemetry(#[from] telemetry::Error), + Telemetry(#[from] sc_telemetry::Error), #[error(transparent)] Jaeger(#[from] polkadot_node_subsystem::jaeger::JaegerError), @@ -393,10 +393,16 @@ fn jaeger_launch_collector_with_agent( type FullSelectChain = relay_chain_selection::SelectRelayChain; #[cfg(feature = "full-node")] type FullGrandpaBlockImport = - grandpa::GrandpaBlockImport; + sc_consensus_grandpa::GrandpaBlockImport; #[cfg(feature = "full-node")] type FullBeefyBlockImport = - beefy::import::BeefyBlockImport; + sc_consensus_beefy::import::BeefyBlockImport< + Block, + FullBackend, + FullClient, + InnerBlockImport, + AuthorityId, + >; #[cfg(feature = "full-node")] struct Basics { @@ -417,7 +423,7 @@ fn new_partial_basics( .telemetry_endpoints .clone() .filter(|x| !x.is_empty()) - .map(move |endpoints| -> Result<_, telemetry::Error> { + .map(move |endpoints| -> Result<_, sc_telemetry::Error> { let (worker, mut worker_handle) = if let Some(worker_handle) = telemetry_worker_handle { (None, worker_handle) } else { @@ -443,7 +449,7 @@ fn new_partial_basics( .build(); let (client, backend, keystore_container, task_manager) = - service::new_full_parts::( + sc_service::new_full_parts::( &config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, @@ -472,7 +478,7 @@ fn new_partial( Basics { task_manager, backend, client, keystore_container, telemetry }: Basics, select_chain: ChainSelection, ) -> Result< - service::PartialComponents< + sc_service::PartialComponents< FullClient, FullBackend, ChainSelection, @@ -484,7 +490,7 @@ fn new_partial( polkadot_rpc::SubscriptionTaskExecutor, ) -> Result, ( - babe::BabeBlockImport< + sc_consensus_babe::BabeBlockImport< Block, FullClient, FullBeefyBlockImport< @@ -492,11 +498,11 @@ fn new_partial( ecdsa_crypto::AuthorityId, >, >, - grandpa::LinkHalf, - babe::BabeLink, - beefy::BeefyVoterLinks, + sc_consensus_grandpa::LinkHalf, + sc_consensus_babe::BabeLink, 
+ sc_consensus_beefy::BeefyVoterLinks, ), - grandpa::SharedVoterState, + sc_consensus_grandpa::SharedVoterState, sp_consensus_babe::SlotDuration, Option, ), @@ -520,55 +526,57 @@ where Vec::new() }; - let (grandpa_block_import, grandpa_link) = grandpa::block_import_with_authority_set_hard_forks( - client.clone(), - GRANDPA_JUSTIFICATION_PERIOD, - &(client.clone() as Arc<_>), - select_chain.clone(), - grandpa_hard_forks, - telemetry.as_ref().map(|x| x.handle()), - )?; + let (grandpa_block_import, grandpa_link) = + sc_consensus_grandpa::block_import_with_authority_set_hard_forks( + client.clone(), + GRANDPA_JUSTIFICATION_PERIOD, + &(client.clone() as Arc<_>), + select_chain.clone(), + grandpa_hard_forks, + telemetry.as_ref().map(|x| x.handle()), + )?; let justification_import = grandpa_block_import.clone(); let (beefy_block_import, beefy_voter_links, beefy_rpc_links) = - beefy::beefy_block_import_and_links( + sc_consensus_beefy::beefy_block_import_and_links( grandpa_block_import, backend.clone(), client.clone(), config.prometheus_registry().cloned(), ); - let babe_config = babe::configuration(&*client)?; + let babe_config = sc_consensus_babe::configuration(&*client)?; let (block_import, babe_link) = - babe::block_import(babe_config.clone(), beefy_block_import, client.clone())?; + sc_consensus_babe::block_import(babe_config.clone(), beefy_block_import, client.clone())?; let slot_duration = babe_link.config().slot_duration(); - let (import_queue, babe_worker_handle) = babe::import_queue(babe::ImportQueueParams { - link: babe_link.clone(), - block_import: block_import.clone(), - justification_import: Some(Box::new(justification_import)), - client: client.clone(), - select_chain: select_chain.clone(), - create_inherent_data_providers: move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + let (import_queue, babe_worker_handle) = + sc_consensus_babe::import_queue(sc_consensus_babe::ImportQueueParams { + link: 
babe_link.clone(), + block_import: block_import.clone(), + justification_import: Some(Box::new(justification_import)), + client: client.clone(), + select_chain: select_chain.clone(), + create_inherent_data_providers: move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - let slot = + let slot = sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( *timestamp, slot_duration, ); - Ok((slot, timestamp)) - }, - spawner: &task_manager.spawn_essential_handle(), - registry: config.prometheus_registry(), - telemetry: telemetry.as_ref().map(|x| x.handle()), - offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()), - })?; + Ok((slot, timestamp)) + }, + spawner: &task_manager.spawn_essential_handle(), + registry: config.prometheus_registry(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()), + })?; let justification_stream = grandpa_link.justification_stream(); let shared_authority_set = grandpa_link.shared_authority_set().clone(); - let shared_voter_state = grandpa::SharedVoterState::empty(); + let shared_voter_state = sc_consensus_grandpa::SharedVoterState::empty(); let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service( backend.clone(), Some(shared_authority_set.clone()), @@ -587,7 +595,7 @@ where move |deny_unsafe, subscription_executor: polkadot_rpc::SubscriptionTaskExecutor| - -> Result { + -> Result { let deps = polkadot_rpc::FullDeps { client: client.clone(), pool: transaction_pool.clone(), @@ -617,7 +625,7 @@ where } }; - Ok(service::PartialComponents { + Ok(sc_service::PartialComponents { client, backend, task_manager, @@ -812,7 +820,7 @@ pub fn new_full< SelectRelayChain::new_longest_chain(basics.backend.clone()) }; - let service::PartialComponents::<_, _, SelectRelayChain<_>, _, _, _> { + let sc_service::PartialComponents::<_, _, 
SelectRelayChain<_>, _, _, _> { client, backend, mut task_manager, @@ -839,9 +847,10 @@ pub fn new_full< // Note: GrandPa is pushed before the Polkadot-specific protocols. This doesn't change // anything in terms of behaviour, but makes the logs more consistent with the other // Substrate nodes. - let grandpa_protocol_name = grandpa::protocol_standard_name(&genesis_hash, &config.chain_spec); + let grandpa_protocol_name = + sc_consensus_grandpa::protocol_standard_name(&genesis_hash, &config.chain_spec); let (grandpa_protocol_config, grandpa_notification_service) = - grandpa::grandpa_peers_set_config::<_, Network>( + sc_consensus_grandpa::grandpa_peers_set_config::<_, Network>( grandpa_protocol_name.clone(), metrics.clone(), Arc::clone(&peer_store_handle), @@ -849,21 +858,19 @@ pub fn new_full< net_config.add_notification_protocol(grandpa_protocol_config); let beefy_gossip_proto_name = - beefy::gossip_protocol_name(&genesis_hash, config.chain_spec.fork_id()); + sc_consensus_beefy::gossip_protocol_name(&genesis_hash, config.chain_spec.fork_id()); // `beefy_on_demand_justifications_handler` is given to `beefy-gadget` task to be run, // while `beefy_req_resp_cfg` is added to `config.network.request_response_protocols`. 
let (beefy_on_demand_justifications_handler, beefy_req_resp_cfg) = - beefy::communication::request_response::BeefyJustifsRequestHandler::new::<_, Network>( - &genesis_hash, - config.chain_spec.fork_id(), - client.clone(), - prometheus_registry.clone(), - ); + sc_consensus_beefy::communication::request_response::BeefyJustifsRequestHandler::new::< + _, + Network, + >(&genesis_hash, config.chain_spec.fork_id(), client.clone(), prometheus_registry.clone()); let beefy_notification_service = match enable_beefy { false => None, true => { let (beefy_notification_config, beefy_notification_service) = - beefy::communication::beefy_peers_set_config::<_, Network>( + sc_consensus_beefy::communication::beefy_peers_set_config::<_, Network>( beefy_gossip_proto_name.clone(), metrics.clone(), Arc::clone(&peer_store_handle), @@ -932,7 +939,7 @@ pub fn new_full< Vec::new() }; - let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new( + let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new( backend.clone(), import_setup.1.shared_authority_set().clone(), grandpa_hard_forks, @@ -1020,7 +1027,7 @@ pub fn new_full< }; let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = - service::build_network(service::BuildNetworkParams { + sc_service::build_network(sc_service::BuildNetworkParams { config: &config, net_config, client: client.clone(), @@ -1056,7 +1063,7 @@ pub fn new_full< ); } - let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams { + let rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { config, backend: backend.clone(), client: client.clone(), @@ -1152,7 +1159,7 @@ pub fn new_full< let overseer_handle = if let Some(authority_discovery_service) = authority_discovery_service { let (overseer, overseer_handle) = overseer_gen - .generate::>( + .generate::>( overseer_connector, OverseerGenArgs { runtime_client, @@ -1224,7 +1231,7 @@ pub fn new_full< let overseer_handle = 
overseer_handle.as_ref().ok_or(Error::AuthoritiesRequireRealOverseer)?.clone(); let slot_duration = babe_link.config().slot_duration(); - let babe_config = babe::BabeParams { + let babe_config = sc_consensus_babe::BabeParams { keystore: keystore_container.keystore(), client: client.clone(), select_chain, @@ -1258,12 +1265,12 @@ pub fn new_full< force_authoring, backoff_authoring_blocks, babe_link, - block_proposal_slot_portion: babe::SlotProportion::new(2f32 / 3f32), + block_proposal_slot_portion: sc_consensus_babe::SlotProportion::new(2f32 / 3f32), max_block_proposal_slot_portion: None, telemetry: telemetry.as_ref().map(|x| x.handle()), }; - let babe = babe::start_babe(babe_config)?; + let babe = sc_consensus_babe::start_babe(babe_config)?; task_manager.spawn_essential_handle().spawn_blocking("babe", None, babe); } @@ -1274,7 +1281,7 @@ pub fn new_full< // beefy is enabled if its notification service exists if let Some(notification_service) = beefy_notification_service { let justifications_protocol_name = beefy_on_demand_justifications_handler.protocol_name(); - let network_params = beefy::BeefyNetworkParams { + let network_params = sc_consensus_beefy::BeefyNetworkParams { network: Arc::new(network.clone()), sync: sync_service.clone(), gossip_protocol_name: beefy_gossip_proto_name, @@ -1282,8 +1289,8 @@ pub fn new_full< notification_service, _phantom: core::marker::PhantomData::, }; - let payload_provider = beefy_primitives::mmr::MmrRootProvider::new(client.clone()); - let beefy_params = beefy::BeefyParams { + let payload_provider = sp_consensus_beefy::mmr::MmrRootProvider::new(client.clone()); + let beefy_params = sc_consensus_beefy::BeefyParams { client: client.clone(), backend: backend.clone(), payload_provider, @@ -1297,9 +1304,16 @@ pub fn new_full< is_authority: role.is_authority(), }; - let gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _, ecdsa_crypto::AuthorityId>( - beefy_params, - ); + let gadget = sc_consensus_beefy::start_beefy_gadget::< + _, 
+ _, + _, + _, + _, + _, + _, + ecdsa_crypto::AuthorityId, + >(beefy_params); // BEEFY is part of consensus, if it fails we'll bring the node down with it to make sure it // is noticed. @@ -1320,7 +1334,7 @@ pub fn new_full< ); } - let config = grandpa::Config { + let config = sc_consensus_grandpa::Config { // FIXME substrate#1578 make this available through chainspec // Grandpa performance can be improved a bit by tuning this parameter, see: // https://github.com/paritytech/polkadot/issues/5464 @@ -1343,17 +1357,18 @@ pub fn new_full< // provide better guarantees of block and vote data availability than // the observer. - let mut voting_rules_builder = grandpa::VotingRulesBuilder::default(); + let mut voting_rules_builder = sc_consensus_grandpa::VotingRulesBuilder::default(); #[cfg(not(feature = "malus"))] let _malus_finality_delay = None; if let Some(delay) = _malus_finality_delay { info!(?delay, "Enabling malus finality delay",); - voting_rules_builder = voting_rules_builder.add(grandpa::BeforeBestBlockBy(delay)); + voting_rules_builder = + voting_rules_builder.add(sc_consensus_grandpa::BeforeBestBlockBy(delay)); }; - let grandpa_config = grandpa::GrandpaParams { + let grandpa_config = sc_consensus_grandpa::GrandpaParams { config, link: link_half, network: network.clone(), @@ -1369,7 +1384,7 @@ pub fn new_full< task_manager.spawn_essential_handle().spawn_blocking( "grandpa-voter", None, - grandpa::run_grandpa_voter(grandpa_config)?, + sc_consensus_grandpa::run_grandpa_voter(grandpa_config)?, ); } @@ -1398,7 +1413,7 @@ macro_rules! chain_ops { // use the longest chain selection, since there is no overseer available let chain_selection = LongestChain::new(basics.backend.clone()); - let service::PartialComponents { client, backend, import_queue, task_manager, .. } = + let sc_service::PartialComponents { client, backend, import_queue, task_manager, .. 
} = new_partial::>(&mut config, basics, chain_selection)?; Ok((client, backend, import_queue, task_manager)) }}; @@ -1411,7 +1426,7 @@ pub fn new_chain_ops( jaeger_agent: Option, ) -> Result<(Arc, Arc, sc_consensus::BasicQueue, TaskManager), Error> { - config.keystore = service::config::KeystoreConfig::InMemory; + config.keystore = sc_service::config::KeystoreConfig::InMemory; if config.chain_spec.is_rococo() || config.chain_spec.is_wococo() || @@ -1489,8 +1504,8 @@ pub fn revert_backend( revert_approval_voting(parachains_db.clone(), hash)?; revert_chain_selection(parachains_db, hash)?; // Revert Substrate consensus related components - babe::revert(client.clone(), backend, blocks)?; - grandpa::revert(client, blocks)?; + sc_consensus_babe::revert(client.clone(), backend, blocks)?; + sc_consensus_grandpa::revert(client, blocks)?; Ok(()) } @@ -1519,7 +1534,7 @@ fn revert_approval_voting(db: Arc, hash: Hash) -> sp_blockchain::R config, db, Arc::new(sc_keystore::LocalKeystore::in_memory()), - Box::new(consensus_common::NoNetwork), + Box::new(sp_consensus::NoNetwork), approval_voting_subsystem::Metrics::default(), ); diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index 6f35718cd18f..5f4db99b00ef 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -82,7 +82,7 @@ where /// Underlying network service implementation. pub network_service: Arc, /// Underlying syncing service implementation. - pub sync_service: Arc, + pub sync_service: Arc, /// Underlying authority discovery service. pub authority_discovery_service: AuthorityDiscoveryService, /// Collations request receiver for network protocol v1. 
diff --git a/polkadot/node/service/src/parachains_db/upgrade.rs b/polkadot/node/service/src/parachains_db/upgrade.rs index 4d7370859609..808acf04b4e7 100644 --- a/polkadot/node/service/src/parachains_db/upgrade.rs +++ b/polkadot/node/service/src/parachains_db/upgrade.rs @@ -463,7 +463,7 @@ mod tests { v3::migration_helpers::{v1_to_latest_sanity_check, v2_fill_test_data}, }; use polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter; - use test_helpers::dummy_candidate_receipt; + use polkadot_primitives_test_helpers::dummy_candidate_receipt; #[test] fn test_paritydb_migrate_0_to_1() { diff --git a/polkadot/node/service/src/relay_chain_selection.rs b/polkadot/node/service/src/relay_chain_selection.rs index c5546c34bdba..c0b1ce8b0ebe 100644 --- a/polkadot/node/service/src/relay_chain_selection.rs +++ b/polkadot/node/service/src/relay_chain_selection.rs @@ -36,7 +36,6 @@ #![cfg(feature = "full-node")] use super::{HeaderProvider, HeaderProviderProvider}; -use consensus_common::{Error as ConsensusError, SelectChain}; use futures::channel::oneshot; use polkadot_node_primitives::MAX_FINALITY_LAG as PRIMITIVES_MAX_FINALITY_LAG; use polkadot_node_subsystem::messages::{ @@ -46,9 +45,10 @@ use polkadot_node_subsystem::messages::{ use polkadot_node_subsystem_util::metrics::{self, prometheus}; use polkadot_overseer::{AllMessages, Handle}; use polkadot_primitives::{Block as PolkadotBlock, BlockNumber, Hash, Header as PolkadotHeader}; +use sp_consensus::{Error as ConsensusError, SelectChain}; use std::sync::Arc; -pub use service::SpawnTaskHandle; +pub use sc_service::SpawnTaskHandle; /// The maximum amount of unfinalized blocks we are willing to allow due to approval checking /// or disputes. 
diff --git a/polkadot/node/service/src/tests.rs b/polkadot/node/service/src/tests.rs index 26c8083185d8..bebd05071013 100644 --- a/polkadot/node/service/src/tests.rs +++ b/polkadot/node/service/src/tests.rs @@ -19,7 +19,6 @@ use super::{relay_chain_selection::*, *}; use futures::channel::oneshot::Receiver; use polkadot_node_primitives::approval::v2::VrfSignature; use polkadot_node_subsystem::messages::{AllMessages, BlockDescription}; -use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_test_client::Sr25519Keyring; use sp_consensus_babe::{ @@ -46,7 +45,8 @@ use polkadot_primitives::{Block, BlockNumber, Hash, Header}; use polkadot_node_subsystem_test_helpers::TestSubsystemSender; use polkadot_overseer::{SubsystemContext, SubsystemSender}; -type VirtualOverseer = test_helpers::TestSubsystemContextHandle; +type VirtualOverseer = + polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; #[async_trait::async_trait] impl OverseerHandleT for TestSubsystemSender { @@ -76,7 +76,8 @@ fn test_harness>( .try_init(); let pool = TaskExecutor::new(); - let (mut context, virtual_overseer) = test_helpers::make_subsystem_context(pool); + let (mut context, virtual_overseer) = + polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); let (finality_target_tx, finality_target_rx) = oneshot::channel::>(); diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index ebd9322e9f74..5001104f929a 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -56,7 +56,7 @@ rand_distr = "0.4.3" bitvec = "1.0.1" kvdb-memorydb = "0.13.0" -parity-scale-codec = { version = "3.6.12", features = ["derive", "std"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive", "std"] } tokio = { version = "1.24.2", features = ["parking_lot", "rt-multi-thread"] } clap-num = "1.0.2" 
polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } @@ -69,7 +69,7 @@ sp-consensus = { path = "../../../substrate/primitives/consensus/common" } polkadot-node-metrics = { path = "../metrics" } itertools = "0.11" polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } -prometheus_endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } prometheus = { version = "0.13.0", default-features = false } serde = { workspace = true, default-features = true } serde_yaml = { workspace = true } @@ -87,7 +87,7 @@ rand_core = "0.6.2" rand_chacha = { version = "0.3.1" } paste = "1.0.14" orchestra = { version = "0.3.5", default-features = false, features = ["futures_channel"] } -pyroscope = "0.5.7" +pyroscope = { version = "0.5.7" } pyroscope_pprofrs = "0.2.7" strum = { version = "0.24", features = ["derive"] } diff --git a/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs index e4a6c207970f..6d3e7dd92db1 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs @@ -25,9 +25,9 @@ use crate::{ mock::runtime_api::session_info_for_peers, NODE_UNDER_TEST, }; +use codec::Encode; use futures::SinkExt; use itertools::Itertools; -use parity_scale_codec::Encode; use polkadot_node_core_approval_voting::{ criteria::{compute_assignments, Config}, time::tranche_to_tick, diff --git a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs index 2e5831276ad3..5c0c65b11cdb 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs @@ -40,12 +40,12 @@ use crate::{ usage::BenchmarkUsage, 
NODE_UNDER_TEST, }; +use codec::{Decode, Encode}; use colored::Colorize; use futures::channel::oneshot; use itertools::Itertools; use orchestra::TimeoutExt; use overseer::{metrics::Metrics as OverseerMetrics, MetricsTrait}; -use parity_scale_codec::{Decode, Encode}; use polkadot_approval_distribution::ApprovalDistribution; use polkadot_node_core_approval_voting::{ time::{slot_number_to_tick, tick_to_slot_number, Clock, ClockExt, SystemClock}, diff --git a/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs index 9641b62a94d8..d23c2552b8b3 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs @@ -18,8 +18,8 @@ use crate::{ approval::{ApprovalsOptions, BlockTestData, CandidateTestData}, configuration::TestAuthorities, }; +use codec::{Decode, Encode}; use itertools::Itertools; -use parity_scale_codec::{Decode, Encode}; use polkadot_node_network_protocol::v3 as protocol_v3; use polkadot_primitives::{CandidateIndex, Hash, ValidatorIndex}; use sc_network_types::PeerId; diff --git a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs index 52944ffb08f3..32dc8ae2c8dc 100644 --- a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs @@ -33,7 +33,7 @@ use crate::{ use colored::Colorize; use futures::{channel::oneshot, stream::FuturesUnordered, StreamExt}; -use parity_scale_codec::Encode; +use codec::Encode; use polkadot_availability_bitfield_distribution::BitfieldDistribution; use polkadot_availability_distribution::{ AvailabilityDistributionSubsystem, IncomingRequestReceivers, diff --git a/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs b/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs index 5d443734bb38..173b23f6b76e 100644 --- 
a/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs @@ -20,9 +20,9 @@ use crate::{ mock::runtime_api::node_features_with_chunk_mapping_enabled, }; use bitvec::bitvec; +use codec::Encode; use colored::Colorize; use itertools::Itertools; -use parity_scale_codec::Encode; use polkadot_node_network_protocol::{ request_response::{v2::ChunkFetchingRequest, ReqProtocolNames}, Versioned, VersionedValidationProtocol, diff --git a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs index 14ec4ccb4c32..7586e848ab47 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs @@ -17,8 +17,8 @@ //! A generic av store subsystem mockup suitable to be used in benchmarks. use crate::network::{HandleNetworkMessage, NetworkMessage}; +use codec::Encode; use futures::{channel::oneshot, FutureExt}; -use parity_scale_codec::Encode; use polkadot_node_network_protocol::request_response::{ v1::AvailableDataFetchingResponse, v2::ChunkFetchingResponse, Protocol, ReqProtocolNames, Requests, diff --git a/polkadot/node/subsystem-bench/src/lib/network.rs b/polkadot/node/subsystem-bench/src/lib/network.rs index 775f881eaad8..331dd7d25156 100644 --- a/polkadot/node/subsystem-bench/src/lib/network.rs +++ b/polkadot/node/subsystem-bench/src/lib/network.rs @@ -38,6 +38,7 @@ use crate::{ environment::TestEnvironmentDependencies, NODE_UNDER_TEST, }; +use codec::Encode; use colored::Colorize; use futures::{ channel::{ @@ -55,7 +56,6 @@ use net_protocol::{ request_response::{Recipient, Requests, ResponseSender}, ObservedRole, VersionedValidationProtocol, View, }; -use parity_scale_codec::Encode; use polkadot_node_network_protocol::{self as net_protocol, Versioned}; use polkadot_node_subsystem::messages::StatementDistributionMessage; use polkadot_node_subsystem_types::messages::NetworkBridgeEvent; 
diff --git a/polkadot/node/subsystem-bench/src/lib/statement/test_state.rs b/polkadot/node/subsystem-bench/src/lib/statement/test_state.rs index b8ea64c7e331..88b5e8b76b62 100644 --- a/polkadot/node/subsystem-bench/src/lib/statement/test_state.rs +++ b/polkadot/node/subsystem-bench/src/lib/statement/test_state.rs @@ -21,9 +21,9 @@ use crate::{ NODE_UNDER_TEST, }; use bitvec::vec::BitVec; +use codec::{Decode, Encode}; use futures::channel::oneshot; use itertools::Itertools; -use parity_scale_codec::{Decode, Encode}; use polkadot_node_network_protocol::{ request_response::{ v2::{AttestedCandidateRequest, AttestedCandidateResponse}, diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index e03fc60a1fd7..0178b193cba8 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -29,7 +29,7 @@ sp-authority-discovery = { path = "../../../substrate/primitives/authority-disco sc-client-api = { path = "../../../substrate/client/api" } sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } smallvec = "1.8.0" -substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } thiserror = { workspace = true } async-trait = "0.1.79" bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } diff --git a/polkadot/node/subsystem-types/src/errors.rs b/polkadot/node/subsystem-types/src/errors.rs index b8e70641243e..8e1b515c8db0 100644 --- a/polkadot/node/subsystem-types/src/errors.rs +++ b/polkadot/node/subsystem-types/src/errors.rs @@ -107,7 +107,7 @@ pub enum SubsystemError { Infallible(#[from] std::convert::Infallible), #[error(transparent)] - Prometheus(#[from] substrate_prometheus_endpoint::PrometheusError), + Prometheus(#[from] prometheus_endpoint::PrometheusError), #[error(transparent)] Jaeger(#[from] 
JaegerError), diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index 9259ca94f073..b7fb75b94b2c 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -14,7 +14,7 @@ async-trait = "0.1.79" futures = "0.3.30" futures-channel = "0.3.23" itertools = "0.11" -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } parking_lot = "0.12.1" pin-project = "1.0.9" rand = "0.8.5" @@ -24,7 +24,7 @@ gum = { package = "tracing-gum", path = "../gum" } derive_more = "0.99.17" schnellru = "0.2.1" -erasure-coding = { package = "polkadot-erasure-coding", path = "../../erasure-coding" } +polkadot-erasure-coding = { path = "../../erasure-coding" } polkadot-node-subsystem = { path = "../subsystem" } polkadot-node-subsystem-types = { path = "../subsystem-types" } polkadot-node-jaeger = { path = "../jaeger" } diff --git a/polkadot/node/subsystem-util/src/availability_chunks.rs b/polkadot/node/subsystem-util/src/availability_chunks.rs index 45168e4512e1..651dd3633cfc 100644 --- a/polkadot/node/subsystem-util/src/availability_chunks.rs +++ b/polkadot/node/subsystem-util/src/availability_chunks.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use erasure_coding::systematic_recovery_threshold; +use polkadot_erasure_coding::systematic_recovery_threshold; use polkadot_primitives::{node_features, ChunkIndex, CoreIndex, NodeFeatures, ValidatorIndex}; /// Compute the per-validator availability chunk index. 
@@ -26,7 +26,7 @@ pub fn availability_chunk_index( n_validators: usize, core_index: CoreIndex, validator_index: ValidatorIndex, -) -> Result { +) -> Result { if let Some(features) = maybe_node_features { if let Some(&true) = features .get(usize::from(node_features::FeatureIndex::AvailabilityChunkMapping as u8)) @@ -51,7 +51,7 @@ pub fn availability_chunk_indices( maybe_node_features: Option<&NodeFeatures>, n_validators: usize, core_index: CoreIndex, -) -> Result, erasure_coding::Error> { +) -> Result, polkadot_erasure_coding::Error> { let identity = (0..n_validators).map(|index| ChunkIndex(index as u32)); if let Some(features) = maybe_node_features { if let Some(&true) = features diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs index d371b699b9eb..92f2cd189054 100644 --- a/polkadot/node/subsystem-util/src/lib.rs +++ b/polkadot/node/subsystem-util/src/lib.rs @@ -37,8 +37,8 @@ use polkadot_node_subsystem::{ pub use polkadot_node_metrics::{metrics, Metronome}; +use codec::Encode; use futures::channel::{mpsc, oneshot}; -use parity_scale_codec::Encode; use polkadot_primitives::{ async_backing::BackingState, slashing, AsyncBackingParams, AuthorityDiscoveryId, diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 214c58a8e88f..2c9ec8db3778 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -18,7 +18,7 @@ use schnellru::{ByLength, LruMap}; -use parity_scale_codec::Encode; +use codec::Encode; use sp_application_crypto::AppCrypto; use sp_core::crypto::ByteArray; use sp_keystore::{Keystore, KeystorePtr}; diff --git a/polkadot/node/test/client/Cargo.toml b/polkadot/node/test/client/Cargo.toml index 55d4d81d1c21..0b49866ee2ae 100644 --- a/polkadot/node/test/client/Cargo.toml +++ b/polkadot/node/test/client/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] 
-parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } # Polkadot dependencies polkadot-test-runtime = { path = "../../../runtime/test-runtime" } diff --git a/polkadot/node/test/client/src/block_builder.rs b/polkadot/node/test/client/src/block_builder.rs index 57e6008917af..71bcdaffac4e 100644 --- a/polkadot/node/test/client/src/block_builder.rs +++ b/polkadot/node/test/client/src/block_builder.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use crate::Client; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use polkadot_primitives::{Block, InherentData as ParachainsInherentData}; use polkadot_test_runtime::UncheckedExtrinsic; use polkadot_test_service::GetLastTimestamp; diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml index 48a206f23c66..3fc6d060870b 100644 --- a/polkadot/node/test/service/Cargo.toml +++ b/polkadot/node/test/service/Cargo.toml @@ -34,13 +34,13 @@ polkadot-runtime-parachains = { path = "../../../runtime/parachains" } # Substrate dependencies sp-authority-discovery = { path = "../../../../substrate/primitives/authority-discovery" } sc-authority-discovery = { path = "../../../../substrate/client/authority-discovery" } -babe = { package = "sc-consensus-babe", path = "../../../../substrate/client/consensus/babe" } -babe-primitives = { package = "sp-consensus-babe", path = "../../../../substrate/primitives/consensus/babe" } -consensus_common = { package = "sp-consensus", path = "../../../../substrate/primitives/consensus/common" } +sc-consensus-babe = { path = "../../../../substrate/client/consensus/babe" } +sp-consensus-babe = { path = "../../../../substrate/primitives/consensus/babe" } +sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } frame-system = { path = "../../../../substrate/frame/system" } 
-grandpa = { package = "sc-consensus-grandpa", path = "../../../../substrate/client/consensus/grandpa" } -grandpa_primitives = { package = "sp-consensus-grandpa", path = "../../../../substrate/primitives/consensus/grandpa" } -inherents = { package = "sp-inherents", path = "../../../../substrate/primitives/inherents" } +sc-consensus-grandpa = { path = "../../../../substrate/client/consensus/grandpa" } +sp-consensus-grandpa = { path = "../../../../substrate/primitives/consensus/grandpa" } +sp-inherents = { path = "../../../../substrate/primitives/inherents" } pallet-staking = { path = "../../../../substrate/frame/staking" } pallet-balances = { path = "../../../../substrate/frame/balances" } pallet-transaction-payment = { path = "../../../../substrate/frame/transaction-payment" } diff --git a/polkadot/node/test/service/src/chain_spec.rs b/polkadot/node/test/service/src/chain_spec.rs index e6a1229caf86..bd53fd843c69 100644 --- a/polkadot/node/test/service/src/chain_spec.rs +++ b/polkadot/node/test/service/src/chain_spec.rs @@ -16,8 +16,6 @@ //! Chain specifications for the test runtime. 
-use babe_primitives::AuthorityId as BabeId; -use grandpa::AuthorityId as GrandpaId; use pallet_staking::Forcing; use polkadot_primitives::{ vstaging::SchedulerParams, AccountId, AssignmentId, ValidatorId, MAX_CODE_SIZE, MAX_POV_SIZE, @@ -25,7 +23,9 @@ use polkadot_primitives::{ use polkadot_service::chain_spec::{get_account_id_from_seed, get_from_seed, Extensions}; use polkadot_test_runtime::BABE_GENESIS_EPOCH_CONFIG; use sc_chain_spec::{ChainSpec, ChainType}; +use sc_consensus_grandpa::AuthorityId as GrandpaId; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; +use sp_consensus_babe::AuthorityId as BabeId; use sp_core::sr25519; use sp_runtime::Perbill; use test_runtime_constants::currency::DOTS; diff --git a/polkadot/node/zombienet-backchannel/Cargo.toml b/polkadot/node/zombienet-backchannel/Cargo.toml index a0233bb46e51..31662ccfc464 100644 --- a/polkadot/node/zombienet-backchannel/Cargo.toml +++ b/polkadot/node/zombienet-backchannel/Cargo.toml @@ -17,7 +17,7 @@ url = "2.3.1" tokio-tungstenite = "0.20.1" futures-util = "0.3.30" lazy_static = "1.4.0" -parity-scale-codec = { version = "3.6.12", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } reqwest = { version = "0.11", features = ["rustls-tls"], default-features = false } thiserror = { workspace = true } gum = { package = "tracing-gum", path = "../gum" } diff --git a/polkadot/node/zombienet-backchannel/src/lib.rs b/polkadot/node/zombienet-backchannel/src/lib.rs index fa9218d2d350..9068b03399ca 100644 --- a/polkadot/node/zombienet-backchannel/src/lib.rs +++ b/polkadot/node/zombienet-backchannel/src/lib.rs @@ -19,9 +19,9 @@ //! values in the test specifications, through a bidirectional message passing //! implemented as a `backchannel`. 
+use codec; use futures_util::{SinkExt, StreamExt}; use lazy_static::lazy_static; -use parity_scale_codec as codec; use serde::{Deserialize, Serialize}; use std::{env, sync::Mutex}; use tokio::sync::broadcast; diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index 1344baac64b6..11e8e3ce6d84 100644 --- a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -13,7 +13,7 @@ workspace = true # note: special care is taken to avoid inclusion of `sp-io` externals when compiling # this crate for WASM. This is critical to avoid forcing all parachain WASM into implementing # various unnecessary Substrate-specific endpoints. -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } sp-std = { path = "../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false, features = ["serde"] } @@ -31,7 +31,7 @@ default = ["std"] wasm-api = [] std = [ "bounded-collections/std", - "parity-scale-codec/std", + "codec/std", "polkadot-core-primitives/std", "scale-info/std", "serde/std", diff --git a/polkadot/parachain/src/primitives.rs b/polkadot/parachain/src/primitives.rs index 276438436372..d92bbee8d28d 100644 --- a/polkadot/parachain/src/primitives.rs +++ b/polkadot/parachain/src/primitives.rs @@ -20,7 +20,7 @@ use sp_std::vec::Vec; use bounded_collections::{BoundedVec, ConstU32}; -use parity_scale_codec::{CompactAs, Decode, Encode, MaxEncodedLen}; +use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_core::{bytes, RuntimeDebug, TypeId}; diff --git a/polkadot/parachain/src/wasm_api.rs b/polkadot/parachain/src/wasm_api.rs index 981d276af75c..f0c832666284 
100644 --- a/polkadot/parachain/src/wasm_api.rs +++ b/polkadot/parachain/src/wasm_api.rs @@ -24,7 +24,7 @@ pub unsafe fn load_params(params: *const u8, len: usize) -> crate::primitives::ValidationParams { let mut slice = sp_std::slice::from_raw_parts(params, len); - parity_scale_codec::Decode::decode(&mut slice).expect("Invalid input data") + codec::Decode::decode(&mut slice).expect("Invalid input data") } /// Allocate the validation result in memory, getting the return-pointer back. diff --git a/polkadot/parachain/test-parachains/Cargo.toml b/polkadot/parachain/test-parachains/Cargo.toml index 22f3d2942e0c..c58b11a11b01 100644 --- a/polkadot/parachain/test-parachains/Cargo.toml +++ b/polkadot/parachain/test-parachains/Cargo.toml @@ -12,14 +12,14 @@ workspace = true [dependencies] tiny-keccak = { version = "2.0.2", features = ["keccak"] } -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -adder = { package = "test-parachain-adder", path = "adder" } -halt = { package = "test-parachain-halt", path = "halt" } +test-parachain-adder = { path = "adder" } +test-parachain-halt = { path = "halt" } [dev-dependencies] sp-core = { path = "../../../substrate/primitives/core" } [features] default = ["std"] -std = ["adder/std", "halt/std", "parity-scale-codec/std"] +std = ["codec/std", "test-parachain-adder/std", "test-parachain-halt/std"] diff --git a/polkadot/parachain/test-parachains/adder/Cargo.toml b/polkadot/parachain/test-parachains/adder/Cargo.toml index 273fa93a50f4..e0bbe177eedc 100644 --- a/polkadot/parachain/test-parachains/adder/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/Cargo.toml @@ -12,8 +12,8 @@ publish = false workspace = true [dependencies] -parachain = { package = "polkadot-parachain-primitives", path = "../..", default-features = false, features = ["wasm-api"] } -parity-scale-codec = { 
version = "3.6.12", default-features = false, features = ["derive"] } +polkadot-parachain-primitives = { path = "../..", default-features = false, features = ["wasm-api"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } sp-std = { path = "../../../../substrate/primitives/std", default-features = false } tiny-keccak = { version = "2.0.2", features = ["keccak"] } dlmalloc = { version = "0.2.4", features = ["global"] } @@ -26,4 +26,4 @@ substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } [features] default = ["std"] -std = ["parachain/std", "parity-scale-codec/std", "sp-io/std", "sp-std/std"] +std = ["codec/std", "polkadot-parachain-primitives/std", "sp-io/std", "sp-std/std"] diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml index f9aaab74debd..996735e8c8bf 100644 --- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml @@ -15,7 +15,7 @@ name = "adder-collator" path = "src/main.rs" [dependencies] -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } clap = { version = "4.5.3", features = ["derive"] } futures = "0.3.30" futures-timer = "3.0.2" diff --git a/polkadot/parachain/test-parachains/adder/collator/src/lib.rs b/polkadot/parachain/test-parachains/adder/collator/src/lib.rs index c2ba93f389b0..daeb8bc915dd 100644 --- a/polkadot/parachain/test-parachains/adder/collator/src/lib.rs +++ b/polkadot/parachain/test-parachains/adder/collator/src/lib.rs @@ -16,9 +16,9 @@ //! Collator for the adder test parachain. 
+use codec::{Decode, Encode}; use futures::channel::oneshot; use futures_timer::Delay; -use parity_scale_codec::{Decode, Encode}; use polkadot_node_primitives::{ Collation, CollationResult, CollationSecondedSignal, CollatorFn, MaybeCompressedPoV, PoV, Statement, diff --git a/polkadot/parachain/test-parachains/adder/src/lib.rs b/polkadot/parachain/test-parachains/adder/src/lib.rs index 4cf1ba8ac971..28914f02511d 100644 --- a/polkadot/parachain/test-parachains/adder/src/lib.rs +++ b/polkadot/parachain/test-parachains/adder/src/lib.rs @@ -18,7 +18,7 @@ #![no_std] -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use tiny_keccak::{Hasher as _, Keccak}; #[cfg(not(feature = "std"))] diff --git a/polkadot/parachain/test-parachains/adder/src/wasm_validation.rs b/polkadot/parachain/test-parachains/adder/src/wasm_validation.rs index 048330437cd7..7dba7a964d3b 100644 --- a/polkadot/parachain/test-parachains/adder/src/wasm_validation.rs +++ b/polkadot/parachain/test-parachains/adder/src/wasm_validation.rs @@ -17,14 +17,14 @@ //! WASM validation for adder parachain. 
use crate::{BlockData, HeadData}; +use codec::{Decode, Encode}; use core::panic; -use parachain::primitives::{HeadData as GenericHeadData, ValidationResult}; -use parity_scale_codec::{Decode, Encode}; +use polkadot_parachain_primitives::primitives::{HeadData as GenericHeadData, ValidationResult}; use sp_std::vec::Vec; #[no_mangle] pub extern "C" fn validate_block(params: *const u8, len: usize) -> u64 { - let params = unsafe { parachain::load_params(params, len) }; + let params = unsafe { polkadot_parachain_primitives::load_params(params, len) }; let parent_head = HeadData::decode(&mut &params.parent_head.0[..]).expect("invalid parent head format."); @@ -34,7 +34,7 @@ pub extern "C" fn validate_block(params: *const u8, len: usize) -> u64 { let parent_hash = crate::keccak256(&params.parent_head.0[..]); let new_head = crate::execute(parent_hash, parent_head, &block_data).expect("Executes block"); - parachain::write_result(&ValidationResult { + polkadot_parachain_primitives::write_result(&ValidationResult { head_data: GenericHeadData(new_head.encode()), new_validation_code: None, upward_messages: sp_std::vec::Vec::new().try_into().expect("empty vec fits into bounds"), diff --git a/polkadot/parachain/test-parachains/undying/Cargo.toml b/polkadot/parachain/test-parachains/undying/Cargo.toml index f2067a2c3b9b..4d3d2abaeafe 100644 --- a/polkadot/parachain/test-parachains/undying/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/Cargo.toml @@ -12,8 +12,8 @@ license.workspace = true workspace = true [dependencies] -parachain = { package = "polkadot-parachain-primitives", path = "../..", default-features = false, features = ["wasm-api"] } -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +polkadot-parachain-primitives = { path = "../..", default-features = false, features = ["wasm-api"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } sp-std = { path = 
"../../../../substrate/primitives/std", default-features = false } tiny-keccak = { version = "2.0.2", features = ["keccak"] } dlmalloc = { version = "0.2.4", features = ["global"] } @@ -28,9 +28,9 @@ substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } [features] default = ["std"] std = [ + "codec/std", "log/std", - "parachain/std", - "parity-scale-codec/std", + "polkadot-parachain-primitives/std", "sp-io/std", "sp-std/std", ] diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index 08d1e74d8798..288549c2c268 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -15,7 +15,7 @@ name = "undying-collator" path = "src/main.rs" [dependencies] -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } clap = { version = "4.5.3", features = ["derive"] } futures = "0.3.30" futures-timer = "3.0.2" diff --git a/polkadot/parachain/test-parachains/undying/collator/src/lib.rs b/polkadot/parachain/test-parachains/undying/collator/src/lib.rs index 3c869233182f..920099f4499d 100644 --- a/polkadot/parachain/test-parachains/undying/collator/src/lib.rs +++ b/polkadot/parachain/test-parachains/undying/collator/src/lib.rs @@ -16,9 +16,9 @@ //! Collator for the `Undying` test parachain. 
+use codec::{Decode, Encode}; use futures::channel::oneshot; use futures_timer::Delay; -use parity_scale_codec::{Decode, Encode}; use polkadot_node_primitives::{ maybe_compress_pov, Collation, CollationResult, CollationSecondedSignal, CollatorFn, MaybeCompressedPoV, PoV, Statement, diff --git a/polkadot/parachain/test-parachains/undying/src/lib.rs b/polkadot/parachain/test-parachains/undying/src/lib.rs index abd88726b7fc..dc056e64fa23 100644 --- a/polkadot/parachain/test-parachains/undying/src/lib.rs +++ b/polkadot/parachain/test-parachains/undying/src/lib.rs @@ -18,7 +18,7 @@ #![no_std] -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use sp_std::vec::Vec; use tiny_keccak::{Hasher as _, Keccak}; diff --git a/polkadot/parachain/test-parachains/undying/src/wasm_validation.rs b/polkadot/parachain/test-parachains/undying/src/wasm_validation.rs index de4a1d7e2329..23fac43a3c73 100644 --- a/polkadot/parachain/test-parachains/undying/src/wasm_validation.rs +++ b/polkadot/parachain/test-parachains/undying/src/wasm_validation.rs @@ -17,12 +17,12 @@ //! WASM validation for the `Undying` parachain. 
use crate::{BlockData, HeadData}; -use parachain::primitives::{HeadData as GenericHeadData, ValidationResult}; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; +use polkadot_parachain_primitives::primitives::{HeadData as GenericHeadData, ValidationResult}; #[no_mangle] pub extern "C" fn validate_block(params: *const u8, len: usize) -> u64 { - let params = unsafe { parachain::load_params(params, len) }; + let params = unsafe { polkadot_parachain_primitives::load_params(params, len) }; let parent_head = HeadData::decode(&mut &params.parent_head.0[..]).expect("invalid parent head format."); @@ -34,7 +34,7 @@ pub extern "C" fn validate_block(params: *const u8, len: usize) -> u64 { let (new_head, _) = crate::execute(parent_hash, parent_head, block_data).expect("Executes block"); - parachain::write_result(&ValidationResult { + polkadot_parachain_primitives::write_result(&ValidationResult { head_data: GenericHeadData(new_head.encode()), new_validation_code: None, upward_messages: sp_std::vec::Vec::new().try_into().expect("empty vec fits within bounds"), diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index 603d08b8fee5..d6df077b88b7 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -12,15 +12,15 @@ workspace = true [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc", "serde"] } hex-literal = "0.4.1" -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive", "serde"] } log = { workspace = true, default-features = false } serde = { features = ["alloc", "derive"], workspace = true } -application-crypto = { package = "sp-application-crypto", path = "../../substrate/primitives/application-crypto", 
default-features = false, features = ["serde"] } -inherents = { package = "sp-inherents", path = "../../substrate/primitives/inherents", default-features = false } -primitives = { package = "sp-core", path = "../../substrate/primitives/core", default-features = false } -runtime_primitives = { package = "sp-runtime", path = "../../substrate/primitives/runtime", default-features = false } +sp-application-crypto = { path = "../../substrate/primitives/application-crypto", default-features = false, features = ["serde"] } +sp-inherents = { path = "../../substrate/primitives/inherents", default-features = false } +sp-core = { path = "../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false } sp-api = { path = "../../substrate/primitives/api", default-features = false } sp-arithmetic = { path = "../../substrate/primitives/arithmetic", default-features = false, features = ["serde"] } sp-authority-discovery = { path = "../../substrate/primitives/authority-discovery", default-features = false, features = ["serde"] } @@ -36,29 +36,29 @@ polkadot-parachain-primitives = { path = "../parachain", default-features = fals [features] default = ["std"] std = [ - "application-crypto/std", "bitvec/std", - "inherents/std", + "codec/std", "log/std", - "parity-scale-codec/std", "polkadot-core-primitives/std", "polkadot-parachain-primitives/std", - "primitives/std", - "runtime_primitives/std", "scale-info/std", "serde/std", "sp-api/std", + "sp-application-crypto/std", "sp-arithmetic/std", "sp-authority-discovery/std", "sp-consensus-slots/std", + "sp-core/std", + "sp-inherents/std", "sp-io/std", "sp-keystore", "sp-keystore?/std", + "sp-runtime/std", "sp-staking/std", "sp-std/std", ] runtime-benchmarks = [ "polkadot-parachain-primitives/runtime-benchmarks", - "runtime_primitives/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", "sp-staking/runtime-benchmarks", ] diff --git 
a/polkadot/primitives/src/v7/async_backing.rs b/polkadot/primitives/src/v7/async_backing.rs index 1abe87b6dec4..a82d843d28bf 100644 --- a/polkadot/primitives/src/v7/async_backing.rs +++ b/polkadot/primitives/src/v7/async_backing.rs @@ -18,9 +18,9 @@ use super::*; -use parity_scale_codec::{Decode, Encode}; -use primitives::RuntimeDebug; +use codec::{Decode, Encode}; use scale_info::TypeInfo; +use sp_core::RuntimeDebug; /// Candidate's acceptance limitations for asynchronous backing per relay parent. #[derive( diff --git a/polkadot/primitives/src/v7/executor_params.rs b/polkadot/primitives/src/v7/executor_params.rs index 918a7f17a7e3..e58cf3e76cc2 100644 --- a/polkadot/primitives/src/v7/executor_params.rs +++ b/polkadot/primitives/src/v7/executor_params.rs @@ -22,7 +22,7 @@ //! done in `polkadot-node-core-pvf`. use crate::{BlakeTwo256, HashT as _, PvfExecKind, PvfPrepKind}; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use polkadot_core_primitives::Hash; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; diff --git a/polkadot/primitives/src/v7/metrics.rs b/polkadot/primitives/src/v7/metrics.rs index 97f7678e4373..1a29471c5450 100644 --- a/polkadot/primitives/src/v7/metrics.rs +++ b/polkadot/primitives/src/v7/metrics.rs @@ -16,7 +16,7 @@ //! Runtime metric primitives. -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use sp_std::prelude::*; /// Runtime metric operations. diff --git a/polkadot/primitives/src/v7/mod.rs b/polkadot/primitives/src/v7/mod.rs index fb8406aece69..6b7985847a10 100644 --- a/polkadot/primitives/src/v7/mod.rs +++ b/polkadot/primitives/src/v7/mod.rs @@ -17,7 +17,7 @@ //! `V7` Primitives. 
use bitvec::{field::BitField, slice::BitSlice, vec::BitVec}; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_std::{ marker::PhantomData, @@ -26,13 +26,13 @@ use sp_std::{ vec::IntoIter, }; -use application_crypto::KeyTypeId; -use inherents::InherentIdentifier; -use primitives::RuntimeDebug; -use runtime_primitives::traits::{AppVerify, Header as HeaderT}; +use sp_application_crypto::KeyTypeId; use sp_arithmetic::traits::{BaseArithmetic, Saturating}; +use sp_core::RuntimeDebug; +use sp_inherents::InherentIdentifier; +use sp_runtime::traits::{AppVerify, Header as HeaderT}; -pub use runtime_primitives::traits::{BlakeTwo256, Hash as HashT}; +pub use sp_runtime::traits::{BlakeTwo256, Hash as HashT}; // Export some core primitives. pub use polkadot_core_primitives::v2::{ @@ -77,7 +77,7 @@ pub const COLLATOR_KEY_TYPE_ID: KeyTypeId = KeyTypeId(*b"coll"); const LOG_TARGET: &str = "runtime::primitives"; mod collator_app { - use application_crypto::{app_crypto, sr25519}; + use sp_application_crypto::{app_crypto, sr25519}; app_crypto!(sr25519, super::COLLATOR_KEY_TYPE_ID); } @@ -95,7 +95,7 @@ pub type CollatorSignature = collator_app::Signature; pub const PARACHAIN_KEY_TYPE_ID: KeyTypeId = KeyTypeId(*b"para"); mod validator_app { - use application_crypto::{app_crypto, sr25519}; + use sp_application_crypto::{app_crypto, sr25519}; app_crypto!(sr25519, super::PARACHAIN_KEY_TYPE_ID); } @@ -158,7 +158,7 @@ impl TypeIndex for ValidatorIndex { } } -application_crypto::with_pair! { +sp_application_crypto::with_pair! { /// A Parachain validator keypair. pub type ValidatorPair = validator_app::Pair; } @@ -172,8 +172,8 @@ pub type ValidatorSignature = validator_app::Signature; /// A declarations of storage keys where an external observer can find some interesting data. 
pub mod well_known_keys { use super::{HrmpChannelId, Id, WellKnownKey}; + use codec::Encode as _; use hex_literal::hex; - use parity_scale_codec::Encode as _; use sp_io::hashing::twox_64; use sp_std::prelude::*; @@ -443,7 +443,7 @@ pub const LEGACY_MIN_BACKING_VOTES: u32 = 2; // The public key of a keypair used by a validator for determining assignments /// to approve included parachain candidates. mod assignment_app { - use application_crypto::{app_crypto, sr25519}; + use sp_application_crypto::{app_crypto, sr25519}; app_crypto!(sr25519, super::ASSIGNMENT_KEY_TYPE_ID); } @@ -451,7 +451,7 @@ mod assignment_app { /// to approve included parachain candidates. pub type AssignmentId = assignment_app::Public; -application_crypto::with_pair! { +sp_application_crypto::with_pair! { /// The full keypair used by a validator for determining assignments to approve included /// parachain candidates. pub type AssignmentPair = assignment_app::Pair; @@ -1361,7 +1361,7 @@ pub enum UpgradeGoAhead { } /// Consensus engine id for polkadot v1 consensus engine. -pub const POLKADOT_ENGINE_ID: runtime_primitives::ConsensusEngineId = *b"POL1"; +pub const POLKADOT_ENGINE_ID: sp_runtime::ConsensusEngineId = *b"POL1"; /// A consensus log item for polkadot validation. To be used with [`POLKADOT_ENGINE_ID`]. #[derive(Decode, Encode, Clone, PartialEq, Eq)] @@ -1391,18 +1391,18 @@ pub enum ConsensusLog { impl ConsensusLog { /// Attempt to convert a reference to a generic digest item into a consensus log. 
pub fn from_digest_item( - digest_item: &runtime_primitives::DigestItem, - ) -> Result, parity_scale_codec::Error> { + digest_item: &sp_runtime::DigestItem, + ) -> Result, codec::Error> { match digest_item { - runtime_primitives::DigestItem::Consensus(id, encoded) if id == &POLKADOT_ENGINE_ID => + sp_runtime::DigestItem::Consensus(id, encoded) if id == &POLKADOT_ENGINE_ID => Ok(Some(Self::decode(&mut &encoded[..])?)), _ => Ok(None), } } } -impl From for runtime_primitives::DigestItem { - fn from(c: ConsensusLog) -> runtime_primitives::DigestItem { +impl From for sp_runtime::DigestItem { + fn from(c: ConsensusLog) -> sp_runtime::DigestItem { Self::Consensus(POLKADOT_ENGINE_ID, c.encode()) } } @@ -1752,25 +1752,23 @@ impl From for CompactStatementInner { } } -impl parity_scale_codec::Encode for CompactStatement { +impl codec::Encode for CompactStatement { fn size_hint(&self) -> usize { // magic + discriminant + payload 4 + 1 + 32 } - fn encode_to(&self, dest: &mut T) { + fn encode_to(&self, dest: &mut T) { dest.write(&BACKING_STATEMENT_MAGIC); CompactStatementInner::from(self.clone()).encode_to(dest) } } -impl parity_scale_codec::Decode for CompactStatement { - fn decode( - input: &mut I, - ) -> Result { +impl codec::Decode for CompactStatement { + fn decode(input: &mut I) -> Result { let maybe_magic = <[u8; 4]>::decode(input)?; if maybe_magic != BACKING_STATEMENT_MAGIC { - return Err(parity_scale_codec::Error::from("invalid magic string")) + return Err(codec::Error::from("invalid magic string")) } Ok(match CompactStatementInner::decode(input)? { @@ -1987,7 +1985,7 @@ impl WellKnownKey { /// Gets the value or `None` if it does not exist or decoding failed. 
pub fn get(&self) -> Option { sp_io::storage::get(&self.key) - .and_then(|raw| parity_scale_codec::DecodeAll::decode_all(&mut raw.as_ref()).ok()) + .and_then(|raw| codec::DecodeAll::decode_all(&mut raw.as_ref()).ok()) } } @@ -2051,7 +2049,7 @@ pub mod node_features { mod tests { use super::*; use bitvec::bitvec; - use primitives::sr25519; + use sp_core::sr25519; pub fn dummy_committed_candidate_receipt() -> CommittedCandidateReceipt { let zeros = Hash::zero(); diff --git a/polkadot/primitives/src/v7/signed.rs b/polkadot/primitives/src/v7/signed.rs index 96646d54cbba..62e4df238503 100644 --- a/polkadot/primitives/src/v7/signed.rs +++ b/polkadot/primitives/src/v7/signed.rs @@ -14,17 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use scale_info::TypeInfo; #[cfg(feature = "std")] -use application_crypto::AppCrypto; +use sp_application_crypto::AppCrypto; #[cfg(feature = "std")] use sp_keystore::{Error as KeystoreError, KeystorePtr}; use sp_std::prelude::Vec; -use primitives::RuntimeDebug; -use runtime_primitives::traits::AppVerify; +use sp_core::RuntimeDebug; +use sp_runtime::traits::AppVerify; use super::{SigningContext, ValidatorId, ValidatorIndex, ValidatorSignature}; @@ -312,7 +312,7 @@ impl, RealPayload: Encode> UncheckedSigned, validator_index: ValidatorIndex, ) -> Self { - use application_crypto::RuntimeAppPublic; + use sp_application_crypto::RuntimeAppPublic; let data = Self::payload_data(&payload, context); let signature = public.sign(&data).unwrap(); @@ -343,7 +343,7 @@ impl From> /// This helper trait ensures that we can encode `Statement` as `CompactStatement`, /// and anything as itself. 
/// -/// This resembles `parity_scale_codec::EncodeLike`, but it's distinct: +/// This resembles `codec::EncodeLike`, but it's distinct: /// `EncodeLike` is a marker trait which asserts at the typesystem level that /// one type's encoding is a valid encoding for another type. It doesn't /// perform any type conversion when encoding. diff --git a/polkadot/primitives/src/v7/slashing.rs b/polkadot/primitives/src/v7/slashing.rs index bcd7d0c2fc44..ea06e960b5bc 100644 --- a/polkadot/primitives/src/v7/slashing.rs +++ b/polkadot/primitives/src/v7/slashing.rs @@ -17,7 +17,7 @@ //! Primitives types used for dispute slashing. use crate::{CandidateHash, SessionIndex, ValidatorId, ValidatorIndex}; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 1af73993f640..fecad783f7cb 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -20,10 +20,10 @@ use crate::v7::*; use sp_std::prelude::*; -use parity_scale_codec::{Decode, Encode}; -use primitives::RuntimeDebug; +use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_arithmetic::Perbill; +use sp_core::RuntimeDebug; /// Scheduler configuration parameters. All coretime/ondemand parameters are here. 
#[derive( diff --git a/polkadot/rpc/Cargo.toml b/polkadot/rpc/Cargo.toml index 1900b595d671..cceb4dc5a93b 100644 --- a/polkadot/rpc/Cargo.toml +++ b/polkadot/rpc/Cargo.toml @@ -32,8 +32,8 @@ sc-consensus-epochs = { path = "../../substrate/client/consensus/epochs" } sc-consensus-grandpa = { path = "../../substrate/client/consensus/grandpa" } sc-consensus-grandpa-rpc = { path = "../../substrate/client/consensus/grandpa/rpc" } sc-sync-state-rpc = { path = "../../substrate/client/sync-state-rpc" } -txpool-api = { package = "sc-transaction-pool-api", path = "../../substrate/client/transaction-pool/api" } -frame-rpc-system = { package = "substrate-frame-rpc-system", path = "../../substrate/utils/frame/rpc/system" } +sc-transaction-pool-api = { path = "../../substrate/client/transaction-pool/api" } +substrate-frame-rpc-system = { path = "../../substrate/utils/frame/rpc/system" } mmr-rpc = { path = "../../substrate/client/merkle-mountain-range/rpc" } pallet-transaction-payment-rpc = { path = "../../substrate/frame/transaction-payment/rpc" } sp-block-builder = { path = "../../substrate/primitives/block-builder" } diff --git a/polkadot/rpc/src/lib.rs b/polkadot/rpc/src/lib.rs index 2daa246102fc..7d678ada5ff5 100644 --- a/polkadot/rpc/src/lib.rs +++ b/polkadot/rpc/src/lib.rs @@ -28,6 +28,7 @@ use sc_consensus_beefy::communication::notification::{ }; use sc_consensus_grandpa::FinalityProofProvider; pub use sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor}; +use sc_transaction_pool_api::TransactionPool; use sp_api::ProvideRuntimeApi; use sp_application_crypto::RuntimeAppPublic; use sp_block_builder::BlockBuilder; @@ -36,7 +37,6 @@ use sp_consensus::SelectChain; use sp_consensus_babe::BabeApi; use sp_consensus_beefy::AuthorityIdBound; use sp_keystore::KeystorePtr; -use txpool_api::TransactionPool; /// A type representing all RPC extensions. 
pub type RpcExtension = RpcModule<()>; @@ -107,7 +107,7 @@ where + Send + Sync + 'static, - C::Api: frame_rpc_system::AccountNonceApi, + C::Api: substrate_frame_rpc_system::AccountNonceApi, C::Api: mmr_rpc::MmrRuntimeApi::Hash, BlockNumber>, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BabeApi, @@ -119,7 +119,6 @@ where AuthorityId: AuthorityIdBound, ::Signature: Send + Sync, { - use frame_rpc_system::{System, SystemApiServer}; use mmr_rpc::{Mmr, MmrApiServer}; use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; use sc_consensus_babe_rpc::{Babe, BabeApiServer}; @@ -127,6 +126,7 @@ where use sc_consensus_grandpa_rpc::{Grandpa, GrandpaApiServer}; use sc_rpc_spec_v2::chain_spec::{ChainSpec, ChainSpecApiServer}; use sc_sync_state_rpc::{SyncState, SyncStateApiServer}; + use substrate_frame_rpc_system::{System, SystemApiServer}; use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer}; let mut io = RpcModule::new(()); diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index 3a6414881768..da89bd2251ac 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] impl-trait-for-tuples = "0.2.2" bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } @@ -21,7 +21,7 @@ serde_derive = { workspace = true } static_assertions = "1.1.0" sp-api = { path = "../../../substrate/primitives/api", default-features = false } -inherents = { package = "sp-inherents", path = 
"../../../substrate/primitives/inherents", default-features = false } +sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } @@ -51,9 +51,9 @@ frame-election-provider-support = { path = "../../../substrate/frame/election-pr frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } pallet-babe = { path = "../../../substrate/frame/babe", default-features = false, optional = true } -primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } +polkadot-primitives = { path = "../../primitives", default-features = false } libsecp256k1 = { version = "0.7.0", default-features = false } -runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parachains", default-features = false } +polkadot-runtime-parachains = { path = "../parachains", default-features = false } slot-range-helper = { path = "slot_range_helper", default-features = false } xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } @@ -69,18 +69,18 @@ sp-keystore = { path = "../../../substrate/primitives/keystore" } sp-keyring = { path = "../../../substrate/primitives/keyring" } serde_json = { workspace = true, default-features = true } libsecp256k1 = "0.7.0" -test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" } +polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } [features] default = ["std"] no_std = [] std = [ "bitvec/std", + "codec/std", "frame-benchmarking?/std", "frame-election-provider-support/std", "frame-support/std", "frame-system/std", - "inherents/std", 
"libsecp256k1/std", "log/std", "pallet-asset-rate?/std", @@ -97,15 +97,15 @@ std = [ "pallet-transaction-payment/std", "pallet-treasury/std", "pallet-vesting/std", - "parity-scale-codec/std", - "primitives/std", - "runtime-parachains/std", + "polkadot-primitives/std", + "polkadot-runtime-parachains/std", "rustc-hex/std", "scale-info/std", "serde/std", "slot-range-helper/std", "sp-api/std", "sp-core/std", + "sp-inherents/std", "sp-io/std", "sp-npos-elections/std", "sp-runtime/std", @@ -134,8 +134,8 @@ runtime-benchmarks = [ "pallet-timestamp/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", - "primitives/runtime-benchmarks", - "runtime-parachains/runtime-benchmarks", + "polkadot-primitives/runtime-benchmarks", + "polkadot-runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "sp-staking/runtime-benchmarks", "xcm-builder/runtime-benchmarks", @@ -160,6 +160,6 @@ try-runtime = [ "pallet-transaction-payment/try-runtime", "pallet-treasury/try-runtime", "pallet-vesting/try-runtime", - "runtime-parachains/try-runtime", + "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", ] diff --git a/polkadot/runtime/common/slot_range_helper/Cargo.toml b/polkadot/runtime/common/slot_range_helper/Cargo.toml index 314e101ad221..47e8fea24002 100644 --- a/polkadot/runtime/common/slot_range_helper/Cargo.toml +++ b/polkadot/runtime/common/slot_range_helper/Cargo.toml @@ -12,10 +12,10 @@ workspace = true [dependencies] paste = "1.0" enumn = "0.1.12" -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } sp-std = { package = "sp-std", path = "../../../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } [features] default = ["std"] -std = ["parity-scale-codec/std", 
"sp-runtime/std", "sp-std/std"] +std = ["codec/std", "sp-runtime/std", "sp-std/std"] diff --git a/polkadot/runtime/common/slot_range_helper/src/lib.rs b/polkadot/runtime/common/slot_range_helper/src/lib.rs index bbe5b61ae1f3..f907390bc91b 100644 --- a/polkadot/runtime/common/slot_range_helper/src/lib.rs +++ b/polkadot/runtime/common/slot_range_helper/src/lib.rs @@ -18,8 +18,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +pub use codec::{Decode, Encode}; pub use enumn::N; -pub use parity_scale_codec::{Decode, Encode}; pub use paste; pub use sp_runtime::traits::CheckedSub; pub use sp_std::{ops::Add, result}; diff --git a/polkadot/runtime/common/src/assigned_slots/benchmarking.rs b/polkadot/runtime/common/src/assigned_slots/benchmarking.rs index 61638fe6cabf..882bfa051c83 100644 --- a/polkadot/runtime/common/src/assigned_slots/benchmarking.rs +++ b/polkadot/runtime/common/src/assigned_slots/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use frame_benchmarking::v2::*; use frame_support::assert_ok; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; -use primitives::Id as ParaId; +use polkadot_primitives::Id as ParaId; use sp_runtime::traits::Bounded; type CurrencyOf = <::Leaser as Leaser>>::Currency; diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs index 92a8e46f5f9c..368708f25640 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -30,12 +30,12 @@ use crate::{ slots::{self, Pallet as Slots, WeightInfo as SlotsWeightInfo}, traits::{LeaseError, Leaser, Registrar}, }; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{pallet_prelude::*, traits::Currency}; use frame_system::pallet_prelude::*; pub use pallet::*; -use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; -use primitives::Id as ParaId; -use runtime_parachains::{ +use polkadot_primitives::Id as ParaId; +use polkadot_runtime_parachains::{ 
configuration, paras::{self}, }; @@ -428,7 +428,8 @@ pub mod pallet { // Force downgrade to on-demand parachain (if needed) before end of lease period if is_parachain { - if let Err(err) = runtime_parachains::schedule_parachain_downgrade::(id) { + if let Err(err) = polkadot_runtime_parachains::schedule_parachain_downgrade::(id) + { // Treat failed downgrade as warning .. slot lease has been cleared, // so the parachain will be downgraded anyway by the slots pallet // at the end of the lease period . @@ -630,12 +631,12 @@ mod tests { use super::*; use crate::{assigned_slots, mock::TestRegistrar, slots}; - use ::test_helpers::{dummy_head_data, dummy_validation_code}; use frame_support::{assert_noop, assert_ok, derive_impl, parameter_types}; use frame_system::EnsureRoot; use pallet_balances; - use primitives::BlockNumber; - use runtime_parachains::{ + use polkadot_primitives::BlockNumber; + use polkadot_primitives_test_helpers::{dummy_head_data, dummy_validation_code}; + use polkadot_runtime_parachains::{ configuration as parachains_configuration, paras as parachains_paras, shared as parachains_shared, }; diff --git a/polkadot/runtime/common/src/auctions.rs b/polkadot/runtime/common/src/auctions.rs index e7b7c081ae4e..199b18fba51d 100644 --- a/polkadot/runtime/common/src/auctions.rs +++ b/polkadot/runtime/common/src/auctions.rs @@ -22,6 +22,7 @@ use crate::{ slot_range::SlotRange, traits::{AuctionStatus, Auctioneer, LeaseError, Leaser, Registrar}, }; +use codec::Decode; use frame_support::{ dispatch::DispatchResult, ensure, @@ -30,8 +31,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::BlockNumberFor; pub use pallet::*; -use parity_scale_codec::Decode; -use primitives::Id as ParaId; +use polkadot_primitives::Id as ParaId; use sp_runtime::traits::{CheckedSub, One, Saturating, Zero}; use sp_std::{mem::swap, prelude::*}; @@ -671,7 +671,6 @@ impl Pallet { mod tests { use super::*; use crate::{auctions, mock::TestRegistrar}; - use ::test_helpers::{dummy_hash, 
dummy_head_data, dummy_validation_code}; use frame_support::{ assert_noop, assert_ok, assert_storage_noop, derive_impl, ord_parameter_types, parameter_types, @@ -679,7 +678,8 @@ mod tests { }; use frame_system::{EnsureRoot, EnsureSignedBy}; use pallet_balances; - use primitives::{BlockNumber, Id as ParaId}; + use polkadot_primitives::{BlockNumber, Id as ParaId}; + use polkadot_primitives_test_helpers::{dummy_hash, dummy_head_data, dummy_validation_code}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -1728,7 +1728,7 @@ mod benchmarking { traits::{EnsureOrigin, OnInitialize}, }; use frame_system::RawOrigin; - use runtime_parachains::paras; + use polkadot_runtime_parachains::paras; use sp_runtime::{traits::Bounded, SaturatedConversion}; use frame_benchmarking::{account, benchmarks, whitelisted_caller, BenchmarkError}; diff --git a/polkadot/runtime/common/src/claims.rs b/polkadot/runtime/common/src/claims.rs index 8407c7f0dda9..54208e7fd135 100644 --- a/polkadot/runtime/common/src/claims.rs +++ b/polkadot/runtime/common/src/claims.rs @@ -16,6 +16,7 @@ //! Pallet to process claims from Ethereum addresses. +use codec::{Decode, Encode}; use frame_support::{ ensure, traits::{Currency, Get, IsSubType, VestingSchedule}, @@ -23,8 +24,7 @@ use frame_support::{ DefaultNoBound, }; pub use pallet::*; -use parity_scale_codec::{Decode, Encode}; -use primitives::ValidityError; +use polkadot_primitives::ValidityError; use scale_info::TypeInfo; use serde::{self, Deserialize, Deserializer, Serialize, Serializer}; use sp_io::{crypto::secp256k1_ecdsa_recover, hashing::keccak_256}; @@ -699,7 +699,7 @@ mod tests { use hex_literal::hex; use secp_utils::*; - use parity_scale_codec::Encode; + use codec::Encode; // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. 
use crate::claims; diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs index 0aecbcd531c4..1dbba363de56 100644 --- a/polkadot/runtime/common/src/crowdloan/mod.rs +++ b/polkadot/runtime/common/src/crowdloan/mod.rs @@ -55,6 +55,7 @@ use crate::{ slot_range::SlotRange, traits::{Auctioneer, Registrar}, }; +use codec::{Decode, Encode}; use frame_support::{ ensure, pallet_prelude::{DispatchResult, Weight}, @@ -68,8 +69,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::BlockNumberFor; pub use pallet::*; -use parity_scale_codec::{Decode, Encode}; -use primitives::Id as ParaId; +use polkadot_primitives::Id as ParaId; use scale_info::TypeInfo; use sp_runtime::{ traits::{ @@ -862,7 +862,7 @@ mod tests { assert_noop, assert_ok, derive_impl, parameter_types, traits::{ConstU32, OnFinalize, OnInitialize}, }; - use primitives::Id as ParaId; + use polkadot_primitives::Id as ParaId; use sp_core::H256; use std::{cell::RefCell, collections::BTreeMap, sync::Arc}; // The testing primitives are very useful for avoiding having to work with signatures @@ -872,7 +872,7 @@ mod tests { mock::TestRegistrar, traits::{AuctionStatus, OnSwap}, }; - use ::test_helpers::{dummy_head_data, dummy_validation_code}; + use polkadot_primitives_test_helpers::{dummy_head_data, dummy_validation_code}; use sp_keystore::{testing::MemoryKeystore, KeystoreExt}; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup, TrailingZeroInput}, @@ -1979,7 +1979,7 @@ mod benchmarking { use super::{Pallet as Crowdloan, *}; use frame_support::{assert_ok, traits::OnInitialize}; use frame_system::RawOrigin; - use runtime_parachains::paras; + use polkadot_runtime_parachains::paras; use sp_core::crypto::UncheckedFrom; use sp_runtime::traits::{Bounded, CheckedSub}; use sp_std::prelude::*; diff --git a/polkadot/runtime/common/src/identity_migrator.rs b/polkadot/runtime/common/src/identity_migrator.rs index bf334a63e958..7d02e24b5368 100644 --- 
a/polkadot/runtime/common/src/identity_migrator.rs +++ b/polkadot/runtime/common/src/identity_migrator.rs @@ -172,10 +172,10 @@ impl OnReapIdentity for () { #[benchmarks] mod benchmarks { use super::*; + use codec::Encode; use frame_support::traits::EnsureOrigin; use frame_system::RawOrigin; use pallet_identity::{Data, IdentityInformationProvider, Judgement, Pallet as Identity}; - use parity_scale_codec::Encode; use sp_runtime::{ traits::{Bounded, Hash, StaticLookup}, Saturating, diff --git a/polkadot/runtime/common/src/impls.rs b/polkadot/runtime/common/src/impls.rs index a92a05219cf8..ac2288c906a5 100644 --- a/polkadot/runtime/common/src/impls.rs +++ b/polkadot/runtime/common/src/impls.rs @@ -16,14 +16,14 @@ //! Auxiliary `struct`/`enum`s for polkadot runtime. +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::traits::{ fungible::{Balanced, Credit}, tokens::imbalance::ResolveTo, Contains, ContainsPair, Imbalance, OnUnbalanced, }; use pallet_treasury::TreasuryAccountId; -use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; -use primitives::Balance; +use polkadot_primitives::Balance; use sp_runtime::{traits::TryConvert, Perquintill, RuntimeDebug}; use xcm::VersionedLocation; @@ -32,8 +32,8 @@ pub struct ToAuthor(sp_std::marker::PhantomData); impl OnUnbalanced>> for ToAuthor where R: pallet_balances::Config + pallet_authorship::Config, - ::AccountId: From, - ::AccountId: Into, + ::AccountId: From, + ::AccountId: Into, { fn on_nonzero_unbalanced( amount: Credit<::AccountId, pallet_balances::Pallet>, @@ -48,8 +48,8 @@ pub struct DealWithFees(sp_std::marker::PhantomData); impl OnUnbalanced>> for DealWithFees where R: pallet_balances::Config + pallet_authorship::Config + pallet_treasury::Config, - ::AccountId: From, - ::AccountId: Into, + ::AccountId: From, + ::AccountId: Into, { fn on_unbalanceds( mut fees_then_tips: impl Iterator>>, @@ -255,7 +255,7 @@ mod tests { PalletId, }; use frame_system::limits; - use primitives::AccountId; + use 
polkadot_primitives::AccountId; use sp_core::{ConstU64, H256}; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index 2122e75f3e2d..e77035b3f6b4 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -24,6 +24,7 @@ use crate::{ slots, traits::{AuctionStatus, Auctioneer, Leaser, Registrar as RegistrarT}, }; +use codec::Encode; use frame_support::{ assert_noop, assert_ok, derive_impl, parameter_types, traits::{ConstU32, Currency, OnFinalize, OnInitialize}, @@ -33,12 +34,11 @@ use frame_support::{ use frame_support_test::TestRandomness; use frame_system::EnsureRoot; use pallet_identity::{self, legacy::IdentityInfo}; -use parity_scale_codec::Encode; -use primitives::{ +use polkadot_primitives::{ BlockNumber, HeadData, Id as ParaId, SessionIndex, ValidationCode, LOWEST_PUBLIC_ID, MAX_CODE_SIZE, }; -use runtime_parachains::{ +use polkadot_runtime_parachains::{ configuration, dmp, origin, paras, shared, Origin as ParaOrigin, ParaLifecycle, }; use sp_core::H256; diff --git a/polkadot/runtime/common/src/lib.rs b/polkadot/runtime/common/src/lib.rs index 60cc684149b4..6e50384f68c9 100644 --- a/polkadot/runtime/common/src/lib.rs +++ b/polkadot/runtime/common/src/lib.rs @@ -47,7 +47,7 @@ use frame_support::{ weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, }; use frame_system::limits; -use primitives::{AssignmentId, Balance, BlockNumber, ValidatorId}; +use polkadot_primitives::{AssignmentId, Balance, BlockNumber, ValidatorId}; use sp_runtime::{FixedPointNumber, Perbill, Perquintill}; use static_assertions::const_assert; @@ -123,7 +123,7 @@ macro_rules! 
impl_runtime_weights { use frame_support::{dispatch::DispatchClass, weights::Weight}; use frame_system::limits; use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment}; - pub use runtime_common::{ + pub use polkadot_runtime_common::{ impl_elections_weights, AVERAGE_ON_INITIALIZE_RATIO, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, }; @@ -165,7 +165,7 @@ macro_rules! impl_runtime_weights { /// /// This must only be used as long as the balance type is `u128`. pub type CurrencyToVote = sp_staking::currency_to_vote::U128CurrencyToVote; -static_assertions::assert_eq_size!(primitives::Balance, u128); +static_assertions::assert_eq_size!(polkadot_primitives::Balance, u128); /// A placeholder since there is currently no provided session key handler for parachain validator /// keys. diff --git a/polkadot/runtime/common/src/mock.rs b/polkadot/runtime/common/src/mock.rs index c9e3a8c39f12..6534110cc210 100644 --- a/polkadot/runtime/common/src/mock.rs +++ b/polkadot/runtime/common/src/mock.rs @@ -17,11 +17,13 @@ //! Mocking utilities for testing. 
use crate::traits::Registrar; +use codec::{Decode, Encode}; use frame_support::{dispatch::DispatchResult, weights::Weight}; use frame_system::pallet_prelude::BlockNumberFor; -use parity_scale_codec::{Decode, Encode}; -use primitives::{HeadData, Id as ParaId, PvfCheckStatement, SessionIndex, ValidationCode}; -use runtime_parachains::paras; +use polkadot_primitives::{ + HeadData, Id as ParaId, PvfCheckStatement, SessionIndex, ValidationCode, +}; +use polkadot_runtime_parachains::paras; use sp_keyring::Sr25519Keyring; use sp_runtime::{traits::SaturatedConversion, DispatchError, Permill}; use std::{cell::RefCell, collections::HashMap}; @@ -239,7 +241,9 @@ impl frame_support::traits::EstimateNextSessionRotation for TestNextSession } } -pub fn validators_public_keys(validators: &[Sr25519Keyring]) -> Vec { +pub fn validators_public_keys( + validators: &[Sr25519Keyring], +) -> Vec { validators.iter().map(|v| v.public().into()).collect() } @@ -248,7 +252,7 @@ pub fn conclude_pvf_checking( validators: &[Sr25519Keyring], session_index: SessionIndex, ) { - let num_required = primitives::supermajority_threshold(validators.len()); + let num_required = polkadot_primitives::supermajority_threshold(validators.len()); validators.iter().enumerate().take(num_required).for_each(|(idx, key)| { let validator_index = idx as u32; let statement = PvfCheckStatement { diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index c90802a40129..9bbb152f855f 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -26,8 +26,10 @@ use frame_support::{ traits::{Currency, Get, ReservableCurrency}, }; use frame_system::{self, ensure_root, ensure_signed}; -use primitives::{HeadData, Id as ParaId, ValidationCode, LOWEST_PUBLIC_ID, MIN_CODE_SIZE}; -use runtime_parachains::{ +use polkadot_primitives::{ + HeadData, Id as ParaId, ValidationCode, LOWEST_PUBLIC_ID, MIN_CODE_SIZE, 
+}; +use polkadot_runtime_parachains::{ configuration, ensure_parachain, paras::{self, ParaGenesisArgs, UpgradeStrategy}, Origin, ParaLifecycle, @@ -35,9 +37,9 @@ use runtime_parachains::{ use sp_std::{prelude::*, result}; use crate::traits::{OnSwap, Registrar}; +use codec::{Decode, Encode}; pub use pallet::*; -use parity_scale_codec::{Decode, Encode}; -use runtime_parachains::paras::{OnNewHead, ParaKind}; +use polkadot_runtime_parachains::paras::{OnNewHead, ParaKind}; use scale_info::TypeInfo; use sp_runtime::{ traits::{CheckedSub, Saturating}, @@ -425,7 +427,7 @@ pub mod pallet { new_code: ValidationCode, ) -> DispatchResult { Self::ensure_root_para_or_owner(origin, para)?; - runtime_parachains::schedule_code_upgrade::( + polkadot_runtime_parachains::schedule_code_upgrade::( para, new_code, UpgradeStrategy::ApplyAtExpectedBlock, @@ -445,7 +447,7 @@ pub mod pallet { new_head: HeadData, ) -> DispatchResult { Self::ensure_root_para_or_owner(origin, para)?; - runtime_parachains::set_current_head::(para, new_head); + polkadot_runtime_parachains::set_current_head::(para, new_head); Ok(()) } } @@ -510,7 +512,7 @@ impl Registrar for Pallet { paras::Pallet::::lifecycle(id) == Some(ParaLifecycle::Parathread), Error::::NotParathread ); - runtime_parachains::schedule_parathread_upgrade::(id) + polkadot_runtime_parachains::schedule_parathread_upgrade::(id) .map_err(|_| Error::::CannotUpgrade)?; Ok(()) @@ -523,7 +525,7 @@ impl Registrar for Pallet { paras::Pallet::::lifecycle(id) == Some(ParaLifecycle::Parachain), Error::::NotParachain ); - runtime_parachains::schedule_parachain_downgrade::(id) + polkadot_runtime_parachains::schedule_parachain_downgrade::(id) .map_err(|_| Error::::CannotDowngrade)?; Ok(()) } @@ -545,7 +547,7 @@ impl Registrar for Pallet { #[cfg(any(feature = "runtime-benchmarks", test))] fn execute_pending_transitions() { - use runtime_parachains::shared; + use polkadot_runtime_parachains::shared; 
shared::Pallet::::set_session_index(shared::Pallet::::scheduled_session()); paras::Pallet::::test_on_new_session(); } @@ -634,7 +636,7 @@ impl Pallet { Paras::::insert(id, info); // We check above that para has no lifecycle, so this should not fail. - let res = runtime_parachains::schedule_para_initialize::(id, genesis); + let res = polkadot_runtime_parachains::schedule_para_initialize::(id, genesis); debug_assert!(res.is_ok()); Self::deposit_event(Event::::Registered { para_id: id, manager: who }); Ok(()) @@ -647,7 +649,7 @@ impl Pallet { Some(ParaLifecycle::Parathread) | None => {}, _ => return Err(Error::::NotParathread.into()), } - runtime_parachains::schedule_para_cleanup::(id) + polkadot_runtime_parachains::schedule_para_cleanup::(id) .map_err(|_| Error::::CannotDeregister)?; if let Some(info) = Paras::::take(&id) { @@ -686,9 +688,9 @@ impl Pallet { /// Swap a lease holding parachain and parathread (on-demand parachain), which involves /// scheduling an appropriate lifecycle update. 
fn do_thread_and_chain_swap(to_downgrade: ParaId, to_upgrade: ParaId) { - let res1 = runtime_parachains::schedule_parachain_downgrade::(to_downgrade); + let res1 = polkadot_runtime_parachains::schedule_parachain_downgrade::(to_downgrade); debug_assert!(res1.is_ok()); - let res2 = runtime_parachains::schedule_parathread_upgrade::(to_upgrade); + let res2 = polkadot_runtime_parachains::schedule_parathread_upgrade::(to_upgrade); debug_assert!(res2.is_ok()); T::OnSwap::on_swap(to_upgrade, to_downgrade); } @@ -723,8 +725,8 @@ mod tests { }; use frame_system::limits; use pallet_balances::Error as BalancesError; - use primitives::{Balance, BlockNumber, SessionIndex, MAX_CODE_SIZE}; - use runtime_parachains::{configuration, origin, shared}; + use polkadot_primitives::{Balance, BlockNumber, SessionIndex, MAX_CODE_SIZE}; + use polkadot_runtime_parachains::{configuration, origin, shared}; use sp_core::H256; use sp_io::TestExternalities; use sp_keyring::Sr25519Keyring; @@ -941,7 +943,7 @@ mod tests { } fn para_origin(id: ParaId) -> RuntimeOrigin { - runtime_parachains::Origin::Parachain(id).into() + polkadot_runtime_parachains::Origin::Parachain(id).into() } fn max_code_size() -> u32 { @@ -1527,8 +1529,8 @@ mod benchmarking { use crate::traits::Registrar as RegistrarT; use frame_support::assert_ok; use frame_system::RawOrigin; - use primitives::{MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MIN_CODE_SIZE}; - use runtime_parachains::{paras, shared, Origin as ParaOrigin}; + use polkadot_primitives::{MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MIN_CODE_SIZE}; + use polkadot_runtime_parachains::{paras, shared, Origin as ParaOrigin}; use sp_runtime::traits::Bounded; use frame_benchmarking::{account, benchmarks, whitelisted_caller}; @@ -1554,7 +1556,7 @@ mod benchmarking { genesis_head, validation_code.clone() )); - assert_ok!(runtime_parachains::paras::Pallet::::add_trusted_validation_code( + assert_ok!(polkadot_runtime_parachains::paras::Pallet::::add_trusted_validation_code( 
frame_system::Origin::::Root.into(), validation_code, )); @@ -1595,7 +1597,7 @@ mod benchmarking { verify { assert_last_event::(Event::::Registered{ para_id: para, manager: caller }.into()); assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Onboarding)); - assert_ok!(runtime_parachains::paras::Pallet::::add_trusted_validation_code( + assert_ok!(polkadot_runtime_parachains::paras::Pallet::::add_trusted_validation_code( frame_system::Origin::::Root.into(), validation_code, )); @@ -1613,7 +1615,7 @@ mod benchmarking { verify { assert_last_event::(Event::::Registered { para_id: para, manager }.into()); assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Onboarding)); - assert_ok!(runtime_parachains::paras::Pallet::::add_trusted_validation_code( + assert_ok!(polkadot_runtime_parachains::paras::Pallet::::add_trusted_validation_code( frame_system::Origin::::Root.into(), validation_code, )); diff --git a/polkadot/runtime/common/src/paras_sudo_wrapper.rs b/polkadot/runtime/common/src/paras_sudo_wrapper.rs index b56dc96af436..3ff8d4ac08e1 100644 --- a/polkadot/runtime/common/src/paras_sudo_wrapper.rs +++ b/polkadot/runtime/common/src/paras_sudo_wrapper.rs @@ -16,12 +16,12 @@ //! A simple wrapper allowing `Sudo` to call into `paras` routines. 
+use codec::Encode; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; pub use pallet::*; -use parity_scale_codec::Encode; -use primitives::Id as ParaId; -use runtime_parachains::{ +use polkadot_primitives::Id as ParaId; +use polkadot_runtime_parachains::{ configuration, dmp, hrmp, paras::{self, AssignCoretime, ParaGenesisArgs}, ParaLifecycle, @@ -80,7 +80,7 @@ pub mod pallet { genesis: ParaGenesisArgs, ) -> DispatchResult { ensure_root(origin)?; - runtime_parachains::schedule_para_initialize::(id, genesis) + polkadot_runtime_parachains::schedule_para_initialize::(id, genesis) .map_err(|_| Error::::ParaAlreadyExists)?; T::AssignCoretime::assign_coretime(id)?; @@ -93,7 +93,7 @@ pub mod pallet { #[pallet::weight((1_000, DispatchClass::Operational))] pub fn sudo_schedule_para_cleanup(origin: OriginFor, id: ParaId) -> DispatchResult { ensure_root(origin)?; - runtime_parachains::schedule_para_cleanup::(id) + polkadot_runtime_parachains::schedule_para_cleanup::(id) .map_err(|_| Error::::CouldntCleanup)?; Ok(()) } @@ -111,7 +111,7 @@ pub mod pallet { paras::Pallet::::lifecycle(id) == Some(ParaLifecycle::Parathread), Error::::NotParathread, ); - runtime_parachains::schedule_parathread_upgrade::(id) + polkadot_runtime_parachains::schedule_parathread_upgrade::(id) .map_err(|_| Error::::CannotUpgrade)?; Ok(()) } @@ -129,7 +129,7 @@ pub mod pallet { paras::Pallet::::lifecycle(id) == Some(ParaLifecycle::Parachain), Error::::NotParachain, ); - runtime_parachains::schedule_parachain_downgrade::(id) + polkadot_runtime_parachains::schedule_parachain_downgrade::(id) .map_err(|_| Error::::CannotDowngrade)?; Ok(()) } diff --git a/polkadot/runtime/common/src/purchase.rs b/polkadot/runtime/common/src/purchase.rs index 3920a2c68c55..5ae6b422618e 100644 --- a/polkadot/runtime/common/src/purchase.rs +++ b/polkadot/runtime/common/src/purchase.rs @@ -16,13 +16,13 @@ //! Pallet to process purchase of DOTs. 
+use codec::{Decode, Encode}; use frame_support::{ pallet_prelude::*, traits::{Currency, EnsureOrigin, ExistenceRequirement, Get, VestingSchedule}, }; use frame_system::pallet_prelude::*; pub use pallet::*; -use parity_scale_codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_core::sr25519; use sp_runtime::{ diff --git a/polkadot/runtime/common/src/slots/mod.rs b/polkadot/runtime/common/src/slots/mod.rs index 9da345beea39..900e04eaff18 100644 --- a/polkadot/runtime/common/src/slots/mod.rs +++ b/polkadot/runtime/common/src/slots/mod.rs @@ -32,7 +32,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::*; pub use pallet::*; -use primitives::Id as ParaId; +use polkadot_primitives::Id as ParaId; use sp_runtime::traits::{CheckedConversion, CheckedSub, Saturating, Zero}; use sp_std::prelude::*; @@ -503,11 +503,11 @@ mod tests { use super::*; use crate::{mock::TestRegistrar, slots}; - use ::test_helpers::{dummy_head_data, dummy_validation_code}; use frame_support::{assert_noop, assert_ok, derive_impl, parameter_types}; use frame_system::EnsureRoot; use pallet_balances; - use primitives::BlockNumber; + use polkadot_primitives::BlockNumber; + use polkadot_primitives_test_helpers::{dummy_head_data, dummy_validation_code}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -985,7 +985,7 @@ mod benchmarking { use super::*; use frame_support::assert_ok; use frame_system::RawOrigin; - use runtime_parachains::paras; + use polkadot_runtime_parachains::paras; use sp_runtime::traits::{Bounded, One}; use frame_benchmarking::{account, benchmarks, whitelisted_caller, BenchmarkError}; diff --git a/polkadot/runtime/common/src/traits.rs b/polkadot/runtime/common/src/traits.rs index 8f75bf5c2fd8..2ed1fb8af9be 100644 --- a/polkadot/runtime/common/src/traits.rs +++ b/polkadot/runtime/common/src/traits.rs @@ -20,7 +20,7 @@ use frame_support::{ dispatch::DispatchResult, traits::{Currency, ReservableCurrency}, }; -use primitives::{HeadData, Id as 
ParaId, ValidationCode}; +use polkadot_primitives::{HeadData, Id as ParaId, ValidationCode}; use sp_std::vec::*; /// Parachain registration API. diff --git a/polkadot/runtime/common/src/xcm_sender.rs b/polkadot/runtime/common/src/xcm_sender.rs index cbec1a8ca103..5858a0ac3ca7 100644 --- a/polkadot/runtime/common/src/xcm_sender.rs +++ b/polkadot/runtime/common/src/xcm_sender.rs @@ -16,11 +16,11 @@ //! XCM sender for relay chain. +use codec::{Decode, Encode}; use frame_support::traits::Get; use frame_system::pallet_prelude::BlockNumberFor; -use parity_scale_codec::{Decode, Encode}; -use primitives::Id as ParaId; -use runtime_parachains::{ +use polkadot_primitives::Id as ParaId; +use polkadot_runtime_parachains::{ configuration::{self, HostConfiguration}, dmp, FeeTracker, }; @@ -259,7 +259,7 @@ mod tests { use super::*; use crate::integration_tests::new_test_ext; use frame_support::{assert_ok, parameter_types}; - use runtime_parachains::FeeTracker; + use polkadot_runtime_parachains::FeeTracker; use sp_runtime::FixedU128; use xcm::MAX_XCM_DECODE_DEPTH; diff --git a/polkadot/runtime/metrics/Cargo.toml b/polkadot/runtime/metrics/Cargo.toml index 76c1d134fa18..342c5a885033 100644 --- a/polkadot/runtime/metrics/Cargo.toml +++ b/polkadot/runtime/metrics/Cargo.toml @@ -12,8 +12,8 @@ workspace = true [dependencies] sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false } -parity-scale-codec = { version = "3.6.12", default-features = false } -primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +polkadot-primitives = { path = "../../primitives", default-features = false } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } bs58 = { version = 
"0.5.0", default-features = false, features = ["alloc"] } @@ -22,9 +22,9 @@ bs58 = { version = "0.5.0", default-features = false, features = ["alloc"] } default = ["std"] std = [ "bs58/std", + "codec/std", "frame-benchmarking?/std", - "parity-scale-codec/std", - "primitives/std", + "polkadot-primitives/std", "sp-std/std", "sp-tracing/std", ] diff --git a/polkadot/runtime/metrics/src/with_runtime_metrics.rs b/polkadot/runtime/metrics/src/with_runtime_metrics.rs index 562aa9ca162b..1339df9ff687 100644 --- a/polkadot/runtime/metrics/src/with_runtime_metrics.rs +++ b/polkadot/runtime/metrics/src/with_runtime_metrics.rs @@ -22,8 +22,8 @@ const TRACING_TARGET: &'static str = "metrics"; -use parity_scale_codec::Encode; -use primitives::{ +use codec::Encode; +use polkadot_primitives::{ metric_definitions::{CounterDefinition, CounterVecDefinition, HistogramDefinition}, RuntimeMetricLabelValues, RuntimeMetricOp, RuntimeMetricUpdate, }; diff --git a/polkadot/runtime/metrics/src/without_runtime_metrics.rs b/polkadot/runtime/metrics/src/without_runtime_metrics.rs index 41d9c24635ae..555cdf4751c8 100644 --- a/polkadot/runtime/metrics/src/without_runtime_metrics.rs +++ b/polkadot/runtime/metrics/src/without_runtime_metrics.rs @@ -18,7 +18,7 @@ //! provide a dummy implementation for the native runtime to avoid cluttering the runtime code //! with `#[cfg(feature = "runtime-metrics")]`. 
-use primitives::metric_definitions::{ +use polkadot_primitives::metric_definitions::{ CounterDefinition, CounterVecDefinition, HistogramDefinition, }; diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index d00a19c6ddb8..250fee65beef 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] impl-trait-for-tuples = "0.2.2" bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } @@ -21,7 +21,7 @@ derive_more = "0.99.17" bitflags = "1.3.2" sp-api = { path = "../../../substrate/primitives/api", default-features = false } -inherents = { package = "sp-inherents", path = "../../../substrate/primitives/inherents", default-features = false } +sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } @@ -49,7 +49,7 @@ frame-system = { path = "../../../substrate/frame/system", default-features = fa xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false } -primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } 
+polkadot-primitives = { path = "../../primitives", default-features = false } rand = { version = "0.8.5", default-features = false } rand_chacha = { version = "0.3.1", default-features = false } @@ -61,10 +61,10 @@ polkadot-core-primitives = { path = "../../core-primitives", default-features = [dev-dependencies] futures = "0.3.30" hex-literal = "0.4.1" -keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" } +sp-keyring = { path = "../../../substrate/primitives/keyring" } frame-support-test = { path = "../../../substrate/frame/support/test" } sc-keystore = { path = "../../../substrate/client/keystore" } -test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" } +polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } sp-tracing = { path = "../../../substrate/primitives/tracing" } sp-crypto-hashing = { path = "../../../substrate/primitives/crypto/hashing" } thousands = "0.2.0" @@ -77,10 +77,10 @@ default = ["std"] no_std = [] std = [ "bitvec/std", + "codec/std", "frame-benchmarking?/std", "frame-support/std", "frame-system/std", - "inherents/std", "log/std", "pallet-authority-discovery/std", "pallet-authorship/std", @@ -92,11 +92,10 @@ std = [ "pallet-staking/std", "pallet-timestamp/std", "pallet-vesting/std", - "parity-scale-codec/std", "polkadot-core-primitives/std", "polkadot-parachain-primitives/std", + "polkadot-primitives/std", "polkadot-runtime-metrics/std", - "primitives/std", "rand/std", "rand_chacha/std", "rustc-hex/std", @@ -106,6 +105,7 @@ std = [ "sp-application-crypto?/std", "sp-arithmetic/std", "sp-core/std", + "sp-inherents/std", "sp-io/std", "sp-keystore", "sp-keystore?/std", @@ -128,7 +128,7 @@ runtime-benchmarks = [ "pallet-timestamp/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", - "primitives/runtime-benchmarks", + "polkadot-primitives/runtime-benchmarks", "sp-application-crypto", 
"sp-runtime/runtime-benchmarks", "sp-staking/runtime-benchmarks", diff --git a/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs b/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs index e2ba0b4f7ea5..6c63e062c4b9 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs @@ -26,7 +26,7 @@ use crate::{ }; use sp_runtime::Perbill; -use primitives::{Balance, HeadData, ValidationCode}; +use polkadot_primitives::{Balance, HeadData, ValidationCode}; fn default_genesis_config() -> MockGenesisConfig { MockGenesisConfig { @@ -44,7 +44,7 @@ pub struct GenesisConfigBuilder { pub on_demand_fee_variability: Perbill, pub on_demand_max_queue_size: u32, pub on_demand_target_queue_utilization: Perbill, - pub onboarded_on_demand_chains: Vec, + pub onboarded_on_demand_chains: Vec, } impl Default for GenesisConfigBuilder { diff --git a/polkadot/runtime/parachains/src/assigner_coretime/mod.rs b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs index 1e821dd86846..e68ac2664b89 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs @@ -37,7 +37,7 @@ use crate::{ use frame_support::{defensive, pallet_prelude::*}; use frame_system::pallet_prelude::*; use pallet_broker::CoreAssignment; -use primitives::CoreIndex; +use polkadot_primitives::CoreIndex; use sp_runtime::traits::{One, Saturating}; use sp_std::prelude::*; @@ -317,7 +317,7 @@ impl AssignmentProvider> for Pallet { } #[cfg(any(feature = "runtime-benchmarks", test))] - fn get_mock_assignment(_: CoreIndex, para_id: primitives::Id) -> Assignment { + fn get_mock_assignment(_: CoreIndex, para_id: polkadot_primitives::Id) -> Assignment { // Given that we are not tracking anything in `Bulk` assignments, it is safe to always // return a bulk assignment. 
Assignment::Bulk(para_id) diff --git a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs index 5d42a9d0c8ee..41cf21e267e4 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs @@ -28,7 +28,7 @@ use crate::{ }; use frame_support::{assert_noop, assert_ok, pallet_prelude::*, traits::Currency}; use pallet_broker::TaskId; -use primitives::{BlockNumber, Id as ParaId, SessionIndex, ValidationCode}; +use polkadot_primitives::{BlockNumber, Id as ParaId, SessionIndex, ValidationCode}; use sp_std::collections::btree_map::BTreeMap; fn schedule_blank_para(id: ParaId, parakind: ParaKind) { diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs index 779d6f04e396..ba6951a14692 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs @@ -29,7 +29,7 @@ use frame_benchmarking::v2::*; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; -use primitives::{ +use polkadot_primitives::{ HeadData, Id as ParaId, SessionIndex, ValidationCode, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs index 50e5e1daf41a..314be11adbeb 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs @@ -143,7 +143,7 @@ pub type MigrateV0ToV1 = VersionedMigration< mod tests { use super::{v0, v1, UncheckedOnRuntimeUpgrade, Weight}; use crate::mock::{new_test_ext, MockGenesisConfig, OnDemandAssigner, Test}; - use primitives::Id as ParaId; + use polkadot_primitives::Id as ParaId; #[test] fn migration_to_v1_preserves_queue_ordering() { diff --git 
a/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs index f8d1a894f0e4..d2a7a221587d 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs @@ -25,7 +25,7 @@ use crate::{ paras::{ParaGenesisArgs, ParaKind}, }; -use primitives::{Balance, HeadData, ValidationCode}; +use polkadot_primitives::{Balance, HeadData, ValidationCode}; fn default_genesis_config() -> MockGenesisConfig { MockGenesisConfig { diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index 795759b3b39e..043a36d99c49 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -53,7 +53,7 @@ use frame_support::{ }, }; use frame_system::pallet_prelude::*; -use primitives::{CoreIndex, Id as ParaId, ON_DEMAND_MAX_QUEUE_MAX_SIZE}; +use polkadot_primitives::{CoreIndex, Id as ParaId, ON_DEMAND_MAX_QUEUE_MAX_SIZE}; use sp_runtime::{ traits::{One, SaturatedConversion}, FixedPointNumber, FixedPointOperand, FixedU128, Perbill, Saturating, diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs index 982efe77b939..8ac6ab77beee 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs @@ -27,7 +27,7 @@ use crate::{ }; use frame_support::{assert_noop, assert_ok, error::BadOrigin}; use pallet_balances::Error as BalancesError; -use primitives::{BlockNumber, SessionIndex, ValidationCode}; +use polkadot_primitives::{BlockNumber, SessionIndex, ValidationCode}; use sp_std::collections::btree_map::BTreeMap; fn schedule_blank_para(id: ParaId, parakind: ParaKind) { diff --git a/polkadot/runtime/parachains/src/assigner_parachains.rs 
b/polkadot/runtime/parachains/src/assigner_parachains.rs index e79facd1fef0..3c735b999cf2 100644 --- a/polkadot/runtime/parachains/src/assigner_parachains.rs +++ b/polkadot/runtime/parachains/src/assigner_parachains.rs @@ -23,7 +23,7 @@ mod mock_helpers; mod tests; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::CoreIndex; +use polkadot_primitives::CoreIndex; use crate::{ configuration, paras, @@ -59,7 +59,7 @@ impl AssignmentProvider> for Pallet { fn push_back_assignment(_: Assignment) {} #[cfg(any(feature = "runtime-benchmarks", test))] - fn get_mock_assignment(_: CoreIndex, para_id: primitives::Id) -> Assignment { + fn get_mock_assignment(_: CoreIndex, para_id: polkadot_primitives::Id) -> Assignment { Assignment::Bulk(para_id) } diff --git a/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs b/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs index a46e114daeaf..d984fd9232c3 100644 --- a/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs +++ b/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs @@ -21,7 +21,7 @@ use crate::{ paras::{ParaGenesisArgs, ParaKind}, }; -use primitives::{Balance, HeadData, ValidationCode}; +use polkadot_primitives::{Balance, HeadData, ValidationCode}; use sp_runtime::Perbill; fn default_genesis_config() -> MockGenesisConfig { @@ -40,7 +40,7 @@ pub struct GenesisConfigBuilder { pub on_demand_fee_variability: Perbill, pub on_demand_max_queue_size: u32, pub on_demand_target_queue_utilization: Perbill, - pub onboarded_on_demand_chains: Vec, + pub onboarded_on_demand_chains: Vec, } impl Default for GenesisConfigBuilder { diff --git a/polkadot/runtime/parachains/src/assigner_parachains/tests.rs b/polkadot/runtime/parachains/src/assigner_parachains/tests.rs index a110686aaeb0..ebd24e89162a 100644 --- a/polkadot/runtime/parachains/src/assigner_parachains/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_parachains/tests.rs @@ -24,7 +24,7 @@ use 
crate::{ paras::{ParaGenesisArgs, ParaKind}, }; use frame_support::{assert_ok, pallet_prelude::*}; -use primitives::{BlockNumber, Id as ParaId, SessionIndex, ValidationCode}; +use polkadot_primitives::{BlockNumber, Id as ParaId, SessionIndex, ValidationCode}; use sp_std::collections::btree_map::BTreeMap; fn schedule_blank_para(id: ParaId, parakind: ParaKind) { diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index d1e2bc392feb..5ed5a2b527c0 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -24,7 +24,7 @@ use crate::{ use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; -use primitives::{ +use polkadot_primitives::{ collator_signature_payload, node_features::FeatureIndex, AvailabilityBitfield, BackedCandidate, CandidateCommitments, CandidateDescriptor, CandidateHash, CollatorId, CollatorSignature, CommittedCandidateReceipt, CompactStatement, CoreIndex, DisputeStatement, DisputeStatementSet, diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index 34923897f02b..10ecaa16a846 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -19,13 +19,13 @@ //! Configuration can change only at session boundaries and is buffered until then. 
use crate::{inclusion::MAX_UPWARD_MESSAGE_SIZE_BOUND, shared}; +use codec::{Decode, Encode}; use frame_support::{pallet_prelude::*, DefaultNoBound}; use frame_system::pallet_prelude::*; -use parity_scale_codec::{Decode, Encode}; use polkadot_parachain_primitives::primitives::{ MAX_HORIZONTAL_MESSAGE_NUM, MAX_UPWARD_MESSAGE_NUM, }; -use primitives::{ +use polkadot_primitives::{ ApprovalVotingParams, AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, NodeFeatures, SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, @@ -42,7 +42,7 @@ mod benchmarking; pub mod migration; pub use pallet::*; -use primitives::vstaging::SchedulerParams; +use polkadot_primitives::vstaging::SchedulerParams; const LOG_TARGET: &str = "runtime::configuration"; @@ -1276,7 +1276,7 @@ pub mod pallet { fn integrity_test() { assert_eq!( &ActiveConfig::::hashed_key(), - primitives::well_known_keys::ACTIVE_CONFIG, + polkadot_primitives::well_known_keys::ACTIVE_CONFIG, "`well_known_keys::ACTIVE_CONFIG` doesn't match key of `ActiveConfig`! Make sure that the name of the\ configuration pallet is `Configuration` in the runtime!", ); diff --git a/polkadot/runtime/parachains/src/configuration/benchmarking.rs b/polkadot/runtime/parachains/src/configuration/benchmarking.rs index 882b5aab096a..adc7f31a7b29 100644 --- a/polkadot/runtime/parachains/src/configuration/benchmarking.rs +++ b/polkadot/runtime/parachains/src/configuration/benchmarking.rs @@ -17,7 +17,7 @@ use crate::configuration::*; use frame_benchmarking::{benchmarks, BenchmarkError, BenchmarkResult}; use frame_system::RawOrigin; -use primitives::{ExecutorParam, ExecutorParams, PvfExecKind, PvfPrepKind}; +use polkadot_primitives::{ExecutorParam, ExecutorParams, PvfExecKind, PvfPrepKind}; use sp_runtime::traits::One; benchmarks! 
{ diff --git a/polkadot/runtime/parachains/src/configuration/migration/v10.rs b/polkadot/runtime/parachains/src/configuration/migration/v10.rs index fa72c357d7da..c53f58faaf03 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v10.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v10.rs @@ -23,7 +23,7 @@ use frame_support::{ weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{ +use polkadot_primitives::{ AsyncBackingParams, Balance, ExecutorParams, NodeFeatures, SessionIndex, LEGACY_MIN_BACKING_VOTES, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; @@ -275,7 +275,7 @@ fn migrate_to_v10() -> Weight { mod tests { use super::*; use crate::mock::{new_test_ext, Test}; - use primitives::LEGACY_MIN_BACKING_VOTES; + use polkadot_primitives::LEGACY_MIN_BACKING_VOTES; #[test] fn v10_deserialized_from_actual_data() { @@ -304,7 +304,8 @@ mod tests { ]; let v10 = - V10HostConfiguration::::decode(&mut &raw_config[..]).unwrap(); + V10HostConfiguration::::decode(&mut &raw_config[..]) + .unwrap(); // We check only a sample of the values here. If we missed any fields or messed up data // types that would skew all the fields coming after. @@ -333,7 +334,7 @@ mod tests { // We specify only the picked fields and the rest should be provided by the `Default` // implementation. That implementation is copied over between the two types and should work // fine. - let v9 = V9HostConfiguration:: { + let v9 = V9HostConfiguration:: { needed_approvals: 69, paras_availability_period: 55, hrmp_recipient_deposit: 1337, @@ -368,7 +369,7 @@ mod tests { // pallet's storage. #[test] fn test_migrate_to_v10_no_pending() { - let v9 = V9HostConfiguration::::default(); + let v9 = V9HostConfiguration::::default(); new_test_ext(Default::default()).execute_with(|| { // Implant the v9 version in the state. 
diff --git a/polkadot/runtime/parachains/src/configuration/migration/v11.rs b/polkadot/runtime/parachains/src/configuration/migration/v11.rs index 65656e8d7c06..4d1bfc26196c 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v11.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v11.rs @@ -24,7 +24,7 @@ use frame_support::{ weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{ +use polkadot_primitives::{ ApprovalVotingParams, AsyncBackingParams, ExecutorParams, NodeFeatures, SessionIndex, LEGACY_MIN_BACKING_VOTES, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; @@ -289,7 +289,7 @@ approval_voting_params : ApprovalVotingParams { #[cfg(test)] mod tests { - use primitives::LEGACY_MIN_BACKING_VOTES; + use polkadot_primitives::LEGACY_MIN_BACKING_VOTES; use super::*; use crate::mock::{new_test_ext, Test}; @@ -321,7 +321,8 @@ mod tests { ]; let v11 = - V11HostConfiguration::::decode(&mut &raw_config[..]).unwrap(); + V11HostConfiguration::::decode(&mut &raw_config[..]) + .unwrap(); // We check only a sample of the values here. If we missed any fields or messed up data // types that would skew all the fields coming after. @@ -348,7 +349,7 @@ mod tests { // We specify only the picked fields and the rest should be provided by the `Default` // implementation. That implementation is copied over between the two types and should work // fine. - let v10 = V10HostConfiguration:: { + let v10 = V10HostConfiguration:: { needed_approvals: 69, paras_availability_period: 55, hrmp_recipient_deposit: 1337, @@ -424,7 +425,7 @@ mod tests { // pallet's storage. #[test] fn test_migrate_to_v11_no_pending() { - let v10 = V10HostConfiguration::::default(); + let v10 = V10HostConfiguration::::default(); new_test_ext(Default::default()).execute_with(|| { // Implant the v10 version in the state. 
diff --git a/polkadot/runtime/parachains/src/configuration/migration/v12.rs b/polkadot/runtime/parachains/src/configuration/migration/v12.rs index 69bacc83d044..126597ed8454 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v12.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v12.rs @@ -23,7 +23,7 @@ use frame_support::{ traits::{Defensive, UncheckedOnRuntimeUpgrade}, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::vstaging::SchedulerParams; +use polkadot_primitives::vstaging::SchedulerParams; use sp_core::Get; use sp_staking::SessionIndex; use sp_std::vec::Vec; @@ -181,7 +181,7 @@ fn migrate_to_v12() -> Weight { #[cfg(test)] mod tests { - use primitives::LEGACY_MIN_BACKING_VOTES; + use polkadot_primitives::LEGACY_MIN_BACKING_VOTES; use sp_arithmetic::Perbill; use super::*; @@ -214,7 +214,8 @@ mod tests { ]; let v12 = - V12HostConfiguration::::decode(&mut &raw_config[..]).unwrap(); + V12HostConfiguration::::decode(&mut &raw_config[..]) + .unwrap(); // We check only a sample of the values here. If we missed any fields or messed up data // types that would skew all the fields coming after. @@ -251,7 +252,7 @@ mod tests { // We specify only the picked fields and the rest should be provided by the `Default` // implementation. That implementation is copied over between the two types and should work // fine. - let v11 = V11HostConfiguration:: { + let v11 = V11HostConfiguration:: { needed_approvals: 69, paras_availability_period: 55, hrmp_recipient_deposit: 1337, @@ -334,7 +335,7 @@ mod tests { // pallet's storage. #[test] fn test_migrate_to_v12_no_pending() { - let v11 = V11HostConfiguration::::default(); + let v11 = V11HostConfiguration::::default(); new_test_ext(Default::default()).execute_with(|| { // Implant the v10 version in the state. 
diff --git a/polkadot/runtime/parachains/src/configuration/migration/v6.rs b/polkadot/runtime/parachains/src/configuration/migration/v6.rs index 19031a90bab4..bec41d3ea0dc 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v6.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v6.rs @@ -21,11 +21,11 @@ use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::BlockNumberFor; use sp_std::vec::Vec; -use primitives::{AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; +use polkadot_primitives::{AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; #[cfg(feature = "try-runtime")] use sp_std::prelude::*; -#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, Clone)] +#[derive(codec::Encode, codec::Decode, Debug, Clone)] pub struct V6HostConfiguration { pub max_code_size: u32, pub max_head_data_size: u32, diff --git a/polkadot/runtime/parachains/src/configuration/migration/v7.rs b/polkadot/runtime/parachains/src/configuration/migration/v7.rs index 1754b78e0a1d..8fe4087cf9b1 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v7.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v7.rs @@ -23,14 +23,14 @@ use frame_support::{ weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; +use polkadot_primitives::{AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; use super::v6::V6HostConfiguration; -#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, Clone)] +#[derive(codec::Encode, codec::Decode, Debug, Clone)] pub struct V7HostConfiguration { pub max_code_size: u32, pub max_head_data_size: u32, @@ -289,7 +289,8 @@ mod tests { let raw_config = 
hex_literal::hex!["00003000005000005555150000008000fbff0100000200000a000000c80000006400000000000000000000000000500000c800000a0000000000000000c0220fca950300000000000000000000c0220fca9503000000000000000000e8030000009001000a0000000000000000900100008070000000000000000000000a000000050000000500000001000000010500000001c80000000600000058020000020000002800000000000000020000000100000001020000000f000000"]; let v6 = - V6HostConfiguration::::decode(&mut &raw_config[..]).unwrap(); + V6HostConfiguration::::decode(&mut &raw_config[..]) + .unwrap(); // We check only a sample of the values here. If we missed any fields or messed up data // types that would skew all the fields coming after. @@ -312,7 +313,7 @@ mod tests { // We specify only the picked fields and the rest should be provided by the `Default` // implementation. That implementation is copied over between the two types and should work // fine. - let v6 = V6HostConfiguration:: { + let v6 = V6HostConfiguration:: { needed_approvals: 69, thread_availability_period: 55, hrmp_recipient_deposit: 1337, @@ -390,7 +391,7 @@ mod tests { // pallet's storage. #[test] fn test_migrate_to_v7_no_pending() { - let v6 = V6HostConfiguration::::default(); + let v6 = V6HostConfiguration::::default(); new_test_ext(Default::default()).execute_with(|| { // Implant the v6 version in the state. 
diff --git a/polkadot/runtime/parachains/src/configuration/migration/v8.rs b/polkadot/runtime/parachains/src/configuration/migration/v8.rs index 537dfa9abd77..0aa7f550b102 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v8.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v8.rs @@ -23,7 +23,7 @@ use frame_support::{ weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{ +use polkadot_primitives::{ AsyncBackingParams, Balance, ExecutorParams, SessionIndex, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::Perbill; @@ -304,7 +304,8 @@ mod tests { ]; let v8 = - V8HostConfiguration::::decode(&mut &raw_config[..]).unwrap(); + V8HostConfiguration::::decode(&mut &raw_config[..]) + .unwrap(); // We check only a sample of the values here. If we missed any fields or messed up data // types that would skew all the fields coming after. @@ -329,7 +330,7 @@ mod tests { // We specify only the picked fields and the rest should be provided by the `Default` // implementation. That implementation is copied over between the two types and should work // fine. - let v7 = V7HostConfiguration:: { + let v7 = V7HostConfiguration:: { needed_approvals: 69, thread_availability_period: 55, hrmp_recipient_deposit: 1337, @@ -403,7 +404,7 @@ mod tests { // pallet's storage. #[test] fn test_migrate_to_v8_no_pending() { - let v7 = V7HostConfiguration::::default(); + let v7 = V7HostConfiguration::::default(); new_test_ext(Default::default()).execute_with(|| { // Implant the v6 version in the state. 
diff --git a/polkadot/runtime/parachains/src/configuration/migration/v9.rs b/polkadot/runtime/parachains/src/configuration/migration/v9.rs index ca4bbd9dacef..6afdd3cec29e 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v9.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v9.rs @@ -23,7 +23,7 @@ use frame_support::{ weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{ +use polkadot_primitives::{ AsyncBackingParams, Balance, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; @@ -308,7 +308,8 @@ mod tests { ]; let v9 = - V9HostConfiguration::::decode(&mut &raw_config[..]).unwrap(); + V9HostConfiguration::::decode(&mut &raw_config[..]) + .unwrap(); // We check only a sample of the values here. If we missed any fields or messed up data // types that would skew all the fields coming after. @@ -334,7 +335,7 @@ mod tests { // We specify only the picked fields and the rest should be provided by the `Default` // implementation. That implementation is copied over between the two types and should work // fine. - let v8 = V8HostConfiguration:: { + let v8 = V8HostConfiguration:: { needed_approvals: 69, paras_availability_period: 55, hrmp_recipient_deposit: 1337, @@ -408,7 +409,7 @@ mod tests { // pallet's storage. #[test] fn test_migrate_to_v9_no_pending() { - let v8 = V8HostConfiguration::::default(); + let v8 = V8HostConfiguration::::default(); new_test_ext(Default::default()).execute_with(|| { // Implant the v8 version in the state. 
diff --git a/polkadot/runtime/parachains/src/configuration/tests.rs b/polkadot/runtime/parachains/src/configuration/tests.rs index 64bbb8481fc1..dad8b6458e10 100644 --- a/polkadot/runtime/parachains/src/configuration/tests.rs +++ b/polkadot/runtime/parachains/src/configuration/tests.rs @@ -513,7 +513,7 @@ fn verify_externally_accessible() { // This test verifies that the value can be accessed through the well known keys and the // host configuration decodes into the abridged version. - use primitives::{well_known_keys, AbridgedHostConfiguration}; + use polkadot_primitives::{well_known_keys, AbridgedHostConfiguration}; new_test_ext(Default::default()).execute_with(|| { let mut ground_truth = HostConfiguration::default(); diff --git a/polkadot/runtime/parachains/src/coretime/migration.rs b/polkadot/runtime/parachains/src/coretime/migration.rs index 6c8ddaa8aab3..3f82472da8aa 100644 --- a/polkadot/runtime/parachains/src/coretime/migration.rs +++ b/polkadot/runtime/parachains/src/coretime/migration.rs @@ -27,6 +27,10 @@ mod v_coretime { paras, }; #[cfg(feature = "try-runtime")] + use codec::Decode; + #[cfg(feature = "try-runtime")] + use codec::Encode; + #[cfg(feature = "try-runtime")] use frame_support::ensure; use frame_support::{ traits::{OnRuntimeUpgrade, PalletInfoAccess, StorageVersion}, @@ -34,12 +38,8 @@ mod v_coretime { }; use frame_system::pallet_prelude::BlockNumberFor; use pallet_broker::{CoreAssignment, CoreMask, ScheduleItem}; - #[cfg(feature = "try-runtime")] - use parity_scale_codec::Decode; - #[cfg(feature = "try-runtime")] - use parity_scale_codec::Encode; use polkadot_parachain_primitives::primitives::IsSystem; - use primitives::{CoreIndex, Id as ParaId}; + use polkadot_primitives::{CoreIndex, Id as ParaId}; use sp_arithmetic::traits::SaturatedConversion; use sp_core::Get; use sp_runtime::BoundedVec; diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index 33cbcb98fb29..dedffb733d33 100644 
--- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -24,7 +24,7 @@ use frame_support::{pallet_prelude::*, traits::Currency}; use frame_system::pallet_prelude::*; pub use pallet::*; use pallet_broker::{CoreAssignment, CoreIndex as BrokerCoreIndex}; -use primitives::{CoreIndex, Id as ParaId}; +use polkadot_primitives::{CoreIndex, Id as ParaId}; use sp_arithmetic::traits::SaturatedConversion; use xcm::prelude::{ send_xcm, Instruction, Junction, Location, OriginKind, SendXcm, WeightLimit, Xcm, diff --git a/polkadot/runtime/parachains/src/disputes.rs b/polkadot/runtime/parachains/src/disputes.rs index 62e02e67157d..4a0f2390b45d 100644 --- a/polkadot/runtime/parachains/src/disputes.rs +++ b/polkadot/runtime/parachains/src/disputes.rs @@ -20,17 +20,17 @@ use crate::{ configuration, initializer::SessionChangeNotification, metrics::METRICS, session_info, }; use bitvec::{bitvec, order::Lsb0 as BitOrderLsb0}; +use codec::{Decode, Encode}; use frame_support::{ensure, weights::Weight}; use frame_system::pallet_prelude::*; -use parity_scale_codec::{Decode, Encode}; -use polkadot_runtime_metrics::get_current_time; -use primitives::{ +use polkadot_primitives::{ byzantine_threshold, supermajority_threshold, ApprovalVote, ApprovalVoteMultipleCandidates, CandidateHash, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CompactStatement, ConsensusLog, DisputeState, DisputeStatement, DisputeStatementSet, ExplicitDisputeStatement, InvalidDisputeStatementKind, MultiDisputeStatementSet, SessionIndex, SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, }; +use polkadot_runtime_metrics::get_current_time; use scale_info::TypeInfo; use sp_runtime::{ traits::{AppVerify, One, Saturating, Zero}, diff --git a/polkadot/runtime/parachains/src/disputes/migration.rs b/polkadot/runtime/parachains/src/disputes/migration.rs index ccd367e41b36..e12edffb51b3 100644 --- 
a/polkadot/runtime/parachains/src/disputes/migration.rs +++ b/polkadot/runtime/parachains/src/disputes/migration.rs @@ -24,7 +24,7 @@ pub mod v1 { use frame_support::{ pallet_prelude::*, storage_alias, traits::OnRuntimeUpgrade, weights::Weight, }; - use primitives::SessionIndex; + use polkadot_primitives::SessionIndex; use sp_std::prelude::*; #[storage_alias] diff --git a/polkadot/runtime/parachains/src/disputes/slashing.rs b/polkadot/runtime/parachains/src/disputes/slashing.rs index a61d0c899836..b50853ecc696 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing.rs @@ -50,7 +50,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{ +use polkadot_primitives::{ slashing::{DisputeProof, DisputesTimeSlot, PendingSlashes, SlashingOffenceKind}, CandidateHash, SessionIndex, ValidatorId, ValidatorIndex, }; @@ -456,7 +456,8 @@ pub mod pallet { let validator_set_count = key_owner_proof.validator_count() as ValidatorSetCount; // check the membership proof to extract the offender's id - let key = (primitives::PARACHAIN_KEY_TYPE_ID, dispute_proof.validator_id.clone()); + let key = + (polkadot_primitives::PARACHAIN_KEY_TYPE_ID, dispute_proof.validator_id.clone()); let offender = T::KeyOwnerProofSystem::check_proof(key, key_owner_proof) .ok_or(Error::::InvalidKeyOwnershipProof)?; @@ -615,7 +616,7 @@ fn is_known_offence( key_owner_proof: &T::KeyOwnerProof, ) -> Result<(), TransactionValidityError> { // check the membership proof to extract the offender's id - let key = (primitives::PARACHAIN_KEY_TYPE_ID, dispute_proof.validator_id.clone()); + let key = (polkadot_primitives::PARACHAIN_KEY_TYPE_ID, dispute_proof.validator_id.clone()); let offender = T::KeyOwnerProofSystem::check_proof(key, key_owner_proof.clone()) .ok_or(InvalidTransaction::BadProof)?; diff --git a/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs 
b/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs index 42a64725160c..b53f98caeea3 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs @@ -17,12 +17,12 @@ use super::*; use crate::{disputes::SlashingHandler, initializer, shared}; +use codec::Decode; use frame_benchmarking::{benchmarks, whitelist_account}; use frame_support::traits::{OnFinalize, OnInitialize}; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use pallet_staking::testing_utils::create_validators; -use parity_scale_codec::Decode; -use primitives::{Hash, PARACHAIN_KEY_TYPE_ID}; +use polkadot_primitives::{Hash, PARACHAIN_KEY_TYPE_ID}; use sp_runtime::traits::{One, OpaqueKeys, StaticLookup}; use sp_session::MembershipProof; diff --git a/polkadot/runtime/parachains/src/disputes/tests.rs b/polkadot/runtime/parachains/src/disputes/tests.rs index 16b4fa3a9f1a..f505bf4625a6 100644 --- a/polkadot/runtime/parachains/src/disputes/tests.rs +++ b/polkadot/runtime/parachains/src/disputes/tests.rs @@ -29,7 +29,7 @@ use frame_support::{ traits::{OnFinalize, OnInitialize}, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::BlockNumber; +use polkadot_primitives::BlockNumber; use sp_core::{crypto::CryptoType, Pair}; const VOTE_FOR: VoteKind = VoteKind::ExplicitValid; diff --git a/polkadot/runtime/parachains/src/dmp.rs b/polkadot/runtime/parachains/src/dmp.rs index df2f93e19421..c0e1635ba169 100644 --- a/polkadot/runtime/parachains/src/dmp.rs +++ b/polkadot/runtime/parachains/src/dmp.rs @@ -48,7 +48,7 @@ use crate::{ }; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{DownwardMessage, Hash, Id as ParaId, InboundDownwardMessage}; +use polkadot_primitives::{DownwardMessage, Hash, Id as ParaId, InboundDownwardMessage}; use sp_core::MAX_POSSIBLE_ALLOCATION; use sp_runtime::{ traits::{BlakeTwo256, Hash as HashT, 
SaturatedConversion}, diff --git a/polkadot/runtime/parachains/src/dmp/tests.rs b/polkadot/runtime/parachains/src/dmp/tests.rs index f39d7ae16733..de1515958125 100644 --- a/polkadot/runtime/parachains/src/dmp/tests.rs +++ b/polkadot/runtime/parachains/src/dmp/tests.rs @@ -19,10 +19,10 @@ use crate::{ configuration::ActiveConfig, mock::{new_test_ext, Dmp, MockGenesisConfig, Paras, System, Test}, }; +use codec::Encode; use frame_support::assert_ok; use hex_literal::hex; -use parity_scale_codec::Encode; -use primitives::BlockNumber; +use polkadot_primitives::BlockNumber; pub(crate) fn run_to_block(to: BlockNumber, new_session: Option>) { while System::block_number() < to { @@ -210,7 +210,7 @@ fn queue_downward_message_critical() { #[test] fn verify_dmq_mqc_head_is_externally_accessible() { use hex_literal::hex; - use primitives::well_known_keys; + use polkadot_primitives::well_known_keys; let a = ParaId::from(2020); diff --git a/polkadot/runtime/parachains/src/hrmp.rs b/polkadot/runtime/parachains/src/hrmp.rs index 42a9c23e5aa1..e34e4a03e711 100644 --- a/polkadot/runtime/parachains/src/hrmp.rs +++ b/polkadot/runtime/parachains/src/hrmp.rs @@ -18,11 +18,11 @@ use crate::{ configuration::{self, HostConfiguration}, dmp, ensure_parachain, initializer, paras, }; +use codec::{Decode, Encode}; use frame_support::{pallet_prelude::*, traits::ReservableCurrency, DefaultNoBound}; use frame_system::pallet_prelude::*; -use parity_scale_codec::{Decode, Encode}; use polkadot_parachain_primitives::primitives::{HorizontalMessages, IsSystem}; -use primitives::{ +use polkadot_primitives::{ Balance, Hash, HrmpChannelId, Id as ParaId, InboundHrmpMessage, OutboundHrmpMessage, SessionIndex, }; @@ -1864,7 +1864,7 @@ impl Pallet { /// If the XCM version is unknown, the latest XCM version is used as a best effort. 
fn wrap_notification( mut notification: impl FnMut() -> xcm::opaque::latest::opaque::Xcm, - ) -> impl FnOnce(ParaId) -> primitives::DownwardMessage { + ) -> impl FnOnce(ParaId) -> polkadot_primitives::DownwardMessage { use xcm::{ opaque::VersionedXcm, prelude::{Junction, Location}, @@ -1892,7 +1892,7 @@ impl Pallet { log_label: &str, config: &HostConfiguration>, dest: ParaId, - notification_bytes_for: impl FnOnce(ParaId) -> primitives::DownwardMessage, + notification_bytes_for: impl FnOnce(ParaId) -> polkadot_primitives::DownwardMessage, ) { // prepare notification let notification_bytes = notification_bytes_for(dest); diff --git a/polkadot/runtime/parachains/src/hrmp/tests.rs b/polkadot/runtime/parachains/src/hrmp/tests.rs index acfaa8f2d290..4fcbc69e98ad 100644 --- a/polkadot/runtime/parachains/src/hrmp/tests.rs +++ b/polkadot/runtime/parachains/src/hrmp/tests.rs @@ -28,7 +28,7 @@ use crate::{ shared, }; use frame_support::{assert_noop, assert_ok, error::BadOrigin}; -use primitives::{BlockNumber, InboundDownwardMessage}; +use polkadot_primitives::{BlockNumber, InboundDownwardMessage}; use std::collections::BTreeMap; pub(crate) fn run_to_block(to: BlockNumber, new_session: Option>) { @@ -660,7 +660,7 @@ fn check_sent_messages() { #[test] fn verify_externally_accessible() { - use primitives::{well_known_keys, AbridgedHrmpChannel}; + use polkadot_primitives::{well_known_keys, AbridgedHrmpChannel}; let para_a = 2020.into(); let para_b = 2021.into(); diff --git a/polkadot/runtime/parachains/src/inclusion/migration.rs b/polkadot/runtime/parachains/src/inclusion/migration.rs index 5f35680ee694..a340d52643e0 100644 --- a/polkadot/runtime/parachains/src/inclusion/migration.rs +++ b/polkadot/runtime/parachains/src/inclusion/migration.rs @@ -16,10 +16,10 @@ pub use v1::MigrateToV1; pub mod v0 { use crate::inclusion::{Config, Pallet}; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; + use codec::{Decode, Encode}; use frame_support::{storage_alias, Twox64Concat}; use 
frame_system::pallet_prelude::BlockNumberFor; - use parity_scale_codec::{Decode, Encode}; - use primitives::{ + use polkadot_primitives::{ AvailabilityBitfield, CandidateCommitments, CandidateDescriptor, CandidateHash, CoreIndex, GroupIndex, Id as ParaId, ValidatorIndex, }; @@ -77,13 +77,13 @@ mod v1 { use sp_core::Get; use sp_std::{collections::vec_deque::VecDeque, vec::Vec}; + #[cfg(feature = "try-runtime")] + use codec::{Decode, Encode}; #[cfg(feature = "try-runtime")] use frame_support::{ ensure, traits::{GetStorageVersion, StorageVersion}, }; - #[cfg(feature = "try-runtime")] - use parity_scale_codec::{Decode, Encode}; pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); @@ -217,8 +217,10 @@ mod tests { mock::{new_test_ext, MockGenesisConfig, Test}, }; use frame_support::traits::UncheckedOnRuntimeUpgrade; - use primitives::{AvailabilityBitfield, Id as ParaId}; - use test_helpers::{dummy_candidate_commitments, dummy_candidate_descriptor, dummy_hash}; + use polkadot_primitives::{AvailabilityBitfield, Id as ParaId}; + use polkadot_primitives_test_helpers::{ + dummy_candidate_commitments, dummy_candidate_descriptor, dummy_hash, + }; #[test] fn migrate_to_v1() { diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 0c7274984085..a86941a1a0b8 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -28,6 +28,7 @@ use crate::{ util::make_persisted_validation_data_with_parent, }; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; +use codec::{Decode, Encode}; use frame_support::{ defensive, pallet_prelude::*, @@ -36,8 +37,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::*; use pallet_message_queue::OnQueueChanged; -use parity_scale_codec::{Decode, Encode}; -use primitives::{ +use polkadot_primitives::{ effective_minimum_backing_votes, supermajority_threshold, well_known_keys, BackedCandidate, 
CandidateCommitments, CandidateDescriptor, CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, GroupIndex, Hash, HeadData, Id as ParaId, @@ -746,7 +746,7 @@ impl Pallet { backed_candidate.validator_indices_and_core_index(core_index_enabled); // check the signatures in the backing and that it is a majority. - let maybe_amount_validated = primitives::check_candidate_backing( + let maybe_amount_validated = polkadot_primitives::check_candidate_backing( backed_candidate.candidate().hash(), backed_candidate.validity_votes(), validator_indices, @@ -795,7 +795,7 @@ impl Pallet { pub(crate) fn check_validation_outputs_for_runtime_api( para_id: ParaId, relay_parent_number: BlockNumberFor, - validation_outputs: primitives::CandidateCommitments, + validation_outputs: polkadot_primitives::CandidateCommitments, ) -> bool { let prev_context = paras::MostRecentContext::::get(para_id); let check_ctx = CandidateCheckContext::::new(prev_context); @@ -1319,11 +1319,11 @@ impl CandidateCheckContext { para_id: ParaId, relay_parent_number: BlockNumberFor, head_data: &HeadData, - new_validation_code: &Option, + new_validation_code: &Option, processed_downward_messages: u32, - upward_messages: &[primitives::UpwardMessage], + upward_messages: &[polkadot_primitives::UpwardMessage], hrmp_watermark: BlockNumberFor, - horizontal_messages: &[primitives::OutboundHrmpMessage], + horizontal_messages: &[polkadot_primitives::OutboundHrmpMessage], ) -> Result<(), AcceptanceCheckErr> { ensure!( head_data.0.len() <= self.config.max_head_data_size as _, diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index c19bc6eb7bfc..18def664f4b2 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -25,24 +25,26 @@ use crate::{ paras_inherent::DisputedBitfield, shared::AllowedRelayParentsTracker, }; -use primitives::{ +use polkadot_primitives::{ 
effective_minimum_backing_votes, AvailabilityBitfield, SignedAvailabilityBitfields, UncheckedSignedAvailabilityBitfields, }; use assert_matches::assert_matches; +use codec::DecodeAll; use frame_support::assert_noop; -use keyring::Sr25519Keyring; -use parity_scale_codec::DecodeAll; -use primitives::{ +use polkadot_primitives::{ BlockNumber, CandidateCommitments, CandidateDescriptor, CollatorId, CompactStatement as Statement, Hash, SignedAvailabilityBitfield, SignedStatement, ValidationCode, ValidatorId, ValidityAttestation, PARACHAIN_KEY_TYPE_ID, }; +use polkadot_primitives_test_helpers::{ + dummy_collator, dummy_collator_signature, dummy_validation_code, +}; use sc_keystore::LocalKeystore; +use sp_keyring::Sr25519Keyring; use sp_keystore::{Keystore, KeystorePtr}; use std::sync::Arc; -use test_helpers::{dummy_collator, dummy_collator_signature, dummy_validation_code}; fn default_config() -> HostConfiguration { let mut config = HostConfiguration::default(); @@ -100,7 +102,7 @@ pub(crate) fn collator_sign_candidate( ) { candidate.descriptor.collator = collator.public().into(); - let payload = primitives::collator_signature_payload( + let payload = polkadot_primitives::collator_signature_payload( &candidate.descriptor.relay_parent, &candidate.descriptor.para_id, &candidate.descriptor.persisted_validation_data_hash, @@ -158,7 +160,7 @@ pub(crate) fn back_candidate( let backed = BackedCandidate::new(candidate, validity_votes, validator_indices.clone(), core_index); - let successfully_backed = primitives::check_candidate_backing( + let successfully_backed = polkadot_primitives::check_candidate_backing( backed.candidate().hash(), backed.validity_votes(), validator_indices.as_bitslice(), diff --git a/polkadot/runtime/parachains/src/initializer.rs b/polkadot/runtime/parachains/src/initializer.rs index 511d74421032..fd0f1c3c0651 100644 --- a/polkadot/runtime/parachains/src/initializer.rs +++ b/polkadot/runtime/parachains/src/initializer.rs @@ -25,13 +25,13 @@ use crate::{ 
disputes::{self, DisputesHandler as _, SlashingHandler as _}, dmp, hrmp, inclusion, paras, scheduler, session_info, shared, }; +use codec::{Decode, Encode}; use frame_support::{ traits::{OneSessionHandler, Randomness}, weights::Weight, }; use frame_system::limits::BlockWeights; -use parity_scale_codec::{Decode, Encode}; -use primitives::{BlockNumber, ConsensusLog, SessionIndex, ValidatorId}; +use polkadot_primitives::{BlockNumber, ConsensusLog, SessionIndex, ValidatorId}; use scale_info::TypeInfo; use sp_std::prelude::*; diff --git a/polkadot/runtime/parachains/src/initializer/benchmarking.rs b/polkadot/runtime/parachains/src/initializer/benchmarking.rs index ece41c726f04..2083c058fd04 100644 --- a/polkadot/runtime/parachains/src/initializer/benchmarking.rs +++ b/polkadot/runtime/parachains/src/initializer/benchmarking.rs @@ -17,7 +17,7 @@ use super::*; use frame_benchmarking::benchmarks; use frame_system::RawOrigin; -use primitives::ConsensusLog; +use polkadot_primitives::ConsensusLog; use sp_runtime::DigestItem; // Random large number for the digest diff --git a/polkadot/runtime/parachains/src/initializer/tests.rs b/polkadot/runtime/parachains/src/initializer/tests.rs index e757e6b9d117..a2bdb36eaa64 100644 --- a/polkadot/runtime/parachains/src/initializer/tests.rs +++ b/polkadot/runtime/parachains/src/initializer/tests.rs @@ -20,8 +20,8 @@ use crate::{ paras::ParaKind, session_info, }; -use primitives::{HeadData, Id as ParaId}; -use test_helpers::dummy_validation_code; +use polkadot_primitives::{HeadData, Id as ParaId}; +use polkadot_primitives_test_helpers::dummy_validation_code; use frame_support::{ assert_ok, diff --git a/polkadot/runtime/parachains/src/lib.rs b/polkadot/runtime/parachains/src/lib.rs index 97d6ab74904d..51110e89416c 100644 --- a/polkadot/runtime/parachains/src/lib.rs +++ b/polkadot/runtime/parachains/src/lib.rs @@ -55,7 +55,7 @@ mod ump_tests; pub use origin::{ensure_parachain, Origin}; pub use paras::{ParaLifecycle, UpgradeStrategy}; -use 
primitives::{HeadData, Id as ParaId, ValidationCode}; +use polkadot_primitives::{HeadData, Id as ParaId, ValidationCode}; use sp_runtime::{DispatchResult, FixedU128}; /// Trait for tracking message delivery fees on a transport protocol. @@ -86,7 +86,7 @@ pub fn schedule_para_initialize( } /// Schedule a para to be cleaned up at the start of the next session. -pub fn schedule_para_cleanup(id: primitives::Id) -> Result<(), ()> { +pub fn schedule_para_cleanup(id: polkadot_primitives::Id) -> Result<(), ()> { paras::Pallet::::schedule_para_cleanup(id).map_err(|_| ()) } diff --git a/polkadot/runtime/parachains/src/metrics.rs b/polkadot/runtime/parachains/src/metrics.rs index 023bd09f83a8..7a17aafabd12 100644 --- a/polkadot/runtime/parachains/src/metrics.rs +++ b/polkadot/runtime/parachains/src/metrics.rs @@ -16,13 +16,13 @@ //! Runtime declaration of the parachain metrics. -use polkadot_runtime_metrics::{Counter, CounterVec, Histogram}; -use primitives::metric_definitions::{ +use polkadot_primitives::metric_definitions::{ PARACHAIN_CREATE_INHERENT_BITFIELDS_SIGNATURE_CHECKS, PARACHAIN_INHERENT_DATA_BITFIELDS_PROCESSED, PARACHAIN_INHERENT_DATA_CANDIDATES_PROCESSED, PARACHAIN_INHERENT_DATA_DISPUTE_SETS_PROCESSED, PARACHAIN_INHERENT_DATA_WEIGHT, PARACHAIN_VERIFY_DISPUTE_SIGNATURE, }; +use polkadot_runtime_metrics::{Counter, CounterVec, Histogram}; pub struct Metrics { /// Samples inherent data weight. 
diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 75b835b17541..0a0be8432b25 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -27,8 +27,9 @@ use crate::{ session_info, shared, ParaId, }; use frame_support::pallet_prelude::*; -use primitives::CoreIndex; +use polkadot_primitives::CoreIndex; +use codec::Decode; use frame_support::{ assert_ok, derive_impl, parameter_types, traits::{ @@ -38,8 +39,7 @@ use frame_support::{ }; use frame_support_test::TestRandomness; use frame_system::limits; -use parity_scale_codec::Decode; -use primitives::{ +use polkadot_primitives::{ AuthorityDiscoveryId, Balance, BlockNumber, CandidateHash, Moment, SessionIndex, UpwardMessage, ValidationCode, ValidatorIndex, }; diff --git a/polkadot/runtime/parachains/src/origin.rs b/polkadot/runtime/parachains/src/origin.rs index c83fec1b8923..5202cba232d2 100644 --- a/polkadot/runtime/parachains/src/origin.rs +++ b/polkadot/runtime/parachains/src/origin.rs @@ -16,7 +16,7 @@ //! Declaration of the parachain specific origin and a pallet that hosts it. 
-use primitives::Id as ParaId; +use polkadot_primitives::Id as ParaId; use sp_runtime::traits::BadOrigin; use sp_std::result; diff --git a/polkadot/runtime/parachains/src/paras/benchmarking.rs b/polkadot/runtime/parachains/src/paras/benchmarking.rs index 437c4091a98b..0f3318612a77 100644 --- a/polkadot/runtime/parachains/src/paras/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras/benchmarking.rs @@ -18,7 +18,9 @@ use super::*; use crate::configuration::HostConfiguration; use frame_benchmarking::benchmarks; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; -use primitives::{HeadData, Id as ParaId, ValidationCode, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE}; +use polkadot_primitives::{ + HeadData, Id as ParaId, ValidationCode, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, +}; use sp_runtime::traits::{One, Saturating}; mod pvf_check; diff --git a/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs b/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs index 9281332fdada..0bf5fe783a0e 100644 --- a/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs +++ b/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs @@ -19,7 +19,7 @@ use crate::{configuration, paras::*, shared::Pallet as ParasShared}; use frame_support::assert_ok; use frame_system::RawOrigin; -use primitives::{HeadData, Id as ParaId, ValidationCode, ValidatorId, ValidatorIndex}; +use polkadot_primitives::{HeadData, Id as ParaId, ValidationCode, ValidatorId, ValidatorIndex}; use sp_application_crypto::RuntimeAppPublic; // Constants for the benchmarking @@ -204,7 +204,7 @@ where { let validators = shared::ActiveValidatorKeys::::get(); - let accept_threshold = primitives::supermajority_threshold(validators.len()); + let accept_threshold = polkadot_primitives::supermajority_threshold(validators.len()); let required_votes = match vote_outcome { VoteOutcome::Accept => accept_threshold, VoteOutcome::Reject => validators.len() - accept_threshold, diff --git 
a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index 36a693bcc8e2..8cffcbbbb024 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -114,10 +114,10 @@ use crate::{ shared, }; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; +use codec::{Decode, Encode}; use frame_support::{pallet_prelude::*, traits::EstimateNextSessionRotation, DefaultNoBound}; use frame_system::pallet_prelude::*; -use parity_scale_codec::{Decode, Encode}; -use primitives::{ +use polkadot_primitives::{ ConsensusLog, HeadData, Id as ParaId, PvfCheckStatement, SessionIndex, UpgradeGoAhead, UpgradeRestriction, ValidationCode, ValidationCodeHash, ValidatorSignature, MIN_CODE_SIZE, }; @@ -348,9 +348,7 @@ impl Encode for ParaKind { } impl Decode for ParaKind { - fn decode( - input: &mut I, - ) -> Result { + fn decode(input: &mut I) -> Result { match bool::decode(input) { Ok(true) => Ok(ParaKind::Parachain), Ok(false) => Ok(ParaKind::Parathread), @@ -487,7 +485,7 @@ impl PvfCheckActiveVoteState { /// Returns `None` if the quorum is not reached, or the direction of the decision. fn quorum(&self, n_validators: usize) -> Option { - let accept_threshold = primitives::supermajority_threshold(n_validators); + let accept_threshold = polkadot_primitives::supermajority_threshold(n_validators); // At this threshold, a supermajority is no longer possible, so we reject. 
let reject_threshold = n_validators - accept_threshold; diff --git a/polkadot/runtime/parachains/src/paras/tests.rs b/polkadot/runtime/parachains/src/paras/tests.rs index 0b458f2f91eb..732b75417387 100644 --- a/polkadot/runtime/parachains/src/paras/tests.rs +++ b/polkadot/runtime/parachains/src/paras/tests.rs @@ -16,12 +16,12 @@ use super::*; use frame_support::{assert_err, assert_ok, assert_storage_noop}; -use keyring::Sr25519Keyring; -use primitives::{vstaging::SchedulerParams, BlockNumber, PARACHAIN_KEY_TYPE_ID}; +use polkadot_primitives::{vstaging::SchedulerParams, BlockNumber, PARACHAIN_KEY_TYPE_ID}; +use polkadot_primitives_test_helpers::{dummy_head_data, dummy_validation_code, validator_pubkeys}; use sc_keystore::LocalKeystore; +use sp_keyring::Sr25519Keyring; use sp_keystore::{Keystore, KeystorePtr}; use std::sync::Arc; -use test_helpers::{dummy_head_data, dummy_validation_code, validator_pubkeys}; use crate::{ configuration::HostConfiguration, @@ -135,7 +135,10 @@ fn check_code_is_not_stored(validation_code: &ValidationCode) { /// An utility for checking that certain events were deposited. 
struct EventValidator { events: Vec< - frame_system::EventRecord<::RuntimeEvent, primitives::Hash>, + frame_system::EventRecord< + ::RuntimeEvent, + polkadot_primitives::Hash, + >, >, } @@ -1810,7 +1813,7 @@ fn add_trusted_validation_code_enacts_existing_pvf_vote() { #[test] fn verify_upgrade_go_ahead_signal_is_externally_accessible() { - use primitives::well_known_keys; + use polkadot_primitives::well_known_keys; let a = ParaId::from(2020); @@ -1826,7 +1829,7 @@ fn verify_upgrade_go_ahead_signal_is_externally_accessible() { #[test] fn verify_upgrade_restriction_signal_is_externally_accessible() { - use primitives::well_known_keys; + use polkadot_primitives::well_known_keys; let a = ParaId::from(2020); @@ -1842,7 +1845,7 @@ fn verify_upgrade_restriction_signal_is_externally_accessible() { #[test] fn verify_para_head_is_externally_accessible() { - use primitives::well_known_keys; + use polkadot_primitives::well_known_keys; let a = ParaId::from(2020); let expected_head_data = HeadData(vec![0, 1, 2, 3]); diff --git a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs index e643888ae29a..267a9781a106 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs @@ -20,7 +20,7 @@ use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; use frame_system::RawOrigin; use sp_std::{cmp::min, collections::btree_map::BTreeMap}; -use primitives::v7::GroupIndex; +use polkadot_primitives::v7::GroupIndex; use crate::builder::BenchBuilder; diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index ac4cf5dc8d41..386873aad457 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -42,7 +42,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::*; use 
pallet_babe::{self, ParentBlockRandomness}; -use primitives::{ +use polkadot_primitives::{ effective_minimum_backing_votes, node_features::FeatureIndex, BackedCandidate, CandidateHash, CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CoreIndex, DisputeStatementSet, HeadData, InherentData as ParachainsInherentData, diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index 64fbc9c4a4e0..06a544296461 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -20,7 +20,7 @@ use crate::{ configuration::{self, HostConfiguration}, mock::MockGenesisConfig, }; -use primitives::vstaging::SchedulerParams; +use polkadot_primitives::vstaging::SchedulerParams; fn default_config() -> MockGenesisConfig { MockGenesisConfig { @@ -57,7 +57,7 @@ mod enter { use core::panic; use frame_support::assert_ok; use frame_system::limits; - use primitives::{vstaging::SchedulerParams, AvailabilityBitfield, UncheckedSigned}; + use polkadot_primitives::{vstaging::SchedulerParams, AvailabilityBitfield, UncheckedSigned}; use sp_runtime::Perbill; use sp_std::collections::btree_map::BTreeMap; @@ -494,7 +494,7 @@ mod enter { #[test] fn test_session_is_tracked_in_on_chain_scraping() { use crate::disputes::run_to_block; - use primitives::{ + use polkadot_primitives::{ DisputeStatement, DisputeStatementSet, ExplicitDisputeStatement, InvalidDisputeStatementKind, ValidDisputeStatementKind, }; @@ -1467,8 +1467,8 @@ mod enter { } } -fn default_header() -> primitives::Header { - primitives::Header { +fn default_header() -> polkadot_primitives::Header { + polkadot_primitives::Header { parent_hash: Default::default(), number: 0, state_root: Default::default(), @@ -1487,7 +1487,7 @@ mod sanitizers { mock::new_test_ext, }; use bitvec::order::Lsb0; - use primitives::{ + use polkadot_primitives::{ AvailabilityBitfield, GroupIndex, Hash, Id 
as ParaId, SignedAvailabilityBitfield, ValidatorIndex, }; @@ -1495,13 +1495,13 @@ mod sanitizers { use sp_core::crypto::UncheckedFrom; use crate::mock::Test; - use keyring::Sr25519Keyring; - use primitives::PARACHAIN_KEY_TYPE_ID; + use polkadot_primitives::PARACHAIN_KEY_TYPE_ID; use sc_keystore::LocalKeystore; + use sp_keyring::Sr25519Keyring; use sp_keystore::{Keystore, KeystorePtr}; use std::sync::Arc; - fn validator_pubkeys(val_ids: &[keyring::Sr25519Keyring]) -> Vec { + fn validator_pubkeys(val_ids: &[sp_keyring::Sr25519Keyring]) -> Vec { val_ids.iter().map(|v| v.public().into()).collect() } @@ -1518,10 +1518,10 @@ mod sanitizers { let signing_context = SigningContext { parent_hash, session_index }; let validators = vec![ - keyring::Sr25519Keyring::Alice, - keyring::Sr25519Keyring::Bob, - keyring::Sr25519Keyring::Charlie, - keyring::Sr25519Keyring::Dave, + sp_keyring::Sr25519Keyring::Alice, + sp_keyring::Sr25519Keyring::Bob, + sp_keyring::Sr25519Keyring::Charlie, + sp_keyring::Sr25519Keyring::Dave, ]; for validator in validators.iter() { Keystore::sr25519_generate_new( @@ -1744,7 +1744,7 @@ mod sanitizers { scheduler::{common::Assignment, ParasEntry}, util::{make_persisted_validation_data, make_persisted_validation_data_with_parent}, }; - use primitives::ValidationCode; + use polkadot_primitives::ValidationCode; use sp_std::collections::vec_deque::VecDeque; use super::*; @@ -1754,7 +1754,7 @@ mod sanitizers { backed_candidates: Vec, expected_backed_candidates_with_core: BTreeMap>, - scheduled_paras: BTreeMap>, + scheduled_paras: BTreeMap>, } // Generate test data for the candidates and assert that the environment is set as expected @@ -1780,11 +1780,11 @@ mod sanitizers { let signing_context = SigningContext { parent_hash: relay_parent, session_index }; let validators = vec![ - keyring::Sr25519Keyring::Alice, - keyring::Sr25519Keyring::Bob, - keyring::Sr25519Keyring::Charlie, - keyring::Sr25519Keyring::Dave, - keyring::Sr25519Keyring::Eve, + 
sp_keyring::Sr25519Keyring::Alice, + sp_keyring::Sr25519Keyring::Bob, + sp_keyring::Sr25519Keyring::Charlie, + sp_keyring::Sr25519Keyring::Dave, + sp_keyring::Sr25519Keyring::Eve, ]; for validator in validators.iter() { Keystore::sr25519_generate_new( @@ -1965,14 +1965,14 @@ mod sanitizers { let signing_context = SigningContext { parent_hash: relay_parent, session_index }; let validators = vec![ - keyring::Sr25519Keyring::Alice, - keyring::Sr25519Keyring::Bob, - keyring::Sr25519Keyring::Charlie, - keyring::Sr25519Keyring::Dave, - keyring::Sr25519Keyring::Eve, - keyring::Sr25519Keyring::Ferdie, - keyring::Sr25519Keyring::One, - keyring::Sr25519Keyring::Two, + sp_keyring::Sr25519Keyring::Alice, + sp_keyring::Sr25519Keyring::Bob, + sp_keyring::Sr25519Keyring::Charlie, + sp_keyring::Sr25519Keyring::Dave, + sp_keyring::Sr25519Keyring::Eve, + sp_keyring::Sr25519Keyring::Ferdie, + sp_keyring::Sr25519Keyring::One, + sp_keyring::Sr25519Keyring::Two, ]; for validator in validators.iter() { Keystore::sr25519_generate_new( @@ -2504,15 +2504,15 @@ mod sanitizers { let signing_context = SigningContext { parent_hash: relay_parent, session_index }; let validators = vec![ - keyring::Sr25519Keyring::Alice, - keyring::Sr25519Keyring::Bob, - keyring::Sr25519Keyring::Charlie, - keyring::Sr25519Keyring::Dave, - keyring::Sr25519Keyring::Eve, - keyring::Sr25519Keyring::Ferdie, - keyring::Sr25519Keyring::One, - keyring::Sr25519Keyring::Two, - keyring::Sr25519Keyring::AliceStash, + sp_keyring::Sr25519Keyring::Alice, + sp_keyring::Sr25519Keyring::Bob, + sp_keyring::Sr25519Keyring::Charlie, + sp_keyring::Sr25519Keyring::Dave, + sp_keyring::Sr25519Keyring::Eve, + sp_keyring::Sr25519Keyring::Ferdie, + sp_keyring::Sr25519Keyring::One, + sp_keyring::Sr25519Keyring::Two, + sp_keyring::Sr25519Keyring::AliceStash, ]; for validator in validators.iter() { Keystore::sr25519_generate_new( diff --git a/polkadot/runtime/parachains/src/paras_inherent/weights.rs 
b/polkadot/runtime/parachains/src/paras_inherent/weights.rs index 0f4e5be572a6..37809396a823 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/weights.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/weights.rs @@ -19,8 +19,8 @@ //! the relay chain, but we do care about the size of the block, by putting the tx in the //! proof_size we can use the already existing weight limiting code to limit the used size as well. -use parity_scale_codec::{Encode, WrapperTypeEncode}; -use primitives::{ +use codec::{Encode, WrapperTypeEncode}; +use polkadot_primitives::{ CheckedMultiDisputeStatementSet, MultiDisputeStatementSet, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, }; diff --git a/polkadot/runtime/parachains/src/reward_points.rs b/polkadot/runtime/parachains/src/reward_points.rs index 3be743a2c551..5f45445b0ba2 100644 --- a/polkadot/runtime/parachains/src/reward_points.rs +++ b/polkadot/runtime/parachains/src/reward_points.rs @@ -23,7 +23,7 @@ use crate::{session_info, shared}; use frame_support::traits::{Defensive, ValidatorSet}; -use primitives::{SessionIndex, ValidatorIndex}; +use polkadot_primitives::{SessionIndex, ValidatorIndex}; use sp_std::collections::btree_set::BTreeSet; /// The amount of era points given by backing a candidate that is included. 
diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs index 3dca38050a0a..dbb79b86c56c 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs @@ -24,7 +24,7 @@ use crate::{ }; use frame_support::traits::{GetStorageVersion, StorageVersion}; use frame_system::pallet_prelude::*; -use primitives::{ +use polkadot_primitives::{ async_backing::{ AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints, InboundHrmpLimitations, OutboundHrmpChannelLimitations, @@ -149,7 +149,10 @@ pub fn availability_cores() -> Vec { if let Some(para_id) = scheduled.get(&CoreIndex(i as _)).cloned() { - CoreState::Scheduled(primitives::ScheduledCore { para_id, collator: None }) + CoreState::Scheduled(polkadot_primitives::ScheduledCore { + para_id, + collator: None, + }) } else { CoreState::Free } @@ -161,7 +164,7 @@ pub fn availability_cores() -> Vec( ) -> (BlockNumberFor, ::Hash) { - use parity_scale_codec::Decode as _; + use codec::Decode as _; let state_version = frame_system::Pallet::::runtime_version().state_version(); let relay_parent_number = frame_system::Pallet::::block_number(); let relay_parent_storage_root = T::Hash::decode(&mut &sp_io::storage::root(state_version)[..]) @@ -241,7 +244,7 @@ pub fn assumed_validation_data( /// Implementation for the `check_validation_outputs` function of the runtime API. 
pub fn check_validation_outputs( para_id: ParaId, - outputs: primitives::CandidateCommitments, + outputs: polkadot_primitives::CandidateCommitments, ) -> bool { let relay_parent_number = frame_system::Pallet::::block_number(); inclusion::Pallet::::check_validation_outputs_for_runtime_api( diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index 32bbdca84a3c..8c239dc207f6 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -17,7 +17,7 @@ //! Put implementations of functions from staging APIs here. use crate::{inclusion, initializer, scheduler}; -use primitives::{CommittedCandidateReceipt, CoreIndex, Id as ParaId}; +use polkadot_primitives::{CommittedCandidateReceipt, CoreIndex, Id as ParaId}; use sp_runtime::traits::One; use sp_std::{ collections::{btree_map::BTreeMap, vec_deque::VecDeque}, diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index baeec49839df..0442301a32ff 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -40,7 +40,7 @@ use crate::{configuration, initializer::SessionChangeNotification, paras}; use frame_support::{pallet_prelude::*, traits::Defensive}; use frame_system::pallet_prelude::BlockNumberFor; pub use polkadot_core_primitives::v2::BlockNumber; -use primitives::{ +use polkadot_primitives::{ CoreIndex, GroupIndex, GroupRotationInfo, Id as ParaId, ScheduledCore, ValidatorIndex, }; use sp_runtime::traits::One; diff --git a/polkadot/runtime/parachains/src/scheduler/common.rs b/polkadot/runtime/parachains/src/scheduler/common.rs index 66a4e6d30be0..114cd4b940bc 100644 --- a/polkadot/runtime/parachains/src/scheduler/common.rs +++ b/polkadot/runtime/parachains/src/scheduler/common.rs @@ -22,7 +22,7 @@ use sp_runtime::{ RuntimeDebug, }; -use 
primitives::{CoreIndex, Id as ParaId}; +use polkadot_primitives::{CoreIndex, Id as ParaId}; /// Assignment (ParaId -> CoreIndex). #[derive(Encode, Decode, TypeInfo, RuntimeDebug, Clone, PartialEq)] diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs index 5482c8821e58..57f4fd670fbe 100644 --- a/polkadot/runtime/parachains/src/scheduler/migration.rs +++ b/polkadot/runtime/parachains/src/scheduler/migration.rs @@ -34,7 +34,7 @@ struct V0Assignment { /// Old scheduler with explicit parathreads and `Scheduled` storage instead of `ClaimQueue`. mod v0 { use super::*; - use primitives::{CollatorId, Id}; + use polkadot_primitives::{CollatorId, Id}; #[storage_alias] pub(super) type Scheduled = StorageValue, Vec, ValueQuery>; diff --git a/polkadot/runtime/parachains/src/scheduler/tests.rs b/polkadot/runtime/parachains/src/scheduler/tests.rs index 200f49ff2e82..74ad8adf00c4 100644 --- a/polkadot/runtime/parachains/src/scheduler/tests.rs +++ b/polkadot/runtime/parachains/src/scheduler/tests.rs @@ -17,10 +17,10 @@ use super::*; use frame_support::assert_ok; -use keyring::Sr25519Keyring; -use primitives::{ +use polkadot_primitives::{ vstaging::SchedulerParams, BlockNumber, SessionIndex, ValidationCode, ValidatorId, }; +use sp_keyring::Sr25519Keyring; use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use crate::{ diff --git a/polkadot/runtime/parachains/src/session_info.rs b/polkadot/runtime/parachains/src/session_info.rs index 2f7f1ead76ad..ff032f7e34d5 100644 --- a/polkadot/runtime/parachains/src/session_info.rs +++ b/polkadot/runtime/parachains/src/session_info.rs @@ -29,7 +29,9 @@ use frame_support::{ traits::{OneSessionHandler, ValidatorSet, ValidatorSetWithIdentification}, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{AssignmentId, AuthorityDiscoveryId, ExecutorParams, SessionIndex, SessionInfo}; +use polkadot_primitives::{ + AssignmentId, 
AuthorityDiscoveryId, ExecutorParams, SessionIndex, SessionInfo, +}; use sp_std::vec::Vec; pub use pallet::*; diff --git a/polkadot/runtime/parachains/src/session_info/tests.rs b/polkadot/runtime/parachains/src/session_info/tests.rs index 18b9d8f59010..3e81ca498713 100644 --- a/polkadot/runtime/parachains/src/session_info/tests.rs +++ b/polkadot/runtime/parachains/src/session_info/tests.rs @@ -24,8 +24,8 @@ use crate::{ }, util::take_active_subset, }; -use keyring::Sr25519Keyring; -use primitives::{vstaging::SchedulerParams, BlockNumber, ValidatorId, ValidatorIndex}; +use polkadot_primitives::{vstaging::SchedulerParams, BlockNumber, ValidatorId, ValidatorIndex}; +use sp_keyring::Sr25519Keyring; fn run_to_block( to: BlockNumber, diff --git a/polkadot/runtime/parachains/src/shared.rs b/polkadot/runtime/parachains/src/shared.rs index 319b22515889..417de1fa3fb0 100644 --- a/polkadot/runtime/parachains/src/shared.rs +++ b/polkadot/runtime/parachains/src/shared.rs @@ -21,7 +21,7 @@ use frame_support::{pallet_prelude::*, traits::DisabledValidators}; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{SessionIndex, ValidatorId, ValidatorIndex}; +use polkadot_primitives::{SessionIndex, ValidatorId, ValidatorIndex}; use sp_runtime::traits::AtLeast32BitUnsigned; use sp_std::{ collections::{btree_map::BTreeMap, vec_deque::VecDeque}, diff --git a/polkadot/runtime/parachains/src/shared/tests.rs b/polkadot/runtime/parachains/src/shared/tests.rs index 4ae37463a6d9..e47d1fd9cfe0 100644 --- a/polkadot/runtime/parachains/src/shared/tests.rs +++ b/polkadot/runtime/parachains/src/shared/tests.rs @@ -21,9 +21,9 @@ use crate::{ shared, }; use assert_matches::assert_matches; -use keyring::Sr25519Keyring; -use primitives::Hash; -use test_helpers::validator_pubkeys; +use polkadot_primitives::Hash; +use polkadot_primitives_test_helpers::validator_pubkeys; +use sp_keyring::Sr25519Keyring; #[test] fn tracker_earliest_block_number() { diff --git 
a/polkadot/runtime/parachains/src/ump_tests.rs b/polkadot/runtime/parachains/src/ump_tests.rs index 43829974b569..4d6da8c9e3c1 100644 --- a/polkadot/runtime/parachains/src/ump_tests.rs +++ b/polkadot/runtime/parachains/src/ump_tests.rs @@ -31,7 +31,7 @@ use frame_support::{ traits::{EnqueueMessage, ExecuteOverweightError, ServiceQueues}, weights::Weight, }; -use primitives::{well_known_keys, Id as ParaId, UpwardMessage}; +use polkadot_primitives::{well_known_keys, Id as ParaId, UpwardMessage}; use sp_crypto_hashing::{blake2_256, twox_64}; use sp_runtime::traits::Bounded; use sp_std::prelude::*; @@ -426,7 +426,7 @@ fn relay_dispatch_queue_size_key_is_correct() { // A "random" para id. let para: ParaId = u32::from_ne_bytes(twox_64(&i.encode())[..4].try_into().unwrap()).into(); - let well_known = primitives::well_known_keys::relay_dispatch_queue_size(para); + let well_known = polkadot_primitives::well_known_keys::relay_dispatch_queue_size(para); let aliased = RelayDispatchQueueSize::hashed_key_for(para); assert_eq!(well_known, aliased, "Old and new key must match"); diff --git a/polkadot/runtime/parachains/src/util.rs b/polkadot/runtime/parachains/src/util.rs index 5aa2d58da3c9..cb2deffd7f65 100644 --- a/polkadot/runtime/parachains/src/util.rs +++ b/polkadot/runtime/parachains/src/util.rs @@ -18,7 +18,7 @@ //! on all modules. 
use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{HeadData, Id as ParaId, PersistedValidationData, ValidatorIndex}; +use polkadot_primitives::{HeadData, Id as ParaId, PersistedValidationData, ValidatorIndex}; use sp_std::{collections::btree_set::BTreeSet, vec::Vec}; use crate::{configuration, hrmp, paras}; @@ -121,7 +121,7 @@ mod tests { use sp_std::vec::Vec; use crate::util::{split_active_subset, take_active_subset}; - use primitives::ValidatorIndex; + use polkadot_primitives::ValidatorIndex; #[test] fn take_active_subset_is_compatible_with_split_active_subset() { diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index c78f3e668b9c..d342926d3c5a 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -11,7 +11,7 @@ license.workspace = true workspace = true [dependencies] -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } serde = { workspace = true } @@ -21,16 +21,16 @@ static_assertions = "1.1.0" smallvec = "1.8.0" bitvec = { version = "1.0.1", default-features = false, features = ["alloc"] } -authority-discovery-primitives = { package = "sp-authority-discovery", path = "../../../substrate/primitives/authority-discovery", default-features = false } -babe-primitives = { package = "sp-consensus-babe", path = "../../../substrate/primitives/consensus/babe", default-features = false } -beefy-primitives = { package = "sp-consensus-beefy", path = "../../../substrate/primitives/consensus/beefy", default-features = false } -grandpa_primitives = { package = "sp-consensus-grandpa", path = "../../../substrate/primitives/consensus/grandpa", default-features = false } 
+sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery", default-features = false } +sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe", default-features = false } +sp-consensus-beefy = { path = "../../../substrate/primitives/consensus/beefy", default-features = false } +sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa", default-features = false } binary-merkle-tree = { path = "../../../substrate/utils/binary-merkle-tree", default-features = false } rococo-runtime-constants = { package = "rococo-runtime-constants", path = "constants", default-features = false } sp-api = { path = "../../../substrate/primitives/api", default-features = false } sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } -inherents = { package = "sp-inherents", path = "../../../substrate/primitives/inherents", default-features = false } -offchain-primitives = { package = "sp-offchain", path = "../../../substrate/primitives/offchain", default-features = false } +sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } @@ -41,8 +41,8 @@ sp-core = { path = "../../../substrate/primitives/core", default-features = fals sp-session = { path = "../../../substrate/primitives/session", default-features = false } sp-storage = { path = "../../../substrate/primitives/storage", default-features = false } sp-version = { path = "../../../substrate/primitives/version", default-features = false } -tx-pool-api = { package = "sp-transaction-pool", path = 
"../../../substrate/primitives/transaction-pool", default-features = false } -block-builder-api = { package = "sp-block-builder", path = "../../../substrate/primitives/block-builder", default-features = false } +sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } +sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } pallet-authority-discovery = { path = "../../../substrate/frame/authority-discovery", default-features = false } pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } @@ -100,9 +100,9 @@ frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-fea frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } hex-literal = { version = "0.4.1" } -runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } -runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parachains", default-features = false } -primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } +polkadot-runtime-common = { path = "../common", default-features = false } +polkadot-runtime-parachains = { path = "../parachains", default-features = false } +polkadot-primitives = { path = "../../primitives", default-features = false } polkadot-parachain-primitives = { path = "../../parachain", default-features = false } xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } @@ -112,7 +112,7 @@ xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api", [dev-dependencies] tiny-keccak = { version = "2.0.2", features = ["keccak"] } -keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" } +sp-keyring = { path = "../../../substrate/primitives/keyring" } remote-externalities = { package = 
"frame-remote-externalities", path = "../../../substrate/utils/frame/remote-externalities" } sp-trie = { path = "../../../substrate/primitives/trie" } separator = "0.4.1" @@ -127,12 +127,9 @@ substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optio default = ["std"] no_std = [] std = [ - "authority-discovery-primitives/std", - "babe-primitives/std", - "beefy-primitives/std", "binary-merkle-tree/std", "bitvec/std", - "block-builder-api/std", + "codec/std", "frame-benchmarking?/std", "frame-executive/std", "frame-metadata-hash-extension/std", @@ -141,10 +138,7 @@ std = [ "frame-system-rpc-runtime-api/std", "frame-system/std", "frame-try-runtime/std", - "grandpa_primitives/std", - "inherents/std", "log/std", - "offchain-primitives/std", "pallet-asset-rate/std", "pallet-authority-discovery/std", "pallet-authorship/std", @@ -190,31 +184,37 @@ std = [ "pallet-whitelist/std", "pallet-xcm-benchmarks?/std", "pallet-xcm/std", - "parity-scale-codec/std", "polkadot-parachain-primitives/std", - "primitives/std", + "polkadot-primitives/std", + "polkadot-runtime-common/std", + "polkadot-runtime-parachains/std", "rococo-runtime-constants/std", - "runtime-common/std", - "runtime-parachains/std", "scale-info/std", "serde/std", "serde_derive", "serde_json/std", "sp-api/std", "sp-arithmetic/std", + "sp-authority-discovery/std", + "sp-block-builder/std", + "sp-consensus-babe/std", + "sp-consensus-beefy/std", + "sp-consensus-grandpa/std", "sp-core/std", "sp-genesis-builder/std", + "sp-inherents/std", "sp-io/std", "sp-mmr-primitives/std", + "sp-offchain/std", "sp-runtime/std", "sp-session/std", "sp-staking/std", "sp-std/std", "sp-storage/std", "sp-tracing/std", + "sp-transaction-pool/std", "sp-version/std", "substrate-wasm-builder", - "tx-pool-api/std", "xcm-builder/std", "xcm-executor/std", "xcm-fee-payment-runtime-api/std", @@ -263,9 +263,9 @@ runtime-benchmarks = [ "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", 
"polkadot-parachain-primitives/runtime-benchmarks", - "primitives/runtime-benchmarks", - "runtime-common/runtime-benchmarks", - "runtime-parachains/runtime-benchmarks", + "polkadot-primitives/runtime-benchmarks", + "polkadot-runtime-common/runtime-benchmarks", + "polkadot-runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "sp-staking/runtime-benchmarks", "xcm-builder/runtime-benchmarks", @@ -321,8 +321,8 @@ try-runtime = [ "pallet-vesting/try-runtime", "pallet-whitelist/try-runtime", "pallet-xcm/try-runtime", - "runtime-common/try-runtime", - "runtime-parachains/try-runtime", + "polkadot-runtime-common/try-runtime", + "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", ] @@ -332,7 +332,7 @@ metadata-hash = ["substrate-wasm-builder/metadata-hash"] # Set timing constants (e.g. session period) to faster versions to speed up testing. fast-runtime = ["rococo-runtime-constants/fast-runtime"] -runtime-metrics = ["runtime-parachains/runtime-metrics", "sp-io/with-tracing"] +runtime-metrics = ["polkadot-runtime-parachains/runtime-metrics", "sp-io/with-tracing"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. 
This will disable stuff that shouldn't be part of the on-chain wasm diff --git a/polkadot/runtime/rococo/constants/Cargo.toml b/polkadot/runtime/rococo/constants/Cargo.toml index 3ca3877a7650..2c49488077e6 100644 --- a/polkadot/runtime/rococo/constants/Cargo.toml +++ b/polkadot/runtime/rococo/constants/Cargo.toml @@ -13,8 +13,8 @@ workspace = true smallvec = "1.8.0" frame-support = { path = "../../../../substrate/frame/support", default-features = false } -primitives = { package = "polkadot-primitives", path = "../../../primitives", default-features = false } -runtime-common = { package = "polkadot-runtime-common", path = "../../common", default-features = false } +polkadot-primitives = { path = "../../../primitives", default-features = false } +polkadot-runtime-common = { path = "../../common", default-features = false } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } sp-weights = { path = "../../../../substrate/primitives/weights", default-features = false } sp-core = { path = "../../../../substrate/primitives/core", default-features = false } @@ -26,8 +26,8 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../xcm/xcm-builde default = ["std"] std = [ "frame-support/std", - "primitives/std", - "runtime-common/std", + "polkadot-primitives/std", + "polkadot-runtime-common/std", "sp-core/std", "sp-runtime/std", "sp-weights/std", diff --git a/polkadot/runtime/rococo/constants/src/lib.rs b/polkadot/runtime/rococo/constants/src/lib.rs index 89d5deb86f1a..1dcafdcbc4d9 100644 --- a/polkadot/runtime/rococo/constants/src/lib.rs +++ b/polkadot/runtime/rococo/constants/src/lib.rs @@ -20,7 +20,7 @@ pub mod weights; /// Money matters. pub mod currency { - use primitives::Balance; + use polkadot_primitives::Balance; /// The existential deposit. pub const EXISTENTIAL_DEPOSIT: Balance = 1 * CENTS; @@ -37,9 +37,9 @@ pub mod currency { /// Time and blocks. 
pub mod time { - use runtime_common::prod_or_fast; + use polkadot_runtime_common::prod_or_fast; - use primitives::{BlockNumber, Moment}; + use polkadot_primitives::{BlockNumber, Moment}; pub const MILLISECS_PER_BLOCK: Moment = 6000; pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; @@ -67,7 +67,7 @@ pub mod fee { use frame_support::weights::{ WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, }; - use primitives::Balance; + use polkadot_primitives::Balance; use smallvec::smallvec; pub use sp_runtime::Perbill; @@ -103,7 +103,7 @@ pub mod fee { /// System Parachains. pub mod system_parachain { - use primitives::Id; + use polkadot_primitives::Id; use xcm_builder::IsChildSystemParachain; /// Network's Asset Hub parachain ID. @@ -134,7 +134,7 @@ mod tests { }; use crate::weights::ExtrinsicBaseWeight; use frame_support::weights::WeightToFee as WeightToFeeT; - use runtime_common::MAXIMUM_BLOCK_WEIGHT; + use polkadot_runtime_common::MAXIMUM_BLOCK_WEIGHT; #[test] // Test that the fee for `MAXIMUM_BLOCK_WEIGHT` of weight has sane bounds. diff --git a/polkadot/runtime/rococo/src/genesis_config_presets.rs b/polkadot/runtime/rococo/src/genesis_config_presets.rs index bac6902383e3..1c70c94ce048 100644 --- a/polkadot/runtime/rococo/src/genesis_config_presets.rs +++ b/polkadot/runtime/rococo/src/genesis_config_presets.rs @@ -17,12 +17,14 @@ //! 
Genesis configs presets for the Rococo runtime use crate::{SessionKeys, BABE_GENESIS_EPOCH_CONFIG}; -use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; -use babe_primitives::AuthorityId as BabeId; -use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; -use grandpa_primitives::AuthorityId as GrandpaId; -use primitives::{vstaging::SchedulerParams, AccountId, AccountPublic, AssignmentId, ValidatorId}; +use polkadot_primitives::{ + vstaging::SchedulerParams, AccountId, AccountPublic, AssignmentId, ValidatorId, +}; use rococo_runtime_constants::currency::UNITS as ROC; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; +use sp_consensus_babe::AuthorityId as BabeId; +use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId; +use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{sr25519, Pair, Public}; use sp_runtime::traits::IdentifyAccount; #[cfg(not(feature = "std"))] @@ -105,12 +107,13 @@ fn rococo_session_keys( } fn default_parachains_host_configuration( -) -> runtime_parachains::configuration::HostConfiguration { - use primitives::{ +) -> polkadot_runtime_parachains::configuration::HostConfiguration +{ + use polkadot_primitives::{ node_features::FeatureIndex, AsyncBackingParams, MAX_CODE_SIZE, MAX_POV_SIZE, }; - runtime_parachains::configuration::HostConfiguration { + polkadot_runtime_parachains::configuration::HostConfiguration { validation_upgrade_cooldown: 2u32, validation_upgrade_delay: 2, code_retention_period: 1200, @@ -205,7 +208,7 @@ fn rococo_testnet_genesis( }, "sudo": { "key": Some(root_key.clone()) }, "configuration": { - "config": runtime_parachains::configuration::HostConfiguration { + "config": polkadot_runtime_parachains::configuration::HostConfiguration { scheduler_params: SchedulerParams { max_validators_per_core: Some(1), ..default_parachains_host_configuration().scheduler_params @@ -214,7 +217,7 @@ fn rococo_testnet_genesis( }, }, "registrar": { - "nextFreeParaId": 
primitives::LOWEST_PUBLIC_ID, + "nextFreeParaId": polkadot_primitives::LOWEST_PUBLIC_ID, } }) } @@ -473,7 +476,7 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { "config": default_parachains_host_configuration(), }, "registrar": { - "nextFreeParaId": primitives::LOWEST_PUBLIC_ID, + "nextFreeParaId": polkadot_primitives::LOWEST_PUBLIC_ID, }, }) } diff --git a/polkadot/runtime/rococo/src/impls.rs b/polkadot/runtime/rococo/src/impls.rs index ac7100d78583..7b5c7b1fb4ac 100644 --- a/polkadot/runtime/rococo/src/impls.rs +++ b/polkadot/runtime/rococo/src/impls.rs @@ -15,12 +15,12 @@ // along with Polkadot. If not, see . use crate::xcm_config; +use codec::{Decode, Encode}; use frame_support::pallet_prelude::DispatchResult; use frame_system::RawOrigin; -use parity_scale_codec::{Decode, Encode}; -use primitives::Balance; +use polkadot_primitives::Balance; +use polkadot_runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; use rococo_runtime_constants::currency::*; -use runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; use sp_std::{marker::PhantomData, prelude::*}; use xcm::{latest::prelude::*, VersionedLocation, VersionedXcm}; use xcm_executor::traits::TransactAsset; diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index a77c0188a1da..91ca5eb5e31d 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -20,18 +20,13 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limit. 
#![recursion_limit = "512"] -use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; -use beefy_primitives::{ - ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}, - mmr::{BeefyDataProvider, MmrLeafVersion}, -}; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dynamic_params::{dynamic_pallet_params, dynamic_params}, traits::FromContains, }; use pallet_nis::WithMaximumOf; -use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; -use primitives::{ +use polkadot_primitives::{ slashing, AccountId, AccountIndex, ApprovalVotingParams, Balance, BlockNumber, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, @@ -39,8 +34,7 @@ use primitives::{ SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID, }; -use rococo_runtime_constants::system_parachain::BROKER_ID; -use runtime_common::{ +use polkadot_runtime_common::{ assigned_slots, auctions, claims, crowdloan, identity_migrator, impl_runtime_weights, impls::{ ContainsParts, LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, @@ -50,7 +44,7 @@ use runtime_common::{ traits::{Leaser, OnSwap}, BlockHashCount, BlockLength, SlowAdjustingFeeUpdate, }; -use runtime_parachains::{ +use polkadot_runtime_parachains::{ assigner_coretime as parachains_assigner_coretime, assigner_on_demand as parachains_assigner_on_demand, configuration as parachains_configuration, configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio, @@ -66,7 +60,13 @@ use runtime_parachains::{ scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; +use rococo_runtime_constants::system_parachain::BROKER_ID; use scale_info::TypeInfo; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; +use sp_consensus_beefy::{ + ecdsa_crypto::{AuthorityId 
as BeefyId, Signature as BeefySignature}, + mmr::{BeefyDataProvider, MmrLeafVersion}, +}; use sp_genesis_builder::PresetId; use sp_std::{ cmp::Ordering, @@ -170,10 +170,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { }; /// The BABE epoch configuration at genesis. -pub const BABE_GENESIS_EPOCH_CONFIG: babe_primitives::BabeEpochConfiguration = - babe_primitives::BabeEpochConfiguration { +pub const BABE_GENESIS_EPOCH_CONFIG: sp_consensus_babe::BabeEpochConfiguration = + sp_consensus_babe::BabeEpochConfiguration { c: PRIMARY_PROBABILITY, - allowed_slots: babe_primitives::AllowedSlots::PrimaryAndSecondaryVRFSlots, + allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryVRFSlots, }; /// Native version. @@ -539,7 +539,7 @@ impl pallet_treasury::Config for Runtime { >; type PayoutPeriod = PayoutSpendPeriod; #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = runtime_common::impls::benchmarks::TreasuryArguments; + type BenchmarkHelper = polkadot_runtime_common::impls::benchmarks::TreasuryArguments; } parameter_types! 
{ @@ -956,7 +956,7 @@ impl parachains_session_info::Config for Runtime { /// Special `RewardValidators` that does nothing ;) pub struct RewardValidators; -impl runtime_parachains::inclusion::RewardValidators for RewardValidators { +impl polkadot_runtime_parachains::inclusion::RewardValidators for RewardValidators { fn reward_backing(_: impl IntoIterator) {} fn reward_bitfields(_: impl IntoIterator) {} } @@ -1364,7 +1364,7 @@ impl pallet_asset_rate::Config for Runtime { type Currency = Balances; type AssetKind = ::AssetKind; #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = runtime_common::impls::benchmarks::AssetRateArguments; + type BenchmarkHelper = polkadot_runtime_common::impls::benchmarks::AssetRateArguments; } // Notify `coretime` pallet when a lease swap occurs @@ -1701,22 +1701,22 @@ mod benches { // Polkadot // NOTE: Make sure to prefix these with `runtime_common::` so // the that path resolves correctly in the generated file. - [runtime_common::assigned_slots, AssignedSlots] - [runtime_common::auctions, Auctions] - [runtime_common::coretime, Coretime] - [runtime_common::crowdloan, Crowdloan] - [runtime_common::claims, Claims] - [runtime_common::identity_migrator, IdentityMigrator] - [runtime_common::slots, Slots] - [runtime_common::paras_registrar, Registrar] - [runtime_parachains::configuration, Configuration] - [runtime_parachains::hrmp, Hrmp] - [runtime_parachains::disputes, ParasDisputes] - [runtime_parachains::inclusion, ParaInclusion] - [runtime_parachains::initializer, Initializer] - [runtime_parachains::paras_inherent, ParaInherent] - [runtime_parachains::paras, Paras] - [runtime_parachains::assigner_on_demand, OnDemandAssignmentProvider] + [polkadot_runtime_common::assigned_slots, AssignedSlots] + [polkadot_runtime_common::auctions, Auctions] + [polkadot_runtime_common::coretime, Coretime] + [polkadot_runtime_common::crowdloan, Crowdloan] + [polkadot_runtime_common::claims, Claims] + [polkadot_runtime_common::identity_migrator, 
IdentityMigrator] + [polkadot_runtime_common::slots, Slots] + [polkadot_runtime_common::paras_registrar, Registrar] + [polkadot_runtime_parachains::configuration, Configuration] + [polkadot_runtime_parachains::hrmp, Hrmp] + [polkadot_runtime_parachains::disputes, ParasDisputes] + [polkadot_runtime_parachains::inclusion, ParaInclusion] + [polkadot_runtime_parachains::initializer, Initializer] + [polkadot_runtime_parachains::paras_inherent, ParaInherent] + [polkadot_runtime_parachains::paras, Paras] + [polkadot_runtime_parachains::assigner_on_demand, OnDemandAssignmentProvider] // Substrate [pallet_balances, Balances] [pallet_balances, NisCounterpartBalances] @@ -1823,7 +1823,7 @@ sp_api::impl_runtime_apis! { } } - impl block_builder_api::BlockBuilder for Runtime { + impl sp_block_builder::BlockBuilder for Runtime { fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { Executive::apply_extrinsic(extrinsic) } @@ -1832,19 +1832,19 @@ sp_api::impl_runtime_apis! { Executive::finalize_block() } - fn inherent_extrinsics(data: inherents::InherentData) -> Vec<::Extrinsic> { + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { data.create_extrinsics() } fn check_inherents( block: Block, - data: inherents::InherentData, - ) -> inherents::CheckInherentsResult { + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } } - impl tx_pool_api::runtime_api::TaggedTransactionQueue for Runtime { + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { fn validate_transaction( source: TransactionSource, tx: ::Extrinsic, @@ -1854,14 +1854,14 @@ sp_api::impl_runtime_apis! 
{ } } - impl offchain_primitives::OffchainWorkerApi for Runtime { + impl sp_offchain::OffchainWorkerApi for Runtime { fn offchain_worker(header: &::Header) { Executive::offchain_worker(header) } } #[api_version(11)] - impl primitives::runtime_api::ParachainHost for Runtime { + impl polkadot_primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() } @@ -1891,7 +1891,7 @@ sp_api::impl_runtime_apis! { fn check_validation_outputs( para_id: ParaId, - outputs: primitives::CandidateCommitments, + outputs: polkadot_primitives::CandidateCommitments, ) -> bool { parachains_runtime_api_impl::check_validation_outputs::(para_id, outputs) } @@ -1948,8 +1948,8 @@ sp_api::impl_runtime_apis! { } fn submit_pvf_check_statement( - stmt: primitives::PvfCheckStatement, - signature: primitives::ValidatorSignature + stmt: polkadot_primitives::PvfCheckStatement, + signature: polkadot_primitives::ValidatorSignature ) { parachains_runtime_api_impl::submit_pvf_check_statement::(stmt, signature) } @@ -1976,7 +1976,7 @@ sp_api::impl_runtime_apis! { fn key_ownership_proof( validator_id: ValidatorId, ) -> Option { - use parity_scale_codec::Encode; + use codec::Encode; Historical::prove((PARACHAIN_KEY_TYPE_ID, validator_id)) .map(|p| p.encode()) @@ -1997,11 +1997,11 @@ sp_api::impl_runtime_apis! { parachains_runtime_api_impl::minimum_backing_votes::() } - fn para_backing_state(para_id: ParaId) -> Option { + fn para_backing_state(para_id: ParaId) -> Option { parachains_runtime_api_impl::backing_state::(para_id) } - fn async_backing_params() -> primitives::AsyncBackingParams { + fn async_backing_params() -> polkadot_primitives::AsyncBackingParams { parachains_runtime_api_impl::async_backing_params::() } @@ -2027,22 +2027,22 @@ sp_api::impl_runtime_apis! 
{ } #[api_version(3)] - impl beefy_primitives::BeefyApi for Runtime { + impl sp_consensus_beefy::BeefyApi for Runtime { fn beefy_genesis() -> Option { pallet_beefy::GenesisBlock::::get() } - fn validator_set() -> Option> { + fn validator_set() -> Option> { Beefy::validator_set() } fn submit_report_equivocation_unsigned_extrinsic( - equivocation_proof: beefy_primitives::DoubleVotingProof< + equivocation_proof: sp_consensus_beefy::DoubleVotingProof< BlockNumber, BeefyId, BeefySignature, >, - key_owner_proof: beefy_primitives::OpaqueKeyOwnershipProof, + key_owner_proof: sp_consensus_beefy::OpaqueKeyOwnershipProof, ) -> Option<()> { let key_owner_proof = key_owner_proof.decode()?; @@ -2053,14 +2053,14 @@ sp_api::impl_runtime_apis! { } fn generate_key_ownership_proof( - _set_id: beefy_primitives::ValidatorSetId, + _set_id: sp_consensus_beefy::ValidatorSetId, authority_id: BeefyId, - ) -> Option { - use parity_scale_codec::Encode; + ) -> Option { + use codec::Encode; - Historical::prove((beefy_primitives::KEY_TYPE, authority_id)) + Historical::prove((sp_consensus_beefy::KEY_TYPE, authority_id)) .map(|p| p.encode()) - .map(beefy_primitives::OpaqueKeyOwnershipProof::new) + .map(sp_consensus_beefy::OpaqueKeyOwnershipProof::new) } } @@ -2139,7 +2139,7 @@ sp_api::impl_runtime_apis! { _set_id: fg_primitives::SetId, authority_id: fg_primitives::AuthorityId, ) -> Option { - use parity_scale_codec::Encode; + use codec::Encode; Historical::prove((fg_primitives::KEY_TYPE, authority_id)) .map(|p| p.encode()) @@ -2147,10 +2147,10 @@ sp_api::impl_runtime_apis! 
{ } } - impl babe_primitives::BabeApi for Runtime { - fn configuration() -> babe_primitives::BabeConfiguration { + impl sp_consensus_babe::BabeApi for Runtime { + fn configuration() -> sp_consensus_babe::BabeConfiguration { let epoch_config = Babe::epoch_config().unwrap_or(BABE_GENESIS_EPOCH_CONFIG); - babe_primitives::BabeConfiguration { + sp_consensus_babe::BabeConfiguration { slot_duration: Babe::slot_duration(), epoch_length: EpochDurationInBlocks::get().into(), c: epoch_config.c, @@ -2160,32 +2160,32 @@ sp_api::impl_runtime_apis! { } } - fn current_epoch_start() -> babe_primitives::Slot { + fn current_epoch_start() -> sp_consensus_babe::Slot { Babe::current_epoch_start() } - fn current_epoch() -> babe_primitives::Epoch { + fn current_epoch() -> sp_consensus_babe::Epoch { Babe::current_epoch() } - fn next_epoch() -> babe_primitives::Epoch { + fn next_epoch() -> sp_consensus_babe::Epoch { Babe::next_epoch() } fn generate_key_ownership_proof( - _slot: babe_primitives::Slot, - authority_id: babe_primitives::AuthorityId, - ) -> Option { - use parity_scale_codec::Encode; + _slot: sp_consensus_babe::Slot, + authority_id: sp_consensus_babe::AuthorityId, + ) -> Option { + use codec::Encode; - Historical::prove((babe_primitives::KEY_TYPE, authority_id)) + Historical::prove((sp_consensus_babe::KEY_TYPE, authority_id)) .map(|p| p.encode()) - .map(babe_primitives::OpaqueKeyOwnershipProof::new) + .map(sp_consensus_babe::OpaqueKeyOwnershipProof::new) } fn submit_report_equivocation_unsigned_extrinsic( - equivocation_proof: babe_primitives::EquivocationProof<::Header>, - key_owner_proof: babe_primitives::OpaqueKeyOwnershipProof, + equivocation_proof: sp_consensus_babe::EquivocationProof<::Header>, + key_owner_proof: sp_consensus_babe::OpaqueKeyOwnershipProof, ) -> Option<()> { let key_owner_proof = key_owner_proof.decode()?; @@ -2196,7 +2196,7 @@ sp_api::impl_runtime_apis! 
{ } } - impl authority_discovery_primitives::AuthorityDiscoveryApi for Runtime { + impl sp_authority_discovery::AuthorityDiscoveryApi for Runtime { fn authorities() -> Vec { parachains_runtime_api_impl::relevant_authority_ids::() } @@ -2239,11 +2239,11 @@ sp_api::impl_runtime_apis! { } impl pallet_beefy_mmr::BeefyMmrApi for RuntimeApi { - fn authority_set_proof() -> beefy_primitives::mmr::BeefyAuthoritySet { + fn authority_set_proof() -> sp_consensus_beefy::mmr::BeefyAuthoritySet { MmrLeaf::authority_set_proof() } - fn next_authority_set_proof() -> beefy_primitives::mmr::BeefyNextAuthoritySet { + fn next_authority_set_proof() -> sp_consensus_beefy::mmr::BeefyNextAuthoritySet { MmrLeaf::next_authority_set_proof() } } @@ -2319,14 +2319,14 @@ sp_api::impl_runtime_apis! { impl frame_benchmarking::baseline::Config for Runtime {} impl pallet_xcm::benchmarking::Config for Runtime { type DeliveryHelper = ( - runtime_common::xcm_sender::ToParachainDeliveryHelper< + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< XcmConfig, ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, AssetHubParaId, (), >, - runtime_common::xcm_sender::ToParachainDeliveryHelper< + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< XcmConfig, ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, @@ -2385,7 +2385,7 @@ sp_api::impl_runtime_apis! 
{ impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = XcmConfig; type AccountIdConverter = LocationConverter; - type DeliveryHelper = runtime_common::xcm_sender::ToParachainDeliveryHelper< + type DeliveryHelper = polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< XcmConfig, ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_assigned_slots.rs b/polkadot/runtime/rococo/src/weights/runtime_common_assigned_slots.rs index a6beeded4286..2aaf282c59d5 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_assigned_slots.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_assigned_slots.rs @@ -47,7 +47,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::assigned_slots`. pub struct WeightInfo(PhantomData); -impl runtime_common::assigned_slots::WeightInfo for WeightInfo { +impl polkadot_runtime_common::assigned_slots::WeightInfo for WeightInfo { /// Storage: `Registrar::Paras` (r:1 w:1) /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::ParaLifecycles` (r:1 w:1) diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_auctions.rs b/polkadot/runtime/rococo/src/weights/runtime_common_auctions.rs index 3cd7c7a47e90..897dc1c1752a 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_auctions.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_auctions.rs @@ -46,7 +46,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::auctions`. 
pub struct WeightInfo(PhantomData); -impl runtime_common::auctions::WeightInfo for WeightInfo { +impl polkadot_runtime_common::auctions::WeightInfo for WeightInfo { /// Storage: Auctions AuctionInfo (r:1 w:1) /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) /// Storage: Auctions AuctionCounter (r:1 w:1) diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_claims.rs b/polkadot/runtime/rococo/src/weights/runtime_common_claims.rs index 52e0dd24afa0..8fbc798dbd46 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_claims.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_claims.rs @@ -46,7 +46,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::claims`. pub struct WeightInfo(PhantomData); -impl runtime_common::claims::WeightInfo for WeightInfo { +impl polkadot_runtime_common::claims::WeightInfo for WeightInfo { /// Storage: Claims Claims (r:1 w:1) /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) /// Storage: Claims Signing (r:1 w:1) diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_crowdloan.rs b/polkadot/runtime/rococo/src/weights/runtime_common_crowdloan.rs index 0e7420cba2e6..b75ff8d42e7e 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_crowdloan.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_crowdloan.rs @@ -46,7 +46,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::crowdloan`. 
pub struct WeightInfo(PhantomData); -impl runtime_common::crowdloan::WeightInfo for WeightInfo { +impl polkadot_runtime_common::crowdloan::WeightInfo for WeightInfo { /// Storage: Crowdloan Funds (r:1 w:1) /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) /// Storage: Registrar Paras (r:1 w:1) diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs b/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs index cec357453b67..4ea6f6796801 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs @@ -42,7 +42,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::identity_migrator`. pub struct WeightInfo(PhantomData); -impl runtime_common::identity_migrator::WeightInfo for WeightInfo { +impl polkadot_runtime_common::identity_migrator::WeightInfo for WeightInfo { /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_paras_registrar.rs b/polkadot/runtime/rococo/src/weights/runtime_common_paras_registrar.rs index 0a56562a1a95..0ce09d1be2a4 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_paras_registrar.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_paras_registrar.rs @@ -46,7 +46,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::paras_registrar`. 
pub struct WeightInfo(PhantomData); -impl runtime_common::paras_registrar::WeightInfo for WeightInfo { +impl polkadot_runtime_common::paras_registrar::WeightInfo for WeightInfo { /// Storage: Registrar NextFreeParaId (r:1 w:1) /// Proof Skipped: Registrar NextFreeParaId (max_values: Some(1), max_size: None, mode: Measured) /// Storage: Registrar Paras (r:1 w:1) diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_slots.rs b/polkadot/runtime/rococo/src/weights/runtime_common_slots.rs index 23ab1ed3ee0e..8c601aa8486f 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_slots.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_slots.rs @@ -46,7 +46,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::slots`. pub struct WeightInfo(PhantomData); -impl runtime_common::slots::WeightInfo for WeightInfo { +impl polkadot_runtime_common::slots::WeightInfo for WeightInfo { /// Storage: Slots Leases (r:1 w:1) /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) /// Storage: System Account (r:1 w:1) diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs index dba9e7904c79..9f275e7b8cdc 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs @@ -47,7 +47,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::assigner_on_demand`. 
pub struct WeightInfo(PhantomData); -impl runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo { /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs index ca0575cb1b64..5592a85c90fa 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs @@ -47,7 +47,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::configuration`. pub struct WeightInfo(PhantomData); -impl runtime_parachains::configuration::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::configuration::WeightInfo for WeightInfo { /// Storage: `Configuration::PendingConfigs` (r:1 w:1) /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_coretime.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_coretime.rs index d9f2d45207b9..0ad32996c495 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_coretime.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_coretime.rs @@ -45,11 +45,11 @@ use frame_support::{traits::Get, weights::Weight}; use core::marker::PhantomData; -use runtime_parachains::configuration::{self, WeightInfo as ConfigWeightInfo}; +use polkadot_runtime_parachains::configuration::{self, WeightInfo as ConfigWeightInfo}; /// Weight functions for `runtime_common::coretime`. 
pub struct WeightInfo(PhantomData); -impl runtime_parachains::coretime::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::coretime::WeightInfo for WeightInfo { fn request_core_count() -> Weight { ::WeightInfo::set_config_with_u32() } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_disputes.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_disputes.rs index 63a8c3addc7d..a20515502b19 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_disputes.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_disputes.rs @@ -46,7 +46,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::disputes`. pub struct WeightInfo(PhantomData); -impl runtime_parachains::disputes::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::disputes::WeightInfo for WeightInfo { /// Storage: ParasDisputes Frozen (r:0 w:1) /// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured) fn force_unfreeze() -> Weight { diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs index 572ecc7d4110..3c9def0b37e5 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs @@ -47,7 +47,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::hrmp`. 
pub struct WeightInfo(PhantomData); -impl runtime_parachains::hrmp::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::hrmp::WeightInfo for WeightInfo { /// Storage: `Paras::ParaLifecycles` (r:1 w:0) /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_inclusion.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_inclusion.rs index a121ad774cef..da1b7a0dad9a 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_inclusion.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_inclusion.rs @@ -46,7 +46,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::inclusion`. pub struct WeightInfo(PhantomData); -impl runtime_parachains::inclusion::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::inclusion::WeightInfo for WeightInfo { /// Storage: MessageQueue BookStateFor (r:1 w:1) /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) /// Storage: MessageQueue Pages (r:1 w:999) diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_initializer.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_initializer.rs index 5c627507dfb6..6065c32b1741 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_initializer.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_initializer.rs @@ -46,7 +46,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::initializer`. pub struct WeightInfo(PhantomData); -impl runtime_parachains::initializer::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::initializer::WeightInfo for WeightInfo { /// Storage: System Digest (r:1 w:1) /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) /// The range of component `d` is `[0, 65536]`. 
diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras.rs index dfd95006dc7d..2dcabb7c36bb 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras.rs @@ -46,7 +46,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::paras`. pub struct WeightInfo(PhantomData); -impl runtime_parachains::paras::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::paras::WeightInfo for WeightInfo { /// Storage: Paras CurrentCodeHash (r:1 w:1) /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) /// Storage: Paras CodeByHashRefs (r:1 w:1) diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs index c250c86665be..c00966fb8048 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs @@ -47,7 +47,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::paras_inherent`. 
pub struct WeightInfo(PhantomData); -impl runtime_parachains::paras_inherent::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::paras_inherent::WeightInfo for WeightInfo { /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `System::ParentHash` (r:1 w:0) diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index decbc795143f..96416821e4c8 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -29,11 +29,11 @@ use frame_support::{ weights::Weight, }; use frame_system::EnsureRoot; -use rococo_runtime_constants::{currency::CENTS, system_parachain::*}; -use runtime_common::{ +use polkadot_runtime_common::{ xcm_sender::{ChildParachainRouter, ExponentialPrice}, ToAuthor, }; +use rococo_runtime_constants::{currency::CENTS, system_parachain::*}; use sp_core::ConstU32; use xcm::latest::prelude::*; use xcm_builder::{ diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index 596cc974c825..c4d78b1081a6 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -11,17 +11,17 @@ license.workspace = true workspace = true [dependencies] -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { workspace = true } -authority-discovery-primitives = { package = "sp-authority-discovery", path = "../../../substrate/primitives/authority-discovery", default-features = false } -babe-primitives = { package = "sp-consensus-babe", path = "../../../substrate/primitives/consensus/babe", default-features = false } 
-beefy-primitives = { package = "sp-consensus-beefy", path = "../../../substrate/primitives/consensus/beefy", default-features = false } +sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery", default-features = false } +sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe", default-features = false } +sp-consensus-beefy = { path = "../../../substrate/primitives/consensus/beefy", default-features = false } sp-api = { path = "../../../substrate/primitives/api", default-features = false } -inherents = { package = "sp-inherents", path = "../../../substrate/primitives/inherents", default-features = false } -offchain-primitives = { package = "sp-offchain", path = "../../../substrate/primitives/offchain", default-features = false } +sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } @@ -32,8 +32,8 @@ sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-rang sp-session = { path = "../../../substrate/primitives/session", default-features = false } sp-version = { path = "../../../substrate/primitives/version", default-features = false } frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support", default-features = false } -tx-pool-api = { package = "sp-transaction-pool", path = "../../../substrate/primitives/transaction-pool", default-features = false } -block-builder-api = { package = "sp-block-builder", path = "../../../substrate/primitives/block-builder", default-features = false } +sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features 
= false } +sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } pallet-authority-discovery = { path = "../../../substrate/frame/authority-discovery", default-features = false } pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } @@ -56,8 +56,8 @@ pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-featur pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = false } -runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } -primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } +polkadot-runtime-common = { path = "../common", default-features = false } +polkadot-primitives = { path = "../../primitives", default-features = false } pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false } polkadot-runtime-parachains = { path = "../parachains", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } @@ -67,7 +67,7 @@ xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } [dev-dependencies] hex-literal = "0.4.1" tiny-keccak = { version = "2.0.2", features = ["keccak"] } -keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" } +sp-keyring = { path = "../../../substrate/primitives/keyring" } sp-trie = { path = "../../../substrate/primitives/trie" } serde_json = { workspace = true, default-features = true } @@ -84,18 +84,13 @@ runtime-metrics = [ ] std = [ - "authority-discovery-primitives/std", - "babe-primitives/std", - "beefy-primitives/std", - "block-builder-api/std", + "codec/std", "frame-election-provider-support/std", "frame-executive/std", "frame-support/std", "frame-system-rpc-runtime-api/std", 
"frame-system/std", - "inherents/std", "log/std", - "offchain-primitives/std", "pallet-authority-discovery/std", "pallet-authorship/std", "pallet-babe/std", @@ -111,24 +106,29 @@ std = [ "pallet-transaction-payment/std", "pallet-vesting/std", "pallet-xcm/std", - "parity-scale-codec/std", + "polkadot-primitives/std", + "polkadot-runtime-common/std", "polkadot-runtime-parachains/std", - "primitives/std", - "runtime-common/std", "scale-info/std", "serde/std", "sp-api/std", + "sp-authority-discovery/std", + "sp-block-builder/std", + "sp-consensus-babe/std", + "sp-consensus-beefy/std", "sp-core/std", "sp-genesis-builder/std", + "sp-inherents/std", "sp-io/std", "sp-mmr-primitives/std", + "sp-offchain/std", "sp-runtime/std", "sp-session/std", "sp-staking/std", "sp-std/std", + "sp-transaction-pool/std", "sp-version/std", "test-runtime-constants/std", - "tx-pool-api/std", "xcm-builder/std", "xcm-executor/std", "xcm/std", @@ -148,9 +148,9 @@ runtime-benchmarks = [ "pallet-timestamp/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", + "polkadot-primitives/runtime-benchmarks", + "polkadot-runtime-common/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", - "primitives/runtime-benchmarks", - "runtime-common/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "sp-staking/runtime-benchmarks", "xcm-builder/runtime-benchmarks", diff --git a/polkadot/runtime/test-runtime/constants/Cargo.toml b/polkadot/runtime/test-runtime/constants/Cargo.toml index 5b8a4d7a051a..ed10ece54f67 100644 --- a/polkadot/runtime/test-runtime/constants/Cargo.toml +++ b/polkadot/runtime/test-runtime/constants/Cargo.toml @@ -13,13 +13,13 @@ workspace = true smallvec = "1.8.0" frame-support = { path = "../../../../substrate/frame/support", default-features = false } -primitives = { package = "polkadot-primitives", path = "../../../primitives", default-features = false } +polkadot-primitives = { path = "../../../primitives", default-features = false } 
sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } [features] default = ["std"] std = [ "frame-support/std", - "primitives/std", + "polkadot-primitives/std", "sp-runtime/std", ] diff --git a/polkadot/runtime/test-runtime/constants/src/lib.rs b/polkadot/runtime/test-runtime/constants/src/lib.rs index 2422762ca38e..0d16909b2990 100644 --- a/polkadot/runtime/test-runtime/constants/src/lib.rs +++ b/polkadot/runtime/test-runtime/constants/src/lib.rs @@ -20,7 +20,7 @@ pub mod weights; /// Money matters. pub mod currency { - use primitives::Balance; + use polkadot_primitives::Balance; pub const DOTS: Balance = 1_000_000_000_000; pub const DOLLARS: Balance = DOTS; @@ -30,7 +30,7 @@ pub mod currency { /// Time and blocks. pub mod time { - use primitives::{BlockNumber, Moment}; + use polkadot_primitives::{BlockNumber, Moment}; // Testnet pub const MILLISECS_PER_BLOCK: Moment = 6000; pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; @@ -55,7 +55,7 @@ pub mod fee { use frame_support::weights::{ WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, }; - use primitives::Balance; + use polkadot_primitives::Balance; use smallvec::smallvec; pub use sp_runtime::Perbill; diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 9eb0fcca6678..8178639946f8 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -20,8 +20,8 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. 
#![recursion_limit = "256"] +use codec::Encode; use pallet_transaction_payment::FungibleAdapter; -use parity_scale_codec::Encode; use sp_std::{ collections::{btree_map::BTreeMap, vec_deque::VecDeque}, prelude::*, @@ -41,8 +41,6 @@ use polkadot_runtime_parachains::{ shared as parachains_shared, }; -use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; -use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; use frame_election_provider_support::{ bounds::{ElectionBounds, ElectionBoundsBuilder}, onchain, SequentialPhragmen, @@ -56,8 +54,7 @@ use frame_support::{ use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_session::historical as session_historical; use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; -use polkadot_runtime_parachains::reward_points::RewardValidatorsWithEraPoints; -use primitives::{ +use polkadot_primitives::{ slashing, AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash as HashT, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, @@ -65,10 +62,13 @@ use primitives::{ SessionInfo as SessionInfoData, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID, }; -use runtime_common::{ +use polkadot_runtime_common::{ claims, impl_runtime_weights, paras_sudo_wrapper, BlockHashCount, BlockLength, SlowAdjustingFeeUpdate, }; +use polkadot_runtime_parachains::reward_points::RewardValidatorsWithEraPoints; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; +use sp_consensus_beefy::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; use sp_core::{ConstU32, OpaqueMetadata}; use sp_mmr_primitives as mmr; use sp_runtime::{ @@ -121,10 +121,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { }; /// The BABE epoch configuration at genesis. 
-pub const BABE_GENESIS_EPOCH_CONFIG: babe_primitives::BabeEpochConfiguration = - babe_primitives::BabeEpochConfiguration { +pub const BABE_GENESIS_EPOCH_CONFIG: sp_consensus_babe::BabeEpochConfiguration = + sp_consensus_babe::BabeEpochConfiguration { c: PRIMARY_PROBABILITY, - allowed_slots: babe_primitives::AllowedSlots::PrimaryAndSecondaryVRFSlots, + allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryVRFSlots, }; /// Native version. @@ -324,7 +324,8 @@ parameter_types! { pub struct OnChainSeqPhragmen; impl onchain::Config for OnChainSeqPhragmen { type System = Runtime; - type Solver = SequentialPhragmen; + type Solver = + SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); type Bounds = ElectionBoundsOnChain; @@ -338,7 +339,7 @@ impl pallet_staking::Config for Runtime { type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = Timestamp; - type CurrencyToVote = runtime_common::CurrencyToVote; + type CurrencyToVote = polkadot_runtime_common::CurrencyToVote; type RewardRemainder = (); type RuntimeEvent = RuntimeEvent; type Slash = (); @@ -361,7 +362,7 @@ impl pallet_staking::Config for Runtime { type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; type MaxControllersInDeprecationBatch = ConstU32<5900>; type HistoryDepth = frame_support::traits::ConstU32<84>; - type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; + type BenchmarkingConfig = polkadot_runtime_common::StakingBenchmarkingConfig; type EventListeners = (); type WeightInfo = (); type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; @@ -796,7 +797,7 @@ sp_api::impl_runtime_apis! { } } - impl block_builder_api::BlockBuilder for Runtime { + impl sp_block_builder::BlockBuilder for Runtime { fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { Executive::apply_extrinsic(extrinsic) } @@ -805,19 +806,19 @@ sp_api::impl_runtime_apis! 
{ Executive::finalize_block() } - fn inherent_extrinsics(data: inherents::InherentData) -> Vec<::Extrinsic> { + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { data.create_extrinsics() } fn check_inherents( block: Block, - data: inherents::InherentData, - ) -> inherents::CheckInherentsResult { + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } } - impl tx_pool_api::runtime_api::TaggedTransactionQueue for Runtime { + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { fn validate_transaction( source: TransactionSource, tx: ::Extrinsic, @@ -827,20 +828,20 @@ sp_api::impl_runtime_apis! { } } - impl offchain_primitives::OffchainWorkerApi for Runtime { + impl sp_offchain::OffchainWorkerApi for Runtime { fn offchain_worker(header: &::Header) { Executive::offchain_worker(header) } } - impl authority_discovery_primitives::AuthorityDiscoveryApi for Runtime { + impl sp_authority_discovery::AuthorityDiscoveryApi for Runtime { fn authorities() -> Vec { runtime_impl::relevant_authority_ids::() } } #[api_version(11)] - impl primitives::runtime_api::ParachainHost for Runtime { + impl polkadot_primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { runtime_impl::validators::() } @@ -871,7 +872,7 @@ sp_api::impl_runtime_apis! { fn check_validation_outputs( para_id: ParaId, - outputs: primitives::CandidateCommitments, + outputs: polkadot_primitives::CandidateCommitments, ) -> bool { runtime_impl::check_validation_outputs::(para_id, outputs) } @@ -924,8 +925,8 @@ sp_api::impl_runtime_apis! { } fn submit_pvf_check_statement( - stmt: primitives::PvfCheckStatement, - signature: primitives::ValidatorSignature, + stmt: polkadot_primitives::PvfCheckStatement, + signature: polkadot_primitives::ValidatorSignature, ) { runtime_impl::submit_pvf_check_statement::(stmt, signature) } @@ -952,7 +953,7 @@ sp_api::impl_runtime_apis! 
{ fn key_ownership_proof( validator_id: ValidatorId, ) -> Option { - use parity_scale_codec::Encode; + use codec::Encode; Historical::prove((PARACHAIN_KEY_TYPE_ID, validator_id)) .map(|p| p.encode()) @@ -973,15 +974,15 @@ sp_api::impl_runtime_apis! { runtime_impl::minimum_backing_votes::() } - fn para_backing_state(para_id: ParaId) -> Option { + fn para_backing_state(para_id: ParaId) -> Option { runtime_impl::backing_state::(para_id) } - fn async_backing_params() -> primitives::AsyncBackingParams { + fn async_backing_params() -> polkadot_primitives::AsyncBackingParams { runtime_impl::async_backing_params::() } - fn approval_voting_params() -> primitives::ApprovalVotingParams { + fn approval_voting_params() -> polkadot_primitives::ApprovalVotingParams { runtime_impl::approval_voting_params::() } @@ -989,7 +990,7 @@ sp_api::impl_runtime_apis! { runtime_impl::disabled_validators::() } - fn node_features() -> primitives::NodeFeatures { + fn node_features() -> polkadot_primitives::NodeFeatures { runtime_impl::node_features::() } @@ -1002,32 +1003,32 @@ sp_api::impl_runtime_apis! { } } - impl beefy_primitives::BeefyApi for Runtime { + impl sp_consensus_beefy::BeefyApi for Runtime { fn beefy_genesis() -> Option { // dummy implementation due to lack of BEEFY pallet. None } - fn validator_set() -> Option> { + fn validator_set() -> Option> { // dummy implementation due to lack of BEEFY pallet. 
None } fn submit_report_equivocation_unsigned_extrinsic( - _equivocation_proof: beefy_primitives::DoubleVotingProof< + _equivocation_proof: sp_consensus_beefy::DoubleVotingProof< BlockNumber, BeefyId, BeefySignature, >, - _key_owner_proof: beefy_primitives::OpaqueKeyOwnershipProof, + _key_owner_proof: sp_consensus_beefy::OpaqueKeyOwnershipProof, ) -> Option<()> { None } fn generate_key_ownership_proof( - _set_id: beefy_primitives::ValidatorSetId, + _set_id: sp_consensus_beefy::ValidatorSetId, _authority_id: BeefyId, - ) -> Option { + ) -> Option { None } } @@ -1090,10 +1091,10 @@ sp_api::impl_runtime_apis! { } } - impl babe_primitives::BabeApi for Runtime { - fn configuration() -> babe_primitives::BabeConfiguration { + impl sp_consensus_babe::BabeApi for Runtime { + fn configuration() -> sp_consensus_babe::BabeConfiguration { let epoch_config = Babe::epoch_config().unwrap_or(BABE_GENESIS_EPOCH_CONFIG); - babe_primitives::BabeConfiguration { + sp_consensus_babe::BabeConfiguration { slot_duration: Babe::slot_duration(), epoch_length: EpochDuration::get(), c: epoch_config.c, @@ -1103,28 +1104,28 @@ sp_api::impl_runtime_apis! 
{ } } - fn current_epoch_start() -> babe_primitives::Slot { + fn current_epoch_start() -> sp_consensus_babe::Slot { Babe::current_epoch_start() } - fn current_epoch() -> babe_primitives::Epoch { + fn current_epoch() -> sp_consensus_babe::Epoch { Babe::current_epoch() } - fn next_epoch() -> babe_primitives::Epoch { + fn next_epoch() -> sp_consensus_babe::Epoch { Babe::next_epoch() } fn generate_key_ownership_proof( - _slot: babe_primitives::Slot, - _authority_id: babe_primitives::AuthorityId, - ) -> Option { + _slot: sp_consensus_babe::Slot, + _authority_id: sp_consensus_babe::AuthorityId, + ) -> Option { None } fn submit_report_equivocation_unsigned_extrinsic( - _equivocation_proof: babe_primitives::EquivocationProof<::Header>, - _key_owner_proof: babe_primitives::OpaqueKeyOwnershipProof, + _equivocation_proof: sp_consensus_babe::EquivocationProof<::Header>, + _key_owner_proof: sp_consensus_babe::OpaqueKeyOwnershipProof, ) -> Option<()> { None } diff --git a/polkadot/runtime/test-runtime/src/xcm_config.rs b/polkadot/runtime/test-runtime/src/xcm_config.rs index fc3d0dc42a3b..b1d86ff9a85e 100644 --- a/polkadot/runtime/test-runtime/src/xcm_config.rs +++ b/polkadot/runtime/test-runtime/src/xcm_config.rs @@ -20,8 +20,8 @@ use frame_support::{ weights::Weight, }; use frame_system::EnsureRoot; +use polkadot_runtime_common::xcm_sender::{ChildParachainRouter, PriceForMessageDelivery}; use polkadot_runtime_parachains::FeeTracker; -use runtime_common::xcm_sender::{ChildParachainRouter, PriceForMessageDelivery}; use xcm::latest::prelude::*; use xcm_builder::{ AllowUnpaidExecutionFrom, EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index 56623272be82..ccb8a02b981c 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] bitvec = { version = "1.0.0", default-features = false, features = 
["alloc"] } -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } @@ -20,12 +20,12 @@ serde = { workspace = true } serde_derive = { optional = true, workspace = true } smallvec = "1.8.0" -authority-discovery-primitives = { package = "sp-authority-discovery", path = "../../../substrate/primitives/authority-discovery", default-features = false } -babe-primitives = { package = "sp-consensus-babe", path = "../../../substrate/primitives/consensus/babe", default-features = false } -beefy-primitives = { package = "sp-consensus-beefy", path = "../../../substrate/primitives/consensus/beefy", default-features = false } +sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery", default-features = false } +sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe", default-features = false } +sp-consensus-beefy = { path = "../../../substrate/primitives/consensus/beefy", default-features = false } binary-merkle-tree = { path = "../../../substrate/utils/binary-merkle-tree", default-features = false } -inherents = { package = "sp-inherents", path = "../../../substrate/primitives/inherents", default-features = false } -offchain-primitives = { package = "sp-offchain", path = "../../../substrate/primitives/offchain", default-features = false } +sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } sp-api = { path = "../../../substrate/primitives/api", default-features = false } sp-application-crypto = { path = 
"../../../substrate/primitives/application-crypto", default-features = false } sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } @@ -39,8 +39,8 @@ sp-core = { path = "../../../substrate/primitives/core", default-features = fals sp-session = { path = "../../../substrate/primitives/session", default-features = false } sp-storage = { path = "../../../substrate/primitives/storage", default-features = false } sp-version = { path = "../../../substrate/primitives/version", default-features = false } -tx-pool-api = { package = "sp-transaction-pool", path = "../../../substrate/primitives/transaction-pool", default-features = false } -block-builder-api = { package = "sp-block-builder", path = "../../../substrate/primitives/block-builder", default-features = false } +sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } +sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", default-features = false } frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support", default-features = false } @@ -107,10 +107,10 @@ pallet-offences-benchmarking = { path = "../../../substrate/frame/offences/bench pallet-session-benchmarking = { path = "../../../substrate/frame/session/benchmarking", default-features = false, optional = true } hex-literal = { version = "0.4.1", optional = true } -runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } -primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } +polkadot-runtime-common = { path = "../common", default-features = false } +polkadot-primitives = { path = "../../primitives", default-features = false } polkadot-parachain-primitives = { path = "../../parachain", default-features = false } 
-runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parachains", default-features = false } +polkadot-runtime-parachains = { path = "../parachains", default-features = false } xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false } @@ -120,7 +120,7 @@ xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api", [dev-dependencies] hex-literal = "0.4.1" tiny-keccak = { version = "2.0.2", features = ["keccak"] } -keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" } +sp-keyring = { path = "../../../substrate/primitives/keyring" } serde_json = { workspace = true, default-features = true } remote-externalities = { package = "frame-remote-externalities", path = "../../../substrate/utils/frame/remote-externalities" } tokio = { version = "1.24.2", features = ["macros"] } @@ -134,12 +134,9 @@ default = ["std"] no_std = [] only-staking = [] std = [ - "authority-discovery-primitives/std", - "babe-primitives/std", - "beefy-primitives/std", "binary-merkle-tree/std", "bitvec/std", - "block-builder-api/std", + "codec/std", "frame-benchmarking?/std", "frame-election-provider-support/std", "frame-executive/std", @@ -149,9 +146,7 @@ std = [ "frame-system-rpc-runtime-api/std", "frame-system/std", "frame-try-runtime/std", - "inherents/std", "log/std", - "offchain-primitives/std", "pallet-asset-rate/std", "pallet-authority-discovery/std", "pallet-authorship/std", @@ -202,11 +197,10 @@ std = [ "pallet-whitelist/std", "pallet-xcm-benchmarks?/std", "pallet-xcm/std", - "parity-scale-codec/std", "polkadot-parachain-primitives/std", - "primitives/std", - "runtime-common/std", - "runtime-parachains/std", + "polkadot-primitives/std", + "polkadot-runtime-common/std", + "polkadot-runtime-parachains/std", "rustc-hex/std", "scale-info/std", "serde/std", @@ -214,19 +208,25 @@ std = [ 
"sp-api/std", "sp-application-crypto/std", "sp-arithmetic/std", + "sp-authority-discovery/std", + "sp-block-builder/std", + "sp-consensus-babe/std", + "sp-consensus-beefy/std", "sp-core/std", "sp-genesis-builder/std", + "sp-inherents/std", "sp-io/std", "sp-mmr-primitives/std", "sp-npos-elections/std", + "sp-offchain/std", "sp-runtime/std", "sp-session/std", "sp-staking/std", "sp-std/std", "sp-storage/std", "sp-tracing/std", + "sp-transaction-pool/std", "sp-version/std", - "tx-pool-api/std", "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", @@ -281,9 +281,9 @@ runtime-benchmarks = [ "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", - "primitives/runtime-benchmarks", - "runtime-common/runtime-benchmarks", - "runtime-parachains/runtime-benchmarks", + "polkadot-primitives/runtime-benchmarks", + "polkadot-runtime-common/runtime-benchmarks", + "polkadot-runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "sp-staking/runtime-benchmarks", "xcm-builder/runtime-benchmarks", @@ -339,8 +339,8 @@ try-runtime = [ "pallet-vesting/try-runtime", "pallet-whitelist/try-runtime", "pallet-xcm/try-runtime", - "runtime-common/try-runtime", - "runtime-parachains/try-runtime", + "polkadot-runtime-common/try-runtime", + "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", ] @@ -350,7 +350,7 @@ metadata-hash = ["substrate-wasm-builder/metadata-hash"] # Set timing constants (e.g. session period) to faster versions to speed up testing. fast-runtime = [] -runtime-metrics = ["runtime-parachains/runtime-metrics", "sp-io/with-tracing"] +runtime-metrics = ["polkadot-runtime-parachains/runtime-metrics", "sp-io/with-tracing"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. 
This will disable stuff that shouldn't be part of the on-chain wasm diff --git a/polkadot/runtime/westend/constants/Cargo.toml b/polkadot/runtime/westend/constants/Cargo.toml index 81df8f4f024d..d50b168fac52 100644 --- a/polkadot/runtime/westend/constants/Cargo.toml +++ b/polkadot/runtime/westend/constants/Cargo.toml @@ -13,8 +13,8 @@ workspace = true smallvec = "1.8.0" frame-support = { path = "../../../../substrate/frame/support", default-features = false } -primitives = { package = "polkadot-primitives", path = "../../../primitives", default-features = false } -runtime-common = { package = "polkadot-runtime-common", path = "../../common", default-features = false } +polkadot-primitives = { path = "../../../primitives", default-features = false } +polkadot-runtime-common = { path = "../../common", default-features = false } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } sp-weights = { path = "../../../../substrate/primitives/weights", default-features = false } sp-core = { path = "../../../../substrate/primitives/core", default-features = false } @@ -26,8 +26,8 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../xcm/xcm-builde default = ["std"] std = [ "frame-support/std", - "primitives/std", - "runtime-common/std", + "polkadot-primitives/std", + "polkadot-runtime-common/std", "sp-core/std", "sp-runtime/std", "sp-weights/std", diff --git a/polkadot/runtime/westend/constants/src/lib.rs b/polkadot/runtime/westend/constants/src/lib.rs index 1a4c1f311061..58048272e791 100644 --- a/polkadot/runtime/westend/constants/src/lib.rs +++ b/polkadot/runtime/westend/constants/src/lib.rs @@ -20,7 +20,7 @@ pub mod weights; /// Money matters. pub mod currency { - use primitives::Balance; + use polkadot_primitives::Balance; /// The existential deposit. pub const EXISTENTIAL_DEPOSIT: Balance = 1 * CENTS; @@ -37,8 +37,8 @@ pub mod currency { /// Time and blocks. 
pub mod time { - use primitives::{BlockNumber, Moment}; - use runtime_common::prod_or_fast; + use polkadot_primitives::{BlockNumber, Moment}; + use polkadot_runtime_common::prod_or_fast; pub const MILLISECS_PER_BLOCK: Moment = 6000; pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; @@ -62,7 +62,7 @@ pub mod fee { use frame_support::weights::{ WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, }; - use primitives::Balance; + use polkadot_primitives::Balance; use smallvec::smallvec; pub use sp_runtime::Perbill; @@ -98,7 +98,7 @@ pub mod fee { /// System Parachains. pub mod system_parachain { - use primitives::Id; + use polkadot_primitives::Id; use xcm_builder::IsChildSystemParachain; /// Network's Asset Hub parachain ID. @@ -144,7 +144,7 @@ mod tests { }; use crate::weights::ExtrinsicBaseWeight; use frame_support::weights::WeightToFee as WeightToFeeT; - use runtime_common::MAXIMUM_BLOCK_WEIGHT; + use polkadot_runtime_common::MAXIMUM_BLOCK_WEIGHT; #[test] // Test that the fee for `MAXIMUM_BLOCK_WEIGHT` of weight has sane bounds. diff --git a/polkadot/runtime/westend/src/impls.rs b/polkadot/runtime/westend/src/impls.rs index 71e6b696a20a..d7ca677a7620 100644 --- a/polkadot/runtime/westend/src/impls.rs +++ b/polkadot/runtime/westend/src/impls.rs @@ -15,11 +15,11 @@ // along with Polkadot. If not, see . 
use crate::xcm_config; +use codec::{Decode, Encode}; use frame_support::pallet_prelude::DispatchResult; use frame_system::RawOrigin; -use parity_scale_codec::{Decode, Encode}; -use primitives::Balance; -use runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; +use polkadot_primitives::Balance; +use polkadot_runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; use sp_std::{marker::PhantomData, prelude::*}; use westend_runtime_constants::currency::*; use xcm::{latest::prelude::*, VersionedLocation, VersionedXcm}; diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index bcdb00c76337..77262a98a94c 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -20,11 +20,7 @@ // `#[frame_support::runtime]!` does a lot of recursion and requires us to increase the limit. #![recursion_limit = "512"] -use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; -use beefy_primitives::{ - ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}, - mmr::{BeefyDataProvider, MmrLeafVersion}, -}; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_election_provider_support::{bounds::ElectionBoundsBuilder, onchain, SequentialPhragmen}; use frame_support::{ derive_impl, @@ -43,8 +39,7 @@ use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_identity::legacy::IdentityInfo; use pallet_session::historical as session_historical; use pallet_transaction_payment::{FeeDetails, FungibleAdapter, RuntimeDispatchInfo}; -use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; -use primitives::{ +use polkadot_primitives::{ slashing, AccountId, AccountIndex, ApprovalVotingParams, Balance, BlockNumber, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, @@ -52,7 +47,7 @@ use primitives::{ ScrapedOnChainVotes, 
SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, PARACHAIN_KEY_TYPE_ID, }; -use runtime_common::{ +use polkadot_runtime_common::{ assigned_slots, auctions, crowdloan, elections::OnChainAccuracy, identity_migrator, impl_runtime_weights, @@ -65,7 +60,7 @@ use runtime_common::{ BalanceToU256, BlockHashCount, BlockLength, CurrencyToVote, SlowAdjustingFeeUpdate, U256ToBalance, }; -use runtime_parachains::{ +use polkadot_runtime_parachains::{ assigner_coretime as parachains_assigner_coretime, assigner_on_demand as parachains_assigner_on_demand, configuration as parachains_configuration, configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio, @@ -82,6 +77,11 @@ use runtime_parachains::{ shared as parachains_shared, }; use scale_info::TypeInfo; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; +use sp_consensus_beefy::{ + ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}, + mmr::{BeefyDataProvider, MmrLeafVersion}, +}; use sp_core::{ConstU8, OpaqueMetadata, RuntimeDebug, H256}; use sp_runtime::{ create_runtime_str, @@ -162,10 +162,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { }; /// The BABE epoch configuration at genesis. -pub const BABE_GENESIS_EPOCH_CONFIG: babe_primitives::BabeEpochConfiguration = - babe_primitives::BabeEpochConfiguration { +pub const BABE_GENESIS_EPOCH_CONFIG: sp_consensus_babe::BabeEpochConfiguration = + sp_consensus_babe::BabeEpochConfiguration { c: PRIMARY_PROBABILITY, - allowed_slots: babe_primitives::AllowedSlots::PrimaryAndSecondaryVRFSlots, + allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryVRFSlots, }; /// Native version. 
@@ -570,7 +570,7 @@ impl pallet_election_provider_multi_phase::Config for Runtime { pallet_election_provider_multi_phase::SolutionAccuracyOf, (), >; - type BenchmarkingConfig = runtime_common::elections::BenchmarkConfig; + type BenchmarkingConfig = polkadot_runtime_common::elections::BenchmarkConfig; type ForceOrigin = EnsureRoot; type WeightInfo = weights::pallet_election_provider_multi_phase::WeightInfo; type MaxWinners = MaxActiveValidators; @@ -643,7 +643,7 @@ impl pallet_staking::Config for Runtime { type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; type HistoryDepth = frame_support::traits::ConstU32<84>; type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch; - type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; + type BenchmarkingConfig = polkadot_runtime_common::StakingBenchmarkingConfig; type EventListeners = (NominationPools, DelegatedStaking); type WeightInfo = weights::pallet_staking::WeightInfo; type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; @@ -724,7 +724,7 @@ impl pallet_treasury::Config for Runtime { >; type PayoutPeriod = PayoutSpendPeriod; #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = runtime_common::impls::benchmarks::TreasuryArguments; + type BenchmarkHelper = polkadot_runtime_common::impls::benchmarks::TreasuryArguments; } impl pallet_offences::Config for Runtime { @@ -1403,7 +1403,7 @@ impl pallet_asset_rate::Config for Runtime { type Currency = Balances; type AssetKind = ::AssetKind; #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = runtime_common::impls::benchmarks::AssetRateArguments; + type BenchmarkHelper = polkadot_runtime_common::impls::benchmarks::AssetRateArguments; } // Notify `coretime` pallet when a lease swap occurs @@ -1708,22 +1708,22 @@ mod benches { // Polkadot // NOTE: Make sure to prefix these with `runtime_common::` so // the that path resolves correctly in the generated file. 
- [runtime_common::assigned_slots, AssignedSlots] - [runtime_common::auctions, Auctions] - [runtime_common::crowdloan, Crowdloan] - [runtime_common::identity_migrator, IdentityMigrator] - [runtime_common::paras_registrar, Registrar] - [runtime_common::slots, Slots] - [runtime_parachains::configuration, Configuration] - [runtime_parachains::disputes, ParasDisputes] - [runtime_parachains::disputes::slashing, ParasSlashing] - [runtime_parachains::hrmp, Hrmp] - [runtime_parachains::inclusion, ParaInclusion] - [runtime_parachains::initializer, Initializer] - [runtime_parachains::paras, Paras] - [runtime_parachains::paras_inherent, ParaInherent] - [runtime_parachains::assigner_on_demand, OnDemandAssignmentProvider] - [runtime_parachains::coretime, Coretime] + [polkadot_runtime_common::assigned_slots, AssignedSlots] + [polkadot_runtime_common::auctions, Auctions] + [polkadot_runtime_common::crowdloan, Crowdloan] + [polkadot_runtime_common::identity_migrator, IdentityMigrator] + [polkadot_runtime_common::paras_registrar, Registrar] + [polkadot_runtime_common::slots, Slots] + [polkadot_runtime_parachains::configuration, Configuration] + [polkadot_runtime_parachains::disputes, ParasDisputes] + [polkadot_runtime_parachains::disputes::slashing, ParasSlashing] + [polkadot_runtime_parachains::hrmp, Hrmp] + [polkadot_runtime_parachains::inclusion, ParaInclusion] + [polkadot_runtime_parachains::initializer, Initializer] + [polkadot_runtime_parachains::paras, Paras] + [polkadot_runtime_parachains::paras_inherent, ParaInherent] + [polkadot_runtime_parachains::assigner_on_demand, OnDemandAssignmentProvider] + [polkadot_runtime_parachains::coretime, Coretime] // Substrate [pallet_bags_list, VoterList] [pallet_balances, Balances] @@ -1789,7 +1789,7 @@ sp_api::impl_runtime_apis! 
{ } } - impl block_builder_api::BlockBuilder for Runtime { + impl sp_block_builder::BlockBuilder for Runtime { fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { Executive::apply_extrinsic(extrinsic) } @@ -1798,19 +1798,19 @@ sp_api::impl_runtime_apis! { Executive::finalize_block() } - fn inherent_extrinsics(data: inherents::InherentData) -> Vec<::Extrinsic> { + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { data.create_extrinsics() } fn check_inherents( block: Block, - data: inherents::InherentData, - ) -> inherents::CheckInherentsResult { + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } } - impl tx_pool_api::runtime_api::TaggedTransactionQueue for Runtime { + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { fn validate_transaction( source: TransactionSource, tx: ::Extrinsic, @@ -1820,14 +1820,14 @@ sp_api::impl_runtime_apis! { } } - impl offchain_primitives::OffchainWorkerApi for Runtime { + impl sp_offchain::OffchainWorkerApi for Runtime { fn offchain_worker(header: &::Header) { Executive::offchain_worker(header) } } #[api_version(11)] - impl primitives::runtime_api::ParachainHost for Runtime { + impl polkadot_primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() } @@ -1857,7 +1857,7 @@ sp_api::impl_runtime_apis! { fn check_validation_outputs( para_id: ParaId, - outputs: primitives::CandidateCommitments, + outputs: polkadot_primitives::CandidateCommitments, ) -> bool { parachains_runtime_api_impl::check_validation_outputs::(para_id, outputs) } @@ -1942,7 +1942,7 @@ sp_api::impl_runtime_apis! { fn key_ownership_proof( validator_id: ValidatorId, ) -> Option { - use parity_scale_codec::Encode; + use codec::Encode; Historical::prove((PARACHAIN_KEY_TYPE_ID, validator_id)) .map(|p| p.encode()) @@ -1963,11 +1963,11 @@ sp_api::impl_runtime_apis! 
{ parachains_runtime_api_impl::minimum_backing_votes::() } - fn para_backing_state(para_id: ParaId) -> Option { + fn para_backing_state(para_id: ParaId) -> Option { parachains_runtime_api_impl::backing_state::(para_id) } - fn async_backing_params() -> primitives::AsyncBackingParams { + fn async_backing_params() -> polkadot_primitives::AsyncBackingParams { parachains_runtime_api_impl::async_backing_params::() } @@ -1992,22 +1992,22 @@ sp_api::impl_runtime_apis! { } } - impl beefy_primitives::BeefyApi for Runtime { + impl sp_consensus_beefy::BeefyApi for Runtime { fn beefy_genesis() -> Option { pallet_beefy::GenesisBlock::::get() } - fn validator_set() -> Option> { + fn validator_set() -> Option> { Beefy::validator_set() } fn submit_report_equivocation_unsigned_extrinsic( - equivocation_proof: beefy_primitives::DoubleVotingProof< + equivocation_proof: sp_consensus_beefy::DoubleVotingProof< BlockNumber, BeefyId, BeefySignature, >, - key_owner_proof: beefy_primitives::OpaqueKeyOwnershipProof, + key_owner_proof: sp_consensus_beefy::OpaqueKeyOwnershipProof, ) -> Option<()> { let key_owner_proof = key_owner_proof.decode()?; @@ -2018,14 +2018,14 @@ sp_api::impl_runtime_apis! { } fn generate_key_ownership_proof( - _set_id: beefy_primitives::ValidatorSetId, + _set_id: sp_consensus_beefy::ValidatorSetId, authority_id: BeefyId, - ) -> Option { - use parity_scale_codec::Encode; + ) -> Option { + use codec::Encode; - Historical::prove((beefy_primitives::KEY_TYPE, authority_id)) + Historical::prove((sp_consensus_beefy::KEY_TYPE, authority_id)) .map(|p| p.encode()) - .map(beefy_primitives::OpaqueKeyOwnershipProof::new) + .map(sp_consensus_beefy::OpaqueKeyOwnershipProof::new) } } @@ -2076,11 +2076,11 @@ sp_api::impl_runtime_apis! 
{ } impl pallet_beefy_mmr::BeefyMmrApi for RuntimeApi { - fn authority_set_proof() -> beefy_primitives::mmr::BeefyAuthoritySet { + fn authority_set_proof() -> sp_consensus_beefy::mmr::BeefyAuthoritySet { BeefyMmrLeaf::authority_set_proof() } - fn next_authority_set_proof() -> beefy_primitives::mmr::BeefyNextAuthoritySet { + fn next_authority_set_proof() -> sp_consensus_beefy::mmr::BeefyNextAuthoritySet { BeefyMmrLeaf::next_authority_set_proof() } } @@ -2113,7 +2113,7 @@ sp_api::impl_runtime_apis! { _set_id: fg_primitives::SetId, authority_id: fg_primitives::AuthorityId, ) -> Option { - use parity_scale_codec::Encode; + use codec::Encode; Historical::prove((fg_primitives::KEY_TYPE, authority_id)) .map(|p| p.encode()) @@ -2121,10 +2121,10 @@ sp_api::impl_runtime_apis! { } } - impl babe_primitives::BabeApi for Runtime { - fn configuration() -> babe_primitives::BabeConfiguration { + impl sp_consensus_babe::BabeApi for Runtime { + fn configuration() -> sp_consensus_babe::BabeConfiguration { let epoch_config = Babe::epoch_config().unwrap_or(BABE_GENESIS_EPOCH_CONFIG); - babe_primitives::BabeConfiguration { + sp_consensus_babe::BabeConfiguration { slot_duration: Babe::slot_duration(), epoch_length: EpochDuration::get(), c: epoch_config.c, @@ -2134,32 +2134,32 @@ sp_api::impl_runtime_apis! 
{ } } - fn current_epoch_start() -> babe_primitives::Slot { + fn current_epoch_start() -> sp_consensus_babe::Slot { Babe::current_epoch_start() } - fn current_epoch() -> babe_primitives::Epoch { + fn current_epoch() -> sp_consensus_babe::Epoch { Babe::current_epoch() } - fn next_epoch() -> babe_primitives::Epoch { + fn next_epoch() -> sp_consensus_babe::Epoch { Babe::next_epoch() } fn generate_key_ownership_proof( - _slot: babe_primitives::Slot, - authority_id: babe_primitives::AuthorityId, - ) -> Option { - use parity_scale_codec::Encode; + _slot: sp_consensus_babe::Slot, + authority_id: sp_consensus_babe::AuthorityId, + ) -> Option { + use codec::Encode; - Historical::prove((babe_primitives::KEY_TYPE, authority_id)) + Historical::prove((sp_consensus_babe::KEY_TYPE, authority_id)) .map(|p| p.encode()) - .map(babe_primitives::OpaqueKeyOwnershipProof::new) + .map(sp_consensus_babe::OpaqueKeyOwnershipProof::new) } fn submit_report_equivocation_unsigned_extrinsic( - equivocation_proof: babe_primitives::EquivocationProof<::Header>, - key_owner_proof: babe_primitives::OpaqueKeyOwnershipProof, + equivocation_proof: sp_consensus_babe::EquivocationProof<::Header>, + key_owner_proof: sp_consensus_babe::OpaqueKeyOwnershipProof, ) -> Option<()> { let key_owner_proof = key_owner_proof.decode()?; @@ -2170,7 +2170,7 @@ sp_api::impl_runtime_apis! { } } - impl authority_discovery_primitives::AuthorityDiscoveryApi for Runtime { + impl sp_authority_discovery::AuthorityDiscoveryApi for Runtime { fn authorities() -> Vec { parachains_runtime_api_impl::relevant_authority_ids::() } @@ -2400,14 +2400,14 @@ sp_api::impl_runtime_apis! 
{ impl pallet_xcm::benchmarking::Config for Runtime { type DeliveryHelper = ( - runtime_common::xcm_sender::ToParachainDeliveryHelper< + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< xcm_config::XcmConfig, ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, AssetHubParaId, (), >, - runtime_common::xcm_sender::ToParachainDeliveryHelper< + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< xcm_config::XcmConfig, ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, @@ -2463,7 +2463,7 @@ sp_api::impl_runtime_apis! { } impl frame_system_benchmarking::Config for Runtime {} impl pallet_nomination_pools_benchmarking::Config for Runtime {} - impl runtime_parachains::disputes::slashing::benchmarking::Config for Runtime {} + impl polkadot_runtime_parachains::disputes::slashing::benchmarking::Config for Runtime {} use xcm::latest::{ AssetId, Fungibility::*, InteriorLocation, Junction, Junctions::*, @@ -2473,7 +2473,7 @@ sp_api::impl_runtime_apis! { impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; type AccountIdConverter = xcm_config::LocationConverter; - type DeliveryHelper = runtime_common::xcm_sender::ToParachainDeliveryHelper< + type DeliveryHelper = polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< xcm_config::XcmConfig, ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, diff --git a/polkadot/runtime/westend/src/tests.rs b/polkadot/runtime/westend/src/tests.rs index 4acb81e963b2..4d5e2e946bce 100644 --- a/polkadot/runtime/westend/src/tests.rs +++ b/polkadot/runtime/westend/src/tests.rs @@ -24,7 +24,7 @@ use sp_core::hexdisplay::HexDisplay; #[test] fn remove_keys_weight_is_sensible() { - use runtime_common::crowdloan::WeightInfo; + use polkadot_runtime_common::crowdloan::WeightInfo; let max_weight = ::WeightInfo::refund(RemoveKeysLimit::get()); // Max remove keys limit should be no more than half the total block weight. 
assert!((max_weight * 2).all_lt(BlockWeights::get().max_block)); @@ -32,7 +32,7 @@ fn remove_keys_weight_is_sensible() { #[test] fn sample_size_is_sensible() { - use runtime_common::auctions::WeightInfo; + use polkadot_runtime_common::auctions::WeightInfo; // Need to clean up all samples at the end of an auction. let samples: BlockNumber = EndingPeriod::get() / SampleLength::get(); let max_weight: frame_support::weights::Weight = diff --git a/polkadot/runtime/westend/src/weights/runtime_common_assigned_slots.rs b/polkadot/runtime/westend/src/weights/runtime_common_assigned_slots.rs index c3f1060a9ac0..08b0b0f34df1 100644 --- a/polkadot/runtime/westend/src/weights/runtime_common_assigned_slots.rs +++ b/polkadot/runtime/westend/src/weights/runtime_common_assigned_slots.rs @@ -47,7 +47,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::assigned_slots`. pub struct WeightInfo(PhantomData); -impl runtime_common::assigned_slots::WeightInfo for WeightInfo { +impl polkadot_runtime_common::assigned_slots::WeightInfo for WeightInfo { /// Storage: `Registrar::Paras` (r:1 w:1) /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::ParaLifecycles` (r:1 w:1) diff --git a/polkadot/runtime/westend/src/weights/runtime_common_auctions.rs b/polkadot/runtime/westend/src/weights/runtime_common_auctions.rs index a6f5bbe5a1da..58ca2a083b2c 100644 --- a/polkadot/runtime/westend/src/weights/runtime_common_auctions.rs +++ b/polkadot/runtime/westend/src/weights/runtime_common_auctions.rs @@ -49,7 +49,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::auctions`. 
pub struct WeightInfo(PhantomData); -impl runtime_common::auctions::WeightInfo for WeightInfo { +impl polkadot_runtime_common::auctions::WeightInfo for WeightInfo { /// Storage: Auctions AuctionInfo (r:1 w:1) /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) /// Storage: Auctions AuctionCounter (r:1 w:1) diff --git a/polkadot/runtime/westend/src/weights/runtime_common_crowdloan.rs b/polkadot/runtime/westend/src/weights/runtime_common_crowdloan.rs index 97b0279544c7..47472406de1e 100644 --- a/polkadot/runtime/westend/src/weights/runtime_common_crowdloan.rs +++ b/polkadot/runtime/westend/src/weights/runtime_common_crowdloan.rs @@ -49,7 +49,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::crowdloan`. pub struct WeightInfo(PhantomData); -impl runtime_common::crowdloan::WeightInfo for WeightInfo { +impl polkadot_runtime_common::crowdloan::WeightInfo for WeightInfo { /// Storage: Crowdloan Funds (r:1 w:1) /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) /// Storage: Registrar Paras (r:1 w:1) diff --git a/polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs b/polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs index cec357453b67..4ea6f6796801 100644 --- a/polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs +++ b/polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs @@ -42,7 +42,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::identity_migrator`. 
pub struct WeightInfo(PhantomData); -impl runtime_common::identity_migrator::WeightInfo for WeightInfo { +impl polkadot_runtime_common::identity_migrator::WeightInfo for WeightInfo { /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) diff --git a/polkadot/runtime/westend/src/weights/runtime_common_paras_registrar.rs b/polkadot/runtime/westend/src/weights/runtime_common_paras_registrar.rs index 50290c0fe59f..befd89874411 100644 --- a/polkadot/runtime/westend/src/weights/runtime_common_paras_registrar.rs +++ b/polkadot/runtime/westend/src/weights/runtime_common_paras_registrar.rs @@ -49,7 +49,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::paras_registrar`. pub struct WeightInfo(PhantomData); -impl runtime_common::paras_registrar::WeightInfo for WeightInfo { +impl polkadot_runtime_common::paras_registrar::WeightInfo for WeightInfo { /// Storage: Registrar NextFreeParaId (r:1 w:1) /// Proof Skipped: Registrar NextFreeParaId (max_values: Some(1), max_size: None, mode: Measured) /// Storage: Registrar Paras (r:1 w:1) diff --git a/polkadot/runtime/westend/src/weights/runtime_common_slots.rs b/polkadot/runtime/westend/src/weights/runtime_common_slots.rs index c95859221fa7..b1422e506ab1 100644 --- a/polkadot/runtime/westend/src/weights/runtime_common_slots.rs +++ b/polkadot/runtime/westend/src/weights/runtime_common_slots.rs @@ -49,7 +49,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::slots`. 
pub struct WeightInfo(PhantomData); -impl runtime_common::slots::WeightInfo for WeightInfo { +impl polkadot_runtime_common::slots::WeightInfo for WeightInfo { /// Storage: Slots Leases (r:1 w:1) /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) /// Storage: System Account (r:1 w:1) diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs index acd1834f79ed..8b046f5d34ad 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs @@ -47,7 +47,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::assigner_on_demand`. pub struct WeightInfo(PhantomData); -impl runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo { /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs index 8fa3207c6446..5130b04668b2 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs @@ -47,7 +47,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::configuration`. 
pub struct WeightInfo(PhantomData); -impl runtime_parachains::configuration::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::configuration::WeightInfo for WeightInfo { /// Storage: `Configuration::PendingConfigs` (r:1 w:1) /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_coretime.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_coretime.rs index aa65a2e9034a..443651a6fda4 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_coretime.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_coretime.rs @@ -47,7 +47,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::coretime`. pub struct WeightInfo(PhantomData); -impl runtime_parachains::coretime::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::coretime::WeightInfo for WeightInfo { /// Storage: `Configuration::PendingConfigs` (r:1 w:1) /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_disputes.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_disputes.rs index 4a6a6079cf13..5beb82ec5944 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_disputes.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_disputes.rs @@ -49,7 +49,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::disputes`. 
pub struct WeightInfo(PhantomData); -impl runtime_parachains::disputes::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::disputes::WeightInfo for WeightInfo { /// Storage: ParasDisputes Frozen (r:0 w:1) /// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured) fn force_unfreeze() -> Weight { diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_disputes_slashing.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_disputes_slashing.rs index 8600717fee1e..a035ea2b0b5e 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_disputes_slashing.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_disputes_slashing.rs @@ -49,7 +49,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::disputes::slashing`. pub struct WeightInfo(PhantomData); -impl runtime_parachains::disputes::slashing::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::disputes::slashing::WeightInfo for WeightInfo { /// Storage: Session CurrentIndex (r:1 w:0) /// Proof Skipped: Session CurrentIndex (max_values: Some(1), max_size: None, mode: Measured) /// Storage: Historical HistoricalSessions (r:1 w:0) diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs index f1d7932fe8b7..8946261664be 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs @@ -47,7 +47,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::hrmp`. 
pub struct WeightInfo(PhantomData); -impl runtime_parachains::hrmp::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::hrmp::WeightInfo for WeightInfo { /// Storage: `Paras::ParaLifecycles` (r:1 w:0) /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_inclusion.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_inclusion.rs index 767097f660e8..25909beb6a07 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_inclusion.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_inclusion.rs @@ -49,7 +49,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::inclusion`. pub struct WeightInfo(PhantomData); -impl runtime_parachains::inclusion::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::inclusion::WeightInfo for WeightInfo { /// Storage: MessageQueue BookStateFor (r:1 w:1) /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) /// Storage: MessageQueue Pages (r:1 w:999) diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_initializer.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_initializer.rs index 81aca5c958d9..8e501de6e67f 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_initializer.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_initializer.rs @@ -49,7 +49,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::initializer`. pub struct WeightInfo(PhantomData); -impl runtime_parachains::initializer::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::initializer::WeightInfo for WeightInfo { /// Storage: System Digest (r:1 w:1) /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) /// The range of component `d` is `[0, 65536]`. 
diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_paras.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_paras.rs index 07623f60b012..d96964e69c11 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_paras.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_paras.rs @@ -49,7 +49,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::paras`. pub struct WeightInfo(PhantomData); -impl runtime_parachains::paras::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::paras::WeightInfo for WeightInfo { /// Storage: Paras CurrentCodeHash (r:1 w:1) /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) /// Storage: Paras CodeByHashRefs (r:1 w:1) diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs index aa99ac9438c4..74dd55cc3f2c 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs @@ -47,7 +47,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::paras_inherent`. 
pub struct WeightInfo(PhantomData); -impl runtime_parachains::paras_inherent::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::paras_inherent::WeightInfo for WeightInfo { /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `System::ParentHash` (r:1 w:0) diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index c6c5fb9e72a4..9d7143c96bb5 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -28,7 +28,7 @@ use frame_support::{ }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; -use runtime_common::{ +use polkadot_runtime_common::{ xcm_sender::{ChildParachainRouter, ExponentialPrice}, ToAuthor, }; diff --git a/polkadot/statement-table/Cargo.toml b/polkadot/statement-table/Cargo.toml index ad4a053fa3f9..7181afd9989e 100644 --- a/polkadot/statement-table/Cargo.toml +++ b/polkadot/statement-table/Cargo.toml @@ -10,7 +10,7 @@ description = "Stores messages other authorities issue about candidates in Polka workspace = true [dependencies] -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } sp-core = { path = "../../substrate/primitives/core" } -primitives = { package = "polkadot-primitives", path = "../primitives" } +polkadot-primitives = { path = "../primitives" } gum = { package = "tracing-gum", path = "../node/gum" } diff --git a/polkadot/statement-table/src/generic.rs b/polkadot/statement-table/src/generic.rs index 2ee6f6a4f781..e96ed6af73d9 100644 --- a/polkadot/statement-table/src/generic.rs +++ b/polkadot/statement-table/src/generic.rs @@ -30,12 +30,12 @@ use std::{ hash::Hash, }; -use primitives::{ +use polkadot_primitives::{ effective_minimum_backing_votes, ValidatorSignature, 
ValidityAttestation as PrimitiveValidityAttestation, }; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; const LOG_TARGET: &str = "parachain::statement-table"; /// Context for the statement table. diff --git a/polkadot/statement-table/src/lib.rs b/polkadot/statement-table/src/lib.rs index 3740d15cc4f3..469c877eafc9 100644 --- a/polkadot/statement-table/src/lib.rs +++ b/polkadot/statement-table/src/lib.rs @@ -34,7 +34,7 @@ pub use generic::{Config, Context, Table}; /// Concrete instantiations suitable for v2 primitives. pub mod v2 { use crate::generic; - use primitives::{ + use polkadot_primitives::{ CandidateHash, CommittedCandidateReceipt, CompactStatement as PrimitiveStatement, CoreIndex, ValidatorIndex, ValidatorSignature, }; diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml index 2cd8e822ae16..690fb377dad7 100644 --- a/polkadot/xcm/Cargo.toml +++ b/polkadot/xcm/Cargo.toml @@ -15,7 +15,7 @@ bounded-collections = { version = "0.2.0", default-features = false, features = derivative = { version = "2.2.0", default-features = false, features = ["use_core"] } impl-trait-for-tuples = "0.2.2" log = { workspace = true } -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } sp-weights = { path = "../../substrate/primitives/weights", default-features = false, features = ["serde"] } serde = { features = ["alloc", "derive", "rc"], workspace = true } @@ -33,9 +33,9 @@ default = ["std"] wasm-api = [] std = [ "bounded-collections/std", + "codec/std", "environmental/std", "log/std", - "parity-scale-codec/std", "scale-info/std", "serde/std", "sp-weights/std", diff --git a/polkadot/xcm/src/double_encoded.rs b/polkadot/xcm/src/double_encoded.rs index 
320cccf9b1f0..a5eecdee9796 100644 --- a/polkadot/xcm/src/double_encoded.rs +++ b/polkadot/xcm/src/double_encoded.rs @@ -16,7 +16,7 @@ use crate::MAX_XCM_DECODE_DEPTH; use alloc::vec::Vec; -use parity_scale_codec::{Decode, DecodeLimit, Encode}; +use codec::{Decode, DecodeLimit, Encode}; /// Wrapper around the encoded and decoded versions of a value. /// Caches the decoded value once computed. diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index 8b0030e59b5f..1f5191c23407 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -26,8 +26,8 @@ extern crate alloc; +use codec::{Decode, DecodeLimit, Encode, Error as CodecError, Input, MaxEncodedLen}; use derivative::Derivative; -use parity_scale_codec::{Decode, DecodeLimit, Encode, Error as CodecError, Input, MaxEncodedLen}; use scale_info::TypeInfo; #[deprecated( diff --git a/polkadot/xcm/src/v2/junction.rs b/polkadot/xcm/src/v2/junction.rs index 771931f4b566..68a7886f3039 100644 --- a/polkadot/xcm/src/v2/junction.rs +++ b/polkadot/xcm/src/v2/junction.rs @@ -19,7 +19,7 @@ use super::{BodyId, BodyPart, Junctions, MultiLocation, NetworkId}; use crate::v3::Junction as NewJunction; use bounded_collections::{ConstU32, WeakBoundedVec}; -use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; /// A single item in a path to describe the relative location of a consensus system. 
diff --git a/polkadot/xcm/src/v2/mod.rs b/polkadot/xcm/src/v2/mod.rs index 7b6858e6a5c2..38e55d0ea51e 100644 --- a/polkadot/xcm/src/v2/mod.rs +++ b/polkadot/xcm/src/v2/mod.rs @@ -62,9 +62,9 @@ use super::{ }; use alloc::{vec, vec::Vec}; use bounded_collections::{ConstU32, WeakBoundedVec}; +use codec::{self, Decode, Encode, MaxEncodedLen}; use core::{fmt::Debug, result}; use derivative::Derivative; -use parity_scale_codec::{self, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; mod junction; diff --git a/polkadot/xcm/src/v2/multiasset.rs b/polkadot/xcm/src/v2/multiasset.rs index 5681e9ef8a44..7090ef138ca2 100644 --- a/polkadot/xcm/src/v2/multiasset.rs +++ b/polkadot/xcm/src/v2/multiasset.rs @@ -34,8 +34,8 @@ use crate::v3::{ WildMultiAsset as NewWildMultiAsset, }; use alloc::{vec, vec::Vec}; +use codec::{self as codec, Decode, Encode}; use core::cmp::Ordering; -use parity_scale_codec::{self as codec, Decode, Encode}; use scale_info::TypeInfo; /// A general identifier for an instance of a non-fungible asset class. @@ -317,7 +317,7 @@ impl TryFrom for MultiAsset { pub struct MultiAssets(Vec); impl Decode for MultiAssets { - fn decode(input: &mut I) -> Result { + fn decode(input: &mut I) -> Result { Self::from_sorted_and_deduplicated(Vec::::decode(input)?) .map_err(|()| "Out of order".into()) } diff --git a/polkadot/xcm/src/v2/multilocation.rs b/polkadot/xcm/src/v2/multilocation.rs index ac98da8d08c9..9399ca6619c0 100644 --- a/polkadot/xcm/src/v2/multilocation.rs +++ b/polkadot/xcm/src/v2/multilocation.rs @@ -18,8 +18,8 @@ use super::Junction; use crate::v3::MultiLocation as NewMultiLocation; +use codec::{Decode, Encode, MaxEncodedLen}; use core::{mem, result}; -use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; /// A relative path between state-bearing consensus systems. 
@@ -883,7 +883,7 @@ impl TryFrom for Junctions { mod tests { use super::{Ancestor, AncestorThen, Junctions::*, MultiLocation, Parent, ParentThen}; use crate::opaque::v2::{Junction::*, NetworkId::*}; - use parity_scale_codec::{Decode, Encode}; + use codec::{Decode, Encode}; #[test] fn inverted_works() { diff --git a/polkadot/xcm/src/v2/traits.rs b/polkadot/xcm/src/v2/traits.rs index 9cfb9b051ab2..4dcb4c50c68c 100644 --- a/polkadot/xcm/src/v2/traits.rs +++ b/polkadot/xcm/src/v2/traits.rs @@ -17,8 +17,8 @@ //! Cross-Consensus Message format data structures. use crate::v3::Error as NewError; +use codec::{Decode, Encode}; use core::result; -use parity_scale_codec::{Decode, Encode}; use scale_info::TypeInfo; use super::*; @@ -282,7 +282,7 @@ pub type SendResult = result::Result<(), SendError>; /// # Example /// ```rust /// # use staging_xcm::v2::prelude::*; -/// # use parity_scale_codec::Encode; +/// # use codec::Encode; /// /// /// A sender that only passes the message through and does nothing. /// struct Sender1; diff --git a/polkadot/xcm/src/v3/junction.rs b/polkadot/xcm/src/v3/junction.rs index 32ce352c5c02..aea4e0372515 100644 --- a/polkadot/xcm/src/v3/junction.rs +++ b/polkadot/xcm/src/v3/junction.rs @@ -26,7 +26,7 @@ use crate::{ VersionedLocation, }; use bounded_collections::{BoundedSlice, BoundedVec, ConstU32}; -use parity_scale_codec::{self, Decode, Encode, MaxEncodedLen}; +use codec::{self, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; diff --git a/polkadot/xcm/src/v3/junctions.rs b/polkadot/xcm/src/v3/junctions.rs index 7b014304fdaf..56f5326fe97c 100644 --- a/polkadot/xcm/src/v3/junctions.rs +++ b/polkadot/xcm/src/v3/junctions.rs @@ -17,8 +17,8 @@ //! XCM `Junctions`/`InteriorMultiLocation` datatype. 
use super::{Junction, MultiLocation, NetworkId}; +use codec::{Decode, Encode, MaxEncodedLen}; use core::{mem, result}; -use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; /// Maximum number of `Junction`s that a `Junctions` can contain. diff --git a/polkadot/xcm/src/v3/mod.rs b/polkadot/xcm/src/v3/mod.rs index 8ff661a9bbac..880520cfedc2 100644 --- a/polkadot/xcm/src/v3/mod.rs +++ b/polkadot/xcm/src/v3/mod.rs @@ -28,12 +28,12 @@ use super::v4::{ use crate::DoubleEncoded; use alloc::{vec, vec::Vec}; use bounded_collections::{parameter_types, BoundedVec}; -use core::{fmt::Debug, result}; -use derivative::Derivative; -use parity_scale_codec::{ +use codec::{ self, decode_vec_with_len, Compact, Decode, Encode, Error as CodecError, Input as CodecInput, MaxEncodedLen, }; +use core::{fmt::Debug, result}; +use derivative::Derivative; use scale_info::TypeInfo; mod junction; diff --git a/polkadot/xcm/src/v3/multiasset.rs b/polkadot/xcm/src/v3/multiasset.rs index 9a67b0e4986c..7db0fa736902 100644 --- a/polkadot/xcm/src/v3/multiasset.rs +++ b/polkadot/xcm/src/v3/multiasset.rs @@ -42,8 +42,8 @@ use crate::{ }; use alloc::{vec, vec::Vec}; use bounded_collections::{BoundedVec, ConstU32}; +use codec::{self as codec, Decode, Encode, MaxEncodedLen}; use core::cmp::Ordering; -use parity_scale_codec::{self as codec, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; /// A general identifier for an instance of a non-fungible asset class. @@ -302,7 +302,7 @@ enum UncheckedFungibility { } impl Decode for Fungibility { - fn decode(input: &mut I) -> Result { + fn decode(input: &mut I) -> Result { match UncheckedFungibility::decode(input)? 
{ UncheckedFungibility::Fungible(a) if a != 0 => Ok(Self::Fungible(a)), UncheckedFungibility::NonFungible(i) => Ok(Self::NonFungible(i)), diff --git a/polkadot/xcm/src/v3/multilocation.rs b/polkadot/xcm/src/v3/multilocation.rs index 731e277b29d8..e51981204d96 100644 --- a/polkadot/xcm/src/v3/multilocation.rs +++ b/polkadot/xcm/src/v3/multilocation.rs @@ -20,8 +20,8 @@ use super::{Junction, Junctions}; use crate::{ v2::MultiLocation as OldMultiLocation, v4::Location as NewMultiLocation, VersionedLocation, }; +use codec::{Decode, Encode, MaxEncodedLen}; use core::result; -use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; /// A relative path between state-bearing consensus systems. @@ -531,7 +531,7 @@ xcm_procedural::impl_conversion_functions_for_multilocation_v3!(); #[cfg(test)] mod tests { use crate::v3::prelude::*; - use parity_scale_codec::{Decode, Encode}; + use codec::{Decode, Encode}; #[test] fn conversion_works() { diff --git a/polkadot/xcm/src/v3/traits.rs b/polkadot/xcm/src/v3/traits.rs index 680e0bacd0c9..7fa8824c3568 100644 --- a/polkadot/xcm/src/v3/traits.rs +++ b/polkadot/xcm/src/v3/traits.rs @@ -17,8 +17,8 @@ //! Cross-Consensus Message format data structures. 
use crate::v2::Error as OldError; +use codec::{Decode, Encode, MaxEncodedLen}; use core::result; -use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; pub use sp_weights::Weight; @@ -407,7 +407,7 @@ pub type SendResult = result::Result<(T, MultiAssets), SendError>; /// /// # Example /// ```rust -/// # use parity_scale_codec::Encode; +/// # use codec::Encode; /// # use staging_xcm::v3::{prelude::*, Weight}; /// # use staging_xcm::VersionedXcm; /// # use std::convert::Infallible; diff --git a/polkadot/xcm/src/v4/asset.rs b/polkadot/xcm/src/v4/asset.rs index 6b6d200f32fe..a081b595adb1 100644 --- a/polkadot/xcm/src/v4/asset.rs +++ b/polkadot/xcm/src/v4/asset.rs @@ -34,8 +34,8 @@ use crate::v3::{ }; use alloc::{vec, vec::Vec}; use bounded_collections::{BoundedVec, ConstU32}; +use codec::{self as codec, Decode, Encode, MaxEncodedLen}; use core::cmp::Ordering; -use parity_scale_codec::{self as codec, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; /// A general identifier for an instance of a non-fungible asset class. @@ -274,7 +274,7 @@ enum UncheckedFungibility { } impl Decode for Fungibility { - fn decode(input: &mut I) -> Result { + fn decode(input: &mut I) -> Result { match UncheckedFungibility::decode(input)? 
{ UncheckedFungibility::Fungible(a) if a != 0 => Ok(Self::Fungible(a)), UncheckedFungibility::NonFungible(i) => Ok(Self::NonFungible(i)), @@ -559,7 +559,7 @@ impl MaxEncodedLen for Assets { } impl Decode for Assets { - fn decode(input: &mut I) -> Result { + fn decode(input: &mut I) -> Result { let bounded_instructions = BoundedVec::>::decode(input)?; Self::from_sorted_and_deduplicated(bounded_instructions.into_inner()) diff --git a/polkadot/xcm/src/v4/junction.rs b/polkadot/xcm/src/v4/junction.rs index 3ae97de5e9b8..36fb616d2dc5 100644 --- a/polkadot/xcm/src/v4/junction.rs +++ b/polkadot/xcm/src/v4/junction.rs @@ -23,7 +23,7 @@ use crate::{ VersionedLocation, }; use bounded_collections::{BoundedSlice, BoundedVec, ConstU32}; -use parity_scale_codec::{self, Decode, Encode, MaxEncodedLen}; +use codec::{self, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; diff --git a/polkadot/xcm/src/v4/junctions.rs b/polkadot/xcm/src/v4/junctions.rs index 6d1af59e13dc..e5c54ecb21a5 100644 --- a/polkadot/xcm/src/v4/junctions.rs +++ b/polkadot/xcm/src/v4/junctions.rs @@ -18,8 +18,8 @@ use super::{Junction, Location, NetworkId}; use alloc::sync::Arc; +use codec::{Decode, Encode, MaxEncodedLen}; use core::{mem, ops::Range, result}; -use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; /// Maximum number of `Junction`s that a `Junctions` can contain. diff --git a/polkadot/xcm/src/v4/location.rs b/polkadot/xcm/src/v4/location.rs index cee76b689407..9e94d13626d6 100644 --- a/polkadot/xcm/src/v4/location.rs +++ b/polkadot/xcm/src/v4/location.rs @@ -18,8 +18,8 @@ use super::{traits::Reanchorable, Junction, Junctions}; use crate::{v3::MultiLocation as OldLocation, VersionedLocation}; +use codec::{Decode, Encode, MaxEncodedLen}; use core::result; -use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; /// A relative path between state-bearing consensus systems. 
@@ -539,7 +539,7 @@ xcm_procedural::impl_conversion_functions_for_location_v4!(); #[cfg(test)] mod tests { use crate::v4::prelude::*; - use parity_scale_codec::{Decode, Encode}; + use codec::{Decode, Encode}; #[test] fn conversion_works() { diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs index e1ca60087b19..57840562ba3e 100644 --- a/polkadot/xcm/src/v4/mod.rs +++ b/polkadot/xcm/src/v4/mod.rs @@ -24,12 +24,12 @@ use super::v3::{ use crate::DoubleEncoded; use alloc::{vec, vec::Vec}; use bounded_collections::{parameter_types, BoundedVec}; -use core::{fmt::Debug, result}; -use derivative::Derivative; -use parity_scale_codec::{ +use codec::{ self, decode_vec_with_len, Compact, Decode, Encode, Error as CodecError, Input as CodecInput, MaxEncodedLen, }; +use core::{fmt::Debug, result}; +use derivative::Derivative; use scale_info::TypeInfo; mod asset; diff --git a/polkadot/xcm/src/v4/traits.rs b/polkadot/xcm/src/v4/traits.rs index f6136c76d808..351de92c80ed 100644 --- a/polkadot/xcm/src/v4/traits.rs +++ b/polkadot/xcm/src/v4/traits.rs @@ -17,8 +17,8 @@ //! Cross-Consensus Message format data structures. 
pub use crate::v3::{Error, Result, SendError, XcmHash}; +use codec::{Decode, Encode}; use core::result; -use parity_scale_codec::{Decode, Encode}; use scale_info::TypeInfo; pub use sp_weights::Weight; @@ -161,7 +161,7 @@ pub type SendResult = result::Result<(T, Assets), SendError>; /// /// # Example /// ```rust -/// # use parity_scale_codec::Encode; +/// # use codec::Encode; /// # use staging_xcm::v4::{prelude::*, Weight}; /// # use staging_xcm::VersionedXcm; /// # use std::convert::Infallible; diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index 707e4aac7968..79c601b98b4f 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] impl-trait-for-tuples = "0.2.1" -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } @@ -34,7 +34,7 @@ pallet-balances = { path = "../../../substrate/frame/balances" } pallet-xcm = { path = "../pallet-xcm" } pallet-salary = { path = "../../../substrate/frame/salary" } pallet-assets = { path = "../../../substrate/frame/assets" } -primitives = { package = "polkadot-primitives", path = "../../primitives" } +polkadot-primitives = { path = "../../primitives" } polkadot-runtime-parachains = { path = "../../runtime/parachains" } assert_matches = "1.5.0" polkadot-test-runtime = { path = "../../runtime/test-runtime" } @@ -49,18 +49,18 @@ runtime-benchmarks = [ "pallet-salary/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", + 
"polkadot-primitives/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", "polkadot-test-runtime/runtime-benchmarks", - "primitives/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-executor/runtime-benchmarks", ] std = [ + "codec/std", "frame-support/std", "frame-system/std", "log/std", "pallet-transaction-payment/std", - "parity-scale-codec/std", "polkadot-parachain-primitives/std", "scale-info/std", "sp-arithmetic/std", diff --git a/polkadot/xcm/xcm-builder/src/currency_adapter.rs b/polkadot/xcm/xcm-builder/src/currency_adapter.rs index 24261ac06583..99a736d6ac1f 100644 --- a/polkadot/xcm/xcm-builder/src/currency_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/currency_adapter.rs @@ -51,7 +51,7 @@ impl From for XcmError { /// /// # Example /// ``` -/// use parity_scale_codec::Decode; +/// use codec::Decode; /// use frame_support::{parameter_types, PalletId}; /// use sp_runtime::traits::{AccountIdConversion, TrailingZeroInput}; /// use xcm::latest::prelude::*; diff --git a/polkadot/xcm/xcm-builder/src/location_conversion.rs b/polkadot/xcm/xcm-builder/src/location_conversion.rs index c9553030817a..f95258492381 100644 --- a/polkadot/xcm/xcm-builder/src/location_conversion.rs +++ b/polkadot/xcm/xcm-builder/src/location_conversion.rs @@ -15,8 +15,8 @@ // along with Polkadot. If not, see . 
use crate::universal_exports::ensure_is_remote; +use codec::{Compact, Decode, Encode}; use frame_support::traits::Get; -use parity_scale_codec::{Compact, Decode, Encode}; use sp_io::hashing::blake2_256; use sp_runtime::traits::{AccountIdConversion, TrailingZeroInput, TryConvert}; use sp_std::{marker::PhantomData, prelude::*}; @@ -460,7 +460,7 @@ impl #[cfg(test)] mod tests { use super::*; - use primitives::AccountId; + use polkadot_primitives::AccountId; pub type ForeignChainAliasAccount = HashedDescription; diff --git a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs index 449cda3d2323..ef8c71fc2495 100644 --- a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs +++ b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs @@ -16,8 +16,8 @@ //! Implementation of `ProcessMessage` for an `ExecuteXcm` implementation. +use codec::{Decode, FullCodec, MaxEncodedLen}; use frame_support::traits::{ProcessMessage, ProcessMessageError}; -use parity_scale_codec::{Decode, FullCodec, MaxEncodedLen}; use scale_info::TypeInfo; use sp_std::{fmt::Debug, marker::PhantomData}; use sp_weights::{Weight, WeightMeter}; @@ -118,11 +118,11 @@ impl< #[cfg(test)] mod tests { use super::*; + use codec::Encode; use frame_support::{ assert_err, assert_ok, traits::{ProcessMessageError, ProcessMessageError::*}, }; - use parity_scale_codec::Encode; use polkadot_test_runtime::*; use xcm::{v3, v4, VersionedXcm}; diff --git a/polkadot/xcm/xcm-builder/src/routing.rs b/polkadot/xcm/xcm-builder/src/routing.rs index 5c284aaf1475..543aef97c340 100644 --- a/polkadot/xcm/xcm-builder/src/routing.rs +++ b/polkadot/xcm/xcm-builder/src/routing.rs @@ -16,8 +16,8 @@ //! Various implementations for `SendXcm`. 
+use codec::Encode; use frame_system::unique; -use parity_scale_codec::Encode; use sp_std::{marker::PhantomData, result::Result, vec::Vec}; use xcm::prelude::*; use xcm_executor::{traits::FeeReason, FeesMode}; diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs index f45650ec5404..f35c73bdb685 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs @@ -26,6 +26,7 @@ pub use crate::{ AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, FixedRateOfFungible, FixedWeightBounds, TakeWeightCredit, }; +pub use codec::{Decode, Encode}; use frame_support::traits::{ContainsPair, Everything}; pub use frame_support::{ dispatch::{DispatchInfo, DispatchResultWithPostInfo, GetDispatchInfo, PostDispatchInfo}, @@ -33,7 +34,6 @@ pub use frame_support::{ sp_runtime::{traits::Dispatchable, DispatchError, DispatchErrorWithPostInfo}, traits::{Contains, Get, IsInVec}, }; -pub use parity_scale_codec::{Decode, Encode}; pub use sp_std::{ cell::{Cell, RefCell}, collections::{btree_map::BTreeMap, btree_set::BTreeSet}, diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs index 076ff4184f0c..10e9f4c6c085 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs @@ -25,8 +25,8 @@ use frame_support::{ traits::{ConstU32, Everything}, }; use frame_system::{EnsureRoot, EnsureSigned}; +use polkadot_primitives::{AccountIndex, BlakeTwo256, Signature}; use polkadot_test_runtime::SignedExtra; -use primitives::{AccountIndex, BlakeTwo256, Signature}; use sp_runtime::{generic, traits::MaybeEquivalence, AccountId32, BuildStorage}; use xcm_executor::{traits::ConvertLocation, XcmExecutor}; diff --git a/polkadot/xcm/xcm-builder/src/universal_exports.rs b/polkadot/xcm/xcm-builder/src/universal_exports.rs index 04ceb7e51688..9820d535f7ef 100644 --- a/polkadot/xcm/xcm-builder/src/universal_exports.rs +++ 
b/polkadot/xcm/xcm-builder/src/universal_exports.rs @@ -17,8 +17,8 @@ //! Traits and utilities to help with origin mutation and bridging. use crate::InspectMessageQueues; +use codec::{Decode, Encode}; use frame_support::{ensure, traits::Get}; -use parity_scale_codec::{Decode, Encode}; use sp_std::{convert::TryInto, marker::PhantomData, prelude::*}; use xcm::prelude::*; use xcm_executor::traits::{validate_export, ExportXcm}; diff --git a/polkadot/xcm/xcm-builder/src/weight.rs b/polkadot/xcm/xcm-builder/src/weight.rs index 6141b0142eed..1efa42ce9560 100644 --- a/polkadot/xcm/xcm-builder/src/weight.rs +++ b/polkadot/xcm/xcm-builder/src/weight.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +use codec::Decode; use frame_support::{ dispatch::GetDispatchInfo, traits::{ @@ -25,7 +26,6 @@ use frame_support::{ WeightToFee as WeightToFeeT, }, }; -use parity_scale_codec::Decode; use sp_runtime::traits::{SaturatedConversion, Saturating, Zero}; use sp_std::{marker::PhantomData, result::Result}; use xcm::latest::{prelude::*, GetWeight, Weight}; diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs index 7f7ff17e2115..62b448a9f430 100644 --- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs +++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs @@ -14,13 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
+use codec::Encode; use frame_support::{ construct_runtime, derive_impl, parameter_types, traits::{ConstU32, Everything, Nothing}, weights::Weight, }; use frame_system::EnsureRoot; -use parity_scale_codec::Encode; use primitive_types::H256; use sp_runtime::{traits::IdentityLookup, AccountId32, BuildStorage}; use sp_std::cell::RefCell; diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml index 64b2d405b906..3b30b4f13e2d 100644 --- a/polkadot/xcm/xcm-executor/Cargo.toml +++ b/polkadot/xcm/xcm-executor/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] impl-trait-for-tuples = "0.2.2" environmental = { version = "1.1.4", default-features = false } -parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } xcm = { package = "staging-xcm", path = "..", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } @@ -33,11 +33,11 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", ] std = [ + "codec/std", "environmental/std", "frame-benchmarking/std", "frame-support/std", "log/std", - "parity-scale-codec/std", "scale-info/std", "sp-arithmetic/std", "sp-core/std", diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index e0b8a8a9c73e..da9de93ca0f6 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -16,12 +16,12 @@ #![cfg_attr(not(feature = "std"), no_std)] +use codec::{Decode, Encode}; use frame_support::{ dispatch::GetDispatchInfo, ensure, traits::{Contains, ContainsPair, Defensive, Get, PalletsInfoAccess}, }; -use parity_scale_codec::{Decode, Encode}; use sp_core::defer; use sp_io::hashing::blake2_128; use sp_std::{fmt::Debug, marker::PhantomData, 
prelude::*}; diff --git a/polkadot/xcm/xcm-executor/src/traits/on_response.rs b/polkadot/xcm/xcm-executor/src/traits/on_response.rs index 1049bacdca5f..5d2412d61375 100644 --- a/polkadot/xcm/xcm-executor/src/traits/on_response.rs +++ b/polkadot/xcm/xcm-executor/src/traits/on_response.rs @@ -15,9 +15,9 @@ // along with Polkadot. If not, see . use crate::{Junctions::Here, Xcm}; +use codec::{Decode, Encode}; use core::result; use frame_support::{pallet_prelude::Get, parameter_types}; -use parity_scale_codec::{Decode, Encode}; use sp_arithmetic::traits::Zero; use sp_std::fmt::Debug; use xcm::latest::{ diff --git a/prdoc/pr_4633.prdoc b/prdoc/pr_4633.prdoc new file mode 100644 index 000000000000..f239191cc198 --- /dev/null +++ b/prdoc/pr_4633.prdoc @@ -0,0 +1,8 @@ +title: "Unify dependency aliases" + +doc: + - audience: [Runtime Dev, Node Dev] + description: | + Changes the re-export names of some crates but does not do any logic changes. + +crates: [ ] diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index 1f3bce799b2c..169ed72c96e4 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -25,7 +25,7 @@ itertools = "0.11" libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } log = { workspace = true, default-features = true } names = { version = "0.14.0", default-features = false } -parity-scale-codec = "3.6.12" +codec = { package = "parity-scale-codec", version = "3.6.12" } rand = "0.8.5" regex = "1.6.0" rpassword = "7.0.0" diff --git a/substrate/client/cli/src/commands/chain_info_cmd.rs b/substrate/client/cli/src/commands/chain_info_cmd.rs index 002d7893d9f3..8558c8a2d1cb 100644 --- a/substrate/client/cli/src/commands/chain_info_cmd.rs +++ b/substrate/client/cli/src/commands/chain_info_cmd.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . 
use crate::{CliConfiguration, DatabaseParams, PruningParams, Result as CliResult, SharedParams}; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use sc_client_api::{backend::Backend as BackendT, blockchain::HeaderBackend}; use sp_blockchain::Info; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; diff --git a/substrate/client/cli/src/error.rs b/substrate/client/cli/src/error.rs index 90ad048009ad..90f936561512 100644 --- a/substrate/client/cli/src/error.rs +++ b/substrate/client/cli/src/error.rs @@ -42,7 +42,7 @@ pub enum Error { Client(#[from] sp_blockchain::Error), #[error(transparent)] - Codec(#[from] parity_scale_codec::Error), + Codec(#[from] codec::Error), #[error("Invalid input: {0}")] Input(String), diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml index cd183f6bc8b0..f5528ec5931d 100644 --- a/substrate/client/consensus/beefy/Cargo.toml +++ b/substrate/client/consensus/beefy/Cargo.toml @@ -22,7 +22,7 @@ log = { workspace = true, default-features = true } parking_lot = "0.12.1" thiserror = { workspace = true } wasm-timer = "0.2.5" -prometheus = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-client-api = { path = "../../api" } sc-consensus = { path = "../common" } sc-network = { path = "../../network" } diff --git a/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs b/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs index 350e7a271bc3..c473c14bccc3 100644 --- a/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs +++ b/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs @@ -144,7 +144,7 @@ where genesis_hash: Hash, fork_id: Option<&str>, client: Arc, 
- prometheus_registry: Option, + prometheus_registry: Option, ) -> (Self, Network::RequestResponseProtocolConfig) { let (request_receiver, config): (_, Network::RequestResponseProtocolConfig) = on_demand_justifications_protocol_config::<_, _, Network>(genesis_hash, fork_id); diff --git a/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs b/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs index 4d40656375ec..e127e5a89590 100644 --- a/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs +++ b/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs @@ -85,7 +85,7 @@ impl OnDemandJustificationsEngine, protocol_name: ProtocolName, live_peers: Arc>>, - prometheus_registry: Option, + prometheus_registry: Option, ) -> Self { let metrics = register_metrics(prometheus_registry); Self { diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs index 4cb014b00d5b..a47bfe1dbe24 100644 --- a/substrate/client/consensus/beefy/src/lib.rs +++ b/substrate/client/consensus/beefy/src/lib.rs @@ -34,7 +34,7 @@ use crate::{ use futures::{stream::Fuse, FutureExt, StreamExt}; use log::{debug, error, info, warn}; use parking_lot::Mutex; -use prometheus::Registry; +use prometheus_endpoint::Registry; use sc_client_api::{Backend, BlockBackend, BlockchainEvents, FinalityNotifications, Finalizer}; use sc_consensus::BlockImport; use sc_network::{NetworkRequest, NotificationService, ProtocolName}; diff --git a/substrate/client/consensus/beefy/src/metrics.rs b/substrate/client/consensus/beefy/src/metrics.rs index ef3928d79faa..30180fe43ec4 100644 --- a/substrate/client/consensus/beefy/src/metrics.rs +++ b/substrate/client/consensus/beefy/src/metrics.rs @@ -20,7 +20,7 @@ use crate::LOG_TARGET; use log::{debug, error}; -use prometheus::{register, Counter, Gauge, PrometheusError, 
Registry, U64}; +use prometheus_endpoint::{register, Counter, Gauge, PrometheusError, Registry, U64}; /// Helper trait for registering BEEFY metrics to Prometheus registry. pub(crate) trait PrometheusRegister: Sized { @@ -282,7 +282,7 @@ impl PrometheusRegister for OnDemandOutgoingRequestsMetrics { } pub(crate) fn register_metrics( - prometheus_registry: Option, + prometheus_registry: Option, ) -> Option { prometheus_registry.as_ref().map(T::register).and_then(|result| match result { Ok(metrics) => { diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml index 9099761fbceb..b03a263ae0a3 100644 --- a/substrate/client/consensus/grandpa/Cargo.toml +++ b/substrate/client/consensus/grandpa/Cargo.toml @@ -25,7 +25,7 @@ finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } -parity-scale-codec = { version = "3.6.12", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } parking_lot = "0.12.1" rand = "0.8.5" serde_json = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/grandpa/rpc/Cargo.toml b/substrate/client/consensus/grandpa/rpc/Cargo.toml index d4e72baef3e7..a9437a9be075 100644 --- a/substrate/client/consensus/grandpa/rpc/Cargo.toml +++ b/substrate/client/consensus/grandpa/rpc/Cargo.toml @@ -17,7 +17,7 @@ finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } futures = "0.3.30" jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } log = { workspace = true, default-features = true } -parity-scale-codec = { version = "3.6.12", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } sc-client-api = { path = 
"../../../api" } diff --git a/substrate/client/consensus/grandpa/rpc/src/lib.rs b/substrate/client/consensus/grandpa/rpc/src/lib.rs index 68de068c3058..430525019dfb 100644 --- a/substrate/client/consensus/grandpa/rpc/src/lib.rs +++ b/substrate/client/consensus/grandpa/rpc/src/lib.rs @@ -127,8 +127,8 @@ mod tests { use super::*; use std::{collections::HashSet, sync::Arc}; + use codec::{Decode, Encode}; use jsonrpsee::{core::EmptyServerParams as EmptyParams, types::SubscriptionId, RpcModule}; - use parity_scale_codec::{Decode, Encode}; use sc_block_builder::BlockBuilderBuilder; use sc_consensus_grandpa::{ report, AuthorityId, FinalityProof, GrandpaJustification, GrandpaJustificationSender, diff --git a/substrate/client/consensus/grandpa/rpc/src/notification.rs b/substrate/client/consensus/grandpa/rpc/src/notification.rs index 42b9123ed8c1..5bcf90f4d79d 100644 --- a/substrate/client/consensus/grandpa/rpc/src/notification.rs +++ b/substrate/client/consensus/grandpa/rpc/src/notification.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use parity_scale_codec::Encode; +use codec::Encode; use sc_consensus_grandpa::GrandpaJustification; use serde::{Deserialize, Serialize}; use sp_runtime::traits::Block as BlockT; diff --git a/substrate/client/consensus/grandpa/src/authorities.rs b/substrate/client/consensus/grandpa/src/authorities.rs index 623223e41eb8..2ac15d761b2e 100644 --- a/substrate/client/consensus/grandpa/src/authorities.rs +++ b/substrate/client/consensus/grandpa/src/authorities.rs @@ -20,10 +20,10 @@ use std::{cmp::Ord, fmt::Debug, ops::Add}; +use codec::{Decode, Encode}; use finality_grandpa::voter_set::VoterSet; use fork_tree::{FilterAction, ForkTree}; use log::debug; -use parity_scale_codec::{Decode, Encode}; use parking_lot::MappedMutexGuard; use sc_consensus::shared_data::{SharedData, SharedDataLocked}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; @@ -662,9 +662,7 @@ pub struct PendingChange { } impl Decode for PendingChange { - fn decode( - value: &mut I, - ) -> Result { + fn decode(value: &mut I) -> Result { let next_authorities = Decode::decode(value)?; let delay = Decode::decode(value)?; let canon_height = Decode::decode(value)?; diff --git a/substrate/client/consensus/grandpa/src/aux_schema.rs b/substrate/client/consensus/grandpa/src/aux_schema.rs index 97a8bc660317..8ec882591be9 100644 --- a/substrate/client/consensus/grandpa/src/aux_schema.rs +++ b/substrate/client/consensus/grandpa/src/aux_schema.rs @@ -20,9 +20,9 @@ use std::fmt::Debug; +use codec::{Decode, Encode}; use finality_grandpa::round::State as RoundState; use log::{info, warn}; -use parity_scale_codec::{Decode, Encode}; use fork_tree::ForkTree; use sc_client_api::backend::AuxStore; diff --git a/substrate/client/consensus/grandpa/src/communication/gossip.rs b/substrate/client/consensus/grandpa/src/communication/gossip.rs index c7fe5a46a5eb..a6aa063357cb 100644 --- a/substrate/client/consensus/grandpa/src/communication/gossip.rs +++ b/substrate/client/consensus/grandpa/src/communication/gossip.rs 
@@ -86,8 +86,8 @@ //! We only send polite messages to peers, use ahash::{AHashMap, AHashSet}; +use codec::{Decode, DecodeAll, Encode}; use log::{debug, trace}; -use parity_scale_codec::{Decode, DecodeAll, Encode}; use prometheus_endpoint::{register, CounterVec, Opts, PrometheusError, Registry, U64}; use rand::seq::SliceRandom; use sc_network::ReputationChange; diff --git a/substrate/client/consensus/grandpa/src/communication/mod.rs b/substrate/client/consensus/grandpa/src/communication/mod.rs index cf78e1d4cf08..3b0ea2c5ee96 100644 --- a/substrate/client/consensus/grandpa/src/communication/mod.rs +++ b/substrate/client/consensus/grandpa/src/communication/mod.rs @@ -40,12 +40,12 @@ use std::{ time::Duration, }; +use codec::{Decode, DecodeAll, Encode}; use finality_grandpa::{ voter, voter_set::VoterSet, Message::{Precommit, Prevote, PrimaryPropose}, }; -use parity_scale_codec::{Decode, DecodeAll, Encode}; use sc_network::{NetworkBlock, NetworkSyncForkRequest, NotificationService, ReputationChange}; use sc_network_gossip::{GossipEngine, Network as GossipNetwork}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; diff --git a/substrate/client/consensus/grandpa/src/communication/tests.rs b/substrate/client/consensus/grandpa/src/communication/tests.rs index d7153a79ce0b..5d1562f05188 100644 --- a/substrate/client/consensus/grandpa/src/communication/tests.rs +++ b/substrate/client/consensus/grandpa/src/communication/tests.rs @@ -23,8 +23,8 @@ use super::{ Round, SetId, VoterSet, }; use crate::{communication::grandpa_protocol_name, environment::SharedVoterSetState}; +use codec::{DecodeAll, Encode}; use futures::prelude::*; -use parity_scale_codec::{DecodeAll, Encode}; use sc_network::{ config::{MultiaddrWithPeerId, Role}, event::Event as NetworkEvent, diff --git a/substrate/client/consensus/grandpa/src/environment.rs b/substrate/client/consensus/grandpa/src/environment.rs index 31df038044a4..6199e8a97d99 100644 --- 
a/substrate/client/consensus/grandpa/src/environment.rs +++ b/substrate/client/consensus/grandpa/src/environment.rs @@ -24,13 +24,13 @@ use std::{ time::Duration, }; +use codec::{Decode, Encode}; use finality_grandpa::{ round::State as RoundState, voter, voter_set::VoterSet, BlockNumberOps, Error as GrandpaError, }; use futures::prelude::*; use futures_timer::Delay; use log::{debug, warn}; -use parity_scale_codec::{Decode, Encode}; use parking_lot::RwLock; use prometheus_endpoint::{register, Counter, Gauge, PrometheusError, U64}; @@ -104,12 +104,10 @@ impl Encode for CompletedRounds { } } -impl parity_scale_codec::EncodeLike for CompletedRounds {} +impl codec::EncodeLike for CompletedRounds {} impl Decode for CompletedRounds { - fn decode( - value: &mut I, - ) -> Result { + fn decode(value: &mut I) -> Result { <(Vec>, SetId, Vec)>::decode(value) .map(|(rounds, set_id, voters)| CompletedRounds { rounds, set_id, voters }) } diff --git a/substrate/client/consensus/grandpa/src/finality_proof.rs b/substrate/client/consensus/grandpa/src/finality_proof.rs index 80b6249ade86..af965f2e4ae6 100644 --- a/substrate/client/consensus/grandpa/src/finality_proof.rs +++ b/substrate/client/consensus/grandpa/src/finality_proof.rs @@ -39,7 +39,7 @@ use log::{trace, warn}; use std::sync::Arc; -use parity_scale_codec::{Decode, Encode}; +use codec::{Decode, Encode}; use sc_client_api::backend::Backend; use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; use sp_consensus_grandpa::GRANDPA_ENGINE_ID; diff --git a/substrate/client/consensus/grandpa/src/import.rs b/substrate/client/consensus/grandpa/src/import.rs index bc2983569c53..b594c0f678ce 100644 --- a/substrate/client/consensus/grandpa/src/import.rs +++ b/substrate/client/consensus/grandpa/src/import.rs @@ -18,8 +18,8 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc}; +use codec::Decode; use log::debug; -use parity_scale_codec::Decode; use sc_client_api::{backend::Backend, utils::is_descendent_of}; use 
sc_consensus::{ diff --git a/substrate/client/consensus/grandpa/src/justification.rs b/substrate/client/consensus/grandpa/src/justification.rs index a38cb113b40a..934c0d695fda 100644 --- a/substrate/client/consensus/grandpa/src/justification.rs +++ b/substrate/client/consensus/grandpa/src/justification.rs @@ -22,8 +22,8 @@ use std::{ sync::Arc, }; +use codec::{Decode, DecodeAll, Encode}; use finality_grandpa::{voter_set::VoterSet, Error as GrandpaError}; -use parity_scale_codec::{Decode, DecodeAll, Encode}; use sp_blockchain::{Error as ClientError, HeaderBackend}; use sp_consensus_grandpa::AuthorityId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; diff --git a/substrate/client/consensus/grandpa/src/lib.rs b/substrate/client/consensus/grandpa/src/lib.rs index 03452bd07c75..a07dc035de35 100644 --- a/substrate/client/consensus/grandpa/src/lib.rs +++ b/substrate/client/consensus/grandpa/src/lib.rs @@ -56,9 +56,9 @@ #![warn(missing_docs)] +use codec::Decode; use futures::{prelude::*, StreamExt}; use log::{debug, error, info}; -use parity_scale_codec::Decode; use parking_lot::RwLock; use prometheus_endpoint::{PrometheusError, Registry}; use sc_client_api::{ diff --git a/substrate/client/consensus/grandpa/src/warp_proof.rs b/substrate/client/consensus/grandpa/src/warp_proof.rs index 7169a424c14a..c836ab09fd5d 100644 --- a/substrate/client/consensus/grandpa/src/warp_proof.rs +++ b/substrate/client/consensus/grandpa/src/warp_proof.rs @@ -16,7 +16,7 @@ //! Utilities for generating and verifying GRANDPA warp sync proofs. -use parity_scale_codec::{Decode, DecodeAll, Encode}; +use codec::{Decode, DecodeAll, Encode}; use crate::{ best_justification, find_scheduled_change, AuthoritySetChanges, AuthoritySetHardFork, @@ -38,7 +38,7 @@ use std::{collections::HashMap, sync::Arc}; pub enum Error { /// Decoding error. 
#[error("Failed to decode block hash: {0}.")] - DecodeScale(#[from] parity_scale_codec::Error), + DecodeScale(#[from] codec::Error), /// Client backend error. #[error("{0}")] Client(#[from] sp_blockchain::Error), @@ -320,7 +320,7 @@ where mod tests { use super::WarpSyncProof; use crate::{AuthoritySetChanges, GrandpaJustification}; - use parity_scale_codec::Encode; + use codec::Encode; use rand::prelude::*; use sc_block_builder::BlockBuilderBuilder; use sp_blockchain::HeaderBackend; diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index e95e06cee267..3c7542313952 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -20,7 +20,7 @@ array-bytes = "6.2.2" fdlimit = "0.3.0" futures = "0.3.30" log = { workspace = true, default-features = true } -parity-scale-codec = "3.6.12" +codec = { package = "parity-scale-codec", version = "3.6.12" } parking_lot = "0.12.1" tempfile = "3.1.0" tokio = { version = "1.22.0", features = ["time"] } diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index 6542830c998b..bd48fae63444 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -17,8 +17,8 @@ // along with this program. If not, see . 
use async_channel::TryRecvError; +use codec::{Decode, Encode, Joiner}; use futures::executor::block_on; -use parity_scale_codec::{Decode, Encode, Joiner}; use sc_block_builder::BlockBuilderBuilder; use sc_client_api::{ in_mem, BlockBackend, BlockchainEvents, ExecutorProvider, FinalityNotifications, HeaderBackend, diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 3942f06ce6ee..a3b3d1900e6e 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # external deps -parity-scale-codec = { version = "3.6.12", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = [ @@ -83,6 +83,7 @@ runtime = [ "frame-system-rpc-runtime-api", ] std = [ + "codec/std", "frame-benchmarking?/std", "frame-executive?/std", "frame-support/std", @@ -91,7 +92,6 @@ std = [ "frame-system/std", "frame-try-runtime?/std", "log/std", - "parity-scale-codec/std", "scale-info/std", "sp-api?/std", "sp-arithmetic/std", diff --git a/substrate/frame/contracts/uapi/Cargo.toml b/substrate/frame/contracts/uapi/Cargo.toml index 80de7a1d5d69..e19caa460419 100644 --- a/substrate/frame/contracts/uapi/Cargo.toml +++ b/substrate/frame/contracts/uapi/Cargo.toml @@ -15,7 +15,7 @@ workspace = true paste = { version = "1.0", default-features = false } bitflags = "1.0" scale-info = { version = "2.11.1", default-features = false, features = ["derive"], optional = true } -scale = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", "max-encoded-len", ], optional = true } @@ -28,4 +28,4 @@ default-target = ["wasm32-unknown-unknown"] [features] default = ["scale"] -scale = ["dep:scale", 
"scale-info"] +scale = ["dep:codec", "scale-info"] diff --git a/substrate/frame/contracts/uapi/src/flags.rs b/substrate/frame/contracts/uapi/src/flags.rs index e6dfdeaedfa7..9ad105b8372a 100644 --- a/substrate/frame/contracts/uapi/src/flags.rs +++ b/substrate/frame/contracts/uapi/src/flags.rs @@ -19,7 +19,7 @@ use bitflags::bitflags; bitflags! { /// Flags used by a contract to customize exit behaviour. - #[cfg_attr(feature = "scale", derive(scale::Encode, scale::Decode, scale_info::TypeInfo))] + #[cfg_attr(feature = "scale", derive(codec::Encode, codec::Decode, scale_info::TypeInfo))] pub struct ReturnFlags: u32 { /// If this bit is set all changes made by the contract execution are rolled back. const REVERT = 0x0000_0001; diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml index 3f8893dad6f2..0b631bd7bb03 100644 --- a/substrate/frame/election-provider-support/solution-type/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml @@ -24,7 +24,7 @@ proc-macro2 = "1.0.56" proc-macro-crate = "3.0.0" [dev-dependencies] -parity-scale-codec = "3.6.12" +codec = { package = "parity-scale-codec", version = "3.6.12" } scale-info = "2.11.1" sp-arithmetic = { path = "../../../primitives/arithmetic" } # used by generate_solution_type: diff --git a/substrate/frame/grandpa/Cargo.toml b/substrate/frame/grandpa/Cargo.toml index 302ce327aed4..37048b06608f 100644 --- a/substrate/frame/grandpa/Cargo.toml +++ b/substrate/frame/grandpa/Cargo.toml @@ -34,7 +34,7 @@ sp-staking = { path = "../../primitives/staking", default-features = false, feat sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] -grandpa = { package = "finality-grandpa", version = "0.16.2", features = ["derive-codec"] } +finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } frame-benchmarking = { path = "../benchmarking" } 
frame-election-provider-support = { path = "../election-provider-support" } pallet-balances = { path = "../balances" } diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 2d54f525b1f0..38b5536bc598 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -20,8 +20,8 @@ #![cfg(test)] use crate::{self as pallet_grandpa, AuthorityId, AuthorityList, Config, ConsensusLog}; -use ::grandpa as finality_grandpa; use codec::Encode; +use finality_grandpa; use frame_election_provider_support::{ bounds::{ElectionBounds, ElectionBoundsBuilder}, onchain, SequentialPhragmen, diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml index 82fb9a1d8c5f..2105ba133147 100644 --- a/substrate/frame/sassafras/Cargo.toml +++ b/substrate/frame/sassafras/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -scale-codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } @@ -36,11 +36,11 @@ sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } [features] default = ["std"] std = [ + "codec/std", "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "log/std", - "scale-codec/std", "scale-info/std", "sp-consensus-sassafras/std", "sp-io/std", diff --git a/substrate/frame/sassafras/src/lib.rs b/substrate/frame/sassafras/src/lib.rs index 8cbf1e47e320..d521ed9dd91b 100644 --- a/substrate/frame/sassafras/src/lib.rs +++ b/substrate/frame/sassafras/src/lib.rs @@ -47,8 +47,8 @@ #![warn(unused_must_use, unsafe_code, 
unused_variables, unused_imports, missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] +use codec::{Decode, Encode, MaxEncodedLen}; use log::{debug, error, trace, warn}; -use scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use frame_support::{ diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index f6507cd02c71..e41f7f1c0ef3 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -316,11 +316,11 @@ pub mod primitives { /// /// This is already part of the [`prelude`]. pub mod derive { + pub use codec::{Decode, Encode}; pub use frame_support::{ CloneNoBound, DebugNoBound, DefaultNoBound, EqNoBound, OrdNoBound, PartialEqNoBound, PartialOrdNoBound, RuntimeDebugNoBound, }; - pub use parity_scale_codec::{Decode, Encode}; pub use scale_info::TypeInfo; pub use sp_runtime::RuntimeDebug; pub use sp_std::fmt::Debug; @@ -345,7 +345,7 @@ pub mod deps { pub use sp_runtime; pub use sp_std; - pub use parity_scale_codec as codec; + pub use codec; pub use scale_info; #[cfg(feature = "runtime")] diff --git a/substrate/frame/support/test/compile_pass/Cargo.toml b/substrate/frame/support/test/compile_pass/Cargo.toml index 37c069247e18..d6e0c66261a9 100644 --- a/substrate/frame/support/test/compile_pass/Cargo.toml +++ b/substrate/frame/support/test/compile_pass/Cargo.toml @@ -17,8 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -renamed-frame-support = { package = "frame-support", path = "../..", default-features = false } -renamed-frame-system = { package = "frame-system", path = "../../../system", default-features = false } +frame-support = { path = "../..", default-features = false } +frame-system = { path = "../../../system", default-features = false } sp-core = { path = "../../../../primitives/core", 
default-features = false } sp-runtime = { path = "../../../../primitives/runtime", default-features = false } sp-version = { path = "../../../../primitives/version", default-features = false } @@ -27,8 +27,8 @@ sp-version = { path = "../../../../primitives/version", default-features = false default = ["std"] std = [ "codec/std", - "renamed-frame-support/std", - "renamed-frame-system/std", + "frame-support/std", + "frame-system/std", "scale-info/std", "sp-core/std", "sp-runtime/std", diff --git a/substrate/frame/support/test/compile_pass/src/lib.rs b/substrate/frame/support/test/compile_pass/src/lib.rs index 07d2f7d9ecdb..37af683fbc7f 100644 --- a/substrate/frame/support/test/compile_pass/src/lib.rs +++ b/substrate/frame/support/test/compile_pass/src/lib.rs @@ -21,7 +21,7 @@ #![cfg_attr(not(feature = "std"), no_std)] -use renamed_frame_support::{ +use frame_support::{ construct_runtime, derive_impl, parameter_types, traits::{ConstU16, ConstU32, ConstU64, Everything}, }; @@ -51,8 +51,8 @@ parameter_types! { pub const Version: RuntimeVersion = VERSION; } -#[derive_impl(renamed_frame_system::config_preludes::TestDefaultConfig)] -impl renamed_frame_system::Config for Runtime { +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Runtime { type BaseCallFilter = Everything; type BlockWeights = (); type BlockLength = (); @@ -84,6 +84,6 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// A GRANDPA message for a substrate chain. -pub type Message
= grandpa::Message<
::Hash,
::Number>; +pub type Message
= + finality_grandpa::Message<
::Hash,
::Number>; /// A signed message. -pub type SignedMessage
= grandpa::SignedMessage< +pub type SignedMessage
= finality_grandpa::SignedMessage<
::Hash,
::Number, AuthoritySignature, @@ -89,21 +90,22 @@ pub type SignedMessage
= grandpa::SignedMessage< /// A primary propose message for this chain's block type. pub type PrimaryPropose
= - grandpa::PrimaryPropose<
::Hash,
::Number>; + finality_grandpa::PrimaryPropose<
::Hash,
::Number>; /// A prevote message for this chain's block type. -pub type Prevote
= grandpa::Prevote<
::Hash,
::Number>; +pub type Prevote
= + finality_grandpa::Prevote<
::Hash,
::Number>; /// A precommit message for this chain's block type. pub type Precommit
= - grandpa::Precommit<
::Hash,
::Number>; + finality_grandpa::Precommit<
::Hash,
::Number>; /// A catch up message for this chain's block type. -pub type CatchUp
= grandpa::CatchUp< +pub type CatchUp
= finality_grandpa::CatchUp<
::Hash,
::Number, AuthoritySignature, AuthorityId, >; /// A commit message for this chain's block type. -pub type Commit
= grandpa::Commit< +pub type Commit
= finality_grandpa::Commit<
::Hash,
::Number, AuthoritySignature, @@ -111,7 +113,7 @@ pub type Commit
= grandpa::Commit< >; /// A compact commit message for this chain's block type. -pub type CompactCommit
= grandpa::CompactCommit< +pub type CompactCommit
= finality_grandpa::CompactCommit<
::Hash,
::Number, AuthoritySignature, @@ -266,18 +268,36 @@ impl EquivocationProof { #[derive(Clone, Debug, Decode, Encode, PartialEq, Eq, TypeInfo)] pub enum Equivocation { /// Proof of equivocation at prevote stage. - Prevote(grandpa::Equivocation, AuthoritySignature>), + Prevote( + finality_grandpa::Equivocation< + AuthorityId, + finality_grandpa::Prevote, + AuthoritySignature, + >, + ), /// Proof of equivocation at precommit stage. - Precommit(grandpa::Equivocation, AuthoritySignature>), + Precommit( + finality_grandpa::Equivocation< + AuthorityId, + finality_grandpa::Precommit, + AuthoritySignature, + >, + ), } -impl From, AuthoritySignature>> - for Equivocation +impl + From< + finality_grandpa::Equivocation< + AuthorityId, + finality_grandpa::Prevote, + AuthoritySignature, + >, + > for Equivocation { fn from( - equivocation: grandpa::Equivocation< + equivocation: finality_grandpa::Equivocation< AuthorityId, - grandpa::Prevote, + finality_grandpa::Prevote, AuthoritySignature, >, ) -> Self { @@ -285,13 +305,19 @@ impl From, Autho } } -impl From, AuthoritySignature>> - for Equivocation +impl + From< + finality_grandpa::Equivocation< + AuthorityId, + finality_grandpa::Precommit, + AuthoritySignature, + >, + > for Equivocation { fn from( - equivocation: grandpa::Equivocation< + equivocation: finality_grandpa::Equivocation< AuthorityId, - grandpa::Precommit, + finality_grandpa::Precommit, AuthoritySignature, >, ) -> Self { @@ -358,10 +384,10 @@ where match report.equivocation { Equivocation::Prevote(equivocation) => { - check!(equivocation, grandpa::Message::Prevote); + check!(equivocation, finality_grandpa::Message::Prevote); }, Equivocation::Precommit(equivocation) => { - check!(equivocation, grandpa::Message::Precommit); + check!(equivocation, finality_grandpa::Message::Precommit); }, } } @@ -389,7 +415,7 @@ pub fn localized_payload_with_buffer( /// Check a message signature by encoding the message as a localized payload and /// verifying the provided signature using 
the expected authority id. pub fn check_message_signature( - message: &grandpa::Message, + message: &finality_grandpa::Message, id: &AuthorityId, signature: &AuthoritySignature, round: RoundNumber, @@ -407,7 +433,7 @@ where /// The encoding necessary to verify the signature will be done using the given /// buffer, the original content of the buffer will be cleared. pub fn check_message_signature_with_buffer( - message: &grandpa::Message, + message: &finality_grandpa::Message, id: &AuthorityId, signature: &AuthoritySignature, round: RoundNumber, @@ -437,11 +463,11 @@ where #[cfg(feature = "std")] pub fn sign_message( keystore: KeystorePtr, - message: grandpa::Message, + message: finality_grandpa::Message, public: AuthorityId, round: RoundNumber, set_id: SetId, -) -> Option> +) -> Option> where H: Encode, N: Encode, @@ -456,7 +482,7 @@ where .try_into() .ok()?; - Some(grandpa::SignedMessage { message, signature, id: public }) + Some(finality_grandpa::SignedMessage { message, signature, id: public }) } /// An opaque type used to represent the key ownership proof at the runtime API diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index c8eb9b76b93b..792755730839 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -18,7 +18,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -scale-codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } @@ -30,7 +30,7 @@ sp-runtime = { path = "../../runtime", default-features = false } [features] default = ["std"] std = [ - "scale-codec/std", 
+ "codec/std", "scale-info/std", "serde/std", "sp-api/std", diff --git a/substrate/primitives/consensus/sassafras/src/digests.rs b/substrate/primitives/consensus/sassafras/src/digests.rs index 64190a41ce1c..bac31f57f2da 100644 --- a/substrate/primitives/consensus/sassafras/src/digests.rs +++ b/substrate/primitives/consensus/sassafras/src/digests.rs @@ -22,7 +22,7 @@ use crate::{ EpochConfiguration, Randomness, Slot, SASSAFRAS_ENGINE_ID, }; -use scale_codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(not(feature = "std"))] diff --git a/substrate/primitives/consensus/sassafras/src/lib.rs b/substrate/primitives/consensus/sassafras/src/lib.rs index c1fea74d0452..d7880c4de9e8 100644 --- a/substrate/primitives/consensus/sassafras/src/lib.rs +++ b/substrate/primitives/consensus/sassafras/src/lib.rs @@ -24,7 +24,7 @@ extern crate alloc; use alloc::vec::Vec; -use scale_codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_core::crypto::KeyTypeId; use sp_runtime::{ConsensusEngineId, RuntimeDebug}; diff --git a/substrate/primitives/consensus/sassafras/src/ticket.rs b/substrate/primitives/consensus/sassafras/src/ticket.rs index 345de99be28d..fd025f1d53ea 100644 --- a/substrate/primitives/consensus/sassafras/src/ticket.rs +++ b/substrate/primitives/consensus/sassafras/src/ticket.rs @@ -18,7 +18,7 @@ //! Primitives related to tickets. 
use crate::vrf::RingVrfSignature; -use scale_codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; pub use sp_core::ed25519::{Public as EphemeralPublic, Signature as EphemeralSignature}; diff --git a/substrate/primitives/consensus/sassafras/src/vrf.rs b/substrate/primitives/consensus/sassafras/src/vrf.rs index 537cff52ab6f..f8def1b5f189 100644 --- a/substrate/primitives/consensus/sassafras/src/vrf.rs +++ b/substrate/primitives/consensus/sassafras/src/vrf.rs @@ -20,7 +20,7 @@ use crate::{Randomness, TicketBody, TicketId}; #[cfg(not(feature = "std"))] use alloc::vec::Vec; -use scale_codec::Encode; +use codec::Encode; use sp_consensus_slots::Slot; pub use sp_core::bandersnatch::{ diff --git a/templates/minimal/runtime/Cargo.toml b/templates/minimal/runtime/Cargo.toml index ab6a48b73f3c..42ea49ff4046 100644 --- a/templates/minimal/runtime/Cargo.toml +++ b/templates/minimal/runtime/Cargo.toml @@ -10,7 +10,7 @@ edition.workspace = true publish = false [dependencies] -parity-scale-codec = { version = "3.6.12", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.6.0", default-features = false } # this is a frame-based runtime, thus importing `frame` with runtime feature enabled. 
@@ -39,7 +39,7 @@ substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optio [features] default = ["std"] std = [ - "parity-scale-codec/std", + "codec/std", "scale-info/std", "frame/std", diff --git a/templates/parachain/node/Cargo.toml b/templates/parachain/node/Cargo.toml index 94873cf1faea..1737c6a9df75 100644 --- a/templates/parachain/node/Cargo.toml +++ b/templates/parachain/node/Cargo.toml @@ -56,7 +56,7 @@ sp-io = { path = "../../../substrate/primitives/io" } sp-runtime = { path = "../../../substrate/primitives/runtime" } sp-timestamp = { path = "../../../substrate/primitives/timestamp" } substrate-frame-rpc-system = { path = "../../../substrate/utils/frame/rpc/system" } -substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } # Polkadot polkadot-cli = { path = "../../../polkadot/cli", features = ["rococo-native"] } diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index ce6308915871..587dd19faf3e 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -28,6 +28,7 @@ use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; // Substrate Imports use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE; +use prometheus_endpoint::Registry; use sc_client_api::Backend; use sc_consensus::ImportQueue; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; @@ -37,7 +38,6 @@ use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, Ta use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_keystore::KeystorePtr; -use substrate_prometheus_endpoint::Registry; #[docify::export(wasm_executor)] type ParachainExecutor = WasmExecutor; From 
2460cddf57660a88844d201f769eb17a7accce5a Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Wed, 5 Jun 2024 21:14:16 +0300 Subject: [PATCH 04/52] fix build on MacOS: bump secp256k1 and secp256k1-sys to patched versions (#4709) `secp256k1 v0.28.0` and `secp256k1-sys v0.9.0` were yanked because building them fails for `aarch64-apple-darwin` targets. Use the `secp256k1 v0.28.2` and `secp256k1-sys v0.9.2` patched versions that build fine on ARM chipset MacOS. --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 82aac5fb2a8d..9ef971b4be93 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18352,18 +18352,18 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.28.0" +version = "0.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2acea373acb8c21ecb5a23741452acd2593ed44ee3d343e72baaa143bc89d0d5" +checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" dependencies = [ "secp256k1-sys", ] [[package]] name = "secp256k1-sys" -version = "0.9.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09e67c467c38fd24bd5499dc9a18183b31575c12ee549197e3e20d57aa4fe3b7" +checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" dependencies = [ "cc", ] From 5fb4c40a3ea24ae3ab2bdfefb3f3a40badc2a583 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Thu, 6 Jun 2024 16:48:23 +0200 Subject: [PATCH 05/52] [CI] Delete cargo-deny config (#4677) Nobody seems to maintain this and the job is disabled since months. I think unless the Security team wants to pick this up we delete it for now. 
Signed-off-by: Oliver Tale-Yazdi --- .gitlab/pipeline/check.yml | 24 ------- substrate/scripts/ci/deny.toml | 118 --------------------------------- 2 files changed, 142 deletions(-) delete mode 100644 substrate/scripts/ci/deny.toml diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index 5c1a667a313c..2b8b90ef19a4 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -24,30 +24,6 @@ check-try-runtime: # experimental code may rely on try-runtime and vice-versa - time cargo check --locked --all --features try-runtime,experimental -# FIXME -.cargo-deny-licenses: - stage: check - extends: - - .docker-env - - .test-pr-refs - variables: - CARGO_DENY_CMD: "cargo deny --all-features check licenses -c ./substrate/scripts/ci/deny.toml" - script: - - $CARGO_DENY_CMD --hide-inclusion-graph - after_script: - - echo "___The complete log is in the artifacts___" - - $CARGO_DENY_CMD 2> deny.log - - if [ $CI_JOB_STATUS != 'success' ]; then - echo 'Please check license of your crate or add an exception to scripts/ci/deny.toml'; - fi - allow_failure: true - artifacts: - name: $CI_COMMIT_SHORT_SHA - expire_in: 3 days - when: always - paths: - - deny.log - # from substrate # not sure if it's needed in monorepo check-dependency-rules: diff --git a/substrate/scripts/ci/deny.toml b/substrate/scripts/ci/deny.toml deleted file mode 100644 index 2e1701f3c60d..000000000000 --- a/substrate/scripts/ci/deny.toml +++ /dev/null @@ -1,118 +0,0 @@ -[licenses] -# The lint level for crates which do not have a detectable license -unlicensed = "deny" -# List of explicitly allowed licenses -# See https://spdx.org/licenses/ for list of possible licenses -# [possible values: any SPDX 3.11 short identifier (+ optional exception)]. -allow = [ - "MPL-2.0", -] -# List of explicitly disallowed licenses -# See https://spdx.org/licenses/ for list of possible licenses -# [possible values: any SPDX 3.11 short identifier (+ optional exception)]. 
-deny = [ -] -# Lint level for licenses considered copyleft -copyleft = "deny" -# Blanket approval or denial for OSI-approved or FSF Free/Libre licenses -# * both - The license will be approved if it is both OSI-approved *AND* FSF -# * either - The license will be approved if it is either OSI-approved *OR* FSF -# * osi-only - The license will be approved if is OSI-approved *AND NOT* FSF -# * fsf-only - The license will be approved if is FSF *AND NOT* OSI-approved -# * neither - This predicate is ignored and the default lint level is used -allow-osi-fsf-free = "either" -# Lint level used when no other predicates are matched -# 1. License isn't in the allow or deny lists -# 2. License isn't copyleft -# 3. License isn't OSI/FSF, or allow-osi-fsf-free = "neither" -default = "deny" -# The confidence threshold for detecting a license from license text. -# The higher the value, the more closely the license text must be to the -# canonical license text of a valid SPDX license file. -# [possible values: any between 0.0 and 1.0]. 
-confidence-threshold = 0.8 -# Allow 1 or more licenses on a per-crate basis, so that particular licenses -# aren't accepted for every possible crate as with the normal allow list -exceptions = [ - # Each entry is the crate and version constraint, and its specific allow list - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "chain-spec-builder" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "mmr-gadget" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-bench" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-inspect" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-template-release" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-testing" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-authority-discovery" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-basic-authorship" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-block-builder" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-chain-spec" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-chain-spec-derive" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-cli" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-client-api" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-client-db" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-aura" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-babe" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-babe-rpc" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-beefy" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-beefy-rpc" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = 
"sc-consensus-epochs" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-grandpa" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-grandpa-rpc" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-manual-seal" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-pow" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-slots" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor-common" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor-wasmi" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor-wasmtime" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-informant" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-keystore" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-mixnet" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-common" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-gossip" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-light" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-statement" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-sync" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-test" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-transactions" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-offchain" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-peerset" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-proposer-metrics" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = 
"sc-rpc" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-api" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-server" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-spec-v2" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-runtime-test" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-service" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-service-test" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-state-db" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-statement-store" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-storage-monitor" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-sysinfo" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-telemetry" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-tracing" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-transaction-pool" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-transaction-pool-api" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "staging-node-cli" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "subkey" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "substrate" }, -] - -# Some crates don't have (easily) machine readable licensing information, -# adding a clarification entry for it allows you to manually specify the -# licensing information -[[licenses.clarify]] -# The name of the crate the clarification applies to -name = "ring" -# The SPDX expression for the license requirements of the crate -expression = "MIT AND ISC AND OpenSSL" -# One or more files in the crate's source used as the "source of truth" for -# the license expression. 
If the contents match, the clarification will be used -# when running the license check, otherwise the clarification will be ignored -# and the crate will be checked normally, which may produce warnings or errors -# depending on the rest of your configuration -license-files = [ - # Each entry is a crate relative path, and the (opaque) hash of its contents - { path = "LICENSE", hash = 0xbd0eed23 }, -] From dd4e6fd0968b2ccc9de8c5290d1c580b23491db9 Mon Sep 17 00:00:00 2001 From: Przemek Rzad Date: Thu, 6 Jun 2024 20:44:15 +0200 Subject: [PATCH 06/52] Update link to a latest polkadot release (#4711) The link to [releases](https://github.com/paritytech/polkadot-sdk/releases) usually takes you to a list with a bunch of drafts at the top so you have to scroll. [This link](https://github.com/paritytech/polkadot-sdk/releases/latest) takes you directly to the latest release. --- polkadot/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/README.md b/polkadot/README.md index d7435f27b946..47af79a3aa92 100644 --- a/polkadot/README.md +++ b/polkadot/README.md @@ -11,7 +11,7 @@ guides, like how to run a validator node, see the [Polkadot Wiki](https://wiki.p If you just wish to run a Polkadot node without compiling it yourself, you may either: -- run the latest binary from our [releases](https://github.com/paritytech/polkadot-sdk/releases) page (make sure to also +- run the [latest released binary](https://github.com/paritytech/polkadot-sdk/releases/latest) (make sure to also download all the `worker` binaries and put them in the same directory as `polkadot`), or - install Polkadot from one of our package repositories. From 494448b7fed02e098fbf38bad517d9245b056d1d Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Thu, 6 Jun 2024 21:22:22 +0200 Subject: [PATCH 07/52] Cleanup PVF artifact by cache limit and stale time (#4662) Part of https://github.com/paritytech/polkadot-sdk/issues/4324 We don't change but extend the existing cleanup strategy. 
- We still don't touch artifacts being stale less than 24h - First time we attempt pruning only when we hit cache limit (10 GB) - If somehow happened that after we hit 10 GB and least used artifact is stale less than 24h we don't remove it. --------- Co-authored-by: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> --- polkadot/node/core/pvf/common/src/prepare.rs | 2 + polkadot/node/core/pvf/src/artifacts.rs | 179 ++++++++++++++++-- polkadot/node/core/pvf/src/host.rs | 42 ++-- .../core/pvf/src/prepare/worker_interface.rs | 14 ++ polkadot/node/core/pvf/src/testing.rs | 8 +- prdoc/pr_4662.prdoc | 17 ++ 6 files changed, 226 insertions(+), 36 deletions(-) create mode 100644 prdoc/pr_4662.prdoc diff --git a/polkadot/node/core/pvf/common/src/prepare.rs b/polkadot/node/core/pvf/common/src/prepare.rs index 64e7f3d6bcf4..81e165a7b8a4 100644 --- a/polkadot/node/core/pvf/common/src/prepare.rs +++ b/polkadot/node/core/pvf/common/src/prepare.rs @@ -31,6 +31,8 @@ pub struct PrepareWorkerSuccess { pub struct PrepareSuccess { /// Canonical path to the compiled artifact. pub path: PathBuf, + /// Size in bytes + pub size: u64, /// Stats of the current preparation run. pub stats: PrepareStats, } diff --git a/polkadot/node/core/pvf/src/artifacts.rs b/polkadot/node/core/pvf/src/artifacts.rs index a3a48b61acb1..119af34082a9 100644 --- a/polkadot/node/core/pvf/src/artifacts.rs +++ b/polkadot/node/core/pvf/src/artifacts.rs @@ -142,6 +142,8 @@ pub enum ArtifactState { /// This is updated when we get the heads up for this artifact or when we just discover /// this file. last_time_needed: SystemTime, + /// Size in bytes + size: u64, /// Stats produced by successful preparation. 
prepare_stats: PrepareStats, }, @@ -169,6 +171,33 @@ pub struct Artifacts { inner: HashMap, } +/// Parameters we use to cleanup artifacts +/// After we hit the cache limit we remove the least used artifacts +/// but only if they are stale more than minimum stale time +#[derive(Debug)] +pub struct ArtifactsCleanupConfig { + // Max size in bytes. Reaching it the least used artefacts are deleted + cache_limit: u64, + // Inactive time after which artefact is allowed to be deleted + min_stale_time: Duration, +} + +impl Default for ArtifactsCleanupConfig { + fn default() -> Self { + Self { + cache_limit: 10 * 1024 * 1024 * 1024, // 10 GiB + min_stale_time: Duration::from_secs(24 * 60 * 60), // 24 hours + } + } +} + +#[cfg(test)] +impl ArtifactsCleanupConfig { + pub fn new(cache_limit: u64, min_stale_time: Duration) -> Self { + Self { cache_limit, min_stale_time } + } +} + impl Artifacts { #[cfg(test)] pub(crate) fn empty() -> Self { @@ -180,6 +209,11 @@ impl Artifacts { self.inner.len() } + #[cfg(test)] + fn artifact_ids(&self) -> Vec { + self.inner.keys().cloned().collect() + } + /// Create an empty table and the cache directory on-disk if it doesn't exist. pub async fn new(cache_path: &Path) -> Self { // Make sure that the cache path directory and all its parents are created. @@ -234,12 +268,16 @@ impl Artifacts { artifact_id: ArtifactId, path: PathBuf, last_time_needed: SystemTime, + size: u64, prepare_stats: PrepareStats, ) { // See the precondition. always!(self .inner - .insert(artifact_id, ArtifactState::Prepared { path, last_time_needed, prepare_stats }) + .insert( + artifact_id, + ArtifactState::Prepared { path, last_time_needed, size, prepare_stats } + ) .is_none()); } @@ -251,25 +289,40 @@ impl Artifacts { }) } - /// Remove artifacts older than the given TTL and return id and path of the removed ones. 
- pub fn prune(&mut self, artifact_ttl: Duration) -> Vec<(ArtifactId, PathBuf)> { + /// Remove artifacts older than the given TTL when the total artifact size reaches the limit + /// and return id and path of the removed ones + pub fn prune(&mut self, cleanup_config: &ArtifactsCleanupConfig) -> Vec<(ArtifactId, PathBuf)> { + let mut to_remove = vec![]; let now = SystemTime::now(); - let mut to_remove = vec![]; + let mut total_size = 0; + let mut artifact_sizes = vec![]; + for (k, v) in self.inner.iter() { - if let ArtifactState::Prepared { last_time_needed, ref path, .. } = *v { - if now - .duration_since(last_time_needed) - .map(|age| age > artifact_ttl) - .unwrap_or(false) - { - to_remove.push((k.clone(), path.clone())); - } + if let ArtifactState::Prepared { ref path, last_time_needed, size, .. } = *v { + total_size += size; + artifact_sizes.push((k.clone(), path.clone(), size, last_time_needed)); } } + artifact_sizes + .sort_by_key(|&(_, _, _, last_time_needed)| std::cmp::Reverse(last_time_needed)); + + while total_size > cleanup_config.cache_limit { + let Some((artifact_id, path, size, last_time_needed)) = artifact_sizes.pop() else { + break + }; + + let used_recently = now + .duration_since(last_time_needed) + .map(|stale_time| stale_time < cleanup_config.min_stale_time) + .unwrap_or(true); + if used_recently { + break; + } - for artifact in &to_remove { - self.inner.remove(&artifact.0); + self.inner.remove(&artifact_id); + to_remove.push((artifact_id, path)); + total_size -= size; } to_remove @@ -278,6 +331,8 @@ impl Artifacts { #[cfg(test)] mod tests { + use crate::testing::artifact_id; + use super::*; #[tokio::test] @@ -307,4 +362,100 @@ mod tests { assert!(entries.contains(&String::from("worker-prepare-test"))); assert_eq!(artifacts.len(), 0); } + + #[tokio::test] + async fn test_pruned_by_cache_size() { + let mock_now = SystemTime::now(); + let tempdir = tempfile::tempdir().unwrap(); + let cache_path = tempdir.path(); + + let path1 = 
generate_artifact_path(cache_path); + let path2 = generate_artifact_path(cache_path); + let path3 = generate_artifact_path(cache_path); + let artifact_id1 = artifact_id(1); + let artifact_id2 = artifact_id(2); + let artifact_id3 = artifact_id(3); + + let mut artifacts = Artifacts::new(cache_path).await; + let cleanup_config = ArtifactsCleanupConfig::new(1500, Duration::from_secs(0)); + + artifacts.insert_prepared( + artifact_id1.clone(), + path1.clone(), + mock_now - Duration::from_secs(5), + 1024, + PrepareStats::default(), + ); + artifacts.insert_prepared( + artifact_id2.clone(), + path2.clone(), + mock_now - Duration::from_secs(10), + 1024, + PrepareStats::default(), + ); + artifacts.insert_prepared( + artifact_id3.clone(), + path3.clone(), + mock_now - Duration::from_secs(15), + 1024, + PrepareStats::default(), + ); + + let pruned = artifacts.prune(&cleanup_config); + + assert!(artifacts.artifact_ids().contains(&artifact_id1)); + assert!(!pruned.contains(&(artifact_id1, path1))); + assert!(!artifacts.artifact_ids().contains(&artifact_id2)); + assert!(pruned.contains(&(artifact_id2, path2))); + assert!(!artifacts.artifact_ids().contains(&artifact_id3)); + assert!(pruned.contains(&(artifact_id3, path3))); + } + + #[tokio::test] + async fn test_did_not_prune_by_cache_size_because_of_stale_time() { + let mock_now = SystemTime::now(); + let tempdir = tempfile::tempdir().unwrap(); + let cache_path = tempdir.path(); + + let path1 = generate_artifact_path(cache_path); + let path2 = generate_artifact_path(cache_path); + let path3 = generate_artifact_path(cache_path); + let artifact_id1 = artifact_id(1); + let artifact_id2 = artifact_id(2); + let artifact_id3 = artifact_id(3); + + let mut artifacts = Artifacts::new(cache_path).await; + let cleanup_config = ArtifactsCleanupConfig::new(1500, Duration::from_secs(12)); + + artifacts.insert_prepared( + artifact_id1.clone(), + path1.clone(), + mock_now - Duration::from_secs(5), + 1024, + PrepareStats::default(), + ); + 
artifacts.insert_prepared( + artifact_id2.clone(), + path2.clone(), + mock_now - Duration::from_secs(10), + 1024, + PrepareStats::default(), + ); + artifacts.insert_prepared( + artifact_id3.clone(), + path3.clone(), + mock_now - Duration::from_secs(15), + 1024, + PrepareStats::default(), + ); + + let pruned = artifacts.prune(&cleanup_config); + + assert!(artifacts.artifact_ids().contains(&artifact_id1)); + assert!(!pruned.contains(&(artifact_id1, path1))); + assert!(artifacts.artifact_ids().contains(&artifact_id2)); + assert!(!pruned.contains(&(artifact_id2, path2))); + assert!(!artifacts.artifact_ids().contains(&artifact_id3)); + assert!(pruned.contains(&(artifact_id3, path3))); + } } diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 4065598a3ac4..462631d33b52 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -21,7 +21,7 @@ //! [`ValidationHost`], that allows communication with that event-loop. use crate::{ - artifacts::{ArtifactId, ArtifactPathId, ArtifactState, Artifacts}, + artifacts::{ArtifactId, ArtifactPathId, ArtifactState, Artifacts, ArtifactsCleanupConfig}, execute::{self, PendingExecutionRequest}, metrics::Metrics, prepare, Priority, SecurityStatus, ValidationError, LOG_TARGET, @@ -293,7 +293,7 @@ pub async fn start( let run_host = async move { run(Inner { cleanup_pulse_interval: Duration::from_secs(3600), - artifact_ttl: Duration::from_secs(3600 * 24), + cleanup_config: ArtifactsCleanupConfig::default(), artifacts, to_host_rx, to_prepare_queue_tx, @@ -337,7 +337,7 @@ impl AwaitingPrepare { struct Inner { cleanup_pulse_interval: Duration, - artifact_ttl: Duration, + cleanup_config: ArtifactsCleanupConfig, artifacts: Artifacts, to_host_rx: mpsc::Receiver, @@ -359,7 +359,7 @@ struct Fatal; async fn run( Inner { cleanup_pulse_interval, - artifact_ttl, + cleanup_config, mut artifacts, to_host_rx, from_prepare_queue_rx, @@ -415,7 +415,7 @@ async fn run( 
break_if_fatal!(handle_cleanup_pulse( &mut to_sweeper_tx, &mut artifacts, - artifact_ttl, + &cleanup_config, ).await); }, to_host = to_host_rx.next() => { @@ -803,8 +803,12 @@ async fn handle_prepare_done( } *state = match result { - Ok(PrepareSuccess { path, stats: prepare_stats }) => - ArtifactState::Prepared { path, last_time_needed: SystemTime::now(), prepare_stats }, + Ok(PrepareSuccess { path, stats: prepare_stats, size }) => ArtifactState::Prepared { + path, + last_time_needed: SystemTime::now(), + size, + prepare_stats, + }, Err(error) => { let last_time_failed = SystemTime::now(); let num_failures = *num_failures + 1; @@ -859,9 +863,9 @@ async fn enqueue_prepare_for_execute( async fn handle_cleanup_pulse( sweeper_tx: &mut mpsc::Sender, artifacts: &mut Artifacts, - artifact_ttl: Duration, + cleanup_config: &ArtifactsCleanupConfig, ) -> Result<(), Fatal> { - let to_remove = artifacts.prune(artifact_ttl); + let to_remove = artifacts.prune(cleanup_config); gum::debug!( target: LOG_TARGET, "PVF pruning: {} artifacts reached their end of life", @@ -959,7 +963,7 @@ fn pulse_every(interval: std::time::Duration) -> impl futures::Stream #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::{artifacts::generate_artifact_path, PossiblyInvalidError}; + use crate::{artifacts::generate_artifact_path, testing::artifact_id, PossiblyInvalidError}; use assert_matches::assert_matches; use futures::future::BoxFuture; use polkadot_node_core_pvf_common::prepare::PrepareStats; @@ -981,14 +985,9 @@ pub(crate) mod tests { } } - /// Creates a new PVF which artifact id can be uniquely identified by the given number. 
- fn artifact_id(discriminator: u32) -> ArtifactId { - ArtifactId::from_pvf_prep_data(&PvfPrepData::from_discriminator(discriminator)) - } - struct Builder { cleanup_pulse_interval: Duration, - artifact_ttl: Duration, + cleanup_config: ArtifactsCleanupConfig, artifacts: Artifacts, } @@ -997,8 +996,7 @@ pub(crate) mod tests { Self { // these are selected high to not interfere in tests in which pruning is irrelevant. cleanup_pulse_interval: Duration::from_secs(3600), - artifact_ttl: Duration::from_secs(3600), - + cleanup_config: ArtifactsCleanupConfig::default(), artifacts: Artifacts::empty(), } } @@ -1022,7 +1020,7 @@ pub(crate) mod tests { } impl Test { - fn new(Builder { cleanup_pulse_interval, artifact_ttl, artifacts }: Builder) -> Self { + fn new(Builder { cleanup_pulse_interval, artifacts, cleanup_config }: Builder) -> Self { let (to_host_tx, to_host_rx) = mpsc::channel(10); let (to_prepare_queue_tx, to_prepare_queue_rx) = mpsc::channel(10); let (from_prepare_queue_tx, from_prepare_queue_rx) = mpsc::unbounded(); @@ -1032,7 +1030,7 @@ pub(crate) mod tests { let run = run(Inner { cleanup_pulse_interval, - artifact_ttl, + cleanup_config, artifacts, to_host_rx, to_prepare_queue_tx, @@ -1183,19 +1181,21 @@ pub(crate) mod tests { let mut builder = Builder::default(); builder.cleanup_pulse_interval = Duration::from_millis(100); - builder.artifact_ttl = Duration::from_millis(500); + builder.cleanup_config = ArtifactsCleanupConfig::new(1024, Duration::from_secs(0)); let path1 = generate_artifact_path(cache_path); let path2 = generate_artifact_path(cache_path); builder.artifacts.insert_prepared( artifact_id(1), path1.clone(), mock_now, + 1024, PrepareStats::default(), ); builder.artifacts.insert_prepared( artifact_id(2), path2.clone(), mock_now, + 1024, PrepareStats::default(), ); let mut test = builder.build(); diff --git a/polkadot/node/core/pvf/src/prepare/worker_interface.rs b/polkadot/node/core/pvf/src/prepare/worker_interface.rs index 5c4245d76315..22ee93319d84 
100644 --- a/polkadot/node/core/pvf/src/prepare/worker_interface.rs +++ b/polkadot/node/core/pvf/src/prepare/worker_interface.rs @@ -234,6 +234,19 @@ async fn handle_response( return Outcome::TimedOut } + let size = match tokio::fs::metadata(cache_path).await { + Ok(metadata) => metadata.len(), + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?cache_path, + "failed to read size of the artifact: {}", + err, + ); + return Outcome::IoErr(err.to_string()) + }, + }; + // The file name should uniquely identify the artifact even across restarts. In case the cache // for some reason is not cleared correctly, we cannot // accidentally execute an artifact compiled under a different wasmtime version, host @@ -253,6 +266,7 @@ async fn handle_response( worker, result: Ok(PrepareSuccess { path: artifact_path, + size, stats: PrepareStats { cpu_time_elapsed, memory_stats: memory_stats.clone() }, }), }, diff --git a/polkadot/node/core/pvf/src/testing.rs b/polkadot/node/core/pvf/src/testing.rs index 60b0b4b8d3d0..8c75dafa69c2 100644 --- a/polkadot/node/core/pvf/src/testing.rs +++ b/polkadot/node/core/pvf/src/testing.rs @@ -21,8 +21,9 @@ pub use crate::{ worker_interface::{spawn_with_program_path, SpawnErr}, }; -use crate::get_worker_version; +use crate::{artifacts::ArtifactId, get_worker_version}; use is_executable::IsExecutable; +use polkadot_node_core_pvf_common::pvf::PvfPrepData; use polkadot_node_primitives::NODE_VERSION; use polkadot_primitives::ExecutorParams; use std::{ @@ -126,3 +127,8 @@ pub fn build_workers_and_get_paths() -> (PathBuf, PathBuf) { let guard = mutex.lock().unwrap(); (guard.0.clone(), guard.1.clone()) } + +/// Creates a new PVF which artifact id can be uniquely identified by the given number. 
+pub fn artifact_id(discriminator: u32) -> ArtifactId { + ArtifactId::from_pvf_prep_data(&PvfPrepData::from_discriminator(discriminator)) +} diff --git a/prdoc/pr_4662.prdoc b/prdoc/pr_4662.prdoc new file mode 100644 index 000000000000..50f8a5bfd011 --- /dev/null +++ b/prdoc/pr_4662.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Cleanup PVF artifact by cache limit and stale time + +doc: + - audience: Node Operator + description: | + Extend the PVF artifacts cleanup strategy. Previously, we pruned artifacts that were stale more than 24 hours. + After this change we attempt pruning artifacts only when they reach the 10 GB cache limit. If the least used + artifact is stale less than 24 hours we don't remove it. + +crates: + - name: polkadot-node-core-pvf-common + bump: patch + - name: polkadot-node-core-pvf + bump: patch From 426956f87cc91f94ce71e2ed74ca34d88766e1d8 Mon Sep 17 00:00:00 2001 From: batman Date: Fri, 7 Jun 2024 04:06:34 +0800 Subject: [PATCH 08/52] Update the README to include a link to the Polkadot SDK Version Manager (#4718) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds a link to the [Polkadot SDK Version Manager](https://github.com/paritytech/psvm) since this tool is not well known, but very useful for developers using the Polkadot SDK. --------- Co-authored-by: Bastian Köcher --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 773481732520..50972da058af 100644 --- a/README.md +++ b/README.md @@ -81,3 +81,7 @@ fellowship, this separation, the RFC process This repository is the amalgamation of 3 separate repositories that used to make up Polkadot SDK, namely Substrate, Polkadot and Cumulus. 
Read more about the merge and its history [here](https://polkadot-public.notion.site/Polkadot-SDK-FAQ-fbc4cecc2c46443fb37b9eeec2f0d85f). + +## Other useful resources and tooling + +* A simple tool to manage and update the Polkadot SDK dependencies (https://github.com/paritytech/psvm) From 2a89cc27339fe9d40afa5e5e32da1ddc17177917 Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Fri, 7 Jun 2024 12:42:45 +0300 Subject: [PATCH 09/52] statement-distribution: Fix false warning (#4727) ... when backing group is of size 1. Signed-off-by: Alexandru Gheorghe --- .../node/network/statement-distribution/src/v2/cluster.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/polkadot/node/network/statement-distribution/src/v2/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/cluster.rs index 87b25c785d83..26d7a68eb2a7 100644 --- a/polkadot/node/network/statement-distribution/src/v2/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/cluster.rs @@ -429,7 +429,9 @@ impl ClusterTracker { pub fn warn_if_too_many_pending_statements(&self, parent_hash: Hash) { if self.pending.iter().filter(|pending| !pending.1.is_empty()).count() >= - self.validators.len() + self.validators.len() && + // No reason to warn if we are the only node in the cluster. 
+ self.validators.len() > 1 { gum::warn!( target: LOG_TARGET, From 9dfe0fee74ce1e4b7f99c1a5122b635aa43a1e5f Mon Sep 17 00:00:00 2001 From: eskimor Date: Fri, 7 Jun 2024 12:50:30 +0200 Subject: [PATCH 10/52] Fix occupied core handling (#4691) Co-authored-by: eskimor Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> --- .../parachains/src/assigner_coretime/tests.rs | 2 +- .../src/assigner_on_demand/tests.rs | 2 +- .../src/assigner_parachains/tests.rs | 2 +- polkadot/runtime/parachains/src/builder.rs | 49 ++++-- .../runtime/parachains/src/configuration.rs | 6 + .../src/paras_inherent/benchmarking.rs | 4 +- .../parachains/src/paras_inherent/mod.rs | 19 ++- .../parachains/src/paras_inherent/tests.rs | 20 +-- .../parachains/src/runtime_api_impl/v10.rs | 4 +- .../src/runtime_api_impl/vstaging.rs | 15 +- polkadot/runtime/parachains/src/scheduler.rs | 161 ++++++++++++------ .../parachains/src/scheduler/migration.rs | 2 +- .../runtime/parachains/src/scheduler/tests.rs | 105 ++++++++---- prdoc/pr_4691.prdoc | 14 ++ 14 files changed, 270 insertions(+), 135 deletions(-) create mode 100644 prdoc/pr_4691.prdoc diff --git a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs index 41cf21e267e4..81a0988ea67c 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs @@ -75,7 +75,7 @@ fn run_to_block( Scheduler::initializer_initialize(b + 1); // In the real runtime this is expected to be called by the `InclusionInherent` pallet. 
- Scheduler::free_cores_and_fill_claimqueue(BTreeMap::new(), b + 1); + Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1); } } diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs index 8ac6ab77beee..5747413e7147 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs @@ -77,7 +77,7 @@ fn run_to_block( OnDemandAssigner::on_initialize(b + 1); // In the real runtime this is expected to be called by the `InclusionInherent` pallet. - Scheduler::free_cores_and_fill_claimqueue(BTreeMap::new(), b + 1); + Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1); } } diff --git a/polkadot/runtime/parachains/src/assigner_parachains/tests.rs b/polkadot/runtime/parachains/src/assigner_parachains/tests.rs index ebd24e89162a..14cb1a897860 100644 --- a/polkadot/runtime/parachains/src/assigner_parachains/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_parachains/tests.rs @@ -71,7 +71,7 @@ fn run_to_block( Scheduler::initializer_initialize(b + 1); // In the real runtime this is expected to be called by the `InclusionInherent` pallet. - Scheduler::free_cores_and_fill_claimqueue(BTreeMap::new(), b + 1); + Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1); } } diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index 5ed5a2b527c0..c046526ba372 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -92,11 +92,17 @@ pub(crate) struct BenchBuilder { /// will correspond to core index 3. There must be one entry for each core with a dispute /// statement set. dispute_sessions: Vec, + /// Paras here will both be backed in the inherent data and already occupying a core (which is + /// freed via bitfields). + /// /// Map from para id to number of validity votes. 
Core indices are generated based on /// `elastic_paras` configuration. Each para id in `elastic_paras` gets the /// specified amount of consecutive cores assigned to it. If a para id is not present /// in `elastic_paras` it get assigned to a single core. backed_and_concluding_paras: BTreeMap, + + /// Paras which don't yet occupy a core, but will after the inherent has been processed. + backed_in_inherent_paras: BTreeMap, /// Map from para id (seed) to number of chained candidates. elastic_paras: BTreeMap, /// Make every candidate include a code upgrade by setting this to `Some` where the interior @@ -132,6 +138,7 @@ impl BenchBuilder { dispute_statements: BTreeMap::new(), dispute_sessions: Default::default(), backed_and_concluding_paras: Default::default(), + backed_in_inherent_paras: Default::default(), elastic_paras: Default::default(), code_upgrade: None, fill_claimqueue: true, @@ -167,6 +174,12 @@ impl BenchBuilder { self } + /// Set a map from para id seed to number of validity votes for votes in inherent data. + pub(crate) fn set_backed_in_inherent_paras(mut self, backed: BTreeMap) -> Self { + self.backed_in_inherent_paras = backed; + self + } + /// Set a map from para id seed to number of cores assigned to it. pub(crate) fn set_elastic_paras(mut self, elastic_paras: BTreeMap) -> Self { self.elastic_paras = elastic_paras; @@ -753,8 +766,8 @@ impl BenchBuilder { /// /// Note that this API only allows building scenarios where the `backed_and_concluding_paras` /// are mutually exclusive with the cores for disputes. So - /// `backed_and_concluding_paras.len() + dispute_sessions.len()` must be less than the max - /// number of cores. + /// `backed_and_concluding_paras.len() + dispute_sessions.len() + backed_in_inherent_paras` must + /// be less than the max number of cores. pub(crate) fn build(self) -> Bench { // Make sure relevant storage is cleared. 
This is just to get the asserts to work when // running tests because it seems the storage is not cleared in between. @@ -771,8 +784,10 @@ impl BenchBuilder { .sum::() .saturating_sub(self.elastic_paras.len() as usize); - let used_cores = - self.dispute_sessions.len() + self.backed_and_concluding_paras.len() + extra_cores; + let used_cores = self.dispute_sessions.len() + + self.backed_and_concluding_paras.len() + + self.backed_in_inherent_paras.len() + + extra_cores; assert!(used_cores <= max_cores); let fill_claimqueue = self.fill_claimqueue; @@ -793,8 +808,12 @@ impl BenchBuilder { &builder.elastic_paras, used_cores, ); + + let mut backed_in_inherent = BTreeMap::new(); + backed_in_inherent.append(&mut builder.backed_and_concluding_paras.clone()); + backed_in_inherent.append(&mut builder.backed_in_inherent_paras.clone()); let backed_candidates = builder.create_backed_candidates( - &builder.backed_and_concluding_paras, + &backed_in_inherent, &builder.elastic_paras, builder.code_upgrade, ); @@ -845,12 +864,16 @@ impl BenchBuilder { scheduler::AvailabilityCores::::set(cores); core_idx = 0u32; + + // We need entries in the claim queue for those: + all_cores.append(&mut builder.backed_in_inherent_paras.clone()); + if fill_claimqueue { let cores = all_cores .keys() .flat_map(|para_id| { (0..elastic_paras.get(¶_id).cloned().unwrap_or(1)) - .filter_map(|_para_local_core_idx| { + .map(|_para_local_core_idx| { let ttl = configuration::ActiveConfig::::get().scheduler_params.ttl; // Load an assignment into provider so that one is present to pop let assignment = @@ -859,17 +882,11 @@ impl BenchBuilder { ParaId::from(*para_id), ); - let entry = ( - CoreIndex(core_idx), - [ParasEntry::new(assignment, now + ttl)].into(), - ); - let res = if builder.unavailable_cores.contains(&core_idx) { - None - } else { - Some(entry) - }; core_idx += 1; - res + ( + CoreIndex(core_idx - 1), + [ParasEntry::new(assignment, now + ttl)].into(), + ) }) .collect::>)>>() }) diff --git 
a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index 10ecaa16a846..bffeab4a0d21 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -335,6 +335,8 @@ pub enum InconsistentError { InconsistentExecutorParams { inner: ExecutorParamError }, /// TTL should be bigger than lookahead LookaheadExceedsTTL, + /// Lookahead is zero, while it must be at least 1 for parachains to work. + LookaheadZero, /// Passed in queue size for on-demand was too large. OnDemandQueueSizeTooLarge, /// Number of delay tranches cannot be 0. @@ -432,6 +434,10 @@ where return Err(LookaheadExceedsTTL) } + if self.scheduler_params.lookahead == 0 { + return Err(LookaheadZero) + } + if self.scheduler_params.on_demand_queue_max_size > ON_DEMAND_MAX_QUEUE_MAX_SIZE { return Err(OnDemandQueueSizeTooLarge) } diff --git a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs index 267a9781a106..4c8b093451ed 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs @@ -110,7 +110,7 @@ benchmarks! { .collect(); let scenario = BenchBuilder::::new() - .set_backed_and_concluding_paras(cores_with_backed.clone()) + .set_backed_in_inherent_paras(cores_with_backed.clone()) .build(); let mut benchmark = scenario.data.clone(); @@ -161,7 +161,7 @@ benchmarks! 
{ .collect(); let scenario = BenchBuilder::::new() - .set_backed_and_concluding_paras(cores_with_backed.clone()) + .set_backed_in_inherent_paras(cores_with_backed.clone()) .set_code_upgrade(v) .build(); diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 386873aad457..8b527c09490d 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -560,7 +560,7 @@ impl Pallet { .chain(freed_disputed.into_iter().map(|core| (core, FreedReason::Concluded))) .chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut))) .collect::>(); - scheduler::Pallet::::free_cores_and_fill_claimqueue(freed, now); + scheduler::Pallet::::free_cores_and_fill_claim_queue(freed, now); METRICS.on_candidates_processed_total(backed_candidates.len() as u64); @@ -570,12 +570,13 @@ impl Pallet { .map(|b| *b) .unwrap_or(false); - let mut scheduled: BTreeMap> = BTreeMap::new(); - let mut total_scheduled_cores = 0; + let mut eligible: BTreeMap> = BTreeMap::new(); + let mut total_eligible_cores = 0; - for (core_idx, para_id) in scheduler::Pallet::::scheduled_paras() { - total_scheduled_cores += 1; - scheduled.entry(para_id).or_default().insert(core_idx); + for (core_idx, para_id) in scheduler::Pallet::::eligible_paras() { + total_eligible_cores += 1; + log::trace!(target: LOG_TARGET, "Found eligible para {:?} on core {:?}", para_id, core_idx); + eligible.entry(para_id).or_default().insert(core_idx); } let initial_candidate_count = backed_candidates.len(); @@ -583,12 +584,12 @@ impl Pallet { backed_candidates, &allowed_relay_parents, concluded_invalid_hashes, - scheduled, + eligible, core_index_enabled, ); let count = count_backed_candidates(&backed_candidates_with_core); - ensure!(count <= total_scheduled_cores, Error::::UnscheduledCandidate); + ensure!(count <= total_eligible_cores, Error::::UnscheduledCandidate); METRICS.on_candidates_sanitized(count 
as u64); @@ -1422,7 +1423,7 @@ fn map_candidates_to_cores::claimqueue_is_empty()); + assert!(scheduler::Pallet::::claim_queue_is_empty()); // Nothing is filtered out (including the backed candidates.) assert_eq!( @@ -257,7 +257,7 @@ mod enter { .unwrap(); // The current schedule is empty prior to calling `create_inherent_enter`. - assert!(scheduler::Pallet::::claimqueue_is_empty()); + assert!(scheduler::Pallet::::claim_queue_is_empty()); assert!(pallet::OnChainVotes::::get().is_none()); @@ -372,7 +372,7 @@ mod enter { let mut inherent_data = InherentData::new(); inherent_data.put_data(PARACHAINS_INHERENT_IDENTIFIER, &scenario.data).unwrap(); - assert!(!scheduler::Pallet::::claimqueue_is_empty()); + assert!(!scheduler::Pallet::::claim_queue_is_empty()); // The right candidates have been filtered out (the ones for cores 0,4,5) assert_eq!( @@ -618,7 +618,7 @@ mod enter { .unwrap(); // The current schedule is empty prior to calling `create_inherent_enter`. - assert!(scheduler::Pallet::::claimqueue_is_empty()); + assert!(scheduler::Pallet::::claim_queue_is_empty()); let multi_dispute_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); @@ -690,7 +690,7 @@ mod enter { .unwrap(); // The current schedule is empty prior to calling `create_inherent_enter`. - assert!(scheduler::Pallet::::claimqueue_is_empty()); + assert!(scheduler::Pallet::::claim_queue_is_empty()); let limit_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); @@ -762,7 +762,7 @@ mod enter { .unwrap(); // The current schedule is empty prior to calling `create_inherent_enter`. - assert!(scheduler::Pallet::::claimqueue_is_empty()); + assert!(scheduler::Pallet::::claim_queue_is_empty()); // Nothing is filtered out (including the backed candidates.) let limit_inherent_data = @@ -849,7 +849,7 @@ mod enter { .unwrap(); // The current schedule is empty prior to calling `create_inherent_enter`. 
- assert!(scheduler::Pallet::::claimqueue_is_empty()); + assert!(scheduler::Pallet::::claim_queue_is_empty()); // Nothing is filtered out (including the backed candidates.) let limit_inherent_data = @@ -1818,7 +1818,7 @@ mod sanitizers { ]); // Update scheduler's claimqueue with the parachains - scheduler::Pallet::::set_claimqueue(BTreeMap::from([ + scheduler::Pallet::::set_claim_queue(BTreeMap::from([ ( CoreIndex::from(0), VecDeque::from([ParasEntry::new( @@ -2001,7 +2001,7 @@ mod sanitizers { ]); // Update scheduler's claimqueue with the parachains - scheduler::Pallet::::set_claimqueue(BTreeMap::from([ + scheduler::Pallet::::set_claim_queue(BTreeMap::from([ ( CoreIndex::from(0), VecDeque::from([ParasEntry::new( @@ -2542,7 +2542,7 @@ mod sanitizers { ]); // Update scheduler's claimqueue with the parachains - scheduler::Pallet::::set_claimqueue(BTreeMap::from([ + scheduler::Pallet::::set_claim_queue(BTreeMap::from([ ( CoreIndex::from(0), VecDeque::from([ParasEntry::new( diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs index dbb79b86c56c..4417ec75abd6 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs @@ -66,7 +66,7 @@ pub fn availability_cores() -> Vec::free_cores_and_fill_claimqueue(Vec::new(), now); + scheduler::Pallet::::free_cores_and_fill_claim_queue(Vec::new(), now); let time_out_for = scheduler::Pallet::::availability_timeout_predicate(); @@ -305,7 +305,7 @@ pub fn validation_code( /// Implementation for the `candidate_pending_availability` function of the runtime API. #[deprecated( - note = "`candidate_pending_availability` will be removed. Use `candidates_pending_availability` to query + note = "`candidate_pending_availability` will be removed. 
Use `candidates_pending_availability` to query all candidates pending availability" )] pub fn candidate_pending_availability( diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index 8c239dc207f6..62e96e9fbb05 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -16,7 +16,7 @@ //! Put implementations of functions from staging APIs here. -use crate::{inclusion, initializer, scheduler}; +use crate::{configuration, inclusion, initializer, scheduler}; use polkadot_primitives::{CommittedCandidateReceipt, CoreIndex, Id as ParaId}; use sp_runtime::traits::One; use sp_std::{ @@ -32,12 +32,21 @@ pub fn claim_queue() -> BTreeMap>::free_cores_and_fill_claimqueue(Vec::new(), now); + >::free_cores_and_fill_claim_queue(Vec::new(), now); + let config = configuration::ActiveConfig::::get(); + // Extra sanity, config should already never be smaller than 1: + let n_lookahead = config.scheduler_params.lookahead.max(1); scheduler::ClaimQueue::::get() .into_iter() .map(|(core_index, entries)| { - (core_index, entries.into_iter().map(|e| e.para_id()).collect()) + // on cores timing out internal claim queue size may be temporarily longer than it + // should be as the timed out assignment might got pushed back to an already full claim + // queue: + ( + core_index, + entries.into_iter().map(|e| e.para_id()).take(n_lookahead as usize).collect(), + ) }) .collect() } diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index 0442301a32ff..33b4d849c490 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -36,6 +36,8 @@ //! number of groups as availability cores. Validator groups will be assigned to different //! availability cores over time. 
+use core::iter::Peekable; + use crate::{configuration, initializer::SessionChangeNotification, paras}; use frame_support::{pallet_prelude::*, traits::Defensive}; use frame_system::pallet_prelude::BlockNumberFor; @@ -45,7 +47,10 @@ use polkadot_primitives::{ }; use sp_runtime::traits::One; use sp_std::{ - collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + collections::{ + btree_map::{self, BTreeMap}, + vec_deque::VecDeque, + }, prelude::*, }; @@ -190,7 +195,29 @@ pub mod pallet { } } -type PositionInClaimqueue = u32; +type PositionInClaimQueue = u32; + +struct ClaimQueueIterator { + next_idx: u32, + queue: Peekable>>, +} + +impl Iterator for ClaimQueueIterator { + type Item = (CoreIndex, VecDeque); + + fn next(&mut self) -> Option { + let (idx, _) = self.queue.peek()?; + let val = if idx != &CoreIndex(self.next_idx) { + log::trace!(target: LOG_TARGET, "idx did not match claim queue idx: {:?} vs {:?}", idx, self.next_idx); + (CoreIndex(self.next_idx), VecDeque::new()) + } else { + let (idx, q) = self.queue.next()?; + (idx, q) + }; + self.next_idx += 1; + Some(val) + } +} impl Pallet { /// Called by the initializer to initialize the scheduler pallet. @@ -203,7 +230,7 @@ impl Pallet { /// Called before the initializer notifies of a new session. pub(crate) fn pre_new_session() { - Self::push_claimqueue_items_to_assignment_provider(); + Self::push_claim_queue_items_to_assignment_provider(); Self::push_occupied_cores_to_assignment_provider(); } @@ -309,37 +336,51 @@ impl Pallet { (concluded_paras, timedout_paras) } - /// Note that the given cores have become occupied. Update the claimqueue accordingly. + /// Get an iterator into the claim queues. + /// + /// This iterator will have an item for each and every core index up to the maximum core index + /// found in the claim queue. In other words there will be no holes/missing core indices, + /// between core 0 and the maximum, even if the claim queue was missing entries for particular + /// indices in between. 
(The iterator will return an empty `VecDeque` for those indices. + fn claim_queue_iterator() -> impl Iterator>)> { + let queues = ClaimQueue::::get(); + return ClaimQueueIterator::> { + next_idx: 0, + queue: queues.into_iter().peekable(), + } + } + + /// Note that the given cores have become occupied. Update the claim queue accordingly. pub(crate) fn occupied( now_occupied: BTreeMap, - ) -> BTreeMap { + ) -> BTreeMap { let mut availability_cores = AvailabilityCores::::get(); log::debug!(target: LOG_TARGET, "[occupied] now_occupied {:?}", now_occupied); - let pos_mapping: BTreeMap = now_occupied + let pos_mapping: BTreeMap = now_occupied .iter() .flat_map(|(core_idx, para_id)| { - match Self::remove_from_claimqueue(*core_idx, *para_id) { + match Self::remove_from_claim_queue(*core_idx, *para_id) { Err(e) => { log::debug!( target: LOG_TARGET, - "[occupied] error on remove_from_claimqueue {}", + "[occupied] error on remove_from_claim queue {}", e ); None }, - Ok((pos_in_claimqueue, pe)) => { + Ok((pos_in_claim_queue, pe)) => { availability_cores[core_idx.0 as usize] = CoreOccupied::Paras(pe); - Some((*core_idx, pos_in_claimqueue)) + Some((*core_idx, pos_in_claim_queue)) }, } }) .collect(); // Drop expired claims after processing now_occupied. - Self::drop_expired_claims_from_claimqueue(); + Self::drop_expired_claims_from_claim_queue(); AvailabilityCores::::set(availability_cores); @@ -349,7 +390,7 @@ impl Pallet { /// Iterates through every element in all claim queues and tries to add new assignments from the /// `AssignmentProvider`. A claim is considered expired if it's `ttl` field is lower than the /// current block height. 
- fn drop_expired_claims_from_claimqueue() { + fn drop_expired_claims_from_claim_queue() { let now = frame_system::Pallet::::block_number(); let availability_cores = AvailabilityCores::::get(); let ttl = configuration::ActiveConfig::::get().scheduler_params.ttl; @@ -357,13 +398,13 @@ impl Pallet { ClaimQueue::::mutate(|cq| { for (idx, _) in (0u32..).zip(availability_cores) { let core_idx = CoreIndex(idx); - if let Some(core_claimqueue) = cq.get_mut(&core_idx) { + if let Some(core_claim_queue) = cq.get_mut(&core_idx) { let mut i = 0; let mut num_dropped = 0; - while i < core_claimqueue.len() { - let maybe_dropped = if let Some(entry) = core_claimqueue.get(i) { + while i < core_claim_queue.len() { + let maybe_dropped = if let Some(entry) = core_claim_queue.get(i) { if entry.ttl < now { - core_claimqueue.remove(i) + core_claim_queue.remove(i) } else { None } @@ -381,11 +422,11 @@ impl Pallet { for _ in 0..num_dropped { // For all claims dropped due to TTL, attempt to pop a new entry to - // the back of the claimqueue. + // the back of the claim queue. if let Some(assignment) = T::AssignmentProvider::pop_assignment_for_core(core_idx) { - core_claimqueue.push_back(ParasEntry::new(assignment, now + ttl)); + core_claim_queue.push_back(ParasEntry::new(assignment, now + ttl)); } } } @@ -536,10 +577,10 @@ impl Pallet { } // on new session - fn push_claimqueue_items_to_assignment_provider() { + fn push_claim_queue_items_to_assignment_provider() { for (_, claim_queue) in ClaimQueue::::take() { // Push back in reverse order so that when we pop from the provider again, - // the entries in the claimqueue are in the same order as they are right now. + // the entries in the claim queue are in the same order as they are right now. 
for para_entry in claim_queue.into_iter().rev() { Self::maybe_push_assignment(para_entry); } @@ -554,15 +595,8 @@ impl Pallet { } } - // - // ClaimQueue related functions - // - fn claimqueue_lookahead() -> u32 { - configuration::ActiveConfig::::get().scheduler_params.lookahead - } - - /// Frees cores and fills the free claimqueue spots by popping from the `AssignmentProvider`. - pub fn free_cores_and_fill_claimqueue( + /// Frees cores and fills the free claim queue spots by popping from the `AssignmentProvider`. + pub fn free_cores_and_fill_claim_queue( just_freed_cores: impl IntoIterator, now: BlockNumberFor, ) { @@ -573,26 +607,33 @@ impl Pallet { if ValidatorGroups::::decode_len().map_or(true, |l| l == 0) { return } - // If there exists a core, ensure we schedule at least one job onto it. - let n_lookahead = Self::claimqueue_lookahead().max(1); let n_session_cores = T::AssignmentProvider::session_core_count(); let cq = ClaimQueue::::get(); let config = configuration::ActiveConfig::::get(); + // Extra sanity, config should already never be smaller than 1: + let n_lookahead = config.scheduler_params.lookahead.max(1); let max_availability_timeouts = config.scheduler_params.max_availability_timeouts; let ttl = config.scheduler_params.ttl; for core_idx in 0..n_session_cores { let core_idx = CoreIndex::from(core_idx); + let n_lookahead_used = cq.get(&core_idx).map_or(0, |v| v.len() as u32); + // add previously timedout paras back into the queue if let Some(mut entry) = timedout_paras.remove(&core_idx) { if entry.availability_timeouts < max_availability_timeouts { // Increment the timeout counter. entry.availability_timeouts += 1; - // Reset the ttl so that a timed out assignment. - entry.ttl = now + ttl; - Self::add_to_claimqueue(core_idx, entry); - // The claim has been added back into the claimqueue. 
+ if n_lookahead_used < n_lookahead { + entry.ttl = now + ttl; + } else { + // Over max capacity, we need to bump ttl (we exceeded the claim queue + // size, so otherwise the entry might get dropped before reaching the top): + entry.ttl = now + ttl + One::one(); + } + Self::add_to_claim_queue(core_idx, entry); + // The claim has been added back into the claim queue. // Do not pop another assignment for the core. continue } else { @@ -606,12 +647,9 @@ impl Pallet { if let Some(concluded_para) = concluded_paras.remove(&core_idx) { T::AssignmentProvider::report_processed(concluded_para); } - // We consider occupied cores to be part of the claimqueue - let n_lookahead_used = cq.get(&core_idx).map_or(0, |v| v.len() as u32) + - if Self::is_core_occupied(core_idx) { 1 } else { 0 }; for _ in n_lookahead_used..n_lookahead { if let Some(assignment) = T::AssignmentProvider::pop_assignment_for_core(core_idx) { - Self::add_to_claimqueue(core_idx, ParasEntry::new(assignment, now + ttl)); + Self::add_to_claim_queue(core_idx, ParasEntry::new(assignment, now + ttl)); } } } @@ -620,24 +658,17 @@ impl Pallet { debug_assert!(concluded_paras.is_empty()); } - fn is_core_occupied(core_idx: CoreIndex) -> bool { - match AvailabilityCores::::get().get(core_idx.0 as usize) { - None | Some(CoreOccupied::Free) => false, - Some(CoreOccupied::Paras(_)) => true, - } - } - - fn add_to_claimqueue(core_idx: CoreIndex, pe: ParasEntryType) { + fn add_to_claim_queue(core_idx: CoreIndex, pe: ParasEntryType) { ClaimQueue::::mutate(|la| { la.entry(core_idx).or_default().push_back(pe); }); } /// Returns `ParasEntry` with `para_id` at `core_idx` if found. 
- fn remove_from_claimqueue( + fn remove_from_claim_queue( core_idx: CoreIndex, para_id: ParaId, - ) -> Result<(PositionInClaimqueue, ParasEntryType), &'static str> { + ) -> Result<(PositionInClaimQueue, ParasEntryType), &'static str> { ClaimQueue::::mutate(|cq| { let core_claims = cq.get_mut(&core_idx).ok_or("core_idx not found in lookahead")?; @@ -654,20 +685,38 @@ impl Pallet { /// Paras scheduled next in the claim queue. pub(crate) fn scheduled_paras() -> impl Iterator { - let claimqueue = ClaimQueue::::get(); - claimqueue + let claim_queue = ClaimQueue::::get(); + claim_queue .into_iter() .filter_map(|(core_idx, v)| v.front().map(|e| (core_idx, e.assignment.para_id()))) } + /// Paras that may get backed on cores. + /// + /// 1. The para must be scheduled on core. + /// 2. Core needs to be free, otherwise backing is not possible. + pub(crate) fn eligible_paras() -> impl Iterator { + let availability_cores = AvailabilityCores::::get(); + + Self::claim_queue_iterator().zip(availability_cores.into_iter()).filter_map( + |((core_idx, queue), core)| { + if core != CoreOccupied::Free { + return None + } + let next_scheduled = queue.front()?; + Some((core_idx, next_scheduled.assignment.para_id())) + }, + ) + } + #[cfg(any(feature = "try-runtime", test))] - fn claimqueue_len() -> usize { + fn claim_queue_len() -> usize { ClaimQueue::::get().iter().map(|la_vec| la_vec.1.len()).sum() } #[cfg(all(not(feature = "runtime-benchmarks"), test))] - pub(crate) fn claimqueue_is_empty() -> bool { - Self::claimqueue_len() == 0 + pub(crate) fn claim_queue_is_empty() -> bool { + Self::claim_queue_len() == 0 } #[cfg(test)] @@ -676,7 +725,7 @@ impl Pallet { } #[cfg(test)] - pub(crate) fn set_claimqueue(claimqueue: BTreeMap>>) { - ClaimQueue::::set(claimqueue); + pub(crate) fn set_claim_queue(claim_queue: BTreeMap>>) { + ClaimQueue::::set(claim_queue); } } diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs index 
57f4fd670fbe..84d7d4b56710 100644 --- a/polkadot/runtime/parachains/src/scheduler/migration.rs +++ b/polkadot/runtime/parachains/src/scheduler/migration.rs @@ -248,7 +248,7 @@ mod v1 { .count(); ensure!( - Pallet::::claimqueue_len() as u32 + availability_cores_waiting as u32 == + Pallet::::claim_queue_len() as u32 + availability_cores_waiting as u32 == expected_len, "ClaimQueue and AvailabilityCores should have the correct length", ); diff --git a/polkadot/runtime/parachains/src/scheduler/tests.rs b/polkadot/runtime/parachains/src/scheduler/tests.rs index 74ad8adf00c4..32811241e171 100644 --- a/polkadot/runtime/parachains/src/scheduler/tests.rs +++ b/polkadot/runtime/parachains/src/scheduler/tests.rs @@ -80,7 +80,7 @@ fn run_to_block( Scheduler::initializer_initialize(b + 1); // In the real runtime this is expected to be called by the `InclusionInherent` pallet. - Scheduler::free_cores_and_fill_claimqueue(BTreeMap::new(), b + 1); + Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1); } } @@ -158,6 +158,37 @@ fn scheduled_entries() -> impl Iterator(vec![para_id])); // Claim is dropped post call. - Scheduler::drop_expired_claims_from_claimqueue(); + Scheduler::drop_expired_claims_from_claim_queue(); assert!(!claimqueue_contains_para_ids::(vec![para_id])); // Add a claim on core 0 with a ttl in the future (15). let paras_entry = ParasEntry::new(Assignment::Bulk(para_id), now + 5); - Scheduler::add_to_claimqueue(core_idx, paras_entry.clone()); + Scheduler::add_to_claim_queue(core_idx, paras_entry.clone()); // Claim is in queue post call. 
- Scheduler::drop_expired_claims_from_claimqueue(); + Scheduler::drop_expired_claims_from_claim_queue(); assert!(claimqueue_contains_para_ids::(vec![para_id])); now = now + 6; run_to_block(now, |_| None); // Claim is dropped - Scheduler::drop_expired_claims_from_claimqueue(); + Scheduler::drop_expired_claims_from_claim_queue(); assert!(!claimqueue_contains_para_ids::(vec![para_id])); // Add a claim on core 0 with a ttl == now (16) let paras_entry = ParasEntry::new(Assignment::Bulk(para_id), now); - Scheduler::add_to_claimqueue(core_idx, paras_entry.clone()); + Scheduler::add_to_claim_queue(core_idx, paras_entry.clone()); // Claim is in queue post call. - Scheduler::drop_expired_claims_from_claimqueue(); + Scheduler::drop_expired_claims_from_claim_queue(); assert!(claimqueue_contains_para_ids::(vec![para_id])); now = now + 1; run_to_block(now, |_| None); // Drop expired claim. - Scheduler::drop_expired_claims_from_claimqueue(); + Scheduler::drop_expired_claims_from_claim_queue(); assert!(!claimqueue_contains_para_ids::(vec![para_id])); // Add a claim on core 0 with a ttl == now (17) let paras_entry_non_expired = ParasEntry::new(Assignment::Bulk(para_id), now); let paras_entry_expired = ParasEntry::new(Assignment::Bulk(para_id), now - 2); // ttls = [17, 15, 17] - Scheduler::add_to_claimqueue(core_idx, paras_entry_non_expired.clone()); - Scheduler::add_to_claimqueue(core_idx, paras_entry_expired.clone()); - Scheduler::add_to_claimqueue(core_idx, paras_entry_non_expired.clone()); + Scheduler::add_to_claim_queue(core_idx, paras_entry_non_expired.clone()); + Scheduler::add_to_claim_queue(core_idx, paras_entry_expired.clone()); + Scheduler::add_to_claim_queue(core_idx, paras_entry_non_expired.clone()); let cq = scheduler::ClaimQueue::::get(); assert_eq!(cq.get(&core_idx).unwrap().len(), 3); @@ -231,7 +262,7 @@ fn claimqueue_ttl_drop_fn_works() { MockAssigner::add_test_assignment(assignment.clone()); // Drop expired claim. 
- Scheduler::drop_expired_claims_from_claimqueue(); + Scheduler::drop_expired_claims_from_claim_queue(); let cq = scheduler::ClaimQueue::::get(); let cqc = cq.get(&core_idx).unwrap(); @@ -378,7 +409,7 @@ fn fill_claimqueue_fills() { run_to_block(2, |_| None); { - assert_eq!(Scheduler::claimqueue_len(), 3); + assert_eq!(Scheduler::claim_queue_len(), 3); let scheduled: BTreeMap<_, _> = scheduled_entries().collect(); // Was added a block later, note the TTL. @@ -488,9 +519,8 @@ fn schedule_schedules_including_just_freed() { .for_each(|(_core_idx, core_queue)| assert_eq!(core_queue.len(), 0)) } - // add a couple more para claims - the claim on `b` will go to the 3rd core - // (2) and the claim on `d` will go back to the 1st para core (0). The claim on `e` - // then will go for core `1`. + MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_c.clone()); MockAssigner::add_test_assignment(assignment_b.clone()); MockAssigner::add_test_assignment(assignment_d.clone()); MockAssigner::add_test_assignment(assignment_e.clone()); @@ -500,8 +530,7 @@ fn schedule_schedules_including_just_freed() { { let scheduled: BTreeMap<_, _> = scheduled_entries().collect(); - // cores 0 and 1 are occupied by claims. core 2 was free. 
- assert_eq!(scheduled.len(), 1); + assert_eq!(scheduled.len(), 3); assert_eq!( scheduled.get(&CoreIndex(2)).unwrap(), &ParasEntry { @@ -519,7 +548,7 @@ fn schedule_schedules_including_just_freed() { ] .into_iter() .collect(); - Scheduler::free_cores_and_fill_claimqueue(just_updated, now); + Scheduler::free_cores_and_fill_claim_queue(just_updated, now); { let scheduled: BTreeMap<_, _> = scheduled_entries().collect(); @@ -529,17 +558,28 @@ fn schedule_schedules_including_just_freed() { assert_eq!( scheduled.get(&CoreIndex(0)).unwrap(), &ParasEntry { - assignment: Assignment::Bulk(para_d), + // Next entry in queue is `a` again: + assignment: Assignment::Bulk(para_a), availability_timeouts: 0, ttl: 8 }, ); // Although C was descheduled, the core `2` was occupied so C goes back to the queue. + assert_eq!( + scheduler::ClaimQueue::::get()[&CoreIndex(1)][1], + ParasEntry { + assignment: Assignment::Bulk(para_c), + // End of the queue should be the pushed back entry: + availability_timeouts: 1, + // ttl 1 higher: + ttl: 9 + }, + ); assert_eq!( scheduled.get(&CoreIndex(1)).unwrap(), &ParasEntry { assignment: Assignment::Bulk(para_c), - availability_timeouts: 1, + availability_timeouts: 0, ttl: 8 }, ); @@ -552,8 +592,6 @@ fn schedule_schedules_including_just_freed() { }, ); - // Para A claim should have been wiped, but para C claim should remain. - assert!(!claimqueue_contains_para_ids::(vec![para_a])); assert!(claimqueue_contains_para_ids::(vec![para_c])); assert!(!availability_cores_contains_para_ids::(vec![para_a, para_c])); } @@ -627,12 +665,13 @@ fn schedule_clears_availability_cores() { // Add more assignments MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_b.clone()); MockAssigner::add_test_assignment(assignment_c.clone()); run_to_block(3, |_| None); // now note that cores 0 and 2 were freed. 
- Scheduler::free_cores_and_fill_claimqueue( + Scheduler::free_cores_and_fill_claim_queue( vec![(CoreIndex(0), FreedReason::Concluded), (CoreIndex(2), FreedReason::Concluded)] .into_iter() .collect::>(), @@ -807,7 +846,7 @@ fn on_demand_claims_are_pruned_after_timing_out() { ] .into_iter() .collect(); - Scheduler::free_cores_and_fill_claimqueue(just_updated, now); + Scheduler::free_cores_and_fill_claim_queue(just_updated, now); // ParaId a exists in the claim queue until max_retries is reached. if n < max_timeouts + now { @@ -854,7 +893,7 @@ fn on_demand_claims_are_pruned_after_timing_out() { } } - Scheduler::free_cores_and_fill_claimqueue(just_updated, now); + Scheduler::free_cores_and_fill_claim_queue(just_updated, now); // ParaId a exists in the claim queue until groups are rotated. if n < 31 { @@ -943,12 +982,12 @@ fn next_up_on_available_uses_next_scheduled_or_none() { ttl: 5 as u32, }; - Scheduler::add_to_claimqueue(CoreIndex(0), entry_a.clone()); + Scheduler::add_to_claim_queue(CoreIndex(0), entry_a.clone()); run_to_block(2, |_| None); { - assert_eq!(Scheduler::claimqueue_len(), 1); + assert_eq!(Scheduler::claim_queue_len(), 1); assert_eq!(scheduler::AvailabilityCores::::get().len(), 1); let mut map = BTreeMap::new(); @@ -963,7 +1002,7 @@ fn next_up_on_available_uses_next_scheduled_or_none() { assert!(Scheduler::next_up_on_available(CoreIndex(0)).is_none()); - Scheduler::add_to_claimqueue(CoreIndex(0), entry_b); + Scheduler::add_to_claim_queue(CoreIndex(0), entry_b); assert_eq!( Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), @@ -1032,7 +1071,7 @@ fn next_up_on_time_out_reuses_claim_if_nothing_queued() { MockAssigner::add_test_assignment(assignment_b.clone()); // Pop assignment_b into the claimqueue - Scheduler::free_cores_and_fill_claimqueue(BTreeMap::new(), 2); + Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), 2); //// Now that there is an earlier next-up, we use that. 
assert_eq!( @@ -1113,7 +1152,7 @@ fn session_change_requires_reschedule_dropping_removed_paras() { _ => None, }); - Scheduler::free_cores_and_fill_claimqueue(BTreeMap::new(), 3); + Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), 3); assert_eq!( scheduler::ClaimQueue::::get(), @@ -1161,7 +1200,7 @@ fn session_change_requires_reschedule_dropping_removed_paras() { let groups = ValidatorGroups::::get(); assert_eq!(groups.len(), 5); - Scheduler::free_cores_and_fill_claimqueue(BTreeMap::new(), 4); + Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), 4); assert_eq!( scheduler::ClaimQueue::::get(), diff --git a/prdoc/pr_4691.prdoc b/prdoc/pr_4691.prdoc new file mode 100644 index 000000000000..18cbb2296d43 --- /dev/null +++ b/prdoc/pr_4691.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix claim queue size + +doc: + - audience: Runtime User + description: | + Ensure claim queue size is always the number configured by ` scheduler_params.lookahead`. Previously the claim queue of a core was shortened by 1 if the core was occupied. 
+ + +crates: + - name: polkadot-runtime-parachains + bump: minor From 9bb1f3f98a99588c88b3a2bcba29f7efaed3772d Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Fri, 7 Jun 2024 19:09:43 +0800 Subject: [PATCH 11/52] Frame Pallets: Clean a lot of test setups (#4642) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Screenshot 2024-05-30 at 10 30 41 --------- Co-authored-by: Dónal Murray --- bridges/modules/beefy/src/mock.rs | 2 +- .../pallets/inbound-queue/src/mock.rs | 17 ++--------- .../pallets/collator-selection/src/mock.rs | 26 +---------------- .../frame/conviction-voting/src/tests.rs | 1 + .../multi-block-migrations/src/mock.rs | 2 +- substrate/frame/fast-unstake/src/mock.rs | 19 +----------- substrate/frame/im-online/src/mock.rs | 24 +-------------- substrate/frame/indices/src/mock.rs | 24 +-------------- .../nomination-pools/benchmarking/src/mock.rs | 20 ------------- substrate/frame/nomination-pools/src/mock.rs | 18 ------------ .../test-delegate-stake/src/mock.rs | 18 ------------ .../test-transfer-stake/src/mock.rs | 20 ------------- .../frame/offences/benchmarking/src/mock.rs | 23 --------------- substrate/frame/offences/src/mock.rs | 24 ++------------- substrate/frame/paged-list/src/mock.rs | 25 ++-------------- .../frame/session/benchmarking/src/mock.rs | 18 ------------ substrate/frame/sudo/src/mock.rs | 1 + substrate/frame/system/benches/bench.rs | 28 ++---------------- .../frame/system/benchmarking/src/mock.rs | 27 +---------------- .../asset-conversion-tx-payment/src/mock.rs | 19 +----------- .../asset-tx-payment/src/mock.rs | 22 +------------- .../frame/transaction-payment/src/mock.rs | 27 ++--------------- substrate/frame/tx-pause/src/mock.rs | 25 +--------------- substrate/test-utils/runtime/src/lib.rs | 17 ----------- .../parachain/pallets/template/src/mock.rs | 29 ++----------------- .../solochain/pallets/template/src/mock.rs | 29 ++----------------- 26 files 
changed, 26 insertions(+), 479 deletions(-) diff --git a/bridges/modules/beefy/src/mock.rs b/bridges/modules/beefy/src/mock.rs index c99566b6b06d..53efd57c29a0 100644 --- a/bridges/modules/beefy/src/mock.rs +++ b/bridges/modules/beefy/src/mock.rs @@ -66,7 +66,7 @@ construct_runtime! { } } -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for TestRuntime { type Block = Block; } diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index 05481ca2f6b4..a842f9aa60cb 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -2,11 +2,7 @@ // SPDX-FileCopyrightText: 2023 Snowfork use super::*; -use frame_support::{ - derive_impl, parameter_types, - traits::{ConstU32, Everything}, - weights::IdentityFee, -}; +use frame_support::{derive_impl, parameter_types, traits::ConstU32, weights::IdentityFee}; use hex_literal::hex; use snowbridge_beacon_primitives::{ types::deneb, BeaconHeader, ExecutionProof, Fork, ForkVersions, VersionedExecutionPayloadHeader, @@ -19,7 +15,7 @@ use snowbridge_core::{ use snowbridge_router_primitives::inbound::MessageToXcm; use sp_core::{H160, H256}; use sp_runtime::{ - traits::{BlakeTwo256, IdentifyAccount, IdentityLookup, Verify}, + traits::{IdentifyAccount, IdentityLookup, Verify}, BuildStorage, FixedU128, MultiSignature, }; use sp_std::{convert::From, default::Default}; @@ -47,18 +43,9 @@ type Balance = u128; #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = Everything; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type RuntimeTask = RuntimeTask; - type Hash = H256; - type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; - type RuntimeEvent = 
RuntimeEvent; - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type Nonce = u64; type Block = Block; } diff --git a/cumulus/pallets/collator-selection/src/mock.rs b/cumulus/pallets/collator-selection/src/mock.rs index 196184d62781..6521c954eac2 100644 --- a/cumulus/pallets/collator-selection/src/mock.rs +++ b/cumulus/pallets/collator-selection/src/mock.rs @@ -22,12 +22,7 @@ use frame_support::{ }; use frame_system as system; use frame_system::EnsureSignedBy; -use sp_core::H256; -use sp_runtime::{ - testing::UintAuthorityId, - traits::{BlakeTwo256, IdentityLookup, OpaqueKeys}, - BuildStorage, RuntimeAppPublic, -}; +use sp_runtime::{testing::UintAuthorityId, traits::OpaqueKeys, BuildStorage, RuntimeAppPublic}; type Block = frame_system::mocking::MockBlock; @@ -51,28 +46,9 @@ parameter_types! { #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); type SS58Prefix = SS58Prefix; - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } parameter_types! 
{ diff --git a/substrate/frame/conviction-voting/src/tests.rs b/substrate/frame/conviction-voting/src/tests.rs index 74baeace898b..0e985e25290f 100644 --- a/substrate/frame/conviction-voting/src/tests.rs +++ b/substrate/frame/conviction-voting/src/tests.rs @@ -49,6 +49,7 @@ impl Contains for BaseFilter { #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { + type BaseCallFilter = BaseFilter; type Block = Block; type AccountData = pallet_balances::AccountData; } diff --git a/substrate/frame/examples/multi-block-migrations/src/mock.rs b/substrate/frame/examples/multi-block-migrations/src/mock.rs index 9da1d2051fa1..b2a946e1c505 100644 --- a/substrate/frame/examples/multi-block-migrations/src/mock.rs +++ b/substrate/frame/examples/multi-block-migrations/src/mock.rs @@ -59,7 +59,7 @@ impl pallet_migrations::Config for Runtime { type MaxServiceWeight = MigratorServiceWeight; } -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type Block = Block; type MultiBlockMigrator = Migrator; diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs index d876f9f6171e..9238a085141d 100644 --- a/substrate/frame/fast-unstake/src/mock.rs +++ b/substrate/frame/fast-unstake/src/mock.rs @@ -23,10 +23,7 @@ use frame_support::{ traits::{ConstU64, Currency}, weights::constants::WEIGHT_REF_TIME_PER_SECOND, }; -use sp_runtime::{ - traits::{Convert, IdentityLookup}, - BuildStorage, -}; +use sp_runtime::{traits::IdentityLookup, BuildStorage}; use pallet_staking::{Exposure, IndividualExposure, StakerStatus}; use sp_std::prelude::*; @@ -147,20 +144,6 @@ impl pallet_staking::Config for Runtime { type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } -pub struct BalanceToU256; -impl Convert for BalanceToU256 { - fn convert(n: Balance) -> 
sp_core::U256 { - n.into() - } -} - -pub struct U256ToBalance; -impl Convert for U256ToBalance { - fn convert(n: sp_core::U256) -> Balance { - n.try_into().unwrap() - } -} - parameter_types! { pub static Deposit: u128 = 7; pub static BatchSize: u32 = 1; diff --git a/substrate/frame/im-online/src/mock.rs b/substrate/frame/im-online/src/mock.rs index 2aff9a0e26df..882581702ea1 100644 --- a/substrate/frame/im-online/src/mock.rs +++ b/substrate/frame/im-online/src/mock.rs @@ -25,10 +25,9 @@ use frame_support::{ weights::Weight, }; use pallet_session::historical as pallet_session_historical; -use sp_core::H256; use sp_runtime::{ testing::{TestXt, UintAuthorityId}, - traits::{BlakeTwo256, ConvertInto, IdentityLookup}, + traits::ConvertInto, BuildStorage, Permill, }; use sp_staking::{ @@ -114,28 +113,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } parameter_types! 
{ diff --git a/substrate/frame/indices/src/mock.rs b/substrate/frame/indices/src/mock.rs index 87b8d79a7f83..7a8ff98f6d4a 100644 --- a/substrate/frame/indices/src/mock.rs +++ b/substrate/frame/indices/src/mock.rs @@ -20,11 +20,7 @@ #![cfg(test)] use crate::{self as pallet_indices, Config}; -use frame_support::{ - derive_impl, - traits::{ConstU32, ConstU64}, -}; -use sp_core::H256; +use frame_support::{derive_impl, traits::ConstU64}; use sp_runtime::BuildStorage; type Block = frame_system::mocking::MockBlock; @@ -40,28 +36,10 @@ frame_support::construct_runtime!( #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; type Nonce = u64; - type Hash = H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; - type AccountId = u64; type Lookup = Indices; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } impl pallet_balances::Config for Test { diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs index def98b4d2945..7cbb61e00a31 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs @@ -24,35 +24,15 @@ use sp_runtime::{ }; type AccountId = u128; -type Nonce = u32; type BlockNumber = u64; type Balance = u128; #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - 
type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Nonce = Nonce; - type RuntimeCall = RuntimeCall; - type Hash = sp_core::H256; - type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = (); - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } impl pallet_timestamp::Config for Runtime { diff --git a/substrate/frame/nomination-pools/src/mock.rs b/substrate/frame/nomination-pools/src/mock.rs index b659c975a839..93fe6aa56054 100644 --- a/substrate/frame/nomination-pools/src/mock.rs +++ b/substrate/frame/nomination-pools/src/mock.rs @@ -240,29 +240,11 @@ impl sp_staking::StakingInterface for StakingMock { #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { - type SS58Prefix = (); - type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = sp_core::H256; - type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = (); - type DbWeight = (); - type BlockLength = (); - type BlockWeights = (); - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } parameter_types! 
{ diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs index 501823263598..820f2b7718ce 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs @@ -45,29 +45,11 @@ pub(crate) const POOL1_REWARD: AccountId = 20397359637244482196168876781421u128; #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; type Nonce = Nonce; - type RuntimeCall = RuntimeCall; - type Hash = sp_core::H256; - type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = (); - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } impl pallet_timestamp::Config for Runtime { diff --git a/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs b/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs index 0970570453b4..eb9d463424c8 100644 --- a/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs +++ b/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs @@ -29,7 +29,6 @@ use sp_runtime::{ }; type AccountId = u128; -type Nonce = u32; type BlockNumber = u64; type Balance = u128; @@ -40,29 +39,10 @@ pub(crate) const POOL1_REWARD: AccountId = 20397359637244482196168876781421u128; #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter 
= frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Nonce = Nonce; - type RuntimeCall = RuntimeCall; - type Hash = sp_core::H256; - type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = (); - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } impl pallet_timestamp::Config for Runtime { diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index eeaa1364504a..6cbdde578528 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -29,39 +29,16 @@ use frame_system as system; use pallet_session::historical as pallet_session_historical; use sp_runtime::{ testing::{Header, UintAuthorityId}, - traits::IdentityLookup, BuildStorage, Perbill, }; type AccountId = u64; -type Nonce = u32; type Balance = u64; #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Nonce = Nonce; - type RuntimeCall = RuntimeCall; - type Hash = sp_core::H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = (); - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type 
OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } impl pallet_balances::Config for Test { diff --git a/substrate/frame/offences/src/mock.rs b/substrate/frame/offences/src/mock.rs index 1725f4158d33..6796837637ae 100644 --- a/substrate/frame/offences/src/mock.rs +++ b/substrate/frame/offences/src/mock.rs @@ -27,11 +27,7 @@ use frame_support::{ traits::ConstU32, weights::{constants::RocksDbWeight, Weight}, }; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, Perbill, -}; +use sp_runtime::{traits::IdentityLookup, BuildStorage, Perbill}; use sp_staking::{ offence::{self, Kind, OffenceDetails}, SessionIndex, @@ -75,27 +71,11 @@ frame_support::construct_runtime!( #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = RocksDbWeight; - type RuntimeOrigin = RuntimeOrigin; type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); + type DbWeight = RocksDbWeight; type MaxConsumers = ConstU32<16>; } diff --git a/substrate/frame/paged-list/src/mock.rs b/substrate/frame/paged-list/src/mock.rs index e086b4ba2b27..3e4903200c3d 100644 --- a/substrate/frame/paged-list/src/mock.rs +++ b/substrate/frame/paged-list/src/mock.rs @@ -20,12 +20,8 @@ #![cfg(feature = "std")] use crate::{paged_list::StoragePagedListMeta, Config, ListPrefix}; -use frame_support::{derive_impl, traits::ConstU16}; -use 
sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, -}; +use frame_support::derive_impl; +use sp_runtime::{traits::IdentityLookup, BuildStorage}; type Block = frame_system::mocking::MockBlock; @@ -40,28 +36,11 @@ frame_support::construct_runtime!( #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = ConstU16<42>; - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } frame_support::parameter_types! 
{ diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 6cefa8f39a8c..5cba79ef5b9a 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -47,29 +47,11 @@ frame_support::construct_runtime!( #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; type Nonce = Nonce; - type RuntimeCall = RuntimeCall; - type Hash = sp_core::H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = (); - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } impl pallet_balances::Config for Test { diff --git a/substrate/frame/sudo/src/mock.rs b/substrate/frame/sudo/src/mock.rs index a3a786c4af39..67f896e1c021 100644 --- a/substrate/frame/sudo/src/mock.rs +++ b/substrate/frame/sudo/src/mock.rs @@ -106,6 +106,7 @@ impl Contains for BlockEverything { #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { + type BaseCallFilter = BlockEverything; type Block = Block; } diff --git a/substrate/frame/system/benches/bench.rs b/substrate/frame/system/benches/bench.rs index b3029630409f..1b0f459c9792 100644 --- a/substrate/frame/system/benches/bench.rs +++ b/substrate/frame/system/benches/bench.rs @@ -16,12 +16,8 @@ // limitations under the License. 
use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use frame_support::{derive_impl, traits::ConstU32}; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, Perbill, -}; +use frame_support::derive_impl; +use sp_runtime::{BuildStorage, Perbill}; #[frame_support::pallet] mod module { use frame_support::pallet_prelude::*; @@ -59,28 +55,8 @@ frame_support::parameter_types! { #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = BlockLength; - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } impl module::Config for Runtime { diff --git a/substrate/frame/system/benchmarking/src/mock.rs b/substrate/frame/system/benchmarking/src/mock.rs index 39a64ff6177c..42e4aa0eaf4b 100644 --- a/substrate/frame/system/benchmarking/src/mock.rs +++ b/substrate/frame/system/benchmarking/src/mock.rs @@ -21,10 +21,7 @@ use codec::Encode; use frame_support::derive_impl; -use sp_runtime::{traits::IdentityLookup, BuildStorage}; - -type AccountId = u64; -type Nonce = u32; +use sp_runtime::BuildStorage; type Block = frame_system::mocking::MockBlock; @@ -37,29 +34,7 @@ frame_support::construct_runtime!( #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type 
DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Nonce = Nonce; - type RuntimeCall = RuntimeCall; - type Hash = sp_core::H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = (); - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } impl crate::Config for Test {} diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs index 0cafb35d52e1..cc43cffd7deb 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs @@ -38,9 +38,8 @@ use frame_system as system; use frame_system::{EnsureRoot, EnsureSignedBy}; use pallet_asset_conversion::{Ascending, Chain, WithFirstAsset}; use pallet_transaction_payment::FungibleAdapter; -use sp_core::H256; use sp_runtime::{ - traits::{AccountIdConversion, BlakeTwo256, IdentityLookup, SaturatedConversion}, + traits::{AccountIdConversion, IdentityLookup, SaturatedConversion}, Permill, }; @@ -87,28 +86,12 @@ parameter_types! 
{ #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } parameter_types! { diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs index f27fcd53fecd..fce712c3eba3 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs @@ -29,8 +29,7 @@ use frame_support::{ use frame_system as system; use frame_system::EnsureRoot; use pallet_transaction_payment::FungibleAdapter; -use sp_core::H256; -use sp_runtime::traits::{BlakeTwo256, ConvertInto, IdentityLookup, SaturatedConversion}; +use sp_runtime::traits::{ConvertInto, SaturatedConversion}; type Block = frame_system::mocking::MockBlock; type Balance = u64; @@ -73,28 +72,9 @@ parameter_types! 
{ #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } parameter_types! { diff --git a/substrate/frame/transaction-payment/src/mock.rs b/substrate/frame/transaction-payment/src/mock.rs index 1ef95128f2a8..7b731eeb8250 100644 --- a/substrate/frame/transaction-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/src/mock.rs @@ -17,15 +17,11 @@ use super::*; use crate as pallet_transaction_payment; - -use sp_core::H256; -use sp_runtime::traits::{BlakeTwo256, IdentityLookup}; - use frame_support::{ derive_impl, dispatch::DispatchClass, parameter_types, - traits::{fungible, ConstU32, ConstU64, Imbalance, OnUnbalanced}, + traits::{fungible, ConstU64, Imbalance, OnUnbalanced}, weights::{Weight, WeightToFee as WeightToFeeT}, }; use frame_system as system; @@ -72,28 +68,9 @@ parameter_types! 
{ #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; + type AccountData = pallet_balances::AccountData; } impl pallet_balances::Config for Runtime { diff --git a/substrate/frame/tx-pause/src/mock.rs b/substrate/frame/tx-pause/src/mock.rs index 7245fe7d5d72..f42d4cb58a2a 100644 --- a/substrate/frame/tx-pause/src/mock.rs +++ b/substrate/frame/tx-pause/src/mock.rs @@ -27,36 +27,13 @@ use frame_support::{ traits::{ConstU64, Everything, InsideBoth, InstanceFilter}, }; use frame_system::EnsureSignedBy; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, -}; +use sp_runtime::{traits::BlakeTwo256, BuildStorage}; #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = InsideBoth; - type BlockWeights = (); - type BlockLength = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; type Block = Block; - type DbWeight = (); - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - 
type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } parameter_types! { diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index ab87db0e7006..0aab6d3f01ca 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -355,29 +355,12 @@ parameter_types! { #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::pallet::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = RuntimeBlockWeights; - type BlockLength = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; type Nonce = Nonce; - type Hash = H256; - type Hashing = Hashing; type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<2400>; - type DbWeight = (); - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } pub mod currency { diff --git a/templates/parachain/pallets/template/src/mock.rs b/templates/parachain/pallets/template/src/mock.rs index 9a907f616605..ebb0598df97b 100644 --- a/templates/parachain/pallets/template/src/mock.rs +++ b/templates/parachain/pallets/template/src/mock.rs @@ -1,10 +1,6 @@ -use frame_support::{derive_impl, parameter_types, traits::Everything}; +use frame_support::{derive_impl, parameter_types}; use frame_system as system; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, -}; +use sp_runtime::BuildStorage; type Block = frame_system::mocking::MockBlock; @@ -23,28 +19,7 @@ parameter_types! 
{ #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl system::Config for Test { - type BaseCallFilter = Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = SS58Prefix; - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } impl crate::Config for Test { diff --git a/templates/solochain/pallets/template/src/mock.rs b/templates/solochain/pallets/template/src/mock.rs index 09081dae0625..0c2a247e802b 100644 --- a/templates/solochain/pallets/template/src/mock.rs +++ b/templates/solochain/pallets/template/src/mock.rs @@ -1,10 +1,6 @@ use crate as pallet_template; -use frame_support::{derive_impl, traits::ConstU16}; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, -}; +use frame_support::derive_impl; +use sp_runtime::BuildStorage; type Block = frame_system::mocking::MockBlock; @@ -19,28 +15,7 @@ frame_support::construct_runtime!( #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type 
OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = ConstU16<42>; - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } impl pallet_template::Config for Test { From d783ca9d9bfb42ae938f8d4ce9899b6aa3cc00c6 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Fri, 7 Jun 2024 19:26:52 +0800 Subject: [PATCH 12/52] New reference doc for Custom RPC V2 (#4654) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Thanks for @xlc for the original seed info, I've just fixed it up a bit and added example links. I've moved the comparison between eth-rpc-api and frontier outside, as it is opinionation. I think the content there was good but should live in the README of the corresponding repos. No strong opinion, happy either way. --------- Co-authored-by: Bryan Chen Co-authored-by: Bastian Köcher Co-authored-by: Gonçalo Pestana Co-authored-by: command-bot <> --- Cargo.lock | 3 + .../reference_docs/custom_runtime_api_rpc.rs | 77 +++++++++++++++++++ docs/sdk/src/reference_docs/mod.rs | 3 + .../frame/system/rpc/runtime-api/Cargo.toml | 1 + .../frame/system/rpc/runtime-api/src/lib.rs | 1 + substrate/utils/frame/rpc/system/Cargo.toml | 9 ++- substrate/utils/frame/rpc/system/src/lib.rs | 1 + templates/minimal/node/Cargo.toml | 1 + templates/minimal/node/src/rpc.rs | 3 +- 9 files changed, 96 insertions(+), 3 deletions(-) create mode 100644 docs/sdk/src/reference_docs/custom_runtime_api_rpc.rs diff --git a/Cargo.lock b/Cargo.lock index 9ef971b4be93..a01420bc3ef6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6055,6 +6055,7 @@ dependencies = [ name = "frame-system-rpc-runtime-api" version = "26.0.0" dependencies = [ + "docify", "parity-scale-codec", "sp-api", ] @@ -8372,6 +8373,7 @@ name = "minimal-template-node" version = "0.0.0" dependencies = [ "clap 4.5.3", + "docify", "futures", "futures-timer", "jsonrpsee", @@ -20961,6 +20963,7 @@ name = 
"substrate-frame-rpc-system" version = "28.0.0" dependencies = [ "assert_matches", + "docify", "frame-system-rpc-runtime-api", "futures", "jsonrpsee", diff --git a/docs/sdk/src/reference_docs/custom_runtime_api_rpc.rs b/docs/sdk/src/reference_docs/custom_runtime_api_rpc.rs new file mode 100644 index 000000000000..83a70606cb8d --- /dev/null +++ b/docs/sdk/src/reference_docs/custom_runtime_api_rpc.rs @@ -0,0 +1,77 @@ +//! # Custom RPC do's and don'ts +//! +//! **TLDR:** don't create new custom RPCs. Instead, rely on custom Runtime APIs, combined with +//! `state_call` +//! +//! ## Background +//! +//! Polkadot-SDK offers the ability to query and subscribe storages directly. However what it does +//! not have is [view functions](https://github.com/paritytech/polkadot-sdk/issues/216). This is an +//! essential feature to avoid duplicated logic between runtime and the client SDK. Custom RPC was +//! used as a solution. It allow the RPC node to expose new RPCs that clients can be used to query +//! computed properties. +//! +//! ## Problems with Custom RPC +//! +//! Unfortunately, custom RPC comes with many problems. To list a few: +//! +//! - It is offchain logic executed by the RPC node and therefore the client has to trust the RPC +//! node. +//! - To upgrade or add a new RPC logic, the RPC node has to be upgraded. This can cause significant +//! trouble when the RPC infrastructure is decentralized as we will need to coordinate multiple +//! parties to upgrade the RPC nodes. +//! - A lot of boilerplate code are required to add custom RPC. +//! - It prevents the dApp to use a light client or alternative client. +//! - It makes ecosystem tooling integration much more complicated. For example, the dApp will not +//! be able to use [Chopsticks](https://github.com/AcalaNetwork/chopsticks) for testing as +//! Chopsticks will not have the custom RPC implementation. +//! - Poorly implemented custom RPC can be a DoS vector. +//! +//! Hence, we should avoid custom RPC. +//! 
+//! ## Alternatives +//! +//! Generally, [`sc_rpc::state::StateBackend::call`] aka. `state_call` should be used instead of +//! custom RPC. +//! +//! Usually, each custom RPC comes with a corresponding runtime API which implements the business +//! logic. So instead of invoke the custom RPC, we can use `state_call` to invoke the runtime API +//! directly. This is a trivial change on the dApp and no change on the runtime side. We may remove +//! the custom RPC from the node side if wanted. +//! +//! There are some other cases that a simple runtime API is not enough. For example, implementation +//! of Ethereum RPC requires an additional offchain database to index transactions. In this +//! particular case, we can have the RPC implemented on another client. +//! +//! For example, the Acala EVM+ RPC are implemented by +//! [eth-rpc-adapter](https://github.com/AcalaNetwork/bodhi.js/tree/master/packages/eth-rpc-adapter). +//! Alternatively, the [Frontier](https://github.com/polkadot-evm/frontier) project also provided +//! Ethereum RPC compatibility directly in the node-side software. +//! +//! ## Create a new Runtime API +//! +//! For example, let's take a look a the process through which the account nonce can be queried +//! through an RPC. First, a new runtime-api needs to be declared: +#![doc = docify::embed!("../../substrate/frame/system/rpc/runtime-api/src/lib.rs", AccountNonceApi)] +//! +//! This API is implemented at the runtime level, always inside [`sp_api::impl_runtime_apis!`]. +//! +//! As noted, this is already enough to make this API usable via `state_call`. +//! +//! ## Create a new custom RPC (Legacy) +//! +//! Should you wish to implement the legacy approach of exposing this runtime-api as a custom +//! RPC-api, then a custom RPC server has to be defined. +#![doc = docify::embed!("../../substrate/utils/frame/rpc/system/src/lib.rs", SystemApi)] +//! +//! ## Add a new RPC to the node (Legacy) +//! +//! 
Finally, this custom RPC needs to be integrated into the node side. This is usually done in a +//! `rpc.rs` in a typical template, as follows: +#![doc = docify::embed!("../../templates/minimal/node/src/rpc.rs", create_full)] +//! +//! ## Future +//! +//! - [XCQ](https://forum.polkadot.network/t/cross-consensus-query-language-xcq/7583) will be a good +//! solution for most of the query needs. +//! - [New JSON-RPC Specification](https://github.com/paritytech/json-rpc-interface-spec) diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index e50690b50212..8e0431c48b6f 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -108,3 +108,6 @@ pub mod frame_pallet_coupling; /// Learn about the Polkadot Umbrella crate that re-exports all other crates. pub mod umbrella_crate; + +/// Learn about how to create custom RPC endpoints and runtime APIs. +pub mod custom_runtime_api_rpc; diff --git a/substrate/frame/system/rpc/runtime-api/Cargo.toml b/substrate/frame/system/rpc/runtime-api/Cargo.toml index b134cc3b6173..8b71ca2a1395 100644 --- a/substrate/frame/system/rpc/runtime-api/Cargo.toml +++ b/substrate/frame/system/rpc/runtime-api/Cargo.toml @@ -18,6 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } sp-api = { path = "../../../../primitives/api", default-features = false } +docify = "0.2.0" [features] default = ["std"] diff --git a/substrate/frame/system/rpc/runtime-api/src/lib.rs b/substrate/frame/system/rpc/runtime-api/src/lib.rs index f59988d818f0..67adeb5cb9da 100644 --- a/substrate/frame/system/rpc/runtime-api/src/lib.rs +++ b/substrate/frame/system/rpc/runtime-api/src/lib.rs @@ -23,6 +23,7 @@ #![cfg_attr(not(feature = "std"), no_std)] +#[docify::export(AccountNonceApi)] sp_api::decl_runtime_apis! { /// The API to query account nonce. 
pub trait AccountNonceApi where diff --git a/substrate/utils/frame/rpc/system/Cargo.toml b/substrate/utils/frame/rpc/system/Cargo.toml index 6829d753ed71..75d24e8e210f 100644 --- a/substrate/utils/frame/rpc/system/Cargo.toml +++ b/substrate/utils/frame/rpc/system/Cargo.toml @@ -16,9 +16,14 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } futures = "0.3.30" +codec = { package = "parity-scale-codec", version = "3.6.12" } +docify = "0.2.0" +jsonrpsee = { version = "0.22.5", features = [ + "client-core", + "macros", + "server-core", +] } log = { workspace = true, default-features = true } frame-system-rpc-runtime-api = { path = "../../../../frame/system/rpc/runtime-api" } sc-rpc-api = { path = "../../../../client/rpc-api" } diff --git a/substrate/utils/frame/rpc/system/src/lib.rs b/substrate/utils/frame/rpc/system/src/lib.rs index bb0592599b2a..8cb7b785bc7c 100644 --- a/substrate/utils/frame/rpc/system/src/lib.rs +++ b/substrate/utils/frame/rpc/system/src/lib.rs @@ -37,6 +37,7 @@ use sp_runtime::{legacy, traits}; pub use frame_system_rpc_runtime_api::AccountNonceApi; /// System RPC methods. +#[docify::export] #[rpc(client, server)] pub trait SystemApi { /// Returns the next valid index (aka nonce) for given account. 
diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml index d07c7b6dd9b5..a10364a2854a 100644 --- a/templates/minimal/node/Cargo.toml +++ b/templates/minimal/node/Cargo.toml @@ -14,6 +14,7 @@ build = "build.rs" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +docify = "0.2.0" clap = { version = "4.5.3", features = ["derive"] } futures = { version = "0.3.30", features = ["thread-pool"] } futures-timer = "3.0.1" diff --git a/templates/minimal/node/src/rpc.rs b/templates/minimal/node/src/rpc.rs index d0c417a93d7a..4b283bb2a66f 100644 --- a/templates/minimal/node/src/rpc.rs +++ b/templates/minimal/node/src/rpc.rs @@ -27,7 +27,6 @@ use runtime::interface::{AccountId, Nonce, OpaqueBlock}; use sc_transaction_pool_api::TransactionPool; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use std::sync::Arc; -use substrate_frame_rpc_system::{System, SystemApiServer}; pub use sc_rpc_api::DenyUnsafe; @@ -41,6 +40,7 @@ pub struct FullDeps { pub deny_unsafe: DenyUnsafe, } +#[docify::export] /// Instantiate all full RPC extensions. 
pub fn create_full( deps: FullDeps, @@ -57,6 +57,7 @@ where C::Api: substrate_frame_rpc_system::AccountNonceApi, P: TransactionPool + 'static, { + use substrate_frame_rpc_system::{System, SystemApiServer}; let mut module = RpcModule::new(()); let FullDeps { client, pool, deny_unsafe } = deps; From c7697eaab5a537035c73c10cb46d0879d5a8b7d4 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Fri, 7 Jun 2024 16:13:49 +0200 Subject: [PATCH 13/52] Backport style changes from P<>K bridge to R<>W bridge (#4732) Closes: https://github.com/paritytech/parity-bridges-common/issues/2734 --- .../bridge-hub-rococo/src/tests/snowbridge.rs | 16 ++++++++-------- .../runtimes/assets/asset-hub-rococo/src/lib.rs | 6 ++---- .../assets/asset-hub-rococo/src/xcm_config.rs | 3 +-- .../runtimes/assets/asset-hub-westend/src/lib.rs | 6 ++---- .../assets/asset-hub-westend/src/xcm_config.rs | 3 +-- 5 files changed, 14 insertions(+), 20 deletions(-) diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index 1c1c51404aa4..8196b27cfe02 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -215,7 +215,7 @@ fn register_weth_token_from_ethereum_to_asset_hub() { // Construct RegisterToken message and sent to inbound queue let register_token_message = make_register_token_message(); - send_inbound_message(register_token_message.clone()).unwrap(); + assert_ok!(send_inbound_message(register_token_message.clone())); assert_expected_events!( BridgeHubRococo, @@ -250,10 +250,10 @@ fn send_token_from_ethereum_to_asset_hub() { type RuntimeEvent = ::RuntimeEvent; // Construct RegisterToken message and sent to inbound queue - 
send_inbound_message(make_register_token_message()).unwrap(); + assert_ok!(send_inbound_message(make_register_token_message())); // Construct SendToken message and sent to inbound queue - send_inbound_message(make_send_token_message()).unwrap(); + assert_ok!(send_inbound_message(make_send_token_message())); // Check that the message was sent assert_expected_events!( @@ -332,14 +332,14 @@ fn send_token_from_ethereum_to_penpal() { type RuntimeEvent = ::RuntimeEvent; // Construct RegisterToken message and sent to inbound queue - send_inbound_message(make_register_token_message()).unwrap(); + assert_ok!(send_inbound_message(make_register_token_message())); // Construct SendToken message to AssetHub(only for increase the nonce as the same order in // smoke test) - send_inbound_message(make_send_token_message()).unwrap(); + assert_ok!(send_inbound_message(make_send_token_message())); // Construct SendToken message and sent to inbound queue - send_inbound_message(make_send_token_to_penpal_message()).unwrap(); + assert_ok!(send_inbound_message(make_send_token_to_penpal_message())); assert_expected_events!( BridgeHubRococo, @@ -399,7 +399,7 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { type RuntimeEvent = ::RuntimeEvent; // Construct RegisterToken message and sent to inbound queue - send_inbound_message(make_register_token_message()).unwrap(); + assert_ok!(send_inbound_message(make_register_token_message())); // Check that the register token message was sent using xcm assert_expected_events!( @@ -410,7 +410,7 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { ); // Construct SendToken message and sent to inbound queue - send_inbound_message(make_send_token_message()).unwrap(); + assert_ok!(send_inbound_message(make_send_token_message())); // Check that the send token message was sent using xcm assert_expected_events!( diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 
2bf09e6a7843..d75b07bd2b9f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1644,10 +1644,8 @@ impl_runtime_apis! { } fn universal_alias() -> Result<(Location, Junction), BenchmarkError> { - match xcm_config::bridging::BridgingBenchmarksHelper::prepare_universal_alias() { - Some(alias) => Ok(alias), - None => Err(BenchmarkError::Skip) - } + xcm_config::bridging::BridgingBenchmarksHelper::prepare_universal_alias() + .ok_or(BenchmarkError::Skip) } fn transact_origin_and_runtime_call() -> Result<(Location, RuntimeCall), BenchmarkError> { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index 664d2b9c9dd5..cf5a3905e581 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -699,8 +699,7 @@ pub mod bridging { false => None, } }); - assert!(alias.is_some(), "we expect here BridgeHubRococo to Westend mapping at least"); - Some(alias.unwrap()) + Some(alias.expect("we expect here BridgeHubRococo to Westend mapping at least")) } } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index d9249cdfc482..e9c2b10f719d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1735,10 +1735,8 @@ impl_runtime_apis! 
{ } fn universal_alias() -> Result<(Location, Junction), BenchmarkError> { - match xcm_config::bridging::BridgingBenchmarksHelper::prepare_universal_alias() { - Some(alias) => Ok(alias), - None => Err(BenchmarkError::Skip) - } + xcm_config::bridging::BridgingBenchmarksHelper::prepare_universal_alias() + .ok_or(BenchmarkError::Skip) } fn transact_origin_and_runtime_call() -> Result<(Location, RuntimeCall), BenchmarkError> { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index 35a42627ad71..ff1fc99cba8a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -648,8 +648,7 @@ pub mod bridging { false => None, } }); - assert!(alias.is_some(), "we expect here BridgeHubWestend to Rococo mapping at least"); - Some(alias.unwrap()) + Some(alias.expect("we expect here BridgeHubWestend to Rococo mapping at least")) } } } From 48d875d0e60c6d5e4c0c901582cc8edfb76f2f42 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Fri, 7 Jun 2024 16:40:10 +0200 Subject: [PATCH 14/52] Contracts: update wasmi to 0.32 (#3679) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit take over #2941 [Weights compare](https://weights.tasty.limo/compare?unit=weight&ignore_errors=true&threshold=10&method=asymptotic&repo=polkadot-sdk&old=master&new=pg%2Fwasmi-to-v0.32.0-beta.7&path_pattern=substrate%2Fframe%2F**%2Fsrc%2Fweights.rs%2Cpolkadot%2Fruntime%2F*%2Fsrc%2Fweights%2F**%2F*.rs%2Cpolkadot%2Fbridges%2Fmodules%2F*%2Fsrc%2Fweights.rs%2Ccumulus%2F**%2Fweights%2F*.rs%2Ccumulus%2F**%2Fweights%2Fxcm%2F*.rs%2Ccumulus%2F**%2Fsrc%2Fweights.rs) --------- Co-authored-by: command-bot <> Co-authored-by: Alexander Theißen --- Cargo.lock | 98 ++- prdoc/pr_3679.prdoc | 14 + substrate/frame/contracts/Cargo.toml | 2 +- .../frame/contracts/proc-macro/src/lib.rs | 21 +- 
.../frame/contracts/src/benchmarking/mod.rs | 4 +- .../contracts/src/benchmarking/sandbox.rs | 5 +- substrate/frame/contracts/src/gas.rs | 73 +- substrate/frame/contracts/src/schedule.rs | 7 + substrate/frame/contracts/src/wasm/mod.rs | 33 +- substrate/frame/contracts/src/wasm/prepare.rs | 40 +- substrate/frame/contracts/src/wasm/runtime.rs | 88 +- substrate/frame/contracts/src/weights.rs | 772 +++++++++--------- 12 files changed, 649 insertions(+), 508 deletions(-) create mode 100644 prdoc/pr_3679.prdoc diff --git a/Cargo.lock b/Cargo.lock index a01420bc3ef6..a96bb680b750 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -90,9 +90,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.8" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42cd52102d3df161c77a887b608d7a4897d7cc112886a9537b738a887a03aaff" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "getrandom 0.2.10", @@ -6508,7 +6508,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.8", + "ahash 0.8.11", ] [[package]] @@ -6517,7 +6517,7 @@ version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ - "ahash 0.8.8", + "ahash 0.8.11", "allocator-api2", "serde", ] @@ -7425,9 +7425,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libnghttp2-sys" @@ -8559,6 +8559,12 @@ dependencies = [ "syn 2.0.61", ] +[[package]] +name = "multi-stash" +version = "0.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "685a9ac4b61f4e728e1d2c6a7844609c16527aeb5e6c865915c08e619c16410f" + [[package]] name = "multiaddr" version = "0.17.1" @@ -9115,6 +9121,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" +dependencies = [ + "proc-macro2 1.0.82", + "quote 1.0.35", + "syn 2.0.61", +] + [[package]] name = "num-format" version = "0.4.4" @@ -9984,7 +10001,7 @@ dependencies = [ "staging-xcm", "staging-xcm-builder", "wasm-instrument", - "wasmi", + "wasmi 0.32.3", "wat", ] @@ -17107,7 +17124,7 @@ dependencies = [ name = "sc-consensus-grandpa" version = "0.19.0" dependencies = [ - "ahash 0.8.8", + "ahash 0.8.11", "array-bytes", "assert_matches", "async-trait", @@ -17492,7 +17509,7 @@ dependencies = [ name = "sc-network-gossip" version = "0.34.0" dependencies = [ - "ahash 0.8.8", + "ahash 0.8.11", "async-trait", "futures", "futures-timer", @@ -18245,7 +18262,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "772575a524feeb803e5b0fcbc6dd9f367e579488197c94c6e4023aad2305774d" dependencies = [ - "ahash 0.8.8", + "ahash 0.8.11", "cfg-if", "hashbrown 0.13.2", ] @@ -18965,7 +18982,7 @@ dependencies = [ "smallvec", "soketto", "twox-hash", - "wasmi", + "wasmi 0.31.2", "x25519-dalek 2.0.0", "zeroize", ] @@ -20410,7 +20427,7 @@ dependencies = [ name = "sp-trie" version = "29.0.0" dependencies = [ - "ahash 0.8.8", + "ahash 0.8.11", "array-bytes", "criterion", "hash-db", @@ -20781,6 +20798,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "string-interner" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c6a0d765f5807e98a091107bae0a56ea3799f66a5de47b2c84c94a39c09974e" +dependencies = [ + "cfg-if", + "hashbrown 0.14.3", + "serde", +] + [[package]] 
name = "strobe-rs" version = "0.8.1" @@ -22823,7 +22851,24 @@ dependencies = [ "smallvec", "spin 0.9.8", "wasmi_arena", - "wasmi_core", + "wasmi_core 0.13.0", + "wasmparser-nostd", +] + +[[package]] +name = "wasmi" +version = "0.32.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50386c99b9c32bd2ed71a55b6dd4040af2580530fae8bdb9a6576571a80d0cca" +dependencies = [ + "arrayvec 0.7.4", + "multi-stash", + "num-derive", + "num-traits", + "smallvec", + "spin 0.9.8", + "wasmi_collections", + "wasmi_core 0.32.3", "wasmparser-nostd", ] @@ -22833,6 +22878,17 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "104a7f73be44570cac297b3035d76b169d6599637631cf37a1703326a0727073" +[[package]] +name = "wasmi_collections" +version = "0.32.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c128c039340ffd50d4195c3f8ce31aac357f06804cfc494c8b9508d4b30dca4" +dependencies = [ + "ahash 0.8.11", + "hashbrown 0.14.3", + "string-interner", +] + [[package]] name = "wasmi_core" version = "0.13.0" @@ -22845,6 +22901,18 @@ dependencies = [ "paste", ] +[[package]] +name = "wasmi_core" +version = "0.32.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a23b3a7f6c8c3ceeec6b83531ee61f0013c56e51cbf2b14b0f213548b23a4b41" +dependencies = [ + "downcast-rs", + "libm", + "num-traits", + "paste", +] + [[package]] name = "wasmparser" version = "0.102.0" @@ -22857,9 +22925,9 @@ dependencies = [ [[package]] name = "wasmparser-nostd" -version = "0.100.1" +version = "0.100.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9157cab83003221bfd385833ab587a039f5d6fa7304854042ba358a3b09e0724" +checksum = "d5a015fe95f3504a94bb1462c717aae75253e39b9dd6c3fb1062c934535c64aa" dependencies = [ "indexmap-nostd", ] diff --git a/prdoc/pr_3679.prdoc b/prdoc/pr_3679.prdoc new file mode 100644 index 000000000000..86c1e9beafe9 --- /dev/null +++ 
b/prdoc/pr_3679.prdoc @@ -0,0 +1,14 @@ +title: "[pallet-contracts] bump wasmi to 0.32" + +doc: + - audience: Runtime Dev + description: | + - Bump wasmi to 0.32 + - Turn on lazy and unchecked compilation when calling a contract. + See https://docs.rs/wasmi/0.32.0/wasmi/enum.CompilationMode.html#variant.Lazy + See https://docs.rs/wasmi/0.32.0/wasmi/struct.Module.html#method.new_unchecked + See https://wasmi-labs.github.io/blog/posts/wasmi-v0.32 for more details, on the wasmi update. + +crates: + - name: pallet-contracts + - name: pallet-contracts-proc-macro diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index bd4ded1a1170..70363562f6af 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -30,7 +30,7 @@ serde = { optional = true, features = ["derive"], workspace = true, default-feat smallvec = { version = "1", default-features = false, features = [ "const_generics", ] } -wasmi = { version = "0.31", default-features = false } +wasmi = { version = "0.32.3", default-features = false } impl-trait-for-tuples = "0.2" # Only used in benchmarking to generate contract code diff --git a/substrate/frame/contracts/proc-macro/src/lib.rs b/substrate/frame/contracts/proc-macro/src/lib.rs index 2472863b58b1..f91f8660cd31 100644 --- a/substrate/frame/contracts/proc-macro/src/lib.rs +++ b/substrate/frame/contracts/proc-macro/src/lib.rs @@ -150,7 +150,7 @@ impl HostFnReturn { Self::U64 => quote! { ::core::primitive::u64 }, }; quote! { - ::core::result::Result<#ok, ::wasmi::core::Trap> + ::core::result::Result<#ok, ::wasmi::Error> } } } @@ -694,7 +694,7 @@ fn expand_functions(def: &EnvDef, expand_mode: ExpandMode) -> TokenStream2 { let into_host = if expand_blocks { quote! { |reason| { - ::wasmi::core::Trap::from(reason) + ::wasmi::Error::host(reason) } } } else { @@ -711,13 +711,13 @@ fn expand_functions(def: &EnvDef, expand_mode: ExpandMode) -> TokenStream2 { quote! 
{ // Write gas from wasmi into pallet-contracts before entering the host function. let __gas_left_before__ = { - let executor_total = - __caller__.fuel_consumed().expect("Fuel metering is enabled; qed"); + let fuel = + __caller__.get_fuel().expect("Fuel metering is enabled; qed"); __caller__ .data_mut() .ext() .gas_meter_mut() - .sync_from_executor(executor_total) + .sync_from_executor(fuel) .map_err(TrapReason::from) .map_err(#into_host)? }; @@ -733,15 +733,18 @@ fn expand_functions(def: &EnvDef, expand_mode: ExpandMode) -> TokenStream2 { // Write gas from pallet-contracts into wasmi after leaving the host function. let sync_gas_after = if expand_blocks { quote! { - let fuel_consumed = __caller__ + let fuel = __caller__ .data_mut() .ext() .gas_meter_mut() .sync_to_executor(__gas_left_before__) - .map_err(TrapReason::from)?; + .map_err(|err| { + let err = TrapReason::from(err); + wasmi::Error::host(err) + })?; __caller__ - .consume_fuel(fuel_consumed.into()) - .map_err(|_| TrapReason::from(Error::::OutOfGas))?; + .set_fuel(fuel.into()) + .expect("Fuel metering is enabled; qed"); } } else { quote! 
{ } diff --git a/substrate/frame/contracts/src/benchmarking/mod.rs b/substrate/frame/contracts/src/benchmarking/mod.rs index 7c993bc9a771..80c7e863d299 100644 --- a/substrate/frame/contracts/src/benchmarking/mod.rs +++ b/substrate/frame/contracts/src/benchmarking/mod.rs @@ -1278,7 +1278,6 @@ mod benchmarks { // s: size of salt in bytes #[benchmark(pov_mode = Measured)] fn seal_instantiate( - t: Linear<0, 1>, i: Linear<0, { (code::max_pages::() - 1) * 64 * 1024 }>, s: Linear<0, { (code::max_pages::() - 1) * 64 * 1024 }>, ) -> Result<(), BenchmarkError> { @@ -1286,7 +1285,7 @@ mod benchmarks { let hash_bytes = hash.encode(); let hash_len = hash_bytes.len() as u32; - let value: BalanceOf = t.into(); + let value: BalanceOf = 1u32.into(); let value_bytes = value.encode(); let value_len = value_bytes.len() as u32; @@ -1341,6 +1340,7 @@ mod benchmarks { assert_ok!(result); assert!(ContractInfoOf::::get(&addr).is_some()); + assert_eq!(T::Currency::balance(&addr), Pallet::::min_balance() + value); Ok(()) } diff --git a/substrate/frame/contracts/src/benchmarking/sandbox.rs b/substrate/frame/contracts/src/benchmarking/sandbox.rs index 308bf6873e49..1bcf0c401f41 100644 --- a/substrate/frame/contracts/src/benchmarking/sandbox.rs +++ b/substrate/frame/contracts/src/benchmarking/sandbox.rs @@ -24,7 +24,7 @@ use crate::wasm::{ LoadingMode, WasmBlob, }; use sp_core::Get; -use wasmi::{errors::LinkerError, Func, Linker, StackLimits, Store}; +use wasmi::{errors::LinkerError, CompilationMode, Func, Linker, StackLimits, Store}; /// Minimal execution environment without any imported functions. pub struct Sandbox { @@ -48,6 +48,7 @@ impl From<&WasmModule> for Sandbox { Determinism::Relaxed, Some(StackLimits::default()), LoadingMode::Checked, + CompilationMode::Eager, ) .expect("Failed to load Wasm module"); @@ -62,7 +63,7 @@ impl From<&WasmModule> for Sandbox { // Set fuel for wasmi execution. 
store - .add_fuel(u64::MAX) + .set_fuel(u64::MAX) .expect("We've set up engine to fuel consuming mode; qed"); let entry_point = instance diff --git a/substrate/frame/contracts/src/gas.rs b/substrate/frame/contracts/src/gas.rs index 32fad2140f14..f8c97e251f3d 100644 --- a/substrate/frame/contracts/src/gas.rs +++ b/substrate/frame/contracts/src/gas.rs @@ -23,7 +23,7 @@ use frame_support::{ DefaultNoBound, }; use sp_core::Get; -use sp_runtime::{traits::Zero, DispatchError, Saturating}; +use sp_runtime::{traits::Zero, DispatchError}; #[cfg(test)] use std::{any::Any, fmt::Debug}; @@ -37,6 +37,45 @@ impl ChargedAmount { } } +/// Meter for syncing the gas between the executor and the gas meter. +#[derive(DefaultNoBound)] +struct EngineMeter { + fuel: u64, + _phantom: PhantomData, +} + +impl EngineMeter { + /// Create a meter with the given fuel limit. + fn new(limit: Weight) -> Self { + Self { + fuel: limit.ref_time().saturating_div(T::Schedule::get().ref_time_by_fuel()), + _phantom: PhantomData, + } + } + + /// Set the fuel left to the given value. + /// Returns the amount of Weight consumed since the last update. + fn set_fuel(&mut self, fuel: u64) -> Weight { + let consumed = self + .fuel + .saturating_sub(fuel) + .saturating_mul(T::Schedule::get().ref_time_by_fuel()); + self.fuel = fuel; + Weight::from_parts(consumed, 0) + } + + /// Charge the given amount of gas. + /// Returns the amount of fuel left. + fn charge_ref_time(&mut self, ref_time: u64) -> Result { + let amount = ref_time + .checked_div(T::Schedule::get().ref_time_by_fuel()) + .ok_or(Error::::InvalidSchedule)?; + + self.fuel.checked_sub(amount).ok_or_else(|| Error::::OutOfGas)?; + Ok(Syncable(self.fuel)) + } +} + /// Used to capture the gas left before entering a host function. /// /// Has to be consumed in order to sync back the gas after leaving the host function. @@ -103,12 +142,9 @@ pub struct GasMeter { /// Due to `adjust_gas` and `nested` the `gas_left` can temporarily dip below its final value. 
gas_left_lowest: Weight, /// The amount of resources that was consumed by the execution engine. - /// - /// This should be equivalent to `self.gas_consumed().ref_time()` but expressed in whatever - /// unit the execution engine uses to track resource consumption. We have to track it - /// separately in order to avoid the loss of precision that happens when converting from - /// ref_time to the execution engine unit. - executor_consumed: u64, + /// We have to track it separately in order to avoid the loss of precision that happens when + /// converting from ref_time to the execution engine unit. + engine_meter: EngineMeter, _phantom: PhantomData, #[cfg(test)] tokens: Vec, @@ -120,7 +156,7 @@ impl GasMeter { gas_limit, gas_left: gas_limit, gas_left_lowest: gas_limit, - executor_consumed: 0, + engine_meter: EngineMeter::new(gas_limit), _phantom: PhantomData, #[cfg(test)] tokens: Vec::new(), @@ -200,16 +236,10 @@ impl GasMeter { /// Needs to be called when entering a host function to update this meter with the /// gas that was tracked by the executor. It tracks the latest seen total value /// in order to compute the delta that needs to be charged. - pub fn sync_from_executor( - &mut self, - executor_total: u64, - ) -> Result { - let chargable_reftime = executor_total - .saturating_sub(self.executor_consumed) - .saturating_mul(u64::from(T::Schedule::get().instruction_weights.base)); - self.executor_consumed = executor_total; + pub fn sync_from_executor(&mut self, engine_fuel: u64) -> Result { + let weight_consumed = self.engine_meter.set_fuel(engine_fuel); self.gas_left - .checked_reduce(Weight::from_parts(chargable_reftime, 0)) + .checked_reduce(weight_consumed) .ok_or_else(|| Error::::OutOfGas)?; Ok(RefTimeLeft(self.gas_left.ref_time())) } @@ -223,13 +253,8 @@ impl GasMeter { /// It is important that this does **not** actually sync with the executor. That has /// to be done by the caller. 
pub fn sync_to_executor(&mut self, before: RefTimeLeft) -> Result { - let chargable_executor_resource = before - .0 - .saturating_sub(self.gas_left().ref_time()) - .checked_div(u64::from(T::Schedule::get().instruction_weights.base)) - .ok_or(Error::::InvalidSchedule)?; - self.executor_consumed.saturating_accrue(chargable_executor_resource); - Ok(Syncable(chargable_executor_resource)) + let ref_time_consumed = before.0.saturating_sub(self.gas_left().ref_time()); + self.engine_meter.charge_ref_time(ref_time_consumed) } /// Returns the amount of gas that is required to run the same call. diff --git a/substrate/frame/contracts/src/schedule.rs b/substrate/frame/contracts/src/schedule.rs index a1fbdea4228b..60c9520677eb 100644 --- a/substrate/frame/contracts/src/schedule.rs +++ b/substrate/frame/contracts/src/schedule.rs @@ -62,6 +62,13 @@ pub struct Schedule { pub instruction_weights: InstructionWeights, } +impl Schedule { + /// Returns the reference time per engine fuel. + pub fn ref_time_by_fuel(&self) -> u64 { + self.instruction_weights.base as u64 + } +} + /// Describes the upper limits on various metrics. 
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[cfg_attr(feature = "runtime-benchmarks", derive(Debug))] diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index 5eccdfffb91d..0d65d696758d 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -57,7 +57,7 @@ use frame_support::{ use sp_core::Get; use sp_runtime::{DispatchError, RuntimeDebug}; use sp_std::prelude::*; -use wasmi::{InstancePre, Linker, Memory, MemoryType, StackLimits, Store}; +use wasmi::{CompilationMode, InstancePre, Linker, Memory, MemoryType, StackLimits, Store}; const BYTES_PER_PAGE: usize = 64 * 1024; @@ -142,11 +142,6 @@ struct CodeLoadToken(u32); impl Token for CodeLoadToken { fn weight(&self) -> Weight { - // When loading the contract, we already covered the general costs of - // calling the storage but still need to account for the actual size of the - // contract code. This is why we subtract `T::*::(0)`. We need to do this at this - // point because when charging the general weight for calling the contract we don't know the - // size of the contract. 
T::WeightInfo::call_with_code_per_byte(self.0) .saturating_sub(T::WeightInfo::call_with_code_per_byte(0)) } @@ -351,9 +346,9 @@ impl WasmBlob { mut store: Store>, result: Result<(), wasmi::Error>, ) -> ExecResult { - let engine_consumed_total = store.fuel_consumed().expect("Fuel metering is enabled; qed"); + let engine_fuel = store.get_fuel().expect("Fuel metering is enabled; qed"); let gas_meter = store.data_mut().ext().gas_meter_mut(); - let _ = gas_meter.sync_from_executor(engine_consumed_total)?; + let _ = gas_meter.sync_from_executor(engine_fuel)?; store.into_data().to_execution_result(result) } @@ -364,8 +359,13 @@ impl WasmBlob { input_data: Vec, ) -> (Func, Store>) { use InstanceOrExecReturn::*; - match Self::prepare_execute(self, Runtime::new(ext, input_data), &ExportedFunction::Call) - .expect("Benchmark should provide valid module") + match Self::prepare_execute( + self, + Runtime::new(ext, input_data), + &ExportedFunction::Call, + CompilationMode::Eager, + ) + .expect("Benchmark should provide valid module") { Instance((func, store)) => (func, store), ExecReturn(_) => panic!("Expected Instance"), @@ -376,6 +376,7 @@ impl WasmBlob { self, runtime: Runtime<'a, E>, function: &'a ExportedFunction, + compilation_mode: CompilationMode, ) -> PreExecResult<'a, E> { let code = self.code.as_slice(); // Instantiate the Wasm module to the engine. 
@@ -386,6 +387,7 @@ impl WasmBlob { self.code_info.determinism, Some(StackLimits::default()), LoadingMode::Unchecked, + compilation_mode, ) .map_err(|err| { log::debug!(target: LOG_TARGET, "failed to create wasmi module: {err:?}"); @@ -415,10 +417,10 @@ impl WasmBlob { .gas_meter_mut() .gas_left() .ref_time() - .checked_div(T::Schedule::get().instruction_weights.base as u64) + .checked_div(T::Schedule::get().ref_time_by_fuel()) .ok_or(Error::::InvalidSchedule)?; store - .add_fuel(fuel_limit) + .set_fuel(fuel_limit) .expect("We've set up engine to fuel consuming mode; qed"); // Start function should already see the correct refcount in case it will be ever inspected. @@ -464,7 +466,12 @@ impl Executable for WasmBlob { input_data: Vec, ) -> ExecResult { use InstanceOrExecReturn::*; - match Self::prepare_execute(self, Runtime::new(ext, input_data), function)? { + match Self::prepare_execute( + self, + Runtime::new(ext, input_data), + function, + CompilationMode::Lazy, + )? { Instance((func, mut store)) => { let result = func.call(&mut store, &[], &mut []); Self::process_result(store, result) diff --git a/substrate/frame/contracts/src/wasm/prepare.rs b/substrate/frame/contracts/src/wasm/prepare.rs index 0d9a12d8ae83..50eb6d625321 100644 --- a/substrate/frame/contracts/src/wasm/prepare.rs +++ b/substrate/frame/contracts/src/wasm/prepare.rs @@ -33,8 +33,8 @@ use sp_runtime::{traits::Hash, DispatchError}; #[cfg(any(test, feature = "runtime-benchmarks"))] use sp_std::prelude::Vec; use wasmi::{ - core::ValueType as WasmiValueType, Config as WasmiConfig, Engine, ExternType, - FuelConsumptionMode, Module, StackLimits, + core::ValType as WasmiValueType, CompilationMode, Config as WasmiConfig, Engine, ExternType, + Module, StackLimits, }; /// Imported memory must be located inside this module. 
The reason for hardcoding is that current @@ -71,7 +71,8 @@ impl LoadedModule { code: &[u8], determinism: Determinism, stack_limits: Option, - _mode: LoadingMode, + loading_mode: LoadingMode, + compilation_mode: CompilationMode, ) -> Result { // NOTE: wasmi does not support unstable WebAssembly features. The module is implicitly // checked for not having those ones when creating `wasmi::Module` below. @@ -86,8 +87,8 @@ impl LoadedModule { .wasm_extended_const(false) .wasm_saturating_float_to_int(false) .floats(matches!(determinism, Determinism::Relaxed)) - .consume_fuel(true) - .fuel_consumption_mode(FuelConsumptionMode::Eager); + .compilation_mode(compilation_mode) + .consume_fuel(true); if let Some(stack_limits) = stack_limits { config.set_stack_limits(stack_limits); @@ -95,14 +96,18 @@ impl LoadedModule { let engine = Engine::new(&config); - // TODO use Module::new_unchecked when validate_module is true once we are on wasmi@0.32 - let module = Module::new(&engine, code).map_err(|err| { + let module = match loading_mode { + LoadingMode::Checked => Module::new(&engine, code), + // Safety: The code has been validated, Therefore we know that it's a valid binary. + LoadingMode::Unchecked => unsafe { Module::new_unchecked(&engine, code) }, + } + .map_err(|err| { log::debug!(target: LOG_TARGET, "Module creation failed: {:?}", err); "Can't load the module into wasmi!" })?; #[cfg(test)] - tracker::LOADED_MODULE.with(|t| t.borrow_mut().push(_mode)); + tracker::LOADED_MODULE.with(|t| t.borrow_mut().push(loading_mode)); // Return a `LoadedModule` instance with // __valid__ module. @@ -263,17 +268,25 @@ where Determinism::Enforced, stack_limits, LoadingMode::Checked, + CompilationMode::Eager, ) { *determinism = Determinism::Enforced; module } else { - LoadedModule::new::(code, Determinism::Relaxed, None, LoadingMode::Checked)? + LoadedModule::new::( + code, + Determinism::Relaxed, + None, + LoadingMode::Checked, + CompilationMode::Eager, + )? 
}, Determinism::Enforced => LoadedModule::new::( code, Determinism::Enforced, stack_limits, LoadingMode::Checked, + CompilationMode::Eager, )?, }; @@ -348,8 +361,13 @@ pub mod benchmarking { owner: AccountIdOf, ) -> Result, DispatchError> { let determinism = Determinism::Enforced; - let contract_module = - LoadedModule::new::(&code, determinism, None, LoadingMode::Checked)?; + let contract_module = LoadedModule::new::( + &code, + determinism, + None, + LoadingMode::Checked, + CompilationMode::Eager, + )?; let _ = contract_module.scan_imports::(schedule)?; let code: CodeVec = code.try_into().map_err(|_| >::CodeTooLarge)?; let code_info = CodeInfo { diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 07ecd60f7d5e..5f50dbf391a2 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -209,9 +209,7 @@ pub enum RuntimeCosts { /// Weight per byte that is cloned by supplying the `CLONE_INPUT` flag. CallInputCloned(u32), /// Weight of calling `seal_instantiate` for the given input length and salt. - InstantiateBase { input_data_len: u32, salt_len: u32 }, - /// Weight of the transfer performed during an instantiate. - InstantiateTransferSurcharge, + Instantiate { input_data_len: u32, salt_len: u32 }, /// Weight of calling `seal_hash_sha_256` for the given input size. HashSha256(u32), /// Weight of calling `seal_hash_keccak_256` for the given input size. 
@@ -302,9 +300,8 @@ impl Token for RuntimeCosts { DelegateCallBase => T::WeightInfo::seal_delegate_call(), CallTransferSurcharge => cost_args!(seal_call, 1, 0), CallInputCloned(len) => cost_args!(seal_call, 0, len), - InstantiateBase { input_data_len, salt_len } => - T::WeightInfo::seal_instantiate(0, input_data_len, salt_len), - InstantiateTransferSurcharge => cost_args!(seal_instantiate, 1, 0, 0), + Instantiate { input_data_len, salt_len } => + T::WeightInfo::seal_instantiate(input_data_len, salt_len), HashSha256(len) => T::WeightInfo::seal_hash_sha2_256(len), HashKeccak256(len) => T::WeightInfo::seal_hash_keccak_256(len), HashBlake256(len) => T::WeightInfo::seal_hash_blake2_256(len), @@ -385,49 +382,55 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { /// Converts the sandbox result and the runtime state into the execution outcome. pub fn to_execution_result(self, sandbox_result: Result<(), wasmi::Error>) -> ExecResult { - use wasmi::core::TrapCode::OutOfFuel; + use wasmi::{ + core::TrapCode, + errors::{ErrorKind, FuelError}, + }; use TrapReason::*; - match sandbox_result { + let Err(error) = sandbox_result else { // Contract returned from main function -> no data was returned. - Ok(_) => Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }), + return Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) + }; + if let ErrorKind::Fuel(FuelError::OutOfFuel) = error.kind() { // `OutOfGas` when host asks engine to consume more than left in the _store_. // We should never get this case, as gas meter is being charged (and hence raises error) // first. - Err(wasmi::Error::Store(_)) => Err(Error::::OutOfGas.into()), - // Contract either trapped or some host function aborted the execution. - Err(wasmi::Error::Trap(trap)) => { - if let Some(OutOfFuel) = trap.trap_code() { - // `OutOfGas` during engine execution. - return Err(Error::::OutOfGas.into()) - } - // If we encoded a reason then it is some abort generated by a host function. 
- if let Some(reason) = &trap.downcast_ref::() { - match &reason { - Return(ReturnData { flags, data }) => { - let flags = ReturnFlags::from_bits(*flags) - .ok_or(Error::::InvalidCallFlags)?; - return Ok(ExecReturnValue { flags, data: data.to_vec() }) - }, - Termination => - return Ok(ExecReturnValue { - flags: ReturnFlags::empty(), - data: Vec::new(), - }), - SupervisorError(error) => return Err((*error).into()), - } - } + return Err(Error::::OutOfGas.into()) + } + match error.as_trap_code() { + Some(TrapCode::OutOfFuel) => { + // `OutOfGas` during engine execution. + return Err(Error::::OutOfGas.into()) + }, + Some(_trap_code) => { // Otherwise the trap came from the contract itself. - Err(Error::::ContractTrapped.into()) + return Err(Error::::ContractTrapped.into()) }, - // Any other error is returned only if instantiation or linking failed (i.e. - // wasm binary tried to import a function that is not provided by the host). - // This shouldn't happen because validation process ought to reject such binaries. - // - // Because panics are really undesirable in the runtime code, we treat this as - // a trap for now. Eventually, we might want to revisit this. - Err(_) => Err(Error::::CodeRejected.into()), + None => {}, + } + // If we encoded a reason then it is some abort generated by a host function. + if let Some(reason) = &error.downcast_ref::() { + match &reason { + Return(ReturnData { flags, data }) => { + let flags = + ReturnFlags::from_bits(*flags).ok_or(Error::::InvalidCallFlags)?; + return Ok(ExecReturnValue { flags, data: data.to_vec() }) + }, + Termination => + return Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }), + SupervisorError(error) => return Err((*error).into()), + } } + + // Any other error is returned only if instantiation or linking failed (i.e. + // wasm binary tried to import a function that is not provided by the host). + // This shouldn't happen because validation process ought to reject such binaries. 
+ // + // Because panics are really undesirable in the runtime code, we treat this as + // a trap for now. Eventually, we might want to revisit this. + log::debug!("Code rejected: {:?}", error); + Err(Error::::CodeRejected.into()) } /// Get a mutable reference to the inner `Ext`. @@ -894,16 +897,13 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { salt_ptr: u32, salt_len: u32, ) -> Result { - self.charge_gas(RuntimeCosts::InstantiateBase { input_data_len, salt_len })?; + self.charge_gas(RuntimeCosts::Instantiate { input_data_len, salt_len })?; let deposit_limit: BalanceOf<::T> = if deposit_ptr == SENTINEL { BalanceOf::<::T>::zero() } else { self.read_sandbox_memory_as(memory, deposit_ptr)? }; let value: BalanceOf<::T> = self.read_sandbox_memory_as(memory, value_ptr)?; - if value > 0u32.into() { - self.charge_gas(RuntimeCosts::InstantiateTransferSurcharge)?; - } let code_hash: CodeHash<::T> = self.read_sandbox_memory_as(memory, code_hash_ptr)?; let input_data = self.read_sandbox_memory(memory, input_data_ptr, input_data_len)?; diff --git a/substrate/frame/contracts/src/weights.rs b/substrate/frame/contracts/src/weights.rs index 98b41eda964c..0404a9d3d8e5 100644 --- a/substrate/frame/contracts/src/weights.rs +++ b/substrate/frame/contracts/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_contracts` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-06-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vicqj8em-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-1pho9goo-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -101,7 +101,7 @@ pub trait WeightInfo { fn seal_transfer() -> Weight; fn seal_call(t: u32, i: u32, ) -> Weight; fn seal_delegate_call() -> Weight; - fn seal_instantiate(t: u32, i: u32, s: u32, ) -> Weight; + fn seal_instantiate(i: u32, s: u32, ) -> Weight; fn seal_hash_sha2_256(n: u32, ) -> Weight; fn seal_hash_keccak_256(n: u32, ) -> Weight; fn seal_hash_blake2_256(n: u32, ) -> Weight; @@ -127,8 +127,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 1_960_000 picoseconds. - Weight::from_parts(2_043_000, 1627) + // Minimum execution time: 1_896_000 picoseconds. + Weight::from_parts(1_990_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -138,10 +138,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 11_574_000 picoseconds. - Weight::from_parts(11_846_000, 442) - // Standard Error: 1_342 - .saturating_add(Weight::from_parts(1_113_844, 0).saturating_mul(k.into())) + // Minimum execution time: 11_142_000 picoseconds. + Weight::from_parts(11_578_000, 442) + // Standard Error: 1_557 + .saturating_add(Weight::from_parts(1_165_198, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -155,10 +155,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 7_709_000 picoseconds. - Weight::from_parts(5_068_795, 6149) + // Minimum execution time: 7_649_000 picoseconds. 
+ Weight::from_parts(4_827_445, 6149) // Standard Error: 5 - .saturating_add(Weight::from_parts(1_689, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_630, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -171,8 +171,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_477_000 picoseconds. - Weight::from_parts(17_313_000, 6450) + // Minimum execution time: 16_096_000 picoseconds. + Weight::from_parts(16_937_000, 6450) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -185,10 +185,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_111_000 picoseconds. - Weight::from_parts(3_198_000, 3635) - // Standard Error: 593 - .saturating_add(Weight::from_parts(1_081_746, 0).saturating_mul(k.into())) + // Minimum execution time: 3_131_000 picoseconds. + Weight::from_parts(3_209_000, 3635) + // Standard Error: 481 + .saturating_add(Weight::from_parts(1_087_506, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -207,10 +207,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `325 + c * (1 ±0)` // Estimated: `6263 + c * (1 ±0)` - // Minimum execution time: 15_390_000 picoseconds. - Weight::from_parts(16_157_208, 6263) + // Minimum execution time: 15_289_000 picoseconds. 
+ Weight::from_parts(16_157_168, 6263) // Standard Error: 1 - .saturating_add(Weight::from_parts(501, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(395, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -221,8 +221,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 12_045_000 picoseconds. - Weight::from_parts(12_892_000, 6380) + // Minimum execution time: 12_312_000 picoseconds. + Weight::from_parts(12_650_000, 6380) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -236,8 +236,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 47_250_000 picoseconds. - Weight::from_parts(49_231_000, 6292) + // Minimum execution time: 47_239_000 picoseconds. + Weight::from_parts(48_617_000, 6292) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -249,8 +249,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 53_722_000 picoseconds. - Weight::from_parts(55_268_000, 6534) + // Minimum execution time: 52_084_000 picoseconds. + Weight::from_parts(53_838_000, 6534) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -260,8 +260,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 11_707_000 picoseconds. - Weight::from_parts(12_305_000, 6349) + // Minimum execution time: 11_785_000 picoseconds. 
+ Weight::from_parts(12_284_000, 6349) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -271,8 +271,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_129_000 picoseconds. - Weight::from_parts(2_197_000, 1627) + // Minimum execution time: 2_136_000 picoseconds. + Weight::from_parts(2_233_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -284,8 +284,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 11_145_000 picoseconds. - Weight::from_parts(11_445_000, 3631) + // Minimum execution time: 10_957_000 picoseconds. + Weight::from_parts(11_314_000, 3631) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -295,8 +295,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_463_000 picoseconds. - Weight::from_parts(4_585_000, 3607) + // Minimum execution time: 4_354_000 picoseconds. + Weight::from_parts(4_613_000, 3607) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -307,8 +307,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 5_639_000 picoseconds. - Weight::from_parts(5_865_000, 3632) + // Minimum execution time: 5_541_000 picoseconds. 
+ Weight::from_parts(5_790_000, 3632) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -319,8 +319,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 5_540_000 picoseconds. - Weight::from_parts(5_954_000, 3607) + // Minimum execution time: 5_502_000 picoseconds. + Weight::from_parts(5_701_000, 3607) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -341,10 +341,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `801 + c * (1 ±0)` // Estimated: `4264 + c * (1 ±0)` - // Minimum execution time: 353_812_000 picoseconds. - Weight::from_parts(337_889_300, 4264) - // Standard Error: 94 - .saturating_add(Weight::from_parts(34_200, 0).saturating_mul(c.into())) + // Minimum execution time: 247_884_000 picoseconds. + Weight::from_parts(265_795_781, 4264) + // Standard Error: 4 + .saturating_add(Weight::from_parts(724, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -372,14 +372,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `323` // Estimated: `6262` - // Minimum execution time: 4_499_852_000 picoseconds. - Weight::from_parts(135_265_841, 6262) - // Standard Error: 247 - .saturating_add(Weight::from_parts(72_051, 0).saturating_mul(c.into())) - // Standard Error: 29 - .saturating_add(Weight::from_parts(2_180, 0).saturating_mul(i.into())) - // Standard Error: 29 - .saturating_add(Weight::from_parts(2_195, 0).saturating_mul(s.into())) + // Minimum execution time: 4_500_184_000 picoseconds. 
+ Weight::from_parts(160_729_258, 6262) + // Standard Error: 143 + .saturating_add(Weight::from_parts(52_809, 0).saturating_mul(c.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(2_173, 0).saturating_mul(i.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(2_165, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -405,12 +405,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `560` // Estimated: `4029` - // Minimum execution time: 2_376_075_000 picoseconds. - Weight::from_parts(2_387_885_000, 4029) + // Minimum execution time: 2_219_163_000 picoseconds. + Weight::from_parts(2_236_918_000, 4029) // Standard Error: 32 - .saturating_add(Weight::from_parts(1_036, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(937, 0).saturating_mul(i.into())) // Standard Error: 32 - .saturating_add(Weight::from_parts(936, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(938, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -430,8 +430,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `826` // Estimated: `4291` - // Minimum execution time: 197_222_000 picoseconds. - Weight::from_parts(203_633_000, 4291) + // Minimum execution time: 164_801_000 picoseconds. + Weight::from_parts(167_250_000, 4291) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -448,10 +448,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 325_788_000 picoseconds. - Weight::from_parts(335_491_760, 3607) - // Standard Error: 50 - .saturating_add(Weight::from_parts(35_337, 0).saturating_mul(c.into())) + // Minimum execution time: 225_207_000 picoseconds. 
+ Weight::from_parts(263_665_658, 3607) + // Standard Error: 47 + .saturating_add(Weight::from_parts(50_732, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -468,10 +468,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 336_010_000 picoseconds. - Weight::from_parts(348_030_264, 3607) - // Standard Error: 43 - .saturating_add(Weight::from_parts(35_696, 0).saturating_mul(c.into())) + // Minimum execution time: 230_718_000 picoseconds. + Weight::from_parts(258_359_271, 3607) + // Standard Error: 47 + .saturating_add(Weight::from_parts(51_014, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -487,8 +487,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 40_118_000 picoseconds. - Weight::from_parts(40_987_000, 3780) + // Minimum execution time: 39_668_000 picoseconds. + Weight::from_parts(41_031_000, 3780) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -502,8 +502,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `552` // Estimated: `6492` - // Minimum execution time: 25_236_000 picoseconds. - Weight::from_parts(26_450_000, 6492) + // Minimum execution time: 25_890_000 picoseconds. + Weight::from_parts(26_603_000, 6492) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -512,17 +512,17 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_200_000 picoseconds. - Weight::from_parts(9_773_983, 0) + // Minimum execution time: 8_269_000 picoseconds. 
+ Weight::from_parts(9_227_069, 0) // Standard Error: 74 - .saturating_add(Weight::from_parts(72_257, 0).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(51_396, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 606_000 picoseconds. - Weight::from_parts(672_000, 0) + // Minimum execution time: 602_000 picoseconds. + Weight::from_parts(664_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -530,8 +530,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `354` // Estimated: `3819` - // Minimum execution time: 6_260_000 picoseconds. - Weight::from_parts(6_645_000, 3819) + // Minimum execution time: 6_131_000 picoseconds. + Weight::from_parts(6_468_000, 3819) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) @@ -540,79 +540,79 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `447` // Estimated: `3912` - // Minimum execution time: 7_599_000 picoseconds. - Weight::from_parts(7_913_000, 3912) + // Minimum execution time: 7_557_000 picoseconds. + Weight::from_parts(7_704_000, 3912) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 772_000 picoseconds. - Weight::from_parts(852_000, 0) + // Minimum execution time: 783_000 picoseconds. + Weight::from_parts(848_000, 0) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 390_000 picoseconds. - Weight::from_parts(417_000, 0) + // Minimum execution time: 397_000 picoseconds. 
+ Weight::from_parts(435_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 340_000 picoseconds. - Weight::from_parts(368_000, 0) + // Minimum execution time: 351_000 picoseconds. + Weight::from_parts(372_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 640_000 picoseconds. - Weight::from_parts(672_000, 0) + // Minimum execution time: 608_000 picoseconds. + Weight::from_parts(645_000, 0) } fn seal_gas_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 607_000 picoseconds. - Weight::from_parts(699_000, 0) + // Minimum execution time: 661_000 picoseconds. + Weight::from_parts(729_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 4_519_000 picoseconds. - Weight::from_parts(4_668_000, 0) + // Minimum execution time: 4_545_000 picoseconds. + Weight::from_parts(4_663_000, 0) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 600_000 picoseconds. - Weight::from_parts(639_000, 0) + // Minimum execution time: 614_000 picoseconds. + Weight::from_parts(641_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 579_000 picoseconds. - Weight::from_parts(609_000, 0) + // Minimum execution time: 583_000 picoseconds. + Weight::from_parts(618_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 575_000 picoseconds. - Weight::from_parts(613_000, 0) + // Minimum execution time: 583_000 picoseconds. 
+ Weight::from_parts(617_000, 0) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 554_000 picoseconds. - Weight::from_parts(622_000, 0) + // Minimum execution time: 607_000 picoseconds. + Weight::from_parts(638_000, 0) } /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) @@ -620,8 +620,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `67` // Estimated: `1552` - // Minimum execution time: 4_265_000 picoseconds. - Weight::from_parts(4_525_000, 1552) + // Minimum execution time: 4_172_000 picoseconds. + Weight::from_parts(4_408_000, 1552) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `n` is `[0, 1048572]`. @@ -629,20 +629,20 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 512_000 picoseconds. - Weight::from_parts(524_000, 0) + // Minimum execution time: 475_000 picoseconds. + Weight::from_parts(515_000, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(303, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(298, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048572]`. fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 358_000 picoseconds. - Weight::from_parts(375_000, 0) - // Standard Error: 9 - .saturating_add(Weight::from_parts(481, 0).saturating_mul(n.into())) + // Minimum execution time: 289_000 picoseconds. 
+ Weight::from_parts(357_000, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(405, 0).saturating_mul(n.into())) } /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -655,10 +655,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `319 + n * (78 ±0)` // Estimated: `3784 + n * (2553 ±0)` - // Minimum execution time: 13_267_000 picoseconds. - Weight::from_parts(15_705_698, 3784) - // Standard Error: 7_176 - .saturating_add(Weight::from_parts(3_506_583, 0).saturating_mul(n.into())) + // Minimum execution time: 13_316_000 picoseconds. + Weight::from_parts(15_855_821, 3784) + // Standard Error: 7_274 + .saturating_add(Weight::from_parts(3_447_246, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -671,8 +671,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 3_339_000 picoseconds. - Weight::from_parts(3_544_000, 1561) + // Minimum execution time: 3_468_000 picoseconds. + Weight::from_parts(3_608_000, 1561) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `System::EventTopics` (r:4 w:4) @@ -683,12 +683,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `990 + t * (2475 ±0)` - // Minimum execution time: 3_789_000 picoseconds. - Weight::from_parts(4_070_991, 990) - // Standard Error: 6_319 - .saturating_add(Weight::from_parts(2_264_078, 0).saturating_mul(t.into())) + // Minimum execution time: 3_777_000 picoseconds. 
+ Weight::from_parts(4_028_191, 990) + // Standard Error: 5_907 + .saturating_add(Weight::from_parts(2_183_733, 0).saturating_mul(t.into())) // Standard Error: 1 - .saturating_add(Weight::from_parts(20, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(18, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) @@ -698,10 +698,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 426_000 picoseconds. - Weight::from_parts(465_000, 0) + // Minimum execution time: 400_000 picoseconds. + Weight::from_parts(423_000, 0) // Standard Error: 10 - .saturating_add(Weight::from_parts(1_277, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(1_209, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -711,12 +711,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `250 + o * (1 ±0)` // Estimated: `249 + o * (1 ±0)` - // Minimum execution time: 9_148_000 picoseconds. - Weight::from_parts(8_789_382, 249) - // Standard Error: 2 - .saturating_add(Weight::from_parts(361, 0).saturating_mul(n.into())) - // Standard Error: 2 - .saturating_add(Weight::from_parts(66, 0).saturating_mul(o.into())) + // Minimum execution time: 9_033_000 picoseconds. 
+ Weight::from_parts(8_797_934, 249) + // Standard Error: 1 + .saturating_add(Weight::from_parts(257, 0).saturating_mul(n.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(51, 0).saturating_mul(o.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) @@ -728,10 +728,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 7_344_000 picoseconds. - Weight::from_parts(8_119_197, 248) + // Minimum execution time: 7_167_000 picoseconds. + Weight::from_parts(8_012_194, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(83, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(90, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -743,10 +743,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 6_763_000 picoseconds. - Weight::from_parts(7_669_781, 248) - // Standard Error: 2 - .saturating_add(Weight::from_parts(710, 0).saturating_mul(n.into())) + // Minimum execution time: 6_868_000 picoseconds. + Weight::from_parts(7_801_811, 248) + // Standard Error: 1 + .saturating_add(Weight::from_parts(605, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -757,10 +757,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 6_310_000 picoseconds. - Weight::from_parts(7_039_085, 248) + // Minimum execution time: 6_322_000 picoseconds. 
+ Weight::from_parts(7_103_552, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(84, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(79, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -771,10 +771,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 7_541_000 picoseconds. - Weight::from_parts(8_559_509, 248) - // Standard Error: 1 - .saturating_add(Weight::from_parts(711, 0).saturating_mul(n.into())) + // Minimum execution time: 7_702_000 picoseconds. + Weight::from_parts(8_746_305, 248) + // Standard Error: 2 + .saturating_add(Weight::from_parts(604, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -783,8 +783,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 8_728_000 picoseconds. - Weight::from_parts(9_035_000, 0) + // Minimum execution time: 8_851_000 picoseconds. + Weight::from_parts(9_083_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -800,12 +800,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `620 + t * (280 ±0)` // Estimated: `4085 + t * (2182 ±0)` - // Minimum execution time: 153_385_000 picoseconds. - Weight::from_parts(156_813_102, 4085) - // Standard Error: 290_142 - .saturating_add(Weight::from_parts(42_350_253, 0).saturating_mul(t.into())) + // Minimum execution time: 121_148_000 picoseconds. 
+ Weight::from_parts(119_605_377, 4085) + // Standard Error: 208_337 + .saturating_add(Weight::from_parts(43_153_338, 0).saturating_mul(t.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(4, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(5, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -820,8 +820,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 140_007_000 picoseconds. - Weight::from_parts(144_781_000, 3895) + // Minimum execution time: 108_159_000 picoseconds. + Weight::from_parts(110_027_000, 3895) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) @@ -834,19 +834,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// The range of component `t` is `[0, 1]`. /// The range of component `i` is `[0, 983040]`. /// The range of component `s` is `[0, 983040]`. - fn seal_instantiate(_t: u32, i: u32, s: u32, ) -> Weight { + fn seal_instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `676` - // Estimated: `4138` - // Minimum execution time: 2_073_851_000 picoseconds. - Weight::from_parts(2_084_321_000, 4138) - // Standard Error: 17 - .saturating_add(Weight::from_parts(986, 0).saturating_mul(i.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(1_261, 0).saturating_mul(s.into())) + // Estimated: `4127` + // Minimum execution time: 1_861_874_000 picoseconds. 
+ Weight::from_parts(1_872_926_000, 4127) + // Standard Error: 23 + .saturating_add(Weight::from_parts(557, 0).saturating_mul(i.into())) + // Standard Error: 23 + .saturating_add(Weight::from_parts(920, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -855,64 +854,64 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 902_000 picoseconds. - Weight::from_parts(10_389_779, 0) + // Minimum execution time: 878_000 picoseconds. + Weight::from_parts(10_993_950, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_422, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_325, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_477_000 picoseconds. - Weight::from_parts(12_143_874, 0) + // Minimum execution time: 1_261_000 picoseconds. + Weight::from_parts(9_759_497, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(3_683, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(3_594, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 778_000 picoseconds. - Weight::from_parts(8_762_544, 0) + // Minimum execution time: 726_000 picoseconds. + Weight::from_parts(9_795_728, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_557, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_455, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. 
fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 748_000 picoseconds. - Weight::from_parts(10_364_578, 0) + // Minimum execution time: 739_000 picoseconds. + Weight::from_parts(9_701_202, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_550, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_459, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 43_388_000 picoseconds. - Weight::from_parts(42_346_211, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(5_103, 0).saturating_mul(n.into())) + // Minimum execution time: 43_309_000 picoseconds. + Weight::from_parts(41_405_949, 0) + // Standard Error: 8 + .saturating_add(Weight::from_parts(5_336, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 46_825_000 picoseconds. - Weight::from_parts(48_073_000, 0) + // Minimum execution time: 47_880_000 picoseconds. + Weight::from_parts(49_025_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_864_000 picoseconds. - Weight::from_parts(13_065_000, 0) + // Minimum execution time: 13_462_000 picoseconds. + Weight::from_parts(13_631_000, 0) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) @@ -922,8 +921,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 18_406_000 picoseconds. - Weight::from_parts(19_112_000, 3895) + // Minimum execution time: 17_978_000 picoseconds. 
+ Weight::from_parts(18_578_000, 3895) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -933,8 +932,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3820` - // Minimum execution time: 8_441_000 picoseconds. - Weight::from_parts(8_710_000, 3820) + // Minimum execution time: 8_384_000 picoseconds. + Weight::from_parts(8_687_000, 3820) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -944,8 +943,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3558` - // Minimum execution time: 7_525_000 picoseconds. - Weight::from_parts(7_819_000, 3558) + // Minimum execution time: 7_547_000 picoseconds. + Weight::from_parts(7_935_000, 3558) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -953,15 +952,15 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 313_000 picoseconds. - Weight::from_parts(375_000, 0) + // Minimum execution time: 331_000 picoseconds. + Weight::from_parts(363_000, 0) } fn seal_account_reentrance_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 308_000 picoseconds. - Weight::from_parts(334_000, 0) + // Minimum execution time: 349_000 picoseconds. + Weight::from_parts(365_000, 0) } /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -969,8 +968,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1704` - // Minimum execution time: 2_775_000 picoseconds. - Weight::from_parts(3_043_000, 1704) + // Minimum execution time: 2_814_000 picoseconds. 
+ Weight::from_parts(3_038_000, 1704) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 5000]`. @@ -978,10 +977,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 925_000 picoseconds. - Weight::from_parts(443_142, 0) - // Standard Error: 19 - .saturating_add(Weight::from_parts(15_316, 0).saturating_mul(r.into())) + // Minimum execution time: 693_000 picoseconds. + Weight::from_parts(665_431, 0) + // Standard Error: 12 + .saturating_add(Weight::from_parts(7_030, 0).saturating_mul(r.into())) } } @@ -993,8 +992,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 1_960_000 picoseconds. - Weight::from_parts(2_043_000, 1627) + // Minimum execution time: 1_896_000 picoseconds. + Weight::from_parts(1_990_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1004,10 +1003,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 11_574_000 picoseconds. - Weight::from_parts(11_846_000, 442) - // Standard Error: 1_342 - .saturating_add(Weight::from_parts(1_113_844, 0).saturating_mul(k.into())) + // Minimum execution time: 11_142_000 picoseconds. + Weight::from_parts(11_578_000, 442) + // Standard Error: 1_557 + .saturating_add(Weight::from_parts(1_165_198, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -1021,10 +1020,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 7_709_000 picoseconds. 
- Weight::from_parts(5_068_795, 6149) + // Minimum execution time: 7_649_000 picoseconds. + Weight::from_parts(4_827_445, 6149) // Standard Error: 5 - .saturating_add(Weight::from_parts(1_689, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_630, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1037,8 +1036,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_477_000 picoseconds. - Weight::from_parts(17_313_000, 6450) + // Minimum execution time: 16_096_000 picoseconds. + Weight::from_parts(16_937_000, 6450) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1051,10 +1050,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_111_000 picoseconds. - Weight::from_parts(3_198_000, 3635) - // Standard Error: 593 - .saturating_add(Weight::from_parts(1_081_746, 0).saturating_mul(k.into())) + // Minimum execution time: 3_131_000 picoseconds. + Weight::from_parts(3_209_000, 3635) + // Standard Error: 481 + .saturating_add(Weight::from_parts(1_087_506, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -1073,10 +1072,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `325 + c * (1 ±0)` // Estimated: `6263 + c * (1 ±0)` - // Minimum execution time: 15_390_000 picoseconds. - Weight::from_parts(16_157_208, 6263) + // Minimum execution time: 15_289_000 picoseconds. 
+ Weight::from_parts(16_157_168, 6263) // Standard Error: 1 - .saturating_add(Weight::from_parts(501, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(395, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1087,8 +1086,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 12_045_000 picoseconds. - Weight::from_parts(12_892_000, 6380) + // Minimum execution time: 12_312_000 picoseconds. + Weight::from_parts(12_650_000, 6380) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1102,8 +1101,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 47_250_000 picoseconds. - Weight::from_parts(49_231_000, 6292) + // Minimum execution time: 47_239_000 picoseconds. + Weight::from_parts(48_617_000, 6292) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1115,8 +1114,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 53_722_000 picoseconds. - Weight::from_parts(55_268_000, 6534) + // Minimum execution time: 52_084_000 picoseconds. + Weight::from_parts(53_838_000, 6534) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1126,8 +1125,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 11_707_000 picoseconds. - Weight::from_parts(12_305_000, 6349) + // Minimum execution time: 11_785_000 picoseconds. 
+ Weight::from_parts(12_284_000, 6349) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1137,8 +1136,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_129_000 picoseconds. - Weight::from_parts(2_197_000, 1627) + // Minimum execution time: 2_136_000 picoseconds. + Weight::from_parts(2_233_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1150,8 +1149,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 11_145_000 picoseconds. - Weight::from_parts(11_445_000, 3631) + // Minimum execution time: 10_957_000 picoseconds. + Weight::from_parts(11_314_000, 3631) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1161,8 +1160,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_463_000 picoseconds. - Weight::from_parts(4_585_000, 3607) + // Minimum execution time: 4_354_000 picoseconds. + Weight::from_parts(4_613_000, 3607) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1173,8 +1172,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 5_639_000 picoseconds. - Weight::from_parts(5_865_000, 3632) + // Minimum execution time: 5_541_000 picoseconds. 
+ Weight::from_parts(5_790_000, 3632) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1185,8 +1184,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 5_540_000 picoseconds. - Weight::from_parts(5_954_000, 3607) + // Minimum execution time: 5_502_000 picoseconds. + Weight::from_parts(5_701_000, 3607) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1207,10 +1206,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `801 + c * (1 ±0)` // Estimated: `4264 + c * (1 ±0)` - // Minimum execution time: 353_812_000 picoseconds. - Weight::from_parts(337_889_300, 4264) - // Standard Error: 94 - .saturating_add(Weight::from_parts(34_200, 0).saturating_mul(c.into())) + // Minimum execution time: 247_884_000 picoseconds. + Weight::from_parts(265_795_781, 4264) + // Standard Error: 4 + .saturating_add(Weight::from_parts(724, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1238,14 +1237,14 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `323` // Estimated: `6262` - // Minimum execution time: 4_499_852_000 picoseconds. - Weight::from_parts(135_265_841, 6262) - // Standard Error: 247 - .saturating_add(Weight::from_parts(72_051, 0).saturating_mul(c.into())) - // Standard Error: 29 - .saturating_add(Weight::from_parts(2_180, 0).saturating_mul(i.into())) - // Standard Error: 29 - .saturating_add(Weight::from_parts(2_195, 0).saturating_mul(s.into())) + // Minimum execution time: 4_500_184_000 picoseconds. 
+ Weight::from_parts(160_729_258, 6262) + // Standard Error: 143 + .saturating_add(Weight::from_parts(52_809, 0).saturating_mul(c.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(2_173, 0).saturating_mul(i.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(2_165, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -1271,12 +1270,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `560` // Estimated: `4029` - // Minimum execution time: 2_376_075_000 picoseconds. - Weight::from_parts(2_387_885_000, 4029) + // Minimum execution time: 2_219_163_000 picoseconds. + Weight::from_parts(2_236_918_000, 4029) // Standard Error: 32 - .saturating_add(Weight::from_parts(1_036, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(937, 0).saturating_mul(i.into())) // Standard Error: 32 - .saturating_add(Weight::from_parts(936, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(938, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1296,8 +1295,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `826` // Estimated: `4291` - // Minimum execution time: 197_222_000 picoseconds. - Weight::from_parts(203_633_000, 4291) + // Minimum execution time: 164_801_000 picoseconds. + Weight::from_parts(167_250_000, 4291) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1314,10 +1313,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 325_788_000 picoseconds. - Weight::from_parts(335_491_760, 3607) - // Standard Error: 50 - .saturating_add(Weight::from_parts(35_337, 0).saturating_mul(c.into())) + // Minimum execution time: 225_207_000 picoseconds. 
+ Weight::from_parts(263_665_658, 3607) + // Standard Error: 47 + .saturating_add(Weight::from_parts(50_732, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1334,10 +1333,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 336_010_000 picoseconds. - Weight::from_parts(348_030_264, 3607) - // Standard Error: 43 - .saturating_add(Weight::from_parts(35_696, 0).saturating_mul(c.into())) + // Minimum execution time: 230_718_000 picoseconds. + Weight::from_parts(258_359_271, 3607) + // Standard Error: 47 + .saturating_add(Weight::from_parts(51_014, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1353,8 +1352,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 40_118_000 picoseconds. - Weight::from_parts(40_987_000, 3780) + // Minimum execution time: 39_668_000 picoseconds. + Weight::from_parts(41_031_000, 3780) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1368,8 +1367,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `552` // Estimated: `6492` - // Minimum execution time: 25_236_000 picoseconds. - Weight::from_parts(26_450_000, 6492) + // Minimum execution time: 25_890_000 picoseconds. + Weight::from_parts(26_603_000, 6492) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1378,17 +1377,17 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_200_000 picoseconds. - Weight::from_parts(9_773_983, 0) + // Minimum execution time: 8_269_000 picoseconds. 
+ Weight::from_parts(9_227_069, 0) // Standard Error: 74 - .saturating_add(Weight::from_parts(72_257, 0).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(51_396, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 606_000 picoseconds. - Weight::from_parts(672_000, 0) + // Minimum execution time: 602_000 picoseconds. + Weight::from_parts(664_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -1396,8 +1395,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `354` // Estimated: `3819` - // Minimum execution time: 6_260_000 picoseconds. - Weight::from_parts(6_645_000, 3819) + // Minimum execution time: 6_131_000 picoseconds. + Weight::from_parts(6_468_000, 3819) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) @@ -1406,79 +1405,79 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `447` // Estimated: `3912` - // Minimum execution time: 7_599_000 picoseconds. - Weight::from_parts(7_913_000, 3912) + // Minimum execution time: 7_557_000 picoseconds. + Weight::from_parts(7_704_000, 3912) .saturating_add(RocksDbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 772_000 picoseconds. - Weight::from_parts(852_000, 0) + // Minimum execution time: 783_000 picoseconds. + Weight::from_parts(848_000, 0) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 390_000 picoseconds. - Weight::from_parts(417_000, 0) + // Minimum execution time: 397_000 picoseconds. 
+ Weight::from_parts(435_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 340_000 picoseconds. - Weight::from_parts(368_000, 0) + // Minimum execution time: 351_000 picoseconds. + Weight::from_parts(372_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 640_000 picoseconds. - Weight::from_parts(672_000, 0) + // Minimum execution time: 608_000 picoseconds. + Weight::from_parts(645_000, 0) } fn seal_gas_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 607_000 picoseconds. - Weight::from_parts(699_000, 0) + // Minimum execution time: 661_000 picoseconds. + Weight::from_parts(729_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 4_519_000 picoseconds. - Weight::from_parts(4_668_000, 0) + // Minimum execution time: 4_545_000 picoseconds. + Weight::from_parts(4_663_000, 0) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 600_000 picoseconds. - Weight::from_parts(639_000, 0) + // Minimum execution time: 614_000 picoseconds. + Weight::from_parts(641_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 579_000 picoseconds. - Weight::from_parts(609_000, 0) + // Minimum execution time: 583_000 picoseconds. + Weight::from_parts(618_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 575_000 picoseconds. - Weight::from_parts(613_000, 0) + // Minimum execution time: 583_000 picoseconds. 
+ Weight::from_parts(617_000, 0) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 554_000 picoseconds. - Weight::from_parts(622_000, 0) + // Minimum execution time: 607_000 picoseconds. + Weight::from_parts(638_000, 0) } /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) @@ -1486,8 +1485,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `67` // Estimated: `1552` - // Minimum execution time: 4_265_000 picoseconds. - Weight::from_parts(4_525_000, 1552) + // Minimum execution time: 4_172_000 picoseconds. + Weight::from_parts(4_408_000, 1552) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `n` is `[0, 1048572]`. @@ -1495,20 +1494,20 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 512_000 picoseconds. - Weight::from_parts(524_000, 0) + // Minimum execution time: 475_000 picoseconds. + Weight::from_parts(515_000, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(303, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(298, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048572]`. fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 358_000 picoseconds. - Weight::from_parts(375_000, 0) - // Standard Error: 9 - .saturating_add(Weight::from_parts(481, 0).saturating_mul(n.into())) + // Minimum execution time: 289_000 picoseconds. 
+ Weight::from_parts(357_000, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(405, 0).saturating_mul(n.into())) } /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -1521,10 +1520,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `319 + n * (78 ±0)` // Estimated: `3784 + n * (2553 ±0)` - // Minimum execution time: 13_267_000 picoseconds. - Weight::from_parts(15_705_698, 3784) - // Standard Error: 7_176 - .saturating_add(Weight::from_parts(3_506_583, 0).saturating_mul(n.into())) + // Minimum execution time: 13_316_000 picoseconds. + Weight::from_parts(15_855_821, 3784) + // Standard Error: 7_274 + .saturating_add(Weight::from_parts(3_447_246, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -1537,8 +1536,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 3_339_000 picoseconds. - Weight::from_parts(3_544_000, 1561) + // Minimum execution time: 3_468_000 picoseconds. + Weight::from_parts(3_608_000, 1561) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `System::EventTopics` (r:4 w:4) @@ -1549,12 +1548,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `990 + t * (2475 ±0)` - // Minimum execution time: 3_789_000 picoseconds. - Weight::from_parts(4_070_991, 990) - // Standard Error: 6_319 - .saturating_add(Weight::from_parts(2_264_078, 0).saturating_mul(t.into())) + // Minimum execution time: 3_777_000 picoseconds. 
+ Weight::from_parts(4_028_191, 990) + // Standard Error: 5_907 + .saturating_add(Weight::from_parts(2_183_733, 0).saturating_mul(t.into())) // Standard Error: 1 - .saturating_add(Weight::from_parts(20, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(18, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) @@ -1564,10 +1563,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 426_000 picoseconds. - Weight::from_parts(465_000, 0) + // Minimum execution time: 400_000 picoseconds. + Weight::from_parts(423_000, 0) // Standard Error: 10 - .saturating_add(Weight::from_parts(1_277, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(1_209, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -1577,12 +1576,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `250 + o * (1 ±0)` // Estimated: `249 + o * (1 ±0)` - // Minimum execution time: 9_148_000 picoseconds. - Weight::from_parts(8_789_382, 249) - // Standard Error: 2 - .saturating_add(Weight::from_parts(361, 0).saturating_mul(n.into())) - // Standard Error: 2 - .saturating_add(Weight::from_parts(66, 0).saturating_mul(o.into())) + // Minimum execution time: 9_033_000 picoseconds. 
+ Weight::from_parts(8_797_934, 249) + // Standard Error: 1 + .saturating_add(Weight::from_parts(257, 0).saturating_mul(n.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(51, 0).saturating_mul(o.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) @@ -1594,10 +1593,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 7_344_000 picoseconds. - Weight::from_parts(8_119_197, 248) + // Minimum execution time: 7_167_000 picoseconds. + Weight::from_parts(8_012_194, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(83, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(90, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1609,10 +1608,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 6_763_000 picoseconds. - Weight::from_parts(7_669_781, 248) - // Standard Error: 2 - .saturating_add(Weight::from_parts(710, 0).saturating_mul(n.into())) + // Minimum execution time: 6_868_000 picoseconds. + Weight::from_parts(7_801_811, 248) + // Standard Error: 1 + .saturating_add(Weight::from_parts(605, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1623,10 +1622,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 6_310_000 picoseconds. - Weight::from_parts(7_039_085, 248) + // Minimum execution time: 6_322_000 picoseconds. 
+ Weight::from_parts(7_103_552, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(84, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(79, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1637,10 +1636,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 7_541_000 picoseconds. - Weight::from_parts(8_559_509, 248) - // Standard Error: 1 - .saturating_add(Weight::from_parts(711, 0).saturating_mul(n.into())) + // Minimum execution time: 7_702_000 picoseconds. + Weight::from_parts(8_746_305, 248) + // Standard Error: 2 + .saturating_add(Weight::from_parts(604, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1649,8 +1648,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 8_728_000 picoseconds. - Weight::from_parts(9_035_000, 0) + // Minimum execution time: 8_851_000 picoseconds. + Weight::from_parts(9_083_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -1666,12 +1665,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `620 + t * (280 ±0)` // Estimated: `4085 + t * (2182 ±0)` - // Minimum execution time: 153_385_000 picoseconds. - Weight::from_parts(156_813_102, 4085) - // Standard Error: 290_142 - .saturating_add(Weight::from_parts(42_350_253, 0).saturating_mul(t.into())) + // Minimum execution time: 121_148_000 picoseconds. 
+ Weight::from_parts(119_605_377, 4085) + // Standard Error: 208_337 + .saturating_add(Weight::from_parts(43_153_338, 0).saturating_mul(t.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(4, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(5, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -1686,8 +1685,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 140_007_000 picoseconds. - Weight::from_parts(144_781_000, 3895) + // Minimum execution time: 108_159_000 picoseconds. + Weight::from_parts(110_027_000, 3895) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) @@ -1700,19 +1699,18 @@ impl WeightInfo for () { /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// The range of component `t` is `[0, 1]`. /// The range of component `i` is `[0, 983040]`. /// The range of component `s` is `[0, 983040]`. - fn seal_instantiate(_t: u32, i: u32, s: u32, ) -> Weight { + fn seal_instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `676` - // Estimated: `4138` - // Minimum execution time: 2_073_851_000 picoseconds. - Weight::from_parts(2_084_321_000, 4138) - // Standard Error: 17 - .saturating_add(Weight::from_parts(986, 0).saturating_mul(i.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(1_261, 0).saturating_mul(s.into())) + // Estimated: `4127` + // Minimum execution time: 1_861_874_000 picoseconds. 
+ Weight::from_parts(1_872_926_000, 4127) + // Standard Error: 23 + .saturating_add(Weight::from_parts(557, 0).saturating_mul(i.into())) + // Standard Error: 23 + .saturating_add(Weight::from_parts(920, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1721,64 +1719,64 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 902_000 picoseconds. - Weight::from_parts(10_389_779, 0) + // Minimum execution time: 878_000 picoseconds. + Weight::from_parts(10_993_950, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_422, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_325, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_477_000 picoseconds. - Weight::from_parts(12_143_874, 0) + // Minimum execution time: 1_261_000 picoseconds. + Weight::from_parts(9_759_497, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(3_683, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(3_594, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 778_000 picoseconds. - Weight::from_parts(8_762_544, 0) + // Minimum execution time: 726_000 picoseconds. + Weight::from_parts(9_795_728, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_557, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_455, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. 
fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 748_000 picoseconds. - Weight::from_parts(10_364_578, 0) + // Minimum execution time: 739_000 picoseconds. + Weight::from_parts(9_701_202, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_550, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_459, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 43_388_000 picoseconds. - Weight::from_parts(42_346_211, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(5_103, 0).saturating_mul(n.into())) + // Minimum execution time: 43_309_000 picoseconds. + Weight::from_parts(41_405_949, 0) + // Standard Error: 8 + .saturating_add(Weight::from_parts(5_336, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 46_825_000 picoseconds. - Weight::from_parts(48_073_000, 0) + // Minimum execution time: 47_880_000 picoseconds. + Weight::from_parts(49_025_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_864_000 picoseconds. - Weight::from_parts(13_065_000, 0) + // Minimum execution time: 13_462_000 picoseconds. + Weight::from_parts(13_631_000, 0) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) @@ -1788,8 +1786,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 18_406_000 picoseconds. - Weight::from_parts(19_112_000, 3895) + // Minimum execution time: 17_978_000 picoseconds. 
+ Weight::from_parts(18_578_000, 3895) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1799,8 +1797,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3820` - // Minimum execution time: 8_441_000 picoseconds. - Weight::from_parts(8_710_000, 3820) + // Minimum execution time: 8_384_000 picoseconds. + Weight::from_parts(8_687_000, 3820) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1810,8 +1808,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3558` - // Minimum execution time: 7_525_000 picoseconds. - Weight::from_parts(7_819_000, 3558) + // Minimum execution time: 7_547_000 picoseconds. + Weight::from_parts(7_935_000, 3558) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1819,15 +1817,15 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 313_000 picoseconds. - Weight::from_parts(375_000, 0) + // Minimum execution time: 331_000 picoseconds. + Weight::from_parts(363_000, 0) } fn seal_account_reentrance_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 308_000 picoseconds. - Weight::from_parts(334_000, 0) + // Minimum execution time: 349_000 picoseconds. + Weight::from_parts(365_000, 0) } /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -1835,8 +1833,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1704` - // Minimum execution time: 2_775_000 picoseconds. - Weight::from_parts(3_043_000, 1704) + // Minimum execution time: 2_814_000 picoseconds. 
+ Weight::from_parts(3_038_000, 1704) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 5000]`. @@ -1844,9 +1842,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 925_000 picoseconds. - Weight::from_parts(443_142, 0) - // Standard Error: 19 - .saturating_add(Weight::from_parts(15_316, 0).saturating_mul(r.into())) + // Minimum execution time: 693_000 picoseconds. + Weight::from_parts(665_431, 0) + // Standard Error: 12 + .saturating_add(Weight::from_parts(7_030, 0).saturating_mul(r.into())) } } From 07cfcf0b3c9df971c673162b9d16cb5c17fbe97d Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Sat, 8 Jun 2024 10:48:42 +0300 Subject: [PATCH 15/52] frame/proc-macro: Refactor code for better readability (#4712) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Small refactoring PR to improve the readability of the proc macros. - small improvement in docs - use new `let Some(..) 
else` expression - removed extra indentations by early returns Discovered during metadata v16 poc, extracted from: https://github.com/paritytech/polkadot-sdk/pull/4358 --------- Signed-off-by: Alexandru Vasile Co-authored-by: Bastian Köcher Co-authored-by: command-bot <> Co-authored-by: gupnik --- .../procedural/src/pallet/expand/constants.rs | 3 +- .../procedural/src/pallet/parse/config.rs | 128 +++++++++--------- .../procedural/src/pallet/parse/helper.rs | 16 +-- 3 files changed, 70 insertions(+), 77 deletions(-) diff --git a/substrate/frame/support/procedural/src/pallet/expand/constants.rs b/substrate/frame/support/procedural/src/pallet/expand/constants.rs index 57fa8b7f3cd9..d7fbb5a71897 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/constants.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/constants.rs @@ -30,8 +30,7 @@ struct ConstDef { pub metadata_name: Option, } -/// -/// * Impl fn module_constant_metadata for pallet. +/// Implement the `pallet_constants_metadata` function for the pallet. 
pub fn expand_constants(def: &mut Def) -> proc_macro2::TokenStream { let frame_support = &def.frame_support; let type_impl_gen = &def.type_impl_generics(proc_macro2::Span::call_site()); diff --git a/substrate/frame/support/procedural/src/pallet/parse/config.rs b/substrate/frame/support/procedural/src/pallet/parse/config.rs index 406072df4b9d..eaeaab247588 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/config.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/config.rs @@ -94,30 +94,26 @@ impl TryFrom<&syn::TraitItemType> for ConstMetadataDef { let bound = trait_ty .bounds .iter() - .find_map(|b| { - if let syn::TypeParamBound::Trait(tb) = b { - tb.path - .segments - .last() - .and_then(|s| if s.ident == "Get" { Some(s) } else { None }) - } else { - None - } + .find_map(|param_bound| { + let syn::TypeParamBound::Trait(trait_bound) = param_bound else { return None }; + + trait_bound.path.segments.last().and_then(|s| (s.ident == "Get").then(|| s)) }) .ok_or_else(|| err(trait_ty.span(), "`Get` trait bound not found"))?; - let type_arg = if let syn::PathArguments::AngleBracketed(ref ab) = bound.arguments { - if ab.args.len() == 1 { - if let syn::GenericArgument::Type(ref ty) = ab.args[0] { - Ok(ty) - } else { - Err(err(ab.args[0].span(), "Expected a type argument")) - } - } else { - Err(err(bound.span(), "Expected a single type argument")) - } - } else { - Err(err(bound.span(), "Expected trait generic args")) - }?; + + let syn::PathArguments::AngleBracketed(ref ab) = bound.arguments else { + return Err(err(bound.span(), "Expected trait generic args")) + }; + + // Only one type argument is expected. 
+ if ab.args.len() != 1 { + return Err(err(bound.span(), "Expected a single type argument")) + } + + let syn::GenericArgument::Type(ref type_arg) = ab.args[0] else { + return Err(err(ab.args[0].span(), "Expected a type argument")) + }; + let type_ = syn::parse2::(replace_self_by_t(type_arg.to_token_stream())) .expect("Internal error: replacing `Self` by `T` should result in valid type"); @@ -223,55 +219,55 @@ fn check_event_type( trait_item: &syn::TraitItem, trait_has_instance: bool, ) -> syn::Result { - if let syn::TraitItem::Type(type_) = trait_item { - if type_.ident == "RuntimeEvent" { - // Check event has no generics - if !type_.generics.params.is_empty() || type_.generics.where_clause.is_some() { - let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must have\ - no generics nor where_clause"; - return Err(syn::Error::new(trait_item.span(), msg)) - } + let syn::TraitItem::Type(type_) = trait_item else { return Ok(false) }; - // Check bound contains IsType and From - let has_is_type_bound = type_.bounds.iter().any(|s| { - syn::parse2::(s.to_token_stream()) - .map_or(false, |b| has_expected_system_config(b.0, frame_system)) - }); - - if !has_is_type_bound { - let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must \ - bound: `IsType<::RuntimeEvent>`".to_string(); - return Err(syn::Error::new(type_.span(), msg)) - } + if type_.ident != "RuntimeEvent" { + return Ok(false) + } - let from_event_bound = type_ - .bounds - .iter() - .find_map(|s| syn::parse2::(s.to_token_stream()).ok()); + // Check event has no generics + if !type_.generics.params.is_empty() || type_.generics.where_clause.is_some() { + let msg = + "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must have\ + no generics nor where_clause"; + return Err(syn::Error::new(trait_item.span(), msg)) + } - let from_event_bound = if let Some(b) = from_event_bound { - b - } else { - let msg = "Invalid `type 
RuntimeEvent`, associated type `RuntimeEvent` is reserved and must \ - bound: `From` or `From>` or `From>`"; - return Err(syn::Error::new(type_.span(), msg)) - }; + // Check bound contains IsType and From + let has_is_type_bound = type_.bounds.iter().any(|s| { + syn::parse2::(s.to_token_stream()) + .map_or(false, |b| has_expected_system_config(b.0, frame_system)) + }); + + if !has_is_type_bound { + let msg = + "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must \ + bound: `IsType<::RuntimeEvent>`" + .to_string(); + return Err(syn::Error::new(type_.span(), msg)) + } - if from_event_bound.is_generic && (from_event_bound.has_instance != trait_has_instance) - { - let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` bounds inconsistent \ + let from_event_bound = type_ + .bounds + .iter() + .find_map(|s| syn::parse2::(s.to_token_stream()).ok()); + + let Some(from_event_bound) = from_event_bound else { + let msg = + "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must \ + bound: `From` or `From>` or `From>`"; + return Err(syn::Error::new(type_.span(), msg)) + }; + + if from_event_bound.is_generic && (from_event_bound.has_instance != trait_has_instance) { + let msg = + "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` bounds inconsistent \ `From`. 
Config and generic Event must be both with instance or \ without instance"; - return Err(syn::Error::new(type_.span(), msg)) - } - - Ok(true) - } else { - Ok(false) - } - } else { - Ok(false) + return Err(syn::Error::new(type_.span(), msg)) } + + Ok(true) } /// Check that the path to `frame_system::Config` is valid, this is that the path is just @@ -334,9 +330,7 @@ impl ConfigDef { item: &mut syn::Item, enable_default: bool, ) -> syn::Result { - let item = if let syn::Item::Trait(item) = item { - item - } else { + let syn::Item::Trait(item) = item else { let msg = "Invalid pallet::config, expected trait definition"; return Err(syn::Error::new(item.span(), msg)) }; diff --git a/substrate/frame/support/procedural/src/pallet/parse/helper.rs b/substrate/frame/support/procedural/src/pallet/parse/helper.rs index 3187c9139c8f..d4f58a4c56df 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/helper.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/helper.rs @@ -55,16 +55,16 @@ pub(crate) fn take_first_item_pallet_attr( where Attr: syn::parse::Parse, { - let attrs = if let Some(attrs) = item.mut_item_attrs() { attrs } else { return Ok(None) }; + let Some(attrs) = item.mut_item_attrs() else { return Ok(None) }; - if let Some(index) = attrs.iter().position(|attr| { + let Some(index) = attrs.iter().position(|attr| { attr.path().segments.first().map_or(false, |segment| segment.ident == "pallet") - }) { - let pallet_attr = attrs.remove(index); - Ok(Some(syn::parse2(pallet_attr.into_token_stream())?)) - } else { - Ok(None) - } + }) else { + return Ok(None) + }; + + let pallet_attr = attrs.remove(index); + Ok(Some(syn::parse2(pallet_attr.into_token_stream())?)) } /// Take all the pallet attributes (e.g. 
attribute like `#[pallet..]`) and decode them to `Attr` From cdb297b15ad9c1d952c0501afaf6b764e5fd147c Mon Sep 17 00:00:00 2001 From: batman Date: Sat, 8 Jun 2024 19:37:20 +0800 Subject: [PATCH 16/52] Update README.md to move the PSVM link under a "Tooling" section under the "Releases" section (#4734) This update implements the suggestion from @kianenigma mentioned in https://github.com/paritytech/polkadot-sdk/pull/4718#issuecomment-2153777367 Replaces the "Other useful resources and tooling" section at the bottom with a new (nicer) "Tooling" section just under the "Releases" section. --- README.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 50972da058af..0b027b2958c1 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,12 @@ non-breaking features on a **two week** cadence. `nightly` releases are released every night from the `master` branch, potentially with breaking changes. They have pre-release version numbers in the format `major.0.0-nightlyYYMMDD`. +## 🛠️ Tooling + +[Polkadot SDK Version Manager](https://github.com/paritytech/psvm): +A simple tool to manage and update the Polkadot SDK dependencies in any Cargo.toml file. +It will automatically update the Polkadot SDK dependencies to their correct crates.io version. + ## 🔐 Security The security policy and procedures can be found in @@ -81,7 +87,3 @@ fellowship, this separation, the RFC process This repository is the amalgamation of 3 separate repositories that used to make up Polkadot SDK, namely Substrate, Polkadot and Cumulus. Read more about the merge and its history [here](https://polkadot-public.notion.site/Polkadot-SDK-FAQ-fbc4cecc2c46443fb37b9eeec2f0d85f). 
- -## Other useful resources and tooling - -* A simple tool to manage and update the Polkadot SDK dependencies (https://github.com/paritytech/psvm) From 497d64ef968d0e4d57bb5cd1fdf487204abbfdbb Mon Sep 17 00:00:00 2001 From: Przemek Rzad Date: Mon, 10 Jun 2024 11:24:06 +0200 Subject: [PATCH 17/52] Revamp the Readme of the parachain template (#4713) - Addresses [this](https://github.com/paritytech/polkadot-sdk/issues/3155#issuecomment-2126934939). - Revamps the Readme, very similar to [the minimal template](https://github.com/paritytech/polkadot-sdk/pull/4649). - Changed `polkadot-launch` to `zombienet`, with instructions how to run it. - See the [rendered version](https://github.com/paritytech/polkadot-sdk/blob/rzadp/parachain-template-readme/templates/parachain/README.md). --- templates/minimal/README.md | 31 +++-- templates/parachain/README.md | 131 +++++++++++++++--- templates/parachain/node/README.md | 18 +++ templates/parachain/pallets/README.md | 13 ++ .../parachain/pallets/template/README.md | 1 - .../parachain/polkadot-launch/config.json | 39 ------ templates/parachain/runtime/README.md | 10 ++ templates/parachain/zombienet.toml | 16 +++ 8 files changed, 188 insertions(+), 71 deletions(-) create mode 100644 templates/parachain/node/README.md create mode 100644 templates/parachain/pallets/README.md delete mode 100644 templates/parachain/pallets/template/README.md delete mode 100644 templates/parachain/polkadot-launch/config.json create mode 100644 templates/parachain/runtime/README.md create mode 100644 templates/parachain/zombienet.toml diff --git a/templates/minimal/README.md b/templates/minimal/README.md index 583ba6242040..f00bfd4d4877 100644 --- a/templates/minimal/README.md +++ b/templates/minimal/README.md @@ -11,12 +11,13 @@ -🤏 This template is a minimal (in terms of complexity and the number of components) template for building a blockchain node. 
+* 🤏 This template is a minimal (in terms of complexity and the number of components) +template for building a blockchain node. -🔧 Its runtime is configured of a single custom pallet as a starting point, and a handful of ready-made pallets +* 🔧 Its runtime is configured of a single custom pallet as a starting point, and a handful of ready-made pallets such as a [Balances pallet](https://paritytech.github.io/polkadot-sdk/master/pallet_balances/index.html). -👤 The template has no consensus configured - it is best for experimenting with a single node network. +* 👤 The template has no consensus configured - it is best for experimenting with a single node network. ## Template Structure @@ -28,12 +29,12 @@ A Polkadot SDK based project such as this one consists of: ## Getting Started -🦀 The template is using the Rust language. +* 🦀 The template is using the Rust language. -👉 Check the +* 👉 Check the [Rust installation instructions](https://www.rust-lang.org/tools/install) for your system. -🛠️ Depending on your operating system and Rust version, there might be additional +* 🛠️ Depending on your operating system and Rust version, there might be additional packages required to compile this template - please take note of the Rust compiler output. ### Build @@ -69,32 +70,32 @@ Development chains: ### Connect with the Polkadot-JS Apps Front-End -🌐 You can interact with your local node using the +* 🌐 You can interact with your local node using the hosted version of the [Polkadot/Substrate Portal](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9944). -🪐 A hosted version is also +* 🪐 A hosted version is also available on [IPFS](https://dotapps.io/). -🧑‍🔧 You can also find the source code and instructions for hosting your own instance in the +* 🧑‍🔧 You can also find the source code and instructions for hosting your own instance in the [`polkadot-js/apps`](https://github.com/polkadot-js/apps) repository. 
## Contributing -🔄 This template is automatically updated after releases in the main [Polkadot SDK monorepo](https://github.com/paritytech/polkadot-sdk). +* 🔄 This template is automatically updated after releases in the main [Polkadot SDK monorepo](https://github.com/paritytech/polkadot-sdk). -➡️ Any pull requests should be directed to this [source](https://github.com/paritytech/polkadot-sdk/tree/master/templates/minimal). +* ➡️ Any pull requests should be directed to this [source](https://github.com/paritytech/polkadot-sdk/tree/master/templates/minimal). -😇 Please refer to the monorepo's +* 😇 Please refer to the monorepo's [contribution guidelines](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md) and [Code of Conduct](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CODE_OF_CONDUCT.md). ## Getting Help -🧑‍🏫 To learn about Polkadot in general, [Polkadot.network](https://polkadot.network/) website is a good starting point. +* 🧑‍🏫 To learn about Polkadot in general, [Polkadot.network](https://polkadot.network/) website is a good starting point. -🧑‍🔧 For technical introduction, [here](https://github.com/paritytech/polkadot-sdk#-documentation) are +* 🧑‍🔧 For technical introduction, [here](https://github.com/paritytech/polkadot-sdk#-documentation) are the Polkadot SDK documentation resources. -👥 Additionally, there are [GitHub issues](https://github.com/paritytech/polkadot-sdk/issues) and +* 👥 Additionally, there are [GitHub issues](https://github.com/paritytech/polkadot-sdk/issues) and [Substrate StackExchange](https://substrate.stackexchange.com/). diff --git a/templates/parachain/README.md b/templates/parachain/README.md index 01e9cc26d9af..a6ac91799b77 100644 --- a/templates/parachain/README.md +++ b/templates/parachain/README.md @@ -1,22 +1,121 @@ -# Substrate Cumulus Parachain Template +
-A new [Cumulus](https://github.com/paritytech/polkadot-sdk/tree/master/cumulus)-based Substrate node, ready for hacking ☁️.. +# Polkadot SDK's Parachain Template -This project is originally a fork of the -[Substrate Node Template](https://github.com/substrate-developer-hub/substrate-node-template) -modified to include dependencies required for registering this node as a **parathread** or -**parachain** to a **relay chain**. +Polkadot SDK Logo +Polkadot SDK Logo -The stand-alone version of this template is hosted on the -[Substrate Devhub Parachain Template](https://github.com/substrate-developer-hub/substrate-parachain-template/) -for each release of Polkadot. It is generated directly to the upstream -[Parachain Template in Cumulus](https://github.com/paritytech/polkadot-sdk/tree/master/cumulus/parachain-template) -at each release branch using the -[Substrate Template Generator](https://github.com/paritytech/substrate-template-generator/). +> This is a template for creating a [parachain](https://wiki.polkadot.network/docs/learn-parachains) based on Polkadot SDK. +> +> This template is automatically updated after releases in the main [Polkadot SDK monorepo](https://github.com/paritytech/polkadot-sdk). -👉 Learn more about parachains [here](https://wiki.polkadot.network/docs/learn-parachains), and -parathreads [here](https://wiki.polkadot.network/docs/learn-parathreads). +
+* ⏫ This template provides a starting point to build a [parachain](https://wiki.polkadot.network/docs/learn-parachains). -🧙 Learn about how to use this template and run your own parachain testnet for it in the -[Devhub Cumulus Tutorial](https://docs.substrate.io/tutorials/v3/cumulus/start-relay/). +* ☁️ It is based on the +[Cumulus](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/cumulus/index.html) framework. + +* 🔧 Its runtime is configured of a single custom pallet as a starting point, and a handful of ready-made pallets +such as a [Balances pallet](https://paritytech.github.io/polkadot-sdk/master/pallet_balances/index.html). + +* 👉 Learn more about parachains [here](https://wiki.polkadot.network/docs/learn-parachains) + +## Template Structure + +A Polkadot SDK based project such as this one consists of: + +* 💿 a [Node](./node/README.md) - the binary application. +* 🧮 the [Runtime](./runtime/README.md) - the core logic of the parachain. +* 🎨 the [Pallets](./pallets/README.md) - from which the runtime is constructed. + +## Getting Started + +* 🦀 The template is using the Rust language. + +* 👉 Check the +[Rust installation instructions](https://www.rust-lang.org/tools/install) for your system. + +* 🛠️ Depending on your operating system and Rust version, there might be additional +packages required to compile this template - please take note of the Rust compiler output. + +### Build + +🔨 Use the following command to build the node without launching it: + +```sh +cargo build --release +``` + +🐳 Alternatively, build the docker image: + +```sh +docker build . -t polkadot-sdk-parachain-template +``` + +### Local Development Chain + +🧟 This project uses [Zombienet](https://github.com/paritytech/zombienet) to orchestrate the relaychain and parachain nodes. +You can grab a [released binary](https://github.com/paritytech/zombienet/releases/latest) or use an [npm version](https://www.npmjs.com/package/@zombienet/cli). 
+ +This template produces a parachain node. +You still need a relaychain node - you can download the `polkadot` +(and the accompanying `polkadot-prepare-worker` and `polkadot-execute-worker`) +binaries from [Polkadot SDK releases](https://github.com/paritytech/polkadot-sdk/releases/latest). + +Make sure to bring the parachain node - as well as `polkadot`, `polkadot-prepare-worker`, `polkadot-execute-worker`, +and `zombienet` - into `PATH` like so: + +```sh +export PATH="./target/release/:$PATH" +``` + +This way, we can conveniently use them in the following steps. + +👥 The following command starts a local development chain, with a single relay chain node and a single parachain collator: + +```sh +zombienet --provider native spawn ./zombienet.toml + +# Alternatively, the npm version: +npx --yes @zombienet/cli --provider native spawn ./zombienet.toml +``` + +Development chains: + +* 🧹 Do not persist the state. +* 💰 Are preconfigured with a genesis state that includes several prefunded development accounts. +* 🧑‍⚖️ Development accounts are used as validators, collators, and `sudo` accounts. + +### Connect with the Polkadot-JS Apps Front-End + +* 🌐 You can interact with your local node using the +hosted version of the Polkadot/Substrate Portal: +[relay chain](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9944) +and [parachain](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9988). + +* 🪐 A hosted version is also +available on [IPFS](https://dotapps.io/). + +* 🧑‍🔧 You can also find the source code and instructions for hosting your own instance in the +[`polkadot-js/apps`](https://github.com/polkadot-js/apps) repository. + +## Contributing + +* 🔄 This template is automatically updated after releases in the main [Polkadot SDK monorepo](https://github.com/paritytech/polkadot-sdk). + +* ➡️ Any pull requests should be directed to this [source](https://github.com/paritytech/polkadot-sdk/tree/master/templates/parachain). 
+ +* 😇 Please refer to the monorepo's +[contribution guidelines](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md) and +[Code of Conduct](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CODE_OF_CONDUCT.md). + +## Getting Help + +* 🧑‍🏫 To learn about Polkadot in general, [Polkadot.network](https://polkadot.network/) website is a good starting point. + +* 🧑‍🔧 For technical introduction, [here](https://github.com/paritytech/polkadot-sdk#-documentation) are +the Polkadot SDK documentation resources. + +* 👥 Additionally, there are [GitHub issues](https://github.com/paritytech/polkadot-sdk/issues) and +[Substrate StackExchange](https://substrate.stackexchange.com/). diff --git a/templates/parachain/node/README.md b/templates/parachain/node/README.md new file mode 100644 index 000000000000..350272c7b6ef --- /dev/null +++ b/templates/parachain/node/README.md @@ -0,0 +1,18 @@ +# Node + +ℹ️ A node - in Polkadot - is a binary executable, whose primary purpose is to execute the [runtime](../runtime/README.md). + +🔗 It communicates with other nodes in the network, and aims for +[consensus](https://wiki.polkadot.network/docs/learn-consensus) among them. + +⚙️ It acts as a remote procedure call (RPC) server, allowing interaction with the blockchain. + +👉 Learn more about the architecture, and a difference between a node and a runtime +[here](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/wasm_meta_protocol/index.html). + +👇 Here are the most important files in this node template: + +- [`chain_spec.rs`](./src/chain_spec.rs): A chain specification is a source code file that defines the chain's +initial (genesis) state. +- [`service.rs`](./src/service.rs): This file defines the node implementation. +It's a place to configure consensus-related topics. 
diff --git a/templates/parachain/pallets/README.md b/templates/parachain/pallets/README.md new file mode 100644 index 000000000000..9fabe64a3e79 --- /dev/null +++ b/templates/parachain/pallets/README.md @@ -0,0 +1,13 @@ +# Pallets + +ℹ️ A pallet is a unit of encapsulated logic, with a clearly defined responsibility. A pallet is analogous to a +module in the runtime. + +💁 In this template, there is a simple custom pallet based on the FRAME framework. + +👉 Learn more about FRAME +[here](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/frame_runtime/index.html). + +🧑‍🏫 Please refer to +[this guide](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/guides/your_first_pallet/index.html) +to learn how to write a basic pallet. diff --git a/templates/parachain/pallets/template/README.md b/templates/parachain/pallets/template/README.md deleted file mode 100644 index 9e4dc55267d6..000000000000 --- a/templates/parachain/pallets/template/README.md +++ /dev/null @@ -1 +0,0 @@ -License: MIT-0 diff --git a/templates/parachain/polkadot-launch/config.json b/templates/parachain/polkadot-launch/config.json deleted file mode 100644 index f03f983a4975..000000000000 --- a/templates/parachain/polkadot-launch/config.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "relaychain": { - "bin": "../../polkadot/target/release/polkadot", - "chain": "rococo-local", - "nodes": [ - { - "name": "alice", - "wsPort": 9944, - "port": 30444 - }, - { - "name": "bob", - "wsPort": 9955, - "port": 30555 - } - ] - }, - "parachains": [ - { - "bin": "../target/release/polkadot-parachain", - "id": "200", - "balance": "1000000000000000000000", - "nodes": [ - { - "wsPort": 9988, - "name": "alice", - "port": 31200, - "flags": [ - "--force-authoring", - "--", - "--execution=wasm" - ] - } - ] - } - ], - "types": { - } -} diff --git a/templates/parachain/runtime/README.md b/templates/parachain/runtime/README.md new file mode 100644 index 000000000000..acd5939fc542 --- /dev/null 
+++ b/templates/parachain/runtime/README.md @@ -0,0 +1,10 @@ +# Runtime + +ℹ️ The runtime (in other words, a state transition function), refers to the core logic of the parachain that is +responsible for validating blocks and executing the state changes they define. + +💁 The runtime in this template is constructed using ready-made FRAME pallets that ship with +[Polkadot SDK](https://github.com/paritytech/polkadot-sdk), and a [template for a custom pallet](../pallets/README.md). + +👉 Learn more about FRAME +[here](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/frame_runtime/index.html). diff --git a/templates/parachain/zombienet.toml b/templates/parachain/zombienet.toml new file mode 100644 index 000000000000..336ba1af4dde --- /dev/null +++ b/templates/parachain/zombienet.toml @@ -0,0 +1,16 @@ +[relaychain] +default_command = "polkadot" +chain = "dev" + +[[relaychain.nodes]] +name = "alice" +validator = true +ws_port = 9944 + +[[parachains]] +id = 1000 + +[parachains.collator] +name = "alice" +ws_port = 9988 +command = "parachain-template-node" From 2869fd6aba61f429ea2c006c2aae8dd5405dc5aa Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Mon, 10 Jun 2024 12:44:58 +0300 Subject: [PATCH 18/52] approval-voting: Add no shows debug information (#4726) Add some debug logs to be able to identify the validators and parachains that have most no-shows, this metric is valuable because it will help us identify validators and parachains that regularly have this problem. From the validator_index we can then query the on-chain information and identify the exact validator that is causing the no-shows. 
--------- Signed-off-by: Alexandru Gheorghe --- .../approval-voting/src/approval_checking.rs | 41 ++++++-- .../node/core/approval-voting/src/import.rs | 1 + polkadot/node/core/approval-voting/src/lib.rs | 98 ++++++++++++++++--- .../node/core/approval-voting/src/tests.rs | 2 + 4 files changed, 124 insertions(+), 18 deletions(-) diff --git a/polkadot/node/core/approval-voting/src/approval_checking.rs b/polkadot/node/core/approval-voting/src/approval_checking.rs index 8667d3639185..96eb25626de8 100644 --- a/polkadot/node/core/approval-voting/src/approval_checking.rs +++ b/polkadot/node/core/approval-voting/src/approval_checking.rs @@ -23,6 +23,7 @@ use polkadot_primitives::ValidatorIndex; use crate::{ persisted_entries::{ApprovalEntry, CandidateEntry, TrancheEntry}, time::Tick, + MAX_RECORDED_NO_SHOW_VALIDATORS_PER_CANDIDATE, }; /// Result of counting the necessary tranches needed for approving a block. @@ -32,6 +33,7 @@ pub struct TranchesToApproveResult { pub required_tranches: RequiredTranches, /// The total number of no_shows at the moment we are doing the counting. pub total_observed_no_shows: usize, + pub no_show_validators: Vec, } /// The required tranches of assignments needed to determine whether a candidate is approved. @@ -188,6 +190,8 @@ struct State { /// The last tick at which a considered assignment was received. last_assignment_tick: Option, total_observed_no_shows: usize, + // The validator's index that are no-shows. 
+ no_show_validators: Vec, } impl State { @@ -203,6 +207,7 @@ impl State { return TranchesToApproveResult { required_tranches: RequiredTranches::All, total_observed_no_shows: self.total_observed_no_shows, + no_show_validators: self.no_show_validators.clone(), } } @@ -217,6 +222,7 @@ impl State { last_assignment_tick: self.last_assignment_tick, }, total_observed_no_shows: self.total_observed_no_shows, + no_show_validators: self.no_show_validators.clone(), } } @@ -234,6 +240,7 @@ impl State { clock_drift, }, total_observed_no_shows: self.total_observed_no_shows, + no_show_validators: self.no_show_validators.clone(), } } else { TranchesToApproveResult { @@ -244,6 +251,7 @@ impl State { clock_drift, }, total_observed_no_shows: self.total_observed_no_shows, + no_show_validators: self.no_show_validators.clone(), } } } @@ -253,11 +261,12 @@ impl State { } fn advance( - &self, + mut self, new_assignments: usize, new_no_shows: usize, next_no_show: Option, last_assignment_tick: Option, + no_show_validators: Vec, ) -> State { let new_covered = if self.depth == 0 { new_assignments @@ -290,6 +299,17 @@ impl State { (self.depth, covering, uncovered) }; + // Make sure we don't store too many no-show validators, since this metric + // is valuable if there are just a few of them to identify the problematic + // validators. + // If there are a lot then we've got bigger problems and no need to make this + // array unnecessarily large. 
+ if self.no_show_validators.len() + no_show_validators.len() < + MAX_RECORDED_NO_SHOW_VALIDATORS_PER_CANDIDATE + { + self.no_show_validators.extend(no_show_validators); + } + State { assignments, depth, @@ -299,6 +319,7 @@ impl State { next_no_show, last_assignment_tick, total_observed_no_shows: self.total_observed_no_shows + new_no_shows, + no_show_validators: self.no_show_validators, } } } @@ -354,8 +375,9 @@ fn count_no_shows( block_tick: Tick, no_show_duration: Tick, drifted_tick_now: Tick, -) -> (usize, Option) { +) -> (usize, Option, Vec) { let mut next_no_show = None; + let mut no_show_validators = Vec::new(); let no_shows = assignments .iter() .map(|(v_index, tick)| { @@ -379,12 +401,14 @@ fn count_no_shows( // the clock drift will be removed again to do the comparison above. next_no_show = super::min_prefer_some(next_no_show, Some(no_show_at + clock_drift)); } - + if is_no_show { + no_show_validators.push(*v_index); + } is_no_show }) .count(); - (no_shows, next_no_show) + (no_shows, next_no_show, no_show_validators) } /// Determine the amount of tranches of assignments needed to determine approval of a candidate. @@ -408,6 +432,7 @@ pub fn tranches_to_approve( next_no_show: None, last_assignment_tick: None, total_observed_no_shows: 0, + no_show_validators: Vec::new(), }; // The `ApprovalEntry` doesn't have any data for empty tranches. We still want to iterate over @@ -446,7 +471,7 @@ pub fn tranches_to_approve( // // While we count the no-shows, we also determine the next possible no-show we might // see within this tranche. 
- let (no_shows, next_no_show) = count_no_shows( + let (no_shows, next_no_show, no_show_validators) = count_no_shows( assignments, approvals, clock_drift, @@ -455,7 +480,7 @@ pub fn tranches_to_approve( drifted_tick_now, ); - let s = s.advance(n_assignments, no_shows, next_no_show, last_assignment_tick); + let s = s.advance(n_assignments, no_shows, next_no_show, last_assignment_tick, no_show_validators); let output = s.output(tranche, needed_approvals, n_validators, no_show_duration); *state = match output.required_tranches { @@ -1186,7 +1211,7 @@ mod tests { approvals.set(v_index, true); } - let (no_shows, next_no_show) = count_no_shows( + let (no_shows, next_no_show, _) = count_no_shows( &test.assignments, &approvals, test.clock_drift, @@ -1390,6 +1415,7 @@ mod tests { next_no_show: None, last_assignment_tick: None, total_observed_no_shows: 0, + no_show_validators: Default::default(), }; assert_eq!( @@ -1414,6 +1440,7 @@ mod tests { next_no_show: None, last_assignment_tick: None, total_observed_no_shows: 0, + no_show_validators: Default::default(), }; assert_eq!( diff --git a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs index 59b6f91c0a82..3ddef1e01c45 100644 --- a/polkadot/node/core/approval-voting/src/import.rs +++ b/polkadot/node/core/approval-voting/src/import.rs @@ -662,6 +662,7 @@ pub(crate) mod tests { per_block_assignments_gathering_times: LruMap::new(ByLength::new( MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS, )), + no_show_stats: Default::default(), } } diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index eece6b15805c..d4b6855a44d0 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -691,6 +691,7 @@ struct ApprovalStatus { tranche_now: DelayTranche, block_tick: Tick, last_no_shows: usize, + no_show_validators: Vec, } #[derive(Copy, Clone)] @@ -832,6 +833,51 @@ struct State { // tranches 
we trigger and why. per_block_assignments_gathering_times: LruMap>, + no_show_stats: NoShowStats, +} + +// Regularly dump the no-show stats at this block number frequency. +const NO_SHOW_DUMP_FREQUENCY: BlockNumber = 50; +// The maximum number of validators we record no-shows for, per candidate. +pub(crate) const MAX_RECORDED_NO_SHOW_VALIDATORS_PER_CANDIDATE: usize = 20; + +// No show stats per validator and per parachain. +// This is valuable information when we have to debug live network issue, because +// it gives information if things are going wrong only for some validators or just +// for some parachains. +#[derive(Debug, Clone, PartialEq, Eq, Default)] +struct NoShowStats { + per_validator_no_show: HashMap>, + per_parachain_no_show: HashMap, + last_dumped_block_number: BlockNumber, +} + +impl NoShowStats { + // Print the no-show stats if NO_SHOW_DUMP_FREQUENCY blocks have passed since the last + // print. + fn maybe_print(&mut self, current_block_number: BlockNumber) { + if self.last_dumped_block_number > current_block_number || + current_block_number - self.last_dumped_block_number < NO_SHOW_DUMP_FREQUENCY + { + return + } + if self.per_parachain_no_show.is_empty() && self.per_validator_no_show.is_empty() { + return + } + + gum::debug!( + target: LOG_TARGET, + "Validators with no_show {:?} and parachains with no_shows {:?} since {:}", + self.per_validator_no_show, + self.per_parachain_no_show, + self.last_dumped_block_number + ); + + self.last_dumped_block_number = current_block_number; + + self.per_validator_no_show.clear(); + self.per_parachain_no_show.clear(); + } } #[derive(Debug, Clone, PartialEq, Eq)] @@ -887,21 +933,25 @@ impl State { ); if let Some(approval_entry) = candidate_entry.approval_entry(&block_hash) { - let TranchesToApproveResult { required_tranches, total_observed_no_shows } = - approval_checking::tranches_to_approve( - approval_entry, - candidate_entry.approvals(), - tranche_now, - block_tick, - no_show_duration, - 
session_info.needed_approvals as _, - ); + let TranchesToApproveResult { + required_tranches, + total_observed_no_shows, + no_show_validators, + } = approval_checking::tranches_to_approve( + approval_entry, + candidate_entry.approvals(), + tranche_now, + block_tick, + no_show_duration, + session_info.needed_approvals as _, + ); let status = ApprovalStatus { required_tranches, block_tick, tranche_now, last_no_shows: total_observed_no_shows, + no_show_validators, }; Some((approval_entry, status)) @@ -1044,6 +1094,26 @@ impl State { }, } } + + fn record_no_shows( + &mut self, + session_index: SessionIndex, + para_id: u32, + no_show_validators: &Vec, + ) { + if !no_show_validators.is_empty() { + *self.no_show_stats.per_parachain_no_show.entry(para_id.into()).or_default() += 1; + } + for validator_index in no_show_validators { + *self + .no_show_stats + .per_validator_no_show + .entry(session_index) + .or_default() + .entry(*validator_index) + .or_default() += 1; + } + } } #[derive(Debug, Clone)] @@ -1096,6 +1166,7 @@ where per_block_assignments_gathering_times: LruMap::new(ByLength::new( MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS, )), + no_show_stats: NoShowStats::default(), }; // `None` on start-up. 
Gets initialized/updated on leaf update @@ -1740,6 +1811,8 @@ async fn handle_from_overseer( "Imported new block.", ); + state.no_show_stats.maybe_print(block_batch.block_number); + for (c_hash, c_entry) in block_batch.imported_candidates { metrics.on_candidate_imported(); @@ -2904,7 +2977,8 @@ where let mut actions = Vec::new(); let block_hash = block_entry.block_hash(); let block_number = block_entry.block_number(); - + let session_index = block_entry.session(); + let para_id = candidate_entry.candidate_receipt().descriptor().para_id; let tick_now = state.clock.tick_now(); let (is_approved, status) = if let Some((approval_entry, status)) = state @@ -3000,7 +3074,9 @@ where if is_approved { approval_entry.mark_approved(); } - + if newly_approved { + state.record_no_shows(session_index, para_id.into(), &status.no_show_validators); + } actions.extend(schedule_wakeup_action( &approval_entry, block_hash, diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index 5cbae7f908fc..64ae86bc013a 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -5068,6 +5068,7 @@ fn test_gathering_assignments_statements() { per_block_assignments_gathering_times: LruMap::new(ByLength::new( MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS, )), + no_show_stats: NoShowStats::default(), }; for i in 0..200i32 { @@ -5162,6 +5163,7 @@ fn test_observe_assignment_gathering_status() { per_block_assignments_gathering_times: LruMap::new(ByLength::new( MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS, )), + no_show_stats: NoShowStats::default(), }; let metrics_inner = MetricsInner { From a3472c44307c92da3b207310241545bb615fd24d Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Mon, 10 Jun 2024 15:04:43 +0300 Subject: [PATCH 19/52] add pov-recovery unit tests and support for elastic scaling (#4733) - unit tests for pov-recovery - elastic scaling support (recovering multiple candidates in a single relay 
chain block) - also some small cleanups - also switches to candidates_pending_availability in `handle_empty_block_announce_data` Fixes https://github.com/paritytech/polkadot-sdk/issues/3577 After https://github.com/paritytech/polkadot-sdk/pull/4097 is merged, we should also add a zombienet test, similar to the existing `0002-pov_recovery.toml` but which has a single collator using elastic scaling on multiple cores. --- Cargo.lock | 14 + cumulus/client/consensus/common/Cargo.toml | 1 + cumulus/client/consensus/common/src/tests.rs | 13 + cumulus/client/network/Cargo.toml | 4 + cumulus/client/network/src/lib.rs | 52 +- cumulus/client/network/src/tests.rs | 112 +- cumulus/client/pov-recovery/Cargo.toml | 8 + .../src/active_candidate_recovery.rs | 9 +- cumulus/client/pov-recovery/src/lib.rs | 68 +- cumulus/client/pov-recovery/src/tests.rs | 1404 +++++++++++++++++ .../src/lib.rs | 14 +- .../client/relay-chain-interface/Cargo.toml | 1 + .../client/relay-chain-interface/src/lib.rs | 35 +- .../relay-chain-rpc-interface/src/lib.rs | 15 + prdoc/pr_4733.prdoc | 27 + substrate/client/api/src/client.rs | 2 +- .../primitives/blockchain/src/backend.rs | 2 +- .../primitives/consensus/common/src/lib.rs | 2 +- 18 files changed, 1718 insertions(+), 65 deletions(-) create mode 100644 cumulus/client/pov-recovery/src/tests.rs create mode 100644 prdoc/pr_4733.prdoc diff --git a/Cargo.lock b/Cargo.lock index a96bb680b750..d2b7a47f84c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3730,6 +3730,7 @@ dependencies = [ "sp-timestamp", "sp-tracing 16.0.0", "sp-trie", + "sp-version", "substrate-prometheus-endpoint", "tracing", ] @@ -3784,12 +3785,15 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", "polkadot-node-primitives", + "polkadot-node-subsystem", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-test-client", "portpicker", + "rstest", "sc-cli", "sc-client-api", + "sp-api", "sp-blockchain", "sp-consensus", "sp-core", @@ -3797,6 +3801,7 @@ dependencies = [ 
"sp-keystore", "sp-runtime", "sp-state-machine", + "sp-version", "substrate-test-utils", "tokio", "tracing", @@ -3830,9 +3835,11 @@ dependencies = [ name = "cumulus-client-pov-recovery" version = "0.7.0" dependencies = [ + "assert_matches", "async-trait", "cumulus-primitives-core", "cumulus-relay-chain-interface", + "cumulus-test-client", "cumulus-test-service", "futures", "futures-timer", @@ -3843,12 +3850,18 @@ dependencies = [ "polkadot-primitives", "portpicker", "rand 0.8.5", + "rstest", "sc-cli", "sc-client-api", "sc-consensus", + "sc-utils", + "sp-api", + "sp-blockchain", "sp-consensus", "sp-maybe-compressed-blob", "sp-runtime", + "sp-tracing 16.0.0", + "sp-version", "substrate-test-utils", "tokio", "tracing", @@ -4219,6 +4232,7 @@ dependencies = [ "sp-api", "sp-blockchain", "sp-state-machine", + "sp-version", "thiserror", ] diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml index d369304e2e33..09c2f58d45e4 100644 --- a/cumulus/client/consensus/common/Cargo.toml +++ b/cumulus/client/consensus/common/Cargo.toml @@ -28,6 +28,7 @@ sp-core = { path = "../../../../substrate/primitives/core" } sp-runtime = { path = "../../../../substrate/primitives/runtime" } sp-timestamp = { path = "../../../../substrate/primitives/timestamp" } sp-trie = { path = "../../../../substrate/primitives/trie" } +sp-version = { path = "../../../../substrate/primitives/version" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../../substrate/utils/prometheus" } # Polkadot diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index aca922657072..2a944bc7f9fa 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -38,6 +38,7 @@ use polkadot_primitives::HeadData; use sc_client_api::{Backend as _, UsageProvider}; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use 
sp_consensus::{BlockOrigin, BlockStatus}; +use sp_version::RuntimeVersion; use std::{ collections::{BTreeMap, HashMap}, pin::Pin, @@ -153,6 +154,14 @@ impl RelayChainInterface for Relaychain { unimplemented!("Not needed for test") } + async fn candidates_pending_availability( + &self, + _: PHash, + _: ParaId, + ) -> RelayChainResult> { + unimplemented!("Not needed for test") + } + async fn session_index_for_child(&self, _: PHash) -> RelayChainResult { Ok(0) } @@ -247,6 +256,10 @@ impl RelayChainInterface for Relaychain { extrinsics_root: PHash::zero(), })) } + + async fn version(&self, _: PHash) -> RelayChainResult { + unimplemented!("Not needed for test") + } } fn sproof_with_best_parent(client: &Client) -> RelayStateSproofBuilder { diff --git a/cumulus/client/network/Cargo.toml b/cumulus/client/network/Cargo.toml index d4fc75287258..0dd7c4fdb0f6 100644 --- a/cumulus/client/network/Cargo.toml +++ b/cumulus/client/network/Cargo.toml @@ -24,11 +24,14 @@ sp-consensus = { path = "../../../substrate/primitives/consensus/common" } sp-core = { path = "../../../substrate/primitives/core" } sp-runtime = { path = "../../../substrate/primitives/runtime" } sp-state-machine = { path = "../../../substrate/primitives/state-machine" } +sp-api = { path = "../../../substrate/primitives/api" } +sp-version = { path = "../../../substrate/primitives/version" } # Polkadot polkadot-node-primitives = { path = "../../../polkadot/node/primitives" } polkadot-parachain-primitives = { path = "../../../polkadot/parachain" } polkadot-primitives = { path = "../../../polkadot/primitives" } +polkadot-node-subsystem = { path = "../../../polkadot/node/subsystem" } # Cumulus cumulus-relay-chain-interface = { path = "../relay-chain-interface" } @@ -37,6 +40,7 @@ cumulus-relay-chain-interface = { path = "../relay-chain-interface" } portpicker = "0.1.1" tokio = { version = "1.32.0", features = ["macros"] } url = "2.4.0" +rstest = "0.18.2" # Substrate sc-cli = { path = "../../../substrate/client/cli" } 
diff --git a/cumulus/client/network/src/lib.rs b/cumulus/client/network/src/lib.rs index f442ed5840bd..dab15bba590a 100644 --- a/cumulus/client/network/src/lib.rs +++ b/cumulus/client/network/src/lib.rs @@ -20,6 +20,7 @@ //! that use the relay chain provided consensus. See [`RequireSecondedInBlockAnnounce`] //! and [`WaitToAnnounce`] for more information about this implementation. +use sp_api::RuntimeApiInfo; use sp_consensus::block_validation::{ BlockAnnounceValidator as BlockAnnounceValidatorT, Validation, }; @@ -28,6 +29,7 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_node_primitives::{CollationSecondedSignal, Statement}; +use polkadot_node_subsystem::messages::RuntimeApiRequest; use polkadot_parachain_primitives::primitives::HeadData; use polkadot_primitives::{ CandidateReceipt, CompactStatement, Hash as PHash, Id as ParaId, OccupiedCoreAssumption, @@ -266,18 +268,41 @@ where Ok(para_head) } - /// Get the backed block hash of the given parachain in the relay chain. - async fn backed_block_hash( + /// Get the backed block hashes of the given parachain in the relay chain. + async fn backed_block_hashes( relay_chain_interface: &RCInterface, hash: PHash, para_id: ParaId, - ) -> Result, BoxedError> { - let candidate_receipt = relay_chain_interface - .candidate_pending_availability(hash, para_id) + ) -> Result, BoxedError> { + let runtime_api_version = relay_chain_interface + .version(hash) .await .map_err(|e| Box::new(BlockAnnounceError(format!("{:?}", e))) as Box<_>)?; + let parachain_host_runtime_api_version = + runtime_api_version + .api_version( + &>::ID, + ) + .unwrap_or_default(); + + // If the relay chain runtime does not support the new runtime API, fallback to the + // deprecated one. 
+ let candidate_receipts = if parachain_host_runtime_api_version < + RuntimeApiRequest::CANDIDATES_PENDING_AVAILABILITY_RUNTIME_REQUIREMENT + { + #[allow(deprecated)] + relay_chain_interface + .candidate_pending_availability(hash, para_id) + .await + .map(|c| c.into_iter().collect::>()) + } else { + relay_chain_interface.candidates_pending_availability(hash, para_id).await + } + .map_err(|e| Box::new(BlockAnnounceError(format!("{:?}", e))) as Box<_>)?; - Ok(candidate_receipt.map(|cr| cr.descriptor.para_head)) + Ok(candidate_receipts.into_iter().map(|cr| cr.descriptor.para_head)) } /// Handle a block announcement with empty data (no statement) attached to it. @@ -298,15 +323,20 @@ where let best_head = Self::included_block(&relay_chain_interface, relay_chain_best_hash, para_id).await?; let known_best_number = best_head.number(); - let backed_block = || async { - Self::backed_block_hash(&relay_chain_interface, relay_chain_best_hash, para_id).await - }; if best_head == header { tracing::debug!(target: LOG_TARGET, "Announced block matches best block.",); - Ok(Validation::Success { is_new_best: true }) - } else if Some(HeadData(header.encode()).hash()) == backed_block().await? 
{ + return Ok(Validation::Success { is_new_best: true }) + } + + let mut backed_blocks = + Self::backed_block_hashes(&relay_chain_interface, relay_chain_best_hash, para_id) + .await?; + + let head_hash = HeadData(header.encode()).hash(); + + if backed_blocks.any(|block_hash| block_hash == head_hash) { tracing::debug!(target: LOG_TARGET, "Announced block matches latest backed block.",); Ok(Validation::Success { is_new_best: true }) diff --git a/cumulus/client/network/src/tests.rs b/cumulus/client/network/src/tests.rs index 3f5757d5eac1..eb0d7f0e01b3 100644 --- a/cumulus/client/network/src/tests.rs +++ b/cumulus/client/network/src/tests.rs @@ -34,6 +34,7 @@ use polkadot_test_client::{ Client as PClient, ClientBlockImportExt, DefaultTestClientBuilderExt, FullBackend as PBackend, InitPolkadotBlockBuilder, TestClientBuilder, TestClientBuilderExt, }; +use rstest::rstest; use sc_client_api::{Backend, BlockchainEvents}; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; @@ -42,7 +43,8 @@ use sp_keyring::Sr25519Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore, KeystorePtr}; use sp_runtime::RuntimeAppPublic; use sp_state_machine::StorageValue; -use std::{collections::BTreeMap, time::Duration}; +use sp_version::RuntimeVersion; +use std::{borrow::Cow, collections::BTreeMap, time::Duration}; fn check_error(error: crate::BoxedError, check_error: impl Fn(&BlockAnnounceError) -> bool) { let error = *error @@ -53,6 +55,33 @@ fn check_error(error: crate::BoxedError, check_error: impl Fn(&BlockAnnounceErro } } +fn dummy_candidate() -> CommittedCandidateReceipt { + CommittedCandidateReceipt { + descriptor: CandidateDescriptor { + para_head: polkadot_parachain_primitives::primitives::HeadData( + default_header().encode(), + ) + .hash(), + para_id: 0u32.into(), + relay_parent: PHash::random(), + collator: CollatorPair::generate().0.public(), + persisted_validation_data_hash: PHash::random(), + pov_hash: PHash::random(), + erasure_root: PHash::random(), + 
signature: sp_core::sr25519::Signature::default().into(), + validation_code_hash: ValidationCodeHash::from(PHash::random()), + }, + commitments: CandidateCommitments { + upward_messages: Default::default(), + horizontal_messages: Default::default(), + new_validation_code: None, + head_data: HeadData(Vec::new()), + processed_downward_messages: 0, + hrmp_watermark: 0, + }, + } +} + #[derive(Clone)] struct DummyRelayChainInterface { data: Arc>, @@ -69,6 +98,8 @@ impl DummyRelayChainInterface { data: Arc::new(Mutex::new(ApiData { validators: vec![Sr25519Keyring::Alice.public().into()], has_pending_availability: false, + runtime_version: + RuntimeApiRequest::CANDIDATES_PENDING_AVAILABILITY_RUNTIME_REQUIREMENT, })), relay_client: Arc::new(builder.build()), relay_backend, @@ -131,36 +162,37 @@ impl RelayChainInterface for DummyRelayChainInterface { _: PHash, _: ParaId, ) -> RelayChainResult> { + if self.data.lock().runtime_version >= + RuntimeApiRequest::CANDIDATES_PENDING_AVAILABILITY_RUNTIME_REQUIREMENT + { + panic!("Should have used candidates_pending_availability instead"); + } + if self.data.lock().has_pending_availability { - Ok(Some(CommittedCandidateReceipt { - descriptor: CandidateDescriptor { - para_head: polkadot_parachain_primitives::primitives::HeadData( - default_header().encode(), - ) - .hash(), - para_id: 0u32.into(), - relay_parent: PHash::random(), - collator: CollatorPair::generate().0.public(), - persisted_validation_data_hash: PHash::random(), - pov_hash: PHash::random(), - erasure_root: PHash::random(), - signature: sp_core::sr25519::Signature::default().into(), - validation_code_hash: ValidationCodeHash::from(PHash::random()), - }, - commitments: CandidateCommitments { - upward_messages: Default::default(), - horizontal_messages: Default::default(), - new_validation_code: None, - head_data: HeadData(Vec::new()), - processed_downward_messages: 0, - hrmp_watermark: 0, - }, - })) + Ok(Some(dummy_candidate())) } else { Ok(None) } } + async fn 
candidates_pending_availability( + &self, + _: PHash, + _: ParaId, + ) -> RelayChainResult> { + if self.data.lock().runtime_version < + RuntimeApiRequest::CANDIDATES_PENDING_AVAILABILITY_RUNTIME_REQUIREMENT + { + panic!("Should have used candidate_pending_availability instead"); + } + + if self.data.lock().has_pending_availability { + Ok(vec![dummy_candidate()]) + } else { + Ok(vec![]) + } + } + async fn session_index_for_child(&self, _: PHash) -> RelayChainResult { Ok(0) } @@ -264,6 +296,28 @@ impl RelayChainInterface for DummyRelayChainInterface { Ok(header) } + + async fn version(&self, _: PHash) -> RelayChainResult { + let version = self.data.lock().runtime_version; + + let apis = sp_version::create_apis_vec!([( + >::ID, + version + )]) + .into_owned() + .to_vec(); + + Ok(RuntimeVersion { + spec_name: sp_version::create_runtime_str!("test"), + impl_name: sp_version::create_runtime_str!("test"), + authoring_version: 1, + spec_version: 1, + impl_version: 0, + apis: Cow::Owned(apis), + transaction_version: 5, + state_version: 1, + }) + } } fn make_validator_and_api() -> ( @@ -574,11 +628,14 @@ fn relay_parent_not_imported_when_block_announce_is_processed() { /// Ensures that when we receive a block announcement without a statement included, while the block /// is not yet included by the node checking the announcement, but the node is already backed. 
-#[test] -fn block_announced_without_statement_and_block_only_backed() { +#[rstest] +#[case(RuntimeApiRequest::CANDIDATES_PENDING_AVAILABILITY_RUNTIME_REQUIREMENT)] +#[case(10)] +fn block_announced_without_statement_and_block_only_backed(#[case] runtime_version: u32) { block_on(async move { let (mut validator, api) = make_validator_and_api(); api.data.lock().has_pending_availability = true; + api.data.lock().runtime_version = runtime_version; let header = default_header(); @@ -592,4 +649,5 @@ fn block_announced_without_statement_and_block_only_backed() { struct ApiData { validators: Vec, has_pending_availability: bool, + runtime_version: u32, } diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml index 7afe7fae34bd..539802d69386 100644 --- a/cumulus/client/pov-recovery/Cargo.toml +++ b/cumulus/client/pov-recovery/Cargo.toml @@ -22,6 +22,8 @@ sc-consensus = { path = "../../../substrate/client/consensus/common" } sp-consensus = { path = "../../../substrate/primitives/consensus/common" } sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } sp-runtime = { path = "../../../substrate/primitives/runtime" } +sp-api = { path = "../../../substrate/primitives/api" } +sp-version = { path = "../../../substrate/primitives/version" } # Polkadot polkadot-node-primitives = { path = "../../../polkadot/node/primitives" } @@ -35,8 +37,14 @@ cumulus-relay-chain-interface = { path = "../relay-chain-interface" } async-trait = "0.1.79" [dev-dependencies] +rstest = "0.18.2" tokio = { version = "1.32.0", features = ["macros"] } portpicker = "0.1.1" +sp-blockchain = { path = "../../../substrate/primitives/blockchain" } +cumulus-test-client = { path = "../../test/client" } +sc-utils = { path = "../../../substrate/client/utils" } +sp-tracing = { path = "../../../substrate/primitives/tracing" } +assert_matches = "1.5" # Cumulus cumulus-test-service = { path = "../../test/service" } diff --git 
a/cumulus/client/pov-recovery/src/active_candidate_recovery.rs b/cumulus/client/pov-recovery/src/active_candidate_recovery.rs index c41c543f04d1..50de98909ea4 100644 --- a/cumulus/client/pov-recovery/src/active_candidate_recovery.rs +++ b/cumulus/client/pov-recovery/src/active_candidate_recovery.rs @@ -21,7 +21,7 @@ use polkadot_node_subsystem::messages::AvailabilityRecoveryMessage; use futures::{channel::oneshot, stream::FuturesUnordered, Future, FutureExt, StreamExt}; -use std::{collections::HashSet, pin::Pin, sync::Arc}; +use std::{pin::Pin, sync::Arc}; use crate::RecoveryHandle; @@ -32,14 +32,12 @@ pub(crate) struct ActiveCandidateRecovery { /// The recoveries that are currently being executed. recoveries: FuturesUnordered>)> + Send>>>, - /// The block hashes of the candidates currently being recovered. - candidates: HashSet, recovery_handle: Box, } impl ActiveCandidateRecovery { pub fn new(recovery_handle: Box) -> Self { - Self { recoveries: Default::default(), candidates: Default::default(), recovery_handle } + Self { recoveries: Default::default(), recovery_handle } } /// Recover the given `candidate`. 
@@ -63,8 +61,6 @@ impl ActiveCandidateRecovery { ) .await; - self.candidates.insert(block_hash); - self.recoveries.push( async move { match rx.await { @@ -97,7 +93,6 @@ impl ActiveCandidateRecovery { pub async fn wait_for_recovery(&mut self) -> (Block::Hash, Option>) { loop { if let Some(res) = self.recoveries.next().await { - self.candidates.remove(&res.0); return res } else { futures::pending!() diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs index 0ca21749c3eb..6ace18155e87 100644 --- a/cumulus/client/pov-recovery/src/lib.rs +++ b/cumulus/client/pov-recovery/src/lib.rs @@ -48,11 +48,12 @@ use sc_client_api::{BlockBackend, BlockchainEvents, UsageProvider}; use sc_consensus::import_queue::{ImportQueueService, IncomingBlock}; +use sp_api::RuntimeApiInfo; use sp_consensus::{BlockOrigin, BlockStatus, SyncOracle}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use polkadot_node_primitives::{PoV, POV_BOMB_LIMIT}; -use polkadot_node_subsystem::messages::AvailabilityRecoveryMessage; +use polkadot_node_subsystem::messages::{AvailabilityRecoveryMessage, RuntimeApiRequest}; use polkadot_overseer::Handle as OverseerHandle; use polkadot_primitives::{ CandidateReceipt, CommittedCandidateReceipt, Id as ParaId, SessionIndex, @@ -75,6 +76,9 @@ use std::{ time::Duration, }; +#[cfg(test)] +mod tests; + mod active_candidate_recovery; use active_candidate_recovery::ActiveCandidateRecovery; @@ -544,7 +548,7 @@ where ) .await { - Ok(pending_candidate_stream) => pending_candidate_stream.fuse(), + Ok(pending_candidates_stream) => pending_candidates_stream.fuse(), Err(err) => { tracing::error!(target: LOG_TARGET, error = ?err, "Unable to retrieve pending candidate stream."); return @@ -554,9 +558,11 @@ where futures::pin_mut!(pending_candidates); loop { select! 
{ - pending_candidate = pending_candidates.next() => { - if let Some((receipt, session_index)) = pending_candidate { - self.handle_pending_candidate(receipt, session_index); + next_pending_candidates = pending_candidates.next() => { + if let Some((candidates, session_index)) = next_pending_candidates { + for candidate in candidates { + self.handle_pending_candidate(candidate, session_index); + } } else { tracing::debug!(target: LOG_TARGET, "Pending candidates stream ended"); return; @@ -615,7 +621,7 @@ async fn pending_candidates( relay_chain_client: impl RelayChainInterface + Clone, para_id: ParaId, sync_service: Arc, -) -> RelayChainResult> { +) -> RelayChainResult, SessionIndex)>> { let import_notification_stream = relay_chain_client.import_notification_stream().await?; let filtered_stream = import_notification_stream.filter_map(move |n| { @@ -632,16 +638,54 @@ async fn pending_candidates( return None } - let pending_availability_result = client_for_closure - .candidate_pending_availability(hash, para_id) + let runtime_api_version = client_for_closure + .version(hash) .await .map_err(|e| { tracing::error!( target: LOG_TARGET, error = ?e, - "Failed to fetch pending candidates.", + "Failed to fetch relay chain runtime version.", ) - }); + }) + .ok()?; + let parachain_host_runtime_api_version = runtime_api_version + .api_version( + &>::ID, + ) + .unwrap_or_default(); + + // If the relay chain runtime does not support the new runtime API, fallback to the + // deprecated one. 
+ let pending_availability_result = if parachain_host_runtime_api_version < + RuntimeApiRequest::CANDIDATES_PENDING_AVAILABILITY_RUNTIME_REQUIREMENT + { + #[allow(deprecated)] + client_for_closure + .candidate_pending_availability(hash, para_id) + .await + .map_err(|e| { + tracing::error!( + target: LOG_TARGET, + error = ?e, + "Failed to fetch pending candidates.", + ) + }) + .map(|candidate| candidate.into_iter().collect::>()) + } else { + client_for_closure.candidates_pending_availability(hash, para_id).await.map_err( + |e| { + tracing::error!( + target: LOG_TARGET, + error = ?e, + "Failed to fetch pending candidates.", + ) + }, + ) + }; + let session_index_result = client_for_closure.session_index_for_child(hash).await.map_err(|e| { tracing::error!( @@ -651,8 +695,8 @@ async fn pending_candidates( ) }); - if let Ok(Some(candidate)) = pending_availability_result { - session_index_result.map(|session_index| (candidate, session_index)).ok() + if let Ok(candidates) = pending_availability_result { + session_index_result.map(|session_index| (candidates, session_index)).ok() } else { None } diff --git a/cumulus/client/pov-recovery/src/tests.rs b/cumulus/client/pov-recovery/src/tests.rs new file mode 100644 index 000000000000..75bf308ef27a --- /dev/null +++ b/cumulus/client/pov-recovery/src/tests.rs @@ -0,0 +1,1404 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use super::*; +use assert_matches::assert_matches; +use codec::{Decode, Encode}; +use cumulus_primitives_core::relay_chain::{BlockId, CandidateCommitments, CandidateDescriptor}; +use cumulus_relay_chain_interface::{ + InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PHash, PHeader, + PersistedValidationData, StorageValue, ValidationCodeHash, ValidatorId, +}; +use cumulus_test_client::{ + runtime::{Block, Header}, + Sr25519Keyring, +}; +use futures::{channel::mpsc, SinkExt}; +use polkadot_node_primitives::AvailableData; +use polkadot_node_subsystem::{messages::AvailabilityRecoveryMessage, RecoveryError, TimeoutExt}; +use rstest::rstest; +use sc_client_api::{ + BlockImportNotification, ClientInfo, CompactProof, FinalityNotification, FinalityNotifications, + FinalizeSummary, ImportNotifications, StorageEventStream, StorageKey, +}; +use sc_consensus::import_queue::RuntimeOrigin; +use sc_utils::mpsc::{TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_blockchain::Info; +use sp_runtime::{generic::SignedBlock, Justifications}; +use sp_version::RuntimeVersion; +use std::{ + borrow::Cow, + collections::BTreeMap, + ops::Range, + sync::{Arc, Mutex}, +}; +use tokio::task; + +const GENESIS_HASH: PHash = PHash::zero(); +const TEST_SESSION_INDEX: SessionIndex = 0; + +struct AvailabilityRecoverySubsystemHandle { + tx: mpsc::Sender, +} + +impl AvailabilityRecoverySubsystemHandle { + fn new() -> (Self, mpsc::Receiver) { + let (tx, rx) = mpsc::channel(10); + + (Self { tx }, rx) + } +} + +#[async_trait::async_trait] +impl RecoveryHandle for AvailabilityRecoverySubsystemHandle { + async fn send_recovery_msg( + &mut self, + message: AvailabilityRecoveryMessage, + _origin: &'static str, + ) { + self.tx.send(message).await.expect("Receiver dropped"); + } +} + +struct ParachainClientInner { + import_notifications_rx: Option>>, + 
finality_notifications_rx: Option>>, + usage_infos: Vec>, + block_statuses: Arc>>, +} + +impl ParachainClientInner { + fn new( + usage_infos: Vec>, + block_statuses: Arc>>, + ) -> ( + Self, + TracingUnboundedSender>, + TracingUnboundedSender>, + ) { + let (import_notifications_tx, import_notifications_rx) = + sc_utils::mpsc::tracing_unbounded("import_notif", 10); + let (finality_notifications_tx, finality_notifications_rx) = + sc_utils::mpsc::tracing_unbounded("finality_notif", 10); + ( + Self { + import_notifications_rx: Some(import_notifications_rx), + finality_notifications_rx: Some(finality_notifications_rx), + usage_infos, + block_statuses, + }, + import_notifications_tx, + finality_notifications_tx, + ) + } +} +struct ParachainClient { + inner: Arc>>, +} + +impl ParachainClient { + fn new( + usage_infos: Vec>, + block_statuses: Arc>>, + ) -> ( + Self, + TracingUnboundedSender>, + TracingUnboundedSender>, + ) { + let (inner, import_notifications_tx, finality_notifications_tx) = + ParachainClientInner::new(usage_infos, block_statuses); + ( + Self { inner: Arc::new(Mutex::new(inner)) }, + import_notifications_tx, + finality_notifications_tx, + ) + } +} + +impl BlockchainEvents for ParachainClient { + fn import_notification_stream(&self) -> ImportNotifications { + self.inner + .lock() + .expect("poisoned lock") + .import_notifications_rx + .take() + .expect("Should only be taken once") + } + + fn every_import_notification_stream(&self) -> ImportNotifications { + unimplemented!() + } + + fn finality_notification_stream(&self) -> FinalityNotifications { + self.inner + .lock() + .expect("poisoned lock") + .finality_notifications_rx + .take() + .expect("Should only be taken once") + } + + fn storage_changes_notification_stream( + &self, + _filter_keys: Option<&[StorageKey]>, + _child_filter_keys: Option<&[(StorageKey, Option>)]>, + ) -> sp_blockchain::Result> { + unimplemented!() + } +} + +impl BlockBackend for ParachainClient { + fn block_body( + &self, + _: 
Block::Hash, + ) -> sp_blockchain::Result::Extrinsic>>> { + unimplemented!() + } + + fn block(&self, _: Block::Hash) -> sp_blockchain::Result>> { + unimplemented!() + } + + fn block_status(&self, hash: Block::Hash) -> sp_blockchain::Result { + Ok(self + .inner + .lock() + .expect("Poisoned lock") + .block_statuses + .lock() + .expect("Poisoned lock") + .get(&hash) + .cloned() + .unwrap_or(BlockStatus::Unknown)) + } + + fn justifications(&self, _: Block::Hash) -> sp_blockchain::Result> { + unimplemented!() + } + + fn block_hash(&self, _: NumberFor) -> sp_blockchain::Result> { + unimplemented!() + } + + fn indexed_transaction(&self, _: Block::Hash) -> sp_blockchain::Result>> { + unimplemented!() + } + + fn has_indexed_transaction(&self, _: Block::Hash) -> sp_blockchain::Result { + unimplemented!() + } + + fn block_indexed_body(&self, _: Block::Hash) -> sp_blockchain::Result>>> { + unimplemented!() + } + + fn requires_full_sync(&self) -> bool { + unimplemented!() + } +} + +impl UsageProvider for ParachainClient { + fn usage_info(&self) -> ClientInfo { + let infos = &mut self.inner.lock().expect("Poisoned lock").usage_infos; + assert!(!infos.is_empty()); + + if infos.len() == 1 { + infos.last().unwrap().clone() + } else { + infos.remove(0) + } + } +} + +struct ParachainImportQueue { + import_requests_tx: TracingUnboundedSender>>, +} + +impl ParachainImportQueue { + fn new() -> (Self, TracingUnboundedReceiver>>) { + let (import_requests_tx, import_requests_rx) = + sc_utils::mpsc::tracing_unbounded("test_import_req_forwarding", 10); + (Self { import_requests_tx }, import_requests_rx) + } +} + +impl ImportQueueService for ParachainImportQueue { + fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>) { + assert_matches!(origin, BlockOrigin::ConsensusBroadcast); + self.import_requests_tx.unbounded_send(blocks).unwrap(); + } + + fn import_justifications( + &mut self, + _: RuntimeOrigin, + _: Block::Hash, + _: NumberFor, + _: Justifications, + ) { + unimplemented!() 
+ } +} + +#[derive(Default)] +struct DummySyncOracle { + is_major_syncing: bool, +} + +impl DummySyncOracle { + fn new(is_major_syncing: bool) -> Self { + Self { is_major_syncing } + } +} + +impl SyncOracle for DummySyncOracle { + fn is_major_syncing(&self) -> bool { + self.is_major_syncing + } + + fn is_offline(&self) -> bool { + false + } +} + +#[derive(Clone)] +struct RelaychainInner { + runtime_version: u32, + import_notifications: Vec, + candidates_pending_availability: HashMap>, +} + +#[derive(Clone)] +struct Relaychain { + inner: Arc>, +} + +impl Relaychain { + fn new(relay_chain_blocks: Vec<(PHeader, Vec)>) -> Self { + let (candidates_pending_availability, import_notifications) = relay_chain_blocks + .into_iter() + .map(|(header, receipt)| ((header.hash(), receipt), header)) + .unzip(); + Self { + inner: Arc::new(Mutex::new(RelaychainInner { + import_notifications, + candidates_pending_availability, + // The version that introduced candidates_pending_availability + runtime_version: + RuntimeApiRequest::CANDIDATES_PENDING_AVAILABILITY_RUNTIME_REQUIREMENT, + })), + } + } + + fn set_runtime_version(&self, version: u32) { + self.inner.lock().expect("Poisoned lock").runtime_version = version; + } +} + +#[async_trait::async_trait] +impl RelayChainInterface for Relaychain { + async fn version(&self, _: PHash) -> RelayChainResult { + let version = self.inner.lock().expect("Poisoned lock").runtime_version; + + let apis = sp_version::create_apis_vec!([( + >::ID, + version + )]) + .into_owned() + .to_vec(); + + Ok(RuntimeVersion { + spec_name: sp_version::create_runtime_str!("test"), + impl_name: sp_version::create_runtime_str!("test"), + authoring_version: 1, + spec_version: 1, + impl_version: 0, + apis: Cow::Owned(apis), + transaction_version: 5, + state_version: 1, + }) + } + + async fn validators(&self, _: PHash) -> RelayChainResult> { + unimplemented!("Not needed for test") + } + + async fn best_block_hash(&self) -> RelayChainResult { + unimplemented!("Not needed 
for test") + } + + async fn finalized_block_hash(&self) -> RelayChainResult { + unimplemented!("Not needed for test") + } + + async fn retrieve_dmq_contents( + &self, + _: ParaId, + _: PHash, + ) -> RelayChainResult> { + unimplemented!("Not needed for test") + } + + async fn retrieve_all_inbound_hrmp_channel_contents( + &self, + _: ParaId, + _: PHash, + ) -> RelayChainResult>> { + unimplemented!("Not needed for test") + } + + async fn persisted_validation_data( + &self, + _: PHash, + _: ParaId, + _: OccupiedCoreAssumption, + ) -> RelayChainResult> { + unimplemented!("Not needed for test") + } + + async fn validation_code_hash( + &self, + _: PHash, + _: ParaId, + _: OccupiedCoreAssumption, + ) -> RelayChainResult> { + unimplemented!("Not needed for test") + } + + async fn candidate_pending_availability( + &self, + hash: PHash, + _: ParaId, + ) -> RelayChainResult> { + if self.inner.lock().expect("Poisoned lock").runtime_version >= + RuntimeApiRequest::CANDIDATES_PENDING_AVAILABILITY_RUNTIME_REQUIREMENT + { + panic!("Should have used candidates_pending_availability instead"); + } + + Ok(self + .inner + .lock() + .expect("Poisoned lock") + .candidates_pending_availability + .remove(&hash) + .map(|mut c| { + assert_eq!(c.len(), 1); + c.pop().unwrap() + })) + } + + async fn candidates_pending_availability( + &self, + hash: PHash, + _: ParaId, + ) -> RelayChainResult> { + if self.inner.lock().expect("Poisoned lock").runtime_version < + RuntimeApiRequest::CANDIDATES_PENDING_AVAILABILITY_RUNTIME_REQUIREMENT + { + panic!("Should have used candidate_pending_availability instead"); + } + + Ok(self + .inner + .lock() + .expect("Poisoned lock") + .candidates_pending_availability + .remove(&hash) + .expect("Not found")) + } + + async fn session_index_for_child(&self, _: PHash) -> RelayChainResult { + Ok(TEST_SESSION_INDEX) + } + + async fn import_notification_stream( + &self, + ) -> RelayChainResult + Send>>> { + Ok(Box::pin( + futures::stream::iter(std::mem::take( + &mut 
self.inner.lock().expect("Poisoned lock").import_notifications, + )) + .chain(futures::stream::pending()), + )) + } + + async fn finality_notification_stream( + &self, + ) -> RelayChainResult + Send>>> { + unimplemented!("Not needed for test") + } + + async fn is_major_syncing(&self) -> RelayChainResult { + unimplemented!("Not needed for test"); + } + + fn overseer_handle(&self) -> RelayChainResult { + unimplemented!("Not needed for test") + } + + async fn get_storage_by_key( + &self, + _: PHash, + _: &[u8], + ) -> RelayChainResult> { + unimplemented!("Not needed for test") + } + + async fn prove_read( + &self, + _: PHash, + _: &Vec>, + ) -> RelayChainResult { + unimplemented!("Not needed for test") + } + + async fn wait_for_block(&self, _: PHash) -> RelayChainResult<()> { + unimplemented!("Not needed for test"); + } + + async fn new_best_notification_stream( + &self, + ) -> RelayChainResult + Send>>> { + unimplemented!("Not needed for test"); + } + + async fn header(&self, _: BlockId) -> RelayChainResult> { + unimplemented!("Not needed for test"); + } +} + +fn make_candidate_chain(candidate_number_range: Range) -> Vec { + let collator = Sr25519Keyring::Ferdie; + let mut latest_parent_hash = GENESIS_HASH; + let mut candidates = vec![]; + + for number in candidate_number_range { + let head_data = Header { + number, + digest: Default::default(), + extrinsics_root: Default::default(), + parent_hash: latest_parent_hash, + state_root: Default::default(), + }; + + latest_parent_hash = head_data.hash(); + + candidates.push(CommittedCandidateReceipt { + descriptor: CandidateDescriptor { + para_id: ParaId::from(1000), + relay_parent: PHash::zero(), + collator: collator.public().into(), + persisted_validation_data_hash: PHash::zero(), + pov_hash: PHash::zero(), + erasure_root: PHash::zero(), + signature: collator.sign(&[0u8; 132]).into(), + para_head: PHash::zero(), + validation_code_hash: PHash::zero().into(), + }, + commitments: CandidateCommitments { + head_data: 
head_data.encode().into(), + upward_messages: vec![].try_into().expect("empty vec fits within bounds"), + new_validation_code: None, + horizontal_messages: vec![].try_into().expect("empty vec fits within bounds"), + processed_downward_messages: 0, + hrmp_watermark: 0_u32, + }, + }); + } + + candidates +} + +fn dummy_usage_info(finalized_number: u32) -> ClientInfo { + ClientInfo { + chain: Info { + best_hash: PHash::zero(), + best_number: 0, + genesis_hash: PHash::zero(), + finalized_hash: PHash::zero(), + // Only this field is being used. + finalized_number, + finalized_state: None, + number_leaves: 0, + block_gap: None, + }, + usage: None, + } +} + +fn dummy_pvd() -> PersistedValidationData { + PersistedValidationData { + parent_head: vec![].into(), + relay_parent_number: 1, + relay_parent_storage_root: PHash::zero(), + max_pov_size: 100, + } +} + +#[tokio::test] +async fn pending_candidate_height_lower_than_latest_finalized() { + sp_tracing::init_for_tests(); + + for finalized_number in [3, 4, 5] { + let (recovery_subsystem_tx, mut recovery_subsystem_rx) = + AvailabilityRecoverySubsystemHandle::new(); + let recovery_delay_range = + RecoveryDelayRange { min: Duration::from_millis(0), max: Duration::from_millis(10) }; + let (_explicit_recovery_chan_tx, explicit_recovery_chan_rx) = mpsc::channel(10); + let candidates = make_candidate_chain(1..4); + let relay_chain_client = Relaychain::new(vec![( + PHeader { + parent_hash: PHash::from_low_u64_be(0), + number: 1, + state_root: PHash::random(), + extrinsics_root: PHash::random(), + digest: Default::default(), + }, + candidates, + )]); + let (parachain_client, _import_notifications_tx, _finality_notifications_tx) = + ParachainClient::new(vec![dummy_usage_info(finalized_number)], Default::default()); + let (parachain_import_queue, mut import_requests_rx) = ParachainImportQueue::new(); + + // If the latest finalized block has a larger height compared to the pending candidate, the + // new candidate won't be recovered. 
Candidates have heights is 1, 2 and 3. Latest finalized + // block is 3, 4 or 5. + let pov_recovery = PoVRecovery::::new( + Box::new(recovery_subsystem_tx), + recovery_delay_range, + Arc::new(parachain_client), + Box::new(parachain_import_queue), + relay_chain_client, + ParaId::new(1000), + explicit_recovery_chan_rx, + Arc::new(DummySyncOracle::default()), + ); + + task::spawn(pov_recovery.run()); + + // No recovery message received + assert_matches!( + recovery_subsystem_rx.next().timeout(Duration::from_millis(100)).await, + None + ); + + // No import request received + assert_matches!(import_requests_rx.next().timeout(Duration::from_millis(100)).await, None); + } +} + +#[rstest] +#[case(RuntimeApiRequest::CANDIDATES_PENDING_AVAILABILITY_RUNTIME_REQUIREMENT)] +#[case(10)] +#[tokio::test] +async fn single_pending_candidate_recovery_success(#[case] runtime_version: u32) { + sp_tracing::init_for_tests(); + + let (recovery_subsystem_tx, mut recovery_subsystem_rx) = + AvailabilityRecoverySubsystemHandle::new(); + let recovery_delay_range = + RecoveryDelayRange { min: Duration::from_millis(0), max: Duration::from_millis(10) }; + let (_explicit_recovery_chan_tx, explicit_recovery_chan_rx) = mpsc::channel(10); + let candidates = make_candidate_chain(1..2); + let header = Header::decode(&mut &candidates[0].commitments.head_data.0[..]).unwrap(); + let candidate_hash = candidates[0].hash(); + + let relay_chain_client = Relaychain::new(vec![( + PHeader { + parent_hash: PHash::from_low_u64_be(0), + number: 1, + state_root: PHash::random(), + extrinsics_root: PHash::random(), + digest: Default::default(), + }, + candidates, + )]); + relay_chain_client.set_runtime_version(runtime_version); + + let mut known_blocks = HashMap::new(); + known_blocks.insert(GENESIS_HASH, BlockStatus::InChainWithState); + let (parachain_client, _import_notifications_tx, _finality_notifications_tx) = + ParachainClient::new(vec![dummy_usage_info(0)], Arc::new(Mutex::new(known_blocks))); + let 
(parachain_import_queue, mut import_requests_rx) = ParachainImportQueue::new(); + + let pov_recovery = PoVRecovery::::new( + Box::new(recovery_subsystem_tx), + recovery_delay_range, + Arc::new(parachain_client), + Box::new(parachain_import_queue), + relay_chain_client, + ParaId::new(1000), + explicit_recovery_chan_rx, + Arc::new(DummySyncOracle::default()), + ); + + task::spawn(pov_recovery.run()); + + assert_matches!( + recovery_subsystem_rx.next().await, + Some(AvailabilityRecoveryMessage::RecoverAvailableData( + receipt, + session_index, + None, + None, + response_tx + )) => { + assert_eq!(receipt.hash(), candidate_hash); + assert_eq!(session_index, TEST_SESSION_INDEX); + response_tx.send( + Ok( + AvailableData { + pov: Arc::new(PoV { + block_data: ParachainBlockData::::new( + header.clone(), + vec![], + CompactProof {encoded_nodes: vec![]} + ).encode().into() + }), + validation_data: dummy_pvd(), + } + ) + ).unwrap() + } + ); + + // No more recovery messages received. + assert_matches!(recovery_subsystem_rx.next().timeout(Duration::from_millis(100)).await, None); + + // Received import request for the recovered candidate + assert_matches!(import_requests_rx.next().await, Some(incoming_blocks) => { + assert_eq!(incoming_blocks.len(), 1); + assert_eq!(incoming_blocks[0].header, Some(header)); + }); + + // No import request received + assert_matches!(import_requests_rx.next().timeout(Duration::from_millis(100)).await, None); +} + +#[tokio::test] +async fn single_pending_candidate_recovery_retry_succeeds() { + sp_tracing::init_for_tests(); + + let (recovery_subsystem_tx, mut recovery_subsystem_rx) = + AvailabilityRecoverySubsystemHandle::new(); + let recovery_delay_range = + RecoveryDelayRange { min: Duration::from_millis(0), max: Duration::from_millis(10) }; + let (_explicit_recovery_chan_tx, explicit_recovery_chan_rx) = mpsc::channel(10); + let candidates = make_candidate_chain(1..2); + let header = Header::decode(&mut 
&candidates[0].commitments.head_data.0[..]).unwrap(); + let candidate_hash = candidates[0].hash(); + + let relay_chain_client = Relaychain::new(vec![( + PHeader { + parent_hash: PHash::from_low_u64_be(0), + number: 1, + state_root: PHash::random(), + extrinsics_root: PHash::random(), + digest: Default::default(), + }, + candidates, + )]); + let mut known_blocks = HashMap::new(); + known_blocks.insert(GENESIS_HASH, BlockStatus::InChainWithState); + let (parachain_client, _import_notifications_tx, _finality_notifications_tx) = + ParachainClient::new(vec![dummy_usage_info(0)], Arc::new(Mutex::new(known_blocks))); + let (parachain_import_queue, mut import_requests_rx) = ParachainImportQueue::new(); + + let pov_recovery = PoVRecovery::::new( + Box::new(recovery_subsystem_tx), + recovery_delay_range, + Arc::new(parachain_client), + Box::new(parachain_import_queue), + relay_chain_client, + ParaId::new(1000), + explicit_recovery_chan_rx, + Arc::new(DummySyncOracle::default()), + ); + + task::spawn(pov_recovery.run()); + + // First recovery fails. + assert_matches!( + recovery_subsystem_rx.next().await, + Some(AvailabilityRecoveryMessage::RecoverAvailableData( + receipt, + session_index, + None, + None, + response_tx + )) => { + assert_eq!(receipt.hash(), candidate_hash); + assert_eq!(session_index, TEST_SESSION_INDEX); + response_tx.send( + Err(RecoveryError::Unavailable) + ).unwrap() + } + ); + // Candidate is not imported. + assert_matches!(import_requests_rx.next().timeout(Duration::from_millis(100)).await, None); + + // Recovery is retried and it succeeds now. 
+ assert_matches!( + recovery_subsystem_rx.next().await, + Some(AvailabilityRecoveryMessage::RecoverAvailableData( + receipt, + session_index, + None, + None, + response_tx + )) => { + assert_eq!(receipt.hash(), candidate_hash); + assert_eq!(session_index, TEST_SESSION_INDEX); + response_tx.send( + Ok( + AvailableData { + pov: Arc::new(PoV { + block_data: ParachainBlockData::::new( + header.clone(), + vec![], + CompactProof {encoded_nodes: vec![]} + ).encode().into() + }), + validation_data: dummy_pvd(), + } + ) + ).unwrap() + } + ); + + // No more recovery messages received. + assert_matches!(recovery_subsystem_rx.next().timeout(Duration::from_millis(100)).await, None); + + // Received import request for the recovered candidate + assert_matches!(import_requests_rx.next().await, Some(incoming_blocks) => { + assert_eq!(incoming_blocks.len(), 1); + assert_eq!(incoming_blocks[0].header, Some(header)); + }); + + // No import request received + assert_matches!(import_requests_rx.next().timeout(Duration::from_millis(100)).await, None); +} + +#[tokio::test] +async fn single_pending_candidate_recovery_retry_fails() { + sp_tracing::init_for_tests(); + + let (recovery_subsystem_tx, mut recovery_subsystem_rx) = + AvailabilityRecoverySubsystemHandle::new(); + let recovery_delay_range = + RecoveryDelayRange { min: Duration::from_millis(0), max: Duration::from_millis(10) }; + let (_explicit_recovery_chan_tx, explicit_recovery_chan_rx) = mpsc::channel(10); + let candidates = make_candidate_chain(1..2); + let candidate_hash = candidates[0].hash(); + + let relay_chain_client = Relaychain::new(vec![( + PHeader { + parent_hash: PHash::from_low_u64_be(0), + number: 1, + state_root: PHash::random(), + extrinsics_root: PHash::random(), + digest: Default::default(), + }, + candidates, + )]); + let mut known_blocks = HashMap::new(); + known_blocks.insert(GENESIS_HASH, BlockStatus::InChainWithState); + let (parachain_client, _import_notifications_tx, _finality_notifications_tx) = + 
ParachainClient::new(vec![dummy_usage_info(0)], Arc::new(Mutex::new(known_blocks))); + let (parachain_import_queue, mut import_requests_rx) = ParachainImportQueue::new(); + + let pov_recovery = PoVRecovery::::new( + Box::new(recovery_subsystem_tx), + recovery_delay_range, + Arc::new(parachain_client), + Box::new(parachain_import_queue), + relay_chain_client, + ParaId::new(1000), + explicit_recovery_chan_rx, + Arc::new(DummySyncOracle::default()), + ); + + task::spawn(pov_recovery.run()); + + // First recovery fails. + assert_matches!( + recovery_subsystem_rx.next().await, + Some(AvailabilityRecoveryMessage::RecoverAvailableData( + receipt, + session_index, + None, + None, + response_tx + )) => { + assert_eq!(receipt.hash(), candidate_hash); + assert_eq!(session_index, TEST_SESSION_INDEX); + response_tx.send( + Err(RecoveryError::Unavailable) + ).unwrap() + } + ); + // Candidate is not imported. + assert_matches!(import_requests_rx.next().timeout(Duration::from_millis(100)).await, None); + + // Second retry fails. + assert_matches!( + recovery_subsystem_rx.next().await, + Some(AvailabilityRecoveryMessage::RecoverAvailableData( + receipt, + session_index, + None, + None, + response_tx + )) => { + assert_eq!(receipt.hash(), candidate_hash); + assert_eq!(session_index, TEST_SESSION_INDEX); + response_tx.send( + Err(RecoveryError::Unavailable) + ).unwrap() + } + ); + // Candidate is not imported. + assert_matches!(import_requests_rx.next().timeout(Duration::from_millis(100)).await, None); + + // After the second attempt, give up. + // No more recovery messages received. 
+ assert_matches!(recovery_subsystem_rx.next().timeout(Duration::from_millis(100)).await, None); +} + +#[tokio::test] +async fn single_pending_candidate_recovery_irrecoverable_error() { + sp_tracing::init_for_tests(); + + let (recovery_subsystem_tx, mut recovery_subsystem_rx) = + AvailabilityRecoverySubsystemHandle::new(); + let recovery_delay_range = + RecoveryDelayRange { min: Duration::from_millis(0), max: Duration::from_millis(10) }; + let (_explicit_recovery_chan_tx, explicit_recovery_chan_rx) = mpsc::channel(10); + let candidates = make_candidate_chain(1..2); + let candidate_hash = candidates[0].hash(); + + let relay_chain_client = Relaychain::new(vec![( + PHeader { + parent_hash: PHash::from_low_u64_be(0), + number: 1, + state_root: PHash::random(), + extrinsics_root: PHash::random(), + digest: Default::default(), + }, + candidates, + )]); + let mut known_blocks = HashMap::new(); + known_blocks.insert(GENESIS_HASH, BlockStatus::InChainWithState); + let (parachain_client, _import_notifications_tx, _finality_notifications_tx) = + ParachainClient::new(vec![dummy_usage_info(0)], Arc::new(Mutex::new(known_blocks))); + let (parachain_import_queue, mut import_requests_rx) = ParachainImportQueue::new(); + + let pov_recovery = PoVRecovery::::new( + Box::new(recovery_subsystem_tx), + recovery_delay_range, + Arc::new(parachain_client), + Box::new(parachain_import_queue), + relay_chain_client, + ParaId::new(1000), + explicit_recovery_chan_rx, + Arc::new(DummySyncOracle::default()), + ); + + task::spawn(pov_recovery.run()); + + // Recovery succeeds but the block data is wrong. Will not be retried. + assert_matches!( + recovery_subsystem_rx.next().await, + Some(AvailabilityRecoveryMessage::RecoverAvailableData( + receipt, + session_index, + None, + None, + response_tx + )) => { + assert_eq!(receipt.hash(), candidate_hash); + assert_eq!(session_index, TEST_SESSION_INDEX); + response_tx.send( + Ok( + AvailableData { + pov: Arc::new(PoV { + // Empty block data. 
It will fail to decode. + block_data: vec![].into() + }), + validation_data: dummy_pvd(), + } + ) + ).unwrap() + } + ); + // Candidate is not imported. + assert_matches!(import_requests_rx.next().timeout(Duration::from_millis(100)).await, None); + + // No more recovery messages received. + assert_matches!(recovery_subsystem_rx.next().timeout(Duration::from_millis(100)).await, None); +} + +#[tokio::test] +async fn pending_candidates_recovery_skipped_while_syncing() { + sp_tracing::init_for_tests(); + + let (recovery_subsystem_tx, mut recovery_subsystem_rx) = + AvailabilityRecoverySubsystemHandle::new(); + let recovery_delay_range = + RecoveryDelayRange { min: Duration::from_millis(0), max: Duration::from_millis(10) }; + let (_explicit_recovery_chan_tx, explicit_recovery_chan_rx) = mpsc::channel(10); + let candidates = make_candidate_chain(1..4); + + let relay_chain_client = Relaychain::new(vec![( + PHeader { + parent_hash: PHash::from_low_u64_be(0), + number: 1, + state_root: PHash::random(), + extrinsics_root: PHash::random(), + digest: Default::default(), + }, + candidates, + )]); + let mut known_blocks = HashMap::new(); + known_blocks.insert(GENESIS_HASH, BlockStatus::InChainWithState); + let (parachain_client, _import_notifications_tx, _finality_notifications_tx) = + ParachainClient::new(vec![dummy_usage_info(0)], Arc::new(Mutex::new(known_blocks))); + let (parachain_import_queue, mut import_requests_rx) = ParachainImportQueue::new(); + + let pov_recovery = PoVRecovery::::new( + Box::new(recovery_subsystem_tx), + recovery_delay_range, + Arc::new(parachain_client), + Box::new(parachain_import_queue), + relay_chain_client, + ParaId::new(1000), + explicit_recovery_chan_rx, + Arc::new(DummySyncOracle::new(true)), + ); + + task::spawn(pov_recovery.run()); + + // No recovery messages received. + assert_matches!(recovery_subsystem_rx.next().timeout(Duration::from_millis(100)).await, None); + + // No candidate is imported. 
+ assert_matches!(import_requests_rx.next().timeout(Duration::from_millis(100)).await, None); +} + +#[tokio::test] +async fn candidate_is_imported_while_awaiting_recovery() { + sp_tracing::init_for_tests(); + + let (recovery_subsystem_tx, mut recovery_subsystem_rx) = + AvailabilityRecoverySubsystemHandle::new(); + let recovery_delay_range = + RecoveryDelayRange { min: Duration::from_millis(0), max: Duration::from_millis(10) }; + let (_explicit_recovery_chan_tx, explicit_recovery_chan_rx) = mpsc::channel(10); + let candidates = make_candidate_chain(1..2); + let header = Header::decode(&mut &candidates[0].commitments.head_data.0[..]).unwrap(); + let candidate_hash = candidates[0].hash(); + + let relay_chain_client = Relaychain::new(vec![( + PHeader { + parent_hash: PHash::from_low_u64_be(0), + number: 1, + state_root: PHash::random(), + extrinsics_root: PHash::random(), + digest: Default::default(), + }, + candidates, + )]); + let mut known_blocks = HashMap::new(); + known_blocks.insert(GENESIS_HASH, BlockStatus::InChainWithState); + let (parachain_client, import_notifications_tx, _finality_notifications_tx) = + ParachainClient::new(vec![dummy_usage_info(0)], Arc::new(Mutex::new(known_blocks))); + let (parachain_import_queue, mut import_requests_rx) = ParachainImportQueue::new(); + + let pov_recovery = PoVRecovery::::new( + Box::new(recovery_subsystem_tx), + recovery_delay_range, + Arc::new(parachain_client), + Box::new(parachain_import_queue), + relay_chain_client, + ParaId::new(1000), + explicit_recovery_chan_rx, + Arc::new(DummySyncOracle::default()), + ); + + task::spawn(pov_recovery.run()); + + let recovery_response_tx; + + assert_matches!( + recovery_subsystem_rx.next().await, + Some(AvailabilityRecoveryMessage::RecoverAvailableData( + receipt, + session_index, + None, + None, + response_tx + )) => { + assert_eq!(receipt.hash(), candidate_hash); + assert_eq!(session_index, TEST_SESSION_INDEX); + recovery_response_tx = response_tx; + } + ); + + // While 
candidate is pending recovery, import the candidate from external source. + let (unpin_sender, _unpin_receiver) = sc_utils::mpsc::tracing_unbounded("test_unpin", 10); + import_notifications_tx + .unbounded_send(BlockImportNotification::new( + header.hash(), + BlockOrigin::ConsensusBroadcast, + header.clone(), + false, + None, + unpin_sender, + )) + .unwrap(); + + recovery_response_tx + .send(Ok(AvailableData { + pov: Arc::new(PoV { + block_data: ParachainBlockData::::new( + header.clone(), + vec![], + CompactProof { encoded_nodes: vec![] }, + ) + .encode() + .into(), + }), + validation_data: dummy_pvd(), + })) + .unwrap(); + + // Received import request for the recovered candidate. This could be optimised to not trigger a + // reimport. + assert_matches!(import_requests_rx.next().await, Some(incoming_blocks) => { + assert_eq!(incoming_blocks.len(), 1); + assert_eq!(incoming_blocks[0].header, Some(header)); + }); + + // No more recovery messages received. + assert_matches!(recovery_subsystem_rx.next().timeout(Duration::from_millis(100)).await, None); + + // No more import requests received + assert_matches!(import_requests_rx.next().timeout(Duration::from_millis(100)).await, None); +} + +#[tokio::test] +async fn candidate_is_finalized_while_awaiting_recovery() { + sp_tracing::init_for_tests(); + + let (recovery_subsystem_tx, mut recovery_subsystem_rx) = + AvailabilityRecoverySubsystemHandle::new(); + let recovery_delay_range = + RecoveryDelayRange { min: Duration::from_millis(0), max: Duration::from_millis(10) }; + let (_explicit_recovery_chan_tx, explicit_recovery_chan_rx) = mpsc::channel(10); + let candidates = make_candidate_chain(1..2); + let header = Header::decode(&mut &candidates[0].commitments.head_data.0[..]).unwrap(); + let candidate_hash = candidates[0].hash(); + + let relay_chain_client = Relaychain::new(vec![( + PHeader { + parent_hash: PHash::from_low_u64_be(0), + number: 1, + state_root: PHash::random(), + extrinsics_root: PHash::random(), + digest: 
Default::default(), + }, + candidates, + )]); + let mut known_blocks = HashMap::new(); + known_blocks.insert(GENESIS_HASH, BlockStatus::InChainWithState); + let (parachain_client, _import_notifications_tx, finality_notifications_tx) = + ParachainClient::new(vec![dummy_usage_info(0)], Arc::new(Mutex::new(known_blocks))); + let (parachain_import_queue, mut import_requests_rx) = ParachainImportQueue::new(); + + let pov_recovery = PoVRecovery::::new( + Box::new(recovery_subsystem_tx), + recovery_delay_range, + Arc::new(parachain_client), + Box::new(parachain_import_queue), + relay_chain_client, + ParaId::new(1000), + explicit_recovery_chan_rx, + Arc::new(DummySyncOracle::default()), + ); + + task::spawn(pov_recovery.run()); + + let recovery_response_tx; + + assert_matches!( + recovery_subsystem_rx.next().await, + Some(AvailabilityRecoveryMessage::RecoverAvailableData( + receipt, + session_index, + None, + None, + response_tx + )) => { + assert_eq!(receipt.hash(), candidate_hash); + assert_eq!(session_index, TEST_SESSION_INDEX); + // save it for later. + recovery_response_tx = response_tx; + } + ); + + // While candidate is pending recovery, it gets finalized. + let (unpin_sender, _unpin_receiver) = sc_utils::mpsc::tracing_unbounded("test_unpin", 10); + finality_notifications_tx + .unbounded_send(FinalityNotification::from_summary( + FinalizeSummary { header: header.clone(), finalized: vec![], stale_heads: vec![] }, + unpin_sender, + )) + .unwrap(); + + recovery_response_tx + .send(Ok(AvailableData { + pov: Arc::new(PoV { + block_data: ParachainBlockData::::new( + header.clone(), + vec![], + CompactProof { encoded_nodes: vec![] }, + ) + .encode() + .into(), + }), + validation_data: dummy_pvd(), + })) + .unwrap(); + + // No more recovery messages received. 
+ assert_matches!(recovery_subsystem_rx.next().timeout(Duration::from_millis(100)).await, None); + + // candidate is imported + assert_matches!(import_requests_rx.next().await, Some(incoming_blocks) => { + assert_eq!(incoming_blocks.len(), 1); + assert_eq!(incoming_blocks[0].header, Some(header)); + }); + + // No more import requests received + assert_matches!(import_requests_rx.next().timeout(Duration::from_millis(100)).await, None); +} + +#[tokio::test] +async fn chained_recovery_success() { + sp_tracing::init_for_tests(); + + let (recovery_subsystem_tx, mut recovery_subsystem_rx) = + AvailabilityRecoverySubsystemHandle::new(); + let recovery_delay_range = + RecoveryDelayRange { min: Duration::from_millis(0), max: Duration::from_millis(0) }; + let (_explicit_recovery_chan_tx, explicit_recovery_chan_rx) = mpsc::channel(10); + let candidates = make_candidate_chain(1..4); + let headers = candidates + .iter() + .map(|candidate| Header::decode(&mut &candidate.commitments.head_data.0[..]).unwrap()) + .collect::>(); + let candidate_hashes = candidates.iter().map(|candidate| candidate.hash()).collect::>(); + + let relay_chain_client = Relaychain::new(vec![( + PHeader { + parent_hash: PHash::from_low_u64_be(0), + number: 1, + state_root: PHash::random(), + extrinsics_root: PHash::random(), + digest: Default::default(), + }, + // 3 pending candidates + candidates, + )]); + let mut known_blocks = HashMap::new(); + known_blocks.insert(GENESIS_HASH, BlockStatus::InChainWithState); + let known_blocks = Arc::new(Mutex::new(known_blocks)); + let (parachain_client, import_notifications_tx, _finality_notifications_tx) = + ParachainClient::new(vec![dummy_usage_info(0)], known_blocks.clone()); + let (parachain_import_queue, mut import_requests_rx) = ParachainImportQueue::new(); + + let pov_recovery = PoVRecovery::::new( + Box::new(recovery_subsystem_tx), + recovery_delay_range, + Arc::new(parachain_client), + Box::new(parachain_import_queue), + relay_chain_client, + 
ParaId::new(1000), + explicit_recovery_chan_rx, + Arc::new(DummySyncOracle::default()), + ); + + task::spawn(pov_recovery.run()); + + // Candidates are recovered in the right order. + for (candidate_hash, header) in candidate_hashes.into_iter().zip(headers.into_iter()) { + assert_matches!( + recovery_subsystem_rx.next().await, + Some(AvailabilityRecoveryMessage::RecoverAvailableData( + receipt, + session_index, + None, + None, + response_tx + )) => { + assert_eq!(receipt.hash(), candidate_hash); + assert_eq!(session_index, TEST_SESSION_INDEX); + response_tx + .send(Ok(AvailableData { + pov: Arc::new(PoV { + block_data: ParachainBlockData::::new( + header.clone(), + vec![], + CompactProof { encoded_nodes: vec![] }, + ) + .encode() + .into(), + }), + validation_data: dummy_pvd(), + })) + .unwrap(); + } + ); + + assert_matches!(import_requests_rx.next().await, Some(incoming_blocks) => { + assert_eq!(incoming_blocks.len(), 1); + assert_eq!(incoming_blocks[0].header, Some(header.clone())); + }); + + known_blocks + .lock() + .expect("Poisoned lock") + .insert(header.hash(), BlockStatus::InChainWithState); + + let (unpin_sender, _unpin_receiver) = sc_utils::mpsc::tracing_unbounded("test_unpin", 10); + import_notifications_tx + .unbounded_send(BlockImportNotification::new( + header.hash(), + BlockOrigin::ConsensusBroadcast, + header, + false, + None, + unpin_sender, + )) + .unwrap(); + } + + // No more recovery messages received. 
+ assert_matches!(recovery_subsystem_rx.next().timeout(Duration::from_millis(100)).await, None); + + // No more import requests received + assert_matches!(import_requests_rx.next().timeout(Duration::from_millis(100)).await, None); +} + +#[tokio::test] +async fn chained_recovery_child_succeeds_before_parent() { + sp_tracing::init_for_tests(); + + let (recovery_subsystem_tx, mut recovery_subsystem_rx) = + AvailabilityRecoverySubsystemHandle::new(); + let recovery_delay_range = + RecoveryDelayRange { min: Duration::from_millis(0), max: Duration::from_millis(0) }; + let (_explicit_recovery_chan_tx, explicit_recovery_chan_rx) = mpsc::channel(10); + let candidates = make_candidate_chain(1..3); + let headers = candidates + .iter() + .map(|candidate| Header::decode(&mut &candidate.commitments.head_data.0[..]).unwrap()) + .collect::>(); + let candidate_hashes = candidates.iter().map(|candidate| candidate.hash()).collect::>(); + + let relay_chain_client = Relaychain::new(vec![( + PHeader { + parent_hash: PHash::from_low_u64_be(0), + number: 1, + state_root: PHash::random(), + extrinsics_root: PHash::random(), + digest: Default::default(), + }, + // 2 pending candidates + candidates, + )]); + let mut known_blocks = HashMap::new(); + known_blocks.insert(GENESIS_HASH, BlockStatus::InChainWithState); + let known_blocks = Arc::new(Mutex::new(known_blocks)); + let (parachain_client, _import_notifications_tx, _finality_notifications_tx) = + ParachainClient::new(vec![dummy_usage_info(0)], known_blocks.clone()); + let (parachain_import_queue, mut import_requests_rx) = ParachainImportQueue::new(); + + let pov_recovery = PoVRecovery::::new( + Box::new(recovery_subsystem_tx), + recovery_delay_range, + Arc::new(parachain_client), + Box::new(parachain_import_queue), + relay_chain_client, + ParaId::new(1000), + explicit_recovery_chan_rx, + Arc::new(DummySyncOracle::default()), + ); + + task::spawn(pov_recovery.run()); + + let mut recovery_responses_senders = vec![]; + + for candidate_hash 
in candidate_hashes.iter() { + assert_matches!( + recovery_subsystem_rx.next().await, + Some(AvailabilityRecoveryMessage::RecoverAvailableData( + receipt, + session_index, + None, + None, + response_tx + )) => { + assert_eq!(receipt.hash(), *candidate_hash); + assert_eq!(session_index, TEST_SESSION_INDEX); + recovery_responses_senders.push(response_tx); + } + ); + } + + // Send out the responses in reverse order. + for (recovery_response_sender, header) in + recovery_responses_senders.into_iter().zip(headers.iter()).rev() + { + recovery_response_sender + .send(Ok(AvailableData { + pov: Arc::new(PoV { + block_data: ParachainBlockData::::new( + header.clone(), + vec![], + CompactProof { encoded_nodes: vec![] }, + ) + .encode() + .into(), + }), + validation_data: dummy_pvd(), + })) + .unwrap(); + } + + assert_matches!(import_requests_rx.next().await, Some(incoming_blocks) => { + // The two import requests will be batched. + assert_eq!(incoming_blocks.len(), 2); + assert_eq!(incoming_blocks[0].header, Some(headers[0].clone())); + assert_eq!(incoming_blocks[1].header, Some(headers[1].clone())); + }); + + // No more recovery messages received. 
+ assert_matches!(recovery_subsystem_rx.next().timeout(Duration::from_millis(100)).await, None); + + // No more import requests received + assert_matches!(import_requests_rx.next().timeout(Duration::from_millis(100)).await, None); +} diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index 578b942776dc..7871623e8447 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -30,7 +30,7 @@ use futures::{FutureExt, Stream, StreamExt}; use polkadot_service::{ CollatorPair, Configuration, FullBackend, FullClient, Handle, NewFull, TaskManager, }; -use sc_cli::SubstrateCli; +use sc_cli::{RuntimeVersion, SubstrateCli}; use sc_client_api::{ blockchain::BlockStatus, Backend, BlockchainEvents, HeaderBackend, ImportNotifications, StorageProof, @@ -68,6 +68,10 @@ impl RelayChainInProcessInterface { #[async_trait] impl RelayChainInterface for RelayChainInProcessInterface { + async fn version(&self, relay_parent: PHash) -> RelayChainResult { + Ok(self.full_client.runtime_version_at(relay_parent)?) + } + async fn retrieve_dmq_contents( &self, para_id: ParaId, @@ -251,6 +255,14 @@ impl RelayChainInterface for RelayChainInProcessInterface { }); Ok(Box::pin(notifications_stream)) } + + async fn candidates_pending_availability( + &self, + hash: PHash, + para_id: ParaId, + ) -> RelayChainResult> { + Ok(self.full_client.runtime_api().candidates_pending_availability(hash, para_id)?) 
+ } } pub enum BlockCheckStatus { diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index 5d612cdc0eef..e8603693ac8d 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -18,6 +18,7 @@ sp-api = { path = "../../../substrate/primitives/api" } sp-blockchain = { path = "../../../substrate/primitives/blockchain" } sp-state-machine = { path = "../../../substrate/primitives/state-machine" } sc-client-api = { path = "../../../substrate/client/api" } +sp-version = { path = "../../../substrate/primitives/version", default-features = false } futures = "0.3.28" async-trait = "0.1.79" diff --git a/cumulus/client/relay-chain-interface/src/lib.rs b/cumulus/client/relay-chain-interface/src/lib.rs index 7c7796b468c0..46e19b40f010 100644 --- a/cumulus/client/relay-chain-interface/src/lib.rs +++ b/cumulus/client/relay-chain-interface/src/lib.rs @@ -16,10 +16,10 @@ use std::{collections::BTreeMap, pin::Pin, sync::Arc}; +use futures::Stream; use polkadot_overseer::prometheus::PrometheusError; use sc_client_api::StorageProof; - -use futures::Stream; +use sp_version::RuntimeVersion; use async_trait::async_trait; use codec::Error as CodecError; @@ -149,8 +149,12 @@ pub trait RelayChainInterface: Send + Sync { _: OccupiedCoreAssumption, ) -> RelayChainResult>; - /// Get the receipt of a candidate pending availability. This returns `Some` for any paras - /// assigned to occupied cores in `availability_cores` and `None` otherwise. + /// Get the receipt of the first candidate pending availability of this para_id. This returns + /// `Some` for any paras assigned to occupied cores in `availability_cores` and `None` + /// otherwise. + #[deprecated( + note = "`candidate_pending_availability` only returns one candidate and is deprecated. Use `candidates_pending_availability` instead." 
+ )] async fn candidate_pending_availability( &self, block_id: PHash, @@ -203,6 +207,16 @@ pub trait RelayChainInterface: Send + Sync { para_id: ParaId, occupied_core_assumption: OccupiedCoreAssumption, ) -> RelayChainResult>; + + /// Get the receipts of all candidates pending availability for this para_id. + async fn candidates_pending_availability( + &self, + block_id: PHash, + para_id: ParaId, + ) -> RelayChainResult>; + + /// Get the runtime version of the relay chain. + async fn version(&self, relay_parent: PHash) -> RelayChainResult; } #[async_trait] @@ -237,6 +251,7 @@ where .await } + #[allow(deprecated)] async fn candidate_pending_availability( &self, block_id: PHash, @@ -321,4 +336,16 @@ where .validation_code_hash(relay_parent, para_id, occupied_core_assumption) .await } + + async fn candidates_pending_availability( + &self, + block_id: PHash, + para_id: ParaId, + ) -> RelayChainResult> { + (**self).candidates_pending_availability(block_id, para_id).await + } + + async fn version(&self, relay_parent: PHash) -> RelayChainResult { + (**self).version(relay_parent).await + } } diff --git a/cumulus/client/relay-chain-rpc-interface/src/lib.rs b/cumulus/client/relay-chain-rpc-interface/src/lib.rs index 3a4c186e301e..bb7bfa5dc322 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/lib.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/lib.rs @@ -33,6 +33,7 @@ use sc_client_api::StorageProof; use sp_core::sp_std::collections::btree_map::BTreeMap; use sp_state_machine::StorageValue; use sp_storage::StorageKey; +use sp_version::RuntimeVersion; use std::pin::Pin; use cumulus_primitives_core::relay_chain::BlockId; @@ -237,4 +238,18 @@ impl RelayChainInterface for RelayChainRpcInterface { let imported_headers_stream = self.rpc_client.get_best_heads_stream()?; Ok(imported_headers_stream.boxed()) } + + async fn candidates_pending_availability( + &self, + hash: RelayHash, + para_id: ParaId, + ) -> RelayChainResult> { + self.rpc_client + 
.parachain_host_candidates_pending_availability(hash, para_id) + .await + } + + async fn version(&self, relay_parent: RelayHash) -> RelayChainResult { + self.rpc_client.runtime_version(relay_parent).await + } } diff --git a/prdoc/pr_4733.prdoc b/prdoc/pr_4733.prdoc new file mode 100644 index 000000000000..e63324839852 --- /dev/null +++ b/prdoc/pr_4733.prdoc @@ -0,0 +1,27 @@ +title: Add pov-recovery unit tests and support for elastic scaling + +doc: + - audience: Node Dev + description: | + Adds unit tests for cumulus pov-recovery and support for elastic scaling (recovering multiple candidates in a single relay chain block). + +crates: + - name: cumulus-client-network + bump: patch + - name: cumulus-client-pov-recovery + bump: patch + - name: cumulus-relay-chain-interface + bump: major + validate: false + - name: cumulus-relay-chain-inprocess-interface + bump: minor + - name: cumulus-relay-chain-rpc-interface + bump: minor + - name: cumulus-client-consensus-common + bump: none + - name: sc-client-api + bump: minor + - name: sp-blockchain + bump: minor + - name: sp-consensus + bump: minor diff --git a/substrate/client/api/src/client.rs b/substrate/client/api/src/client.rs index 2de09840e4df..45cfafb25846 100644 --- a/substrate/client/api/src/client.rs +++ b/substrate/client/api/src/client.rs @@ -168,7 +168,7 @@ pub trait ProvideUncles { } /// Client info -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct ClientInfo { /// Best block hash. pub chain: Info, diff --git a/substrate/primitives/blockchain/src/backend.rs b/substrate/primitives/blockchain/src/backend.rs index 06e5b682964a..933e41e2ab45 100644 --- a/substrate/primitives/blockchain/src/backend.rs +++ b/substrate/primitives/blockchain/src/backend.rs @@ -284,7 +284,7 @@ impl DisplacedLeavesAfterFinalization { } /// Blockchain info -#[derive(Debug, Eq, PartialEq)] +#[derive(Debug, Eq, PartialEq, Clone)] pub struct Info { /// Best block hash. 
pub best_hash: Block::Hash, diff --git a/substrate/primitives/consensus/common/src/lib.rs b/substrate/primitives/consensus/common/src/lib.rs index 01d3b7a24f9c..37636b34b03d 100644 --- a/substrate/primitives/consensus/common/src/lib.rs +++ b/substrate/primitives/consensus/common/src/lib.rs @@ -40,7 +40,7 @@ pub use sp_inherents::InherentData; pub use sp_state_machine::Backend as StateBackend; /// Block status. -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone)] pub enum BlockStatus { /// Added to the import queue. Queued, From b65313e81465dd730e48d4ce00deb76922618375 Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Mon, 10 Jun 2024 15:54:22 +0300 Subject: [PATCH 20/52] Remove unncessary call remove_from_peers_set (#4742) ... this is superfluous because set_reserved_peers implementation already calls this method here: https://github.com/paritytech/polkadot-sdk/blob/cdb297b15ad9c1d952c0501afaf6b764e5fd147c/substrate/client/network/src/protocol_controller.rs#L571, so the call just ends producing this warnings whenever we manipulate the peers set. 
``` Trying to remove unknown reserved node 12D3KooWRCePWvHoBbz9PSkw4aogtdVqkVDhiwpcHZCqh4hdPTXC from SetId(3) peerset warnings (from different peers) ``` Signed-off-by: Alexandru Gheorghe --- .../node/network/bridge/src/validator_discovery.rs | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/polkadot/node/network/bridge/src/validator_discovery.rs b/polkadot/node/network/bridge/src/validator_discovery.rs index b11af8a8a089..f0ef038d5eb4 100644 --- a/polkadot/node/network/bridge/src/validator_discovery.rs +++ b/polkadot/node/network/bridge/src/validator_discovery.rs @@ -88,16 +88,6 @@ impl Service { { gum::warn!(target: LOG_TARGET, err = ?e, "AuthorityDiscoveryService returned an invalid multiaddress"); } - // the addresses are known to be valid - // - // for peer-set management, the main protocol name should be used regardless of - // the negotiated version. - let _ = network_service - .remove_from_peers_set( - self.peerset_protocol_names.get_main_name(peer_set), - peers_to_remove, - ) - .await; network_service } From 96ab6869bafb06352b282576a6395aec8e9f2705 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 11 Jun 2024 15:02:11 +0200 Subject: [PATCH 21/52] finalization: Skip tree route calculation if no forks present (#4721) ## Issue Currently, syncing parachains from scratch can lead to a very long finalization time once they reach the tip of the chain. The problem is that we try to finalize everything from 0 to the tip, which can be thousands or even millions of blocks. We finalize sequentially and try to compute displaced branches during finalization. So for every block on the way, we compute an expensive tree route. ## Proposed Improvements In this PR, I propose improvements that solve this situation: - **Skip tree route calculation if `leaves().len() == 1`:** This should be enough for 90% of cases where there is only one leaf after sync. 
- **Optimize finalization for long distances:** It can happen that the parachain has imported some leaf and then receives a relay chain notification with the finalized block. In that case, the previous optimization will not trigger. A second mechanism should ensure that we do not need to compute the full tree route. If the finalization distance is long, we check the lowest common ancestor of all the leaves. If it is above the to-be-finalized block, we know that there are no displaced leaves. This is fast because forks are short and close to the tip, so we can leverage the header cache. ## Alternative Approach - The problem was introduced in #3962. Reverting that PR is another possible strategy. - We could store for every fork where it begins, however sounds a bit more involved to me. fixes #4614 --- prdoc/pr_4721.prdoc | 19 +++ substrate/client/db/src/lib.rs | 121 +++++++++++++++++- .../primitives/blockchain/src/backend.rs | 40 +++++- .../blockchain/src/header_metadata.rs | 26 +++- 4 files changed, 198 insertions(+), 8 deletions(-) create mode 100644 prdoc/pr_4721.prdoc diff --git a/prdoc/pr_4721.prdoc b/prdoc/pr_4721.prdoc new file mode 100644 index 000000000000..730ac4d83086 --- /dev/null +++ b/prdoc/pr_4721.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Skip tree route calculation if no forks present + +doc: + - audience: Node Operator + description: | + Fixes an issue with synchronisation on parachains. Once they reached the tip of the chain, + nodes would show `Preparing 0.0 bps`. This is shown because the node is blocked on calculating + the tree route from genesis to the tip of the chain many times. This PR solves that by skipping + tree route calculation if there is only one leave. In addition, further optimizations have been + done to alleviate long finalization distances. 
+ +crates: + - name: sp-blockchain + bump: minor + - name: sc-client-db + bump: none diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 36f9aea817c9..8d8b7a2aff88 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -2547,7 +2547,7 @@ pub(crate) mod tests { backend::{Backend as BTrait, BlockImportOperation as Op}, blockchain::Backend as BLBTrait, }; - use sp_blockchain::{lowest_common_ancestor, tree_route}; + use sp_blockchain::{lowest_common_ancestor, lowest_common_ancestor_multiblock, tree_route}; use sp_core::H256; use sp_runtime::{ testing::{Block as RawBlock, ExtrinsicWrapper, Header}, @@ -3108,6 +3108,125 @@ pub(crate) mod tests { } } + #[test] + fn lowest_common_ancestors_multiblock_works() { + let backend = Backend::::new_test(1000, 100); + let blockchain = backend.blockchain(); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + + // fork from genesis: 3 prong. + // block 0 -> a1 -> a2 -> a3 + // | + // -> b1 -> b2 -> c1 -> c2 + // | + // -> d1 -> d2 + let a1 = insert_header(&backend, 1, block0, None, Default::default()); + let a2 = insert_header(&backend, 2, a1, None, Default::default()); + let a3 = insert_header(&backend, 3, a2, None, Default::default()); + + // fork from genesis: 2 prong. + let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32])); + let b2 = insert_header(&backend, 2, b1, None, Default::default()); + + // fork from b2. + let c1 = insert_header(&backend, 3, b2, None, H256::from([2; 32])); + let c2 = insert_header(&backend, 4, c1, None, Default::default()); + + // fork from b1. 
+ let d1 = insert_header(&backend, 2, b1, None, H256::from([3; 32])); + let d2 = insert_header(&backend, 3, d1, None, Default::default()); + { + let lca = lowest_common_ancestor_multiblock(blockchain, vec![a3, b2]).unwrap().unwrap(); + + assert_eq!(lca.hash, block0); + assert_eq!(lca.number, 0); + } + + { + let lca = lowest_common_ancestor_multiblock(blockchain, vec![a1, a3]).unwrap().unwrap(); + + assert_eq!(lca.hash, a1); + assert_eq!(lca.number, 1); + } + + { + let lca = lowest_common_ancestor_multiblock(blockchain, vec![a3, a1]).unwrap().unwrap(); + + assert_eq!(lca.hash, a1); + assert_eq!(lca.number, 1); + } + + { + let lca = lowest_common_ancestor_multiblock(blockchain, vec![a2, a3]).unwrap().unwrap(); + + assert_eq!(lca.hash, a2); + assert_eq!(lca.number, 2); + } + + { + let lca = lowest_common_ancestor_multiblock(blockchain, vec![a2, a1]).unwrap().unwrap(); + + assert_eq!(lca.hash, a1); + assert_eq!(lca.number, 1); + } + + { + let lca = lowest_common_ancestor_multiblock(blockchain, vec![a2, a2]).unwrap().unwrap(); + + assert_eq!(lca.hash, a2); + assert_eq!(lca.number, 2); + } + + { + let lca = lowest_common_ancestor_multiblock(blockchain, vec![a3, d2, c2]) + .unwrap() + .unwrap(); + + assert_eq!(lca.hash, block0); + assert_eq!(lca.number, 0); + } + + { + let lca = lowest_common_ancestor_multiblock(blockchain, vec![c2, d2, b2]) + .unwrap() + .unwrap(); + + assert_eq!(lca.hash, b1); + assert_eq!(lca.number, 1); + } + + { + let lca = lowest_common_ancestor_multiblock(blockchain, vec![a1, a2, a3]) + .unwrap() + .unwrap(); + + assert_eq!(lca.hash, a1); + assert_eq!(lca.number, 1); + } + + { + let lca = lowest_common_ancestor_multiblock(blockchain, vec![b1, b2, d1]) + .unwrap() + .unwrap(); + + assert_eq!(lca.hash, b1); + assert_eq!(lca.number, 1); + } + + { + let lca = lowest_common_ancestor_multiblock(blockchain, vec![]); + + assert_eq!(true, matches!(lca, Ok(None))); + } + + { + let lca = lowest_common_ancestor_multiblock(blockchain, 
vec![a1]).unwrap().unwrap(); + + assert_eq!(lca.hash, a1); + assert_eq!(lca.number, 1); + } + } + #[test] fn test_tree_route_regression() { // NOTE: this is a test for a regression introduced in #3665, the result diff --git a/substrate/primitives/blockchain/src/backend.rs b/substrate/primitives/blockchain/src/backend.rs index 933e41e2ab45..76393420da74 100644 --- a/substrate/primitives/blockchain/src/backend.rs +++ b/substrate/primitives/blockchain/src/backend.rs @@ -21,16 +21,15 @@ use log::warn; use parking_lot::RwLock; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, + traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero}, Justifications, }; use std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; -use crate::header_metadata::HeaderMetadata; - use crate::{ error::{Error, Result}, - tree_route, TreeRoute, + header_metadata::{self, HeaderMetadata}, + lowest_common_ancestor_multiblock, tree_route, TreeRoute, }; /// Blockchain database header backend. Does not perform any validation. @@ -229,12 +228,41 @@ pub trait Backend: ) -> std::result::Result, Error> { let mut result = DisplacedLeavesAfterFinalization::default(); - if finalized_block_number == Zero::zero() { + let leaves = self.leaves()?; + + // If we have only one leaf there are no forks, and we can return early. + if finalized_block_number == Zero::zero() || leaves.len() == 1 { return Ok(result) } + let first_leaf = leaves.first().ok_or(Error::Backend( + "Unable to find any leaves. This should not happen.".to_string(), + ))?; + let leaf_block_header = self.expect_header(*first_leaf)?; + + // If the distance between the leafs and the finalized block is large, calculating + // tree routes can be very expensive. In that case, we will try to find the + // lowest common ancestor between all the leaves. The assumption here is that the forks are + // close to the tip and not long. So the LCA can be computed from the header cache. 
If the + // LCA is above the finalized block, we know that there are no displaced leaves by the + // finalization. + if leaf_block_header + .number() + .checked_sub(&finalized_block_number) + .unwrap_or(0u32.into()) > + header_metadata::LRU_CACHE_SIZE.into() + { + if let Some(lca) = lowest_common_ancestor_multiblock(self, leaves.clone())? { + if lca.number > finalized_block_number { + return Ok(result) + } else { + log::warn!("The distance between leafs and finalized block is large. Finalization can take a long time."); + } + }; + } + // For each leaf determine whether it belongs to a non-canonical branch. - for leaf_hash in self.leaves()? { + for leaf_hash in leaves { let leaf_block_header = self.expect_header(leaf_hash)?; let leaf_number = *leaf_block_header.number(); diff --git a/substrate/primitives/blockchain/src/header_metadata.rs b/substrate/primitives/blockchain/src/header_metadata.rs index 27caaae71add..c2054445b067 100644 --- a/substrate/primitives/blockchain/src/header_metadata.rs +++ b/substrate/primitives/blockchain/src/header_metadata.rs @@ -23,7 +23,7 @@ use schnellru::{ByLength, LruMap}; use sp_runtime::traits::{Block as BlockT, Header, NumberFor, One}; /// Set to the expected max difference between `best` and `finalized` blocks at sync. -const LRU_CACHE_SIZE: u32 = 5_000; +pub(crate) const LRU_CACHE_SIZE: u32 = 5_000; /// Get lowest common ancestor between two blocks in the tree. /// @@ -96,6 +96,30 @@ pub fn lowest_common_ancestor + ?Sized>( Ok(HashAndNumber { hash: header_one.hash, number: header_one.number }) } +/// Get lowest common ancestor between multiple blocks. 
+pub fn lowest_common_ancestor_multiblock + ?Sized>( + backend: &T, + hashes: Vec, +) -> Result>, T::Error> { + // Ensure the list of hashes is not empty + let mut hashes_iter = hashes.into_iter(); + + let first_hash = match hashes_iter.next() { + Some(hash) => hash, + None => return Ok(None), + }; + + // Start with the first hash as the initial LCA + let first_cached = backend.header_metadata(first_hash)?; + let mut lca = HashAndNumber { number: first_cached.number, hash: first_cached.hash }; + for hash in hashes_iter { + // Calculate the LCA of the current LCA and the next hash + lca = lowest_common_ancestor(backend, lca.hash, hash)?; + } + + Ok(Some(lca)) +} + /// Compute a tree-route between two blocks. See tree-route docs for more details. pub fn tree_route + ?Sized>( backend: &T, From ad8620922bd7c0477b25c7dfd6fc233641cb27ae Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 11 Jun 2024 22:15:05 +0000 Subject: [PATCH 22/52] Append overlay optimization. (#1223) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This branch propose to avoid clones in append by storing offset and size in previous overlay depth. That way on rollback we can just truncate and change size of existing value. To avoid copy it also means that : - append on new overlay layer if there is an existing value: create a new Append entry with previous offsets, and take memory of previous overlay value. - rollback on append: restore value by applying offsets and put it back in previous overlay value - commit on append: appended value overwrite previous value (is an empty vec as the memory was taken). offsets of commited layer are dropped, if there is offset in previous overlay layer they are maintained. - set value (or remove) when append offsets are present: current appended value is moved back to previous overlay value with offset applied and current empty entry is overwrite (no offsets kept). The modify mechanism is not needed anymore. 
This branch lacks testing and breaks some existing genericity (bit of duplicated code), but good to have to check direction. Generally I am not sure if it is worth it or we just should favor different directions (transient blob storage for instance), as the current append mechanism is a bit tricky (having a variable length in first position means we sometimes need to insert in front of a vector). Fix #30. --------- Signed-off-by: Alexandru Vasile Co-authored-by: EgorPopelyaev Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Co-authored-by: Bastian Köcher Co-authored-by: Oliver Tale-Yazdi Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Co-authored-by: Liam Aharon Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Branislav Kontur Co-authored-by: Bastian Köcher Co-authored-by: Sebastian Kunert --- Cargo.lock | 15 + .../core/pvf/common/src/executor_interface.rs | 12 +- prdoc/pr_1223.prdoc | 13 + .../executor/src/integration_tests/mod.rs | 8 +- substrate/primitives/externalities/src/lib.rs | 16 +- substrate/primitives/io/src/lib.rs | 12 +- substrate/primitives/state-machine/Cargo.toml | 3 + .../primitives/state-machine/fuzz/Cargo.toml | 30 + .../fuzz/fuzz_targets/fuzz_append.rs | 26 + .../primitives/state-machine/src/basic.rs | 57 +- substrate/primitives/state-machine/src/ext.rs | 71 +- .../primitives/state-machine/src/fuzzing.rs | 319 ++++++++ .../state-machine/src/in_memory_backend.rs | 1 + substrate/primitives/state-machine/src/lib.rs | 79 +- .../src/overlayed_changes/changeset.rs | 744 ++++++++++++++++-- .../src/overlayed_changes/mod.rs | 130 +-- .../src/overlayed_changes/offchain.rs | 14 +- .../primitives/state-machine/src/read_only.rs | 12 +- .../primitives/state-machine/src/testing.rs | 16 +- .../frame/remote-externalities/src/lib.rs | 4 +- 20 files changed, 1352 insertions(+), 230 deletions(-) create mode 100644 prdoc/pr_1223.prdoc create mode 100644 
substrate/primitives/state-machine/fuzz/Cargo.toml create mode 100644 substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs create mode 100644 substrate/primitives/state-machine/src/fuzzing.rs diff --git a/Cargo.lock b/Cargo.lock index d2b7a47f84c9..fba768c653c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -294,6 +294,9 @@ name = "arbitrary" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +dependencies = [ + "derive_arbitrary", +] [[package]] name = "ark-bls12-377" @@ -4729,6 +4732,17 @@ dependencies = [ "syn 2.0.61", ] +[[package]] +name = "derive_arbitrary" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" +dependencies = [ + "proc-macro2 1.0.82", + "quote 1.0.35", + "syn 2.0.61", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -20296,6 +20310,7 @@ dependencies = [ name = "sp-state-machine" version = "0.35.0" dependencies = [ + "arbitrary", "array-bytes", "assert_matches", "hash-db", diff --git a/polkadot/node/core/pvf/common/src/executor_interface.rs b/polkadot/node/core/pvf/common/src/executor_interface.rs index 87491e70c5f2..47f9ed1604e7 100644 --- a/polkadot/node/core/pvf/common/src/executor_interface.rs +++ b/polkadot/node/core/pvf/common/src/executor_interface.rs @@ -215,19 +215,19 @@ type HostFunctions = ( struct ValidationExternalities(sp_externalities::Extensions); impl sp_externalities::Externalities for ValidationExternalities { - fn storage(&self, _: &[u8]) -> Option> { + fn storage(&mut self, _: &[u8]) -> Option> { panic!("storage: unsupported feature for parachain validation") } - fn storage_hash(&self, _: &[u8]) -> Option> { + fn storage_hash(&mut self, _: &[u8]) -> Option> { panic!("storage_hash: unsupported feature for parachain validation") } - fn child_storage_hash(&self, _: &ChildInfo, _: &[u8]) 
-> Option> { + fn child_storage_hash(&mut self, _: &ChildInfo, _: &[u8]) -> Option> { panic!("child_storage_hash: unsupported feature for parachain validation") } - fn child_storage(&self, _: &ChildInfo, _: &[u8]) -> Option> { + fn child_storage(&mut self, _: &ChildInfo, _: &[u8]) -> Option> { panic!("child_storage: unsupported feature for parachain validation") } @@ -275,11 +275,11 @@ impl sp_externalities::Externalities for ValidationExternalities { panic!("child_storage_root: unsupported feature for parachain validation") } - fn next_child_storage_key(&self, _: &ChildInfo, _: &[u8]) -> Option> { + fn next_child_storage_key(&mut self, _: &ChildInfo, _: &[u8]) -> Option> { panic!("next_child_storage_key: unsupported feature for parachain validation") } - fn next_storage_key(&self, _: &[u8]) -> Option> { + fn next_storage_key(&mut self, _: &[u8]) -> Option> { panic!("next_storage_key: unsupported feature for parachain validation") } diff --git a/prdoc/pr_1223.prdoc b/prdoc/pr_1223.prdoc new file mode 100644 index 000000000000..08b18557b70c --- /dev/null +++ b/prdoc/pr_1223.prdoc @@ -0,0 +1,13 @@ +title: Optimize storage append operation + +doc: + - audience: [Node Dev, Runtime Dev] + description: | + This pull request optimizes the storage append operation in the `OverlayedChanges`. + Before the internal buffer was cloned every time a new transaction was created. Cloning + the internal buffer is now only done when there is no other possibility. This should + improve the performance in situations like when depositing events from batched calls. 
+ +crates: + - name: sp-state-machine + bump: major diff --git a/substrate/client/executor/src/integration_tests/mod.rs b/substrate/client/executor/src/integration_tests/mod.rs index 7f91b3ffe764..5d94ec6dcd38 100644 --- a/substrate/client/executor/src/integration_tests/mod.rs +++ b/substrate/client/executor/src/integration_tests/mod.rs @@ -178,7 +178,7 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(output, b"all ok!".to_vec().encode()); } - let expected = TestExternalities::new(sp_core::storage::Storage { + let mut expected = TestExternalities::new(sp_core::storage::Storage { top: map![ b"input".to_vec() => value, b"foo".to_vec() => b"bar".to_vec(), @@ -186,7 +186,7 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { ], children_default: map![], }); - assert_eq!(ext, expected); + assert!(ext.eq(&mut expected)); } test_wasm_execution!(clear_prefix_should_work); @@ -208,7 +208,7 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(output, b"all ok!".to_vec().encode()); } - let expected = TestExternalities::new(sp_core::storage::Storage { + let mut expected = TestExternalities::new(sp_core::storage::Storage { top: map![ b"aaa".to_vec() => b"1".to_vec(), b"aab".to_vec() => b"2".to_vec(), @@ -216,7 +216,7 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { ], children_default: map![], }); - assert_eq!(expected, ext); + assert!(expected.eq(&mut ext)); } test_wasm_execution!(blake2_256_should_work); diff --git a/substrate/primitives/externalities/src/lib.rs b/substrate/primitives/externalities/src/lib.rs index 142200f614a6..bcc46ee4f1b2 100644 --- a/substrate/primitives/externalities/src/lib.rs +++ b/substrate/primitives/externalities/src/lib.rs @@ -83,24 +83,24 @@ pub trait Externalities: ExtensionStore { fn set_offchain_storage(&mut self, key: &[u8], value: Option<&[u8]>); /// Read runtime storage. 
- fn storage(&self, key: &[u8]) -> Option>; + fn storage(&mut self, key: &[u8]) -> Option>; /// Get storage value hash. /// /// This may be optimized for large values. - fn storage_hash(&self, key: &[u8]) -> Option>; + fn storage_hash(&mut self, key: &[u8]) -> Option>; /// Get child storage value hash. /// /// This may be optimized for large values. /// /// Returns an `Option` that holds the SCALE encoded hash. - fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option>; + fn child_storage_hash(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option>; /// Read child runtime storage. /// /// Returns an `Option` that holds the SCALE encoded hash. - fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option>; + fn child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option>; /// Set storage entry `key` of current contract being called (effective immediately). fn set_storage(&mut self, key: Vec, value: Vec) { @@ -124,20 +124,20 @@ pub trait Externalities: ExtensionStore { } /// Whether a storage entry exists. - fn exists_storage(&self, key: &[u8]) -> bool { + fn exists_storage(&mut self, key: &[u8]) -> bool { self.storage(key).is_some() } /// Whether a child storage entry exists. - fn exists_child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> bool { + fn exists_child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> bool { self.child_storage(child_info, key).is_some() } /// Returns the key immediately following the given key, if it exists. - fn next_storage_key(&self, key: &[u8]) -> Option>; + fn next_storage_key(&mut self, key: &[u8]) -> Option>; /// Returns the key immediately following the given key, if it exists, in child storage. - fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option>; + fn next_child_storage_key(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option>; /// Clear an entire child storage. 
/// diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs index c8675a9a90bd..8ef1f41ce019 100644 --- a/substrate/primitives/io/src/lib.rs +++ b/substrate/primitives/io/src/lib.rs @@ -181,7 +181,7 @@ impl From for KillStorageResult { #[runtime_interface] pub trait Storage { /// Returns the data for `key` in the storage or `None` if the key can not be found. - fn get(&self, key: &[u8]) -> Option { + fn get(&mut self, key: &[u8]) -> Option { self.storage(key).map(bytes::Bytes::from) } @@ -190,7 +190,7 @@ pub trait Storage { /// doesn't exist at all. /// If `value_out` length is smaller than the returned length, only `value_out` length bytes /// are copied into `value_out`. - fn read(&self, key: &[u8], value_out: &mut [u8], value_offset: u32) -> Option { + fn read(&mut self, key: &[u8], value_out: &mut [u8], value_offset: u32) -> Option { self.storage(key).map(|value| { let value_offset = value_offset as usize; let data = &value[value_offset.min(value.len())..]; @@ -211,7 +211,7 @@ pub trait Storage { } /// Check whether the given `key` exists in storage. - fn exists(&self, key: &[u8]) -> bool { + fn exists(&mut self, key: &[u8]) -> bool { self.exists_storage(key) } @@ -387,7 +387,7 @@ pub trait DefaultChildStorage { /// /// Parameter `storage_key` is the unprefixed location of the root of the child trie in the /// parent trie. Result is `None` if the value for `key` in the child storage can not be found. - fn get(&self, storage_key: &[u8], key: &[u8]) -> Option> { + fn get(&mut self, storage_key: &[u8], key: &[u8]) -> Option> { let child_info = ChildInfo::new_default(storage_key); self.child_storage(&child_info, key).map(|s| s.to_vec()) } @@ -400,7 +400,7 @@ pub trait DefaultChildStorage { /// If `value_out` length is smaller than the returned length, only `value_out` length bytes /// are copied into `value_out`. 
fn read( - &self, + &mut self, storage_key: &[u8], key: &[u8], value_out: &mut [u8], @@ -478,7 +478,7 @@ pub trait DefaultChildStorage { /// Check a child storage key. /// /// Check whether the given `key` exists in default child defined at `storage_key`. - fn exists(&self, storage_key: &[u8], key: &[u8]) -> bool { + fn exists(&mut self, storage_key: &[u8], key: &[u8]) -> bool { let child_info = ChildInfo::new_default(storage_key); self.exists_child_storage(&child_info, key) } diff --git a/substrate/primitives/state-machine/Cargo.toml b/substrate/primitives/state-machine/Cargo.toml index c383a17cb006..f6402eccf0df 100644 --- a/substrate/primitives/state-machine/Cargo.toml +++ b/substrate/primitives/state-machine/Cargo.toml @@ -30,6 +30,7 @@ sp-externalities = { path = "../externalities", default-features = false } sp-panic-handler = { path = "../panic-handler", optional = true } sp-trie = { path = "../trie", default-features = false } trie-db = { version = "0.29.0", default-features = false } +arbitrary = { version = "1", features = ["derive"], optional = true } [dev-dependencies] array-bytes = "6.2.2" @@ -37,9 +38,11 @@ pretty_assertions = "1.2.1" rand = "0.8.5" sp-runtime = { path = "../runtime" } assert_matches = "1.5" +arbitrary = { version = "1", features = ["derive"] } [features] default = ["std"] +fuzzing = ["arbitrary"] std = [ "codec/std", "hash-db/std", diff --git a/substrate/primitives/state-machine/fuzz/Cargo.toml b/substrate/primitives/state-machine/fuzz/Cargo.toml new file mode 100644 index 000000000000..416c00c34fda --- /dev/null +++ b/substrate/primitives/state-machine/fuzz/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "sp-state-machine-fuzz" +version = "0.0.0" +publish = false +license = "Apache-2.0" +edition = "2021" + +[package.metadata] +cargo-fuzz = true + +[dependencies] +libfuzzer-sys = "0.4" +sp-runtime = { path = "../../runtime" } + +[dependencies.sp-state-machine] +path = ".." 
+features = ["fuzzing"] + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[profile.release] +debug = 1 + +[[bin]] +name = "fuzz_append" +path = "fuzz_targets/fuzz_append.rs" +test = false +doc = false diff --git a/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs b/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs new file mode 100644 index 000000000000..44847f535655 --- /dev/null +++ b/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs @@ -0,0 +1,26 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![no_main] + +use libfuzzer_sys::fuzz_target; +use sp_state_machine::fuzzing::{fuzz_append, FuzzAppendPayload}; +use sp_runtime::traits::BlakeTwo256; + +fuzz_target!(|data: FuzzAppendPayload| { + fuzz_append::(data); +}); diff --git a/substrate/primitives/state-machine/src/basic.rs b/substrate/primitives/state-machine/src/basic.rs index 8b6f746eaba0..6201d60ababd 100644 --- a/substrate/primitives/state-machine/src/basic.rs +++ b/substrate/primitives/state-machine/src/basic.rs @@ -59,16 +59,17 @@ impl BasicExternalities { } /// Consume self and returns inner storages - pub fn into_storages(self) -> Storage { + #[cfg(feature = "std")] + pub fn into_storages(mut self) -> Storage { Storage { top: self .overlay - .changes() + .changes_mut() .filter_map(|(k, v)| v.value().map(|v| (k.to_vec(), v.to_vec()))) .collect(), children_default: self .overlay - .children() + .children_mut() .map(|(iter, i)| { ( i.storage_key().to_vec(), @@ -87,6 +88,7 @@ impl BasicExternalities { /// Execute the given closure `f` with the externalities set and initialized with `storage`. /// /// Returns the result of the closure and updates `storage` with all changes. 
+ #[cfg(feature = "std")] pub fn execute_with_storage( storage: &mut sp_core::storage::Storage, f: impl FnOnce() -> R, @@ -118,19 +120,37 @@ impl BasicExternalities { } } +#[cfg(test)] impl PartialEq for BasicExternalities { - fn eq(&self, other: &BasicExternalities) -> bool { - self.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() == - other.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() && + fn eq(&self, other: &Self) -> bool { + self.overlay + .changes() + .map(|(k, v)| (k, v.value_ref().materialize())) + .collect::>() == + other + .overlay + .changes() + .map(|(k, v)| (k, v.value_ref().materialize())) + .collect::>() && self.overlay .children() - .map(|(iter, i)| (i, iter.map(|(k, v)| (k, v.value())).collect::>())) + .map(|(iter, i)| { + ( + i, + iter.map(|(k, v)| (k, v.value_ref().materialize())) + .collect::>(), + ) + }) .collect::>() == other .overlay .children() .map(|(iter, i)| { - (i, iter.map(|(k, v)| (k, v.value())).collect::>()) + ( + i, + iter.map(|(k, v)| (k, v.value_ref().materialize())) + .collect::>(), + ) }) .collect::>() } @@ -159,27 +179,27 @@ impl From> for BasicExternalities { impl Externalities for BasicExternalities { fn set_offchain_storage(&mut self, _key: &[u8], _value: Option<&[u8]>) {} - fn storage(&self, key: &[u8]) -> Option { + fn storage(&mut self, key: &[u8]) -> Option { self.overlay.storage(key).and_then(|v| v.map(|v| v.to_vec())) } - fn storage_hash(&self, key: &[u8]) -> Option> { + fn storage_hash(&mut self, key: &[u8]) -> Option> { self.storage(key).map(|v| Blake2Hasher::hash(&v).encode()) } - fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { self.overlay.child_storage(child_info, key).and_then(|v| v.map(|v| v.to_vec())) } - fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { + fn child_storage_hash(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option> { 
self.child_storage(child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) } - fn next_storage_key(&self, key: &[u8]) -> Option { + fn next_storage_key(&mut self, key: &[u8]) -> Option { self.overlay.iter_after(key).find_map(|(k, v)| v.value().map(|_| k.to_vec())) } - fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn next_child_storage_key(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { self.overlay .child_iter_after(child_info.storage_key(), key) .find_map(|(k, v)| v.value().map(|_| k.to_vec())) @@ -243,15 +263,14 @@ impl Externalities for BasicExternalities { MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } - fn storage_append(&mut self, key: Vec, value: Vec) { - let current_value = self.overlay.value_mut_or_insert_with(&key, || Default::default()); - crate::ext::StorageAppend::new(current_value).append(value); + fn storage_append(&mut self, key: Vec, element: Vec) { + self.overlay.append_storage(key, element, Default::default); } fn storage_root(&mut self, state_version: StateVersion) -> Vec { let mut top = self .overlay - .changes() + .changes_mut() .filter_map(|(k, v)| v.value().map(|v| (k.clone(), v.clone()))) .collect::>(); // Single child trie implementation currently allows using the same child @@ -278,7 +297,7 @@ impl Externalities for BasicExternalities { child_info: &ChildInfo, state_version: StateVersion, ) -> Vec { - if let Some((data, child_info)) = self.overlay.child_changes(child_info.storage_key()) { + if let Some((data, child_info)) = self.overlay.child_changes_mut(child_info.storage_key()) { let delta = data.into_iter().map(|(k, v)| (k.as_ref(), v.value().map(|v| v.as_slice()))); crate::in_memory_backend::new_in_mem::() diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index 9aa32bc866cf..7a79c4e8a1f1 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ 
b/substrate/primitives/state-machine/src/ext.rs @@ -22,7 +22,7 @@ use crate::overlayed_changes::OverlayedExtensions; use crate::{ backend::Backend, IndexOperation, IterArgs, OverlayedChanges, StorageKey, StorageValue, }; -use codec::{Encode, EncodeAppend}; +use codec::{Compact, CompactLen, Decode, Encode}; use hash_db::Hasher; #[cfg(feature = "std")] use sp_core::hexdisplay::HexDisplay; @@ -31,8 +31,8 @@ use sp_core::storage::{ }; use sp_externalities::{Extension, ExtensionStore, Externalities, MultiRemovalResults}; -use crate::{log_error, trace, warn}; -use alloc::{boxed::Box, vec, vec::Vec}; +use crate::{trace, warn}; +use alloc::{boxed::Box, vec::Vec}; use core::{ any::{Any, TypeId}, cmp::Ordering, @@ -139,7 +139,7 @@ where H::Out: Ord + 'static, B: 'a + Backend, { - pub fn storage_pairs(&self) -> Vec<(StorageKey, StorageValue)> { + pub fn storage_pairs(&mut self) -> Vec<(StorageKey, StorageValue)> { use std::collections::HashMap; self.backend @@ -147,7 +147,7 @@ where .expect("never fails in tests; qed.") .map(|key_value| key_value.expect("never fails in tests; qed.")) .map(|(k, v)| (k, Some(v))) - .chain(self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned()))) + .chain(self.overlay.changes_mut().map(|(k, v)| (k.clone(), v.value().cloned()))) .collect::>() .into_iter() .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) @@ -165,7 +165,7 @@ where self.overlay.set_offchain_storage(key, value) } - fn storage(&self, key: &[u8]) -> Option { + fn storage(&mut self, key: &[u8]) -> Option { let _guard = guard(); let result = self .overlay @@ -191,7 +191,7 @@ where result } - fn storage_hash(&self, key: &[u8]) -> Option> { + fn storage_hash(&mut self, key: &[u8]) -> Option> { let _guard = guard(); let result = self .overlay @@ -209,7 +209,7 @@ where result.map(|r| r.encode()) } - fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { let _guard = guard(); 
let result = self .overlay @@ -231,7 +231,7 @@ where result } - fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { + fn child_storage_hash(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option> { let _guard = guard(); let result = self .overlay @@ -253,7 +253,7 @@ where result.map(|r| r.encode()) } - fn exists_storage(&self, key: &[u8]) -> bool { + fn exists_storage(&mut self, key: &[u8]) -> bool { let _guard = guard(); let result = match self.overlay.storage(key) { Some(x) => x.is_some(), @@ -271,7 +271,7 @@ where result } - fn exists_child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> bool { + fn exists_child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> bool { let _guard = guard(); let result = match self.overlay.child_storage(child_info, key) { @@ -293,7 +293,7 @@ where result } - fn next_storage_key(&self, key: &[u8]) -> Option { + fn next_storage_key(&mut self, key: &[u8]) -> Option { let mut next_backend_key = self.backend.next_storage_key(key).expect(EXT_NOT_ALLOWED_TO_FAIL); let mut overlay_changes = self.overlay.iter_after(key).peekable(); @@ -331,7 +331,7 @@ where } } - fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn next_child_storage_key(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { let mut next_backend_key = self .backend .next_child_storage_key(child_info, key) @@ -501,10 +501,9 @@ where let _guard = guard(); let backend = &mut self.backend; - let current_value = self.overlay.value_mut_or_insert_with(&key, || { + self.overlay.append_storage(key.clone(), value, || { backend.storage(&key).expect(EXT_NOT_ALLOWED_TO_FAIL).unwrap_or_default() }); - StorageAppend::new(current_value).append(value); } fn storage_root(&mut self, state_version: StateVersion) -> Vec { @@ -731,10 +730,27 @@ impl<'a> StorageAppend<'a> { Self(storage) } + /// Extract the length of the list like data structure. 
+ pub fn extract_length(&self) -> Option { + Compact::::decode(&mut &self.0[..]).map(|c| c.0).ok() + } + + /// Replace the length in the encoded data. + /// + /// If `old_length` is `None`, the previous length will be assumed to be `0`. + pub fn replace_length(&mut self, old_length: Option, new_length: u32) { + let old_len_encoded_len = old_length.map(|l| Compact::::compact_len(&l)).unwrap_or(0); + let new_len_encoded = Compact::(new_length).encode(); + self.0.splice(0..old_len_encoded_len, new_len_encoded); + } + /// Append the given `value` to the storage item. /// - /// If appending fails, `[value]` is stored in the storage item. - pub fn append(&mut self, value: Vec) { + /// If appending fails, `[value]` is stored in the storage item and we return false. + #[cfg(any(test, feature = "fuzzing"))] + pub fn append(&mut self, value: Vec) -> bool { + use codec::EncodeAppend; + let mut result = true; let value = vec![EncodeOpaqueValue(value)]; let item = core::mem::take(self.0); @@ -742,13 +758,20 @@ impl<'a> StorageAppend<'a> { *self.0 = match Vec::::append_or_new(item, &value) { Ok(item) => item, Err(_) => { - log_error!( + result = false; + crate::log_error!( target: "runtime", "Failed to append value, resetting storage item to `[value]`.", ); value.encode() }, }; + result + } + + /// Append to current buffer, do not touch the prefixed length. 
+ pub fn append_raw(&mut self, mut value: Vec) { + self.0.append(&mut value) } } @@ -849,7 +872,7 @@ mod tests { ) .into(); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); // next_backend < next_overlay assert_eq!(ext.next_storage_key(&[5]), Some(vec![10])); @@ -865,7 +888,7 @@ mod tests { drop(ext); overlay.set_storage(vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_storage_key(&[40]), Some(vec![50])); @@ -895,7 +918,7 @@ mod tests { ) .into(); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); assert_eq!(ext.next_storage_key(&[5]), Some(vec![30])); @@ -928,7 +951,7 @@ mod tests { ) .into(); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); // next_backend < next_overlay assert_eq!(ext.next_child_storage_key(child_info, &[5]), Some(vec![10])); @@ -944,7 +967,7 @@ mod tests { drop(ext); overlay.set_child_storage(child_info, vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_child_storage_key(child_info, &[40]), Some(vec![50])); @@ -975,7 +998,7 @@ mod tests { ) .into(); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); assert_eq!(ext.child_storage(child_info, &[10]), Some(vec![10])); assert_eq!( diff --git a/substrate/primitives/state-machine/src/fuzzing.rs b/substrate/primitives/state-machine/src/fuzzing.rs new file mode 100644 index 000000000000..e147e6e88003 --- /dev/null +++ b/substrate/primitives/state-machine/src/fuzzing.rs @@ -0,0 +1,319 @@ +// This file is part of 
Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! State machine fuzzing implementation, behind `fuzzing` feature. + +use super::{ext::Ext, *}; +use crate::ext::StorageAppend; +use arbitrary::Arbitrary; +#[cfg(test)] +use codec::Encode; +use hash_db::Hasher; +use sp_core::{storage::StateVersion, traits::Externalities}; +#[cfg(test)] +use sp_runtime::traits::BlakeTwo256; +use sp_trie::PrefixedMemoryDB; +use std::collections::BTreeMap; + +#[derive(Arbitrary, Debug, Clone)] +enum DataLength { + Zero = 0, + Small = 1, + Medium = 3, + Big = 300, // 2 byte scale encode length +} + +#[derive(Arbitrary, Debug, Clone)] +#[repr(u8)] +enum DataValue { + A = b'a', + B = b'b', + C = b'c', + D = b'd', // This can be read as a multiple byte compact length. + EasyBug = 20u8, // value compact len. +} + +/// Action to fuzz +#[derive(Arbitrary, Debug, Clone)] +enum FuzzAppendItem { + Append(DataValue, DataLength), + Insert(DataValue, DataLength), + StartTransaction, + RollbackTransaction, + CommitTransaction, + Read, + Remove, + // To go over 256 items easily (different compact size then). + Append50(DataValue, DataLength), +} + +/// Arbitrary payload for fuzzing append. 
+#[derive(Arbitrary, Debug, Clone)] +pub struct FuzzAppendPayload(Vec, Option<(DataValue, DataLength)>); + +struct SimpleOverlay { + data: Vec, Option>>>, +} + +impl Default for SimpleOverlay { + fn default() -> Self { + Self { data: vec![BTreeMap::new()] } + } +} + +impl SimpleOverlay { + fn insert(&mut self, key: Vec, value: Option>) { + self.data.last_mut().expect("always at least one item").insert(key, value); + } + + fn append( + &mut self, + key: Vec, + value: Vec, + backend: &mut TrieBackend, H>, + ) where + H: Hasher, + H::Out: codec::Decode + codec::Encode + 'static, + { + let current_value = self + .data + .last_mut() + .expect("always at least one item") + .entry(key.clone()) + .or_insert_with(|| { + Some(backend.storage(&key).expect("Ext not allowed to fail").unwrap_or_default()) + }); + if current_value.is_none() { + *current_value = Some(vec![]); + } + StorageAppend::new(current_value.as_mut().expect("init above")).append(value); + } + + fn get(&mut self, key: &[u8]) -> Option<&Vec> { + self.data + .last_mut() + .expect("always at least one item") + .get(key) + .and_then(|o| o.as_ref()) + } + + fn commit_transaction(&mut self) { + if let Some(to_commit) = self.data.pop() { + let dest = self.data.last_mut().expect("always at least one item"); + for (k, v) in to_commit.into_iter() { + dest.insert(k, v); + } + } + } + + fn rollback_transaction(&mut self) { + let _ = self.data.pop(); + } + + fn start_transaction(&mut self) { + let cloned = self.data.last().expect("always at least one item").clone(); + self.data.push(cloned); + } +} + +struct FuzzAppendState { + key: Vec, + + // reference simple implementation + reference: SimpleOverlay, + + // trie backend + backend: TrieBackend, H>, + // Standard Overlay + overlay: OverlayedChanges, + + // block dropping/commiting too many transaction + transaction_depth: usize, +} + +impl FuzzAppendState +where + H: Hasher, + H::Out: codec::Decode + codec::Encode + 'static, +{ + fn process_item(&mut self, item: 
FuzzAppendItem) { + let mut ext = Ext::new(&mut self.overlay, &mut self.backend, None); + match item { + FuzzAppendItem::Append(value, length) => { + let value = vec![value as u8; length as usize]; + ext.storage_append(self.key.clone(), value.clone()); + self.reference.append(self.key.clone(), value, &mut self.backend); + }, + FuzzAppendItem::Append50(value, length) => { + let value = vec![value as u8; length as usize]; + for _ in 0..50 { + let mut ext = Ext::new(&mut self.overlay, &mut self.backend, None); + ext.storage_append(self.key.clone(), value.clone()); + self.reference.append(self.key.clone(), value.clone(), &mut self.backend); + } + }, + FuzzAppendItem::Insert(value, length) => { + let value = vec![value as u8; length as usize]; + ext.set_storage(self.key.clone(), value.clone()); + self.reference.insert(self.key.clone(), Some(value)); + }, + FuzzAppendItem::Remove => { + ext.clear_storage(&self.key); + self.reference.insert(self.key.clone(), None); + }, + FuzzAppendItem::Read => { + let left = ext.storage(self.key.as_slice()); + let right = self.reference.get(self.key.as_slice()); + assert_eq!(left.as_ref(), right); + }, + FuzzAppendItem::StartTransaction => { + self.transaction_depth += 1; + self.reference.start_transaction(); + ext.storage_start_transaction(); + }, + FuzzAppendItem::RollbackTransaction => { + if self.transaction_depth == 0 { + return + } + self.transaction_depth -= 1; + self.reference.rollback_transaction(); + ext.storage_rollback_transaction().unwrap(); + }, + FuzzAppendItem::CommitTransaction => { + if self.transaction_depth == 0 { + return + } + self.transaction_depth -= 1; + self.reference.commit_transaction(); + ext.storage_commit_transaction().unwrap(); + }, + } + } + + fn check_final_state(&mut self) { + let mut ext = Ext::new(&mut self.overlay, &mut self.backend, None); + let left = ext.storage(self.key.as_slice()); + let right = self.reference.get(self.key.as_slice()); + assert_eq!(left.as_ref(), right); + } +} + +#[test] +fn 
fuzz_scenarii() { + assert_eq!(codec::Compact(5u16).encode()[0], DataValue::EasyBug as u8); + let scenarii = vec![ + ( + vec![ + FuzzAppendItem::Append(DataValue::A, DataLength::Small), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append50(DataValue::D, DataLength::Small), + FuzzAppendItem::Read, + FuzzAppendItem::RollbackTransaction, + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::D, DataLength::Small), + FuzzAppendItem::Read, + FuzzAppendItem::RollbackTransaction, + ], + Some((DataValue::D, DataLength::Small)), + ), + ( + vec![ + FuzzAppendItem::Append(DataValue::B, DataLength::Small), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Small), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Remove, + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Zero), + FuzzAppendItem::CommitTransaction, + FuzzAppendItem::CommitTransaction, + FuzzAppendItem::Remove, + ], + Some((DataValue::EasyBug, DataLength::Small)), + ), + ( + vec![ + FuzzAppendItem::Append(DataValue::A, DataLength::Small), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Medium), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Remove, + FuzzAppendItem::CommitTransaction, + FuzzAppendItem::RollbackTransaction, + ], + Some((DataValue::B, DataLength::Big)), + ), + ( + vec![ + FuzzAppendItem::Append(DataValue::A, DataLength::Big), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Medium), + FuzzAppendItem::Remove, + FuzzAppendItem::RollbackTransaction, + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Zero), + ], + None, + ), + ( + vec![ + FuzzAppendItem::StartTransaction, + FuzzAppendItem::RollbackTransaction, + FuzzAppendItem::RollbackTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Zero), + ], + None, + ), + (vec![FuzzAppendItem::StartTransaction], 
Some((DataValue::EasyBug, DataLength::Zero))), + ]; + + for (scenario, init) in scenarii.into_iter() { + fuzz_append::(FuzzAppendPayload(scenario, init)); + } +} + +/// Test append operation for a given fuzzing payload. +pub fn fuzz_append(payload: FuzzAppendPayload) +where + H: Hasher, + H::Out: codec::Decode + codec::Encode + 'static, +{ + let FuzzAppendPayload(to_fuzz, initial) = payload; + let key = b"k".to_vec(); + let mut reference = SimpleOverlay::default(); + let initial: BTreeMap<_, _> = initial + .into_iter() + .map(|(v, l)| (key.clone(), vec![v as u8; l as usize])) + .collect(); + for (k, v) in initial.iter() { + reference.data[0].insert(k.clone(), Some(v.clone())); + } + reference.start_transaction(); // level 0 is backend, keep it untouched. + let overlay = OverlayedChanges::default(); + + let mut state = FuzzAppendState:: { + key, + reference, + overlay, + backend: (initial, StateVersion::default()).into(), + transaction_depth: 0, + }; + for item in to_fuzz { + state.process_item(item); + } + state.check_final_state(); +} diff --git a/substrate/primitives/state-machine/src/in_memory_backend.rs b/substrate/primitives/state-machine/src/in_memory_backend.rs index 06fe6d4162a7..7ba7457a6bf1 100644 --- a/substrate/primitives/state-machine/src/in_memory_backend.rs +++ b/substrate/primitives/state-machine/src/in_memory_backend.rs @@ -132,6 +132,7 @@ where } } +#[cfg(feature = "std")] impl From<(Storage, StateVersion)> for TrieBackend, H> where H::Out: Codec + Ord, diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs index 13087431d387..289b08755f68 100644 --- a/substrate/primitives/state-machine/src/lib.rs +++ b/substrate/primitives/state-machine/src/lib.rs @@ -27,6 +27,8 @@ pub mod backend; mod basic; mod error; mod ext; +#[cfg(feature = "fuzzing")] +pub mod fuzzing; #[cfg(feature = "std")] mod in_memory_backend; pub(crate) mod overlayed_changes; @@ -1273,7 +1275,7 @@ mod tests { assert_eq!( overlay - 
.changes() + .changes_mut() .map(|(k, v)| (k.clone(), v.value().cloned())) .collect::>(), map![ @@ -1299,7 +1301,7 @@ mod tests { assert_eq!( overlay - .changes() + .changes_mut() .map(|(k, v)| (k.clone(), v.value().cloned())) .collect::>(), map![ @@ -1340,7 +1342,7 @@ mod tests { assert_eq!( overlay - .children() + .children_mut() .flat_map(|(iter, _child_info)| iter) .map(|(k, v)| (k.clone(), v.value())) .collect::>(), @@ -1440,11 +1442,78 @@ mod tests { } overlay.rollback_transaction().unwrap(); { - let ext = Ext::new(&mut overlay, backend, None); + let mut ext = Ext::new(&mut overlay, backend, None); assert_eq!(ext.storage(key.as_slice()), Some(vec![reference_data[0].clone()].encode())); } } + // Test that we can append twice to a key, then perform a remove operation. + // The test checks specifically that the append is merged with its parent transaction + // on commit. + #[test] + fn commit_merges_append_with_parent() { + #[derive(codec::Encode, codec::Decode)] + enum Item { + Item1, + Item2, + } + + let key = b"events".to_vec(); + let state = new_in_mem::(); + let backend = state.as_trie_backend(); + let mut overlay = OverlayedChanges::default(); + + // Append first item + overlay.start_transaction(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + ext.clear_storage(key.as_slice()); + ext.storage_append(key.clone(), Item::Item1.encode()); + } + + // Append second item + overlay.start_transaction(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + + assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::Item1].encode())); + + ext.storage_append(key.clone(), Item::Item2.encode()); + + assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::Item1, Item::Item2].encode()),); + } + + // Remove item + overlay.start_transaction(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + + ext.place_storage(key.clone(), None); + + assert_eq!(ext.storage(key.as_slice()), None); + } + + // Remove gets commited and merged into previous 
transaction + overlay.commit_transaction().unwrap(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + assert_eq!(ext.storage(key.as_slice()), None,); + } + + // Remove gets rolled back, we should see the initial append again. + overlay.rollback_transaction().unwrap(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::Item1].encode())); + } + + overlay.commit_transaction().unwrap(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::Item1].encode())); + } + } + #[test] fn remove_with_append_then_rollback_appended_then_append_again() { #[derive(codec::Encode, codec::Decode)] @@ -1499,7 +1568,7 @@ mod tests { // Then only initialization item and second (committed) item should persist. { - let ext = Ext::new(&mut overlay, backend, None); + let mut ext = Ext::new(&mut overlay, backend, None); assert_eq!( ext.storage(key.as_slice()), Some(vec![Item::InitializationItem, Item::CommittedItem].encode()), diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index 601bc2e29198..c478983e979a 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -21,11 +21,15 @@ use super::{Extrinsics, StorageKey, StorageValue}; #[cfg(not(feature = "std"))] use alloc::collections::btree_set::BTreeSet as Set; +use codec::{Compact, CompactLen}; #[cfg(feature = "std")] use std::collections::HashSet as Set; -use crate::warn; -use alloc::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; +use crate::{ext::StorageAppend, warn}; +use alloc::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + vec::Vec, +}; use core::hash::Hash; use smallvec::SmallVec; @@ -86,10 +90,97 @@ impl Default for OverlayedEntry { } /// History of value, with 
removal support. -pub type OverlayedValue = OverlayedEntry>; +pub type OverlayedValue = OverlayedEntry; + +/// Content in an overlay for a given transactional depth. +#[derive(Debug, Clone, Default)] +#[cfg_attr(test, derive(PartialEq))] +pub enum StorageEntry { + /// The storage entry should be set to the stored value. + Set(StorageValue), + /// The storage entry should be removed. + #[default] + Remove, + /// The storage entry was appended to. + /// + /// This assumes that the storage entry is encoded as a SCALE list. This means that it is + /// prefixed with a `Compact` that reprensents the length, followed by all the encoded + /// elements. + Append { + /// The value of the storage entry. + /// + /// This may or may not be prefixed by the length, depending on the materialized length. + data: StorageValue, + /// Current number of elements stored in data. + current_length: u32, + /// The number of elements as stored in the prefixed length in `data`. + /// + /// If `None`, than `data` is not yet prefixed with the length. + materialized_length: Option, + /// The size of `data` in the parent transactional layer. + /// + /// Only set when the parent layer is in `Append` state. + parent_size: Option, + }, +} + +impl StorageEntry { + /// Convert to an [`Option`]. + pub(super) fn to_option(mut self) -> Option { + self.materialize_in_place(); + match self { + StorageEntry::Append { data, .. } | StorageEntry::Set(data) => Some(data), + StorageEntry::Remove => None, + } + } + + /// Return as an [`Option`]. + fn as_option(&mut self) -> Option<&StorageValue> { + self.materialize_in_place(); + match self { + StorageEntry::Append { data, .. } | StorageEntry::Set(data) => Some(data), + StorageEntry::Remove => None, + } + } + + /// Materialize the internal state and cache the resulting materialized value. + fn materialize_in_place(&mut self) { + if let StorageEntry::Append { data, materialized_length, current_length, .. 
} = self { + let current_length = *current_length; + if materialized_length.map_or(false, |m| m == current_length) { + return + } + StorageAppend::new(data).replace_length(*materialized_length, current_length); + *materialized_length = Some(current_length); + } + } + + /// Materialize the internal state. + #[cfg(test)] + pub(crate) fn materialize(&self) -> Option> { + use alloc::borrow::Cow; + + match self { + StorageEntry::Append { data, materialized_length, current_length, .. } => { + let current_length = *current_length; + if materialized_length.map_or(false, |m| m == current_length) { + Some(Cow::Borrowed(data.as_ref())) + } else { + let mut data = data.clone(); + StorageAppend::new(&mut data) + .replace_length(*materialized_length, current_length); + + Some(data.into()) + } + }, + StorageEntry::Remove => None, + StorageEntry::Set(e) => Some(Cow::Borrowed(e.as_ref())), + } + } +} /// Change set for basic key value with extrinsics index recording and removal support. -pub type OverlayedChangeSet = OverlayedMap>; +pub type OverlayedChangeSet = OverlayedMap; /// Holds a set of changes with the ability modify them using nested transactions. #[derive(Debug, Clone)] @@ -120,7 +211,7 @@ impl Default for OverlayedMap { } #[cfg(feature = "std")] -impl From for OverlayedMap> { +impl From for OverlayedMap { fn from(storage: sp_core::storage::StorageMap) -> Self { Self { changes: storage @@ -130,7 +221,7 @@ impl From for OverlayedMap OverlayedEntry { /// /// This makes sure that the old version is not overwritten and can be properly /// rolled back when required. 
- fn set(&mut self, value: V, first_write_in_tx: bool, at_extrinsic: Option) { + fn set_offchain(&mut self, value: V, first_write_in_tx: bool, at_extrinsic: Option) { if first_write_in_tx || self.transactions.is_empty() { self.transactions.push(InnerValue { value, extrinsics: Default::default() }); } else { @@ -202,10 +293,223 @@ impl OverlayedEntry { } } -impl OverlayedEntry> { +/// Restore the `current_data` from an [`StorageEntry::Append`] back to the parent. +/// +/// When creating a new transaction layer from an appended entry, the `data` will be moved to +/// prevent extra allocations. So, we need to move back the `data` to the parent layer when there is +/// a roll back or the entry is set to some different value. This functions puts back the data to +/// the `parent` and truncates any extra elements that got added in the current layer. +/// +/// The current and the `parent` layer need to be [`StorageEntry::Append`] or otherwise the function +/// is a no-op. +fn restore_append_to_parent( + parent: &mut StorageEntry, + mut current_data: Vec, + current_materialized: Option, + mut target_parent_size: usize, +) { + match parent { + StorageEntry::Append { + data: parent_data, + materialized_length: parent_materialized, + .. + } => { + // Forward the materialized length to the parent with the data. Next time when + // materializing the value, the length will be corrected. This prevents doing a + // potential allocation here. 
+ + let prev = parent_materialized.map(|l| Compact::::compact_len(&l)).unwrap_or(0); + let new = current_materialized.map(|l| Compact::::compact_len(&l)).unwrap_or(0); + let delta = new.abs_diff(prev); + if prev >= new { + target_parent_size -= delta; + } else { + target_parent_size += delta; + } + *parent_materialized = current_materialized; + + // Truncate the data to remove any extra elements + current_data.truncate(target_parent_size); + *parent_data = current_data; + }, + _ => { + // No value or a simple value, no need to restore + }, + } +} + +impl OverlayedEntry { + /// Writes a new version of a value. + /// + /// This makes sure that the old version is not overwritten and can be properly + /// rolled back when required. + fn set( + &mut self, + value: Option, + first_write_in_tx: bool, + at_extrinsic: Option, + ) { + let value = value.map_or_else(|| StorageEntry::Remove, StorageEntry::Set); + + if first_write_in_tx || self.transactions.is_empty() { + self.transactions.push(InnerValue { value, extrinsics: Default::default() }); + } else { + let mut old_value = self.value_mut(); + + let set_prev = if let StorageEntry::Append { + data, + current_length: _, + materialized_length, + parent_size, + } = &mut old_value + { + parent_size + .map(|parent_size| (core::mem::take(data), *materialized_length, parent_size)) + } else { + None + }; + + *old_value = value; + + if let Some((data, current_materialized, parent_size)) = set_prev { + let transactions = self.transactions.len(); + + debug_assert!(transactions >= 2); + let parent = self + .transactions + .get_mut(transactions - 2) + .expect("`set_prev` is only `Some(_)`, if the value came from parent; qed"); + restore_append_to_parent( + &mut parent.value, + data, + current_materialized, + parent_size, + ); + } + } + + if let Some(extrinsic) = at_extrinsic { + self.transaction_extrinsics_mut().insert(extrinsic); + } + } + + /// Append content to a value, updating a prefixed compact encoded length. 
+ /// + /// This makes sure that the old version is not overwritten and can be properly + /// rolled back when required. + /// This avoid copying value from previous transaction. + fn append( + &mut self, + element: StorageValue, + first_write_in_tx: bool, + init: impl Fn() -> StorageValue, + at_extrinsic: Option, + ) { + if self.transactions.is_empty() { + let mut init_value = init(); + + let mut append = StorageAppend::new(&mut init_value); + + // Either the init value is a SCALE list like value to that the `element` gets appended + // or the value is reset to `[element]`. + let (data, current_length, materialized_length) = + if let Some(len) = append.extract_length() { + append.append_raw(element); + + (init_value, len + 1, Some(len)) + } else { + (element, 1, None) + }; + + self.transactions.push(InnerValue { + value: StorageEntry::Append { + data, + current_length, + materialized_length, + parent_size: None, + }, + extrinsics: Default::default(), + }); + } else if first_write_in_tx { + let parent = self.value_mut(); + let (data, current_length, materialized_length, parent_size) = match parent { + StorageEntry::Remove => (element, 1, None, None), + StorageEntry::Append { data, current_length, materialized_length, .. } => { + let parent_len = data.len(); + let mut data_buf = core::mem::take(data); + StorageAppend::new(&mut data_buf).append_raw(element); + (data_buf, *current_length + 1, *materialized_length, Some(parent_len)) + }, + StorageEntry::Set(prev) => { + // For compatibility: append if there is a encoded length, overwrite + // with value otherwhise. + if let Some(current_length) = StorageAppend::new(prev).extract_length() { + // The `prev` is cloned here, but it could be optimized to not do the clone + // here as it is done for `Append` above. + let mut data = prev.clone(); + StorageAppend::new(&mut data).append_raw(element); + (data, current_length + 1, Some(current_length), None) + } else { + // overwrite, same as empty case. 
+ (element, 1, None, None) + } + }, + }; + + self.transactions.push(InnerValue { + value: StorageEntry::Append { + data, + current_length, + materialized_length, + parent_size, + }, + extrinsics: Default::default(), + }); + } else { + // not first transaction write + let old_value = self.value_mut(); + let replace = match old_value { + StorageEntry::Remove => Some((element, 1, None)), + StorageEntry::Set(data) => { + // Note that when the data here is not initialized with append, + // and still starts with a valid compact u32 we can have totally broken + // encoding. + let mut append = StorageAppend::new(data); + + // For compatibility: append if there is a encoded length, overwrite + // with value otherwhise. + if let Some(current_length) = append.extract_length() { + append.append_raw(element); + Some((core::mem::take(data), current_length + 1, Some(current_length))) + } else { + Some((element, 1, None)) + } + }, + StorageEntry::Append { data, current_length, .. } => { + StorageAppend::new(data).append_raw(element); + *current_length += 1; + None + }, + }; + + if let Some((data, current_length, materialized_length)) = replace { + *old_value = StorageEntry::Append { + data, + current_length, + materialized_length, + parent_size: None, + }; + } + } + + if let Some(extrinsic) = at_extrinsic { + self.transaction_extrinsics_mut().insert(extrinsic); + } + } + /// The value as seen by the current transaction. - pub fn value(&self) -> Option<&StorageValue> { - self.value_ref().as_ref() + pub fn value(&mut self) -> Option<&StorageValue> { + self.value_mut().as_option() } } @@ -238,20 +542,20 @@ impl OverlayedMap { } /// Get an optional reference to the value stored for the specified key. - pub fn get(&self, key: &Q) -> Option<&OverlayedEntry> + pub fn get(&mut self, key: &Q) -> Option<&mut OverlayedEntry> where K: core::borrow::Borrow, Q: Ord + ?Sized, { - self.changes.get(key) + self.changes.get_mut(key) } /// Set a new value for the specified key. 
/// /// Can be rolled back or committed when called inside a transaction. - pub fn set(&mut self, key: K, value: V, at_extrinsic: Option) { + pub fn set_offchain(&mut self, key: K, value: V, at_extrinsic: Option) { let overlayed = self.changes.entry(key.clone()).or_default(); - overlayed.set(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic); + overlayed.set_offchain(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic); } /// Get a list of all changes as seen by current transaction. @@ -259,6 +563,11 @@ impl OverlayedMap { self.changes.iter() } + /// Get a list of all changes as seen by current transaction. + pub fn changes_mut(&mut self) -> impl Iterator)> { + self.changes.iter_mut() + } + /// Get a list of all changes as seen by current transaction, consumes /// the overlay. pub fn into_changes(self) -> impl Iterator)> { @@ -298,7 +607,7 @@ impl OverlayedMap { /// /// This rollbacks all dangling transaction left open by the runtime. /// Calling this while already outside the runtime will return an error. - pub fn exit_runtime(&mut self) -> Result<(), NotInRuntime> { + pub fn exit_runtime_offchain(&mut self) -> Result<(), NotInRuntime> { if let ExecutionMode::Client = self.execution_mode { return Err(NotInRuntime) } @@ -310,7 +619,7 @@ impl OverlayedMap { ); } while self.has_open_runtime_transactions() { - self.rollback_transaction() + self.rollback_transaction_offchain() .expect("The loop condition checks that the transaction depth is > 0; qed"); } Ok(()) @@ -331,24 +640,24 @@ impl OverlayedMap { /// /// Any changes made during that transaction are discarded. Returns an error if /// there is no open transaction that can be rolled back. - pub fn rollback_transaction(&mut self) -> Result<(), NoOpenTransaction> { - self.close_transaction(true) + pub fn rollback_transaction_offchain(&mut self) -> Result<(), NoOpenTransaction> { + self.close_transaction_offchain(true) } /// Commit the last transaction started by `start_transaction`. 
/// /// Any changes made during that transaction are committed. Returns an error if /// there is no open transaction that can be committed. - pub fn commit_transaction(&mut self) -> Result<(), NoOpenTransaction> { - self.close_transaction(false) + pub fn commit_transaction_offchain(&mut self) -> Result<(), NoOpenTransaction> { + self.close_transaction_offchain(false) } - fn close_transaction(&mut self, rollback: bool) -> Result<(), NoOpenTransaction> { + fn close_transaction_offchain(&mut self, rollback: bool) -> Result<(), NoOpenTransaction> { // runtime is not allowed to close transactions started by the client - if let ExecutionMode::Runtime = self.execution_mode { - if !self.has_open_runtime_transactions() { - return Err(NoOpenTransaction) - } + if matches!(self.execution_mode, ExecutionMode::Runtime) && + !self.has_open_runtime_transactions() + { + return Err(NoOpenTransaction) } for key in self.dirty_keys.pop().ok_or(NoOpenTransaction)? { @@ -398,32 +707,176 @@ impl OverlayedMap { } impl OverlayedChangeSet { - /// Get a mutable reference for a value. + /// Rollback the last transaction started by `start_transaction`. + /// + /// Any changes made during that transaction are discarded. Returns an error if + /// there is no open transaction that can be rolled back. + pub fn rollback_transaction(&mut self) -> Result<(), NoOpenTransaction> { + self.close_transaction(true) + } + + /// Commit the last transaction started by `start_transaction`. + /// + /// Any changes made during that transaction are committed. Returns an error if + /// there is no open transaction that can be committed. 
+ pub fn commit_transaction(&mut self) -> Result<(), NoOpenTransaction> { + self.close_transaction(false) + } + + fn close_transaction(&mut self, rollback: bool) -> Result<(), NoOpenTransaction> { + // runtime is not allowed to close transactions started by the client + if matches!(self.execution_mode, ExecutionMode::Runtime) && + !self.has_open_runtime_transactions() + { + return Err(NoOpenTransaction) + } + + for key in self.dirty_keys.pop().ok_or(NoOpenTransaction)? { + let overlayed = self.changes.get_mut(&key).expect( + "\ + A write to an OverlayedValue is recorded in the dirty key set. Before an + OverlayedValue is removed, its containing dirty set is removed. This + function is only called for keys that are in the dirty set. qed\ + ", + ); + + if rollback { + match overlayed.pop_transaction().value { + StorageEntry::Append { + data, + materialized_length, + parent_size: Some(parent_size), + .. + } => { + debug_assert!(!overlayed.transactions.is_empty()); + restore_append_to_parent( + overlayed.value_mut(), + data, + materialized_length, + parent_size, + ); + }, + _ => (), + } + + // We need to remove the key as an `OverlayValue` with no transactions + // violates its invariant of always having at least one transaction. + if overlayed.transactions.is_empty() { + self.changes.remove(&key); + } + } else { + let has_predecessor = if let Some(dirty_keys) = self.dirty_keys.last_mut() { + // Not the last tx: Did the previous tx write to this key? + !dirty_keys.insert(key) + } else { + // Last tx: Is there already a value in the committed set? + // Check against one rather than empty because the current tx is still + // in the list as it is popped later in this function. + overlayed.transactions.len() > 1 + }; + + // We only need to merge if there is an pre-existing value. It may be a value from + // the previous transaction or a value committed without any open transaction. 
+ if has_predecessor { + let mut committed_tx = overlayed.pop_transaction(); + let mut merge_appends = false; + + // consecutive appends need to keep past `parent_size` value. + if let StorageEntry::Append { parent_size, .. } = &mut committed_tx.value { + if parent_size.is_some() { + let parent = overlayed.value_mut(); + if let StorageEntry::Append { parent_size: keep_me, .. } = parent { + merge_appends = true; + *parent_size = *keep_me; + } + } + } + + if merge_appends { + *overlayed.value_mut() = committed_tx.value; + } else { + let removed = core::mem::replace(overlayed.value_mut(), committed_tx.value); + // The transaction being commited is not an append operation. However, the + // value being overwritten in the previous transaction might be an append + // that needs to be merged with its parent. We only need to handle `Append` + // here because `Set` and `Remove` can directly overwrite previous + // operations. + if let StorageEntry::Append { + parent_size, data, materialized_length, .. + } = removed + { + if let Some(parent_size) = parent_size { + let transactions = overlayed.transactions.len(); + + // info from replaced head so len is at least one + // and parent_size implies a parent transaction + // so length is at least two. + debug_assert!(transactions >= 2); + if let Some(parent) = + overlayed.transactions.get_mut(transactions - 2) + { + restore_append_to_parent( + &mut parent.value, + data, + materialized_length, + parent_size, + ) + } + } + } + } + + overlayed.transaction_extrinsics_mut().extend(committed_tx.extrinsics); + } + } + } + + Ok(()) + } + + /// Call this when control returns from the runtime. + /// + /// This commits all dangling transaction left open by the runtime. + /// Calling this while already outside the runtime will return an error. 
+ pub fn exit_runtime(&mut self) -> Result<(), NotInRuntime> { + if matches!(self.execution_mode, ExecutionMode::Client) { + return Err(NotInRuntime) + } + + self.execution_mode = ExecutionMode::Client; + if self.has_open_runtime_transactions() { + warn!( + "{} storage transactions are left open by the runtime. Those will be rolled back.", + self.transaction_depth() - self.num_client_transactions, + ); + } + while self.has_open_runtime_transactions() { + self.rollback_transaction() + .expect("The loop condition checks that the transaction depth is > 0; qed"); + } + + Ok(()) + } + + /// Set a new value for the specified key. /// /// Can be rolled back or committed when called inside a transaction. - #[must_use = "A change was registered, so this value MUST be modified."] - pub fn modify( + pub fn set(&mut self, key: StorageKey, value: Option, at_extrinsic: Option) { + let overlayed = self.changes.entry(key.clone()).or_default(); + overlayed.set(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic); + } + + /// Append bytes to an existing content. + pub fn append_storage( &mut self, key: StorageKey, + value: StorageValue, init: impl Fn() -> StorageValue, at_extrinsic: Option, - ) -> &mut Option { + ) { let overlayed = self.changes.entry(key.clone()).or_default(); let first_write_in_tx = insert_dirty(&mut self.dirty_keys, key); - let clone_into_new_tx = if let Some(tx) = overlayed.transactions.last() { - if first_write_in_tx { - Some(tx.value.clone()) - } else { - None - } - } else { - Some(Some(init())) - }; - - if let Some(cloned) = clone_into_new_tx { - overlayed.set(cloned, first_write_in_tx, at_extrinsic); - } - overlayed.value_mut() + overlayed.append(value, first_write_in_tx, init, at_extrinsic); } /// Set all values to deleted which are matched by the predicate. 
@@ -436,7 +889,7 @@ impl OverlayedChangeSet { ) -> u32 { let mut count = 0; for (key, val) in self.changes.iter_mut().filter(|(k, v)| predicate(k, v)) { - if val.value_ref().is_some() { + if matches!(val.value_ref(), StorageEntry::Set(..) | StorageEntry::Append { .. }) { count += 1; } val.set(None, insert_dirty(&mut self.dirty_keys, key.clone()), at_extrinsic); @@ -445,10 +898,13 @@ impl OverlayedChangeSet { } /// Get the iterator over all changes that follow the supplied `key`. - pub fn changes_after(&self, key: &[u8]) -> impl Iterator { + pub fn changes_after( + &mut self, + key: &[u8], + ) -> impl Iterator { use core::ops::Bound; let range = (Bound::Excluded(key), Bound::Unbounded); - self.changes.range::<[u8], _>(range).map(|(k, v)| (k.as_slice(), v)) + self.changes.range_mut::<[u8], _>(range).map(|(k, v)| (k.as_slice(), v)) } } @@ -460,18 +916,19 @@ mod test { type Changes<'a> = Vec<(&'a [u8], (Option<&'a [u8]>, Vec))>; type Drained<'a> = Vec<(&'a [u8], Option<&'a [u8]>)>; - fn assert_changes(is: &OverlayedChangeSet, expected: &Changes) { + fn assert_changes(is: &mut OverlayedChangeSet, expected: &Changes) { let is: Changes = is - .changes() + .changes_mut() .map(|(k, v)| { - (k.as_ref(), (v.value().map(AsRef::as_ref), v.extrinsics().into_iter().collect())) + let extrinsics = v.extrinsics().into_iter().collect(); + (k.as_ref(), (v.value().map(AsRef::as_ref), extrinsics)) }) .collect(); assert_eq!(&is, expected); } fn assert_drained_changes(is: OverlayedChangeSet, expected: Changes) { - let is = is.drain_committed().collect::>(); + let is = is.drain_committed().map(|(k, v)| (k, v.to_option())).collect::>(); let expected = expected .iter() .map(|(k, v)| (k.to_vec(), v.0.map(From::from))) @@ -480,7 +937,7 @@ mod test { } fn assert_drained(is: OverlayedChangeSet, expected: Drained) { - let is = is.drain_committed().collect::>(); + let is = is.drain_committed().map(|(k, v)| (k, v.to_option())).collect::>(); let expected = expected .iter() .map(|(k, v)| (k.to_vec(), 
v.map(From::from))) @@ -535,7 +992,7 @@ mod test { (b"key7", (Some(b"val7-rolled"), vec![77])), (b"key99", (Some(b"val99"), vec![99])), ]; - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); // this should be no-op changeset.start_transaction(); @@ -546,7 +1003,7 @@ mod test { assert_eq!(changeset.transaction_depth(), 3); changeset.commit_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 2); - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); // roll back our first transactions that actually contains something changeset.rollback_transaction().unwrap(); @@ -558,11 +1015,11 @@ mod test { (b"key42", (Some(b"val42"), vec![42])), (b"key99", (Some(b"val99"), vec![99])), ]; - assert_changes(&changeset, &rolled_back); + assert_changes(&mut changeset, &rolled_back); changeset.commit_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 0); - assert_changes(&changeset, &rolled_back); + assert_changes(&mut changeset, &rolled_back); assert_drained_changes(changeset, rolled_back); } @@ -598,7 +1055,7 @@ mod test { (b"key7", (Some(b"val7-rolled"), vec![77])), (b"key99", (Some(b"val99"), vec![99])), ]; - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); // this should be no-op changeset.start_transaction(); @@ -609,35 +1066,46 @@ mod test { assert_eq!(changeset.transaction_depth(), 3); changeset.commit_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 2); - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); changeset.commit_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 1); - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); changeset.rollback_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 0); let rolled_back: Changes = vec![(b"key0", (Some(b"val0-1"), vec![1, 10])), (b"key1", (Some(b"val1"), vec![1]))]; 
- assert_changes(&changeset, &rolled_back); + assert_changes(&mut changeset, &rolled_back); assert_drained_changes(changeset, rolled_back); } #[test] - fn modify_works() { + fn append_works() { + use codec::Encode; let mut changeset = OverlayedChangeSet::default(); assert_eq!(changeset.transaction_depth(), 0); - let init = || b"valinit".to_vec(); + let init = || vec![b"valinit".to_vec()].encode(); // committed set - changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(0)); + let val0 = vec![b"val0".to_vec()].encode(); + changeset.set(b"key0".to_vec(), Some(val0.clone()), Some(0)); changeset.set(b"key1".to_vec(), None, Some(1)); - let val = changeset.modify(b"key3".to_vec(), init, Some(3)); - assert_eq!(val, &Some(b"valinit".to_vec())); - val.as_mut().unwrap().extend_from_slice(b"-modified"); + let all_changes: Changes = + vec![(b"key0", (Some(val0.as_slice()), vec![0])), (b"key1", (None, vec![1]))]; + + assert_changes(&mut changeset, &all_changes); + changeset.append_storage(b"key3".to_vec(), b"-modified".to_vec().encode(), init, Some(3)); + let val3 = vec![b"valinit".to_vec(), b"-modified".to_vec()].encode(); + let all_changes: Changes = vec![ + (b"key0", (Some(val0.as_slice()), vec![0])), + (b"key1", (None, vec![1])), + (b"key3", (Some(val3.as_slice()), vec![3])), + ]; + assert_changes(&mut changeset, &all_changes); changeset.start_transaction(); assert_eq!(changeset.transaction_depth(), 1); @@ -645,39 +1113,75 @@ mod test { assert_eq!(changeset.transaction_depth(), 2); // non existing value -> init value should be returned - let val = changeset.modify(b"key2".to_vec(), init, Some(2)); - assert_eq!(val, &Some(b"valinit".to_vec())); - val.as_mut().unwrap().extend_from_slice(b"-modified"); + changeset.append_storage(b"key3".to_vec(), b"-twice".to_vec().encode(), init, Some(15)); - // existing value should be returned by modify - let val = changeset.modify(b"key0".to_vec(), init, Some(10)); - assert_eq!(val, &Some(b"val0".to_vec())); - 
val.as_mut().unwrap().extend_from_slice(b"-modified"); + // non existing value -> init value should be returned + changeset.append_storage(b"key2".to_vec(), b"-modified".to_vec().encode(), init, Some(2)); + // existing value should be reuse on append + changeset.append_storage(b"key0".to_vec(), b"-modified".to_vec().encode(), init, Some(10)); // should work for deleted keys - let val = changeset.modify(b"key1".to_vec(), init, Some(20)); - assert_eq!(val, &None); - *val = Some(b"deleted-modified".to_vec()); + changeset.append_storage( + b"key1".to_vec(), + b"deleted-modified".to_vec().encode(), + init, + Some(20), + ); + let val0_2 = vec![b"val0".to_vec(), b"-modified".to_vec()].encode(); + let val3_2 = vec![b"valinit".to_vec(), b"-modified".to_vec(), b"-twice".to_vec()].encode(); + let val1 = vec![b"deleted-modified".to_vec()].encode(); + let all_changes: Changes = vec![ + (b"key0", (Some(val0_2.as_slice()), vec![0, 10])), + (b"key1", (Some(val1.as_slice()), vec![1, 20])), + (b"key2", (Some(val3.as_slice()), vec![2])), + (b"key3", (Some(val3_2.as_slice()), vec![3, 15])), + ]; + assert_changes(&mut changeset, &all_changes); + + changeset.start_transaction(); + let val3_3 = + vec![b"valinit".to_vec(), b"-modified".to_vec(), b"-twice".to_vec(), b"-2".to_vec()] + .encode(); + changeset.append_storage(b"key3".to_vec(), b"-2".to_vec().encode(), init, Some(21)); + let all_changes2: Changes = vec![ + (b"key0", (Some(val0_2.as_slice()), vec![0, 10])), + (b"key1", (Some(val1.as_slice()), vec![1, 20])), + (b"key2", (Some(val3.as_slice()), vec![2])), + (b"key3", (Some(val3_3.as_slice()), vec![3, 15, 21])), + ]; + assert_changes(&mut changeset, &all_changes2); + changeset.rollback_transaction().unwrap(); + assert_changes(&mut changeset, &all_changes); + changeset.start_transaction(); + let val3_4 = vec![ + b"valinit".to_vec(), + b"-modified".to_vec(), + b"-twice".to_vec(), + b"-thrice".to_vec(), + ] + .encode(); + changeset.append_storage(b"key3".to_vec(), 
b"-thrice".to_vec().encode(), init, Some(25)); let all_changes: Changes = vec![ - (b"key0", (Some(b"val0-modified"), vec![0, 10])), - (b"key1", (Some(b"deleted-modified"), vec![1, 20])), - (b"key2", (Some(b"valinit-modified"), vec![2])), - (b"key3", (Some(b"valinit-modified"), vec![3])), + (b"key0", (Some(val0_2.as_slice()), vec![0, 10])), + (b"key1", (Some(val1.as_slice()), vec![1, 20])), + (b"key2", (Some(val3.as_slice()), vec![2])), + (b"key3", (Some(val3_4.as_slice()), vec![3, 15, 25])), ]; - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); + changeset.commit_transaction().unwrap(); changeset.commit_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 1); - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); changeset.rollback_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 0); let rolled_back: Changes = vec![ - (b"key0", (Some(b"val0"), vec![0])), + (b"key0", (Some(val0.as_slice()), vec![0])), (b"key1", (None, vec![1])), - (b"key3", (Some(b"valinit-modified"), vec![3])), + (b"key3", (Some(val3.as_slice()), vec![3])), ]; - assert_changes(&changeset, &rolled_back); + assert_changes(&mut changeset, &rolled_back); assert_drained_changes(changeset, rolled_back); } @@ -695,7 +1199,7 @@ mod test { changeset.clear_where(|k, _| k.starts_with(b"del"), Some(5)); assert_changes( - &changeset, + &mut changeset, &vec![ (b"del1", (None, vec![3, 5])), (b"del2", (None, vec![4, 5])), @@ -707,7 +1211,7 @@ mod test { changeset.rollback_transaction().unwrap(); assert_changes( - &changeset, + &mut changeset, &vec![ (b"del1", (Some(b"delval1"), vec![3])), (b"del2", (Some(b"delval2"), vec![4])), @@ -850,4 +1354,72 @@ mod test { assert_eq!(changeset.exit_runtime(), Ok(())); assert_eq!(changeset.exit_runtime(), Err(NotInRuntime)); } + + #[test] + fn restore_append_to_parent() { + use codec::{Compact, Encode}; + let mut changeset = OverlayedChangeSet::default(); + let key: Vec = 
b"akey".into(); + + let from = 50; // 1 byte len + let to = 100; // 2 byte len + for i in 0..from { + changeset.append_storage(key.clone(), vec![i], Default::default, None); + } + + // materialized + let encoded = changeset.get(&key).unwrap().value().unwrap(); + let encoded_from_len = Compact(from as u32).encode(); + assert_eq!(encoded_from_len.len(), 1); + assert!(encoded.starts_with(&encoded_from_len[..])); + let encoded_from = encoded.clone(); + + changeset.start_transaction(); + + for i in from..to { + changeset.append_storage(key.clone(), vec![i], Default::default, None); + } + + // materialized + let encoded = changeset.get(&key).unwrap().value().unwrap(); + let encoded_to_len = Compact(to as u32).encode(); + assert_eq!(encoded_to_len.len(), 2); + assert!(encoded.starts_with(&encoded_to_len[..])); + + changeset.rollback_transaction().unwrap(); + + let encoded = changeset.get(&key).unwrap().value().unwrap(); + assert_eq!(&encoded_from, encoded); + } + + /// First we have some `Set` operation with a valid SCALE list. Then we append data and rollback + /// afterwards. + #[test] + fn restore_initial_set_after_append_to_parent() { + use codec::{Compact, Encode}; + let mut changeset = OverlayedChangeSet::default(); + let key: Vec = b"akey".into(); + + let initial_data = vec![1u8; 50].encode(); + + changeset.set(key.clone(), Some(initial_data.clone()), None); + + changeset.start_transaction(); + + // Append until we require 2 bytes for the length prefix. + for i in 0..50 { + changeset.append_storage(key.clone(), vec![i], Default::default, None); + } + + // Materialize the value. 
+ let encoded = changeset.get(&key).unwrap().value().unwrap(); + let encoded_to_len = Compact(100u32).encode(); + assert_eq!(encoded_to_len.len(), 2); + assert!(encoded.starts_with(&encoded_to_len[..])); + + changeset.rollback_transaction().unwrap(); + + let encoded = changeset.get(&key).unwrap().value().unwrap(); + assert_eq!(&initial_data, encoded); + } } diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index d6fc404e84fb..c2dc637bc71a 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -289,7 +289,7 @@ impl OverlayedChanges { /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose /// value has been set. - pub fn storage(&self, key: &[u8]) -> Option> { + pub fn storage(&mut self, key: &[u8]) -> Option> { self.top.get(key).map(|x| { let value = x.value(); let size_read = value.map(|x| x.len() as u64).unwrap_or(0); @@ -304,30 +304,11 @@ impl OverlayedChanges { self.storage_transaction_cache = None; } - /// Returns mutable reference to current value. - /// If there is no value in the overlay, the given callback is used to initiate the value. - /// Warning this function registers a change, so the mutable reference MUST be modified. - /// - /// Can be rolled back or committed when called inside a transaction. 
- #[must_use = "A change was registered, so this value MUST be modified."] - pub fn value_mut_or_insert_with( - &mut self, - key: &[u8], - init: impl Fn() -> StorageValue, - ) -> &mut StorageValue { - self.mark_dirty(); - - let value = self.top.modify(key.to_vec(), init, self.extrinsic_index()); - - // if the value was deleted initialise it back with an empty vec - value.get_or_insert_with(StorageValue::default) - } - /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose /// value has been set. - pub fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { - let map = self.children.get(child_info.storage_key())?; + pub fn child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option> { + let map = self.children.get_mut(child_info.storage_key())?; let value = map.0.get(key)?.value(); let size_read = value.map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_read_modified(size_read); @@ -342,7 +323,21 @@ impl OverlayedChanges { let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_write_overlay(size_write); - self.top.set(key, val, self.extrinsic_index()); + let extrinsic_index = self.extrinsic_index(); + self.top.set(key, val, extrinsic_index); + } + + /// Append a element to storage, init with existing value if first write. + pub fn append_storage( + &mut self, + key: StorageKey, + element: StorageValue, + init: impl Fn() -> StorageValue, + ) { + let extrinsic_index = self.extrinsic_index(); + let size_write = element.len() as u64; + self.stats.tally_write_overlay(size_write); + self.top.append_storage(key, element, init, extrinsic_index); } /// Set a new value for the specified key and child. 
@@ -396,7 +391,8 @@ impl OverlayedChanges { pub fn clear_prefix(&mut self, prefix: &[u8]) -> u32 { self.mark_dirty(); - self.top.clear_where(|key, _| key.starts_with(prefix), self.extrinsic_index()) + let extrinsic_index = self.extrinsic_index(); + self.top.clear_where(|key, _| key.starts_with(prefix), extrinsic_index) } /// Removes all key-value pairs which keys share the given prefix. @@ -457,7 +453,7 @@ impl OverlayedChanges { }); self.offchain .overlay_mut() - .rollback_transaction() + .rollback_transaction_offchain() .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -475,7 +471,7 @@ impl OverlayedChanges { } self.offchain .overlay_mut() - .commit_transaction() + .commit_transaction_offchain() .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -511,7 +507,7 @@ impl OverlayedChanges { } self.offchain .overlay_mut() - .exit_runtime() + .exit_runtime_offchain() .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -535,11 +531,24 @@ impl OverlayedChanges { self.children.values().map(|v| (v.0.changes(), &v.1)) } + /// Get an iterator over all child changes as seen by the current transaction. + pub fn children_mut( + &mut self, + ) -> impl Iterator, &ChildInfo)> + { + self.children.values_mut().map(|v| (v.0.changes_mut(), &v.1)) + } + /// Get an iterator over all top changes as been by the current transaction. pub fn changes(&self) -> impl Iterator { self.top.changes() } + /// Get an iterator over all top changes as been by the current transaction. + pub fn changes_mut(&mut self) -> impl Iterator { + self.top.changes_mut() + } + /// Get an optional iterator over all child changes stored under the supplied key. pub fn child_changes( &self, @@ -548,6 +557,16 @@ impl OverlayedChanges { self.children.get(key).map(|(overlay, info)| (overlay.changes(), info)) } + /// Get an optional iterator over all child changes stored under the supplied key. 
+ pub fn child_changes_mut( + &mut self, + key: &[u8], + ) -> Option<(impl Iterator, &ChildInfo)> { + self.children + .get_mut(key) + .map(|(overlay, info)| (overlay.changes_mut(), &*info)) + } + /// Get an list of all index operations. pub fn transaction_index_ops(&self) -> &[IndexOperation] { &self.transaction_index_ops @@ -575,11 +594,12 @@ impl OverlayedChanges { }; use core::mem::take; - let main_storage_changes = take(&mut self.top).drain_committed(); - let child_storage_changes = take(&mut self.children) - .into_iter() - .map(|(key, (val, info))| (key, (val.drain_committed(), info))); - + let main_storage_changes = + take(&mut self.top).drain_committed().map(|(k, v)| (k, v.to_option())); + let child_storage_changes = + take(&mut self.children).into_iter().map(|(key, (val, info))| { + (key, (val.drain_committed().map(|(k, v)| (k, v.to_option())), info)) + }); let offchain_storage_changes = self.offchain_drain_committed().collect(); #[cfg(feature = "std")] @@ -610,7 +630,7 @@ impl OverlayedChanges { /// set this index before first and unset after last extrinsic is executed. /// Changes that are made outside of extrinsics, are marked with /// `NO_EXTRINSIC_INDEX` index. 
- fn extrinsic_index(&self) -> Option { + fn extrinsic_index(&mut self) -> Option { self.collect_extrinsics.then(|| { self.storage(EXTRINSIC_INDEX) .and_then(|idx| idx.and_then(|idx| Decode::decode(&mut &*idx).ok())) @@ -634,10 +654,12 @@ impl OverlayedChanges { return (cache.transaction_storage_root, true) } - let delta = self.changes().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))); - let child_delta = self.children().map(|(changes, info)| { - (info, changes.map(|(k, v)| (&k[..], v.value().map(|v| &v[..])))) - }); + let delta = self.top.changes_mut().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))); + + let child_delta = self + .children + .values_mut() + .map(|v| (&v.1, v.0.changes_mut().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))))); let (root, transaction) = backend.full_storage_root(delta, child_delta, state_version); @@ -677,7 +699,7 @@ impl OverlayedChanges { return Ok((root, true)) } - let root = if let Some((changes, info)) = self.child_changes(storage_key) { + let root = if let Some((changes, info)) = self.child_changes_mut(storage_key) { let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref))); Some(backend.child_storage_root(info, delta, state_version)) } else { @@ -711,19 +733,19 @@ impl OverlayedChanges { /// Returns an iterator over the keys (in lexicographic order) following `key` (excluding `key`) /// alongside its value. - pub fn iter_after(&self, key: &[u8]) -> impl Iterator { + pub fn iter_after(&mut self, key: &[u8]) -> impl Iterator { self.top.changes_after(key) } /// Returns an iterator over the keys (in lexicographic order) following `key` (excluding `key`) /// alongside its value for the given `storage_key` child. 
pub fn child_iter_after( - &self, + &mut self, storage_key: &[u8], key: &[u8], - ) -> impl Iterator { + ) -> impl Iterator { self.children - .get(storage_key) + .get_mut(storage_key) .map(|(overlay, _)| overlay.changes_after(key)) .into_iter() .flatten() @@ -858,7 +880,11 @@ mod tests { use sp_core::{traits::Externalities, Blake2Hasher}; use std::collections::BTreeMap; - fn assert_extrinsics(overlay: &OverlayedChangeSet, key: impl AsRef<[u8]>, expected: Vec) { + fn assert_extrinsics( + overlay: &mut OverlayedChangeSet, + key: impl AsRef<[u8]>, + expected: Vec, + ) { assert_eq!( overlay.get(key.as_ref()).unwrap().extrinsics().into_iter().collect::>(), expected @@ -1049,9 +1075,9 @@ mod tests { overlay.set_extrinsic_index(2); overlay.set_storage(vec![1], Some(vec![6])); - assert_extrinsics(&overlay.top, vec![1], vec![0, 2]); - assert_extrinsics(&overlay.top, vec![3], vec![1]); - assert_extrinsics(&overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); + assert_extrinsics(&mut overlay.top, vec![1], vec![0, 2]); + assert_extrinsics(&mut overlay.top, vec![3], vec![1]); + assert_extrinsics(&mut overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); overlay.start_transaction(); @@ -1061,15 +1087,15 @@ mod tests { overlay.set_extrinsic_index(4); overlay.set_storage(vec![1], Some(vec![8])); - assert_extrinsics(&overlay.top, vec![1], vec![0, 2, 4]); - assert_extrinsics(&overlay.top, vec![3], vec![1, 3]); - assert_extrinsics(&overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); + assert_extrinsics(&mut overlay.top, vec![1], vec![0, 2, 4]); + assert_extrinsics(&mut overlay.top, vec![3], vec![1, 3]); + assert_extrinsics(&mut overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); overlay.rollback_transaction().unwrap(); - assert_extrinsics(&overlay.top, vec![1], vec![0, 2]); - assert_extrinsics(&overlay.top, vec![3], vec![1]); - assert_extrinsics(&overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); + assert_extrinsics(&mut overlay.top, vec![1], vec![0, 2]); + assert_extrinsics(&mut 
overlay.top, vec![3], vec![1]); + assert_extrinsics(&mut overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); } #[test] diff --git a/substrate/primitives/state-machine/src/overlayed_changes/offchain.rs b/substrate/primitives/state-machine/src/overlayed_changes/offchain.rs index 1e6965e87475..517a51b02693 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/offchain.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/offchain.rs @@ -42,7 +42,7 @@ impl OffchainOverlayedChanges { } /// Iterate over all key value pairs by reference. - pub fn iter(&self) -> impl Iterator { + pub fn iter(&mut self) -> impl Iterator { self.0.changes().map(|kv| (kv.0, kv.1.value_ref())) } @@ -53,14 +53,16 @@ impl OffchainOverlayedChanges { /// Remove a key and its associated value from the offchain database. pub fn remove(&mut self, prefix: &[u8], key: &[u8]) { - let _ = self - .0 - .set((prefix.to_vec(), key.to_vec()), OffchainOverlayedChange::Remove, None); + let _ = self.0.set_offchain( + (prefix.to_vec(), key.to_vec()), + OffchainOverlayedChange::Remove, + None, + ); } /// Set the value associated with a key under a prefix to the value provided. pub fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]) { - let _ = self.0.set( + let _ = self.0.set_offchain( (prefix.to_vec(), key.to_vec()), OffchainOverlayedChange::SetValue(value.to_vec()), None, @@ -68,7 +70,7 @@ impl OffchainOverlayedChanges { } /// Obtain a associated value to the given key in storage with prefix. 
- pub fn get(&self, prefix: &[u8], key: &[u8]) -> Option { + pub fn get(&mut self, prefix: &[u8], key: &[u8]) -> Option { let key = (prefix.to_vec(), key.to_vec()); self.0.get(&key).map(|entry| entry.value_ref()).cloned() } diff --git a/substrate/primitives/state-machine/src/read_only.rs b/substrate/primitives/state-machine/src/read_only.rs index 2056bf986635..b78d17138b0f 100644 --- a/substrate/primitives/state-machine/src/read_only.rs +++ b/substrate/primitives/state-machine/src/read_only.rs @@ -88,39 +88,39 @@ where panic!("Should not be used in read-only externalities!") } - fn storage(&self, key: &[u8]) -> Option { + fn storage(&mut self, key: &[u8]) -> Option { self.backend .storage(key) .expect("Backed failed for storage in ReadOnlyExternalities") } - fn storage_hash(&self, key: &[u8]) -> Option> { + fn storage_hash(&mut self, key: &[u8]) -> Option> { self.backend .storage_hash(key) .expect("Backed failed for storage_hash in ReadOnlyExternalities") .map(|h| h.encode()) } - fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { self.backend .child_storage(child_info, key) .expect("Backed failed for child_storage in ReadOnlyExternalities") } - fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { + fn child_storage_hash(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option> { self.backend .child_storage_hash(child_info, key) .expect("Backed failed for child_storage_hash in ReadOnlyExternalities") .map(|h| h.encode()) } - fn next_storage_key(&self, key: &[u8]) -> Option { + fn next_storage_key(&mut self, key: &[u8]) -> Option { self.backend .next_storage_key(key) .expect("Backed failed for next_storage_key in ReadOnlyExternalities") } - fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn next_child_storage_key(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { self.backend 
.next_child_storage_key(child_info, key) .expect("Backed failed for next_child_storage_key in ReadOnlyExternalities") diff --git a/substrate/primitives/state-machine/src/testing.rs b/substrate/primitives/state-machine/src/testing.rs index e19ba95755c1..e9d64a891e81 100644 --- a/substrate/primitives/state-machine/src/testing.rs +++ b/substrate/primitives/state-machine/src/testing.rs @@ -209,12 +209,15 @@ where /// /// In contrast to [`commit_all`](Self::commit_all) this will not panic if there are open /// transactions. - pub fn as_backend(&self) -> InMemoryBackend { - let top: Vec<_> = - self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned())).collect(); + pub fn as_backend(&mut self) -> InMemoryBackend { + let top: Vec<_> = self + .overlay + .changes_mut() + .map(|(k, v)| (k.clone(), v.value().cloned())) + .collect(); let mut transaction = vec![(None, top)]; - for (child_changes, child_info) in self.overlay.children() { + for (child_changes, child_info) in self.overlay.children_mut() { transaction.push(( Some(child_info.clone()), child_changes.map(|(k, v)| (k.clone(), v.value().cloned())).collect(), @@ -293,13 +296,14 @@ where } } -impl PartialEq for TestExternalities +impl TestExternalities where + H: Hasher, H::Out: Ord + 'static + codec::Codec, { /// This doesn't test if they are in the same state, only if they contains the /// same data at this state - fn eq(&self, other: &TestExternalities) -> bool { + pub fn eq(&mut self, other: &mut TestExternalities) -> bool { self.as_backend().eq(&other.as_backend()) } } diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index 0ecb98f31343..44e5f467d895 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -1383,7 +1383,7 @@ mod remote_tests { init_logger(); // create an ext with children keys - let child_ext = Builder::::new() + let mut child_ext = 
Builder::::new() .mode(Mode::Online(OnlineConfig { transport: endpoint().clone().into(), pallets: vec!["Proxy".to_owned()], @@ -1396,7 +1396,7 @@ mod remote_tests { .unwrap(); // create an ext without children keys - let ext = Builder::::new() + let mut ext = Builder::::new() .mode(Mode::Online(OnlineConfig { transport: endpoint().clone().into(), pallets: vec!["Proxy".to_owned()], From c4aa2ab642419e6751400a6aabaf5df611a4ea37 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Wed, 12 Jun 2024 16:38:57 +0200 Subject: [PATCH 23/52] Hide `tuplex` dependency and re-export by macro (#4774) Addressing comment: https://github.com/paritytech/polkadot-sdk/pull/4102/files#r1635502496 --------- Co-authored-by: Oliver Tale-Yazdi --- Cargo.lock | 2 -- .../src/extensions/check_obsolete_extension.rs | 10 ++++++++-- .../runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml | 2 -- .../runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml | 2 -- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fba768c653c6..13d658b51351 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2101,7 +2101,6 @@ dependencies = [ "static_assertions", "substrate-wasm-builder", "testnet-parachains-constants", - "tuplex", "xcm-fee-payment-runtime-api", ] @@ -2261,7 +2260,6 @@ dependencies = [ "static_assertions", "substrate-wasm-builder", "testnet-parachains-constants", - "tuplex", "westend-runtime-constants", "xcm-fee-payment-runtime-api", ] diff --git a/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs index 2c152aef6822..df75092af6e8 100644 --- a/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs +++ b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs @@ -36,6 +36,12 @@ use sp_runtime::{ transaction_validity::{TransactionPriority, TransactionValidity, ValidTransactionBuilder}, }; +// Re-export to avoid including the tuplex dependency everywhere. 
+#[doc(hidden)] +pub mod __private { + pub use tuplex; +} + /// A duplication of the `FilterCall` trait. /// /// We need this trait in order to be able to implement it for the messages pallet, @@ -313,7 +319,7 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { info: &sp_runtime::traits::DispatchInfoOf, len: usize, ) -> Result { - use tuplex::PushBack; + use $crate::extensions::check_obsolete_extension::__private::tuplex::PushBack; let to_post_dispatch = (); $( let (from_validate, call_filter_validity) = < @@ -336,7 +342,7 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { len: usize, result: &sp_runtime::DispatchResult, ) -> Result<(), sp_runtime::transaction_validity::TransactionValidityError> { - use tuplex::PopFront; + use $crate::extensions::check_obsolete_extension::__private::tuplex::PopFront; let Some((relayer, to_post_dispatch)) = to_post_dispatch else { return Ok(()) }; let has_failed = result.is_err(); $( diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 253a21f5d0ba..5e8639eed36b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -22,7 +22,6 @@ scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } -tuplex = { version = "0.1", default-features = false } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } @@ -218,7 +217,6 @@ std = [ "sp-version/std", "substrate-wasm-builder", "testnet-parachains-constants/std", - "tuplex/std", "xcm-builder/std", "xcm-executor/std", "xcm-fee-payment-runtime-api/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml 
b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 0f16d629fc26..ba8e4cdc8147 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -18,7 +18,6 @@ hex-literal = { version = "0.4.1" } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } -tuplex = { version = "0.1", default-features = false } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } @@ -182,7 +181,6 @@ std = [ "sp-version/std", "substrate-wasm-builder", "testnet-parachains-constants/std", - "tuplex/std", "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", From eca1052ea1eddeede91da8f9f7452ea8b57e7942 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 13 Jun 2024 10:36:22 +0800 Subject: [PATCH 24/52] Update the pallet guide in `sdk-docs` (#4735) After using this tutorial in PBA, there were a few areas to improve it. Moreover, I have: - Improve `your_first_pallet`, link it in README, improve the parent `guide` section. - Updated the templates page, in light of recent efforts related to https://github.com/paritytech/polkadot-sdk/issues/3155 - Added small ref docs about metadata, completed the one about native runtime, added one about host functions. 
- Remove a lot of unfinished stuff from sdk-docs - update diagram for `Hooks` --- Cargo.lock | 5 + README.md | 11 +- docs/mermaid/IA.mmd | 4 +- docs/sdk/Cargo.toml | 16 ++- docs/sdk/src/guides/mod.rs | 25 ++-- docs/sdk/src/guides/your_first_pallet/mod.rs | 109 +++++++++++------- docs/sdk/src/guides/your_first_runtime.rs | 2 + docs/sdk/src/polkadot_sdk/templates.rs | 70 ++++++----- .../reference_docs/blockchain_scalibility.rs | 0 .../src/reference_docs/consensus_swapping.rs | 6 - .../reference_docs/custom_host_functions.rs | 27 +++++ .../src/reference_docs/fee_less_runtime.rs | 1 + .../reference_docs/frame_offchain_workers.rs | 1 - .../reference_docs/frame_system_accounts.rs | 2 + docs/sdk/src/reference_docs/light_nodes.rs | 7 -- docs/sdk/src/reference_docs/metadata.rs | 24 ++++ docs/sdk/src/reference_docs/mod.rs | 19 +-- docs/sdk/src/reference_docs/wasm_memory.rs | 7 -- .../src/reference_docs/wasm_meta_protocol.rs | 79 ++++++++++--- substrate/frame/support/src/traits/hooks.rs | 33 +++--- substrate/primitives/io/Cargo.toml | 11 +- substrate/primitives/io/src/lib.rs | 1 + templates/minimal/node/src/service.rs | 2 + 23 files changed, 302 insertions(+), 160 deletions(-) delete mode 100644 docs/sdk/src/reference_docs/blockchain_scalibility.rs delete mode 100644 docs/sdk/src/reference_docs/consensus_swapping.rs create mode 100644 docs/sdk/src/reference_docs/custom_host_functions.rs delete mode 100644 docs/sdk/src/reference_docs/light_nodes.rs delete mode 100644 docs/sdk/src/reference_docs/wasm_memory.rs diff --git a/Cargo.lock b/Cargo.lock index 13d658b51351..a8b08d280158 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14328,6 +14328,7 @@ dependencies = [ "frame-support", "frame-system", "kitchensink-runtime", + "minimal-template-runtime", "pallet-assets", "pallet-aura", "pallet-authorship", @@ -14350,6 +14351,7 @@ dependencies = [ "pallet-transaction-payment", "pallet-uniques", "pallet-utility", + "parachain-template-runtime", "parity-scale-codec", "polkadot-sdk", 
"polkadot-sdk-frame", @@ -14369,6 +14371,7 @@ dependencies = [ "sc-service", "scale-info", "simple-mermaid 0.1.1", + "solochain-template-runtime", "sp-api", "sp-arithmetic", "sp-core", @@ -14377,6 +14380,7 @@ dependencies = [ "sp-keyring", "sp-offchain", "sp-runtime", + "sp-runtime-interface 24.0.0", "sp-version", "staging-chain-spec-builder", "staging-node-cli", @@ -20007,6 +20011,7 @@ name = "sp-io" version = "30.0.0" dependencies = [ "bytes", + "docify", "ed25519-dalek 2.1.1", "libsecp256k1", "log", diff --git a/README.md b/README.md index 0b027b2958c1..92901d070db0 100644 --- a/README.md +++ b/README.md @@ -24,8 +24,12 @@ forks](https://img.shields.io/github/forks/paritytech/polkadot-sdk) ## 📚 Documentation * [🦀 rust-docs](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/index.html) - * [Introduction](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/index.html) - to each component of the Polkadot SDK: Substrate, FRAME, Cumulus, and XCM + * [Introduction](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/index.html) + to each component of the Polkadot SDK: Substrate, FRAME, Cumulus, and XCM + * [Guides](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/guides/index.html), + namely how to build your first FRAME pallet. + * [Templates](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/templates/index.html) + for starting a new project. * Other Resources: * [Polkadot Wiki -> Build](https://wiki.polkadot.network/docs/build-guide) @@ -39,6 +43,9 @@ The Polkadot-SDK has two release channels: `stable` and `nightly`. Production so only use `stable`. `nightly` is meant for tinkerers to try out the latest features. The detailed release process is described in [RELEASE.md](docs/RELEASE.md). +You can use [`psvm`](https://github.com/paritytech/psvm) to manage your Polkadot-SDK dependency +versions in downstream projects. 
+ ### 😌 Stable `stable` releases have a support duration of **three months**. In this period, the release will not diff --git a/docs/mermaid/IA.mmd b/docs/mermaid/IA.mmd index fe9a96bcafc0..37417497e1f8 100644 --- a/docs/mermaid/IA.mmd +++ b/docs/mermaid/IA.mmd @@ -1,6 +1,6 @@ flowchart parity[paritytech.github.io] --> devhub[polkadot_sdk_docs] - polkadot[polkadot.network] --> devhub[polkadot_sdk_docs] + polkadot_network[polkadot.network] --> devhub[polkadot_sdk_docs] devhub --> polkadot_sdk devhub --> reference_docs @@ -9,5 +9,5 @@ flowchart polkadot_sdk --> substrate polkadot_sdk --> frame polkadot_sdk --> cumulus - polkadot_sdk --> polkadot + polkadot_sdk --> polkadot[polkadot node] polkadot_sdk --> xcm diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index b0671623f48d..10c091211671 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -83,27 +83,31 @@ pallet-democracy = { path = "../../substrate/frame/democracy" } pallet-uniques = { path = "../../substrate/frame/uniques" } pallet-nfts = { path = "../../substrate/frame/nfts" } pallet-scheduler = { path = "../../substrate/frame/scheduler" } +pallet-referenda = { path = "../../substrate/frame/referenda" } +pallet-broker = { path = "../../substrate/frame/broker" } +pallet-babe = { path = "../../substrate/frame/babe" } # Primitives sp-io = { path = "../../substrate/primitives/io" } +sp-runtime-interface = { path = "../../substrate/primitives/runtime-interface" } sp-api = { path = "../../substrate/primitives/api" } sp-core = { path = "../../substrate/primitives/core" } sp-keyring = { path = "../../substrate/primitives/keyring" } sp-runtime = { path = "../../substrate/primitives/runtime" } sp-arithmetic = { path = "../../substrate/primitives/arithmetic" } sp-genesis-builder = { path = "../../substrate/primitives/genesis-builder" } - -# Misc pallet dependencies -pallet-referenda = { path = "../../substrate/frame/referenda" } -pallet-broker = { path = "../../substrate/frame/broker" } -pallet-babe = { path 
= "../../substrate/frame/babe" } - sp-offchain = { path = "../../substrate/primitives/offchain" } sp-version = { path = "../../substrate/primitives/version" } + # XCM xcm = { package = "staging-xcm", path = "../../polkadot/xcm" } xcm-docs = { path = "../../polkadot/xcm/docs" } # runtime guides chain-spec-guide-runtime = { path = "./src/reference_docs/chain_spec_runtime" } + +# Templates +minimal-template-runtime = { path = "../../templates/minimal/runtime" } +solochain-template-runtime = { path = "../../templates/solochain/runtime" } +parachain-template-runtime = { path = "../../templates/parachain/runtime" } diff --git a/docs/sdk/src/guides/mod.rs b/docs/sdk/src/guides/mod.rs index f5f6d2b5e0c0..485cdc30636f 100644 --- a/docs/sdk/src/guides/mod.rs +++ b/docs/sdk/src/guides/mod.rs @@ -1,7 +1,16 @@ //! # Polkadot SDK Docs Guides //! -//! This crate contains a collection of guides that are foundational to the developers of -//! Polkadot SDK. They are common user-journeys that are traversed in the Polkadot ecosystem. +//! This crate contains a collection of guides that are foundational to the developers of Polkadot +//! SDK. They are common user-journeys that are traversed in the Polkadot ecosystem. +//! +//! 1. [`crate::guides::your_first_pallet`] is your starting point with Polkadot SDK. It contains +//! the basics of +//! building a simple crypto currency with FRAME. +//! 2. [`crate::guides::your_first_runtime`] is the next step in your journey. It contains the +//! basics of building a runtime that contains this pallet, plus a few common pallets from FRAME. +//! +//! +//! Other guides are related to other miscellaneous topics and are listed as modules below. /// Write your first simple pallet, learning the most most basic features of FRAME along the way. pub mod your_first_pallet; @@ -11,18 +20,18 @@ pub mod your_first_pallet; pub mod your_first_runtime; /// Running the given runtime with a node. No specific consensus mechanism is used at this stage. 
-pub mod your_first_node; - -/// How to change the consensus engine of both the node and the runtime. -pub mod changing_consensus; +// TODO +// pub mod your_first_node; /// How to enhance a given runtime and node to be cumulus-enabled, run it as a parachain and connect /// it to a relay-chain. -pub mod cumulus_enabled_parachain; +// TODO +// pub mod cumulus_enabled_parachain; /// How to make a given runtime XCM-enabled, capable of sending messages (`Transact`) between itself /// and the relay chain to which it is connected. -pub mod xcm_enabled_parachain; +// TODO +// pub mod xcm_enabled_parachain; /// How to enable storage weight reclaiming in a parachain node and runtime. pub mod enable_pov_reclaim; diff --git a/docs/sdk/src/guides/your_first_pallet/mod.rs b/docs/sdk/src/guides/your_first_pallet/mod.rs index c6e0dd0edf89..0a22b13df814 100644 --- a/docs/sdk/src/guides/your_first_pallet/mod.rs +++ b/docs/sdk/src/guides/your_first_pallet/mod.rs @@ -14,18 +14,14 @@ //! > FRAME-based runtimes use various techniques to re-use a currency pallet instead of writing //! > one. Further advanced FRAME related topics are discussed in [`crate::reference_docs`]. //! -//! ## Topics Covered +//! ## Writing Your First Pallet //! -//! The following FRAME topics are covered in this guide: +//! To get started, use one of the templates mentioned in [`crate::polkadot_sdk::templates`]. We +//! recommend using the `polkadot-sdk-minimal-template`. You might need to change small parts of +//! this guide, namely the crate/package names, based on which tutorial you use. //! -//! - [Storage](frame::pallet_macros::storage) -//! - [Call](frame::pallet_macros::call) -//! - [Event](frame::pallet_macros::event) -//! - [Error](frame::pallet_macros::error) -//! - Basics of testing a pallet -//! - [Constructing a runtime](frame::runtime::prelude::construct_runtime) -//! -//! ## Writing Your First Pallet +//! 
> Be aware that you can read the entire source code backing this tutorial by clicking on the +//! > [`source`](./mod.rs.html) button at the top right of the page. //! //! You should have studied the following modules as a prelude to this guide: //! @@ -33,16 +29,28 @@ //! - [`crate::reference_docs::trait_based_programming`] //! - [`crate::polkadot_sdk::frame_runtime`] //! +//! ## Topics Covered +//! +//! The following FRAME topics are covered in this guide: +//! +//! - [`pallet::storage`] +//! - [`pallet::call`] +//! - [`pallet::event`] +//! - [`pallet::error`] +//! - Basics of testing a pallet +//! - [Constructing a runtime](frame::runtime::prelude::construct_runtime) +//! //! ### Shell Pallet //! //! Consider the following as a "shell pallet". We continue building the rest of this pallet based //! on this template. //! -//! [`pallet::config`](frame::pallet_macros::config) and -//! [`pallet::pallet`](frame::pallet_macros::pallet) are both mandatory parts of any pallet. Refer -//! to the documentation of each to get an overview of what they do. +//! [`pallet::config`] and [`pallet::pallet`] are both mandatory parts of any pallet. Refer to the +//! documentation of each to get an overview of what they do. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", shell_pallet)] //! +//! All of the code that follows in this guide should live inside of the `mod pallet`. +//! //! ### Storage //! //! First, we will need to create two onchain storage declarations. @@ -55,15 +63,14 @@ //! > generic bounded type in the `Config` trait, and then specify it in the implementation. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Balance)] //! -//! The definition of these two storage items, based on [`frame::pallet_macros::storage`] details, -//! is as follows: +//! 
The definition of these two storage items, based on [`pallet::storage`] details, is as follows: #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", TotalIssuance)] #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Balances)] //! //! ### Dispatchables //! -//! Next, we will define the dispatchable functions. As per [`frame::pallet_macros::call`], these -//! will be defined as normal `fn`s attached to `struct Pallet`. +//! Next, we will define the dispatchable functions. As per [`pallet::call`], these will be defined +//! as normal `fn`s attached to `struct Pallet`. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", impl_pallet)] //! //! The logic of the functions is self-explanatory. Instead, we will focus on the FRAME-related @@ -79,7 +86,6 @@ //! was signed by `who`. #![doc = docify::embed!("../../substrate/frame/system/src/lib.rs", ensure_signed)] //! -//! //! - Where does `mutate`, `get` and `insert` and other storage APIs come from? All of them are //! explained in the corresponding `type`, for example, for `Balances::::insert`, you can look //! into [`frame::prelude::StorageMap::insert`]. @@ -95,8 +101,7 @@ //! //! - Why are all `get` and `mutate` functions returning an `Option`? This is the default behavior //! of FRAME storage APIs. You can learn more about how to override this by looking into -//! [`frame::pallet_macros::storage`], and -//! [`frame::prelude::ValueQuery`]/[`frame::prelude::OptionQuery`] +//! [`pallet::storage`], and [`frame::prelude::ValueQuery`]/[`frame::prelude::OptionQuery`] //! //! ### Improving Errors //! @@ -116,6 +121,25 @@ //! //! ### Your First (Test) Runtime //! +//! The typical testing code of a pallet lives in a module that imports some preludes useful for +//! testing, similar to: +//! +//! ``` +//! pub mod pallet { +//! // snip -- actually pallet code. +//! } +//! +//! #[cfg(test)] +//! mod tests { +//! // bring in the testing prelude of frame +//! use frame::testing_prelude::*; +//! 
// bring in all pallet items +//! use super::pallet::*; +//! +//! // snip -- rest of the testing code. +//! } +//! ``` +//! //! Next, we create a "test runtime" in order to test our pallet. Recall from //! [`crate::polkadot_sdk::frame_runtime`] that a runtime is a collection of pallets, expressed //! through [`frame::runtime::prelude::construct_runtime`]. All runtimes also have to include @@ -166,7 +190,6 @@ //! As noted above, the `T::AccountId` is now `u64`. Moreover, `Runtime` is replacing ``. //! This is why for example you see `Balances::::get(..)`. Finally, notice that the //! dispatchables are simply functions that can be called on top of the `Pallet` struct. -// TODO: hard to explain exactly `RuntimeOrigin::signed(ALICE)` at this point. //! //! Congratulations! You have written your first pallet and tested it! Next, we learn a few optional //! steps to improve our pallet. @@ -236,8 +259,7 @@ //! by one character. FRAME errors are exactly a solution to maintain readability, whilst fixing //! the drawbacks mentioned. In short, we use an enum to represent different variants of our //! error. These variants are then mapped in an efficient way (using only `u8` indices) to -//! [`sp_runtime::DispatchError::Module`]. Read more about this in -//! [`frame::pallet_macros::error`]. +//! [`sp_runtime::DispatchError::Module`]. Read more about this in [`pallet::error`]. //! //! - **Event**: Events are akin to the return type of dispatchables. They are mostly data blobs //! emitted by the runtime to let outside world know what is happening inside the pallet. Since @@ -246,20 +268,16 @@ //! use passive tense for event names (eg. `SomethingHappened`). This allows other sub-systems or //! external parties (eg. a light-node, a DApp) to listen to particular events happening, without //! needing to re-execute the whole state transition function. -// TODO: both need to be improved a lot at the pallet-macro rust-doc level. 
Also my explanation -// of event is probably not the best. //! //! With the explanation out of the way, let's see how these components can be added. Both follow a -//! fairly familiar syntax: normal Rust enums, with extra -//! [`#[frame::event]`](frame::pallet_macros::event) and -//! [`#[frame::error]`](frame::pallet_macros::error) attributes attached. +//! fairly familiar syntax: normal Rust enums, with extra [`pallet::event`] and [`pallet::error`] +//! attributes attached. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Event)] #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Error)] //! -//! One slightly custom part of this is the [`#[pallet::generate_deposit(pub(super) fn -//! deposit_event)]`](frame::pallet_macros::generate_deposit) part. Without going into too -//! much detail, in order for a pallet to emit events to the rest of the system, it needs to do two -//! things: +//! One slightly custom part of this is the [`pallet::generate_deposit`] part. Without going into +//! too much detail, in order for a pallet to emit events to the rest of the system, it needs to do +//! two things: //! //! 1. Declare a type in its `Config` that refers to the overarching event type of the runtime. In //! short, by doing this, the pallet is expressing an important bound: `type RuntimeEvent: @@ -268,8 +286,8 @@ //! store it where needed. //! //! 2. But, doing this conversion and storing is too much to expect each pallet to define. FRAME -//! provides a default way of storing events, and this is what -//! [`pallet::generate_deposit`](frame::pallet_macros::generate_deposit) is doing. +//! provides a default way of storing events, and this is what [`pallet::generate_deposit`] is +//! doing. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", config_v2)] //! //! > These `Runtime*` types are better explained in @@ -297,10 +315,17 @@ //! - [`crate::reference_docs::defensive_programming`]. //! - [`crate::reference_docs::frame_origin`]. 
//! - [`crate::reference_docs::frame_runtime_types`]. -//! - The pallet we wrote in this guide was using `dev_mode`, learn more in -//! [`frame::pallet_macros::config`]. +//! - The pallet we wrote in this guide was using `dev_mode`, learn more in [`pallet::config`]. //! - Learn more about the individual pallet items/macros, such as event and errors and call, in //! [`frame::pallet_macros`]. +//! +//! [`pallet::storage`]: ../../../frame_support/pallet_macros/attr.storage.html +//! [`pallet::call`]: ../../../frame_support/pallet_macros/attr.call.html +//! [`pallet::event`]: ../../../frame_support/pallet_macros/attr.event.html +//! [`pallet::error`]: ../../../frame_support/pallet_macros/attr.error.html +//! [`pallet::pallet`]: ../../../frame_support/pallet_macros/attr.pallet.html +//! [`pallet::config`]: ../../../frame_support/pallet_macros/attr.config.html +//! [`pallet::generate_deposit`]: ../../../frame_support/pallet_macros/attr.generate_deposit.html #[docify::export] #[frame::pallet(dev_mode)] @@ -418,16 +443,22 @@ pub mod pallet { #[cfg(any(test, doc))] pub(crate) mod tests { use crate::guides::your_first_pallet::pallet::*; + + #[docify::export(testing_prelude)] use frame::testing_prelude::*; - const ALICE: u64 = 1; - const BOB: u64 = 2; - const CHARLIE: u64 = 3; + + pub(crate) const ALICE: u64 = 1; + pub(crate) const BOB: u64 = 2; + pub(crate) const CHARLIE: u64 = 3; #[docify::export] + // This runtime is only used for testing, so it should be somewhere like `#[cfg(test)] mod + // tests { .. }` mod runtime { use super::*; // we need to reference our `mod pallet` as an identifier to pass to // `construct_runtime`.
+ // YOU HAVE TO CHANGE THIS LINE BASED ON YOUR TEMPLATE use crate::guides::your_first_pallet::pallet as pallet_currency; construct_runtime!( diff --git a/docs/sdk/src/guides/your_first_runtime.rs b/docs/sdk/src/guides/your_first_runtime.rs index 3e02ef1b1b28..c58abc1120c1 100644 --- a/docs/sdk/src/guides/your_first_runtime.rs +++ b/docs/sdk/src/guides/your_first_runtime.rs @@ -1 +1,3 @@ //! # Your first Runtime +//! +//! 🚧 diff --git a/docs/sdk/src/polkadot_sdk/templates.rs b/docs/sdk/src/polkadot_sdk/templates.rs index 4bf0e839c798..e87eb9c2bc8a 100644 --- a/docs/sdk/src/polkadot_sdk/templates.rs +++ b/docs/sdk/src/polkadot_sdk/templates.rs @@ -1,19 +1,33 @@ //! # Templates //! -//! ### Internal +//! This document enumerates a non-exhaustive list of templates that one can use to get started with +//! polkadot-sdk. //! -//! The following templates are maintained as a part of the `polkadot-sdk` repository: +//! > Know more tools/templates that are not listed here? please contribute them by opening a PR. //! -//! - classic [`substrate-node-template`]: is a white-labeled substrate-based blockchain with a -//! moderate amount of features. It can act as a great starting point for those who want to learn -//! Substrate/FRAME and want to have a template that is already doing something. -//! - [`substrate-minimal-template`]: Same as the above, but it contains the least amount of code in -//! both the node and runtime. It is a great starting point for those who want to deeply learn -//! Substrate and FRAME. -//! - classic [`cumulus-parachain-template`], which is the de-facto parachain template shipped with -//! Cumulus. It is the parachain-enabled version of [`substrate-node-template`]. +//! ## Internal //! -//! ### External Templates +//! The following [templates](https://github.com/paritytech/polkadot-sdk/blob/master/templates) are +//! maintained as a part of the `polkadot-sdk` repository: +//! +//! 
- `minimal_template_node`/[`minimal_template_runtime`]: A minimal template that contains the +//! least amount of features to be a functioning blockchain. Suitable for learning, development +//! and testing. This template is not meant to be used in production. +//! - `solochain_template_node`/[`solochain_template_runtime`]: Formerly known as +//! "substrate-node-template", is a white-labeled substrate-based blockchain (aka. solochain) that +//! contains moderate features, such as a basic consensus engine and some FRAME pallets. This +//! template can act as a good starting point for those who want to launch a solochain. +//! - `parachain_template_node`/[`parachain_template_runtime`]: A parachain template ready to be +//! connected to a test relay-chain. +//! +//! These templates are always kept up to date, and are mirrored to external repositories for easy +//! forking: +//! +//! - +//! - +//! - +//! +//! ## External Templates //! //! Noteworthy templates outside of this repository. //! @@ -22,23 +36,17 @@ //! - [`frontier-parachain-template`](https://github.com/paritytech/frontier-parachain-template): A //! parachain template for launching EVM-compatible parachains. //! -//! [`minimal-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/templates/minimal/ -//! [`parachain-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/templates/parachain/ - -// TODO: in general, we need to make a deliberate choice here of moving a few key templates to this -// repo (nothing stays in `substrate-developer-hub`) and the everything else should be community -// maintained. https://github.com/paritytech/polkadot-sdk-docs/issues/67 - -// TODO: we should rename `substrate-node-template` to `substrate-basic-template`, -// `substrate-blockchain-template`. `node` is confusing in the name. -// `substrate-blockchain-template` and `cumulus-parachain-template` go well together 🤝. 
https://github.com/paritytech/polkadot-sdk-docs/issues/67 - -// NOTE: a super important detail that I am looking forward to here is -// and -// . Meaning that I would not spend time on -// teaching someone too much detail about the ugly thing we call "node" nowadays. In the future, I -// am sure we will either have a better "node-builder" code that can actually be tested, or an -// "omni-node" that can run (almost) any wasm file. We should already build tutorials in this -// direction IMO. This also affects all the templates. If we have a good neat runtime file, which we -// are moving toward, and a good node-builder, we don't need all of these damn templates. These -// templates are only there because the boilerplate is super horrible atm. +//! ## OpenZeppelin +//! +//! In June 2023, OpenZeppelin was awarded a grant from the [Polkadot +//! treasury](https://polkadot.polkassembly.io/treasury/406) for building a number of Polkadot-sdk +//! based templates. These templates are expected to be a great starting point for developers. +//! +//! - +//! +//! ## POP-CLI +//! +//! Is a CLI tool capable of scaffolding a new polkadot-sdk-based project, possibly removing the +//! need for templates. +//! +//! - diff --git a/docs/sdk/src/reference_docs/blockchain_scalibility.rs b/docs/sdk/src/reference_docs/blockchain_scalibility.rs deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/docs/sdk/src/reference_docs/consensus_swapping.rs b/docs/sdk/src/reference_docs/consensus_swapping.rs deleted file mode 100644 index e639761ee97b..000000000000 --- a/docs/sdk/src/reference_docs/consensus_swapping.rs +++ /dev/null @@ -1,6 +0,0 @@ -//! Consensus Swapping -//! -//! Notes: -//! -//! - The typical workshop done by Joshy in some places where he swaps out the consensus to be PoW. -//! - This could also be a tutorial rather than a ref doc, depending on the size. 
diff --git a/docs/sdk/src/reference_docs/custom_host_functions.rs b/docs/sdk/src/reference_docs/custom_host_functions.rs new file mode 100644 index 000000000000..719b208a2bff --- /dev/null +++ b/docs/sdk/src/reference_docs/custom_host_functions.rs @@ -0,0 +1,27 @@ +//! # Custom Host Functions +//! +//! Host functions are functions that the wasm instance can use to communicate with the node. Learn +//! more about this in [`crate::reference_docs::wasm_meta_protocol`]. +//! +//! ## Finding Host Functions +//! +//! To declare a set of functions as host functions, you need to use the `#[runtime_interface]` +//! ([`sp_runtime_interface`]) attribute macro. The most notable set of host functions are those +//! that allow the runtime to access the chain state, namely [`sp_io::storage`]. Some other notable +//! host functions are also defined in [`sp_io`]. +//! +//! ## Adding New Host Functions +//! +//! > Adding a new host function is a big commitment and should be done with care. Namely, the nodes +//! > in the network need to support all host functions forever in order to be able to sync +//! > historical blocks. +//! +//! Adding host functions is only possible when you are using a node-template, so that you have +//! access to the boilerplate of building your node. +//! +//! A group of host functions can always be grouped together as a tuple: +#![doc = docify::embed!("../../substrate/primitives/io/src/lib.rs", SubstrateHostFunctions)] +//! +//! The host functions are attached to the node side's [`sc_executor::WasmExecutor`]. For example in +//! the minimal template, the setup looks as follows: +#![doc = docify::embed!("../../templates/minimal/node/src/service.rs", FullClient)] diff --git a/docs/sdk/src/reference_docs/fee_less_runtime.rs b/docs/sdk/src/reference_docs/fee_less_runtime.rs index 1213c2628253..9146b30ec577 100644 --- a/docs/sdk/src/reference_docs/fee_less_runtime.rs +++ b/docs/sdk/src/reference_docs/fee_less_runtime.rs @@ -1,5 +1,6 @@ //!
# Fee-Less Runtime //! +//! 🚧 Work In Progress 🚧 //! //! Notes: //! diff --git a/docs/sdk/src/reference_docs/frame_offchain_workers.rs b/docs/sdk/src/reference_docs/frame_offchain_workers.rs index 7999707e5ee0..1ec9212e2306 100644 --- a/docs/sdk/src/reference_docs/frame_offchain_workers.rs +++ b/docs/sdk/src/reference_docs/frame_offchain_workers.rs @@ -58,7 +58,6 @@ //! [`frame::pallet_macros::hooks`]. //! //! ``` -//! //! #[frame::pallet] //! pub mod pallet { //! use frame::prelude::*; diff --git a/docs/sdk/src/reference_docs/frame_system_accounts.rs b/docs/sdk/src/reference_docs/frame_system_accounts.rs index ae9d2c9e0cb3..523fe7043084 100644 --- a/docs/sdk/src/reference_docs/frame_system_accounts.rs +++ b/docs/sdk/src/reference_docs/frame_system_accounts.rs @@ -1,5 +1,7 @@ //! # FRAME Accounts //! +//! 🚧 Work In Progress 🚧 +//! //! How `frame_system` handles accountIds. Nonce. Consumers and Providers, reference counting. // - poorly understood topics, needs one great article to rul them all. diff --git a/docs/sdk/src/reference_docs/light_nodes.rs b/docs/sdk/src/reference_docs/light_nodes.rs deleted file mode 100644 index d6670bf03ab1..000000000000 --- a/docs/sdk/src/reference_docs/light_nodes.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! # Light Clients -//! -//! -//! Notes: should contain only high level information about light clients, then link to how to set -//! it up in PAPI and SubXT -//! -//! diff --git a/docs/sdk/src/reference_docs/metadata.rs b/docs/sdk/src/reference_docs/metadata.rs index 702c1c30fd9c..96f92ac0c412 100644 --- a/docs/sdk/src/reference_docs/metadata.rs +++ b/docs/sdk/src/reference_docs/metadata.rs @@ -1 +1,25 @@ //! # Metadata +//! +//! The existence of metadata in polkadot-sdk goes back to the (forkless) upgrade-ability of all +//! Substrate-based blockchains, which is achieved through +//! [`crate::reference_docs::wasm_meta_protocol`]. You can learn more about the details of how to +//!
deal with these upgrades in [`crate::reference_docs::frame_runtime_upgrades_and_migrations`]. +//! +//! Another consequence of upgrade-ability is that as a UI, wallet, or generally an offchain entity, +//! it is hard to know the types internal to the runtime, specifically in light of the fact that +//! they can change at any point in time. +//! +//! This is why all Substrate-based runtimes must expose a [`sp_api::Metadata`] api, which mandates +//! the runtime to return a description of itself. The return type of this api is `Vec`, meaning +//! that it is up to the runtime developer to decide on the format of this. +//! +//! All [`crate::polkadot_sdk::frame_runtime`] based runtimes expose a specific metadata language, +//! maintained in which is adopted in the Polkadot +//! ecosystem. +//! +//! ## Metadata Explorers: +//! +//! A few noteworthy tools that inspect the (FRAME-based) metadata of a chain: +//! +//! +//! diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index 8e0431c48b6f..51150a558375 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -40,7 +40,6 @@ pub mod runtime_vs_smart_contract; pub mod extrinsic_encoding; /// Learn about the signed extensions that form a part of extrinsics. -// TODO: @jsdw https://github.com/paritytech/polkadot-sdk-docs/issues/42 pub mod signed_extensions; /// Learn about *Origins*, a topic in FRAME that enables complex account abstractions to be built. @@ -59,9 +58,11 @@ pub mod fee_less_runtime; /// Learn about metadata, the main means through which an upgradeable runtime communicates its /// properties to the outside world. -// TODO: @jsdw https://github.com/paritytech/polkadot-sdk-docs/issues/47 pub mod metadata; +/// Learn about how to add custom host functions to the node. +pub mod custom_host_functions; + /// Learn about how frame-system handles `account-ids`, nonces, consumers and providers. 
pub mod frame_system_accounts; @@ -78,26 +79,12 @@ pub mod frame_tokens; /// Learn about chain specification file and the genesis state of the blockchain. pub mod chain_spec_genesis; -/// Learn about all the memory limitations of the WASM runtime when it comes to memory usage. -// TODO: @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/52 -pub mod wasm_memory; - /// Learn about Substrate's CLI, and how it can be extended. -// TODO: @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/53 pub mod cli; -/// Learn about Substrate's consensus algorithms, and how you can switch between two. -// TODO: @JoshOrndorff @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/54 -pub mod consensus_swapping; - /// Learn about Runtime Upgrades and best practices for writing Migrations. pub mod frame_runtime_upgrades_and_migrations; -/// Learn about light nodes, how they function, and how Substrate-based chains come -/// light-node-first out of the box. -// TODO: @jsdw @josepot https://github.com/paritytech/polkadot-sdk-docs/issues/68 -pub mod light_nodes; - /// Learn about the offchain workers, how they function, and how to use them, as provided by the /// [`frame`] APIs. pub mod frame_offchain_workers; diff --git a/docs/sdk/src/reference_docs/wasm_memory.rs b/docs/sdk/src/reference_docs/wasm_memory.rs deleted file mode 100644 index 4f4cda31094e..000000000000 --- a/docs/sdk/src/reference_docs/wasm_memory.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! # WASM Memory Limitations. -//! -//! Notes: -//! -//! - Stack: Need to use `Box<_>` -//! - Heap: Substrate imposes a limit. PvF execution has its own limits -//! - Heap: There is also a maximum amount that a single allocation can have. 
diff --git a/docs/sdk/src/reference_docs/wasm_meta_protocol.rs b/docs/sdk/src/reference_docs/wasm_meta_protocol.rs index 37d1460f0e1a..0e91e65c55e3 100644 --- a/docs/sdk/src/reference_docs/wasm_meta_protocol.rs +++ b/docs/sdk/src/reference_docs/wasm_meta_protocol.rs @@ -1,11 +1,13 @@ //! # WASM Meta Protocol //! //! All Substrate based chains adhere to a unique architectural design novel to the Polkadot -//! ecosystem. We refer to this design as the "WASM Meta Protocol". +//! ecosystem. We refer to this design as the "**WASM Meta Protocol**". //! //! Consider the fact that a traditional blockchain software is usually a monolithic artifact. -//! Upgrading any part of the system implies upgrading the entire system. This has historically led -//! to cumbersome forkful upgrades to be the status quo in the blockchain ecosystem. +//! **Upgrading any part of the system implies upgrading the entire system**. This has historically +//! led to cumbersome forkful upgrades to be the status quo in blockchain ecosystems. In other +//! words, the entire node software is the specification of the blockchain's [`state transition +//! function`](crate::reference_docs::blockchain_state_machines). //! //! Moreover, the idea of "storing code in the state" is explored in the context of smart contracts //! platforms, but has not been expanded further. @@ -15,17 +17,16 @@ //! that a smart contract platform stores the code of individual contracts in its state. As noted in //! [`crate::reference_docs::blockchain_state_machines`], this state transition function is called //! the **Runtime**, and WASM is chosen as the bytecode. The Runtime is stored under a special key -//! in the state (see -//! [`sp_core::storage::well_known_keys`](../../../sp_core/index.html)) and can be -//! updated as a part of the state transition function's execution, just like a user's account -//! balance can be updated. +//! 
in the state (see [`sp_core::storage::well_known_keys`]) and can be updated as a part of the +//! state transition function's execution, just like a user's account balance can be updated. //! //! > Note that while we drew an analogy between smart contracts and runtimes in the above, there //! > are fundamental differences between the two, explained in //! > [`crate::reference_docs::runtime_vs_smart_contract`]. //! -//! The rest of the system that is NOT the state transition function is called the **node**, and -//! is a normal binary that is compiled from Rust to different hardware targets. +//! The rest of the system that is NOT the state transition function is called the +//! [**Node**](crate::reference_docs::glossary#node), and is a normal binary that is compiled from +//! Rust to different hardware targets. //! //! This design enables all Substrate-based chains to be fork-less-ly upgradeable, because the //! Runtime can be updates on the fly, within the execution of a block, and the node is (for the @@ -47,15 +48,18 @@ #![doc = simple_mermaid::mermaid!("../../../mermaid/substrate_client_runtime.mmd")] //! //! A runtime must have a set of runtime APIs in order to have any meaningful blockchain -//! functionality, but it can also expose more APIs. See TODO as an example of how to add custom -//! runtime APIs to your FRAME-based runtime. +//! functionality, but it can also expose more APIs. See +//! [`crate::reference_docs::custom_runtime_api_rpc`] as an example of how to add custom runtime +//! APIs to your FRAME-based runtime. //! //! Similarly, for a runtime to be "compatible" with a node, the node must implement the full set of //! host functions that the runtime at any point in time requires. Given the fact that a runtime can //! evolve in time, and a blockchain node (typically) wishes to be capable of re-executing all the //! previous blocks, this means that a node must always maintain support for the old host functions. -//! 
This also implies that adding a new host function is a big commitment and should be done with -//! care. This is why, for example, adding a new host function to Polkadot always requires an RFC. +//! **This implies that adding a new host function is a big commitment and should be done with +//! care**. This is why, for example, adding a new host function to Polkadot always requires an RFC. +//! Learn how to add a new host function to your runtime in +//! [`crate::reference_docs::custom_host_functions`]. //! //! ## Node vs. Runtime //! @@ -90,11 +94,11 @@ //! //! In fact, [`sp_core::storage::well_known_keys`] are the only state keys that the node side is //! aware of. The rest of the state, including what logic the runtime has, what balance each user -//! has and such are all only comprehensible to the runtime. +//! has and such, are all only comprehensible to the runtime. #![doc = simple_mermaid::mermaid!("../../../mermaid/state.mmd")] //! //! In the above diagram, all of the state keys and values are opaque bytes to the node. The node -//! does not know what they mean, and it does not now what is the type of the corresponding value +//! does not know what they mean, and it does not know what is the type of the corresponding value //! (e.g. if it is a number of a vector). Contrary, the runtime knows both the meaning of their //! keys, and the type of the values. //! @@ -105,9 +109,50 @@ //! //! ## Native Runtime //! -//! TODO +//! Historically, the node software also kept a native copy of the runtime at the time of +//! compilation within it. This used to be called the "Native Runtime". The main purpose of the +//! native runtime used to be leveraging the faster execution time and better debugging +//! infrastructure of native code. However, neither of the two arguments strongly hold and the +//! native runtime is being fully removed from the node-sdk. //! +//! See: +//! +//! 
> Also, note that the flag [`sc_cli::ExecutionStrategy::Native`] is already a noop and all +//! > chains built with Substrate only use WASM execution. +//! +//! ### Runtime Versions +//! +//! An important detail of the native execution worth learning about is that the node software, +//! obviously, only uses the native runtime if it is the same code as with the wasm blob stored +//! onchain. Else, nodes that run the native runtime will come to a different state transition. How +//! do nodes determine if two runtimes are the same? Through the very important +//! [`sp_version::RuntimeVersion`]. All runtimes expose their version via a runtime api +//! ([`sp_api::Core::version`]) that returns this struct. The node software, or other applications, +//! inspect this struct to examine the identity of a runtime, and to determine if two runtimes are +//! the same. Namely, [`sp_version::RuntimeVersion::spec_version`] is the main key that implies two +//! runtimes are the same. +//! +//! Therefore, it is of utmost importance to make sure before any runtime upgrade, the spec version is +//! updated. //! //! ## Example: Block Execution. //! -//! TODO +//! As a final example to recap, let's look at how Substrate-based nodes execute blocks. Blocks are +//! received in the node side software as opaque blobs and in the networking layer. +//! +//! At some point, based on the consensus algorithm's rules, the node decides to import (aka. +//! *validate*) a block. +//! +//! * First, the node will then fetch the state of the parent hash of the block that wishes to be +//! imported. +//! * The runtime is fetched from this state, and placed into a WASM execution environment. +//! * The [`sp_api::Core::execute_block`] runtime API is called and the block is passed in as an +//! argument. +//! * The runtime will then execute the block, and update the state accordingly. Any state update is +//! issued via the [`sp_io::storage`] host functions. +//!
* Both the runtime and node will check the state-root of the state after the block execution to +//! match the one claimed in the block header. +//! +//! > Example taken from [this +//! > lecture](https://polkadot-blockchain-academy.github.io/pba-book/substrate/wasm/page.html#example-2-block-import-9) +//! > of the Polkadot Blockchain Academy. diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs index ccccc5063286..1a687cade79f 100644 --- a/substrate/frame/support/src/traits/hooks.rs +++ b/substrate/frame/support/src/traits/hooks.rs @@ -351,6 +351,7 @@ pub trait IntegrityTest { /// - [`crate::traits::misc::OffchainWorker`] /// - [`OnIdle`] /// - [`IntegrityTest`] +/// - [`OnPoll`] /// /// ## Ordering /// @@ -363,34 +364,32 @@ pub trait IntegrityTest { /// /// ```mermaid /// graph LR -/// Optional --> BeforeExtrinsics -/// BeforeExtrinsics --> Extrinsics -/// Extrinsics --> AfterExtrinsics -/// subgraph Optional +/// Optional --> Mandatory +/// Mandatory --> ExtrinsicsMandatory +/// ExtrinsicsMandatory --> Poll +/// Poll --> Extrinsics +/// Extrinsics --> AfterMandatory +/// AfterMandatory --> onIdle +/// +/// subgraph Optional /// OnRuntimeUpgrade /// end /// -/// subgraph BeforeExtrinsics +/// subgraph Mandatory /// OnInitialize /// end /// +/// subgraph ExtrinsicsMandatory +/// Inherent1 --> Inherent2 +/// end +/// /// subgraph Extrinsics /// direction TB -/// Inherent1 -/// Inherent2 -/// Extrinsic1 -/// Extrinsic2 -/// -/// Inherent1 --> Inherent2 -/// Inherent2 --> Extrinsic1 /// Extrinsic1 --> Extrinsic2 /// end /// -/// subgraph AfterExtrinsics -/// OnIdle +/// subgraph AfterMandatory /// OnFinalize -/// -/// OnIdle --> OnFinalize /// end /// ``` /// @@ -466,6 +465,8 @@ pub trait Hooks { /// /// Is not guaranteed to execute in a block and should therefore only be used in no-deadline /// scenarios. + /// + /// This is the non-mandatory version of [`Hooks::on_initialize`]. 
fn on_poll(_n: BlockNumber, _weight: &mut WeightMeter) {} /// Hook executed when a code change (aka. a "runtime upgrade") is detected by the FRAME diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml index abb16d163da0..f6e157680f9c 100644 --- a/substrate/primitives/io/Cargo.toml +++ b/substrate/primitives/io/Cargo.toml @@ -19,7 +19,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = { version = "1.1.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bytes"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ + "bytes", +] } sp-core = { path = "../core", default-features = false } sp-crypto-hashing = { path = "../crypto/hashing", default-features = false } sp-keystore = { path = "../keystore", default-features = false, optional = true } @@ -31,13 +33,18 @@ sp-trie = { path = "../trie", default-features = false, optional = true } sp-externalities = { path = "../externalities", default-features = false } sp-tracing = { path = "../tracing", default-features = false } log = { optional = true, workspace = true, default-features = true } -secp256k1 = { version = "0.28.0", features = ["global-context", "recovery"], optional = true } +secp256k1 = { version = "0.28.0", features = [ + "global-context", + "recovery", +], optional = true } tracing = { version = "0.1.29", default-features = false } tracing-core = { version = "0.1.32", default-features = false } # Required for backwards compatibility reason, but only used for verifying when `UseDalekExt` is set. 
ed25519-dalek = { version = "2.1", default-features = false, optional = true } +docify = { version = "0.2.8" } + [target.'cfg(all(any(target_arch = "riscv32", target_arch = "riscv64"), substrate_runtime))'.dependencies] polkavm-derive = { workspace = true } diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs index 8ef1f41ce019..67e822ba7e24 100644 --- a/substrate/primitives/io/src/lib.rs +++ b/substrate/primitives/io/src/lib.rs @@ -1805,6 +1805,7 @@ pub type TestExternalities = sp_state_machine::TestExternalities>; + type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; From 988103d7578ad515b13c69578da1237b28fa9f36 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Thu, 13 Jun 2024 10:44:05 +0200 Subject: [PATCH 25/52] Use aggregated types for `RuntimeFreezeReason` and better examples of `MaxFreezes` (#4615) This PR aligns the settings for `MaxFreezes`, `RuntimeFreezeReason`, and `FreezeIdentifier`. #### Future work and improvements https://github.com/paritytech/polkadot-sdk/issues/2997 (remove `MaxFreezes` and `FreezeIdentifier`) --- bridges/modules/messages/src/mock.rs | 1 - .../pallets/inbound-queue/src/mock.rs | 11 +------- bridges/snowbridge/pallets/system/src/mock.rs | 11 +------- .../pallets/collator-selection/src/mock.rs | 13 +--------- cumulus/pallets/xcmp-queue/src/mock.rs | 13 +--------- cumulus/parachains/common/src/impls.rs | 16 ++---------- .../runtime/common/src/assigned_slots/mod.rs | 17 +----------- polkadot/runtime/common/src/auctions.rs | 26 +++++-------------- polkadot/runtime/common/src/claims.rs | 19 ++------------ polkadot/runtime/common/src/crowdloan/mod.rs | 24 ++++------------- polkadot/runtime/common/src/impls.rs | 15 ++--------- .../runtime/common/src/integration_tests.rs | 13 +--------- .../runtime/common/src/paras_registrar/mod.rs | 15 +++-------- polkadot/runtime/common/src/purchase.rs | 17 +----------- polkadot/runtime/common/src/slots/mod.rs | 17 
+----------- polkadot/runtime/parachains/src/mock.rs | 11 +------- polkadot/runtime/westend/src/lib.rs | 4 +-- .../relay_token_transactor/parachain/mod.rs | 5 ++-- .../relay_token_transactor/relay_chain/mod.rs | 4 +-- polkadot/xcm/pallet-xcm/src/mock.rs | 13 +--------- polkadot/xcm/xcm-builder/tests/mock/mod.rs | 14 ++-------- .../xcm/xcm-simulator/fuzzer/src/parachain.rs | 14 +--------- .../xcm-simulator/fuzzer/src/relay_chain.rs | 13 +--------- substrate/bin/node/runtime/src/lib.rs | 4 +-- substrate/frame/alliance/src/mock.rs | 17 +----------- .../frame/asset-conversion/ops/src/mock.rs | 3 +-- substrate/frame/asset-conversion/src/mock.rs | 11 +------- substrate/frame/asset-rate/src/mock.rs | 15 ++--------- substrate/frame/assets/src/mock.rs | 15 ++--------- substrate/frame/atomic-swap/src/tests.rs | 18 ++----------- substrate/frame/babe/src/mock.rs | 11 +------- substrate/frame/balances/src/lib.rs | 4 +-- .../balances/src/tests/dispatchable_tests.rs | 2 +- substrate/frame/balances/src/tests/mod.rs | 13 +++------- substrate/frame/beefy/src/mock.rs | 11 +------- substrate/frame/bounties/src/tests.rs | 13 +--------- substrate/frame/child-bounties/src/tests.rs | 13 +--------- substrate/frame/contracts/src/lib.rs | 2 +- .../frame/conviction-voting/src/tests.rs | 13 +--------- substrate/frame/delegated-staking/src/mock.rs | 15 ++++------- substrate/frame/democracy/src/tests.rs | 13 +--------- .../election-provider-multi-phase/src/mock.rs | 14 +--------- .../test-staking-e2e/src/mock.rs | 12 +++------ substrate/frame/elections-phragmen/src/lib.rs | 15 ++--------- substrate/frame/examples/basic/src/tests.rs | 13 +--------- .../frame/examples/dev-mode/src/tests.rs | 15 ++--------- .../frame/examples/kitchensink/src/tests.rs | 18 +++++-------- .../single-block-migrations/src/mock.rs | 15 ++--------- substrate/frame/executive/src/tests.rs | 22 +++++++++++++--- substrate/frame/fast-unstake/src/mock.rs | 11 +------- substrate/frame/grandpa/src/mock.rs | 11 +------- 
substrate/frame/identity/src/tests.rs | 13 +--------- substrate/frame/indices/src/mock.rs | 13 +--------- substrate/frame/lottery/src/mock.rs | 15 ++--------- .../frame/nft-fractionalization/src/mock.rs | 13 +--------- substrate/frame/nfts/src/mock.rs | 13 +--------- .../nomination-pools/benchmarking/src/mock.rs | 20 +++++++------- substrate/frame/nomination-pools/src/mock.rs | 15 ++++------- .../test-delegate-stake/src/mock.rs | 13 +++------- .../test-transfer-stake/src/mock.rs | 14 +++------- .../frame/offences/benchmarking/src/mock.rs | 12 +-------- substrate/frame/parameters/src/tests/mock.rs | 1 - .../parameters/src/tests/test_renamed.rs | 1 - substrate/frame/preimage/src/mock.rs | 14 ++-------- substrate/frame/recovery/src/mock.rs | 11 +------- substrate/frame/referenda/src/mock.rs | 13 +--------- substrate/frame/root-offences/src/mock.rs | 13 +--------- substrate/frame/safe-mode/src/mock.rs | 12 +-------- substrate/frame/scored-pool/src/mock.rs | 13 +--------- .../frame/session/benchmarking/src/mock.rs | 12 +-------- substrate/frame/staking/src/mock.rs | 10 +------ substrate/frame/statement/src/mock.rs | 12 +-------- .../test/tests/runtime_legacy_ordering.rs | 2 +- substrate/frame/tips/src/tests.rs | 13 +--------- .../asset-conversion-tx-payment/src/mock.rs | 12 +-------- .../asset-tx-payment/src/mock.rs | 12 +-------- .../frame/transaction-payment/src/mock.rs | 15 ++--------- substrate/frame/treasury/src/tests.rs | 14 ++-------- substrate/frame/tx-pause/src/mock.rs | 17 +----------- substrate/frame/uniques/src/mock.rs | 13 +--------- substrate/frame/utility/src/tests.rs | 13 +--------- substrate/frame/vesting/src/mock.rs | 17 ++---------- substrate/frame/whitelist/src/mock.rs | 15 ++--------- .../parachain/runtime/src/configs/mod.rs | 8 +++--- templates/solochain/runtime/src/lib.rs | 13 ++++++---- 85 files changed, 177 insertions(+), 885 deletions(-) diff --git a/bridges/modules/messages/src/mock.rs b/bridges/modules/messages/src/mock.rs index 
ec63f15b94b5..3a1e0063d533 100644 --- a/bridges/modules/messages/src/mock.rs +++ b/bridges/modules/messages/src/mock.rs @@ -86,7 +86,6 @@ impl frame_system::Config for TestRuntime { #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for TestRuntime { - type ReserveIdentifier = [u8; 8]; type AccountStore = System; } diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index a842f9aa60cb..a031676c6076 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -53,20 +53,11 @@ parameter_types! { pub const ExistentialDeposit: u128 = 1; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! 
{ diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index d7fc4152b371..98bd3da9ab27 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -112,20 +112,11 @@ impl frame_system::Config for Test { type Block = Block; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU128<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_xcm_origin::Config for Test { diff --git a/cumulus/pallets/collator-selection/src/mock.rs b/cumulus/pallets/collator-selection/src/mock.rs index 6521c954eac2..459b1cb5fdf2 100644 --- a/cumulus/pallets/collator-selection/src/mock.rs +++ b/cumulus/pallets/collator-selection/src/mock.rs @@ -53,23 +53,12 @@ impl system::Config for Test { parameter_types! 
{ pub const ExistentialDeposit: u64 = 5; - pub const MaxReserves: u32 = 50; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } pub struct Author4; diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index e166a78ee822..7fb96de7a4ea 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -85,25 +85,14 @@ impl frame_system::Config for Test { parameter_types! { pub const ExistentialDeposit: u64 = 5; - pub const MaxReserves: u32 = 50; } pub type Balance = u64; +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } impl cumulus_pallet_parachain_system::Config for Test { diff --git a/cumulus/parachains/common/src/impls.rs b/cumulus/parachains/common/src/impls.rs index ed9c5c483fa7..16cda1a4ed83 100644 --- a/cumulus/parachains/common/src/impls.rs +++ b/cumulus/parachains/common/src/impls.rs @@ -202,7 +202,7 @@ mod tests { use frame_system::{limits, EnsureRoot}; use pallet_collator_selection::IdentityCollator; use 
polkadot_primitives::AccountId; - use sp_core::{ConstU64, H256}; + use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, BuildStorage, Perbill, @@ -224,7 +224,6 @@ mod tests { parameter_types! { pub BlockLength: limits::BlockLength = limits::BlockLength::max(2 * 1024); pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const MaxReserves: u32 = 50; } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] @@ -253,20 +252,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type MaxLocks = (); - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } pub struct OneAuthor; diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs index 368708f25640..d0a531b8b6ca 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -698,24 +698,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } impl parachains_configuration::Config for Test { diff --git a/polkadot/runtime/common/src/auctions.rs b/polkadot/runtime/common/src/auctions.rs index 199b18fba51d..19d82ae85d00 100644 --- a/polkadot/runtime/common/src/auctions.rs +++ b/polkadot/runtime/common/src/auctions.rs @@ -674,7 +674,7 @@ mod tests { use frame_support::{ assert_noop, assert_ok, assert_storage_noop, derive_impl, ord_parameter_types, parameter_types, - traits::{ConstU32, EitherOfDiverse, OnFinalize, OnInitialize}, + traits::{EitherOfDiverse, OnFinalize, OnInitialize}, }; use frame_system::{EnsureRoot, EnsureSignedBy}; use pallet_balances; @@ -725,25 +725,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; - pub const MaxReserves: u32 = 50; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } #[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Debug)] @@ -1426,7 +1410,8 @@ mod tests { #[test] fn initialize_winners_in_ending_period_works() { new_test_ext().execute_with(|| { - assert_eq!(::ExistentialDeposit::get(), 1); + let ed: u64 = ::ExistentialDeposit::get(); + assert_eq!(ed, 1); run_to_block(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 1)); let para_1 = ParaId::from(1_u32); @@ -1539,7 +1524,8 @@ mod tests { #[test] fn less_winning_samples_work() { new_test_ext().execute_with(|| { - assert_eq!(::ExistentialDeposit::get(), 1); + let ed: u64 = ::ExistentialDeposit::get(); + assert_eq!(ed, 1); EndingPeriod::set(30); SampleLength::set(10); diff --git a/polkadot/runtime/common/src/claims.rs b/polkadot/runtime/common/src/claims.rs index 54208e7fd135..c12af215a04d 100644 --- a/polkadot/runtime/common/src/claims.rs +++ b/polkadot/runtime/common/src/claims.rs @@ -708,7 +708,7 @@ mod tests { assert_err, assert_noop, assert_ok, derive_impl, dispatch::{GetDispatchInfo, Pays}, ord_parameter_types, parameter_types, - traits::{ConstU32, ExistenceRequirement, WithdrawReasons}, + traits::{ExistenceRequirement, WithdrawReasons}, }; use pallet_balances; use sp_runtime::{ @@ -738,24 +738,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } parameter_types! { diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs index 1dbba363de56..61d406aa6812 100644 --- a/polkadot/runtime/common/src/crowdloan/mod.rs +++ b/polkadot/runtime/common/src/crowdloan/mod.rs @@ -860,7 +860,7 @@ mod tests { use frame_support::{ assert_noop, assert_ok, derive_impl, parameter_types, - traits::{ConstU32, OnFinalize, OnInitialize}, + traits::{OnFinalize, OnInitialize}, }; use polkadot_primitives::Id as ParaId; use sp_core::H256; @@ -918,24 +918,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } #[derive(Copy, Clone, Eq, PartialEq, Debug)] @@ -980,7 +965,7 @@ mod tests { let fund = Funds::::get(para).unwrap(); let account_id = Crowdloan::fund_account_id(fund.fund_index); if winner { - let ed = ::ExistentialDeposit::get(); + let ed: u64 = ::ExistentialDeposit::get(); let free_balance = Balances::free_balance(&account_id); Balances::reserve(&account_id, free_balance - ed) .expect("should be able to reserve free balance minus ED"); @@ -1815,7 +1800,8 @@ mod tests { #[test] fn withdraw_from_finished_works() { new_test_ext().execute_with(|| { - assert_eq!(::ExistentialDeposit::get(), 1); + let ed: u64 = ::ExistentialDeposit::get(); + assert_eq!(ed, 1); let para = new_para(); let index = NextFundIndex::::get(); let account_id = Crowdloan::fund_account_id(index); diff --git a/polkadot/runtime/common/src/impls.rs b/polkadot/runtime/common/src/impls.rs index ac2288c906a5..c913b90b1538 100644 --- a/polkadot/runtime/common/src/impls.rs +++ b/polkadot/runtime/common/src/impls.rs @@ -249,7 +249,7 @@ mod tests { parameter_types, traits::{ tokens::{PayFromAccount, UnityAssetBalanceConversion}, - ConstU32, FindAuthor, + FindAuthor, }, weights::Weight, PalletId, @@ -315,20 +315,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type 
RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } parameter_types! { diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index e77035b3f6b4..052fb0389db4 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -173,23 +173,12 @@ impl pallet_timestamp::Config for Test { parameter_types! { pub static ExistentialDeposit: Balance = 1; - pub const MaxReserves: u32 = 50; } - +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } impl configuration::Config for Test { diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index 9bbb152f855f..6b9191f7c6f2 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -721,7 +721,7 @@ mod tests { assert_noop, assert_ok, derive_impl, error::BadOrigin, parameter_types, - traits::{ConstU32, OnFinalize, OnInitialize}, + traits::{OnFinalize, OnInitialize}, }; use frame_system::limits; use pallet_balances::Error as BalancesError; @@ -799,20 +799,11 @@ mod tests { pub const 
ExistentialDeposit: Balance = 1; } + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Balance = Balance; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } impl shared::Config for Test { diff --git a/polkadot/runtime/common/src/purchase.rs b/polkadot/runtime/common/src/purchase.rs index 5ae6b422618e..eb480e4efe1f 100644 --- a/polkadot/runtime/common/src/purchase.rs +++ b/polkadot/runtime/common/src/purchase.rs @@ -534,24 +534,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } parameter_types! { diff --git a/polkadot/runtime/common/src/slots/mod.rs b/polkadot/runtime/common/src/slots/mod.rs index 900e04eaff18..747b7b5ca634 100644 --- a/polkadot/runtime/common/src/slots/mod.rs +++ b/polkadot/runtime/common/src/slots/mod.rs @@ -551,24 +551,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } parameter_types! { diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 0a0be8432b25..18722ff463cf 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -139,20 +139,11 @@ parameter_types! { pub static ExistentialDeposit: u64 = 1; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } parameter_types! 
{ diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 77262a98a94c..511b502fea43 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -29,7 +29,7 @@ use frame_support::{ traits::{ fungible::HoldConsideration, tokens::UnityOrOuterConversion, ConstU32, Contains, EitherOf, EitherOfDiverse, EverythingBut, FromContains, InstanceFilter, KeyOwnerProofSystem, - LinearStoragePrice, ProcessMessage, ProcessMessageError, WithdrawReasons, + LinearStoragePrice, ProcessMessage, ProcessMessageError, VariantCountOf, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter, WeightToFee as _}, PalletId, @@ -310,7 +310,7 @@ impl pallet_balances::Config for Runtime { type RuntimeHoldReason = RuntimeHoldReason; type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; + type MaxFreezes = VariantCountOf; } parameter_types! { diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs index e3fdda2e7333..e7d00ac71038 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs @@ -36,7 +36,7 @@ construct_runtime! 
{ } } -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type Block = Block; type AccountId = AccountId; @@ -49,8 +49,7 @@ impl mock_message_queue::Config for Runtime { type XcmExecutor = XcmExecutor; } -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type Balance = Balance; type AccountStore = System; } diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs index 25c35dd4aaa8..686f86b37b73 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs @@ -36,7 +36,7 @@ parameter_types! { pub const BlockHashCount: u64 = 250; } -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type AccountId = AccountId; type Lookup = IdentityLookup; @@ -44,7 +44,7 @@ impl frame_system::Config for Runtime { type AccountData = pallet_balances::AccountData; } -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { type AccountStore = System; } diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index ead98e1d0460..2be6f301f856 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -266,24 +266,13 @@ impl frame_system::Config for Test { parameter_types! 
{ pub ExistentialDeposit: Balance = 1; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = MaxLocks; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } #[cfg(feature = "runtime-benchmarks")] diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs index 62b448a9f430..582d596b78f1 100644 --- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs +++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs @@ -17,7 +17,7 @@ use codec::Encode; use frame_support::{ construct_runtime, derive_impl, parameter_types, - traits::{ConstU32, Everything, Nothing}, + traits::{Everything, Nothing}, weights::Weight, }; use frame_system::EnsureRoot; @@ -102,24 +102,14 @@ impl frame_system::Config for Runtime { parameter_types! 
{ pub ExistentialDeposit: Balance = 1 * CENTS; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = MaxLocks; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } impl shared::Config for Runtime { diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs index 502bcca2d442..11435868d468 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs @@ -24,7 +24,6 @@ use frame_support::{ }; use frame_system::EnsureRoot; -use sp_core::ConstU32; use sp_runtime::{ generic, traits::{AccountIdLookup, BlakeTwo256, Hash, IdentifyAccount, Verify}, @@ -73,24 +72,13 @@ impl frame_system::Config for Runtime { parameter_types! { pub ExistentialDeposit: Balance = 1; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = MaxLocks; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } parameter_types! 
{ diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs index 4740aee83d87..459d2640b6d9 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs @@ -72,24 +72,13 @@ impl frame_system::Config for Runtime { parameter_types! { pub ExistentialDeposit: Balance = 1; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = MaxLocks; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } impl shared::Config for Runtime { diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 8fb59a9d8474..2bddb3a1adef 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -49,7 +49,7 @@ use frame_support::{ AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, Contains, Currency, EitherOfDiverse, EnsureOriginWithArg, EqualPrivilegeOnly, Imbalance, InsideBoth, InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, LockIdentifier, Nothing, - OnUnbalanced, WithdrawReasons, + OnUnbalanced, VariantCountOf, WithdrawReasons, }, weights::{ constants::{ @@ -542,7 +542,7 @@ impl pallet_balances::Config for Runtime { type AccountStore = frame_system::Pallet; type WeightInfo = pallet_balances::weights::SubstrateWeight; type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; + type MaxFreezes = VariantCountOf; } parameter_types! 
{ diff --git a/substrate/frame/alliance/src/mock.rs b/substrate/frame/alliance/src/mock.rs index a9cfd6d0fde0..1a0a899bcccb 100644 --- a/substrate/frame/alliance/src/mock.rs +++ b/substrate/frame/alliance/src/mock.rs @@ -52,24 +52,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } -parameter_types! { - pub const ExistentialDeposit: u64 = 1; - pub const MaxLocks: u32 = 10; -} +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = MaxLocks; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } const MOTION_DURATION_IN_BLOCKS: BlockNumber = 3; diff --git a/substrate/frame/asset-conversion/ops/src/mock.rs b/substrate/frame/asset-conversion/ops/src/mock.rs index 9454b3a9ad44..91c18b2e7949 100644 --- a/substrate/frame/asset-conversion/ops/src/mock.rs +++ b/substrate/frame/asset-conversion/ops/src/mock.rs @@ -52,7 +52,7 @@ construct_runtime!( } ); -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type Block = Block; type AccountData = pallet_balances::AccountData; @@ -60,7 +60,6 @@ impl frame_system::Config for Test { #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type ReserveIdentifier = [u8; 8]; type AccountStore = System; } diff --git a/substrate/frame/asset-conversion/src/mock.rs b/substrate/frame/asset-conversion/src/mock.rs index 477866e0051b..d8832d70488a 100644 --- a/substrate/frame/asset-conversion/src/mock.rs +++ 
b/substrate/frame/asset-conversion/src/mock.rs @@ -61,20 +61,11 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU128<100>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_assets::Config for Test { diff --git a/substrate/frame/asset-rate/src/mock.rs b/substrate/frame/asset-rate/src/mock.rs index d01996dab193..c829d78afa88 100644 --- a/substrate/frame/asset-rate/src/mock.rs +++ b/substrate/frame/asset-rate/src/mock.rs @@ -18,7 +18,7 @@ //! The crate's mock. use crate as pallet_asset_rate; -use frame_support::{derive_impl, traits::ConstU64}; +use frame_support::derive_impl; use sp_runtime::BuildStorage; type Block = frame_system::mocking::MockBlock; @@ -38,20 +38,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = (); } impl pallet_asset_rate::Config for Test { diff --git a/substrate/frame/assets/src/mock.rs b/substrate/frame/assets/src/mock.rs index f6173a451fff..694ef234dffb 100644 --- a/substrate/frame/assets/src/mock.rs +++ 
b/substrate/frame/assets/src/mock.rs @@ -23,7 +23,7 @@ use crate as pallet_assets; use codec::Encode; use frame_support::{ construct_runtime, derive_impl, parameter_types, - traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, + traits::{AsEnsureOriginWithArg, ConstU32}, }; use sp_io::storage; use sp_runtime::BuildStorage; @@ -49,20 +49,9 @@ impl frame_system::Config for Test { type MaxConsumers = ConstU32<3>; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); - type FreezeIdentifier = (); - type MaxFreezes = (); } pub struct AssetsCallbackHandle; diff --git a/substrate/frame/atomic-swap/src/tests.rs b/substrate/frame/atomic-swap/src/tests.rs index 9f51f04208aa..47ebe6a8f0ac 100644 --- a/substrate/frame/atomic-swap/src/tests.rs +++ b/substrate/frame/atomic-swap/src/tests.rs @@ -20,10 +20,7 @@ use super::*; use crate as pallet_atomic_swap; -use frame_support::{ - derive_impl, - traits::{ConstU32, ConstU64}, -}; +use frame_support::{derive_impl, traits::ConstU32}; use sp_runtime::BuildStorage; type Block = frame_system::mocking::MockBlock; @@ -43,20 +40,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - 
type RuntimeFreezeReason = (); } impl Config for Test { diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index 395a86e65288..16db40e3cb35 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -112,20 +112,11 @@ impl pallet_timestamp::Config for Test { type WeightInfo = (); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU128<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } pallet_staking_reward_curve::build! { diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index 56eb81b49e2d..4935323b3aa1 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -222,13 +222,13 @@ pub mod pallet { type ExistentialDeposit = ConstU64<1>; type ReserveIdentifier = (); - type FreezeIdentifier = (); + type FreezeIdentifier = Self::RuntimeFreezeReason; type DustRemoval = (); type MaxLocks = ConstU32<100>; type MaxReserves = ConstU32<100>; - type MaxFreezes = ConstU32<100>; + type MaxFreezes = VariantCountOf; type WeightInfo = (); } diff --git a/substrate/frame/balances/src/tests/dispatchable_tests.rs b/substrate/frame/balances/src/tests/dispatchable_tests.rs index 4bc96f6b43d9..ebc9f1b1a369 100644 --- a/substrate/frame/balances/src/tests/dispatchable_tests.rs +++ b/substrate/frame/balances/src/tests/dispatchable_tests.rs @@ -281,7 +281,7 @@ fn force_adjust_total_issuance_saturates() { ExtBuilder::default().build_and_execute_with(|| { assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 1337, 64)); let ti = Balances::total_issuance(); - let max = 
Balance::max_value(); + let max = ::Balance::max_value(); assert_eq!(ti, 64); // Increment saturates: diff --git a/substrate/frame/balances/src/tests/mod.rs b/substrate/frame/balances/src/tests/mod.rs index 0abf2251290f..5ed37170407f 100644 --- a/substrate/frame/balances/src/tests/mod.rs +++ b/substrate/frame/balances/src/tests/mod.rs @@ -27,7 +27,7 @@ use frame_support::{ parameter_types, traits::{ fungible, ConstU32, ConstU8, Imbalance as ImbalanceT, OnUnbalanced, StorageMapShim, - StoredMap, VariantCount, WhitelistedStorageKeys, + StoredMap, VariantCount, VariantCountOf, WhitelistedStorageKeys, }, weights::{IdentityFee, Weight}, }; @@ -107,22 +107,17 @@ impl pallet_transaction_payment::Config for Test { type FeeMultiplierUpdate = (); } -pub(crate) type Balance = u64; - +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl Config for Test { - type Balance = Balance; type DustRemoval = DustTrap; - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = TestAccountStore; - type MaxLocks = ConstU32<50>; type MaxReserves = ConstU32<2>; type ReserveIdentifier = TestId; - type WeightInfo = (); type RuntimeHoldReason = TestId; - type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeFreezeReason = TestId; type FreezeIdentifier = TestId; - type MaxFreezes = ConstU32<2>; + type MaxFreezes = VariantCountOf; } #[derive(Clone)] diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 0b87de6bf5d7..ceca0fd07b73 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -120,20 +120,11 @@ impl pallet_authorship::Config for Test { type EventHandler = (); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type 
ExistentialDeposit = ConstU128<1>; type AccountStore = System; - type WeightInfo = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); - type FreezeIdentifier = (); - type MaxFreezes = (); } impl pallet_timestamp::Config for Test { diff --git a/substrate/frame/bounties/src/tests.rs b/substrate/frame/bounties/src/tests.rs index a89f4ff9fbf3..212f0bd29590 100644 --- a/substrate/frame/bounties/src/tests.rs +++ b/substrate/frame/bounties/src/tests.rs @@ -66,20 +66,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! 
{ pub const ProposalBond: Permill = Permill::from_percent(5); diff --git a/substrate/frame/child-bounties/src/tests.rs b/substrate/frame/child-bounties/src/tests.rs index d9405d3d2897..38e86c528e5c 100644 --- a/substrate/frame/child-bounties/src/tests.rs +++ b/substrate/frame/child-bounties/src/tests.rs @@ -69,20 +69,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { pub const ProposalBond: Permill = Permill::from_percent(5); diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index e9cf28a66912..47772e0a5a0b 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -529,7 +529,7 @@ pub mod pallet { } } - #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)] + #[derive_impl(frame_system::config_preludes::TestDefaultConfig, no_aggregated_types)] impl frame_system::DefaultConfig for TestDefaultConfig {} #[frame_support::register_default_impl(TestDefaultConfig)] diff --git a/substrate/frame/conviction-voting/src/tests.rs b/substrate/frame/conviction-voting/src/tests.rs index 0e985e25290f..78569fb3c9f2 100644 --- a/substrate/frame/conviction-voting/src/tests.rs +++ b/substrate/frame/conviction-voting/src/tests.rs @@ -54,20 +54,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl 
pallet_balances::Config for Test { - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type MaxLocks = ConstU32<10>; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } #[derive(Clone, PartialEq, Eq, Debug)] diff --git a/substrate/frame/delegated-staking/src/mock.rs b/substrate/frame/delegated-staking/src/mock.rs index c1875055f2fe..0991833f8650 100644 --- a/substrate/frame/delegated-staking/src/mock.rs +++ b/substrate/frame/delegated-staking/src/mock.rs @@ -20,7 +20,7 @@ use frame_support::{ assert_ok, derive_impl, pallet_prelude::*, parameter_types, - traits::{ConstU64, Currency}, + traits::{ConstU64, Currency, VariantCountOf}, PalletId, }; @@ -44,7 +44,7 @@ pub const GENESIS_VALIDATOR: AccountId = 1; pub const GENESIS_NOMINATOR_ONE: AccountId = 101; pub const GENESIS_NOMINATOR_TWO: AccountId = 102; -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type Block = Block; type AccountData = pallet_balances::AccountData; @@ -64,19 +64,14 @@ pub type Balance = u128; parameter_types! 
{ pub static ExistentialDeposit: Balance = 1; } + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = ConstU32<128>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = RuntimeHoldReason; + type MaxFreezes = VariantCountOf; type RuntimeFreezeReason = RuntimeFreezeReason; } diff --git a/substrate/frame/democracy/src/tests.rs b/substrate/frame/democracy/src/tests.rs index 9303c0da504f..7d7066c8af69 100644 --- a/substrate/frame/democracy/src/tests.rs +++ b/substrate/frame/democracy/src/tests.rs @@ -108,20 +108,9 @@ impl pallet_scheduler::Config for Test { type Preimages = (); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type MaxLocks = ConstU32<10>; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { pub static PreimageByteDeposit: u64 = 0; diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index 92b87d92e99b..4532185b959c 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -237,7 +237,6 @@ impl frame_system::Config for Runtime { const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights ::with_sensible_defaults( Weight::from_parts(2u64 * constants::WEIGHT_REF_TIME_PER_SECOND, u64::MAX), @@ -245,20 +244,9 @@ parameter_types! { ); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } #[derive(Default, Eq, PartialEq, Debug, Clone, Copy)] diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index e5987ec33f06..9c4991513633 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -19,7 +19,7 @@ use frame_support::{ assert_ok, parameter_types, traits, - traits::{Hooks, UnfilteredDispatchable}, + traits::{Hooks, UnfilteredDispatchable, VariantCountOf}, weights::constants, }; use frame_system::EnsureRoot; @@ -102,20 +102,14 @@ parameter_types! 
{ ); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = traits::ConstU32<1024>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxFreezes = traits::ConstU32<1>; + type MaxFreezes = VariantCountOf; type RuntimeHoldReason = RuntimeHoldReason; type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = RuntimeFreezeReason; - type WeightInfo = (); } impl pallet_timestamp::Config for Runtime { diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs index b4be07030efb..a5b6fca0a8a1 100644 --- a/substrate/frame/elections-phragmen/src/lib.rs +++ b/substrate/frame/elections-phragmen/src/lib.rs @@ -1310,7 +1310,7 @@ mod tests { assert_noop, assert_ok, derive_impl, dispatch::DispatchResultWithPostInfo, parameter_types, - traits::{ConstU32, ConstU64, OnInitialize}, + traits::{ConstU32, OnInitialize}, }; use frame_system::ensure_signed; use sp_runtime::{testing::Header, BuildStorage}; @@ -1322,20 +1322,9 @@ mod tests { type AccountData = pallet_balances::AccountData; } + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = frame_system::Pallet; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } frame_support::parameter_types! 
{ diff --git a/substrate/frame/examples/basic/src/tests.rs b/substrate/frame/examples/basic/src/tests.rs index d351b27eecde..505cd6f906de 100644 --- a/substrate/frame/examples/basic/src/tests.rs +++ b/substrate/frame/examples/basic/src/tests.rs @@ -71,20 +71,9 @@ impl frame_system::Config for Test { type MaxConsumers = frame_support::traits::ConstU32<16>; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl Config for Test { diff --git a/substrate/frame/examples/dev-mode/src/tests.rs b/substrate/frame/examples/dev-mode/src/tests.rs index e8a18ec13fe9..637864b87bc4 100644 --- a/substrate/frame/examples/dev-mode/src/tests.rs +++ b/substrate/frame/examples/dev-mode/src/tests.rs @@ -18,7 +18,7 @@ //! Tests for pallet-dev-mode. 
use crate::*; -use frame_support::{assert_ok, derive_impl, traits::ConstU64}; +use frame_support::{assert_ok, derive_impl}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -65,20 +65,9 @@ impl frame_system::Config for Test { type MaxConsumers = frame_support::traits::ConstU32<16>; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; } impl Config for Test { diff --git a/substrate/frame/examples/kitchensink/src/tests.rs b/substrate/frame/examples/kitchensink/src/tests.rs index 1205fefc4229..7cf95497bf06 100644 --- a/substrate/frame/examples/kitchensink/src/tests.rs +++ b/substrate/frame/examples/kitchensink/src/tests.rs @@ -18,7 +18,7 @@ //! Tests for pallet-example-kitchensink. use crate::*; -use frame_support::{assert_ok, derive_impl, parameter_types, traits::ConstU64}; +use frame_support::{assert_ok, derive_impl, parameter_types, traits::VariantCountOf}; use sp_runtime::BuildStorage; // Reexport crate as its pallet name for construct_runtime. 
use crate as pallet_example_kitchensink; @@ -43,20 +43,14 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); + type FreezeIdentifier = RuntimeFreezeReason; + type MaxFreezes = VariantCountOf; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; } parameter_types! { diff --git a/substrate/frame/examples/single-block-migrations/src/mock.rs b/substrate/frame/examples/single-block-migrations/src/mock.rs index 68594cc4ad72..f4cf81ea6474 100644 --- a/substrate/frame/examples/single-block-migrations/src/mock.rs +++ b/substrate/frame/examples/single-block-migrations/src/mock.rs @@ -18,7 +18,7 @@ #![cfg(any(all(feature = "try-runtime", test), doc))] use crate::*; -use frame_support::{derive_impl, traits::ConstU64, weights::constants::ParityDbWeight}; +use frame_support::{derive_impl, weights::constants::ParityDbWeight}; // Re-export crate as its pallet name for construct_runtime. 
use crate as pallet_example_storage_migration; @@ -41,20 +41,9 @@ impl frame_system::Config for MockRuntime { type DbWeight = ParityDbWeight; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for MockRuntime { - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); } impl Config for MockRuntime {} diff --git a/substrate/frame/executive/src/tests.rs b/substrate/frame/executive/src/tests.rs index e3721f7b6dcb..71cb54d1fab4 100644 --- a/substrate/frame/executive/src/tests.rs +++ b/substrate/frame/executive/src/tests.rs @@ -36,7 +36,7 @@ use frame_support::{ migrations::MultiStepMigrator, pallet_prelude::*, parameter_types, - traits::{fungible, ConstU8, Currency, IsInherent}, + traits::{fungible, ConstU8, Currency, IsInherent, VariantCount, VariantCountOf}, weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight, WeightMeter, WeightToFee}, }; use frame_system::{pallet_prelude::*, ChainContext, LastRuntimeUpgrade, LastRuntimeUpgradeInfo}; @@ -325,12 +325,24 @@ impl frame_system::Config for Runtime { type MultiBlockMigrator = MockedModeGetter; } +#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, MaxEncodedLen, TypeInfo, RuntimeDebug)] +pub enum FreezeReasonId { + Foo, +} + +impl VariantCount for FreezeReasonId { + const VARIANT_COUNT: u32 = 1; +} + type Balance = u64; #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { type Balance = Balance; type AccountStore = System; + type RuntimeFreezeReason = FreezeReasonId; + type FreezeIdentifier = FreezeReasonId; + type MaxFreezes = VariantCountOf; } 
parameter_types! { @@ -743,8 +755,12 @@ fn validate_unsigned() { fn can_not_pay_for_tx_fee_on_full_lock() { let mut t = new_test_ext(1); t.execute_with(|| { - as fungible::MutateFreeze>::set_freeze(&(), &1, 110) - .unwrap(); + as fungible::MutateFreeze>::set_freeze( + &FreezeReasonId::Foo, + &1, + 110, + ) + .unwrap(); let xt = TestXt::new( RuntimeCall::System(frame_system::Call::remark { remark: vec![1u8] }), sign_extra(1, 0, 0), diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs index 9238a085141d..63bf533d8ee4 100644 --- a/substrate/frame/fast-unstake/src/mock.rs +++ b/substrate/frame/fast-unstake/src/mock.rs @@ -60,20 +60,11 @@ parameter_types! { pub static ExistentialDeposit: Balance = 1; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = ConstU32<128>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } pallet_staking_reward_curve::build! 
{ diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 38b5536bc598..5642ffe89980 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -108,20 +108,11 @@ impl pallet_authorship::Config for Test { type EventHandler = (); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU128<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_timestamp::Config for Test { diff --git a/substrate/frame/identity/src/tests.rs b/substrate/frame/identity/src/tests.rs index b1a953d487ce..09edd5de79bb 100644 --- a/substrate/frame/identity/src/tests.rs +++ b/substrate/frame/identity/src/tests.rs @@ -61,20 +61,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! 
{ diff --git a/substrate/frame/indices/src/mock.rs b/substrate/frame/indices/src/mock.rs index 7a8ff98f6d4a..72bbc6dab4a4 100644 --- a/substrate/frame/indices/src/mock.rs +++ b/substrate/frame/indices/src/mock.rs @@ -42,20 +42,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl Config for Test { diff --git a/substrate/frame/lottery/src/mock.rs b/substrate/frame/lottery/src/mock.rs index 596e1a9d837d..d2c442e2ac6e 100644 --- a/substrate/frame/lottery/src/mock.rs +++ b/substrate/frame/lottery/src/mock.rs @@ -22,7 +22,7 @@ use crate as pallet_lottery; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU32, ConstU64, OnFinalize, OnInitialize}, + traits::{ConstU32, OnFinalize, OnInitialize}, }; use frame_support_test::TestRandomness; use frame_system::EnsureRoot; @@ -49,20 +49,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! 
{ diff --git a/substrate/frame/nft-fractionalization/src/mock.rs b/substrate/frame/nft-fractionalization/src/mock.rs index 82a608816260..50b41b5fc64e 100644 --- a/substrate/frame/nft-fractionalization/src/mock.rs +++ b/substrate/frame/nft-fractionalization/src/mock.rs @@ -57,20 +57,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = (); } impl pallet_assets::Config for Test { diff --git a/substrate/frame/nfts/src/mock.rs b/substrate/frame/nfts/src/mock.rs index 51cfd5f244bc..5b589f591ca3 100644 --- a/substrate/frame/nfts/src/mock.rs +++ b/substrate/frame/nfts/src/mock.rs @@ -53,20 +53,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! 
{ diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs index 7cbb61e00a31..b9cff7960716 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs @@ -17,7 +17,13 @@ use crate::VoterBagsListInstance; use frame_election_provider_support::VoteWeight; -use frame_support::{derive_impl, pallet_prelude::*, parameter_types, traits::ConstU64, PalletId}; +use frame_support::{ + derive_impl, + pallet_prelude::*, + parameter_types, + traits::{ConstU64, VariantCountOf}, + PalletId, +}; use sp_runtime::{ traits::{Convert, IdentityLookup}, BuildStorage, FixedU128, Perbill, @@ -45,20 +51,16 @@ impl pallet_timestamp::Config for Runtime { parameter_types! { pub const ExistentialDeposit: Balance = 10; } + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; + type MaxFreezes = VariantCountOf; type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = (); + type RuntimeFreezeReason = RuntimeFreezeReason; } pallet_staking_reward_curve::build! 
{ diff --git a/substrate/frame/nomination-pools/src/mock.rs b/substrate/frame/nomination-pools/src/mock.rs index 93fe6aa56054..6c0082073f68 100644 --- a/substrate/frame/nomination-pools/src/mock.rs +++ b/substrate/frame/nomination-pools/src/mock.rs @@ -18,7 +18,8 @@ use super::*; use crate::{self as pools}; use frame_support::{ - assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::fungible::Mutate, + assert_ok, derive_impl, ord_parameter_types, parameter_types, + traits::{fungible::Mutate, VariantCountOf}, PalletId, }; use frame_system::{EnsureSignedBy, RawOrigin}; @@ -251,20 +252,14 @@ parameter_types! { pub static ExistentialDeposit: Balance = 5; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = frame_support::traits::ConstU32<1024>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); + type MaxFreezes = VariantCountOf; + type RuntimeFreezeReason = RuntimeFreezeReason; } pub struct BalanceToU256; diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs index 820f2b7718ce..0a456503ad81 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs @@ -20,7 +20,7 @@ use frame_support::{ assert_ok, derive_impl, pallet_prelude::*, parameter_types, - traits::{ConstU64, ConstU8}, + traits::{ConstU64, ConstU8, VariantCountOf}, PalletId, }; use frame_system::EnsureRoot; @@ -63,20 +63,15 @@ parameter_types! 
{ pub static ExistentialDeposit: Balance = 5; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; + type MaxFreezes = VariantCountOf; type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = (); + type RuntimeFreezeReason = RuntimeFreezeReason; } pallet_staking_reward_curve::build! { diff --git a/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs b/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs index eb9d463424c8..570cdea90460 100644 --- a/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs +++ b/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs @@ -20,7 +20,7 @@ use frame_support::{ assert_ok, derive_impl, pallet_prelude::*, parameter_types, - traits::{ConstU64, ConstU8}, + traits::{ConstU64, ConstU8, VariantCountOf}, PalletId, }; use sp_runtime::{ @@ -56,20 +56,14 @@ parameter_types! { pub static ExistentialDeposit: Balance = 5; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); + type MaxFreezes = VariantCountOf; + type RuntimeFreezeReason = RuntimeFreezeReason; } pallet_staking_reward_curve::build! 
{ diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index 6cbdde578528..e45d280ba52e 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -41,20 +41,10 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = ConstU32<128>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_timestamp::Config for Test { diff --git a/substrate/frame/parameters/src/tests/mock.rs b/substrate/frame/parameters/src/tests/mock.rs index 6cfd7c8f30b8..53a3b3e394c4 100644 --- a/substrate/frame/parameters/src/tests/mock.rs +++ b/substrate/frame/parameters/src/tests/mock.rs @@ -37,7 +37,6 @@ impl frame_system::Config for Runtime { #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type ReserveIdentifier = [u8; 8]; type AccountStore = System; } diff --git a/substrate/frame/parameters/src/tests/test_renamed.rs b/substrate/frame/parameters/src/tests/test_renamed.rs index cfc870fbe109..7c371c5e55f8 100644 --- a/substrate/frame/parameters/src/tests/test_renamed.rs +++ b/substrate/frame/parameters/src/tests/test_renamed.rs @@ -39,7 +39,6 @@ impl frame_system::Config for Runtime { #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type ReserveIdentifier = [u8; 8]; type AccountStore = System; } diff --git a/substrate/frame/preimage/src/mock.rs 
b/substrate/frame/preimage/src/mock.rs index 903c34596aeb..9c72d09cae14 100644 --- a/substrate/frame/preimage/src/mock.rs +++ b/substrate/frame/preimage/src/mock.rs @@ -22,7 +22,7 @@ use super::*; use crate as pallet_preimage; use frame_support::{ derive_impl, ord_parameter_types, parameter_types, - traits::{fungible::HoldConsideration, ConstU32, ConstU64}, + traits::{fungible::HoldConsideration, ConstU64}, }; use frame_system::EnsureSignedBy; use sp_core::H256; @@ -48,20 +48,10 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<5>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = (); } ord_parameter_types! { diff --git a/substrate/frame/recovery/src/mock.rs b/substrate/frame/recovery/src/mock.rs index bec7e02c128b..8e30cbe997e1 100644 --- a/substrate/frame/recovery/src/mock.rs +++ b/substrate/frame/recovery/src/mock.rs @@ -47,20 +47,11 @@ parameter_types! { pub const ExistentialDeposit: u64 = 1; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! 
{ diff --git a/substrate/frame/referenda/src/mock.rs b/substrate/frame/referenda/src/mock.rs index 135476d7cb13..d47da4558119 100644 --- a/substrate/frame/referenda/src/mock.rs +++ b/substrate/frame/referenda/src/mock.rs @@ -83,20 +83,9 @@ impl pallet_scheduler::Config for Test { type OriginPrivilegeCmp = EqualPrivilegeOnly; type Preimages = Preimage; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type MaxLocks = ConstU32<10>; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { pub static AlarmInterval: u64 = 1; diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index 7e7332c3f7e3..ea7044fb6a34 100644 --- a/substrate/frame/root-offences/src/mock.rs +++ b/substrate/frame/root-offences/src/mock.rs @@ -84,20 +84,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } pallet_staking_reward_curve::build! 
{ diff --git a/substrate/frame/safe-mode/src/mock.rs b/substrate/frame/safe-mode/src/mock.rs index 0beb911267dc..ec1ad8249514 100644 --- a/substrate/frame/safe-mode/src/mock.rs +++ b/substrate/frame/safe-mode/src/mock.rs @@ -68,20 +68,10 @@ pub enum HoldReason { SafeMode, } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU64<2>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<10>; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } impl pallet_utility::Config for Test { diff --git a/substrate/frame/scored-pool/src/mock.rs b/substrate/frame/scored-pool/src/mock.rs index 9d2f5eb1099f..7708c06e56bd 100644 --- a/substrate/frame/scored-pool/src/mock.rs +++ b/substrate/frame/scored-pool/src/mock.rs @@ -52,20 +52,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! 
{ diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 5cba79ef5b9a..b79bae73270e 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -54,20 +54,10 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_timestamp::Config for Test { diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index 8c60dec65a81..6d65500ef907 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -124,20 +124,12 @@ impl frame_system::Config for Test { type Block = Block; type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { type MaxLocks = frame_support::traits::ConstU32<1024>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } sp_runtime::impl_opaque_keys! 
{ diff --git a/substrate/frame/statement/src/mock.rs b/substrate/frame/statement/src/mock.rs index 35d51e7a27bf..34afd332c083 100644 --- a/substrate/frame/statement/src/mock.rs +++ b/substrate/frame/statement/src/mock.rs @@ -51,20 +51,10 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<5>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; } ord_parameter_types! { diff --git a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs index 5b74cc172c6e..6330a138e2f2 100644 --- a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs +++ b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs @@ -340,7 +340,7 @@ mod runtime { pub type Module1_9 = module1; } -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; diff --git a/substrate/frame/tips/src/tests.rs b/substrate/frame/tips/src/tests.rs index 78df3736815a..32a31b7fa13a 100644 --- a/substrate/frame/tips/src/tests.rs +++ b/substrate/frame/tips/src/tests.rs @@ -65,20 +65,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type 
MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { static TenToFourteenTestValue: Vec = vec![10,11,12,13,14]; diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs index cc43cffd7deb..3f8c7bc0ea34 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs @@ -98,20 +98,10 @@ parameter_types! { pub const ExistentialDeposit: u64 = 10; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; - type MaxLocks = (); - type WeightInfo = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl WeightToFeeT for WeightToFee { diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs index fce712c3eba3..e84df1e4eb91 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs @@ -81,20 +81,10 @@ parameter_types! 
{ pub const ExistentialDeposit: u64 = 10; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; - type MaxLocks = (); - type WeightInfo = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl WeightToFeeT for WeightToFee { diff --git a/substrate/frame/transaction-payment/src/mock.rs b/substrate/frame/transaction-payment/src/mock.rs index 7b731eeb8250..fa61572e9831 100644 --- a/substrate/frame/transaction-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/src/mock.rs @@ -21,7 +21,7 @@ use frame_support::{ derive_impl, dispatch::DispatchClass, parameter_types, - traits::{fungible, ConstU64, Imbalance, OnUnbalanced}, + traits::{fungible, Imbalance, OnUnbalanced}, weights::{Weight, WeightToFee as WeightToFeeT}, }; use frame_system as system; @@ -73,20 +73,9 @@ impl frame_system::Config for Runtime { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl WeightToFeeT for WeightToFee { diff --git a/substrate/frame/treasury/src/tests.rs b/substrate/frame/treasury/src/tests.rs index 67d81cb5c302..e8b9270cd965 100644 --- a/substrate/frame/treasury/src/tests.rs +++ b/substrate/frame/treasury/src/tests.rs @@ -60,20 +60,10 @@ 
impl frame_system::Config for Test { type Block = Block; type AccountData = pallet_balances::AccountData; } + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_utility::Config for Test { diff --git a/substrate/frame/tx-pause/src/mock.rs b/substrate/frame/tx-pause/src/mock.rs index f42d4cb58a2a..84ce45e83528 100644 --- a/substrate/frame/tx-pause/src/mock.rs +++ b/substrate/frame/tx-pause/src/mock.rs @@ -36,24 +36,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } -parameter_types! { - pub const ExistentialDeposit: u64 = 1; - pub const MaxLocks: u32 = 10; -} +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = MaxLocks; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type MaxFreezes = ConstU32<0>; } impl pallet_utility::Config for Test { diff --git a/substrate/frame/uniques/src/mock.rs b/substrate/frame/uniques/src/mock.rs index 9fd7f87e159b..c3b74eb8c255 100644 --- a/substrate/frame/uniques/src/mock.rs +++ b/substrate/frame/uniques/src/mock.rs @@ -43,20 +43,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } 
+#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl Config for Test { diff --git a/substrate/frame/utility/src/tests.rs b/substrate/frame/utility/src/tests.rs index 9bcbec99f3b4..eb2047aac28a 100644 --- a/substrate/frame/utility/src/tests.rs +++ b/substrate/frame/utility/src/tests.rs @@ -151,20 +151,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_root_testing::Config for Test { diff --git a/substrate/frame/vesting/src/mock.rs b/substrate/frame/vesting/src/mock.rs index 674a6f6e2a83..f0954a5b989c 100644 --- a/substrate/frame/vesting/src/mock.rs +++ b/substrate/frame/vesting/src/mock.rs @@ -15,10 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use frame_support::{ - derive_impl, parameter_types, - traits::{ConstU32, WithdrawReasons}, -}; +use frame_support::{derive_impl, parameter_types, traits::WithdrawReasons}; use sp_runtime::{traits::Identity, BuildStorage}; use super::*; @@ -41,20 +38,10 @@ impl frame_system::Config for Test { type Block = Block; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { type AccountStore = System; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; - type MaxLocks = ConstU32<10>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { pub const MinVestedTransfer: u64 = 256 * 2; diff --git a/substrate/frame/whitelist/src/mock.rs b/substrate/frame/whitelist/src/mock.rs index 6fb8711057ef..0a97d1c2df54 100644 --- a/substrate/frame/whitelist/src/mock.rs +++ b/substrate/frame/whitelist/src/mock.rs @@ -21,7 +21,7 @@ use crate as pallet_whitelist; -use frame_support::{construct_runtime, derive_impl, traits::ConstU64}; +use frame_support::{construct_runtime, derive_impl}; use frame_system::EnsureRoot; use sp_runtime::BuildStorage; @@ -43,20 +43,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_preimage::Config for Test { diff --git 
a/templates/parachain/runtime/src/configs/mod.rs b/templates/parachain/runtime/src/configs/mod.rs index 63e6a67a9063..8a410a27e4a7 100644 --- a/templates/parachain/runtime/src/configs/mod.rs +++ b/templates/parachain/runtime/src/configs/mod.rs @@ -32,7 +32,9 @@ use frame_support::{ derive_impl, dispatch::DispatchClass, parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, + traits::{ + ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin, VariantCountOf, + }, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -154,8 +156,8 @@ impl pallet_balances::Config for Runtime { type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; + type FreezeIdentifier = RuntimeFreezeReason; + type MaxFreezes = VariantCountOf; } parameter_types! { diff --git a/templates/solochain/runtime/src/lib.rs b/templates/solochain/runtime/src/lib.rs index 93a56fb0ad78..c147845fe2fe 100644 --- a/templates/solochain/runtime/src/lib.rs +++ b/templates/solochain/runtime/src/lib.rs @@ -18,7 +18,6 @@ use sp_std::prelude::*; use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use frame_support::genesis_builder_helper::{build_state, get_preset}; pub use frame_support::{ construct_runtime, derive_impl, parameter_types, traits::{ @@ -33,6 +32,10 @@ pub use frame_support::{ }, StorageValue, }; +use frame_support::{ + genesis_builder_helper::{build_state, get_preset}, + traits::VariantCountOf, +}; pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; pub use pallet_timestamp::Call as TimestampCall; @@ -218,10 +221,10 @@ impl pallet_balances::Config for Runtime { type ExistentialDeposit = ConstU128; type AccountStore = System; type WeightInfo = pallet_balances::weights::SubstrateWeight; - type FreezeIdentifier = (); - type MaxFreezes = (); - type 
RuntimeHoldReason = (); - type RuntimeFreezeReason = (); + type FreezeIdentifier = RuntimeFreezeReason; + type MaxFreezes = VariantCountOf<RuntimeFreezeReason>; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; } parameter_types! { From 935c7f461ae8b4e607f1db16322ea952b438650e Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Thu, 13 Jun 2024 13:53:07 +0200 Subject: [PATCH 26/52] Add `--version` to the `ChainSpecBuilder` command for `staging-chain-spec-builder` (#4752) ## TODO - [x] test/confirm that the release script is ok --------- Co-authored-by: Javier Viola <363911+pepoviola@users.noreply.github.com> Co-authored-by: Egor_P --- .../release-30_publish_release_draft.yml | 20 ++++++++----------- .../bin/utils/chain-spec-builder/src/lib.rs | 2 +- substrate/utils/frame/omni-bencher/Cargo.toml | 2 +- 3 files changed, 10 insertions(+), 14 deletions(-) diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index f39eb4c1716e..20492f2d3a91 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -31,7 +31,8 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - binary: [ frame-omni-bencher, chain-spec-builder ] + # Tuples of [package, binary-name] + binary: [ [frame-omni-bencher, frame-omni-bencher], [staging-chain-spec-builder, chain-spec-builder] ] steps: - name: Checkout sources uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 @@ -41,21 +42,16 @@ sudo apt update sudo apt install -y protobuf-compiler - - name: Build ${{ matrix.binary }} binary + - name: Build ${{ matrix.binary[1] }} binary run: | - if [[ ${{ matrix.binary }} =~ chain-spec-builder ]]; then - cargo build --locked --profile=production -p staging-${{ matrix.binary }} --bin ${{ matrix.binary }} - target/production/${{ matrix.binary }} -h - else - cargo build --locked --profile=production -p ${{ matrix.binary
}} - target/production/${{ matrix.binary }} --version - fi + cargo build --locked --profile=production -p ${{ matrix.binary[0] }} --bin ${{ matrix.binary[1] }} + target/production/${{ matrix.binary[1] }} --version - - name: Upload ${{ matrix.binary }} binary + - name: Upload ${{ matrix.binary[1] }} binary uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - name: ${{ matrix.binary }} - path: target/production/${{ matrix.binary }} + name: ${{ matrix.binary[1] }} + path: target/production/${{ matrix.binary[1] }} publish-release-draft: diff --git a/substrate/bin/utils/chain-spec-builder/src/lib.rs b/substrate/bin/utils/chain-spec-builder/src/lib.rs index 0f7c003fc8c2..4c00bb3551b3 100644 --- a/substrate/bin/utils/chain-spec-builder/src/lib.rs +++ b/substrate/bin/utils/chain-spec-builder/src/lib.rs @@ -125,7 +125,7 @@ use serde_json::Value; /// A utility to easily create a chain spec definition. #[derive(Debug, Parser)] -#[command(rename_all = "kebab-case")] +#[command(rename_all = "kebab-case", version, about)] pub struct ChainSpecBuilder { #[command(subcommand)] pub command: ChainSpecBuilderCmd, diff --git a/substrate/utils/frame/omni-bencher/Cargo.toml b/substrate/utils/frame/omni-bencher/Cargo.toml index 0c2d1a1b32b1..41e3882c9d79 100644 --- a/substrate/utils/frame/omni-bencher/Cargo.toml +++ b/substrate/utils/frame/omni-bencher/Cargo.toml @@ -11,7 +11,7 @@ license.workspace = true workspace = true [dependencies] -clap = { version = "4.5.2", features = ["derive"] } +clap = { version = "4.5.3", features = ["derive"] } cumulus-primitives-proof-size-hostfunction = { path = "../../../../cumulus/primitives/proof-size-hostfunction" } frame-benchmarking-cli = { path = "../benchmarking-cli", default-features = false } sc-cli = { path = "../../../client/cli" } From 7b6b783cd1a3953ef5fa6e53f3965b1454e3efc8 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Thu, 13 Jun 2024 18:27:51 +0200 Subject: [PATCH 27/52] [Backport] Version bumps and prdoc 
reorg from 1.13.0 (#4784) This PR backports regular version bumps and prdocs reordering from the release branch back to master --- cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs | 2 +- cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- .../runtimes/collectives/collectives-westend/src/lib.rs | 2 +- .../parachains/runtimes/contracts/contracts-rococo/src/lib.rs | 2 +- cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs | 2 +- .../parachains/runtimes/coretime/coretime-westend/src/lib.rs | 2 +- cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs | 2 +- cumulus/parachains/runtimes/people/people-rococo/src/lib.rs | 2 +- cumulus/parachains/runtimes/people/people-westend/src/lib.rs | 2 +- cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs | 2 +- polkadot/node/primitives/src/lib.rs | 2 +- polkadot/runtime/rococo/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 2 +- prdoc/{ => 1.13.0}/pr_1223.prdoc | 0 prdoc/{ => 1.13.0}/pr_1644.prdoc | 0 prdoc/{ => 1.13.0}/pr_3393.prdoc | 0 prdoc/{ => 1.13.0}/pr_3905.prdoc | 0 prdoc/{ => 1.13.0}/pr_3935.prdoc | 0 prdoc/{ => 1.13.0}/pr_3952.prdoc | 0 prdoc/{ => 1.13.0}/pr_4131.prdoc | 0 prdoc/{ => 1.13.0}/pr_4198.prdoc | 0 prdoc/{ => 1.13.0}/pr_4233.prdoc | 0 prdoc/{ => 1.13.0}/pr_4249.prdoc | 0 prdoc/{ => 1.13.0}/pr_4274.prdoc | 0 prdoc/{ => 1.13.0}/pr_4339.prdoc | 0 prdoc/{ => 1.13.0}/pr_4380.prdoc | 0 prdoc/{ => 1.13.0}/pr_4392.prdoc | 0 prdoc/{ => 1.13.0}/pr_4410.prdoc | 0 prdoc/{ => 1.13.0}/pr_4418.prdoc | 0 prdoc/{ => 1.13.0}/pr_4431.prdoc | 0 prdoc/{ => 1.13.0}/pr_4444.prdoc | 0 prdoc/{ => 1.13.0}/pr_4465.prdoc | 0 prdoc/{ => 1.13.0}/pr_4471.prdoc | 0 prdoc/{ => 1.13.0}/pr_4472.prdoc | 0 prdoc/{ => 1.13.0}/pr_4475.prdoc | 0 prdoc/{ => 1.13.0}/pr_4478.prdoc | 0 prdoc/{ => 1.13.0}/pr_4503.prdoc | 0 prdoc/{ => 1.13.0}/pr_4510.prdoc | 0 
prdoc/{ => 1.13.0}/pr_4514.prdoc | 0 prdoc/{ => 1.13.0}/pr_4521.prdoc | 0 prdoc/{ => 1.13.0}/pr_4533.prdoc | 0 prdoc/{ => 1.13.0}/pr_4534.prdoc | 0 prdoc/{ => 1.13.0}/pr_4537.prdoc | 0 prdoc/{ => 1.13.0}/pr_4541.prdoc | 0 prdoc/{ => 1.13.0}/pr_4542.prdoc | 0 prdoc/{ => 1.13.0}/pr_4555.prdoc | 0 prdoc/{ => 1.13.0}/pr_4571.prdoc | 0 prdoc/{ => 1.13.0}/pr_4595.prdoc | 0 prdoc/{ => 1.13.0}/pr_4621.prdoc | 0 prdoc/{ => 1.13.0}/pr_4633.prdoc | 0 prdoc/{ => 1.13.0}/pr_4634.prdoc | 0 prdoc/{ => 1.13.0}/pr_4645.prdoc | 0 prdoc/{ => 1.13.0}/pr_4646.prdoc | 0 prdoc/{ => 1.13.0}/pr_4721.prdoc | 0 56 files changed, 15 insertions(+), 15 deletions(-) rename prdoc/{ => 1.13.0}/pr_1223.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_1644.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_3393.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_3905.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_3935.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_3952.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4131.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4198.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4233.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4249.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4274.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4339.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4380.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4392.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4410.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4418.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4431.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4444.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4465.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4471.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4472.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4475.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4478.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4503.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4510.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4514.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4521.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4533.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4534.prdoc 
(100%) rename prdoc/{ => 1.13.0}/pr_4537.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4541.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4542.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4555.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4571.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4595.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4621.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4633.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4634.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4645.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4646.prdoc (100%) rename prdoc/{ => 1.13.0}/pr_4721.prdoc (100%) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index d75b07bd2b9f..5bba1b568d9d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -118,7 +118,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_013_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index e9c2b10f719d..dcf9565f3300 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -117,7 +117,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westmint"), impl_name: create_runtime_str!("westmint"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_013_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 
e7868bcbc78d..12707d785500 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -214,7 +214,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-rococo"), impl_name: create_runtime_str!("bridge-hub-rococo"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_013_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index e26d490f9ac1..6b2d67e29c4a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -189,7 +189,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-westend"), impl_name: create_runtime_str!("bridge-hub-westend"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_013_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 5fce8e509541..1d3b8c4581a8 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -122,7 +122,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("collectives-westend"), impl_name: create_runtime_str!("collectives-westend"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_013_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs 
b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 2d346e66c6c3..59aae99d6a16 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -142,7 +142,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("contracts-rococo"), impl_name: create_runtime_str!("contracts-rococo"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_013_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 7, diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index b3eaf3d127a2..522ee574176a 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -142,7 +142,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-rococo"), impl_name: create_runtime_str!("coretime-rococo"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_013_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 6c22702ce872..8830f1a42a2a 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -141,7 +141,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-westend"), impl_name: create_runtime_str!("coretime-westend"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_013_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs 
b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 4092fb78594d..910f7569bf95 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -100,7 +100,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("glutton-westend"), impl_name: create_runtime_str!("glutton-westend"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_013_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index c80f6879fb34..bd189c31114c 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -132,7 +132,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-rococo"), impl_name: create_runtime_str!("people-rococo"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_013_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 06c938b8a40c..f071a5f0c9b1 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -132,7 +132,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-westend"), impl_name: create_runtime_str!("people-westend"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_013_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index fd4716ab972e..bf45b437f8bb 100644 
--- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -107,7 +107,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("test-parachain"), impl_name: create_runtime_str!("test-parachain"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_013_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index aded1b8fe734..ecf79eac2883 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -59,7 +59,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. -pub const NODE_VERSION: &'static str = "1.12.0"; +pub const NODE_VERSION: &'static str = "1.13.0"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 91ca5eb5e31d..ebdcdd0cbed7 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -162,7 +162,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 1_012_000, + spec_version: 1_013_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 26, diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 511b502fea43..c8b1826b4767 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -154,7 +154,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 1_012_000, + spec_version: 
1_013_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 26, diff --git a/prdoc/pr_1223.prdoc b/prdoc/1.13.0/pr_1223.prdoc similarity index 100% rename from prdoc/pr_1223.prdoc rename to prdoc/1.13.0/pr_1223.prdoc diff --git a/prdoc/pr_1644.prdoc b/prdoc/1.13.0/pr_1644.prdoc similarity index 100% rename from prdoc/pr_1644.prdoc rename to prdoc/1.13.0/pr_1644.prdoc diff --git a/prdoc/pr_3393.prdoc b/prdoc/1.13.0/pr_3393.prdoc similarity index 100% rename from prdoc/pr_3393.prdoc rename to prdoc/1.13.0/pr_3393.prdoc diff --git a/prdoc/pr_3905.prdoc b/prdoc/1.13.0/pr_3905.prdoc similarity index 100% rename from prdoc/pr_3905.prdoc rename to prdoc/1.13.0/pr_3905.prdoc diff --git a/prdoc/pr_3935.prdoc b/prdoc/1.13.0/pr_3935.prdoc similarity index 100% rename from prdoc/pr_3935.prdoc rename to prdoc/1.13.0/pr_3935.prdoc diff --git a/prdoc/pr_3952.prdoc b/prdoc/1.13.0/pr_3952.prdoc similarity index 100% rename from prdoc/pr_3952.prdoc rename to prdoc/1.13.0/pr_3952.prdoc diff --git a/prdoc/pr_4131.prdoc b/prdoc/1.13.0/pr_4131.prdoc similarity index 100% rename from prdoc/pr_4131.prdoc rename to prdoc/1.13.0/pr_4131.prdoc diff --git a/prdoc/pr_4198.prdoc b/prdoc/1.13.0/pr_4198.prdoc similarity index 100% rename from prdoc/pr_4198.prdoc rename to prdoc/1.13.0/pr_4198.prdoc diff --git a/prdoc/pr_4233.prdoc b/prdoc/1.13.0/pr_4233.prdoc similarity index 100% rename from prdoc/pr_4233.prdoc rename to prdoc/1.13.0/pr_4233.prdoc diff --git a/prdoc/pr_4249.prdoc b/prdoc/1.13.0/pr_4249.prdoc similarity index 100% rename from prdoc/pr_4249.prdoc rename to prdoc/1.13.0/pr_4249.prdoc diff --git a/prdoc/pr_4274.prdoc b/prdoc/1.13.0/pr_4274.prdoc similarity index 100% rename from prdoc/pr_4274.prdoc rename to prdoc/1.13.0/pr_4274.prdoc diff --git a/prdoc/pr_4339.prdoc b/prdoc/1.13.0/pr_4339.prdoc similarity index 100% rename from prdoc/pr_4339.prdoc rename to prdoc/1.13.0/pr_4339.prdoc diff --git a/prdoc/pr_4380.prdoc b/prdoc/1.13.0/pr_4380.prdoc similarity index 
100% rename from prdoc/pr_4380.prdoc rename to prdoc/1.13.0/pr_4380.prdoc diff --git a/prdoc/pr_4392.prdoc b/prdoc/1.13.0/pr_4392.prdoc similarity index 100% rename from prdoc/pr_4392.prdoc rename to prdoc/1.13.0/pr_4392.prdoc diff --git a/prdoc/pr_4410.prdoc b/prdoc/1.13.0/pr_4410.prdoc similarity index 100% rename from prdoc/pr_4410.prdoc rename to prdoc/1.13.0/pr_4410.prdoc diff --git a/prdoc/pr_4418.prdoc b/prdoc/1.13.0/pr_4418.prdoc similarity index 100% rename from prdoc/pr_4418.prdoc rename to prdoc/1.13.0/pr_4418.prdoc diff --git a/prdoc/pr_4431.prdoc b/prdoc/1.13.0/pr_4431.prdoc similarity index 100% rename from prdoc/pr_4431.prdoc rename to prdoc/1.13.0/pr_4431.prdoc diff --git a/prdoc/pr_4444.prdoc b/prdoc/1.13.0/pr_4444.prdoc similarity index 100% rename from prdoc/pr_4444.prdoc rename to prdoc/1.13.0/pr_4444.prdoc diff --git a/prdoc/pr_4465.prdoc b/prdoc/1.13.0/pr_4465.prdoc similarity index 100% rename from prdoc/pr_4465.prdoc rename to prdoc/1.13.0/pr_4465.prdoc diff --git a/prdoc/pr_4471.prdoc b/prdoc/1.13.0/pr_4471.prdoc similarity index 100% rename from prdoc/pr_4471.prdoc rename to prdoc/1.13.0/pr_4471.prdoc diff --git a/prdoc/pr_4472.prdoc b/prdoc/1.13.0/pr_4472.prdoc similarity index 100% rename from prdoc/pr_4472.prdoc rename to prdoc/1.13.0/pr_4472.prdoc diff --git a/prdoc/pr_4475.prdoc b/prdoc/1.13.0/pr_4475.prdoc similarity index 100% rename from prdoc/pr_4475.prdoc rename to prdoc/1.13.0/pr_4475.prdoc diff --git a/prdoc/pr_4478.prdoc b/prdoc/1.13.0/pr_4478.prdoc similarity index 100% rename from prdoc/pr_4478.prdoc rename to prdoc/1.13.0/pr_4478.prdoc diff --git a/prdoc/pr_4503.prdoc b/prdoc/1.13.0/pr_4503.prdoc similarity index 100% rename from prdoc/pr_4503.prdoc rename to prdoc/1.13.0/pr_4503.prdoc diff --git a/prdoc/pr_4510.prdoc b/prdoc/1.13.0/pr_4510.prdoc similarity index 100% rename from prdoc/pr_4510.prdoc rename to prdoc/1.13.0/pr_4510.prdoc diff --git a/prdoc/pr_4514.prdoc b/prdoc/1.13.0/pr_4514.prdoc similarity index 100% 
rename from prdoc/pr_4514.prdoc rename to prdoc/1.13.0/pr_4514.prdoc diff --git a/prdoc/pr_4521.prdoc b/prdoc/1.13.0/pr_4521.prdoc similarity index 100% rename from prdoc/pr_4521.prdoc rename to prdoc/1.13.0/pr_4521.prdoc diff --git a/prdoc/pr_4533.prdoc b/prdoc/1.13.0/pr_4533.prdoc similarity index 100% rename from prdoc/pr_4533.prdoc rename to prdoc/1.13.0/pr_4533.prdoc diff --git a/prdoc/pr_4534.prdoc b/prdoc/1.13.0/pr_4534.prdoc similarity index 100% rename from prdoc/pr_4534.prdoc rename to prdoc/1.13.0/pr_4534.prdoc diff --git a/prdoc/pr_4537.prdoc b/prdoc/1.13.0/pr_4537.prdoc similarity index 100% rename from prdoc/pr_4537.prdoc rename to prdoc/1.13.0/pr_4537.prdoc diff --git a/prdoc/pr_4541.prdoc b/prdoc/1.13.0/pr_4541.prdoc similarity index 100% rename from prdoc/pr_4541.prdoc rename to prdoc/1.13.0/pr_4541.prdoc diff --git a/prdoc/pr_4542.prdoc b/prdoc/1.13.0/pr_4542.prdoc similarity index 100% rename from prdoc/pr_4542.prdoc rename to prdoc/1.13.0/pr_4542.prdoc diff --git a/prdoc/pr_4555.prdoc b/prdoc/1.13.0/pr_4555.prdoc similarity index 100% rename from prdoc/pr_4555.prdoc rename to prdoc/1.13.0/pr_4555.prdoc diff --git a/prdoc/pr_4571.prdoc b/prdoc/1.13.0/pr_4571.prdoc similarity index 100% rename from prdoc/pr_4571.prdoc rename to prdoc/1.13.0/pr_4571.prdoc diff --git a/prdoc/pr_4595.prdoc b/prdoc/1.13.0/pr_4595.prdoc similarity index 100% rename from prdoc/pr_4595.prdoc rename to prdoc/1.13.0/pr_4595.prdoc diff --git a/prdoc/pr_4621.prdoc b/prdoc/1.13.0/pr_4621.prdoc similarity index 100% rename from prdoc/pr_4621.prdoc rename to prdoc/1.13.0/pr_4621.prdoc diff --git a/prdoc/pr_4633.prdoc b/prdoc/1.13.0/pr_4633.prdoc similarity index 100% rename from prdoc/pr_4633.prdoc rename to prdoc/1.13.0/pr_4633.prdoc diff --git a/prdoc/pr_4634.prdoc b/prdoc/1.13.0/pr_4634.prdoc similarity index 100% rename from prdoc/pr_4634.prdoc rename to prdoc/1.13.0/pr_4634.prdoc diff --git a/prdoc/pr_4645.prdoc b/prdoc/1.13.0/pr_4645.prdoc similarity index 100% rename 
from prdoc/pr_4645.prdoc rename to prdoc/1.13.0/pr_4645.prdoc diff --git a/prdoc/pr_4646.prdoc b/prdoc/1.13.0/pr_4646.prdoc similarity index 100% rename from prdoc/pr_4646.prdoc rename to prdoc/1.13.0/pr_4646.prdoc diff --git a/prdoc/pr_4721.prdoc b/prdoc/1.13.0/pr_4721.prdoc similarity index 100% rename from prdoc/pr_4721.prdoc rename to prdoc/1.13.0/pr_4721.prdoc From 7f7f5fa857502b6e3649081abb6b53c3512bfedb Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Fri, 14 Jun 2024 09:29:04 +0300 Subject: [PATCH 28/52] `polkadot-parachain-bin`: small cosmetics and improvements (#4666) Related to: https://github.com/paritytech/polkadot-sdk/issues/5 A couple of cosmetics and improvements related to `polkadot-parachain-bin`: - Adding some convenience traits in order to avoid declaring long duplicate bounds - Specifically check if the runtime exposes `AuraApi` when executing `start_lookahead_aura_consensus()` - Some fixes for the `RelayChainCli`. Details in the commits description --- cumulus/polkadot-parachain/Cargo.toml | 2 +- cumulus/polkadot-parachain/src/cli.rs | 14 +- cumulus/polkadot-parachain/src/command.rs | 14 +- cumulus/polkadot-parachain/src/common/aura.rs | 68 +++++++ cumulus/polkadot-parachain/src/common/mod.rs | 67 +++++++ .../asset_hub_polkadot_aura.rs | 6 - .../src/fake_runtime_api/aura.rs | 6 - cumulus/polkadot-parachain/src/main.rs | 1 + cumulus/polkadot-parachain/src/service.rs | 177 +++++------------- substrate/client/service/src/config.rs | 2 +- 10 files changed, 192 insertions(+), 165 deletions(-) create mode 100644 cumulus/polkadot-parachain/src/common/aura.rs create mode 100644 cumulus/polkadot-parachain/src/common/mod.rs diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 639b8b3d4dcf..890cf5199169 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -18,6 +18,7 @@ path = "src/main.rs" async-trait = "0.1.79" clap = { version = "4.5.3", features = ["derive"] } 
codec = { package = "parity-scale-codec", version = "3.6.12" } +color-print = "0.3.4" futures = "0.3.28" hex-literal = "0.4.1" log = { workspace = true, default-features = true } @@ -111,7 +112,6 @@ cumulus-client-service = { path = "../client/service" } cumulus-primitives-aura = { path = "../primitives/aura" } cumulus-primitives-core = { path = "../primitives/core" } cumulus-relay-chain-interface = { path = "../client/relay-chain-interface" } -color-print = "0.3.4" [build-dependencies] substrate-build-script-utils = { path = "../../substrate/utils/build-script-utils" } diff --git a/cumulus/polkadot-parachain/src/cli.rs b/cumulus/polkadot-parachain/src/cli.rs index f7d2fd0f0be3..fa4b4da1ba9c 100644 --- a/cumulus/polkadot-parachain/src/cli.rs +++ b/cumulus/polkadot-parachain/src/cli.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . +use clap::{CommandFactory, FromArgMatches}; use std::path::PathBuf; /// Sub-commands supported by the collator. @@ -108,18 +109,19 @@ pub struct RelayChainCli { } impl RelayChainCli { - /// Parse the relay chain CLI parameters using the para chain `Configuration`. + /// Parse the relay chain CLI parameters using the parachain `Configuration`. 
pub fn new<'a>( para_config: &sc_service::Configuration, relay_chain_args: impl Iterator, ) -> Self { + let polkadot_cmd = polkadot_cli::RunCmd::command().no_binary_name(true); + let matches = polkadot_cmd.get_matches_from(relay_chain_args); + let base = FromArgMatches::from_arg_matches(&matches).unwrap_or_else(|e| e.exit()); + let extension = crate::chain_spec::Extensions::try_get(&*para_config.chain_spec); let chain_id = extension.map(|e| e.relay_chain.clone()); + let base_path = para_config.base_path.path().join("polkadot"); - Self { - base_path: Some(base_path), - chain_id, - base: clap::Parser::parse_from(relay_chain_args), - } + Self { base, chain_id, base_path: Some(base_path) } } } diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 653ea3281f0f..6b3f4b4cd0a7 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -530,13 +530,9 @@ pub fn run() -> Result<()> { }), Some(Subcommand::PurgeChain(cmd)) => { let runner = cli.create_runner(cmd)?; + let polkadot_cli = RelayChainCli::new(runner.config(), cli.relaychain_args.iter()); runner.sync_run(|config| { - let polkadot_cli = RelayChainCli::new( - &config, - [RelayChainCli::executable_name()].iter().chain(cli.relaychain_args.iter()), - ); - let polkadot_config = SubstrateCli::create_configuration( &polkadot_cli, &polkadot_cli, @@ -603,6 +599,7 @@ pub fn run() -> Result<()> { Some(Subcommand::Key(cmd)) => Ok(cmd.run(&cli)?), None => { let runner = cli.create_runner(&cli.run.normalize())?; + let polkadot_cli = RelayChainCli::new(runner.config(), cli.relaychain_args.iter()); let collator_options = cli.run.collator_options(); runner.run_node_until_exit(|config| async move { @@ -648,11 +645,6 @@ pub fn run() -> Result<()> { .map(|e| e.para_id) .ok_or("Could not find parachain extension in chain-spec.")?; - let polkadot_cli = RelayChainCli::new( - &config, - 
[RelayChainCli::executable_name()].iter().chain(cli.relaychain_args.iter()), - ); - let id = ParaId::from(para_id); let parachain_account = @@ -667,7 +659,7 @@ pub fn run() -> Result<()> { info!("Parachain Account: {}", parachain_account); info!("Is collating: {}", if config.role.is_authority() { "yes" } else { "no" }); - match polkadot_config.network.network_backend { + match config.network.network_backend { sc_network::config::NetworkBackendType::Libp2p => start_node::>( config, diff --git a/cumulus/polkadot-parachain/src/common/aura.rs b/cumulus/polkadot-parachain/src/common/aura.rs new file mode 100644 index 000000000000..9f72d847926f --- /dev/null +++ b/cumulus/polkadot-parachain/src/common/aura.rs @@ -0,0 +1,68 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Aura-related primitives for cumulus parachain collators. + +use codec::Codec; +use cumulus_primitives_aura::AuraUnincludedSegmentApi; +use cumulus_primitives_core::BlockT; +use sp_consensus_aura::AuraApi; +use sp_runtime::app_crypto::{AppCrypto, AppPair, AppSignature, Pair}; + +/// Convenience trait for defining the basic bounds of an `AuraId`. +pub trait AuraIdT: AppCrypto + Codec + Send { + /// Extra bounds for the `Pair`. + type BoundedPair: AppPair + AppCrypto; + + /// Extra bounds for the `Signature`. 
+ type BoundedSignature: AppSignature + + TryFrom> + + std::hash::Hash + + sp_runtime::traits::Member + + Codec; +} + +impl AuraIdT for T +where + T: AppCrypto + Codec + Send + Sync, + <::Pair as AppCrypto>::Signature: + TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, +{ + type BoundedPair = ::Pair; + type BoundedSignature = <::Pair as AppCrypto>::Signature; +} + +/// Convenience trait for defining the basic bounds of a parachain runtime that supports +/// the Aura consensus. +pub trait AuraRuntimeApi: + sp_api::ApiExt + + AuraApi::Public> + + AuraUnincludedSegmentApi + + Sized +{ + /// Check if the runtime has the Aura API. + fn has_aura_api(&self, at: Block::Hash) -> bool { + self.has_api::::Public>>(at) + .unwrap_or(false) + } +} + +impl AuraRuntimeApi for T where + T: sp_api::ApiExt + + AuraApi::Public> + + AuraUnincludedSegmentApi +{ +} diff --git a/cumulus/polkadot-parachain/src/common/mod.rs b/cumulus/polkadot-parachain/src/common/mod.rs new file mode 100644 index 000000000000..5adbb4137cd3 --- /dev/null +++ b/cumulus/polkadot-parachain/src/common/mod.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Cumulus parachain collator primitives. 
+ +#![warn(missing_docs)] + +pub mod aura; + +use cumulus_primitives_core::CollectCollationInfo; +use sp_api::{ApiExt, CallApiAt, ConstructRuntimeApi, Metadata}; +use sp_block_builder::BlockBuilder; +use sp_runtime::traits::Block as BlockT; +use sp_session::SessionKeys; +use sp_transaction_pool::runtime_api::TaggedTransactionQueue; + +/// Convenience trait that defines the basic bounds for the `RuntimeApi` of a parachain node. +pub trait NodeRuntimeApi: + ApiExt + + Metadata + + SessionKeys + + BlockBuilder + + TaggedTransactionQueue + + CollectCollationInfo + + Sized +{ +} + +impl NodeRuntimeApi for T where + T: ApiExt + + Metadata + + SessionKeys + + BlockBuilder + + TaggedTransactionQueue + + CollectCollationInfo +{ +} + +/// Convenience trait that defines the basic bounds for the `ConstructRuntimeApi` of a parachain +/// node. +pub trait ConstructNodeRuntimeApi>: + ConstructRuntimeApi + Send + Sync + 'static +{ + /// Basic bounds for the `RuntimeApi` of a parachain node. + type BoundedRuntimeApi: NodeRuntimeApi; +} + +impl> ConstructNodeRuntimeApi for T +where + T: ConstructRuntimeApi + Send + Sync + 'static, + T::RuntimeApi: NodeRuntimeApi, +{ + type BoundedRuntimeApi = T::RuntimeApi; +} diff --git a/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs b/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs index 82c02943c5fc..0b79d338c168 100644 --- a/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs +++ b/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs @@ -105,12 +105,6 @@ sp_api::impl_runtime_apis! 
{ } } - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(_: &::Header) { - unimplemented!() - } - } - impl sp_session::SessionKeys for Runtime { fn generate_session_keys(_: Option>) -> Vec { unimplemented!() diff --git a/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs b/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs index 6b718e912164..823eb9ab584a 100644 --- a/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs +++ b/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs @@ -105,12 +105,6 @@ sp_api::impl_runtime_apis! { } } - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(_: &::Header) { - unimplemented!() - } - } - impl sp_session::SessionKeys for Runtime { fn generate_session_keys(_: Option>) -> Vec { unimplemented!() diff --git a/cumulus/polkadot-parachain/src/main.rs b/cumulus/polkadot-parachain/src/main.rs index 0757bea84aae..2bf659228bc6 100644 --- a/cumulus/polkadot-parachain/src/main.rs +++ b/cumulus/polkadot-parachain/src/main.rs @@ -22,6 +22,7 @@ mod chain_spec; mod cli; mod command; +mod common; mod fake_runtime_api; mod rpc; mod service; diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 19ad75e384ce..9cd3a0037223 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -14,13 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . 
-use codec::{Codec, Decode}; +use codec::Decode; use cumulus_client_cli::CollatorOptions; use cumulus_client_collator::service::CollatorService; use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params as AuraParams}; -use cumulus_client_consensus_common::{ - ParachainBlockImport as TParachainBlockImport, ParachainCandidate, ParachainConsensus, -}; +use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport; use cumulus_client_consensus_proposer::Proposer; #[allow(deprecated)] use cumulus_client_service::old_consensus; @@ -28,22 +26,26 @@ use cumulus_client_service::{ build_network, build_relay_chain_interface, prepare_node_config, start_relay_chain_tasks, BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, StartRelayChainTasksParams, }; -use cumulus_primitives_core::{ - relay_chain::{Hash as PHash, PersistedValidationData, ValidationCode}, - ParaId, -}; +use cumulus_primitives_core::{relay_chain::ValidationCode, ParaId}; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; use sc_rpc::DenyUnsafe; -use sp_core::Pair; use jsonrpsee::RpcModule; -use crate::{fake_runtime_api::aura::RuntimeApi as FakeRuntimeApi, rpc}; -pub use parachains_common::{AccountId, AuraId, Balance, Block, Hash, Header, Nonce}; +use crate::{ + common::{ + aura::{AuraIdT, AuraRuntimeApi}, + ConstructNodeRuntimeApi, + }, + fake_runtime_api::aura::RuntimeApi as FakeRuntimeApi, + rpc, +}; +pub use parachains_common::{AccountId, AuraId, Balance, Block, Hash, Nonce}; use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; -use futures::{lock::Mutex, prelude::*}; +use futures::prelude::*; use prometheus_endpoint::Registry; +use sc_client_api::Backend as ClientApiBackend; use sc_consensus::{ import_queue::{BasicQueue, Verifier as VerifierT}, BlockImportParams, ImportQueue, @@ -53,8 +55,8 @@ use sc_network::{config::FullNetworkConfiguration, service::traits::NetworkBacke use sc_network_sync::SyncingService; 
use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}; use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle}; -use sp_api::{ApiExt, ConstructRuntimeApi, ProvideRuntimeApi}; -use sp_consensus_aura::AuraApi; +use sp_api::{ConstructRuntimeApi, ProvideRuntimeApi}; +use sp_blockchain::HeaderBackend; use sp_core::traits::SpawnEssentialNamed; use sp_keystore::KeystorePtr; use sp_runtime::{ @@ -100,13 +102,7 @@ pub fn new_partial( build_import_queue: BIQ, ) -> Result, sc_service::Error> where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder, + RuntimeApi: ConstructNodeRuntimeApi>, BIQ: FnOnce( Arc>, ParachainBlockImport, @@ -200,16 +196,7 @@ async fn start_node_impl( hwbench: Option, ) -> sc_service::error::Result<(TaskManager, Arc>)> where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + substrate_frame_rpc_system::AccountNonceApi, + RuntimeApi: ConstructNodeRuntimeApi>, RB: Fn( DenyUnsafe, Arc>, @@ -529,61 +516,6 @@ impl BuildOnAccess { } } -/// Special [`ParachainConsensus`] implementation that waits for the upgrade from -/// shell to a parachain runtime that implements Aura. 
-struct WaitForAuraConsensus { - client: Arc, - aura_consensus: Arc>>>>, - relay_chain_consensus: Arc>>>, - _phantom: PhantomData, -} - -impl Clone for WaitForAuraConsensus { - fn clone(&self) -> Self { - Self { - client: self.client.clone(), - aura_consensus: self.aura_consensus.clone(), - relay_chain_consensus: self.relay_chain_consensus.clone(), - _phantom: PhantomData, - } - } -} - -#[async_trait::async_trait] -impl ParachainConsensus for WaitForAuraConsensus -where - Client: sp_api::ProvideRuntimeApi + Send + Sync, - Client::Api: AuraApi, - AuraId: Send + Codec + Sync, -{ - async fn produce_candidate( - &mut self, - parent: &Header, - relay_parent: PHash, - validation_data: &PersistedValidationData, - ) -> Option> { - if self - .client - .runtime_api() - .has_api::>(parent.hash()) - .unwrap_or(false) - { - self.aura_consensus - .lock() - .await - .get_mut() - .produce_candidate(parent, relay_parent, validation_data) - .await - } else { - self.relay_chain_consensus - .lock() - .await - .produce_candidate(parent, relay_parent, validation_data) - .await - } - } -} - struct Verifier { client: Arc, aura_verifier: BuildOnAccess>>, @@ -592,22 +524,16 @@ struct Verifier { } #[async_trait::async_trait] -impl VerifierT for Verifier +impl VerifierT for Verifier where Client: sp_api::ProvideRuntimeApi + Send + Sync, - Client::Api: AuraApi, - AuraId: Send + Sync + Codec, + Client::Api: AuraRuntimeApi, { async fn verify( &mut self, block_import: BlockImportParams, ) -> Result, String> { - if self - .client - .runtime_api() - .has_api::>(*block_import.header.parent_hash()) - .unwrap_or(false) - { + if self.client.runtime_api().has_aura_api(*block_import.header.parent_hash()) { self.aura_verifier.get_mut().verify(block_import).await } else { self.relay_chain_verifier.verify(block_import).await @@ -617,7 +543,7 @@ where /// Build the import queue for parachain runtimes that started with relay chain consensus and /// switched to aura. 
-pub fn build_relay_to_aura_import_queue( +pub fn build_relay_to_aura_import_queue( client: Arc>, block_import: ParachainBlockImport, config: &Configuration, @@ -625,16 +551,8 @@ pub fn build_relay_to_aura_import_queue( task_manager: &TaskManager, ) -> Result, sc_service::Error> where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + sp_consensus_aura::AuraApi::Pair as Pair>::Public>, - <::Pair as Pair>::Signature: - TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi, { let verifier_client = client.clone(); @@ -714,11 +632,7 @@ pub async fn start_generic_aura_lookahead_node> /// /// Uses the lookahead collator to support async backing. #[sc_tracing::logging::prefix_logs_with("Parachain")] -pub async fn start_asset_hub_lookahead_node< - RuntimeApi, - AuraId: AppCrypto + Send + Codec + Sync, - Net, ->( +pub async fn start_asset_hub_lookahead_node( parachain_config: Configuration, polkadot_config: Configuration, collator_options: CollatorOptions, @@ -726,20 +640,10 @@ pub async fn start_asset_hub_lookahead_node< hwbench: Option, ) -> sc_service::error::Result<(TaskManager, Arc>)> where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + sp_consensus_aura::AuraApi::Pair as Pair>::Public> + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + 
substrate_frame_rpc_system::AccountNonceApi - + cumulus_primitives_aura::AuraUnincludedSegmentApi, - <::Pair as Pair>::Signature: - TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, + + substrate_frame_rpc_system::AccountNonceApi, Net: NetworkBackend, { start_node_impl::( @@ -807,11 +711,7 @@ where // Check if we have upgraded to an Aura compatible runtime and transition if // necessary. - if client - .runtime_api() - .has_api::>(last_head_hash) - .unwrap_or(false) - { + if client.runtime_api().has_aura_api(last_head_hash) { // Respond to this request before transitioning to Aura. request.complete(None); break @@ -930,14 +830,14 @@ fn start_relay_chain_consensus( } /// Start consensus using the lookahead aura collator. -fn start_lookahead_aura_consensus( - client: Arc>, - block_import: ParachainBlockImport, +fn start_lookahead_aura_consensus( + client: Arc>, + block_import: ParachainBlockImport, prometheus_registry: Option<&Registry>, telemetry: Option, task_manager: &TaskManager, relay_chain_interface: Arc, - transaction_pool: Arc>>, + transaction_pool: Arc>>, sync_oracle: Arc>, keystore: KeystorePtr, relay_chain_slot_duration: Duration, @@ -946,7 +846,16 @@ fn start_lookahead_aura_consensus( overseer_handle: OverseerHandle, announce_block: Arc>) + Send + Sync>, backend: Arc, -) -> Result<(), sc_service::Error> { +) -> Result<(), sc_service::Error> +where + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi, +{ + let info = backend.blockchain().info(); + if !client.runtime_api().has_aura_api(info.finalized_hash) { + return Err(sc_service::error::Error::Other("Missing aura runtime APIs".to_string())); + } + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( task_manager.spawn_handle(), client.clone(), diff --git a/substrate/client/service/src/config.rs b/substrate/client/service/src/config.rs index 187e18aa3cac..e4788f1f3376 100644 --- a/substrate/client/service/src/config.rs +++ 
b/substrate/client/service/src/config.rs @@ -280,7 +280,7 @@ impl Default for RpcMethods { static mut BASE_PATH_TEMP: Option = None; /// The base path that is used for everything that needs to be written on disk to run a node. -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct BasePath { path: PathBuf, } From 977254ccb1afca975780987ff9f19f356e99378f Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Fri, 14 Jun 2024 13:30:08 +0200 Subject: [PATCH 29/52] Bridges - changes for Bridges V2 - relay client part (#4494) Contains mainly changes/nits/refactors related to the relayer code (`client-substrate` and `lib-substrate-relay`) migrated from the Bridges V2 [branch](https://github.com/paritytech/polkadot-sdk/pull/4427). Relates to: https://github.com/paritytech/parity-bridges-common/issues/2976 Companion: https://github.com/paritytech/parity-bridges-common/pull/2988 ## TODO - [x] fix comments ## Questions - [x] Do we need more testing for client V2 stuff? If so, how/what is the ultimate test? 
@svyatonik - [x] check [comment](https://github.com/paritytech/polkadot-sdk/pull/4494#issuecomment-2117181144) for more testing --------- Co-authored-by: Svyatoslav Nikolsky Co-authored-by: Serban Iorga --- Cargo.lock | 12 + bridges/modules/relayers/src/lib.rs | 2 +- bridges/primitives/relayers/src/lib.rs | 4 +- bridges/primitives/runtime/src/lib.rs | 10 +- bridges/relays/client-substrate/Cargo.toml | 1 + bridges/relays/client-substrate/src/chain.rs | 3 + bridges/relays/client-substrate/src/client.rs | 1032 ----------------- .../client-substrate/src/client/caching.rs | 468 ++++++++ .../relays/client-substrate/src/client/mod.rs | 91 ++ .../relays/client-substrate/src/client/rpc.rs | 743 ++++++++++++ .../src/{rpc.rs => client/rpc_api.rs} | 54 +- .../src/client/subscription.rs | 239 ++++ .../client-substrate/src/client/traits.rs | 230 ++++ bridges/relays/client-substrate/src/error.rs | 315 ++++- bridges/relays/client-substrate/src/guard.rs | 2 +- bridges/relays/client-substrate/src/lib.rs | 10 +- .../src/metrics/float_storage_value.rs | 45 +- .../src/transaction_tracker.rs | 52 +- .../src/cli/chain_schema.rs | 4 +- .../src/cli/detect_equivocations.rs | 2 +- .../relays/lib-substrate-relay/src/cli/mod.rs | 5 + .../src/cli/relay_headers.rs | 1 + .../src/cli/relay_headers_and_messages/mod.rs | 50 +- .../parachain_to_parachain.rs | 47 +- .../relay_to_parachain.rs | 26 +- .../relay_to_relay.rs | 6 +- .../src/cli/relay_messages.rs | 5 +- .../src/cli/relay_parachains.rs | 21 +- .../src/equivocation/mod.rs | 10 +- .../src/equivocation/source.rs | 30 +- .../src/equivocation/target.rs | 23 +- .../src/finality/initialize.rs | 8 +- .../lib-substrate-relay/src/finality/mod.rs | 18 +- .../src/finality/source.rs | 34 +- .../src/finality/target.rs | 35 +- .../src/finality_base/engine.rs | 74 +- .../src/finality_base/mod.rs | 11 +- .../lib-substrate-relay/src/messages_lane.rs | 55 +- .../src/messages_metrics.rs | 2 +- .../src/messages_source.rs | 114 +- .../src/messages_target.rs | 48 
+- .../src/on_demand/headers.rs | 77 +- .../src/on_demand/parachains.rs | 59 +- .../src/parachains/source.rs | 30 +- .../src/parachains/target.rs | 55 +- .../test-utils/src/test_data/mod.rs | 4 +- 46 files changed, 2688 insertions(+), 1479 deletions(-) delete mode 100644 bridges/relays/client-substrate/src/client.rs create mode 100644 bridges/relays/client-substrate/src/client/caching.rs create mode 100644 bridges/relays/client-substrate/src/client/mod.rs create mode 100644 bridges/relays/client-substrate/src/client/rpc.rs rename bridges/relays/client-substrate/src/{rpc.rs => client/rpc_api.rs} (80%) create mode 100644 bridges/relays/client-substrate/src/client/subscription.rs create mode 100644 bridges/relays/client-substrate/src/client/traits.rs diff --git a/Cargo.lock b/Cargo.lock index a8b08d280158..71b98d2cd5c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15481,6 +15481,17 @@ dependencies = [ "unsigned-varint", ] +[[package]] +name = "quick_cache" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5253a3a0d56548d5b0be25414171dc780cc6870727746d05bd2bde352eee96c5" +dependencies = [ + "ahash 0.8.11", + "hashbrown 0.13.2", + "parking_lot 0.12.1", +] + [[package]] name = "quickcheck" version = "1.0.3" @@ -15916,6 +15927,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-utility", "parity-scale-codec", + "quick_cache", "rand 0.8.5", "relay-utils", "sc-chain-spec", diff --git a/bridges/modules/relayers/src/lib.rs b/bridges/modules/relayers/src/lib.rs index 7a3a0f9ea94c..2c86ec01f5b9 100644 --- a/bridges/modules/relayers/src/lib.rs +++ b/bridges/modules/relayers/src/lib.rs @@ -63,7 +63,7 @@ pub mod pallet { /// The overarching event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Type of relayer reward. - type Reward: AtLeast32BitUnsigned + Copy + Parameter + MaxEncodedLen; + type Reward: AtLeast32BitUnsigned + Copy + Member + Parameter + MaxEncodedLen; /// Pay rewards scheme. 
type PaymentProcedure: PaymentProcedure; /// Stake and slash scheme. diff --git a/bridges/primitives/relayers/src/lib.rs b/bridges/primitives/relayers/src/lib.rs index 2a9ef6a8e1e9..436f33db4008 100644 --- a/bridges/primitives/relayers/src/lib.rs +++ b/bridges/primitives/relayers/src/lib.rs @@ -140,8 +140,8 @@ pub struct RelayerRewardsKeyProvider(PhantomData<(AccountId, impl StorageDoubleMapKeyProvider for RelayerRewardsKeyProvider where - AccountId: Codec + EncodeLike, - Reward: Codec + EncodeLike, + AccountId: 'static + Codec + EncodeLike + Send + Sync, + Reward: 'static + Codec + EncodeLike + Send + Sync, { const MAP_NAME: &'static str = "RelayerRewards"; diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs index 5daba0351ad4..d13c9b40efa0 100644 --- a/bridges/primitives/runtime/src/lib.rs +++ b/bridges/primitives/runtime/src/lib.rs @@ -255,9 +255,9 @@ pub trait StorageMapKeyProvider { /// The same as `StorageMap::Hasher1`. type Hasher: StorageHasher; /// The same as `StorageMap::Key1`. - type Key: FullCodec; + type Key: FullCodec + Send + Sync; /// The same as `StorageMap::Value`. - type Value: FullCodec; + type Value: 'static + FullCodec; /// This is a copy of the /// `frame_support::storage::generator::StorageMap::storage_map_final_key`. @@ -277,13 +277,13 @@ pub trait StorageDoubleMapKeyProvider { /// The same as `StorageDoubleMap::Hasher1`. type Hasher1: StorageHasher; /// The same as `StorageDoubleMap::Key1`. - type Key1: FullCodec; + type Key1: FullCodec + Send + Sync; /// The same as `StorageDoubleMap::Hasher2`. type Hasher2: StorageHasher; /// The same as `StorageDoubleMap::Key2`. - type Key2: FullCodec; + type Key2: FullCodec + Send + Sync; /// The same as `StorageDoubleMap::Value`. - type Value: FullCodec; + type Value: 'static + FullCodec; /// This is a copy of the /// `frame_support::storage::generator::StorageDoubleMap::storage_double_map_final_key`. 
diff --git a/bridges/relays/client-substrate/Cargo.toml b/bridges/relays/client-substrate/Cargo.toml index cb7eae4f340c..ea267ea5e302 100644 --- a/bridges/relays/client-substrate/Cargo.toml +++ b/bridges/relays/client-substrate/Cargo.toml @@ -22,6 +22,7 @@ rand = "0.8.5" scale-info = { version = "2.11.1", features = ["derive"] } tokio = { version = "1.37", features = ["rt-multi-thread"] } thiserror = { workspace = true } +quick_cache = "0.3" # Bridge dependencies diff --git a/bridges/relays/client-substrate/src/chain.rs b/bridges/relays/client-substrate/src/chain.rs index 40269fe64c87..227e9c31c5bf 100644 --- a/bridges/relays/client-substrate/src/chain.rs +++ b/bridges/relays/client-substrate/src/chain.rs @@ -36,6 +36,9 @@ use sp_runtime::{ }; use std::{fmt::Debug, time::Duration}; +/// Signed block type of given chain. +pub type SignedBlockOf = ::SignedBlock; + /// Substrate-based chain from minimal relay-client point of view. pub trait Chain: ChainBase + Clone { /// Chain name. diff --git a/bridges/relays/client-substrate/src/client.rs b/bridges/relays/client-substrate/src/client.rs deleted file mode 100644 index 2e7cb7455f76..000000000000 --- a/bridges/relays/client-substrate/src/client.rs +++ /dev/null @@ -1,1032 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -//! Substrate node client. - -use crate::{ - chain::{Chain, ChainWithTransactions}, - guard::Environment, - rpc::{ - SubstrateAuthorClient, SubstrateChainClient, SubstrateFinalityClient, - SubstrateFrameSystemClient, SubstrateStateClient, SubstrateSystemClient, - }, - transaction_stall_timeout, AccountKeyPairOf, ChainWithGrandpa, ConnectionParams, Error, HashOf, - HeaderIdOf, Result, SignParam, TransactionTracker, UnsignedTransaction, -}; - -use async_std::sync::{Arc, Mutex, RwLock}; -use async_trait::async_trait; -use bp_runtime::{HeaderIdProvider, StorageDoubleMapKeyProvider, StorageMapKeyProvider}; -use codec::{Decode, Encode}; -use frame_support::weights::Weight; -use futures::{SinkExt, StreamExt}; -use jsonrpsee::{ - core::DeserializeOwned, - ws_client::{WsClient as RpcClient, WsClientBuilder as RpcClientBuilder}, -}; -use num_traits::{Saturating, Zero}; -use pallet_transaction_payment::RuntimeDispatchInfo; -use relay_utils::{relay_loop::RECONNECT_DELAY, STALL_TIMEOUT}; -use sp_core::{ - storage::{StorageData, StorageKey}, - Bytes, Hasher, Pair, -}; -use sp_runtime::{ - traits::Header as HeaderT, - transaction_validity::{TransactionSource, TransactionValidity}, -}; -use sp_trie::StorageProof; -use sp_version::RuntimeVersion; -use std::{cmp::Ordering, future::Future}; - -const SUB_API_GRANDPA_AUTHORITIES: &str = "GrandpaApi_grandpa_authorities"; -const SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF: &str = - "GrandpaApi_generate_key_ownership_proof"; -const SUB_API_TXPOOL_VALIDATE_TRANSACTION: &str = "TaggedTransactionQueue_validate_transaction"; -const SUB_API_TX_PAYMENT_QUERY_INFO: &str = "TransactionPaymentApi_query_info"; -const MAX_SUBSCRIPTION_CAPACITY: usize = 4096; - -/// The difference between best block number and number of its ancestor, that is enough -/// for us to consider that ancestor an "ancient" block with dropped state. 
-/// -/// The relay does not assume that it is connected to the archive node, so it always tries -/// to use the best available chain state. But sometimes it still may use state of some -/// old block. If the state of that block is already dropped, relay will see errors when -/// e.g. it tries to prove something. -/// -/// By default Substrate-based nodes are storing state for last 256 blocks. We'll use -/// half of this value. -pub const ANCIENT_BLOCK_THRESHOLD: u32 = 128; - -/// Returns `true` if we think that the state is already discarded for given block. -pub fn is_ancient_block + PartialOrd + Saturating>(block: N, best: N) -> bool { - best.saturating_sub(block) >= N::from(ANCIENT_BLOCK_THRESHOLD) -} - -/// Opaque justifications subscription type. -pub struct Subscription( - pub(crate) Mutex>>, - // The following field is not explicitly used by the code. But when it is dropped, - // the bakground task receives a shutdown signal. - #[allow(dead_code)] pub(crate) futures::channel::oneshot::Sender<()>, -); - -/// Opaque GRANDPA authorities set. -pub type OpaqueGrandpaAuthoritiesSet = Vec; - -/// A simple runtime version. It only includes the `spec_version` and `transaction_version`. -#[derive(Copy, Clone, Debug)] -pub struct SimpleRuntimeVersion { - /// Version of the runtime specification. - pub spec_version: u32, - /// All existing dispatches are fully compatible when this number doesn't change. - pub transaction_version: u32, -} - -impl SimpleRuntimeVersion { - /// Create a new instance of `SimpleRuntimeVersion` from a `RuntimeVersion`. - pub const fn from_runtime_version(runtime_version: &RuntimeVersion) -> Self { - Self { - spec_version: runtime_version.spec_version, - transaction_version: runtime_version.transaction_version, - } - } -} - -/// Chain runtime version in client -#[derive(Copy, Clone, Debug)] -pub enum ChainRuntimeVersion { - /// Auto query from chain. - Auto, - /// Custom runtime version, defined by user. 
- Custom(SimpleRuntimeVersion), -} - -/// Substrate client type. -/// -/// Cloning `Client` is a cheap operation that only clones internal references. Different -/// clones of the same client are guaranteed to use the same references. -pub struct Client { - // Lock order: `submit_signed_extrinsic_lock`, `data` - /// Client connection params. - params: Arc, - /// Saved chain runtime version. - chain_runtime_version: ChainRuntimeVersion, - /// If several tasks are submitting their transactions simultaneously using - /// `submit_signed_extrinsic` method, they may get the same transaction nonce. So one of - /// transactions will be rejected from the pool. This lock is here to prevent situations like - /// that. - submit_signed_extrinsic_lock: Arc>, - /// Genesis block hash. - genesis_hash: HashOf, - /// Shared dynamic data. - data: Arc>, -} - -/// Client data, shared by all `Client` clones. -struct ClientData { - /// Tokio runtime handle. - tokio: Arc, - /// Substrate RPC client. - client: Arc, -} - -/// Already encoded value. 
-struct PreEncoded(Vec); - -impl Encode for PreEncoded { - fn encode(&self) -> Vec { - self.0.clone() - } -} - -#[async_trait] -impl relay_utils::relay_loop::Client for Client { - type Error = Error; - - async fn reconnect(&mut self) -> Result<()> { - let mut data = self.data.write().await; - let (tokio, client) = Self::build_client(&self.params).await?; - data.tokio = tokio; - data.client = client; - Ok(()) - } -} - -impl Clone for Client { - fn clone(&self) -> Self { - Client { - params: self.params.clone(), - chain_runtime_version: self.chain_runtime_version, - submit_signed_extrinsic_lock: self.submit_signed_extrinsic_lock.clone(), - genesis_hash: self.genesis_hash, - data: self.data.clone(), - } - } -} - -impl std::fmt::Debug for Client { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("Client").field("genesis_hash", &self.genesis_hash).finish() - } -} - -impl Client { - /// Returns client that is able to call RPCs on Substrate node over websocket connection. - /// - /// This function will keep connecting to given Substrate node until connection is established - /// and is functional. If attempt fail, it will wait for `RECONNECT_DELAY` and retry again. - pub async fn new(params: ConnectionParams) -> Self { - let params = Arc::new(params); - loop { - match Self::try_connect(params.clone()).await { - Ok(client) => return client, - Err(error) => log::error!( - target: "bridge", - "Failed to connect to {} node: {:?}. Going to retry in {}s", - C::NAME, - error, - RECONNECT_DELAY.as_secs(), - ), - } - - async_std::task::sleep(RECONNECT_DELAY).await; - } - } - - /// Try to connect to Substrate node over websocket. Returns Substrate RPC client if connection - /// has been established or error otherwise. 
- pub async fn try_connect(params: Arc) -> Result { - let (tokio, client) = Self::build_client(¶ms).await?; - - let number: C::BlockNumber = Zero::zero(); - let genesis_hash_client = client.clone(); - let genesis_hash = tokio - .spawn(async move { - SubstrateChainClient::::block_hash(&*genesis_hash_client, Some(number)).await - }) - .await??; - - let chain_runtime_version = params.chain_runtime_version; - let mut client = Self { - params, - chain_runtime_version, - submit_signed_extrinsic_lock: Arc::new(Mutex::new(())), - genesis_hash, - data: Arc::new(RwLock::new(ClientData { tokio, client })), - }; - Self::ensure_correct_runtime_version(&mut client, chain_runtime_version).await?; - Ok(client) - } - - // Check runtime version to understand if we need are connected to expected version, or we - // need to wait for upgrade, we need to abort immediately. - async fn ensure_correct_runtime_version>( - env: &mut E, - expected: ChainRuntimeVersion, - ) -> Result<()> { - // we are only interested if version mode is bundled or passed using CLI - let expected = match expected { - ChainRuntimeVersion::Auto => return Ok(()), - ChainRuntimeVersion::Custom(expected) => expected, - }; - - // we need to wait if actual version is < than expected, we are OK of versions are the - // same and we need to abort if actual version is > than expected - let actual = SimpleRuntimeVersion::from_runtime_version(&env.runtime_version().await?); - match actual.spec_version.cmp(&expected.spec_version) { - Ordering::Less => - Err(Error::WaitingForRuntimeUpgrade { chain: C::NAME.into(), expected, actual }), - Ordering::Equal => Ok(()), - Ordering::Greater => { - log::error!( - target: "bridge", - "The {} client is configured to use runtime version {expected:?} and actual \ - version is {actual:?}. Aborting", - C::NAME, - ); - env.abort().await; - Err(Error::Custom("Aborted".into())) - }, - } - } - - /// Build client to use in connection. 
- async fn build_client( - params: &ConnectionParams, - ) -> Result<(Arc, Arc)> { - let tokio = tokio::runtime::Runtime::new()?; - - let uri = match params.uri { - Some(ref uri) => uri.clone(), - None => { - format!( - "{}://{}:{}{}", - if params.secure { "wss" } else { "ws" }, - params.host, - params.port, - match params.path { - Some(ref path) => format!("/{}", path), - None => String::new(), - }, - ) - }, - }; - log::info!(target: "bridge", "Connecting to {} node at {}", C::NAME, uri); - - let client = tokio - .spawn(async move { - RpcClientBuilder::default() - .max_buffer_capacity_per_subscription(MAX_SUBSCRIPTION_CAPACITY) - .build(&uri) - .await - }) - .await??; - - Ok((Arc::new(tokio), Arc::new(client))) - } -} - -impl Client { - /// Return simple runtime version, only include `spec_version` and `transaction_version`. - pub async fn simple_runtime_version(&self) -> Result { - Ok(match &self.chain_runtime_version { - ChainRuntimeVersion::Auto => { - let runtime_version = self.runtime_version().await?; - SimpleRuntimeVersion::from_runtime_version(&runtime_version) - }, - ChainRuntimeVersion::Custom(version) => *version, - }) - } - - /// Returns true if client is connected to at least one peer and is in synced state. - pub async fn ensure_synced(&self) -> Result<()> { - self.jsonrpsee_execute(|client| async move { - let health = SubstrateSystemClient::::health(&*client).await?; - let is_synced = !health.is_syncing && (!health.should_have_peers || health.peers > 0); - if is_synced { - Ok(()) - } else { - Err(Error::ClientNotSynced(health)) - } - }) - .await - } - - /// Return hash of the genesis block. - pub fn genesis_hash(&self) -> &C::Hash { - &self.genesis_hash - } - - /// Return hash of the best finalized block. - pub async fn best_finalized_header_hash(&self) -> Result { - self.jsonrpsee_execute(|client| async move { - Ok(SubstrateChainClient::::finalized_head(&*client).await?) 
- }) - .await - .map_err(|e| Error::FailedToReadBestFinalizedHeaderHash { - chain: C::NAME.into(), - error: e.boxed(), - }) - } - - /// Return number of the best finalized block. - pub async fn best_finalized_header_number(&self) -> Result { - Ok(*self.best_finalized_header().await?.number()) - } - - /// Return header of the best finalized block. - pub async fn best_finalized_header(&self) -> Result { - self.header_by_hash(self.best_finalized_header_hash().await?).await - } - - /// Returns the best Substrate header. - pub async fn best_header(&self) -> Result - where - C::Header: DeserializeOwned, - { - self.jsonrpsee_execute(|client| async move { - Ok(SubstrateChainClient::::header(&*client, None).await?) - }) - .await - .map_err(|e| Error::FailedToReadBestHeader { chain: C::NAME.into(), error: e.boxed() }) - } - - /// Get a Substrate block from its hash. - pub async fn get_block(&self, block_hash: Option) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateChainClient::::block(&*client, block_hash).await?) - }) - .await - } - - /// Get a Substrate header by its hash. - pub async fn header_by_hash(&self, block_hash: C::Hash) -> Result - where - C::Header: DeserializeOwned, - { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateChainClient::::header(&*client, Some(block_hash)).await?) - }) - .await - .map_err(|e| Error::FailedToReadHeaderByHash { - chain: C::NAME.into(), - hash: format!("{block_hash}"), - error: e.boxed(), - }) - } - - /// Get a Substrate block hash by its number. - pub async fn block_hash_by_number(&self, number: C::BlockNumber) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateChainClient::::block_hash(&*client, Some(number)).await?) - }) - .await - } - - /// Get a Substrate header by its number. 
- pub async fn header_by_number(&self, block_number: C::BlockNumber) -> Result - where - C::Header: DeserializeOwned, - { - let block_hash = Self::block_hash_by_number(self, block_number).await?; - let header_by_hash = Self::header_by_hash(self, block_hash).await?; - Ok(header_by_hash) - } - - /// Return runtime version. - pub async fn runtime_version(&self) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateStateClient::::runtime_version(&*client).await?) - }) - .await - } - - /// Read value from runtime storage. - pub async fn storage_value( - &self, - storage_key: StorageKey, - block_hash: Option, - ) -> Result> { - self.raw_storage_value(storage_key, block_hash) - .await? - .map(|encoded_value| { - T::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed) - }) - .transpose() - } - - /// Read `MapStorage` value from runtime storage. - pub async fn storage_map_value( - &self, - pallet_prefix: &str, - key: &T::Key, - block_hash: Option, - ) -> Result> { - let storage_key = T::final_key(pallet_prefix, key); - - self.raw_storage_value(storage_key, block_hash) - .await? - .map(|encoded_value| { - T::Value::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed) - }) - .transpose() - } - - /// Read `DoubleMapStorage` value from runtime storage. - pub async fn storage_double_map_value( - &self, - pallet_prefix: &str, - key1: &T::Key1, - key2: &T::Key2, - block_hash: Option, - ) -> Result> { - let storage_key = T::final_key(pallet_prefix, key1, key2); - - self.raw_storage_value(storage_key, block_hash) - .await? - .map(|encoded_value| { - T::Value::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed) - }) - .transpose() - } - - /// Read raw value from runtime storage. 
- pub async fn raw_storage_value( - &self, - storage_key: StorageKey, - block_hash: Option, - ) -> Result> { - let cloned_storage_key = storage_key.clone(); - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateStateClient::::storage(&*client, storage_key.clone(), block_hash) - .await?) - }) - .await - .map_err(|e| Error::FailedToReadRuntimeStorageValue { - chain: C::NAME.into(), - key: cloned_storage_key, - error: e.boxed(), - }) - } - - /// Get the nonce of the given Substrate account. - /// - /// Note: It's the caller's responsibility to make sure `account` is a valid SS58 address. - pub async fn next_account_index(&self, account: C::AccountId) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateFrameSystemClient::::account_next_index(&*client, account).await?) - }) - .await - } - - /// Submit unsigned extrinsic for inclusion in a block. - /// - /// Note: The given transaction needs to be SCALE encoded beforehand. - pub async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result { - // one last check that the transaction is valid. Most of checks happen in the relay loop and - // it is the "final" check before submission. 
- let best_header_hash = self.best_header().await?.hash(); - self.validate_transaction(best_header_hash, PreEncoded(transaction.0.clone())) - .await - .map_err(|e| { - log::error!(target: "bridge", "Pre-submit {} transaction validation failed: {:?}", C::NAME, e); - e - })??; - - self.jsonrpsee_execute(move |client| async move { - let tx_hash = SubstrateAuthorClient::::submit_extrinsic(&*client, transaction) - .await - .map_err(|e| { - log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); - e - })?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - Ok(tx_hash) - }) - .await - } - - async fn build_sign_params(&self, signer: AccountKeyPairOf) -> Result> - where - C: ChainWithTransactions, - { - let runtime_version = self.simple_runtime_version().await?; - Ok(SignParam:: { - spec_version: runtime_version.spec_version, - transaction_version: runtime_version.transaction_version, - genesis_hash: self.genesis_hash, - signer, - }) - } - - /// Submit an extrinsic signed by given account. - /// - /// All calls of this method are synchronized, so there can't be more than one active - /// `submit_signed_extrinsic()` call. This guarantees that no nonces collision may happen - /// if all client instances are clones of the same initial `Client`. - /// - /// Note: The given transaction needs to be SCALE encoded beforehand. 
- pub async fn submit_signed_extrinsic( - &self, - signer: &AccountKeyPairOf, - prepare_extrinsic: impl FnOnce(HeaderIdOf, C::Nonce) -> Result> - + Send - + 'static, - ) -> Result - where - C: ChainWithTransactions, - C::AccountId: From<::Public>, - { - let _guard = self.submit_signed_extrinsic_lock.lock().await; - let transaction_nonce = self.next_account_index(signer.public().into()).await?; - let best_header = self.best_header().await?; - let signing_data = self.build_sign_params(signer.clone()).await?; - - // By using parent of best block here, we are protecing again best-block reorganizations. - // E.g. transaction may have been submitted when the best block was `A[num=100]`. Then it - // has been changed to `B[num=100]`. Hash of `A` has been included into transaction - // signature payload. So when signature will be checked, the check will fail and transaction - // will be dropped from the pool. - let best_header_id = best_header.parent_id().unwrap_or_else(|| best_header.id()); - - let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; - let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); - - // one last check that the transaction is valid. Most of checks happen in the relay loop and - // it is the "final" check before submission. 
- self.validate_transaction(best_header_id.1, PreEncoded(signed_extrinsic.clone())) - .await - .map_err(|e| { - log::error!(target: "bridge", "Pre-submit {} transaction validation failed: {:?}", C::NAME, e); - e - })??; - - self.jsonrpsee_execute(move |client| async move { - let tx_hash = - SubstrateAuthorClient::::submit_extrinsic(&*client, Bytes(signed_extrinsic)) - .await - .map_err(|e| { - log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); - e - })?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - Ok(tx_hash) - }) - .await - } - - /// Does exactly the same as `submit_signed_extrinsic`, but keeps watching for extrinsic status - /// after submission. - pub async fn submit_and_watch_signed_extrinsic( - &self, - signer: &AccountKeyPairOf, - prepare_extrinsic: impl FnOnce(HeaderIdOf, C::Nonce) -> Result> - + Send - + 'static, - ) -> Result> - where - C: ChainWithTransactions, - C::AccountId: From<::Public>, - { - let self_clone = self.clone(); - let signing_data = self.build_sign_params(signer.clone()).await?; - let _guard = self.submit_signed_extrinsic_lock.lock().await; - let transaction_nonce = self.next_account_index(signer.public().into()).await?; - let best_header = self.best_header().await?; - let best_header_id = best_header.id(); - - let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; - let stall_timeout = transaction_stall_timeout( - extrinsic.era.mortality_period(), - C::AVERAGE_BLOCK_INTERVAL, - STALL_TIMEOUT, - ); - let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); - - // one last check that the transaction is valid. Most of checks happen in the relay loop and - // it is the "final" check before submission. 
- self.validate_transaction(best_header_id.1, PreEncoded(signed_extrinsic.clone())) - .await - .map_err(|e| { - log::error!(target: "bridge", "Pre-submit {} transaction validation failed: {:?}", C::NAME, e); - e - })??; - - let (cancel_sender, cancel_receiver) = futures::channel::oneshot::channel(); - let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); - let (tracker, subscription) = self - .jsonrpsee_execute(move |client| async move { - let tx_hash = C::Hasher::hash(&signed_extrinsic); - let subscription = SubstrateAuthorClient::::submit_and_watch_extrinsic( - &*client, - Bytes(signed_extrinsic), - ) - .await - .map_err(|e| { - log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); - e - })?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - let tracker = TransactionTracker::new( - self_clone, - stall_timeout, - tx_hash, - Subscription(Mutex::new(receiver), cancel_sender), - ); - Ok((tracker, subscription)) - }) - .await?; - self.data.read().await.tokio.spawn(Subscription::background_worker( - C::NAME.into(), - "extrinsic".into(), - subscription, - sender, - cancel_receiver, - )); - Ok(tracker) - } - - /// Returns pending extrinsics from transaction pool. - pub async fn pending_extrinsics(&self) -> Result> { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateAuthorClient::::pending_extrinsics(&*client).await?) - }) - .await - } - - /// Validate transaction at given block state. 
- pub async fn validate_transaction( - &self, - at_block: C::Hash, - transaction: SignedTransaction, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let call = SUB_API_TXPOOL_VALIDATE_TRANSACTION.to_string(); - let data = Bytes((TransactionSource::External, transaction, at_block).encode()); - - let encoded_response = - SubstrateStateClient::::call(&*client, call, data, Some(at_block)).await?; - let validity = TransactionValidity::decode(&mut &encoded_response.0[..]) - .map_err(Error::ResponseParseFailed)?; - - Ok(validity) - }) - .await - } - - /// Returns weight of the given transaction. - pub async fn extimate_extrinsic_weight( - &self, - transaction: SignedTransaction, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let transaction_len = transaction.encoded_size() as u32; - - let call = SUB_API_TX_PAYMENT_QUERY_INFO.to_string(); - let data = Bytes((transaction, transaction_len).encode()); - - let encoded_response = - SubstrateStateClient::::call(&*client, call, data, None).await?; - let dispatch_info = - RuntimeDispatchInfo::::decode(&mut &encoded_response.0[..]) - .map_err(Error::ResponseParseFailed)?; - - Ok(dispatch_info.weight) - }) - .await - } - - /// Get the GRANDPA authority set at given block. - pub async fn grandpa_authorities_set( - &self, - block: C::Hash, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let call = SUB_API_GRANDPA_AUTHORITIES.to_string(); - let data = Bytes(Vec::new()); - - let encoded_response = - SubstrateStateClient::::call(&*client, call, data, Some(block)).await?; - let authority_list = encoded_response.0; - - Ok(authority_list) - }) - .await - } - - /// Execute runtime call at given block, provided the input and output types. - /// It also performs the input encode and output decode. 
- pub async fn typed_state_call( - &self, - method_name: String, - input: Input, - at_block: Option, - ) -> Result { - let encoded_output = self - .state_call(method_name.clone(), Bytes(input.encode()), at_block) - .await - .map_err(|e| Error::ErrorExecutingRuntimeCall { - chain: C::NAME.into(), - method: method_name, - error: e.boxed(), - })?; - Output::decode(&mut &encoded_output.0[..]).map_err(Error::ResponseParseFailed) - } - - /// Execute runtime call at given block. - pub async fn state_call( - &self, - method: String, - data: Bytes, - at_block: Option, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - SubstrateStateClient::::call(&*client, method, data, at_block) - .await - .map_err(Into::into) - }) - .await - } - - /// Returns storage proof of given storage keys. - pub async fn prove_storage( - &self, - keys: Vec, - at_block: C::Hash, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - SubstrateStateClient::::prove_storage(&*client, keys, Some(at_block)) - .await - .map(|proof| { - StorageProof::new(proof.proof.into_iter().map(|b| b.0).collect::>()) - }) - .map_err(Into::into) - }) - .await - } - - /// Return `tokenDecimals` property from the set of chain properties. - pub async fn token_decimals(&self) -> Result> { - self.jsonrpsee_execute(move |client| async move { - let system_properties = SubstrateSystemClient::::properties(&*client).await?; - Ok(system_properties.get("tokenDecimals").and_then(|v| v.as_u64())) - }) - .await - } - - /// Return new finality justifications stream. - pub async fn subscribe_finality_justifications>( - &self, - ) -> Result> { - let subscription = self - .jsonrpsee_execute(move |client| async move { - Ok(FC::subscribe_justifications(&client).await?) 
- }) - .await?; - let (cancel_sender, cancel_receiver) = futures::channel::oneshot::channel(); - let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); - self.data.read().await.tokio.spawn(Subscription::background_worker( - C::NAME.into(), - "justification".into(), - subscription, - sender, - cancel_receiver, - )); - Ok(Subscription(Mutex::new(receiver), cancel_sender)) - } - - /// Generates a proof of key ownership for the given authority in the given set. - pub async fn generate_grandpa_key_ownership_proof( - &self, - at: HashOf, - set_id: sp_consensus_grandpa::SetId, - authority_id: sp_consensus_grandpa::AuthorityId, - ) -> Result> - where - C: ChainWithGrandpa, - { - self.typed_state_call( - SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF.into(), - (set_id, authority_id), - Some(at), - ) - .await - } - - /// Execute jsonrpsee future in tokio context. - async fn jsonrpsee_execute(&self, make_jsonrpsee_future: MF) -> Result - where - MF: FnOnce(Arc) -> F + Send + 'static, - F: Future> + Send + 'static, - T: Send + 'static, - { - let data = self.data.read().await; - let client = data.client.clone(); - data.tokio.spawn(make_jsonrpsee_future(client)).await? - } - - /// Returns `true` if version guard can be started. - /// - /// There's no reason to run version guard when version mode is set to `Auto`. It can - /// lead to relay shutdown when chain is upgraded, even though we have explicitly - /// said that we don't want to shutdown. - pub fn can_start_version_guard(&self) -> bool { - !matches!(self.chain_runtime_version, ChainRuntimeVersion::Auto) - } -} - -impl Subscription { - /// Consumes subscription and returns future statuses stream. 
- pub fn into_stream(self) -> impl futures::Stream { - futures::stream::unfold(Some(self), |mut this| async move { - let Some(this) = this.take() else { return None }; - let item = this.0.lock().await.next().await.unwrap_or(None); - match item { - Some(item) => Some((item, Some(this))), - None => { - // let's make it explicit here - let _ = this.1.send(()); - None - }, - } - }) - } - - /// Return next item from the subscription. - pub async fn next(&self) -> Result> { - let mut receiver = self.0.lock().await; - let item = receiver.next().await; - Ok(item.unwrap_or(None)) - } - - /// Background worker that is executed in tokio context as `jsonrpsee` requires. - async fn background_worker( - chain_name: String, - item_type: String, - subscription: jsonrpsee::core::client::Subscription, - mut sender: futures::channel::mpsc::Sender>, - cancel_receiver: futures::channel::oneshot::Receiver<()>, - ) { - log::trace!( - target: "bridge", - "Starting background worker for {} {} subscription stream.", - chain_name, - item_type, - ); - - futures::pin_mut!(subscription, cancel_receiver); - loop { - match futures::future::select(subscription.next(), &mut cancel_receiver).await { - futures::future::Either::Left((Some(Ok(item)), _)) => - if sender.send(Some(item)).await.is_err() { - log::trace!( - target: "bridge", - "{} {} subscription stream: no listener. Stopping background worker.", - chain_name, - item_type, - ); - - break - }, - futures::future::Either::Left((Some(Err(e)), _)) => { - log::trace!( - target: "bridge", - "{} {} subscription stream has returned '{:?}'. Stream needs to be restarted. Stopping background worker.", - chain_name, - item_type, - e, - ); - let _ = sender.send(None).await; - break - }, - futures::future::Either::Left((None, _)) => { - log::trace!( - target: "bridge", - "{} {} subscription stream has returned None. Stream needs to be restarted. 
Stopping background worker.", - chain_name, - item_type, - ); - let _ = sender.send(None).await; - break - }, - futures::future::Either::Right((_, _)) => { - log::trace!( - target: "bridge", - "{} {} subscription stream: listener has been dropped. Stopping background worker.", - chain_name, - item_type, - ); - break; - }, - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{guard::tests::TestEnvironment, test_chain::TestChain}; - use futures::{channel::mpsc::unbounded, FutureExt}; - - async fn run_ensure_correct_runtime_version( - expected: ChainRuntimeVersion, - actual: RuntimeVersion, - ) -> Result<()> { - let ( - (mut runtime_version_tx, runtime_version_rx), - (slept_tx, _slept_rx), - (aborted_tx, mut aborted_rx), - ) = (unbounded(), unbounded(), unbounded()); - runtime_version_tx.send(actual).await.unwrap(); - let mut env = TestEnvironment { runtime_version_rx, slept_tx, aborted_tx }; - - let ensure_correct_runtime_version = - Client::::ensure_correct_runtime_version(&mut env, expected).boxed(); - let aborted = aborted_rx.next().map(|_| Err(Error::Custom("".into()))).boxed(); - futures::pin_mut!(ensure_correct_runtime_version, aborted); - futures::future::select(ensure_correct_runtime_version, aborted) - .await - .into_inner() - .0 - } - - #[async_std::test] - async fn ensure_correct_runtime_version_works() { - // when we are configured to use auto version - assert!(matches!( - run_ensure_correct_runtime_version( - ChainRuntimeVersion::Auto, - RuntimeVersion { - spec_version: 100, - transaction_version: 100, - ..Default::default() - }, - ) - .await, - Ok(()), - )); - // when actual == expected - assert!(matches!( - run_ensure_correct_runtime_version( - ChainRuntimeVersion::Custom(SimpleRuntimeVersion { - spec_version: 100, - transaction_version: 100 - }), - RuntimeVersion { - spec_version: 100, - transaction_version: 100, - ..Default::default() - }, - ) - .await, - Ok(()), - )); - // when actual spec version < expected spec version - 
assert!(matches!( - run_ensure_correct_runtime_version( - ChainRuntimeVersion::Custom(SimpleRuntimeVersion { - spec_version: 100, - transaction_version: 100 - }), - RuntimeVersion { spec_version: 99, transaction_version: 100, ..Default::default() }, - ) - .await, - Err(Error::WaitingForRuntimeUpgrade { - expected: SimpleRuntimeVersion { spec_version: 100, transaction_version: 100 }, - actual: SimpleRuntimeVersion { spec_version: 99, transaction_version: 100 }, - .. - }), - )); - // when actual spec version > expected spec version - assert!(matches!( - run_ensure_correct_runtime_version( - ChainRuntimeVersion::Custom(SimpleRuntimeVersion { - spec_version: 100, - transaction_version: 100 - }), - RuntimeVersion { - spec_version: 101, - transaction_version: 100, - ..Default::default() - }, - ) - .await, - Err(Error::Custom(_)), - )); - } -} diff --git a/bridges/relays/client-substrate/src/client/caching.rs b/bridges/relays/client-substrate/src/client/caching.rs new file mode 100644 index 000000000000..cb898cf51726 --- /dev/null +++ b/bridges/relays/client-substrate/src/client/caching.rs @@ -0,0 +1,468 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Client implementation that is caching (whenever possible) results of its backend +//! method calls. 
+ +use crate::{ + client::{Client, SubscriptionBroadcaster}, + error::{Error, Result}, + AccountIdOf, AccountKeyPairOf, BlockNumberOf, Chain, ChainWithGrandpa, ChainWithTransactions, + HashOf, HeaderIdOf, HeaderOf, NonceOf, SignedBlockOf, SimpleRuntimeVersion, Subscription, + TransactionTracker, UnsignedTransaction, ANCIENT_BLOCK_THRESHOLD, +}; +use std::{cmp::Ordering, future::Future, task::Poll}; + +use async_std::{ + sync::{Arc, Mutex, RwLock}, + task::JoinHandle, +}; +use async_trait::async_trait; +use codec::Encode; +use frame_support::weights::Weight; +use futures::{FutureExt, StreamExt}; +use quick_cache::unsync::Cache; +use sp_consensus_grandpa::{AuthorityId, OpaqueKeyOwnershipProof, SetId}; +use sp_core::{ + storage::{StorageData, StorageKey}, + Bytes, Pair, +}; +use sp_runtime::{traits::Header as _, transaction_validity::TransactionValidity}; +use sp_trie::StorageProof; +use sp_version::RuntimeVersion; + +/// `quick_cache::unsync::Cache` wrapped in async-aware synchronization primitives. +type SyncCache = Arc>>; + +/// Client implementation that is caching (whenever possible) results of its backend +/// method calls. Apart from caching call results, it also supports some (at the +/// moment: justifications) subscription sharing, meaning that the single server +/// subscription may be shared by multiple subscribers at the client side. +#[derive(Clone)] +pub struct CachingClient> { + backend: B, + data: Arc>, +} + +/// Client data, shared by all `CachingClient` clones. +struct ClientData { + grandpa_justifications: Arc>>>, + beefy_justifications: Arc>>>, + background_task_handle: Arc>>>, + best_header: Arc>>>, + best_finalized_header: Arc>>>, + // `quick_cache::sync::Cache` has the `get_or_insert_async` method, which fits our needs, + // but it uses synchronization primitives that are not aware of async execution. 
They + // can block the executor threads and cause deadlocks => let's use primitives from + // `async_std` crate around `quick_cache::unsync::Cache` + header_hash_by_number_cache: SyncCache, HashOf>, + header_by_hash_cache: SyncCache, HeaderOf>, + block_by_hash_cache: SyncCache, SignedBlockOf>, + raw_storage_value_cache: SyncCache<(HashOf, StorageKey), Option>, + state_call_cache: SyncCache<(HashOf, String, Bytes), Bytes>, +} + +impl> CachingClient { + /// Creates new `CachingClient` on top of given `backend`. + pub async fn new(backend: B) -> Self { + // most of relayer operations will never touch more than `ANCIENT_BLOCK_THRESHOLD` + // headers, so we'll use this as a cache capacity for all chain-related caches + let chain_state_capacity = ANCIENT_BLOCK_THRESHOLD as usize; + let best_header = Arc::new(RwLock::new(None)); + let best_finalized_header = Arc::new(RwLock::new(None)); + let header_by_hash_cache = Arc::new(RwLock::new(Cache::new(chain_state_capacity))); + let background_task_handle = Self::start_background_task( + backend.clone(), + best_header.clone(), + best_finalized_header.clone(), + header_by_hash_cache.clone(), + ) + .await; + CachingClient { + backend, + data: Arc::new(ClientData { + grandpa_justifications: Arc::new(Mutex::new(None)), + beefy_justifications: Arc::new(Mutex::new(None)), + background_task_handle: Arc::new(Mutex::new(background_task_handle)), + best_header, + best_finalized_header, + header_hash_by_number_cache: Arc::new(RwLock::new(Cache::new( + chain_state_capacity, + ))), + header_by_hash_cache, + block_by_hash_cache: Arc::new(RwLock::new(Cache::new(chain_state_capacity))), + raw_storage_value_cache: Arc::new(RwLock::new(Cache::new(1_024))), + state_call_cache: Arc::new(RwLock::new(Cache::new(1_024))), + }), + } + } + + /// Try to get value from the cache, or compute and insert it using given future. 
+ async fn get_or_insert_async( + &self, + cache: &Arc>>, + key: &K, + with: impl std::future::Future>, + ) -> Result { + // try to get cached value first using read lock + { + let cache = cache.read().await; + if let Some(value) = cache.get(key) { + return Ok(value.clone()) + } + } + + // let's compute the value without holding any locks - it may cause additional misses and + // double insertions, but that's better than holding a lock for a while + let value = with.await?; + + // insert/update the value in the cache + cache.write().await.insert(key.clone(), value.clone()); + Ok(value) + } + + /// Subscribe to finality justifications, trying to reuse existing subscription. + async fn subscribe_finality_justifications<'a>( + &'a self, + maybe_broadcaster: &Mutex>>, + do_subscribe: impl Future>> + 'a, + ) -> Result> { + let mut maybe_broadcaster = maybe_broadcaster.lock().await; + let broadcaster = match maybe_broadcaster.as_ref() { + Some(justifications) => justifications, + None => { + let broadcaster = match SubscriptionBroadcaster::new(do_subscribe.await?) { + Ok(broadcaster) => broadcaster, + Err(subscription) => return Ok(subscription), + }; + maybe_broadcaster.get_or_insert(broadcaster) + }, + }; + + broadcaster.subscribe().await + } + + /// Start background task that reads best (and best finalized) headers from subscriptions. 
+ async fn start_background_task( + backend: B, + best_header: Arc>>>, + best_finalized_header: Arc>>>, + header_by_hash_cache: SyncCache, HeaderOf>, + ) -> JoinHandle> { + async_std::task::spawn(async move { + // initialize by reading headers directly from backend to avoid doing that in the + // high-level code + let mut last_finalized_header = + backend.header_by_hash(backend.best_finalized_header_hash().await?).await?; + *best_header.write().await = Some(backend.best_header().await?); + *best_finalized_header.write().await = Some(last_finalized_header.clone()); + + // ...and then continue with subscriptions + let mut best_headers = backend.subscribe_best_headers().await?; + let mut finalized_headers = backend.subscribe_finalized_headers().await?; + loop { + futures::select! { + new_best_header = best_headers.next().fuse() => { + // we assume that the best header is always the actual best header, even if its + // number is lower than the number of previous-best-header (chain may use its own + // best header selection algorithms) + let new_best_header = new_best_header + .ok_or_else(|| Error::ChannelError(format!("Mandatory best headers subscription for {} has finished", C::NAME)))?; + let new_best_header_hash = new_best_header.hash(); + header_by_hash_cache.write().await.insert(new_best_header_hash, new_best_header.clone()); + *best_header.write().await = Some(new_best_header); + }, + new_finalized_header = finalized_headers.next().fuse() => { + // in theory we'll always get finalized headers in order, but let's double check + let new_finalized_header = new_finalized_header. 
+ ok_or_else(|| Error::ChannelError(format!("Finalized headers subscription for {} has finished", C::NAME)))?; + let new_finalized_header_number = *new_finalized_header.number(); + let last_finalized_header_number = *last_finalized_header.number(); + match new_finalized_header_number.cmp(&last_finalized_header_number) { + Ordering::Greater => { + let new_finalized_header_hash = new_finalized_header.hash(); + header_by_hash_cache.write().await.insert(new_finalized_header_hash, new_finalized_header.clone()); + *best_finalized_header.write().await = Some(new_finalized_header.clone()); + last_finalized_header = new_finalized_header; + }, + Ordering::Less => { + return Err(Error::unordered_finalized_headers::( + new_finalized_header_number, + last_finalized_header_number, + )); + }, + _ => (), + } + }, + } + } + }) + } + + /// Ensure that the background task is active. + async fn ensure_background_task_active(&self) -> Result<()> { + let mut background_task_handle = self.data.background_task_handle.lock().await; + if let Poll::Ready(result) = futures::poll!(&mut *background_task_handle) { + return Err(Error::ChannelError(format!( + "Background task of {} client has exited with result: {:?}", + C::NAME, + result + ))) + } + + Ok(()) + } + + /// Try to get header, read elsewhere by background task through subscription. 
+ async fn read_header_from_background<'a>( + &'a self, + header: &Arc>>>, + read_header_from_backend: impl Future>> + 'a, + ) -> Result> { + // ensure that the background task is active + self.ensure_background_task_active().await?; + + // now we know that the background task is active, so we could trust that the + // `header` has the most recent updates from it + match header.read().await.clone() { + Some(header) => Ok(header), + None => { + // header has not yet been read from the subscription, which means that + // we are just starting - let's read header directly from backend this time + read_header_from_backend.await + }, + } + } +} + +impl> std::fmt::Debug for CachingClient { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.write_fmt(format_args!("CachingClient<{:?}>", self.backend)) + } +} + +#[async_trait] +impl> Client for CachingClient { + async fn ensure_synced(&self) -> Result<()> { + self.backend.ensure_synced().await + } + + async fn reconnect(&self) -> Result<()> { + self.backend.reconnect().await?; + // since we have new underlying client, we need to restart subscriptions too + *self.data.grandpa_justifications.lock().await = None; + *self.data.beefy_justifications.lock().await = None; + // also restart background task too + *self.data.best_header.write().await = None; + *self.data.best_finalized_header.write().await = None; + *self.data.background_task_handle.lock().await = Self::start_background_task( + self.backend.clone(), + self.data.best_header.clone(), + self.data.best_finalized_header.clone(), + self.data.header_by_hash_cache.clone(), + ) + .await; + Ok(()) + } + + fn genesis_hash(&self) -> HashOf { + self.backend.genesis_hash() + } + + async fn header_hash_by_number(&self, number: BlockNumberOf) -> Result> { + self.get_or_insert_async( + &self.data.header_hash_by_number_cache, + &number, + self.backend.header_hash_by_number(number), + ) + .await + } + + async fn header_by_hash(&self, hash: HashOf) -> Result> { + 
self.get_or_insert_async( + &self.data.header_by_hash_cache, + &hash, + self.backend.header_by_hash(hash), + ) + .await + } + + async fn block_by_hash(&self, hash: HashOf) -> Result> { + self.get_or_insert_async( + &self.data.block_by_hash_cache, + &hash, + self.backend.block_by_hash(hash), + ) + .await + } + + async fn best_finalized_header_hash(&self) -> Result> { + self.read_header_from_background( + &self.data.best_finalized_header, + self.backend.best_finalized_header(), + ) + .await + .map(|h| h.hash()) + } + + async fn best_header(&self) -> Result> { + self.read_header_from_background(&self.data.best_header, self.backend.best_header()) + .await + } + + async fn subscribe_best_headers(&self) -> Result>> { + // we may share the sunbscription here, but atm there's no callers of this method + self.backend.subscribe_best_headers().await + } + + async fn subscribe_finalized_headers(&self) -> Result>> { + // we may share the sunbscription here, but atm there's no callers of this method + self.backend.subscribe_finalized_headers().await + } + + async fn subscribe_grandpa_finality_justifications(&self) -> Result> + where + C: ChainWithGrandpa, + { + self.subscribe_finality_justifications( + &self.data.grandpa_justifications, + self.backend.subscribe_grandpa_finality_justifications(), + ) + .await + } + + async fn generate_grandpa_key_ownership_proof( + &self, + at: HashOf, + set_id: SetId, + authority_id: AuthorityId, + ) -> Result> { + self.backend + .generate_grandpa_key_ownership_proof(at, set_id, authority_id) + .await + } + + async fn subscribe_beefy_finality_justifications(&self) -> Result> { + self.subscribe_finality_justifications( + &self.data.beefy_justifications, + self.backend.subscribe_beefy_finality_justifications(), + ) + .await + } + + async fn token_decimals(&self) -> Result> { + self.backend.token_decimals().await + } + + async fn runtime_version(&self) -> Result { + self.backend.runtime_version().await + } + + async fn simple_runtime_version(&self) 
-> Result { + self.backend.simple_runtime_version().await + } + + fn can_start_version_guard(&self) -> bool { + self.backend.can_start_version_guard() + } + + async fn raw_storage_value( + &self, + at: HashOf, + storage_key: StorageKey, + ) -> Result> { + self.get_or_insert_async( + &self.data.raw_storage_value_cache, + &(at, storage_key.clone()), + self.backend.raw_storage_value(at, storage_key), + ) + .await + } + + async fn pending_extrinsics(&self) -> Result> { + self.backend.pending_extrinsics().await + } + + async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result> { + self.backend.submit_unsigned_extrinsic(transaction).await + } + + async fn submit_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>, + { + self.backend.submit_signed_extrinsic(signer, prepare_extrinsic).await + } + + async fn submit_and_watch_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>, + { + self.backend + .submit_and_watch_signed_extrinsic(signer, prepare_extrinsic) + .await + .map(|t| t.switch_environment(self.clone())) + } + + async fn validate_transaction( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result { + self.backend.validate_transaction(at, transaction).await + } + + async fn estimate_extrinsic_weight( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result { + self.backend.estimate_extrinsic_weight(at, transaction).await + } + + async fn raw_state_call( + &self, + at: HashOf, + method: String, + arguments: Args, + ) -> Result { + let encoded_arguments = Bytes(arguments.encode()); + self.get_or_insert_async( + &self.data.state_call_cache, + &(at, method.clone(), 
encoded_arguments), + self.backend.raw_state_call(at, method, arguments), + ) + .await + } + + async fn prove_storage(&self, at: HashOf, keys: Vec) -> Result { + self.backend.prove_storage(at, keys).await + } +} diff --git a/bridges/relays/client-substrate/src/client/mod.rs b/bridges/relays/client-substrate/src/client/mod.rs new file mode 100644 index 000000000000..62a1119d718f --- /dev/null +++ b/bridges/relays/client-substrate/src/client/mod.rs @@ -0,0 +1,91 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Layered Substrate client implementation. + +use crate::{Chain, ConnectionParams}; + +use caching::CachingClient; +use num_traits::Saturating; +use rpc::RpcClient; +use sp_version::RuntimeVersion; + +pub mod caching; +pub mod rpc; + +mod rpc_api; +mod subscription; +mod traits; + +pub use subscription::{StreamDescription, Subscription, SubscriptionBroadcaster}; +pub use traits::Client; + +/// Type of RPC client with caching support. +pub type RpcWithCachingClient = CachingClient>; + +/// Creates new RPC client with caching support. 
+pub async fn rpc_with_caching(params: ConnectionParams) -> RpcWithCachingClient { + let rpc = rpc::RpcClient::::new(params).await; + caching::CachingClient::new(rpc).await +} + +/// The difference between best block number and number of its ancestor, that is enough +/// for us to consider that ancestor an "ancient" block with dropped state. +/// +/// The relay does not assume that it is connected to the archive node, so it always tries +/// to use the best available chain state. But sometimes it still may use state of some +/// old block. If the state of that block is already dropped, relay will see errors when +/// e.g. it tries to prove something. +/// +/// By default Substrate-based nodes are storing state for last 256 blocks. We'll use +/// half of this value. +pub const ANCIENT_BLOCK_THRESHOLD: u32 = 128; + +/// Returns `true` if we think that the state is already discarded for given block. +pub fn is_ancient_block + PartialOrd + Saturating>(block: N, best: N) -> bool { + best.saturating_sub(block) >= N::from(ANCIENT_BLOCK_THRESHOLD) +} + +/// Opaque GRANDPA authorities set. +pub type OpaqueGrandpaAuthoritiesSet = Vec; + +/// A simple runtime version. It only includes the `spec_version` and `transaction_version`. +#[derive(Copy, Clone, Debug)] +pub struct SimpleRuntimeVersion { + /// Version of the runtime specification. + pub spec_version: u32, + /// All existing dispatches are fully compatible when this number doesn't change. + pub transaction_version: u32, +} + +impl SimpleRuntimeVersion { + /// Create a new instance of `SimpleRuntimeVersion` from a `RuntimeVersion`. + pub const fn from_runtime_version(runtime_version: &RuntimeVersion) -> Self { + Self { + spec_version: runtime_version.spec_version, + transaction_version: runtime_version.transaction_version, + } + } +} + +/// Chain runtime version in client +#[derive(Copy, Clone, Debug)] +pub enum ChainRuntimeVersion { + /// Auto query from chain. + Auto, + /// Custom runtime version, defined by user. 
+ Custom(SimpleRuntimeVersion), +} diff --git a/bridges/relays/client-substrate/src/client/rpc.rs b/bridges/relays/client-substrate/src/client/rpc.rs new file mode 100644 index 000000000000..bf7442a95141 --- /dev/null +++ b/bridges/relays/client-substrate/src/client/rpc.rs @@ -0,0 +1,743 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Client implementation that connects to the Substrate node over `ws`/`wss` connection +//! and is using RPC methods to get required data and submit transactions. 
+ +use crate::{ + client::{ + rpc_api::{ + SubstrateAuthorClient, SubstrateBeefyClient, SubstrateChainClient, + SubstrateFrameSystemClient, SubstrateGrandpaClient, SubstrateStateClient, + SubstrateSystemClient, + }, + subscription::{StreamDescription, Subscription}, + Client, + }, + error::{Error, Result}, + guard::Environment, + transaction_stall_timeout, AccountIdOf, AccountKeyPairOf, BalanceOf, BlockNumberOf, Chain, + ChainRuntimeVersion, ChainWithGrandpa, ChainWithTransactions, ConnectionParams, HashOf, + HeaderIdOf, HeaderOf, NonceOf, SignParam, SignedBlockOf, SimpleRuntimeVersion, + TransactionTracker, UnsignedTransaction, +}; + +use async_std::sync::{Arc, Mutex, RwLock}; +use async_trait::async_trait; +use bp_runtime::HeaderIdProvider; +use codec::Encode; +use frame_support::weights::Weight; +use futures::TryFutureExt; +use jsonrpsee::{ + core::{client::Subscription as RpcSubscription, ClientError}, + ws_client::{WsClient, WsClientBuilder}, +}; +use num_traits::Zero; +use pallet_transaction_payment::RuntimeDispatchInfo; +use relay_utils::{relay_loop::RECONNECT_DELAY, STALL_TIMEOUT}; +use sp_core::{ + storage::{StorageData, StorageKey}, + Bytes, Hasher, Pair, +}; +use sp_runtime::transaction_validity::{TransactionSource, TransactionValidity}; +use sp_trie::StorageProof; +use sp_version::RuntimeVersion; +use std::{cmp::Ordering, future::Future, marker::PhantomData}; + +const MAX_SUBSCRIPTION_CAPACITY: usize = 4096; + +const SUB_API_TXPOOL_VALIDATE_TRANSACTION: &str = "TaggedTransactionQueue_validate_transaction"; +const SUB_API_TX_PAYMENT_QUERY_INFO: &str = "TransactionPaymentApi_query_info"; +const SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF: &str = + "GrandpaApi_generate_key_ownership_proof"; + +/// Client implementation that connects to the Substrate node over `ws`/`wss` connection +/// and is using RPC methods to get required data and submit transactions. 
+pub struct RpcClient { + // Lock order: `submit_signed_extrinsic_lock`, `data` + /// Client connection params. + params: Arc, + /// If several tasks are submitting their transactions simultaneously using + /// `submit_signed_extrinsic` method, they may get the same transaction nonce. So one of + /// transactions will be rejected from the pool. This lock is here to prevent situations like + /// that. + submit_signed_extrinsic_lock: Arc>, + /// Genesis block hash. + genesis_hash: HashOf, + /// Shared dynamic data. + data: Arc>, + /// Generic arguments dump. + _phantom: PhantomData, +} + +/// Client data, shared by all `RpcClient` clones. +struct ClientData { + /// Tokio runtime handle. + tokio: Arc, + /// Substrate RPC client. + client: Arc, +} + +/// Already encoded value. +struct PreEncoded(Vec); + +impl Encode for PreEncoded { + fn encode(&self) -> Vec { + self.0.clone() + } +} + +impl std::fmt::Debug for RpcClient { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.write_fmt(format_args!("RpcClient<{}>", C::NAME)) + } +} + +impl RpcClient { + /// Returns client that is able to call RPCs on Substrate node over websocket connection. + /// + /// This function will keep connecting to given Substrate node until connection is established + /// and is functional. If attempt fail, it will wait for `RECONNECT_DELAY` and retry again. + pub async fn new(params: ConnectionParams) -> Self { + let params = Arc::new(params); + loop { + match Self::try_connect(params.clone()).await { + Ok(client) => return client, + Err(error) => log::error!( + target: "bridge", + "Failed to connect to {} node: {:?}. Going to retry in {}s", + C::NAME, + error, + RECONNECT_DELAY.as_secs(), + ), + } + + async_std::task::sleep(RECONNECT_DELAY).await; + } + } + + /// Try to connect to Substrate node over websocket. Returns Substrate RPC client if connection + /// has been established or error otherwise. 
+ async fn try_connect(params: Arc) -> Result { + let (tokio, client) = Self::build_client(¶ms).await?; + + let genesis_hash_client = client.clone(); + let genesis_hash = tokio + .spawn(async move { + SubstrateChainClient::::block_hash(&*genesis_hash_client, Some(Zero::zero())) + .await + }) + .await??; + + let chain_runtime_version = params.chain_runtime_version; + let mut client = Self { + params, + submit_signed_extrinsic_lock: Arc::new(Mutex::new(())), + genesis_hash, + data: Arc::new(RwLock::new(ClientData { tokio, client })), + _phantom: PhantomData, + }; + Self::ensure_correct_runtime_version(&mut client, chain_runtime_version).await?; + Ok(client) + } + + // Check runtime version to understand if we need are connected to expected version, or we + // need to wait for upgrade, we need to abort immediately. + async fn ensure_correct_runtime_version>( + env: &mut E, + expected: ChainRuntimeVersion, + ) -> Result<()> { + // we are only interested if version mode is bundled or passed using CLI + let expected = match expected { + ChainRuntimeVersion::Auto => return Ok(()), + ChainRuntimeVersion::Custom(expected) => expected, + }; + + // we need to wait if actual version is < than expected, we are OK of versions are the + // same and we need to abort if actual version is > than expected + let actual = SimpleRuntimeVersion::from_runtime_version(&env.runtime_version().await?); + match actual.spec_version.cmp(&expected.spec_version) { + Ordering::Less => + Err(Error::WaitingForRuntimeUpgrade { chain: C::NAME.into(), expected, actual }), + Ordering::Equal => Ok(()), + Ordering::Greater => { + log::error!( + target: "bridge", + "The {} client is configured to use runtime version {expected:?} and actual \ + version is {actual:?}. Aborting", + C::NAME, + ); + env.abort().await; + Err(Error::Custom("Aborted".into())) + }, + } + } + + /// Build client to use in connection. 
+ async fn build_client( + params: &ConnectionParams, + ) -> Result<(Arc, Arc)> { + let tokio = tokio::runtime::Runtime::new()?; + let uri = match params.uri { + Some(ref uri) => uri.clone(), + None => { + format!( + "{}://{}:{}{}", + if params.secure { "wss" } else { "ws" }, + params.host, + params.port, + match params.path { + Some(ref path) => format!("/{}", path), + None => String::new(), + }, + ) + }, + }; + log::info!(target: "bridge", "Connecting to {} node at {}", C::NAME, uri); + + let client = tokio + .spawn(async move { + WsClientBuilder::default() + .max_buffer_capacity_per_subscription(MAX_SUBSCRIPTION_CAPACITY) + .build(&uri) + .await + }) + .await??; + + Ok((Arc::new(tokio), Arc::new(client))) + } + + /// Execute jsonrpsee future in tokio context. + async fn jsonrpsee_execute(&self, make_jsonrpsee_future: MF) -> Result + where + MF: FnOnce(Arc) -> F + Send + 'static, + F: Future> + Send + 'static, + T: Send + 'static, + { + let data = self.data.read().await; + let client = data.client.clone(); + data.tokio.spawn(make_jsonrpsee_future(client)).await? + } + + /// Prepare parameters used to sign chain transactions. + async fn build_sign_params(&self, signer: AccountKeyPairOf) -> Result> + where + C: ChainWithTransactions, + { + let runtime_version = self.simple_runtime_version().await?; + Ok(SignParam:: { + spec_version: runtime_version.spec_version, + transaction_version: runtime_version.transaction_version, + genesis_hash: self.genesis_hash, + signer, + }) + } + + /// Get the nonce of the given Substrate account. + pub async fn next_account_index(&self, account: AccountIdOf) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateFrameSystemClient::::account_next_index(&*client, account).await?) + }) + .await + } + + /// Subscribe to finality justifications. 
+ async fn subscribe_finality_justifications( + &self, + gadget_name: &str, + do_subscribe: impl FnOnce(Arc) -> Fut + Send + 'static, + ) -> Result> + where + Fut: Future, ClientError>> + Send, + { + let subscription = self + .jsonrpsee_execute(move |client| async move { Ok(do_subscribe(client).await?) }) + .map_err(|e| Error::failed_to_subscribe_justification::(e)) + .await?; + + Ok(Subscription::new_forwarded( + StreamDescription::new(format!("{} justifications", gadget_name), C::NAME.into()), + subscription, + )) + } + + /// Subscribe to headers stream. + async fn subscribe_headers( + &self, + stream_name: &str, + do_subscribe: impl FnOnce(Arc) -> Fut + Send + 'static, + map_err: impl FnOnce(Error) -> Error, + ) -> Result>> + where + Fut: Future>, ClientError>> + Send, + { + let subscription = self + .jsonrpsee_execute(move |client| async move { Ok(do_subscribe(client).await?) }) + .map_err(map_err) + .await?; + + Ok(Subscription::new_forwarded( + StreamDescription::new(format!("{} headers", stream_name), C::NAME.into()), + subscription, + )) + } +} + +impl Clone for RpcClient { + fn clone(&self) -> Self { + RpcClient { + params: self.params.clone(), + submit_signed_extrinsic_lock: self.submit_signed_extrinsic_lock.clone(), + genesis_hash: self.genesis_hash, + data: self.data.clone(), + _phantom: PhantomData, + } + } +} + +#[async_trait] +impl Client for RpcClient { + async fn ensure_synced(&self) -> Result<()> { + let health = self + .jsonrpsee_execute(|client| async move { + Ok(SubstrateSystemClient::::health(&*client).await?) 
+ }) + .await + .map_err(|e| Error::failed_to_get_system_health::(e))?; + + let is_synced = !health.is_syncing && (!health.should_have_peers || health.peers > 0); + if is_synced { + Ok(()) + } else { + Err(Error::ClientNotSynced(health)) + } + } + + async fn reconnect(&self) -> Result<()> { + let mut data = self.data.write().await; + let (tokio, client) = Self::build_client(&self.params).await?; + data.tokio = tokio; + data.client = client; + Ok(()) + } + + fn genesis_hash(&self) -> HashOf { + self.genesis_hash + } + + async fn header_hash_by_number(&self, number: BlockNumberOf) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateChainClient::::block_hash(&*client, Some(number)).await?) + }) + .await + .map_err(|e| Error::failed_to_read_header_hash_by_number::(number, e)) + } + + async fn header_by_hash(&self, hash: HashOf) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateChainClient::::header(&*client, Some(hash)).await?) + }) + .await + .map_err(|e| Error::failed_to_read_header_by_hash::(hash, e)) + } + + async fn block_by_hash(&self, hash: HashOf) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateChainClient::::block(&*client, Some(hash)).await?) + }) + .await + .map_err(|e| Error::failed_to_read_block_by_hash::(hash, e)) + } + + async fn best_finalized_header_hash(&self) -> Result> { + self.jsonrpsee_execute(|client| async move { + Ok(SubstrateChainClient::::finalized_head(&*client).await?) + }) + .await + .map_err(|e| Error::failed_to_read_best_finalized_header_hash::(e)) + } + + async fn best_header(&self) -> Result> { + self.jsonrpsee_execute(|client| async move { + Ok(SubstrateChainClient::::header(&*client, None).await?) 
+ }) + .await + .map_err(|e| Error::failed_to_read_best_header::(e)) + } + + async fn subscribe_best_headers(&self) -> Result>> { + self.subscribe_headers( + "best headers", + move |client| async move { SubstrateChainClient::::subscribe_new_heads(&*client).await }, + |e| Error::failed_to_subscribe_best_headers::(e), + ) + .await + } + + async fn subscribe_finalized_headers(&self) -> Result>> { + self.subscribe_headers( + "best finalized headers", + move |client| async move { + SubstrateChainClient::::subscribe_finalized_heads(&*client).await + }, + |e| Error::failed_to_subscribe_finalized_headers::(e), + ) + .await + } + + async fn subscribe_grandpa_finality_justifications(&self) -> Result> + where + C: ChainWithGrandpa, + { + self.subscribe_finality_justifications("GRANDPA", move |client| async move { + SubstrateGrandpaClient::::subscribe_justifications(&*client).await + }) + .await + } + + async fn generate_grandpa_key_ownership_proof( + &self, + at: HashOf, + set_id: sp_consensus_grandpa::SetId, + authority_id: sp_consensus_grandpa::AuthorityId, + ) -> Result> { + self.state_call( + at, + SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF.into(), + (set_id, authority_id), + ) + .await + } + + async fn subscribe_beefy_finality_justifications(&self) -> Result> { + self.subscribe_finality_justifications("BEEFY", move |client| async move { + SubstrateBeefyClient::::subscribe_justifications(&*client).await + }) + .await + } + + async fn token_decimals(&self) -> Result> { + self.jsonrpsee_execute(move |client| async move { + let system_properties = SubstrateSystemClient::::properties(&*client).await?; + Ok(system_properties.get("tokenDecimals").and_then(|v| v.as_u64())) + }) + .await + } + + async fn runtime_version(&self) -> Result { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateStateClient::::runtime_version(&*client).await?) 
+ }) + .await + .map_err(|e| Error::failed_to_read_runtime_version::(e)) + } + + async fn simple_runtime_version(&self) -> Result { + Ok(match self.params.chain_runtime_version { + ChainRuntimeVersion::Auto => { + let runtime_version = self.runtime_version().await?; + SimpleRuntimeVersion::from_runtime_version(&runtime_version) + }, + ChainRuntimeVersion::Custom(ref version) => *version, + }) + } + + fn can_start_version_guard(&self) -> bool { + !matches!(self.params.chain_runtime_version, ChainRuntimeVersion::Auto) + } + + async fn raw_storage_value( + &self, + at: HashOf, + storage_key: StorageKey, + ) -> Result> { + let cloned_storage_key = storage_key.clone(); + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateStateClient::::storage(&*client, cloned_storage_key, Some(at)).await?) + }) + .await + .map_err(|e| Error::failed_to_read_storage_value::(at, storage_key, e)) + } + + async fn pending_extrinsics(&self) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateAuthorClient::::pending_extrinsics(&*client).await?) + }) + .await + .map_err(|e| Error::failed_to_get_pending_extrinsics::(e)) + } + + async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result> { + // one last check that the transaction is valid. Most of checks happen in the relay loop and + // it is the "final" check before submission. + let best_header_hash = self.best_header_hash().await?; + self.validate_transaction(best_header_hash, PreEncoded(transaction.0.clone())) + .await + .map_err(|e| Error::failed_to_submit_transaction::(e))? 
+ .map_err(|e| Error::failed_to_submit_transaction::(Error::TransactionInvalid(e)))?; + + self.jsonrpsee_execute(move |client| async move { + let tx_hash = SubstrateAuthorClient::::submit_extrinsic(&*client, transaction) + .await + .map_err(|e| { + log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); + e + })?; + log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); + Ok(tx_hash) + }) + .await + .map_err(|e| Error::failed_to_submit_transaction::(e)) + } + + async fn submit_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>, + { + let _guard = self.submit_signed_extrinsic_lock.lock().await; + let transaction_nonce = self.next_account_index(signer.public().into()).await?; + let best_header = self.best_header().await?; + let signing_data = self.build_sign_params(signer.clone()).await?; + + // By using parent of best block here, we are protecting again best-block reorganizations. + // E.g. transaction may have been submitted when the best block was `A[num=100]`. Then it + // has been changed to `B[num=100]`. Hash of `A` has been included into transaction + // signature payload. So when signature will be checked, the check will fail and transaction + // will be dropped from the pool. 
+ let best_header_id = best_header.parent_id().unwrap_or_else(|| best_header.id()); + + let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; + let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); + self.submit_unsigned_extrinsic(Bytes(signed_extrinsic)).await + } + + async fn submit_and_watch_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>, + { + let self_clone = self.clone(); + let signing_data = self.build_sign_params(signer.clone()).await?; + let _guard = self.submit_signed_extrinsic_lock.lock().await; + let transaction_nonce = self.next_account_index(signer.public().into()).await?; + let best_header = self.best_header().await?; + let best_header_id = best_header.id(); + + let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; + let stall_timeout = transaction_stall_timeout( + extrinsic.era.mortality_period(), + C::AVERAGE_BLOCK_INTERVAL, + STALL_TIMEOUT, + ); + let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); + + // one last check that the transaction is valid. Most of checks happen in the relay loop and + // it is the "final" check before submission. + self.validate_transaction(best_header_id.hash(), PreEncoded(signed_extrinsic.clone())) + .await + .map_err(|e| Error::failed_to_submit_transaction::(e))? 
+ .map_err(|e| Error::failed_to_submit_transaction::(Error::TransactionInvalid(e)))?; + + self.jsonrpsee_execute(move |client| async move { + let tx_hash = C::Hasher::hash(&signed_extrinsic); + let subscription: jsonrpsee::core::client::Subscription<_> = + SubstrateAuthorClient::::submit_and_watch_extrinsic( + &*client, + Bytes(signed_extrinsic), + ) + .await + .map_err(|e| { + log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); + e + })?; + log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); + Ok(TransactionTracker::new( + self_clone, + stall_timeout, + tx_hash, + Subscription::new_forwarded( + StreamDescription::new("transaction events".into(), C::NAME.into()), + subscription, + ), + )) + }) + .await + .map_err(|e| Error::failed_to_submit_transaction::(e)) + } + + async fn validate_transaction( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result { + self.state_call( + at, + SUB_API_TXPOOL_VALIDATE_TRANSACTION.into(), + (TransactionSource::External, transaction, at), + ) + .await + } + + async fn estimate_extrinsic_weight( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result { + let transaction_len = transaction.encoded_size() as u32; + let dispatch_info: RuntimeDispatchInfo> = self + .state_call(at, SUB_API_TX_PAYMENT_QUERY_INFO.into(), (transaction, transaction_len)) + .await?; + + Ok(dispatch_info.weight) + } + + async fn raw_state_call( + &self, + at: HashOf, + method: String, + arguments: Args, + ) -> Result { + let arguments = Bytes(arguments.encode()); + let arguments_clone = arguments.clone(); + let method_clone = method.clone(); + self.jsonrpsee_execute(move |client| async move { + SubstrateStateClient::::call(&*client, method, arguments, Some(at)) + .await + .map_err(Into::into) + }) + .await + .map_err(|e| Error::failed_state_call::(at, method_clone, arguments_clone, e)) + } + + async fn prove_storage(&self, at: HashOf, keys: Vec) -> Result { + 
let keys_clone = keys.clone(); + self.jsonrpsee_execute(move |client| async move { + SubstrateStateClient::::prove_storage(&*client, keys, Some(at)) + .await + .map(|proof| StorageProof::new(proof.proof.into_iter().map(|b| b.0))) + .map_err(Into::into) + }) + .await + .map_err(|e| Error::failed_to_prove_storage::(at, keys_clone, e)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{guard::tests::TestEnvironment, test_chain::TestChain}; + use futures::{channel::mpsc::unbounded, FutureExt, SinkExt, StreamExt}; + + async fn run_ensure_correct_runtime_version( + expected: ChainRuntimeVersion, + actual: RuntimeVersion, + ) -> Result<()> { + let ( + (mut runtime_version_tx, runtime_version_rx), + (slept_tx, _slept_rx), + (aborted_tx, mut aborted_rx), + ) = (unbounded(), unbounded(), unbounded()); + runtime_version_tx.send(actual).await.unwrap(); + let mut env = TestEnvironment { runtime_version_rx, slept_tx, aborted_tx }; + + let ensure_correct_runtime_version = + RpcClient::::ensure_correct_runtime_version(&mut env, expected).boxed(); + let aborted = aborted_rx.next().map(|_| Err(Error::Custom("".into()))).boxed(); + futures::pin_mut!(ensure_correct_runtime_version, aborted); + futures::future::select(ensure_correct_runtime_version, aborted) + .await + .into_inner() + .0 + } + + #[async_std::test] + async fn ensure_correct_runtime_version_works() { + // when we are configured to use auto version + assert!(matches!( + run_ensure_correct_runtime_version( + ChainRuntimeVersion::Auto, + RuntimeVersion { + spec_version: 100, + transaction_version: 100, + ..Default::default() + }, + ) + .await, + Ok(()), + )); + // when actual == expected + assert!(matches!( + run_ensure_correct_runtime_version( + ChainRuntimeVersion::Custom(SimpleRuntimeVersion { + spec_version: 100, + transaction_version: 100 + }), + RuntimeVersion { + spec_version: 100, + transaction_version: 100, + ..Default::default() + }, + ) + .await, + Ok(()), + )); + // when actual spec version < 
expected spec version + assert!(matches!( + run_ensure_correct_runtime_version( + ChainRuntimeVersion::Custom(SimpleRuntimeVersion { + spec_version: 100, + transaction_version: 100 + }), + RuntimeVersion { spec_version: 99, transaction_version: 100, ..Default::default() }, + ) + .await, + Err(Error::WaitingForRuntimeUpgrade { + expected: SimpleRuntimeVersion { spec_version: 100, transaction_version: 100 }, + actual: SimpleRuntimeVersion { spec_version: 99, transaction_version: 100 }, + .. + }), + )); + // when actual spec version > expected spec version + assert!(matches!( + run_ensure_correct_runtime_version( + ChainRuntimeVersion::Custom(SimpleRuntimeVersion { + spec_version: 100, + transaction_version: 100 + }), + RuntimeVersion { + spec_version: 101, + transaction_version: 100, + ..Default::default() + }, + ) + .await, + Err(Error::Custom(_)), + )); + } +} diff --git a/bridges/relays/client-substrate/src/rpc.rs b/bridges/relays/client-substrate/src/client/rpc_api.rs similarity index 80% rename from bridges/relays/client-substrate/src/rpc.rs rename to bridges/relays/client-substrate/src/client/rpc_api.rs index 60c29cdeb5c7..9cac69f7a13d 100644 --- a/bridges/relays/client-substrate/src/rpc.rs +++ b/bridges/relays/client-substrate/src/client/rpc_api.rs @@ -16,15 +16,9 @@ //! The most generic Substrate node RPC interface. -use async_trait::async_trait; - use crate::{Chain, ChainWithGrandpa, TransactionStatusOf}; -use jsonrpsee::{ - core::{client::Subscription, ClientError}, - proc_macros::rpc, - ws_client::WsClient, -}; +use jsonrpsee::proc_macros::rpc; use pallet_transaction_payment_rpc_runtime_api::FeeDetails; use sc_rpc_api::{state::ReadProof, system::Health}; use sp_core::{ @@ -60,6 +54,20 @@ pub(crate) trait SubstrateChain { /// Return signed block (with justifications) by its hash. #[method(name = "getBlock")] async fn block(&self, block_hash: Option) -> RpcResult; + /// Subscribe to best headers. 
+ #[subscription( + name = "subscribeNewHeads" => "newHead", + unsubscribe = "unsubscribeNewHeads", + item = C::Header + )] + async fn subscribe_new_heads(&self); + /// Subscribe to finalized headers. + #[subscription( + name = "subscribeFinalizedHeads" => "finalizedHead", + unsubscribe = "unsubscribeFinalizedHeads", + item = C::Header + )] + async fn subscribe_finalized_heads(&self); } /// RPC methods of Substrate `author` namespace, that we are using. @@ -106,15 +114,6 @@ pub(crate) trait SubstrateState { ) -> RpcResult>; } -/// RPC methods that we are using for a certain finality gadget. -#[async_trait] -pub trait SubstrateFinalityClient { - /// Subscribe to finality justifications. - async fn subscribe_justifications( - client: &WsClient, - ) -> Result, ClientError>; -} - /// RPC methods of Substrate `grandpa` namespace, that we are using. #[rpc(client, client_bounds(C: ChainWithGrandpa), namespace = "grandpa")] pub(crate) trait SubstrateGrandpa { @@ -123,17 +122,6 @@ pub(crate) trait SubstrateGrandpa { async fn subscribe_justifications(&self); } -/// RPC finality methods of Substrate `grandpa` namespace, that we are using. -pub struct SubstrateGrandpaFinalityClient; -#[async_trait] -impl SubstrateFinalityClient for SubstrateGrandpaFinalityClient { - async fn subscribe_justifications( - client: &WsClient, - ) -> Result, ClientError> { - SubstrateGrandpaClient::::subscribe_justifications(client).await - } -} - // TODO: Use `ChainWithBeefy` instead of `Chain` after #1606 is merged /// RPC methods of Substrate `beefy` namespace, that we are using. #[rpc(client, client_bounds(C: Chain), namespace = "beefy")] @@ -143,18 +131,6 @@ pub(crate) trait SubstrateBeefy { async fn subscribe_justifications(&self); } -/// RPC finality methods of Substrate `beefy` namespace, that we are using. 
-pub struct SubstrateBeefyFinalityClient; -// TODO: Use `ChainWithBeefy` instead of `Chain` after #1606 is merged -#[async_trait] -impl SubstrateFinalityClient for SubstrateBeefyFinalityClient { - async fn subscribe_justifications( - client: &WsClient, - ) -> Result, ClientError> { - SubstrateBeefyClient::::subscribe_justifications(client).await - } -} - /// RPC methods of Substrate `system` frame pallet, that we are using. #[rpc(client, client_bounds(C: Chain), namespace = "system")] pub(crate) trait SubstrateFrameSystem { diff --git a/bridges/relays/client-substrate/src/client/subscription.rs b/bridges/relays/client-substrate/src/client/subscription.rs new file mode 100644 index 000000000000..43a46573f987 --- /dev/null +++ b/bridges/relays/client-substrate/src/client/subscription.rs @@ -0,0 +1,239 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::error::Result as ClientResult; + +use async_std::{ + channel::{bounded, Receiver, Sender}, + stream::StreamExt, +}; +use futures::{FutureExt, Stream}; +use jsonrpsee::core::ClientError; +use sp_runtime::DeserializeOwned; +use std::{ + fmt::Debug, + pin::Pin, + result::Result as StdResult, + task::{Context, Poll}, +}; + +/// Once channel reaches this capacity, the subscription breaks. 
+const CHANNEL_CAPACITY: usize = 128; + +/// Structure describing a stream. +#[derive(Clone)] +pub struct StreamDescription { + stream_name: String, + chain_name: String, +} + +impl StreamDescription { + /// Create a new instance of `StreamDescription`. + pub fn new(stream_name: String, chain_name: String) -> Self { + Self { stream_name, chain_name } + } + + /// Get a stream description. + fn get(&self) -> String { + format!("{} stream of {}", self.stream_name, self.chain_name) + } +} + +/// Chainable stream that transforms items of type `Result` to items of type `T`. +/// +/// If it encounters an item of type `Err`, it returns `Poll::Ready(None)` +/// and terminates the underlying stream. +struct Unwrap>, T, E> { + desc: StreamDescription, + stream: Option, +} + +impl>, T, E> Unwrap { + /// Create a new instance of `Unwrap`. + pub fn new(desc: StreamDescription, stream: S) -> Self { + Self { desc, stream: Some(stream) } + } +} + +impl> + Unpin, T: DeserializeOwned, E: Debug> Stream + for Unwrap +{ + type Item = T; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Poll::Ready(match self.stream.as_mut() { + Some(subscription) => match futures::ready!(Pin::new(subscription).poll_next(cx)) { + Some(Ok(item)) => Some(item), + Some(Err(e)) => { + self.stream.take(); + log::debug!( + target: "bridge", + "{} has returned error: {:?}. It may need to be restarted", + self.desc.get(), + e, + ); + None + }, + None => { + self.stream.take(); + log::debug!( + target: "bridge", + "{} has returned `None`. It may need to be restarted", + self.desc.get() + ); + None + }, + }, + None => None, + }) + } +} + +/// Subscription factory that produces subscriptions, sharing the same background thread. +#[derive(Clone)] +pub struct SubscriptionBroadcaster { + desc: StreamDescription, + subscribers_sender: Sender>, +} + +impl SubscriptionBroadcaster { + /// Create new subscription factory. 
+ pub fn new(subscription: Subscription) -> StdResult> { + // It doesn't make sense to further broadcast a broadcasted subscription. + if subscription.is_broadcasted { + return Err(subscription) + } + + let desc = subscription.desc().clone(); + let (subscribers_sender, subscribers_receiver) = bounded(CHANNEL_CAPACITY); + async_std::task::spawn(background_worker(subscription, subscribers_receiver)); + Ok(Self { desc, subscribers_sender }) + } + + /// Produce new subscription. + pub async fn subscribe(&self) -> ClientResult> { + let (items_sender, items_receiver) = bounded(CHANNEL_CAPACITY); + self.subscribers_sender.try_send(items_sender)?; + + Ok(Subscription::new_broadcasted(self.desc.clone(), items_receiver)) + } +} + +/// Subscription to some chain events. +pub struct Subscription { + desc: StreamDescription, + subscription: Box + Unpin + Send>, + is_broadcasted: bool, +} + +impl Subscription { + /// Create new forwarded subscription. + pub fn new_forwarded( + desc: StreamDescription, + subscription: impl Stream> + Unpin + Send + 'static, + ) -> Self { + Self { + desc: desc.clone(), + subscription: Box::new(Unwrap::new(desc, subscription)), + is_broadcasted: false, + } + } + + /// Create new broadcasted subscription. + pub fn new_broadcasted( + desc: StreamDescription, + subscription: impl Stream + Unpin + Send + 'static, + ) -> Self { + Self { desc, subscription: Box::new(subscription), is_broadcasted: true } + } + + /// Get the description of the underlying stream + pub fn desc(&self) -> &StreamDescription { + &self.desc + } +} + +impl Stream for Subscription { + type Item = T; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Poll::Ready(futures::ready!(Pin::new(&mut self.subscription).poll_next(cx))) + } +} + +/// Background worker that is executed in tokio context as `jsonrpsee` requires. +/// +/// This task may exit under some circumstances. It'll send the correspondent +/// message (`Err` or `None`) to all known listeners. 
Also, when it stops, all +/// subsequent reads and new subscribers will get the connection error (`ChannelError`). +async fn background_worker( + mut subscription: Subscription, + mut subscribers_receiver: Receiver>, +) { + fn log_task_exit(desc: &StreamDescription, reason: &str) { + log::debug!( + target: "bridge", + "Background task of subscription broadcaster for {} has stopped: {}", + desc.get(), + reason, + ); + } + + // wait for first subscriber until actually starting subscription + let subscriber = match subscribers_receiver.next().await { + Some(subscriber) => subscriber, + None => { + // it means that the last subscriber/factory has been dropped, so we need to + // exit too + return log_task_exit(subscription.desc(), "client has stopped") + }, + }; + + // actually subscribe + let mut subscribers = vec![subscriber]; + + // start listening for new items and receivers + loop { + futures::select! { + subscriber = subscribers_receiver.next().fuse() => { + match subscriber { + Some(subscriber) => subscribers.push(subscriber), + None => { + // it means that the last subscriber/factory has been dropped, so we need to + // exit too + return log_task_exit(subscription.desc(), "client has stopped") + }, + } + }, + maybe_item = subscription.subscription.next().fuse() => { + match maybe_item { + Some(item) => { + // notify subscribers + subscribers.retain(|subscriber| { + let send_result = subscriber.try_send(item.clone()); + send_result.is_ok() + }); + } + None => { + // The underlying client has dropped, so we can't do anything here + // and need to stop the task. 
+ return log_task_exit(subscription.desc(), "stream has finished"); + } + } + }, + } + } +} diff --git a/bridges/relays/client-substrate/src/client/traits.rs b/bridges/relays/client-substrate/src/client/traits.rs new file mode 100644 index 000000000000..49f5c001c3f7 --- /dev/null +++ b/bridges/relays/client-substrate/src/client/traits.rs @@ -0,0 +1,230 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::{ + error::{Error, Result}, + AccountIdOf, AccountKeyPairOf, BlockNumberOf, Chain, ChainWithGrandpa, ChainWithTransactions, + HashOf, HeaderIdOf, HeaderOf, NonceOf, SignedBlockOf, SimpleRuntimeVersion, Subscription, + TransactionTracker, UnsignedTransaction, +}; + +use async_trait::async_trait; +use bp_runtime::{StorageDoubleMapKeyProvider, StorageMapKeyProvider}; +use codec::{Decode, Encode}; +use frame_support::weights::Weight; +use sp_core::{ + storage::{StorageData, StorageKey}, + Bytes, Pair, +}; +use sp_runtime::{traits::Header as _, transaction_validity::TransactionValidity}; +use sp_trie::StorageProof; +use sp_version::RuntimeVersion; +use std::fmt::Debug; + +/// Relay uses the `Client` to communicate with the node, connected to Substrate +/// chain `C`. 
+#[async_trait] +pub trait Client: 'static + Send + Sync + Clone + Debug { + /// Returns error if client has no connected peers or it believes it is far + /// behind the chain tip. + async fn ensure_synced(&self) -> Result<()>; + /// Reconnects the client. + async fn reconnect(&self) -> Result<()>; + + /// Return hash of the genesis block. + fn genesis_hash(&self) -> HashOf; + /// Get header hash by number. + async fn header_hash_by_number(&self, number: BlockNumberOf) -> Result>; + /// Get header by hash. + async fn header_by_hash(&self, hash: HashOf) -> Result>; + /// Get header by number. + async fn header_by_number(&self, number: BlockNumberOf) -> Result> { + self.header_by_hash(self.header_hash_by_number(number).await?).await + } + /// Get block by hash. + async fn block_by_hash(&self, hash: HashOf) -> Result>; + + /// Get best finalized header hash. + async fn best_finalized_header_hash(&self) -> Result>; + /// Get best finalized header number. + async fn best_finalized_header_number(&self) -> Result> { + Ok(*self.best_finalized_header().await?.number()) + } + /// Get best finalized header. + async fn best_finalized_header(&self) -> Result> { + self.header_by_hash(self.best_finalized_header_hash().await?).await + } + + /// Get best header. + async fn best_header(&self) -> Result>; + /// Get best header hash. + async fn best_header_hash(&self) -> Result> { + Ok(self.best_header().await?.hash()) + } + + /// Subscribe to new best headers. + async fn subscribe_best_headers(&self) -> Result>>; + /// Subscribe to new finalized headers. + async fn subscribe_finalized_headers(&self) -> Result>>; + + /// Subscribe to GRANDPA finality justifications. + async fn subscribe_grandpa_finality_justifications(&self) -> Result> + where + C: ChainWithGrandpa; + /// Generates a proof of key ownership for the given authority in the given set. 
+ async fn generate_grandpa_key_ownership_proof( + &self, + at: HashOf, + set_id: sp_consensus_grandpa::SetId, + authority_id: sp_consensus_grandpa::AuthorityId, + ) -> Result>; + + /// Subscribe to BEEFY finality justifications. + async fn subscribe_beefy_finality_justifications(&self) -> Result>; + + /// Return `tokenDecimals` property from the set of chain properties. + async fn token_decimals(&self) -> Result>; + /// Get runtime version of the connected chain. + async fn runtime_version(&self) -> Result; + /// Get partial runtime version, to use when signing transactions. + async fn simple_runtime_version(&self) -> Result; + /// Returns `true` if version guard can be started. + /// + /// There's no reason to run version guard when version mode is set to `Auto`. It can + /// lead to relay shutdown when chain is upgraded, even though we have explicitly + /// said that we don't want to shutdown. + fn can_start_version_guard(&self) -> bool; + + /// Read raw value from runtime storage. + async fn raw_storage_value( + &self, + at: HashOf, + storage_key: StorageKey, + ) -> Result>; + /// Read and decode value from runtime storage. + async fn storage_value( + &self, + at: HashOf, + storage_key: StorageKey, + ) -> Result> { + self.raw_storage_value(at, storage_key.clone()) + .await? + .map(|encoded_value| { + T::decode(&mut &encoded_value.0[..]).map_err(|e| { + Error::failed_to_read_storage_value::(at, storage_key, e.into()) + }) + }) + .transpose() + } + /// Read and decode value from runtime storage map. + /// + /// `pallet_prefix` is the name of the pallet (used in `construct_runtime`), which + /// "contains" the storage map. + async fn storage_map_value( + &self, + at: HashOf, + pallet_prefix: &str, + storage_key: &T::Key, + ) -> Result> { + self.storage_value(at, T::final_key(pallet_prefix, storage_key)).await + } + /// Read and decode value from runtime storage double map. 
+ /// + /// `pallet_prefix` is the name of the pallet (used in `construct_runtime`), which + /// "contains" the storage double map. + async fn storage_double_map_value( + &self, + at: HashOf, + pallet_prefix: &str, + key1: &T::Key1, + key2: &T::Key2, + ) -> Result> { + self.storage_value(at, T::final_key(pallet_prefix, key1, key2)).await + } + + /// Returns pending extrinsics from transaction pool. + async fn pending_extrinsics(&self) -> Result>; + /// Submit unsigned extrinsic for inclusion in a block. + /// + /// Note: The given transaction needs to be SCALE encoded beforehand. + async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result>; + /// Submit an extrinsic signed by given account. + /// + /// All calls of this method are synchronized, so there can't be more than one active + /// `submit_signed_extrinsic()` call. This guarantees that no nonces collision may happen + /// if all client instances are clones of the same initial `Client`. + /// + /// Note: The given transaction needs to be SCALE encoded beforehand. + async fn submit_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>; + /// Does exactly the same as `submit_signed_extrinsic`, but keeps watching for extrinsic status + /// after submission. + async fn submit_and_watch_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>; + /// Validate transaction at given block. + async fn validate_transaction( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result; + /// Returns weight of the given transaction. 
+ async fn estimate_extrinsic_weight( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result; + + /// Execute runtime call at given block. + async fn raw_state_call( + &self, + at: HashOf, + method: String, + arguments: Args, + ) -> Result; + /// Execute runtime call at given block, provided the input and output types. + /// It also performs the input encode and output decode. + async fn state_call( + &self, + at: HashOf, + method: String, + arguments: Args, + ) -> Result { + let encoded_arguments = arguments.encode(); + let encoded_output = self.raw_state_call(at, method.clone(), arguments).await?; + Ret::decode(&mut &encoded_output.0[..]).map_err(|e| { + Error::failed_state_call::(at, method, Bytes(encoded_arguments), e.into()) + }) + } + + /// Returns storage proof of given storage keys. + async fn prove_storage(&self, at: HashOf, keys: Vec) -> Result; +} diff --git a/bridges/relays/client-substrate/src/error.rs b/bridges/relays/client-substrate/src/error.rs index 2133c1888784..b09e2c7abdc6 100644 --- a/bridges/relays/client-substrate/src/error.rs +++ b/bridges/relays/client-substrate/src/error.rs @@ -16,13 +16,13 @@ //! Substrate node RPC errors. -use crate::SimpleRuntimeVersion; +use crate::{BlockNumberOf, Chain, HashOf, SimpleRuntimeVersion}; use bp_header_chain::SubmitFinalityProofCallExtras; use bp_polkadot_core::parachains::ParaId; use jsonrpsee::core::ClientError as RpcError; use relay_utils::MaybeConnectionError; use sc_rpc_api::system::Health; -use sp_core::storage::StorageKey; +use sp_core::{storage::StorageKey, Bytes}; use sp_runtime::transaction_validity::TransactionValidityError; use thiserror::Error; @@ -43,12 +43,10 @@ pub enum Error { /// The response from the server could not be SCALE decoded. #[error("Response parse failed: {0}")] ResponseParseFailed(#[from] codec::Error), - /// Account does not exist on the chain. 
- #[error("Account does not exist on the chain.")] - AccountDoesNotExist, - /// Runtime storage is missing some mandatory value. - #[error("Mandatory storage value is missing from the runtime storage.")] - MissingMandatoryStorageValue, + /// Internal channel error - communication channel is either closed, or full. + /// It can be solved with reconnect. + #[error("Internal communication channel error: {0:?}.")] + ChannelError(String), /// Required parachain head is not present at the relay chain. #[error("Parachain {0:?} head {1} is missing from the relay chain storage.")] MissingRequiredParachainHead(ParaId, u64), @@ -58,6 +56,14 @@ pub enum Error { /// The client we're connected to is not synced, so we can't rely on its state. #[error("Substrate client is not synced {0}.")] ClientNotSynced(Health), + /// Failed to get system health. + #[error("Failed to get system health of {chain} node: {error:?}.")] + FailedToGetSystemHealth { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, /// Failed to read best finalized header hash from given chain. #[error("Failed to read best finalized header hash of {chain}: {error:?}.")] FailedToReadBestFinalizedHeaderHash { @@ -74,6 +80,16 @@ pub enum Error { /// Underlying error. error: Box, }, + /// Failed to read header hash by number from given chain. + #[error("Failed to read header hash by number {number} of {chain}: {error:?}.")] + FailedToReadHeaderHashByNumber { + /// Name of the chain where the error has happened. + chain: String, + /// Number of the header we've tried to read. + number: String, + /// Underlying error. + error: Box, + }, /// Failed to read header by hash from given chain. #[error("Failed to read header {hash} of {chain}: {error:?}.")] FailedToReadHeaderByHash { @@ -84,35 +100,119 @@ pub enum Error { /// Underlying error. error: Box, }, - /// Failed to execute runtime call at given chain. 
- #[error("Failed to execute runtime call {method} at {chain}: {error:?}.")] - ErrorExecutingRuntimeCall { + /// Failed to read block by hash from given chain. + #[error("Failed to read block {hash} of {chain}: {error:?}.")] + FailedToReadBlockByHash { /// Name of the chain where the error has happened. chain: String, - /// Runtime method name. - method: String, + /// Hash of the header we've tried to read. + hash: String, /// Underlying error. error: Box, }, /// Failed to read sotrage value at given chain. #[error("Failed to read storage value {key:?} at {chain}: {error:?}.")] - FailedToReadRuntimeStorageValue { + FailedToReadStorageValue { /// Name of the chain where the error has happened. chain: String, + /// Hash of the block we've tried to read value from. + hash: String, /// Runtime storage key key: StorageKey, /// Underlying error. error: Box, }, + /// Failed to read runtime version of given chain. + #[error("Failed to read runtime version of {chain}: {error:?}.")] + FailedToReadRuntimeVersion { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, + /// Failed to get pending extrinsics. + #[error("Failed to get pending extrinsics of {chain}: {error:?}.")] + FailedToGetPendingExtrinsics { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, + /// Failed to submit transaction. + #[error("Failed to submit {chain} transaction: {error:?}.")] + FailedToSubmitTransaction { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, + /// Runtime call has failed. + #[error("Runtime call {method} with arguments {arguments:?} of chain {chain} at {hash} has failed: {error:?}.")] + FailedStateCall { + /// Name of the chain where the error has happened. + chain: String, + /// Hash of the block we've tried to call at. + hash: String, + /// Runtime API method. 
+		method: String,
+		/// Encoded method arguments.
+		arguments: Bytes,
+		/// Underlying error.
+		error: Box,
+	},
+	/// Failed to prove storage keys.
+	#[error("Failed to prove storage keys {storage_keys:?} of {chain} at {hash}: {error:?}.")]
+	FailedToProveStorage {
+		/// Name of the chain where the error has happened.
+		chain: String,
+		/// Hash of the block we've tried to prove keys at.
+		hash: String,
+		/// Storage keys we have tried to prove.
+		storage_keys: Vec,
+		/// Underlying error.
+		error: Box,
+	},
+	/// Failed to subscribe to best headers stream.
+	#[error("Failed to subscribe to {chain} best headers: {error:?}.")]
+	FailedToSubscribeBestHeaders {
+		/// Name of the chain where the error has happened.
+		chain: String,
+		/// Underlying error.
+		error: Box,
+	},
+	/// Failed to subscribe to finalized headers stream.
+	#[error("Failed to subscribe to {chain} finalized headers: {error:?}.")]
+	FailedToSubscribeFinalizedHeaders {
+		/// Name of the chain where the error has happened.
+		chain: String,
+		/// Underlying error.
+		error: Box,
+	},
+	/// Failed to subscribe to GRANDPA justifications stream.
+	#[error("Failed to subscribe to {chain} justifications: {error:?}.")]
+	FailedToSubscribeJustifications {
+		/// Name of the chain where the error has happened.
+		chain: String,
+		/// Underlying error.
+		error: Box,
+	},
+	/// Headers of the chain are finalized out of order. Maybe chain has been
+	/// restarted?
+	#[error("Finalized headers of {chain} are unordered: previously finalized {prev_number} vs new {next_number}")]
+	UnorderedFinalizedHeaders {
+		/// Name of the chain where the error has happened.
+		chain: String,
+		/// Previously finalized header number.
+		prev_number: String,
+		/// New finalized header number.
+		next_number: String,
+	},
 /// The bridge pallet is halted and all transactions will be rejected.
#[error("Bridge pallet is halted.")] BridgePalletIsHalted, /// The bridge pallet is not yet initialized and all transactions will be rejected. #[error("Bridge pallet is not initialized.")] BridgePalletIsNotInitialized, - /// There's no best head of the parachain at the `pallet-bridge-parachains` at the target side. - #[error("No head of the ParaId({0}) at the bridge parachains pallet at {1}.")] - NoParachainHeadAtTarget(u32, String), /// An error has happened when we have tried to parse storage proof. #[error("Error when parsing storage proof: {0:?}.")] StorageProofError(bp_runtime::StorageProofError), @@ -143,7 +243,19 @@ pub enum Error { impl From for Error { fn from(error: tokio::task::JoinError) -> Self { - Error::Custom(format!("Failed to wait tokio task: {error}")) + Error::ChannelError(format!("failed to wait tokio task: {error}")) + } +} + +impl From> for Error { + fn from(error: async_std::channel::TrySendError) -> Self { + Error::ChannelError(format!("`try_send` has failed: {error:?}")) + } +} + +impl From for Error { + fn from(error: async_std::channel::RecvError) -> Self { + Error::ChannelError(format!("`recv` has failed: {error:?}")) } } @@ -152,21 +264,170 @@ impl Error { pub fn boxed(self) -> Box { Box::new(self) } + + /// Returns nested error reference. + pub fn nested(&self) -> Option<&Self> { + match *self { + Self::FailedToReadBestFinalizedHeaderHash { ref error, .. } => Some(&**error), + Self::FailedToReadBestHeader { ref error, .. } => Some(&**error), + Self::FailedToReadHeaderHashByNumber { ref error, .. } => Some(&**error), + Self::FailedToReadHeaderByHash { ref error, .. } => Some(&**error), + Self::FailedToReadBlockByHash { ref error, .. } => Some(&**error), + Self::FailedToReadStorageValue { ref error, .. } => Some(&**error), + Self::FailedToReadRuntimeVersion { ref error, .. } => Some(&**error), + Self::FailedToGetPendingExtrinsics { ref error, .. } => Some(&**error), + Self::FailedToSubmitTransaction { ref error, .. 
} => Some(&**error),
+			Self::FailedStateCall { ref error, .. } => Some(&**error),
+			Self::FailedToProveStorage { ref error, .. } => Some(&**error),
+			Self::FailedToGetSystemHealth { ref error, .. } => Some(&**error),
+			Self::FailedToSubscribeBestHeaders { ref error, .. } => Some(&**error),
+			Self::FailedToSubscribeFinalizedHeaders { ref error, .. } => Some(&**error),
+			Self::FailedToSubscribeJustifications { ref error, .. } => Some(&**error),
+			_ => None,
+		}
+	}
+
+	/// Constructs `FailedToReadHeaderHashByNumber` variant.
+	pub fn failed_to_read_header_hash_by_number(
+		number: BlockNumberOf,
+		e: Error,
+	) -> Self {
+		Error::FailedToReadHeaderHashByNumber {
+			chain: C::NAME.into(),
+			number: format!("{number}"),
+			error: e.boxed(),
+		}
+	}
+
+	/// Constructs `FailedToReadHeaderByHash` variant.
+	pub fn failed_to_read_header_by_hash(hash: HashOf, e: Error) -> Self {
+		Error::FailedToReadHeaderByHash {
+			chain: C::NAME.into(),
+			hash: format!("{hash}"),
+			error: e.boxed(),
+		}
+	}
+
+	/// Constructs `FailedToReadBlockByHash` variant.
+	pub fn failed_to_read_block_by_hash(hash: HashOf, e: Error) -> Self {
+		Error::FailedToReadBlockByHash {
+			chain: C::NAME.into(),
+			hash: format!("{hash}"),
+			error: e.boxed(),
+		}
+	}
+
+	/// Constructs `FailedToReadBestFinalizedHeaderHash` variant.
+	pub fn failed_to_read_best_finalized_header_hash(e: Error) -> Self {
+		Error::FailedToReadBestFinalizedHeaderHash { chain: C::NAME.into(), error: e.boxed() }
+	}
+
+	/// Constructs `FailedToReadBestHeader` variant.
+	pub fn failed_to_read_best_header(e: Error) -> Self {
+		Error::FailedToReadBestHeader { chain: C::NAME.into(), error: e.boxed() }
+	}
+
+	/// Constructs `FailedToReadRuntimeVersion` variant.
+	pub fn failed_to_read_runtime_version(e: Error) -> Self {
+		Error::FailedToReadRuntimeVersion { chain: C::NAME.into(), error: e.boxed() }
+	}
+
+	/// Constructs `FailedToReadStorageValue` variant.
+ pub fn failed_to_read_storage_value( + at: HashOf, + key: StorageKey, + e: Error, + ) -> Self { + Error::FailedToReadStorageValue { + chain: C::NAME.into(), + hash: format!("{at}"), + key, + error: e.boxed(), + } + } + + /// Constructs `FailedToGetPendingExtrinsics` variant. + pub fn failed_to_get_pending_extrinsics(e: Error) -> Self { + Error::FailedToGetPendingExtrinsics { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToSubmitTransaction` variant. + pub fn failed_to_submit_transaction(e: Error) -> Self { + Error::FailedToSubmitTransaction { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedStateCall` variant. + pub fn failed_state_call( + at: HashOf, + method: String, + arguments: Bytes, + e: Error, + ) -> Self { + Error::FailedStateCall { + chain: C::NAME.into(), + hash: format!("{at}"), + method, + arguments, + error: e.boxed(), + } + } + + /// Constructs `FailedToProveStorage` variant. + pub fn failed_to_prove_storage( + at: HashOf, + storage_keys: Vec, + e: Error, + ) -> Self { + Error::FailedToProveStorage { + chain: C::NAME.into(), + hash: format!("{at}"), + storage_keys, + error: e.boxed(), + } + } + + /// Constructs `FailedToGetSystemHealth` variant. + pub fn failed_to_get_system_health(e: Error) -> Self { + Error::FailedToGetSystemHealth { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToSubscribeBestHeaders` variant. + pub fn failed_to_subscribe_best_headers(e: Error) -> Self { + Error::FailedToSubscribeBestHeaders { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToSubscribeFinalizedHeaders` variant. + pub fn failed_to_subscribe_finalized_headers(e: Error) -> Self { + Error::FailedToSubscribeFinalizedHeaders { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToSubscribeJustifications` variant. 
+	pub fn failed_to_subscribe_justification(e: Error) -> Self {
+		Error::FailedToSubscribeJustifications { chain: C::NAME.into(), error: e.boxed() }
+	}
+
+	/// Constructs `UnorderedFinalizedHeaders` variant.
+	pub fn unordered_finalized_headers(
+		prev_number: BlockNumberOf,
+		next_number: BlockNumberOf,
+	) -> Self {
+		Error::UnorderedFinalizedHeaders {
+			chain: C::NAME.into(),
+			prev_number: format!("{}", prev_number),
+			next_number: format!("{}", next_number),
+		}
+	}
 }

 impl MaybeConnectionError for Error {
 fn is_connection_error(&self) -> bool {
 match *self {
- Error::RpcError(RpcError::Transport(_)) |
- Error::RpcError(RpcError::RestartNeeded(_)) |
+ Error::ChannelError(_) => true,
+ Error::RpcError(ref e) =>
+ matches!(*e, RpcError::Transport(_) | RpcError::RestartNeeded(_),),
 Error::ClientNotSynced(_) => true,
- Error::FailedToReadBestFinalizedHeaderHash { ref error, .. } =>
- error.is_connection_error(),
- Error::FailedToReadBestHeader { ref error, .. } => error.is_connection_error(),
- Error::FailedToReadHeaderByHash { ref error, .. } => error.is_connection_error(),
- Error::ErrorExecutingRuntimeCall { ref error, .. } => error.is_connection_error(),
- Error::FailedToReadRuntimeStorageValue { ref error, .. } => error.is_connection_error(),
- _ => false,
+ Error::UnorderedFinalizedHeaders { ..
} => true, + _ => self.nested().map(|e| e.is_connection_error()).unwrap_or(false), } } } diff --git a/bridges/relays/client-substrate/src/guard.rs b/bridges/relays/client-substrate/src/guard.rs index 47454892cd03..3dbf95bff8e1 100644 --- a/bridges/relays/client-substrate/src/guard.rs +++ b/bridges/relays/client-substrate/src/guard.rs @@ -98,7 +98,7 @@ fn conditions_check_delay() -> Duration { } #[async_trait] -impl Environment for Client { +impl> Environment for Clnt { type Error = Error; async fn runtime_version(&mut self) -> Result { diff --git a/bridges/relays/client-substrate/src/lib.rs b/bridges/relays/client-substrate/src/lib.rs index d5b8d4dcced2..12a1c48c09c7 100644 --- a/bridges/relays/client-substrate/src/lib.rs +++ b/bridges/relays/client-substrate/src/lib.rs @@ -21,7 +21,6 @@ mod chain; mod client; mod error; -mod rpc; mod sync_header; mod transaction_tracker; @@ -37,14 +36,15 @@ pub use crate::{ AccountKeyPairOf, BlockWithJustification, CallOf, Chain, ChainWithBalances, ChainWithGrandpa, ChainWithMessages, ChainWithRuntimeVersion, ChainWithTransactions, ChainWithUtilityPallet, FullRuntimeUtilityPallet, MockedRuntimeUtilityPallet, Parachain, - RelayChain, SignParam, TransactionStatusOf, UnsignedTransaction, UtilityPallet, + RelayChain, SignParam, SignedBlockOf, TransactionStatusOf, UnsignedTransaction, + UtilityPallet, }, client::{ - is_ancient_block, ChainRuntimeVersion, Client, OpaqueGrandpaAuthoritiesSet, - SimpleRuntimeVersion, Subscription, ANCIENT_BLOCK_THRESHOLD, + is_ancient_block, rpc_with_caching as new, ChainRuntimeVersion, Client, + OpaqueGrandpaAuthoritiesSet, RpcWithCachingClient, SimpleRuntimeVersion, StreamDescription, + Subscription, ANCIENT_BLOCK_THRESHOLD, }, error::{Error, Result}, - rpc::{SubstrateBeefyFinalityClient, SubstrateFinalityClient, SubstrateGrandpaFinalityClient}, sync_header::SyncHeader, transaction_tracker::TransactionTracker, }; diff --git a/bridges/relays/client-substrate/src/metrics/float_storage_value.rs 
b/bridges/relays/client-substrate/src/metrics/float_storage_value.rs index 7bb92693b38d..27c9d8cd7a8b 100644 --- a/bridges/relays/client-substrate/src/metrics/float_storage_value.rs +++ b/bridges/relays/client-substrate/src/metrics/float_storage_value.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use crate::{chain::Chain, client::Client, Error as SubstrateError}; +use crate::{Chain, Client, Error as SubstrateError}; use async_std::sync::{Arc, RwLock}; use async_trait::async_trait; @@ -66,20 +66,20 @@ impl FloatStorageValue for FixedU128OrOne { /// Metric that represents fixed-point runtime storage value as float gauge. #[derive(Clone, Debug)] -pub struct FloatStorageValueMetric { +pub struct FloatStorageValueMetric { value_converter: V, - client: Client, + client: Clnt, storage_key: StorageKey, metric: Gauge, shared_value_ref: F64SharedRef, - _phantom: PhantomData, + _phantom: PhantomData<(C, V)>, } -impl FloatStorageValueMetric { +impl FloatStorageValueMetric { /// Create new metric. 
pub fn new( value_converter: V, - client: Client, + client: Clnt, storage_key: StorageKey, name: String, help: String, @@ -101,32 +101,39 @@ impl FloatStorageValueMetric { } } -impl Metric for FloatStorageValueMetric { +impl, V: FloatStorageValue> Metric + for FloatStorageValueMetric +{ fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { register(self.metric.clone(), registry).map(drop) } } #[async_trait] -impl StandaloneMetric for FloatStorageValueMetric { +impl, V: FloatStorageValue> StandaloneMetric + for FloatStorageValueMetric +{ fn update_interval(&self) -> Duration { C::AVERAGE_BLOCK_INTERVAL * UPDATE_INTERVAL_IN_BLOCKS } async fn update(&self) { - let value = self - .client - .raw_storage_value(self.storage_key.clone(), None) - .await - .and_then(|maybe_storage_value| { - self.value_converter.decode(maybe_storage_value).map(|maybe_fixed_point_value| { - maybe_fixed_point_value.map(|fixed_point_value| { - fixed_point_value.into_inner().unique_saturated_into() as f64 / - V::Value::DIV.unique_saturated_into() as f64 - }) + let value = async move { + let best_header_hash = self.client.best_header_hash().await?; + let maybe_storage_value = self + .client + .raw_storage_value(best_header_hash, self.storage_key.clone()) + .await?; + self.value_converter.decode(maybe_storage_value).map(|maybe_fixed_point_value| { + maybe_fixed_point_value.map(|fixed_point_value| { + fixed_point_value.into_inner().unique_saturated_into() as f64 / + V::Value::DIV.unique_saturated_into() as f64 }) }) - .map_err(|e| e.to_string()); + } + .await + .map_err(|e| e.to_string()); + relay_utils::metrics::set_gauge_value(&self.metric, value.clone()); *self.shared_value_ref.write().await = value.ok().and_then(|x| x); } diff --git a/bridges/relays/client-substrate/src/transaction_tracker.rs b/bridges/relays/client-substrate/src/transaction_tracker.rs index b181a945c2c1..b4801c89f51e 100644 --- a/bridges/relays/client-substrate/src/transaction_tracker.rs +++ 
b/bridges/relays/client-substrate/src/transaction_tracker.rs @@ -16,7 +16,7 @@ //! Helper for tracking transaction invalidation events. -use crate::{Chain, Client, Error, HashOf, HeaderIdOf, Subscription, TransactionStatusOf}; +use crate::{Chain, Error, HashOf, HeaderIdOf, Subscription, TransactionStatusOf}; use async_trait::async_trait; use futures::{future::Either, Future, FutureExt, Stream, StreamExt}; @@ -31,8 +31,10 @@ pub trait Environment: Send + Sync { async fn header_id_by_hash(&self, hash: HashOf) -> Result, Error>; } +// TODO (https://github.com/paritytech/parity-bridges-common/issues/2133): remove `Environment` trait +// after test client is implemented #[async_trait] -impl Environment for Client { +impl> Environment for T { async fn header_id_by_hash(&self, hash: HashOf) -> Result, Error> { self.header_by_hash(hash).await.map(|h| HeaderId(*h.number(), hash)) } @@ -76,6 +78,21 @@ impl> TransactionTracker { Self { environment, stall_timeout, transaction_hash, subscription } } + // TODO (https://github.com/paritytech/parity-bridges-common/issues/2133): remove me after + // test client is implemented + /// Converts self into tracker with different environment. + pub fn switch_environment>( + self, + environment: NewE, + ) -> TransactionTracker { + TransactionTracker { + environment, + stall_timeout: self.stall_timeout, + transaction_hash: self.transaction_hash, + subscription: self.subscription, + } + } + /// Wait for final transaction status and return it along with last known internal invalidation /// status. 
async fn do_wait( @@ -88,7 +105,7 @@ impl> TransactionTracker { let wait_for_invalidation = watch_transaction_status::<_, C, _>( self.environment, self.transaction_hash, - self.subscription.into_stream(), + self.subscription, ); futures::pin_mut!(wait_for_stall_timeout, wait_for_invalidation); @@ -284,7 +301,7 @@ async fn watch_transaction_status< #[cfg(test)] mod tests { use super::*; - use crate::test_chain::TestChain; + use crate::{test_chain::TestChain, StreamDescription}; use futures::{FutureExt, SinkExt}; use sc_transaction_pool_api::TransactionStatus; @@ -306,22 +323,27 @@ mod tests { TrackedTransactionStatus>, InvalidationStatus>, )> { - let (cancel_sender, _cancel_receiver) = futures::channel::oneshot::channel(); let (mut sender, receiver) = futures::channel::mpsc::channel(1); let tx_tracker = TransactionTracker::::new( TestEnvironment(Ok(HeaderId(0, Default::default()))), Duration::from_secs(0), Default::default(), - Subscription(async_std::sync::Mutex::new(receiver), cancel_sender), + Subscription::new_forwarded( + StreamDescription::new("test".into(), "test".into()), + receiver, + ), ); - let wait_for_stall_timeout = futures::future::pending(); + // we can't do `.now_or_never()` on `do_wait()` call, because `Subscription` has its own + // background thread, which may cause additional async task switches => let's leave some + // relatively small timeout here + let wait_for_stall_timeout = async_std::task::sleep(std::time::Duration::from_millis(100)); let wait_for_stall_timeout_rest = futures::future::ready(()); - sender.send(Some(status)).await.unwrap(); - tx_tracker - .do_wait(wait_for_stall_timeout, wait_for_stall_timeout_rest) - .now_or_never() - .map(|(ts, is)| (ts, is.unwrap())) + sender.send(Ok(status)).await.unwrap(); + + let (ts, is) = + tx_tracker.do_wait(wait_for_stall_timeout, wait_for_stall_timeout_rest).await; + is.map(|is| (ts, is)) } #[async_std::test] @@ -429,13 +451,15 @@ mod tests { #[async_std::test] async fn 
lost_on_timeout_when_waiting_for_invalidation_status() { - let (cancel_sender, _cancel_receiver) = futures::channel::oneshot::channel(); let (_sender, receiver) = futures::channel::mpsc::channel(1); let tx_tracker = TransactionTracker::::new( TestEnvironment(Ok(HeaderId(0, Default::default()))), Duration::from_secs(0), Default::default(), - Subscription(async_std::sync::Mutex::new(receiver), cancel_sender), + Subscription::new_forwarded( + StreamDescription::new("test".into(), "test".into()), + receiver, + ), ); let wait_for_stall_timeout = futures::future::ready(()).shared(); diff --git a/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs b/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs index 6246bdbf0151..d985d35c9e80 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs @@ -123,11 +123,11 @@ macro_rules! declare_chain_connection_params_cli_schema { #[allow(dead_code)] pub async fn into_client( self, - ) -> anyhow::Result> { + ) -> anyhow::Result<$crate::cli::DefaultClient> { let chain_runtime_version = self .[<$chain_prefix _runtime_version>] .into_runtime_version(Chain::RUNTIME_VERSION)?; - Ok(relay_substrate_client::Client::new(relay_substrate_client::ConnectionParams { + Ok(relay_substrate_client::new(relay_substrate_client::ConnectionParams { uri: self.[<$chain_prefix _uri>], host: self.[<$chain_prefix _host>], port: self.[<$chain_prefix _port>], diff --git a/bridges/relays/lib-substrate-relay/src/cli/detect_equivocations.rs b/bridges/relays/lib-substrate-relay/src/cli/detect_equivocations.rs index b98e41b2a43e..3921685d9e8a 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/detect_equivocations.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/detect_equivocations.rs @@ -23,7 +23,7 @@ use crate::{ }; use async_trait::async_trait; -use relay_substrate_client::ChainWithTransactions; +use relay_substrate_client::{ChainWithTransactions, Client}; use 
structopt::StructOpt; /// Start equivocation detection loop. diff --git a/bridges/relays/lib-substrate-relay/src/cli/mod.rs b/bridges/relays/lib-substrate-relay/src/cli/mod.rs index 270608bf6ed8..ddb3e416dc32 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/mod.rs @@ -35,6 +35,11 @@ pub mod relay_parachains; /// The target that will be used when publishing logs related to this pallet. pub const LOG_TARGET: &str = "bridge"; +/// Default Substrate client type that we are using. We'll use it all over the glue CLI code +/// to avoid multiple level generic arguments and constraints. We still allow usage of other +/// clients in the **core logic code**. +pub type DefaultClient = relay_substrate_client::RpcWithCachingClient; + /// Lane id. #[derive(Debug, Clone, PartialEq, Eq)] pub struct HexLaneId(pub [u8; 4]); diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs index 093f98ef21ed..ea92a0c9acce 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs @@ -29,6 +29,7 @@ use crate::{ finality::SubstrateFinalitySyncPipeline, HeadersToRelay, }; +use relay_substrate_client::Client; /// Chain headers relaying params. 
#[derive(StructOpt)] diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs index a796df6721b8..05a061c2ea60 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs @@ -37,7 +37,7 @@ use structopt::StructOpt; use futures::{FutureExt, TryFutureExt}; use crate::{ - cli::{bridge::MessagesCliBridge, HexLaneId, PrometheusParams}, + cli::{bridge::MessagesCliBridge, DefaultClient, HexLaneId, PrometheusParams}, messages_lane::{MessagesRelayLimits, MessagesRelayParams}, on_demand::OnDemandRelay, HeadersToRelay, TaggedAccount, TransactionParams, @@ -46,7 +46,7 @@ use bp_messages::LaneId; use bp_runtime::BalanceOf; use relay_substrate_client::{ AccountIdOf, AccountKeyPairOf, Chain, ChainWithBalances, ChainWithMessages, - ChainWithRuntimeVersion, ChainWithTransactions, Client, + ChainWithRuntimeVersion, ChainWithTransactions, }; use relay_utils::metrics::MetricsParams; use sp_core::Pair; @@ -118,7 +118,7 @@ impl< /// Parameters that are associated with one side of the bridge. pub struct BridgeEndCommonParams { /// Chain client. - pub client: Client, + pub client: DefaultClient, /// Params used for sending transactions to the chain. pub tx_params: TransactionParams>, /// Accounts, which balances are exposed as metrics by the relay process. 
@@ -165,7 +165,7 @@ where target_to_source_headers_relay: Arc>, lane_id: LaneId, maybe_limits: Option, - ) -> MessagesRelayParams { + ) -> MessagesRelayParams, DefaultClient> { MessagesRelayParams { source_client: self.source.client.clone(), source_transaction_params: self.source.tx_params.clone(), @@ -317,28 +317,30 @@ where // Need 2x capacity since we consider both directions for each lane let mut message_relays = Vec::with_capacity(lanes.len() * 2); for lane in lanes { - let left_to_right_messages = crate::messages_lane::run::< - ::MessagesLane, - >(self.left_to_right().messages_relay_params( - left_to_right_on_demand_headers.clone(), - right_to_left_on_demand_headers.clone(), - lane, - Self::L2R::maybe_messages_limits(), - )) - .map_err(|e| anyhow::format_err!("{}", e)) - .boxed(); + let left_to_right_messages = + crate::messages_lane::run::<::MessagesLane, _, _>( + self.left_to_right().messages_relay_params( + left_to_right_on_demand_headers.clone(), + right_to_left_on_demand_headers.clone(), + lane, + Self::L2R::maybe_messages_limits(), + ), + ) + .map_err(|e| anyhow::format_err!("{}", e)) + .boxed(); message_relays.push(left_to_right_messages); - let right_to_left_messages = crate::messages_lane::run::< - ::MessagesLane, - >(self.right_to_left().messages_relay_params( - right_to_left_on_demand_headers.clone(), - left_to_right_on_demand_headers.clone(), - lane, - Self::R2L::maybe_messages_limits(), - )) - .map_err(|e| anyhow::format_err!("{}", e)) - .boxed(); + let right_to_left_messages = + crate::messages_lane::run::<::MessagesLane, _, _>( + self.right_to_left().messages_relay_params( + right_to_left_on_demand_headers.clone(), + left_to_right_on_demand_headers.clone(), + lane, + Self::R2L::maybe_messages_limits(), + ), + ) + .map_err(|e| anyhow::format_err!("{}", e)) + .boxed(); message_relays.push(right_to_left_messages); } diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs 
b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs index 7f6f40777823..8104be7af807 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs @@ -23,6 +23,7 @@ use crate::{ cli::{ bridge::{CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge}, relay_headers_and_messages::{Full2WayBridgeBase, Full2WayBridgeCommonParams}, + DefaultClient, }, finality::SubstrateFinalitySyncPipeline, on_demand::{ @@ -52,9 +53,9 @@ pub struct ParachainToParachainBridge< pub common: Full2WayBridgeCommonParams<::Target, ::Target>, /// Client of the left relay chain. - pub left_relay: Client<::SourceRelay>, + pub left_relay: DefaultClient<::SourceRelay>, /// Client of the right relay chain. - pub right_relay: Client<::SourceRelay>, + pub right_relay: DefaultClient<::SourceRelay>, } /// Create set of configuration objects specific to parachain-to-parachain relayer. 
@@ -175,25 +176,33 @@ where ) .await?; - let left_relay_to_right_on_demand_headers = - OnDemandHeadersRelay::<::RelayFinality>::new( - self.left_relay.clone(), - self.common.right.client.clone(), - self.common.right.tx_params.clone(), - self.common.shared.headers_to_relay(), - Some(self.common.metrics_params.clone()), - ); - let right_relay_to_left_on_demand_headers = - OnDemandHeadersRelay::<::RelayFinality>::new( - self.right_relay.clone(), - self.common.left.client.clone(), - self.common.left.tx_params.clone(), - self.common.shared.headers_to_relay(), - Some(self.common.metrics_params.clone()), - ); + let left_relay_to_right_on_demand_headers = OnDemandHeadersRelay::< + ::RelayFinality, + _, + _, + >::new( + self.left_relay.clone(), + self.common.right.client.clone(), + self.common.right.tx_params.clone(), + self.common.shared.headers_to_relay(), + Some(self.common.metrics_params.clone()), + ); + let right_relay_to_left_on_demand_headers = OnDemandHeadersRelay::< + ::RelayFinality, + _, + _, + >::new( + self.right_relay.clone(), + self.common.left.client.clone(), + self.common.left.tx_params.clone(), + self.common.shared.headers_to_relay(), + Some(self.common.metrics_params.clone()), + ); let left_to_right_on_demand_parachains = OnDemandParachainsRelay::< ::ParachainFinality, + _, + _, >::new( self.left_relay.clone(), self.common.right.client.clone(), @@ -202,6 +211,8 @@ where ); let right_to_left_on_demand_parachains = OnDemandParachainsRelay::< ::ParachainFinality, + _, + _, >::new( self.right_relay.clone(), self.common.left.client.clone(), diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs index 5911fe49df4a..6c078973fedc 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs +++ 
b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs @@ -26,6 +26,7 @@ use crate::{ RelayToRelayHeadersCliBridge, }, relay_headers_and_messages::{Full2WayBridgeBase, Full2WayBridgeCommonParams}, + DefaultClient, }, finality::SubstrateFinalitySyncPipeline, on_demand::{ @@ -54,7 +55,7 @@ pub struct RelayToParachainBridge< pub common: Full2WayBridgeCommonParams<::Target, ::Target>, /// Client of the right relay chain. - pub right_relay: Client<::SourceRelay>, + pub right_relay: DefaultClient<::SourceRelay>, } /// Create set of configuration objects specific to relay-to-parachain relayer. @@ -167,23 +168,28 @@ where .await?; let left_to_right_on_demand_headers = - OnDemandHeadersRelay::<::Finality>::new( + OnDemandHeadersRelay::<::Finality, _, _>::new( self.common.left.client.clone(), self.common.right.client.clone(), self.common.right.tx_params.clone(), self.common.shared.headers_to_relay(), None, ); - let right_relay_to_left_on_demand_headers = - OnDemandHeadersRelay::<::RelayFinality>::new( - self.right_relay.clone(), - self.common.left.client.clone(), - self.common.left.tx_params.clone(), - self.common.shared.headers_to_relay(), - Some(self.common.metrics_params.clone()), - ); + let right_relay_to_left_on_demand_headers = OnDemandHeadersRelay::< + ::RelayFinality, + _, + _, + >::new( + self.right_relay.clone(), + self.common.left.client.clone(), + self.common.left.tx_params.clone(), + self.common.shared.headers_to_relay(), + Some(self.common.metrics_params.clone()), + ); let right_to_left_on_demand_parachains = OnDemandParachainsRelay::< ::ParachainFinality, + _, + _, >::new( self.right_relay.clone(), self.common.left.client.clone(), diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs index 832df4ae4003..3f8c8bb40c99 100644 --- 
a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs @@ -32,7 +32,7 @@ use crate::{ on_demand::{headers::OnDemandHeadersRelay, OnDemandRelay}, }; use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, ChainWithRuntimeVersion, ChainWithTransactions, + AccountIdOf, AccountKeyPairOf, ChainWithRuntimeVersion, ChainWithTransactions, Client, }; use sp_core::Pair; @@ -148,7 +148,7 @@ where .await?; let left_to_right_on_demand_headers = - OnDemandHeadersRelay::<::Finality>::new( + OnDemandHeadersRelay::<::Finality, _, _>::new( self.common.left.client.clone(), self.common.right.client.clone(), self.common.right.tx_params.clone(), @@ -156,7 +156,7 @@ where None, ); let right_to_left_on_demand_headers = - OnDemandHeadersRelay::<::Finality>::new( + OnDemandHeadersRelay::<::Finality, _, _>::new( self.common.right.client.clone(), self.common.left.client.clone(), self.common.left.tx_params.clone(), diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs index 943feba072e4..a17ae7c0c01f 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs @@ -29,7 +29,8 @@ use structopt::StructOpt; use bp_messages::MessageNonce; use bp_runtime::HeaderIdProvider; use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, BalanceOf, Chain, ChainWithRuntimeVersion, ChainWithTransactions, + AccountIdOf, AccountKeyPairOf, BalanceOf, Chain, ChainWithRuntimeVersion, + ChainWithTransactions, Client, }; use relay_utils::UniqueSaturatedInto; @@ -116,7 +117,7 @@ where let target_sign = data.target_sign.to_keypair::()?; let target_transactions_mortality = data.target_sign.transactions_mortality()?; - crate::messages_lane::run::(MessagesRelayParams { + crate::messages_lane::run::(MessagesRelayParams { 
source_client, source_transaction_params: TransactionParams { signer: source_sign, diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs index 00f8cf79ef1f..77cd395ff722 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs @@ -21,7 +21,7 @@ use async_trait::async_trait; use bp_polkadot_core::BlockNumber as RelayBlockNumber; use bp_runtime::HeaderIdProvider; use parachains_relay::parachains_loop::{AvailableHeader, SourceClient, TargetClient}; -use relay_substrate_client::Parachain; +use relay_substrate_client::{Client, Parachain}; use relay_utils::metrics::{GlobalMetrics, StandaloneMetric}; use std::sync::Arc; use structopt::StructOpt; @@ -30,7 +30,7 @@ use crate::{ cli::{ bridge::{CliBridgeBase, ParachainToRelayHeadersCliBridge}, chain_schema::*, - PrometheusParams, + DefaultClient, PrometheusParams, }, parachains::{source::ParachainsSource, target::ParachainsTarget, ParachainsPipelineAdapter}, TransactionParams, @@ -72,16 +72,19 @@ pub struct RelayParachainHeadParams { #[async_trait] pub trait ParachainsRelayer: ParachainToRelayHeadersCliBridge where - ParachainsSource: + ParachainsSource>: SourceClient>, - ParachainsTarget: - TargetClient>, + ParachainsTarget< + Self::ParachainFinality, + DefaultClient, + DefaultClient, + >: TargetClient>, ::Source: Parachain, { /// Start relaying parachains finality. 
async fn relay_parachains(data: RelayParachainsParams) -> anyhow::Result<()> { let source_chain_client = data.source.into_client::().await?; - let source_client = ParachainsSource::::new( + let source_client = ParachainsSource::::new( source_chain_client.clone(), Arc::new(Mutex::new(AvailableHeader::Missing)), ); @@ -91,7 +94,7 @@ where mortality: data.target_sign.target_transactions_mortality, }; let target_chain_client = data.target.into_client::().await?; - let target_client = ParachainsTarget::::new( + let target_client = ParachainsTarget::::new( source_chain_client, target_chain_client, target_transaction_params, @@ -121,7 +124,7 @@ where .map_err(|e| anyhow::format_err!("{}", e))? .id(); - let source_client = ParachainsSource::::new( + let source_client = ParachainsSource::::new( source_chain_client.clone(), Arc::new(Mutex::new(AvailableHeader::Missing)), ); @@ -131,7 +134,7 @@ where mortality: data.target_sign.target_transactions_mortality, }; let target_chain_client = data.target.into_client::().await?; - let target_client = ParachainsTarget::::new( + let target_client = ParachainsTarget::::new( source_chain_client, target_chain_client, target_transaction_params, diff --git a/bridges/relays/lib-substrate-relay/src/equivocation/mod.rs b/bridges/relays/lib-substrate-relay/src/equivocation/mod.rs index f6d58cbaa4ab..f8077923b820 100644 --- a/bridges/relays/lib-substrate-relay/src/equivocation/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/equivocation/mod.rs @@ -69,7 +69,7 @@ pub trait SubstrateEquivocationDetectionPipeline: /// Add relay guards if required. async fn start_relay_guards( - source_client: &Client, + source_client: &impl Client, enable_version_guard: bool, ) -> relay_substrate_client::Result<()> { if enable_version_guard { @@ -199,8 +199,8 @@ macro_rules! generate_report_equivocation_call_builder { /// Run Substrate-to-Substrate equivocations detection loop. 
pub async fn run( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, source_transaction_params: TransactionParams>, metrics_params: MetricsParams, ) -> anyhow::Result<()> { @@ -212,8 +212,8 @@ pub async fn run( ); equivocation_detector::run( - SubstrateEquivocationSource::

::new(source_client, source_transaction_params), - SubstrateEquivocationTarget::

::new(target_client), + SubstrateEquivocationSource::::new(source_client, source_transaction_params), + SubstrateEquivocationTarget::::new(target_client), P::TargetChain::AVERAGE_BLOCK_INTERVAL, metrics_params, futures::future::pending(), diff --git a/bridges/relays/lib-substrate-relay/src/equivocation/source.rs b/bridges/relays/lib-substrate-relay/src/equivocation/source.rs index a0c7dcf5cbc3..66d651600a1e 100644 --- a/bridges/relays/lib-substrate-relay/src/equivocation/source.rs +++ b/bridges/relays/lib-substrate-relay/src/equivocation/source.rs @@ -35,29 +35,35 @@ use relay_substrate_client::{ use relay_utils::relay_loop::Client as RelayClient; /// Substrate node as equivocation source. -pub struct SubstrateEquivocationSource { - client: Client, +pub struct SubstrateEquivocationSource { + client: SourceClnt, transaction_params: TransactionParams>, } -impl SubstrateEquivocationSource

{ +impl> + SubstrateEquivocationSource +{ /// Create new instance of `SubstrateEquivocationSource`. pub fn new( - client: Client, + client: SourceClnt, transaction_params: TransactionParams>, ) -> Self { Self { client, transaction_params } } } -impl Clone for SubstrateEquivocationSource

{ +impl> Clone + for SubstrateEquivocationSource +{ fn clone(&self) -> Self { Self { client: self.client.clone(), transaction_params: self.transaction_params.clone() } } } #[async_trait] -impl RelayClient for SubstrateEquivocationSource

{ +impl> RelayClient + for SubstrateEquivocationSource +{ type Error = Error; async fn reconnect(&mut self) -> Result<(), Error> { @@ -66,8 +72,9 @@ impl RelayClient for SubstrateEquivoc } #[async_trait] -impl - SourceClientBase> for SubstrateEquivocationSource

+impl> + SourceClientBase> + for SubstrateEquivocationSource { type FinalityProofsStream = SubstrateFinalityProofsStream

; @@ -77,10 +84,11 @@ impl } #[async_trait] -impl - SourceClient> for SubstrateEquivocationSource

+impl> + SourceClient> + for SubstrateEquivocationSource { - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn report_equivocation( &self, diff --git a/bridges/relays/lib-substrate-relay/src/equivocation/target.rs b/bridges/relays/lib-substrate-relay/src/equivocation/target.rs index 6eee2ab91d45..7d054e843d0d 100644 --- a/bridges/relays/lib-substrate-relay/src/equivocation/target.rs +++ b/bridges/relays/lib-substrate-relay/src/equivocation/target.rs @@ -34,27 +34,33 @@ use sp_runtime::traits::Header; use std::marker::PhantomData; /// Substrate node as equivocation source. -pub struct SubstrateEquivocationTarget { - client: Client, +pub struct SubstrateEquivocationTarget { + client: TargetClnt, _phantom: PhantomData

, } -impl SubstrateEquivocationTarget

{ +impl> + SubstrateEquivocationTarget +{ /// Create new instance of `SubstrateEquivocationTarget`. - pub fn new(client: Client) -> Self { + pub fn new(client: TargetClnt) -> Self { Self { client, _phantom: Default::default() } } } -impl Clone for SubstrateEquivocationTarget

{ +impl> Clone + for SubstrateEquivocationTarget +{ fn clone(&self) -> Self { Self { client: self.client.clone(), _phantom: Default::default() } } } #[async_trait] -impl RelayClient for SubstrateEquivocationTarget

{ +impl> RelayClient + for SubstrateEquivocationTarget +{ type Error = Error; async fn reconnect(&mut self) -> Result<(), Error> { @@ -63,8 +69,9 @@ impl RelayClient for SubstrateEquivoc } #[async_trait] -impl - TargetClient> for SubstrateEquivocationTarget

+impl> + TargetClient> + for SubstrateEquivocationTarget { async fn best_finalized_header_number( &self, diff --git a/bridges/relays/lib-substrate-relay/src/finality/initialize.rs b/bridges/relays/lib-substrate-relay/src/finality/initialize.rs index 5dde46c39dd6..a972f743e117 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/initialize.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/initialize.rs @@ -39,8 +39,8 @@ pub async fn initialize< TargetChain: ChainWithTransactions, F, >( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, target_signer: AccountKeyPairOf, prepare_initialize_transaction: F, dry_run: bool, @@ -101,8 +101,8 @@ async fn do_initialize< TargetChain: ChainWithTransactions, F, >( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, target_signer: AccountKeyPairOf, prepare_initialize_transaction: F, dry_run: bool, diff --git a/bridges/relays/lib-substrate-relay/src/finality/mod.rs b/bridges/relays/lib-substrate-relay/src/finality/mod.rs index 0293e1da224a..a2379eb4812e 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/mod.rs @@ -77,7 +77,7 @@ pub trait SubstrateFinalitySyncPipeline: BaseSubstrateFinalitySyncPipeline { /// Add relay guards if required. async fn start_relay_guards( - target_client: &Client, + target_client: &impl Client, enable_version_guard: bool, ) -> relay_substrate_client::Result<()> { if enable_version_guard { @@ -240,8 +240,8 @@ macro_rules! generate_submit_finality_proof_ex_call_builder { /// Run Substrate-to-Substrate finality sync loop. 
pub async fn run( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, headers_to_relay: HeadersToRelay, transaction_params: TransactionParams>, metrics_params: MetricsParams, @@ -255,8 +255,8 @@ pub async fn run( ); finality_relay::run( - SubstrateFinalitySource::

::new(source_client, None), - SubstrateFinalityTarget::

::new(target_client, transaction_params.clone()), + SubstrateFinalitySource::::new(source_client, None), + SubstrateFinalityTarget::::new(target_client, transaction_params.clone()), finality_relay::FinalitySyncParams { tick: std::cmp::max( P::SourceChain::AVERAGE_BLOCK_INTERVAL, @@ -279,12 +279,12 @@ pub async fn run( /// Relay single header. No checks are made to ensure that transaction will succeed. pub async fn relay_single_header( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, transaction_params: TransactionParams>, header_number: BlockNumberOf, ) -> anyhow::Result<()> { - let finality_source = SubstrateFinalitySource::

::new(source_client, None); + let finality_source = SubstrateFinalitySource::::new(source_client, None); let (header, proof) = finality_source.header_and_finality_proof(header_number).await?; let Some(proof) = proof else { return Err(anyhow::format_err!( @@ -295,7 +295,7 @@ pub async fn relay_single_header( )); }; - let finality_target = SubstrateFinalityTarget::

::new(target_client, transaction_params); + let finality_target = SubstrateFinalityTarget::::new(target_client, transaction_params); let tx_tracker = finality_target.submit_finality_proof(header, proof, false).await?; match tx_tracker.wait().await { TrackedTransactionStatus::Finalized(_) => Ok(()), diff --git a/bridges/relays/lib-substrate-relay/src/finality/source.rs b/bridges/relays/lib-substrate-relay/src/finality/source.rs index c94af6108957..f6fa5c24add5 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/source.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/source.rs @@ -40,22 +40,24 @@ use relay_utils::{relay_loop::Client as RelayClient, UniqueSaturatedInto}; pub type RequiredHeaderNumberRef = Arc::BlockNumber>>; /// Substrate node as finality source. -pub struct SubstrateFinalitySource { - client: Client, +pub struct SubstrateFinalitySource { + client: SourceClnt, maximal_header_number: Option>, } -impl SubstrateFinalitySource

{ +impl> + SubstrateFinalitySource +{ /// Create new headers source using given client. pub fn new( - client: Client, + client: SourceClnt, maximal_header_number: Option>, ) -> Self { SubstrateFinalitySource { client, maximal_header_number } } /// Returns reference to the underlying RPC client. - pub fn client(&self) -> &Client { + pub fn client(&self) -> &SourceClnt { &self.client } @@ -174,7 +176,9 @@ impl SubstrateFinalitySource

{ } } -impl Clone for SubstrateFinalitySource

{ +impl Clone + for SubstrateFinalitySource +{ fn clone(&self) -> Self { SubstrateFinalitySource { client: self.client.clone(), @@ -184,7 +188,9 @@ impl Clone for SubstrateFinalitySource

{ } #[async_trait] -impl RelayClient for SubstrateFinalitySource

{ +impl> RelayClient + for SubstrateFinalitySource +{ type Error = Error; async fn reconnect(&mut self) -> Result<(), Error> { @@ -193,8 +199,8 @@ impl RelayClient for SubstrateFinalitySource

SourceClientBase> - for SubstrateFinalitySource

+impl> + SourceClientBase> for SubstrateFinalitySource { type FinalityProofsStream = SubstrateFinalityProofsStream

; @@ -204,8 +210,8 @@ impl SourceClientBase SourceClient> - for SubstrateFinalitySource

+impl> + SourceClient> for SubstrateFinalitySource { async fn best_finalized_block_number(&self) -> Result, Error> { let mut finalized_header_number = self.on_chain_best_finalized_block_number().await?; @@ -235,7 +241,7 @@ impl SourceClient( - client: &Client, + client: &impl Client, number: BlockNumberOf, ) -> Result< ( @@ -244,8 +250,8 @@ async fn header_and_finality_proof( ), Error, > { - let header_hash = client.block_hash_by_number(number).await?; - let signed_block = client.get_block(Some(header_hash)).await?; + let header_hash = client.header_hash_by_number(number).await?; + let signed_block = client.block_by_hash(header_hash).await?; let justification = signed_block .justification(P::FinalityEngine::ID) diff --git a/bridges/relays/lib-substrate-relay/src/finality/target.rs b/bridges/relays/lib-substrate-relay/src/finality/target.rs index 52ab2462c62c..18b696685dd4 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/target.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/target.rs @@ -28,22 +28,25 @@ use async_trait::async_trait; use bp_runtime::BlockNumberOf; use finality_relay::TargetClient; use relay_substrate_client::{ - AccountKeyPairOf, Chain, Client, Error, HeaderIdOf, HeaderOf, SyncHeader, TransactionEra, - TransactionTracker, UnsignedTransaction, + AccountIdOf, AccountKeyPairOf, Chain, Client, Error, HeaderIdOf, HeaderOf, SyncHeader, + TransactionEra, TransactionTracker, UnsignedTransaction, }; use relay_utils::relay_loop::Client as RelayClient; +use sp_core::Pair; use sp_runtime::traits::Header; /// Substrate client as Substrate finality target. -pub struct SubstrateFinalityTarget { - client: Client, +pub struct SubstrateFinalityTarget { + client: TargetClnt, transaction_params: TransactionParams>, } -impl SubstrateFinalityTarget

{ +impl> + SubstrateFinalityTarget +{ /// Create new Substrate headers target. pub fn new( - client: Client, + client: TargetClnt, transaction_params: TransactionParams>, ) -> Self { SubstrateFinalityTarget { client, transaction_params } @@ -65,7 +68,9 @@ impl SubstrateFinalityTarget

{ } } -impl Clone for SubstrateFinalityTarget

{ +impl Clone + for SubstrateFinalityTarget +{ fn clone(&self) -> Self { SubstrateFinalityTarget { client: self.client.clone(), @@ -75,7 +80,9 @@ impl Clone for SubstrateFinalityTarget

{ } #[async_trait] -impl RelayClient for SubstrateFinalityTarget

{ +impl> RelayClient + for SubstrateFinalityTarget +{ type Error = Error; async fn reconnect(&mut self) -> Result<(), Error> { @@ -84,10 +91,12 @@ impl RelayClient for SubstrateFinalityTarget

TargetClient> - for SubstrateFinalityTarget

+impl> + TargetClient> for SubstrateFinalityTarget +where + AccountIdOf: From< as Pair>::Public>, { - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn best_finalized_source_block_id(&self) -> Result, Error> { // we can't continue to relay finality if target node is out of sync, because @@ -109,10 +118,10 @@ impl TargetClient Result>, Self::Error> { Ok(self .client - .typed_state_call( + .state_call( + self.client.best_header().await?.hash(), P::SourceChain::FREE_HEADERS_INTERVAL_METHOD.into(), (), - Some(self.client.best_header().await?.hash()), ) .await .unwrap_or_else(|e| { diff --git a/bridges/relays/lib-substrate-relay/src/finality_base/engine.rs b/bridges/relays/lib-substrate-relay/src/finality_base/engine.rs index 5a9ec42fde5a..4f15d6877194 100644 --- a/bridges/relays/lib-substrate-relay/src/finality_base/engine.rs +++ b/bridges/relays/lib-substrate-relay/src/finality_base/engine.rs @@ -28,10 +28,11 @@ use bp_header_chain::{ }; use bp_runtime::{BasicOperatingMode, HeaderIdProvider, OperatingMode}; use codec::{Decode, Encode}; +use futures::stream::StreamExt; use num_traits::{One, Zero}; use relay_substrate_client::{ BlockNumberOf, Chain, ChainWithGrandpa, Client, Error as SubstrateError, HashOf, HeaderOf, - Subscription, SubstrateFinalityClient, SubstrateGrandpaFinalityClient, + Subscription, }; use sp_consensus_grandpa::{AuthorityList as GrandpaAuthoritiesSet, GRANDPA_ENGINE_ID}; use sp_core::{storage::StorageKey, Bytes}; @@ -45,8 +46,6 @@ pub trait Engine: Send { const ID: ConsensusEngineId; /// A reader that can extract the consensus log from the header digest and interpret it. type ConsensusLogReader: ConsensusLogReader; - /// Type of Finality RPC client used by this engine. - type FinalityClient: SubstrateFinalityClient; /// Type of finality proofs, used by consensus engine. type FinalityProof: FinalityProof, BlockNumberOf> + Decode + Encode; /// The context needed for verifying finality proofs. 
@@ -74,10 +73,10 @@ pub trait Engine: Send { /// Returns `Ok(true)` if finality pallet at the bridged chain has already been initialized. async fn is_initialized( - target_client: &Client, + target_client: &impl Client, ) -> Result { Ok(target_client - .raw_storage_value(Self::is_initialized_key(), None) + .raw_storage_value(target_client.best_header_hash().await?, Self::is_initialized_key()) .await? .is_some()) } @@ -88,10 +87,13 @@ pub trait Engine: Send { /// Returns `Ok(true)` if finality pallet at the bridged chain is halted. async fn is_halted( - target_client: &Client, + target_client: &impl Client, ) -> Result { Ok(target_client - .storage_value::(Self::pallet_operating_mode_key(), None) + .storage_value::( + target_client.best_header_hash().await?, + Self::pallet_operating_mode_key(), + ) .await? .map(|operating_mode| operating_mode.is_halted()) .unwrap_or(false)) @@ -99,17 +101,15 @@ pub trait Engine: Send { /// A method to subscribe to encoded finality proofs, given source client. async fn source_finality_proofs( - source_client: &Client, - ) -> Result, SubstrateError> { - source_client.subscribe_finality_justifications::().await - } + source_client: &impl Client, + ) -> Result, SubstrateError>; /// Verify and optimize finality proof before sending it to the target node. /// /// Apart from optimization, we expect this method to perform all required checks /// that the `header` and `proof` are valid at the current state of the target chain. async fn verify_and_optimize_proof( - target_client: &Client, + target_client: &impl Client, header: &C::Header, proof: &mut Self::FinalityProof, ) -> Result; @@ -123,19 +123,19 @@ pub trait Engine: Send { /// Prepare initialization data for the finality bridge pallet. async fn prepare_initialization_data( - client: Client, + client: impl Client, ) -> Result, BlockNumberOf>>; /// Get the context needed for validating a finality proof. 
async fn finality_verification_context( - target_client: &Client, + target_client: &impl Client, at: HashOf, ) -> Result; /// Returns the finality info associated to the source headers synced with the target /// at the provided block. async fn synced_headers_finality_info( - target_client: &Client, + target_client: &impl Client, at: TargetChain::Hash, ) -> Result< Vec>, @@ -144,7 +144,7 @@ pub trait Engine: Send { /// Generate key ownership proof for the provided equivocation. async fn generate_source_key_ownership_proof( - source_client: &Client, + source_client: &impl Client, at: C::Hash, equivocation: &Self::EquivocationProof, ) -> Result; @@ -156,7 +156,7 @@ pub struct Grandpa(PhantomData); impl Grandpa { /// Read header by hash from the source client. async fn source_header( - source_client: &Client, + source_client: &impl Client, header_hash: C::Hash, ) -> Result, BlockNumberOf>> { source_client @@ -167,15 +167,15 @@ impl Grandpa { /// Read GRANDPA authorities set at given header. 
async fn source_authorities_set( - source_client: &Client, + source_client: &impl Client, header_hash: C::Hash, ) -> Result, BlockNumberOf>> { - let raw_authorities_set = source_client - .grandpa_authorities_set(header_hash) + const SUB_API_GRANDPA_AUTHORITIES: &str = "GrandpaApi_grandpa_authorities"; + + source_client + .state_call(header_hash, SUB_API_GRANDPA_AUTHORITIES.to_string(), ()) .await - .map_err(|err| Error::RetrieveAuthorities(C::NAME, header_hash, err))?; - GrandpaAuthoritiesSet::decode(&mut &raw_authorities_set[..]) - .map_err(|err| Error::DecodeAuthorities(C::NAME, header_hash, err)) + .map_err(|err| Error::RetrieveAuthorities(C::NAME, header_hash, err)) } } @@ -183,7 +183,6 @@ impl Grandpa { impl Engine for Grandpa { const ID: ConsensusEngineId = GRANDPA_ENGINE_ID; type ConsensusLogReader = GrandpaConsensusLogReader<::Number>; - type FinalityClient = SubstrateGrandpaFinalityClient; type FinalityProof = GrandpaJustification>; type FinalityVerificationContext = JustificationVerificationContext; type EquivocationProof = sp_consensus_grandpa::EquivocationProof, BlockNumberOf>; @@ -200,8 +199,14 @@ impl Engine for Grandpa { bp_header_chain::storage_keys::pallet_operating_mode_key(C::WITH_CHAIN_GRANDPA_PALLET_NAME) } + async fn source_finality_proofs( + client: &impl Client, + ) -> Result, SubstrateError> { + client.subscribe_grandpa_finality_justifications().await + } + async fn verify_and_optimize_proof( - target_client: &Client, + target_client: &impl Client, header: &C::Header, proof: &mut Self::FinalityProof, ) -> Result { @@ -239,7 +244,7 @@ impl Engine for Grandpa { /// Prepare initialization data for the GRANDPA verifier pallet. 
async fn prepare_initialization_data( - source_client: Client, + source_client: impl Client, ) -> Result, BlockNumberOf>> { // In ideal world we just need to get best finalized header and then to read GRANDPA // authorities set (`pallet_grandpa::CurrentSetId` + `GrandpaApi::grandpa_authorities()`) at @@ -248,17 +253,14 @@ impl Engine for Grandpa { // But now there are problems with this approach - `CurrentSetId` may return invalid value. // So here we're waiting for the next justification, read the authorities set and then try // to figure out the set id with bruteforce. - let justifications = Self::source_finality_proofs(&source_client) + let mut justifications = Self::source_finality_proofs(&source_client) .await .map_err(|err| Error::Subscribe(C::NAME, err))?; // Read next justification - the header that it finalizes will be used as initial header. let justification = justifications .next() .await - .map_err(|e| Error::ReadJustification(C::NAME, e)) - .and_then(|justification| { - justification.ok_or(Error::ReadJustificationStreamEnded(C::NAME)) - })?; + .ok_or(Error::ReadJustificationStreamEnded(C::NAME))?; // Read initial header. let justification: GrandpaJustification = @@ -359,14 +361,14 @@ impl Engine for Grandpa { } async fn finality_verification_context( - target_client: &Client, + target_client: &impl Client, at: HashOf, ) -> Result { let current_authority_set_key = bp_header_chain::storage_keys::current_authority_set_key( C::WITH_CHAIN_GRANDPA_PALLET_NAME, ); let authority_set: AuthoritySet = target_client - .storage_value(current_authority_set_key, Some(at)) + .storage_value(at, current_authority_set_key) .await? 
.map(Ok) .unwrap_or(Err(SubstrateError::Custom(format!( @@ -385,11 +387,11 @@ impl Engine for Grandpa { } async fn synced_headers_finality_info( - target_client: &Client, + target_client: &impl Client, at: TargetChain::Hash, ) -> Result>>, SubstrateError> { let stored_headers_grandpa_info: Vec>> = target_client - .typed_state_call(C::SYNCED_HEADERS_GRANDPA_INFO_METHOD.to_string(), (), Some(at)) + .state_call(at, C::SYNCED_HEADERS_GRANDPA_INFO_METHOD.to_string(), ()) .await?; let mut headers_grandpa_info = vec![]; @@ -407,7 +409,7 @@ impl Engine for Grandpa { } async fn generate_source_key_ownership_proof( - source_client: &Client, + source_client: &impl Client, at: C::Hash, equivocation: &Self::EquivocationProof, ) -> Result { diff --git a/bridges/relays/lib-substrate-relay/src/finality_base/mod.rs b/bridges/relays/lib-substrate-relay/src/finality_base/mod.rs index 825960b1b3ef..71d15ca3868e 100644 --- a/bridges/relays/lib-substrate-relay/src/finality_base/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/finality_base/mod.rs @@ -50,11 +50,11 @@ pub type SubstrateFinalityProofsStream

= /// Subscribe to new finality proofs. pub async fn finality_proofs( - client: &Client, + client: &impl Client, ) -> Result, Error> { Ok(unfold( P::FinalityEngine::source_finality_proofs(client).await?, - move |subscription| async move { + move |mut subscription| async move { loop { let log_error = |err| { log::error!( @@ -65,8 +65,7 @@ pub async fn finality_proofs( ); }; - let next_justification = - subscription.next().await.map_err(|err| log_error(err.to_string())).ok()??; + let next_justification = subscription.next().await?; let decoded_justification = >::FinalityProof::decode( @@ -93,7 +92,7 @@ pub async fn finality_proofs( /// /// The runtime API method should be `FinalityApi::best_finalized()`. pub async fn best_synced_header_id( - target_client: &Client, + target_client: &impl Client, at: HashOf, ) -> Result>, Error> where @@ -102,6 +101,6 @@ where { // now let's read id of best finalized peer header at our best finalized block target_client - .typed_state_call(SourceChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), (), Some(at)) + .state_call(at, SourceChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), ()) .await } diff --git a/bridges/relays/lib-substrate-relay/src/messages_lane.rs b/bridges/relays/lib-substrate-relay/src/messages_lane.rs index 08550d19bae0..e3786dcdc5e3 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_lane.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_lane.rs @@ -88,13 +88,13 @@ impl MessageLane for MessageLaneAdapter

{ } /// Substrate <-> Substrate messages relay parameters. -pub struct MessagesRelayParams { +pub struct MessagesRelayParams { /// Messages source client. - pub source_client: Client, + pub source_client: SourceClnt, /// Source transaction params. pub source_transaction_params: TransactionParams>, /// Messages target client. - pub target_client: Client, + pub target_client: TargetClnt, /// Target transaction params. pub target_transaction_params: TransactionParams>, /// Optional on-demand source to target headers relay. @@ -179,8 +179,13 @@ impl>> } /// Run Substrate-to-Substrate messages sync loop. -pub async fn run(params: MessagesRelayParams

) -> anyhow::Result<()> +pub async fn run( + params: MessagesRelayParams, +) -> anyhow::Result<()> where + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, AccountIdOf: From< as Pair>::Public>, AccountIdOf: From< as Pair>::Public>, BalanceOf: TryFrom>, @@ -190,7 +195,7 @@ where let limits = match params.limits { Some(limits) => limits, None => - select_delivery_transaction_limits_rpc::

( + select_delivery_transaction_limits_rpc( ¶ms, P::TargetChain::max_extrinsic_weight(), P::SourceChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, @@ -250,14 +255,14 @@ where max_messages_size_in_single_batch, }, }, - SubstrateMessagesSource::

::new( + SubstrateMessagesSource::::new( source_client.clone(), target_client.clone(), params.lane_id, params.source_transaction_params, params.target_to_source_headers_relay, ), - SubstrateMessagesTarget::

::new( + SubstrateMessagesTarget::::new( target_client, source_client, params.lane_id, @@ -278,8 +283,8 @@ where /// Deliver range of Substrate-to-Substrate messages. No checks are made to ensure that transaction /// will succeed. pub async fn relay_messages_range( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, source_transaction_params: TransactionParams>, target_transaction_params: TransactionParams>, at_source_block: HeaderIdOf, @@ -295,14 +300,14 @@ where let relayer_id_at_source: AccountIdOf = source_transaction_params.signer.public().into(); messages_relay::relay_messages_range( - SubstrateMessagesSource::

::new( + SubstrateMessagesSource::::new( source_client.clone(), target_client.clone(), lane_id, source_transaction_params, None, ), - SubstrateMessagesTarget::

::new( + SubstrateMessagesTarget::::new( target_client, source_client, lane_id, @@ -321,8 +326,8 @@ where /// Relay messages delivery confirmation of Substrate-to-Substrate messages. /// No checks are made to ensure that transaction will succeed. pub async fn relay_messages_delivery_confirmation( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, source_transaction_params: TransactionParams>, at_target_block: HeaderIdOf, lane_id: LaneId, @@ -335,14 +340,14 @@ where let relayer_id_at_source: AccountIdOf = source_transaction_params.signer.public().into(); messages_relay::relay_messages_delivery_confirmation( - SubstrateMessagesSource::

::new( + SubstrateMessagesSource::::new( source_client.clone(), target_client.clone(), lane_id, source_transaction_params, None, ), - SubstrateMessagesTarget::

::new( + SubstrateMessagesTarget::::new( target_client, source_client, lane_id, @@ -546,12 +551,15 @@ macro_rules! generate_receive_message_delivery_proof_call_builder { } /// Returns maximal number of messages and their maximal cumulative dispatch weight. -async fn select_delivery_transaction_limits_rpc( - params: &MessagesRelayParams

, +async fn select_delivery_transaction_limits_rpc( + params: &MessagesRelayParams, max_extrinsic_weight: Weight, max_unconfirmed_messages_at_inbound_lane: MessageNonce, ) -> anyhow::Result where + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, AccountIdOf: From< as Pair>::Public>, { // We may try to guess accurate value, based on maximal number of messages and per-message @@ -567,20 +575,21 @@ where let weight_for_messages_dispatch = max_extrinsic_weight - weight_for_delivery_tx; // weight of empty message delivery with outbound lane state - let delivery_tx_with_zero_messages = dummy_messages_delivery_transaction::

(params, 0)?; + let best_target_block_hash = params.target_client.best_header_hash().await?; + let delivery_tx_with_zero_messages = dummy_messages_delivery_transaction::(params, 0)?; let delivery_tx_with_zero_messages_weight = params .target_client - .extimate_extrinsic_weight(delivery_tx_with_zero_messages) + .estimate_extrinsic_weight(best_target_block_hash, delivery_tx_with_zero_messages) .await .map_err(|e| { anyhow::format_err!("Failed to estimate delivery extrinsic weight: {:?}", e) })?; // weight of single message delivery with outbound lane state - let delivery_tx_with_one_message = dummy_messages_delivery_transaction::

(params, 1)?; + let delivery_tx_with_one_message = dummy_messages_delivery_transaction::(params, 1)?; let delivery_tx_with_one_message_weight = params .target_client - .extimate_extrinsic_weight(delivery_tx_with_one_message) + .estimate_extrinsic_weight(best_target_block_hash, delivery_tx_with_one_message) .await .map_err(|e| { anyhow::format_err!("Failed to estimate delivery extrinsic weight: {:?}", e) @@ -615,8 +624,8 @@ where } /// Returns dummy message delivery transaction with zero messages and `1kb` proof. -fn dummy_messages_delivery_transaction( - params: &MessagesRelayParams

, +fn dummy_messages_delivery_transaction( + params: &MessagesRelayParams, messages: u32, ) -> anyhow::Result<::SignedTransaction> where diff --git a/bridges/relays/lib-substrate-relay/src/messages_metrics.rs b/bridges/relays/lib-substrate-relay/src/messages_metrics.rs index b30e75bd8bac..8845f43dcb62 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_metrics.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_metrics.rs @@ -36,7 +36,7 @@ use std::{fmt::Debug, marker::PhantomData}; /// Add relay accounts balance metrics. pub async fn add_relay_balances_metrics( - client: Client, + client: impl Client, metrics: &MetricsParams, relay_accounts: &Vec>>, lanes: &[LaneId], diff --git a/bridges/relays/lib-substrate-relay/src/messages_source.rs b/bridges/relays/lib-substrate-relay/src/messages_source.rs index 49deff046f9c..1f597e278da4 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_source.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_source.rs @@ -63,19 +63,21 @@ pub type SubstrateMessagesProof = (Weight, FromBridgedChainMessagesProof = Vec<(MessagePayload, &'a mut OutboundMessageDetails)>; /// Substrate client as Substrate messages source. -pub struct SubstrateMessagesSource { - source_client: Client, - target_client: Client, +pub struct SubstrateMessagesSource { + source_client: SourceClnt, + target_client: TargetClnt, lane_id: LaneId, transaction_params: TransactionParams>, target_to_source_headers_relay: Option>>, } -impl SubstrateMessagesSource

{ +impl, TargetClnt> + SubstrateMessagesSource +{ /// Create new Substrate headers source. pub fn new( - source_client: Client, - target_client: Client, + source_client: SourceClnt, + target_client: TargetClnt, lane_id: LaneId, transaction_params: TransactionParams>, target_to_source_headers_relay: Option< @@ -98,22 +100,25 @@ impl SubstrateMessagesSource

{ ) -> Result, SubstrateError> { self.source_client .storage_value( + id.hash(), outbound_lane_data_key( P::TargetChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &self.lane_id, ), - Some(id.1), ) .await } /// Ensure that the messages pallet at source chain is active. async fn ensure_pallet_active(&self) -> Result<(), SubstrateError> { - ensure_messages_pallet_active::(&self.source_client).await + ensure_messages_pallet_active::(&self.source_client) + .await } } -impl Clone for SubstrateMessagesSource

{ +impl Clone + for SubstrateMessagesSource +{ fn clone(&self) -> Self { Self { source_client: self.source_client.clone(), @@ -126,7 +131,12 @@ impl Clone for SubstrateMessagesSource

{ } #[async_trait] -impl RelayClient for SubstrateMessagesSource

{ +impl< + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, + > RelayClient for SubstrateMessagesSource +{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { @@ -150,13 +160,17 @@ impl RelayClient for SubstrateMessagesSource

{ } #[async_trait] -impl SourceClient> for SubstrateMessagesSource

+impl< + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, + > SourceClient> for SubstrateMessagesSource where AccountIdOf: From< as Pair>::Public>, { type BatchTransaction = BatchProofTransaction; - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn state(&self) -> Result>, SubstrateError> { // we can't continue to deliver confirmations if source node is out of sync, because @@ -169,7 +183,7 @@ where // we can't relay confirmations if messages pallet at source chain is halted self.ensure_pallet_active().await?; - read_client_state(&self.source_client, Some(&self.target_client)).await + read_client_state_from_both_chains(&self.source_client, &self.target_client).await } async fn latest_generated_nonce( @@ -203,12 +217,12 @@ where id: SourceHeaderIdOf>, nonces: RangeInclusive, ) -> Result>, SubstrateError> { - let mut out_msgs_details = self + let mut out_msgs_details: Vec<_> = self .source_client - .typed_state_call::<_, Vec<_>>( + .state_call::<_, Vec<_>>( + id.hash(), P::TargetChain::TO_CHAIN_MESSAGE_DETAILS_METHOD.into(), (self.lane_id, *nonces.start(), *nonces.end()), - Some(id.1), ) .await?; validate_out_msgs_details::(&out_msgs_details, nonces)?; @@ -226,7 +240,7 @@ where out_msg_details.nonce, ); let msg_payload: MessagePayload = - self.source_client.storage_value(msg_key, Some(id.1)).await?.ok_or_else(|| { + self.source_client.storage_value(id.hash(), msg_key).await?.ok_or_else(|| { SubstrateError::Custom(format!( "Message to {} {:?}/{} is missing from runtime the storage of {} at {:?}", P::TargetChain::NAME, @@ -240,15 +254,16 @@ where msgs_to_refine.push((msg_payload, out_msg_details)); } + let best_target_header_hash = self.target_client.best_header_hash().await?; for mut msgs_to_refine_batch in split_msgs_to_refine::(self.lane_id, msgs_to_refine)? 
{ let in_msgs_details = self .target_client - .typed_state_call::<_, Vec>( + .state_call::<_, Vec>( + best_target_header_hash, P::SourceChain::FROM_CHAIN_MESSAGE_DETAILS_METHOD.into(), (self.lane_id, &msgs_to_refine_batch), - None, ) .await?; if in_msgs_details.len() != msgs_to_refine_batch.len() { @@ -326,7 +341,7 @@ where let proof = self .source_client - .prove_storage(storage_keys, id.1) + .prove_storage(id.1, storage_keys) .await? .into_iter_nodes() .collect(); @@ -387,15 +402,19 @@ where } /// Ensure that the messages pallet at source chain is active. -pub(crate) async fn ensure_messages_pallet_active( - client: &Client, +pub(crate) async fn ensure_messages_pallet_active( + client: &AtChainClient, ) -> Result<(), SubstrateError> where AtChain: ChainWithMessages, WithChain: ChainWithMessages, + AtChainClient: Client, { let operating_mode = client - .storage_value(operating_mode_key(WithChain::WITH_CHAIN_MESSAGES_PALLET_NAME), None) + .storage_value( + client.best_header_hash().await?, + operating_mode_key(WithChain::WITH_CHAIN_MESSAGES_PALLET_NAME), + ) .await?; let is_halted = operating_mode == Some(MessagesOperatingMode::Basic(BasicOperatingMode::Halted)); @@ -412,11 +431,10 @@ where /// bridge GRANDPA pallet deployed and it provides `best_finalized_header_id_method_name` /// runtime API to read the best finalized Bridged chain header. /// -/// If `peer_client` is `None`, the value of `actual_best_finalized_peer_at_best_self` will -/// always match the `best_finalized_peer_at_best_self`. +/// The value of `actual_best_finalized_peer_at_best_self` will always match +/// the `best_finalized_peer_at_best_self`. 
pub async fn read_client_state( - self_client: &Client, - peer_client: Option<&Client>, + self_client: &impl Client, ) -> Result, HeaderIdOf>, SubstrateError> where SelfChain: Chain, @@ -431,30 +449,42 @@ where let peer_on_self_best_finalized_id = best_synced_header_id::(self_client, self_best_id.hash()).await?; - // read actual header, matching the `peer_on_self_best_finalized_id` from the peer chain - let actual_peer_on_self_best_finalized_id = - match (peer_client, peer_on_self_best_finalized_id.as_ref()) { - (Some(peer_client), Some(peer_on_self_best_finalized_id)) => { - let actual_peer_on_self_best_finalized = - peer_client.header_by_number(peer_on_self_best_finalized_id.number()).await?; - Some(actual_peer_on_self_best_finalized.id()) - }, - _ => peer_on_self_best_finalized_id, - }; - Ok(ClientState { best_self: self_best_id, best_finalized_self: self_best_finalized_id, best_finalized_peer_at_best_self: peer_on_self_best_finalized_id, - actual_best_finalized_peer_at_best_self: actual_peer_on_self_best_finalized_id, + actual_best_finalized_peer_at_best_self: peer_on_self_best_finalized_id, }) } +/// Does the same stuff as `read_client_state`, but properly fills the +/// `actual_best_finalized_peer_at_best_self` field of the result. 
+pub async fn read_client_state_from_both_chains( + self_client: &impl Client, + peer_client: &impl Client, +) -> Result, HeaderIdOf>, SubstrateError> +where + SelfChain: Chain, + PeerChain: Chain, +{ + let mut client_state = read_client_state::(self_client).await?; + client_state.actual_best_finalized_peer_at_best_self = + match client_state.best_finalized_peer_at_best_self.as_ref() { + Some(peer_on_self_best_finalized_id) => { + let actual_peer_on_self_best_finalized = + peer_client.header_by_number(peer_on_self_best_finalized_id.number()).await?; + Some(actual_peer_on_self_best_finalized.id()) + }, + _ => client_state.best_finalized_peer_at_best_self, + }; + Ok(client_state) +} + /// Reads best `PeerChain` header known to the `SelfChain` using provided runtime API method. /// /// Method is supposed to be the `FinalityApi::best_finalized()` method. pub async fn best_finalized_peer_header_at_self( - self_client: &Client, + self_client: &impl Client, at_self_hash: HashOf, ) -> Result>, SubstrateError> where @@ -463,10 +493,10 @@ where { // now let's read id of best finalized peer header at our best finalized block self_client - .typed_state_call::<_, Option<_>>( + .state_call::<_, Option<_>>( + at_self_hash, PeerChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), (), - Some(at_self_hash), ) .await } diff --git a/bridges/relays/lib-substrate-relay/src/messages_target.rs b/bridges/relays/lib-substrate-relay/src/messages_target.rs index 5ffb2b6c771e..e1c7645eac68 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_target.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_target.rs @@ -23,7 +23,9 @@ use crate::{ BatchProofTransaction, MessageLaneAdapter, ReceiveMessagesProofCallBuilder, SubstrateMessageLane, }, - messages_source::{ensure_messages_pallet_active, read_client_state, SubstrateMessagesProof}, + messages_source::{ + ensure_messages_pallet_active, read_client_state_from_both_chains, SubstrateMessagesProof, + }, on_demand::OnDemandRelay, 
TransactionParams, }; @@ -52,20 +54,24 @@ pub type SubstrateMessagesDeliveryProof = (UnrewardedRelayersState, FromBridgedChainMessagesDeliveryProof>); /// Substrate client as Substrate messages target. -pub struct SubstrateMessagesTarget { - target_client: Client, - source_client: Client, +pub struct SubstrateMessagesTarget { + target_client: TargetClnt, + source_client: SourceClnt, lane_id: LaneId, relayer_id_at_source: AccountIdOf, transaction_params: Option>>, source_to_target_headers_relay: Option>>, } -impl SubstrateMessagesTarget

{ +impl SubstrateMessagesTarget +where + P: SubstrateMessageLane, + TargetClnt: Client, +{ /// Create new Substrate headers target. pub fn new( - target_client: Client, - source_client: Client, + target_client: TargetClnt, + source_client: SourceClnt, lane_id: LaneId, relayer_id_at_source: AccountIdOf, transaction_params: Option>>, @@ -90,22 +96,25 @@ impl SubstrateMessagesTarget

{ ) -> Result>>, SubstrateError> { self.target_client .storage_value( + id.hash(), inbound_lane_data_key( P::SourceChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &self.lane_id, ), - Some(id.1), ) .await } /// Ensure that the messages pallet at target chain is active. async fn ensure_pallet_active(&self) -> Result<(), SubstrateError> { - ensure_messages_pallet_active::(&self.target_client).await + ensure_messages_pallet_active::(&self.target_client) + .await } } -impl Clone for SubstrateMessagesTarget

{ +impl Clone + for SubstrateMessagesTarget +{ fn clone(&self) -> Self { Self { target_client: self.target_client.clone(), @@ -119,7 +128,12 @@ impl Clone for SubstrateMessagesTarget

{ } #[async_trait] -impl RelayClient for SubstrateMessagesTarget

{ +impl< + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, + > RelayClient for SubstrateMessagesTarget +{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { @@ -143,14 +157,18 @@ impl RelayClient for SubstrateMessagesTarget

{ } #[async_trait] -impl TargetClient> for SubstrateMessagesTarget

+impl< + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, + > TargetClient> for SubstrateMessagesTarget where AccountIdOf: From< as Pair>::Public>, BalanceOf: TryFrom>, { type BatchTransaction = BatchProofTransaction; - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn state(&self) -> Result>, SubstrateError> { // we can't continue to deliver confirmations if source node is out of sync, because @@ -163,7 +181,7 @@ where // we can't relay messages if messages pallet at target chain is halted self.ensure_pallet_active().await?; - read_client_state(&self.target_client, Some(&self.source_client)).await + read_client_state_from_both_chains(&self.target_client, &self.source_client).await } async fn latest_received_nonce( @@ -219,7 +237,7 @@ where ); let proof = self .target_client - .prove_storage(vec![inbound_data_key], id.1) + .prove_storage(id.hash(), vec![inbound_data_key]) .await? .into_iter_nodes() .collect(); diff --git a/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs b/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs index 202f53ea4e4f..d18c582dfac4 100644 --- a/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs +++ b/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs @@ -53,25 +53,30 @@ use crate::{ /// relay) needs it to continue its regular work. When enough headers are relayed, on-demand stops /// syncing headers. #[derive(Clone)] -pub struct OnDemandHeadersRelay { +pub struct OnDemandHeadersRelay { /// Relay task name. relay_task_name: String, /// Shared reference to maximal required finalized header number. required_header_number: RequiredHeaderNumberRef, /// Client of the source chain. - source_client: Client, + source_client: SourceClnt, /// Client of the target chain. - target_client: Client, + target_client: TargetClnt, } -impl OnDemandHeadersRelay

{ +impl< + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, + TargetClnt: Client, + > OnDemandHeadersRelay +{ /// Create new on-demand headers relay. /// /// If `metrics_params` is `Some(_)`, the metrics of the finality relay are registered. /// Otherwise, all required metrics must be exposed outside of this method. pub fn new( - source_client: Client, - target_client: Client, + source_client: SourceClnt, + target_client: TargetClnt, target_transaction_params: TransactionParams>, headers_to_relay: HeadersToRelay, metrics_params: Option, @@ -104,8 +109,12 @@ impl OnDemandHeadersRelay

{ } #[async_trait] -impl OnDemandRelay - for OnDemandHeadersRelay

+impl< + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, + TargetClnt: Client, + > OnDemandRelay + for OnDemandHeadersRelay { async fn reconnect(&self) -> Result<(), SubstrateError> { // using clone is fine here (to avoid mut requirement), because clone on Client clones @@ -139,7 +148,7 @@ impl OnDemandRelay::new(self.source_client.clone(), None); + SubstrateFinalitySource::::new(self.source_client.clone(), None); let (header, mut proof) = finality_source.prove_block_finality(current_required_header).await?; let header_id = header.id(); @@ -198,8 +207,8 @@ impl OnDemandRelay( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, target_transaction_params: TransactionParams>, headers_to_relay: HeadersToRelay, required_header_number: RequiredHeaderNumberRef, @@ -209,7 +218,7 @@ async fn background_task( { let relay_task_name = on_demand_headers_relay_name::(); let target_transactions_mortality = target_transaction_params.mortality; - let mut finality_source = SubstrateFinalitySource::

::new( + let mut finality_source = SubstrateFinalitySource::::new( source_client.clone(), Some(required_header_number.clone()), ); @@ -246,7 +255,8 @@ async fn background_task( // read best finalized source header number from target let best_finalized_source_header_at_target = - best_finalized_source_header_at_target::

(&finality_target, &relay_task_name).await; + best_finalized_source_header_at_target::(&finality_target, &relay_task_name) + .await; if matches!(best_finalized_source_header_at_target, Err(ref e) if e.is_connection_error()) { relay_utils::relay_loop::reconnect_failed_client( FailedClient::Target, @@ -410,13 +420,17 @@ async fn mandatory_headers_scan_range( /// it. /// /// Returns `true` if header was found and (asked to be) relayed and `false` otherwise. -async fn relay_mandatory_header_from_range( - finality_source: &SubstrateFinalitySource

, +async fn relay_mandatory_header_from_range( + finality_source: &SubstrateFinalitySource, required_header_number: &RequiredHeaderNumberRef, best_finalized_source_header_at_target: String, range: (BlockNumberOf, BlockNumberOf), relay_task_name: &str, -) -> Result { +) -> Result +where + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, +{ // search for mandatory header first let mandatory_source_header_number = find_mandatory_header_in_range(finality_source, range).await?; @@ -451,10 +465,14 @@ async fn relay_mandatory_header_from_range( /// Read best finalized source block number from source client. /// /// Returns `None` if we have failed to read the number. -async fn best_finalized_source_header_at_source( - finality_source: &SubstrateFinalitySource

, +async fn best_finalized_source_header_at_source( + finality_source: &SubstrateFinalitySource, relay_task_name: &str, -) -> Result, relay_substrate_client::Error> { +) -> Result, relay_substrate_client::Error> +where + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, +{ finality_source.on_chain_best_finalized_block_number().await.map_err(|error| { log::error!( target: "bridge", @@ -470,11 +488,16 @@ async fn best_finalized_source_header_at_source( - finality_target: &SubstrateFinalityTarget

, +async fn best_finalized_source_header_at_target( + finality_target: &SubstrateFinalityTarget, relay_task_name: &str, -) -> Result, as RelayClient>::Error> +) -> Result< + BlockNumberOf, + as RelayClient>::Error, +> where + P: SubstrateFinalitySyncPipeline, + TargetClnt: Client, AccountIdOf: From< as sp_core::Pair>::Public>, { finality_target @@ -496,10 +519,14 @@ where /// Read first mandatory header in given inclusive range. /// /// Returns `Ok(None)` if there were no mandatory headers in the range. -async fn find_mandatory_header_in_range( - finality_source: &SubstrateFinalitySource

, +async fn find_mandatory_header_in_range( + finality_source: &SubstrateFinalitySource, range: (BlockNumberOf, BlockNumberOf), -) -> Result>, relay_substrate_client::Error> { +) -> Result>, relay_substrate_client::Error> +where + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, +{ let mut current = range.0; while current <= range.1 { let header = finality_source.client().header_by_number(current).await?; diff --git a/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs b/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs index 966bdc310720..654cb6628d5f 100644 --- a/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs +++ b/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs @@ -53,29 +53,34 @@ use std::fmt::Debug; /// (e.g. messages relay) needs it to continue its regular work. When enough parachain headers /// are relayed, on-demand stops syncing headers. #[derive(Clone)] -pub struct OnDemandParachainsRelay { +pub struct OnDemandParachainsRelay { /// Relay task name. relay_task_name: String, /// Channel used to communicate with background task and ask for relay of parachain heads. required_header_number_sender: Sender>, /// Source relay chain client. - source_relay_client: Client, + source_relay_client: SourceRelayClnt, /// Target chain client. - target_client: Client, + target_client: TargetClnt, /// On-demand relay chain relay. on_demand_source_relay_to_target_headers: Arc>, } -impl OnDemandParachainsRelay

{ +impl< + P: SubstrateParachainsPipeline, + SourceRelayClnt: Client, + TargetClnt: Client, + > OnDemandParachainsRelay +{ /// Create new on-demand parachains relay. /// /// Note that the argument is the source relay chain client, not the parachain client. /// That's because parachain finality is determined by the relay chain and we don't /// need to connect to the parachain itself here. pub fn new( - source_relay_client: Client, - target_client: Client, + source_relay_client: SourceRelayClnt, + target_client: TargetClnt, target_transaction_params: TransactionParams>, on_demand_source_relay_to_target_headers: Arc< dyn OnDemandRelay, @@ -114,10 +119,13 @@ impl OnDemandParachainsRelay

{ } #[async_trait] -impl OnDemandRelay - for OnDemandParachainsRelay

+impl + OnDemandRelay + for OnDemandParachainsRelay where P::SourceParachain: Chain, + SourceRelayClnt: Client, + TargetClnt: Client, { async fn reconnect(&self) -> Result<(), SubstrateError> { // using clone is fine here (to avoid mut requirement), because clone on Client clones @@ -147,7 +155,7 @@ where required_parachain_header: BlockNumberOf, ) -> Result<(HeaderIdOf, Vec>), SubstrateError> { // select headers to prove - let parachains_source = ParachainsSource::

::new( + let parachains_source = ParachainsSource::::new( self.source_relay_client.clone(), Arc::new(Mutex::new(AvailableHeader::Missing)), ); @@ -231,8 +239,8 @@ where /// Background task that is responsible for starting parachain headers relay. async fn background_task( - source_relay_client: Client, - target_client: Client, + source_relay_client: impl Client, + target_client: impl Client, target_transaction_params: TransactionParams>, on_demand_source_relay_to_target_headers: Arc< dyn OnDemandRelay, @@ -255,9 +263,11 @@ async fn background_task( let parachains_relay_task = futures::future::Fuse::terminated(); futures::pin_mut!(parachains_relay_task); - let mut parachains_source = - ParachainsSource::

::new(source_relay_client.clone(), required_para_header_ref.clone()); - let mut parachains_target = ParachainsTarget::

::new( + let mut parachains_source = ParachainsSource::::new( + source_relay_client.clone(), + required_para_header_ref.clone(), + ); + let mut parachains_target = ParachainsTarget::::new( source_relay_client.clone(), target_client.clone(), target_transaction_params.clone(), @@ -446,9 +456,9 @@ struct RelayData { } /// Read required data from source and target clients. -async fn read_relay_data( - source: &ParachainsSource

, - target: &ParachainsTarget

, +async fn read_relay_data( + source: &ParachainsSource, + target: &ParachainsTarget, required_header_number: BlockNumberOf, ) -> Result< RelayData< @@ -459,7 +469,9 @@ async fn read_relay_data( FailedClient, > where - ParachainsTarget

: + SourceRelayClnt: Client, + TargetClnt: Client, + ParachainsTarget: TargetClient> + RelayClient, { let map_target_err = |e| { @@ -642,13 +654,19 @@ trait SelectHeadersToProveEnvironment { } #[async_trait] -impl<'a, P: SubstrateParachainsPipeline> +impl<'a, P: SubstrateParachainsPipeline, SourceRelayClnt, TargetClnt> SelectHeadersToProveEnvironment< BlockNumberOf, HashOf, BlockNumberOf, HashOf, - > for (&'a OnDemandParachainsRelay

, &'a ParachainsSource

) + > + for ( + &'a OnDemandParachainsRelay, + &'a ParachainsSource, + ) where + SourceRelayClnt: Client, + TargetClnt: Client, { fn parachain_id(&self) -> ParaId { ParaId(P::SourceParachain::PARACHAIN_ID) @@ -665,7 +683,6 @@ impl<'a, P: SubstrateParachainsPipeline> ) -> Result, SubstrateError> { Ok(crate::messages_source::read_client_state::( &self.0.target_client, - None, ) .await? .best_finalized_peer_at_best_self diff --git a/bridges/relays/lib-substrate-relay/src/parachains/source.rs b/bridges/relays/lib-substrate-relay/src/parachains/source.rs index 4cc512b9d9b4..11b9d6dbf5bd 100644 --- a/bridges/relays/lib-substrate-relay/src/parachains/source.rs +++ b/bridges/relays/lib-substrate-relay/src/parachains/source.rs @@ -37,22 +37,24 @@ pub type RequiredHeaderIdRef = Arc>>>; /// Substrate client as parachain heads source. #[derive(Clone)] -pub struct ParachainsSource { - client: Client, +pub struct ParachainsSource { + client: SourceRelayClnt, max_head_id: RequiredHeaderIdRef, } -impl ParachainsSource

{ +impl> + ParachainsSource +{ /// Creates new parachains source client. pub fn new( - client: Client, + client: SourceRelayClnt, max_head_id: RequiredHeaderIdRef, ) -> Self { ParachainsSource { client, max_head_id } } /// Returns reference to the underlying RPC client. - pub fn client(&self) -> &Client { + pub fn client(&self) -> &SourceRelayClnt { &self.client } @@ -64,8 +66,8 @@ impl ParachainsSource

{ let para_id = ParaId(P::SourceParachain::PARACHAIN_ID); let storage_key = parachain_head_storage_key_at_source(P::SourceRelayChain::PARAS_PALLET_NAME, para_id); - let para_head = self.client.raw_storage_value(storage_key, Some(at_block.1)).await?; - let para_head = para_head.map(|h| ParaHead::decode(&mut &h.0[..])).transpose()?; + let para_head: Option = + self.client.storage_value(at_block.hash(), storage_key).await?; let para_head = match para_head { Some(para_head) => para_head, None => return Ok(None), @@ -76,7 +78,9 @@ impl ParachainsSource

{ } #[async_trait] -impl RelayClient for ParachainsSource

{ +impl> RelayClient + for ParachainsSource +{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { @@ -85,8 +89,8 @@ impl RelayClient for ParachainsSource

{ } #[async_trait] -impl SourceClient> - for ParachainsSource

+impl> + SourceClient> for ParachainsSource where P::SourceParachain: Chain, { @@ -151,7 +155,7 @@ where parachain_head_storage_key_at_source(P::SourceRelayChain::PARAS_PALLET_NAME, parachain); let parachain_heads_proof = self .client - .prove_storage(vec![storage_key.clone()], at_block.1) + .prove_storage(at_block.hash(), vec![storage_key.clone()]) .await? .into_iter_nodes() .collect(); @@ -165,10 +169,8 @@ where // rereading actual value here let parachain_head = self .client - .raw_storage_value(storage_key, Some(at_block.1)) + .storage_value::(at_block.hash(), storage_key) .await? - .map(|h| ParaHead::decode(&mut &h.0[..])) - .transpose()? .ok_or_else(|| { SubstrateError::Custom(format!( "Failed to read expected parachain {parachain:?} head at {at_block:?}" diff --git a/bridges/relays/lib-substrate-relay/src/parachains/target.rs b/bridges/relays/lib-substrate-relay/src/parachains/target.rs index 531d55b53223..f66b193340c1 100644 --- a/bridges/relays/lib-substrate-relay/src/parachains/target.rs +++ b/bridges/relays/lib-substrate-relay/src/parachains/target.rs @@ -42,31 +42,42 @@ use relay_substrate_client::{ }; use relay_utils::relay_loop::Client as RelayClient; use sp_core::Pair; +use sp_runtime::traits::Header; /// Substrate client as parachain heads source. -pub struct ParachainsTarget { - source_client: Client, - target_client: Client, +pub struct ParachainsTarget { + source_client: SourceClnt, + target_client: TargetClnt, transaction_params: TransactionParams>, } -impl ParachainsTarget

{ +impl< + P: SubstrateParachainsPipeline, + SourceClnt: Client, + TargetClnt: Client, + > ParachainsTarget +{ /// Creates new parachains target client. pub fn new( - source_client: Client, - target_client: Client, + source_client: SourceClnt, + target_client: TargetClnt, transaction_params: TransactionParams>, ) -> Self { ParachainsTarget { source_client, target_client, transaction_params } } /// Returns reference to the underlying RPC client. - pub fn target_client(&self) -> &Client { + pub fn target_client(&self) -> &TargetClnt { &self.target_client } } -impl Clone for ParachainsTarget

{ +impl< + P: SubstrateParachainsPipeline, + SourceClnt: Client, + TargetClnt: Clone, + > Clone for ParachainsTarget +{ fn clone(&self) -> Self { ParachainsTarget { source_client: self.source_client.clone(), @@ -77,7 +88,12 @@ impl Clone for ParachainsTarget

{ } #[async_trait] -impl RelayClient for ParachainsTarget

{ +impl< + P: SubstrateParachainsPipeline, + SourceClnt: Client, + TargetClnt: Client, + > RelayClient for ParachainsTarget +{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { @@ -88,14 +104,17 @@ impl RelayClient for ParachainsTarget

{ } #[async_trait] -impl

TargetClient> for ParachainsTarget

+impl TargetClient> + for ParachainsTarget where P: SubstrateParachainsPipeline, + SourceClnt: Client, + TargetClnt: Client, AccountIdOf: From< as Pair>::Public>, P::SourceParachain: ChainBase, P::SourceRelayChain: ChainBase, { - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn best_block(&self) -> Result, Self::Error> { let best_header = self.target_client.best_header().await?; @@ -109,10 +128,10 @@ where at_block: &HeaderIdOf, ) -> Result, Self::Error> { self.target_client - .typed_state_call::<_, Option>>( + .state_call::<_, Option>>( + at_block.hash(), P::SourceRelayChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), (), - Some(at_block.1), ) .await? .map(Ok) @@ -124,7 +143,11 @@ where ) -> Result>, Self::Error> { Ok(self .target_client - .typed_state_call(P::SourceRelayChain::FREE_HEADERS_INTERVAL_METHOD.into(), (), None) + .state_call( + self.target_client.best_header().await?.hash(), + P::SourceRelayChain::FREE_HEADERS_INTERVAL_METHOD.into(), + (), + ) .await .unwrap_or_else(|e| { log::info!( @@ -151,7 +174,7 @@ where &P::SourceParachain::PARACHAIN_ID.into(), ); let storage_value: Option = - self.target_client.storage_value(storage_key, Some(at_block.hash())).await?; + self.target_client.storage_value(at_block.hash(), storage_key).await?; let para_info = match storage_value { Some(para_info) => para_info, None => return Ok(None), @@ -172,7 +195,7 @@ where ¶_info.best_head_hash.head_hash, ); let storage_value: Option = - self.target_client.storage_value(storage_key, Some(at_block.hash())).await?; + self.target_client.storage_value(at_block.hash(), storage_key).await?; let para_head_number = match storage_value { Some(para_head_data) => para_head_data.decode_parachain_head_data::()?.number, diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs index 9285a1e7ad45..ee3fc1ed2c41 100644 --- 
a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs @@ -39,8 +39,8 @@ pub fn prepare_inbound_xcm( xcm_message: Xcm, destination: InteriorLocation, ) -> Vec { - let location = xcm::VersionedInteriorLocation::V4(destination); - let xcm = xcm::VersionedXcm::::V4(xcm_message); + let location = xcm::VersionedInteriorLocation::from(destination); + let xcm = xcm::VersionedXcm::::from(xcm_message); // this is the `BridgeMessage` from polkadot xcm builder, but it has no constructor // or public fields, so just tuple // (double encoding, because `.encode()` is called on original Xcm BLOB when it is pushed From ae0b3bf6733e7b9e18badb16128a6b25bef1923b Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Fri, 14 Jun 2024 15:42:46 +0300 Subject: [PATCH 30/52] CheckWeight: account for extrinsic len as proof size (#4765) Fix https://github.com/paritytech/polkadot-sdk/issues/4743 which allows us to remove the defensive limit on pov size in Cumulus after relay chain gets upgraded with these changes. Also add unit test to ensure `CheckWeight` - `StorageWeightReclaim` integration works. 
TODO: - [x] PRDoc - [x] Add a len to all the other tests in storage weight reclaim and call `CheckWeight::pre_dispatch` --------- Signed-off-by: Andrei Sandu --- .../storage-weight-reclaim/src/lib.rs | 115 ++++++++- prdoc/pr_4765.prdoc | 18 ++ substrate/frame/executive/src/tests.rs | 7 +- .../system/src/extensions/check_weight.rs | 240 ++++++------------ 4 files changed, 205 insertions(+), 175 deletions(-) create mode 100644 prdoc/pr_4765.prdoc diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs index c09c12d7a0ab..35fa334f51c6 100644 --- a/cumulus/primitives/storage-weight-reclaim/src/lib.rs +++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs @@ -201,7 +201,7 @@ mod tests { use super::*; use frame_support::{ assert_ok, - dispatch::DispatchClass, + dispatch::{DispatchClass, PerDispatchClass}, weights::{Weight, WeightMeter}, }; use frame_system::{BlockWeight, CheckWeight}; @@ -215,7 +215,7 @@ mod tests { pages: 0u64, }); const ALICE: AccountId32 = AccountId32::new([1u8; 32]); - const LEN: usize = 0; + const LEN: usize = 150; pub fn new_test_ext() -> sp_io::TestExternalities { let ext: sp_io::TestExternalities = cumulus_test_runtime::RuntimeGenesisConfig::default() @@ -256,6 +256,10 @@ mod tests { }); } + fn get_storage_weight() -> PerDispatchClass { + BlockWeight::::get() + } + #[test] fn basic_refund() { // The real cost will be 100 bytes of storage size @@ -268,6 +272,9 @@ mod tests { let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; let post_info = PostDispatchInfo::default(); + // Should add 500 + 150 (len) to weight. 
+ assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -283,7 +290,7 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 600); + assert_eq!(get_storage_weight().total().proof_size(), 1250); }) } @@ -299,6 +306,9 @@ mod tests { let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; let post_info = PostDispatchInfo::default(); + // Adds 500 + 150 (len) weight + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -313,7 +323,7 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 1000); + assert_eq!(get_storage_weight().total().proof_size(), 1650); }) } @@ -327,6 +337,9 @@ mod tests { let info = DispatchInfo { weight: Weight::from_parts(0, 100), ..Default::default() }; let post_info = PostDispatchInfo::default(); + // Weight added should be 100 + 150 (len) + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -342,7 +355,10 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 1100); + assert_eq!( + get_storage_weight().total().proof_size(), + 1100 + LEN as u64 + info.weight.proof_size() + ); }) } @@ -354,6 +370,8 @@ mod tests { let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; let post_info = PostDispatchInfo::default(); + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -368,7 +386,8 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 0); + // Proof size should be exactly equal to extrinsic length + assert_eq!(get_storage_weight().total().proof_size(), LEN as u64); 
}); } @@ -382,12 +401,17 @@ mod tests { let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; let post_info = PostDispatchInfo::default(); + // Adds 500 + 150 (len) weight, total weight is 1950 + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(300)); + // Refund 500 unspent weight according to `post_info`, total weight is now 1650 assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); + // Recorded proof size is negative -200, total weight is now 1450 assert_ok!(StorageWeightReclaim::::post_dispatch( Some(pre), &info, @@ -396,7 +420,7 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 800); + assert_eq!(get_storage_weight().total().proof_size(), 1450); }); } @@ -416,6 +440,9 @@ mod tests { pays_fee: Default::default(), }; + // Should add 300 + 150 (len) of weight + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -432,7 +459,8 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 900); + // Reclaimed 100 + assert_eq!(get_storage_weight().total().proof_size(), 1350); }) } @@ -451,6 +479,9 @@ mod tests { pays_fee: Default::default(), }; + // Adds 50 + 150 (len) weight, total weight 1200 + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -458,7 +489,56 @@ mod tests { // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. 
+ + // Refunds unspent 25 weight according to `post_info`, 1175 assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); + // Adds 200 - 25 (unspent) == 175 weight, total weight 1350 + assert_ok!(StorageWeightReclaim::::post_dispatch( + Some(pre), + &info, + &post_info, + LEN, + &Ok(()) + )); + + assert_eq!(get_storage_weight().total().proof_size(), 1350); + }) + } + + #[test] + fn test_nothing_relcaimed() { + let mut test_ext = setup_test_externalities(&[100, 200]); + + test_ext.execute_with(|| { + set_current_storage_weight(0); + // Benchmarked storage weight: 100 + let info = DispatchInfo { weight: Weight::from_parts(100, 100), ..Default::default() }; + + // Actual proof size is 100 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 100)), + pays_fee: Default::default(), + }; + + // Adds benchmarked weight 100 + 150 (len), total weight is now 250 + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + + // Weight should go up by 150 len + 100 proof size weight, total weight 250 + assert_eq!(get_storage_weight().total().proof_size(), 250); + + let pre = StorageWeightReclaim::(PhantomData) + .pre_dispatch(&ALICE, CALL, &info, LEN) + .unwrap(); + // Should return `setup_test_externalities` proof recorder value: 100. + assert_eq!(pre, Some(100)); + + // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. + // Nothing to refund, unspent is 0, total weight 250 + assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, LEN, &Ok(()))); + // `setup_test_externalities` proof recorder value: 200, so this means the extrinsic + // actually used 100 proof size. 
+ // Nothing to refund or add, weight matches proof recorder assert_ok!(StorageWeightReclaim::::post_dispatch( Some(pre), &info, @@ -467,7 +547,9 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 1150); + // Check block len weight was not reclaimed: + // 100 weight + 150 extrinsic len == 250 proof size + assert_eq!(get_storage_weight().total().proof_size(), 250); }) } @@ -487,11 +569,15 @@ mod tests { pays_fee: Default::default(), }; + // Adds 300 + 150 (len) weight, total weight 1450 + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(100)); + // This refunds 100 - 50(unspent), total weight is now 1400 assert_ok!(StorageWeightReclaim::::post_dispatch( Some(pre), &info, @@ -504,7 +590,8 @@ mod tests { // we always need to call `post_dispatch` to verify that they interoperate correctly. assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_eq!(BlockWeight::::get().total().proof_size(), 900); + // Above call refunds 50 (unspent), total weight is 1350 now + assert_eq!(get_storage_weight().total().proof_size(), 1350); }) } @@ -523,11 +610,15 @@ mod tests { pays_fee: Default::default(), }; + // Adds 50 + 150 (len) weight, total weight is 1200 + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(100)); + // Adds additional 150 weight recorded assert_ok!(StorageWeightReclaim::::post_dispatch( Some(pre), &info, @@ -540,7 +631,7 @@ mod tests { // we always need to call `post_dispatch` to verify that they interoperate correctly. 
assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_eq!(BlockWeight::::get().total().proof_size(), 1150); + assert_eq!(get_storage_weight().total().proof_size(), 1350); }) } @@ -644,7 +735,7 @@ mod tests { // We reclaimed 3 bytes of storage size! assert_eq!(reclaimed, Some(Weight::from_parts(0, 3))); - assert_eq!(BlockWeight::::get().total().proof_size(), 10); + assert_eq!(get_storage_weight().total().proof_size(), 10); assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(10, 8)); } } diff --git a/prdoc/pr_4765.prdoc b/prdoc/pr_4765.prdoc new file mode 100644 index 000000000000..f64b2fdc51ab --- /dev/null +++ b/prdoc/pr_4765.prdoc @@ -0,0 +1,18 @@ +title: CheckWeight - account for extrinsic len as proof size + +doc: + - audience: Runtime Dev + description: | + This changes how CheckWeight extension works. It will now account for the extrinsic length + as proof size. When `on_idle` is called, the remaining weight parameter reflects this. + +crates: + - name: frame-system + bump: patch + - name: frame-executive + bump: none + - name: cumulus-primitives-storage-weight-reclaim + bump: none + + + diff --git a/substrate/frame/executive/src/tests.rs b/substrate/frame/executive/src/tests.rs index 71cb54d1fab4..69a970a89d93 100644 --- a/substrate/frame/executive/src/tests.rs +++ b/substrate/frame/executive/src/tests.rs @@ -649,8 +649,8 @@ fn block_weight_limit_enforced() { assert!(res.is_ok()); assert_eq!( >::block_weight().total(), - //--------------------- on_initialize + block_execution + extrinsic_base weight - Weight::from_parts((encoded_len + 5) * (nonce + 1), 0) + base_block_weight, + //--------------------- on_initialize + block_execution + extrinsic_base weight + extrinsic len + Weight::from_parts((encoded_len + 5) * (nonce + 1), (nonce + 1)* encoded_len) + base_block_weight, ); assert_eq!( >::extrinsic_index(), @@ -698,9 +698,10 @@ fn block_weight_and_size_is_stored_per_tx() { ::BlockWeights::get() 
.get(DispatchClass::Normal) .base_extrinsic; + // Check we account for all extrinsic weight and their len. assert_eq!( >::block_weight().total(), - base_block_weight + 3u64 * extrinsic_weight, + base_block_weight + 3u64 * extrinsic_weight + 3u64 * Weight::from_parts(0, len as u64), ); assert_eq!(>::all_extrinsics_len(), 3 * len); diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs index 5d6c68989ed5..d4705f200efd 100644 --- a/substrate/frame/system/src/extensions/check_weight.rs +++ b/substrate/frame/system/src/extensions/check_weight.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{limits::BlockWeights, Config, DispatchClass, Pallet, LOG_TARGET}; +use crate::{limits::BlockWeights, Config, Pallet, LOG_TARGET}; use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, PostDispatchInfo}, @@ -106,8 +106,7 @@ where let all_weight = Pallet::::block_weight(); let maximum_weight = T::BlockWeights::get(); let next_weight = - calculate_consumed_weight::(&maximum_weight, all_weight, info)?; - check_combined_proof_size::(info, &maximum_weight, next_len, &next_weight)?; + calculate_consumed_weight::(&maximum_weight, all_weight, info, len)?; Self::check_extrinsic_weight(info)?; crate::AllExtrinsicsLen::::put(next_len); @@ -130,36 +129,6 @@ where } } -/// Check that the combined extrinsic length and proof size together do not exceed the PoV limit. -pub fn check_combined_proof_size( - info: &DispatchInfoOf, - maximum_weight: &BlockWeights, - next_len: u32, - next_weight: &crate::ConsumedWeight, -) -> Result<(), TransactionValidityError> -where - Call: Dispatchable, -{ - // This extra check ensures that the extrinsic length does not push the - // PoV over the limit. 
- let total_pov_size = next_weight.total().proof_size().saturating_add(next_len as u64); - if total_pov_size > maximum_weight.max_block.proof_size() { - log::debug!( - target: LOG_TARGET, - "Extrinsic exceeds total pov size. Still including if mandatory. size: {}kb, limit: {}kb, is_mandatory: {}", - total_pov_size as f64/1024.0, - maximum_weight.max_block.proof_size() as f64/1024.0, - info.class == DispatchClass::Mandatory - ); - return match info.class { - // Allow mandatory extrinsics - DispatchClass::Mandatory => Ok(()), - _ => Err(InvalidTransaction::ExhaustsResources.into()), - }; - } - Ok(()) -} - /// Checks if the current extrinsic can fit into the block with respect to block weight limits. /// /// Upon successes, it returns the new block weight as a `Result`. @@ -167,12 +136,16 @@ pub fn calculate_consumed_weight( maximum_weight: &BlockWeights, mut all_weight: crate::ConsumedWeight, info: &DispatchInfoOf, + len: usize, ) -> Result where Call: Dispatchable, { - let extrinsic_weight = - info.weight.saturating_add(maximum_weight.get(info.class).base_extrinsic); + // Also Consider extrinsic length as proof weight. + let extrinsic_weight = info + .weight + .saturating_add(maximum_weight.get(info.class).base_extrinsic) + .saturating_add(Weight::from_parts(0, len as u64)); let limit_per_class = maximum_weight.get(info.class); // add the weight. If class is unlimited, use saturating add instead of checked one. 
@@ -772,168 +745,115 @@ mod tests { &maximum_weight, all_weight.clone(), &mandatory1, + 0 )); assert_err!( calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, all_weight, &mandatory2, + 0 ), InvalidTransaction::ExhaustsResources ); } #[test] - fn maximum_proof_size_includes_length() { + fn proof_size_includes_length() { let maximum_weight = BlockWeights::builder() .base_block(Weight::zero()) .for_class(DispatchClass::non_mandatory(), |w| { w.base_extrinsic = Weight::zero(); - w.max_total = Some(Weight::from_parts(20, 10)); + w.max_total = Some(Weight::from_parts(20, 1000)); }) .for_class(DispatchClass::Mandatory, |w| { w.base_extrinsic = Weight::zero(); - w.reserved = Some(Weight::from_parts(5, 10)); - w.max_total = None; + w.max_total = Some(Weight::from_parts(20, 1000)); }) .build_or_panic(); + let all_weight = crate::ConsumedWeight::new(|class| match class { + DispatchClass::Normal => Weight::from_parts(5, 0), + DispatchClass::Operational => Weight::from_parts(5, 0), + DispatchClass::Mandatory => Weight::from_parts(0, 0), + }); - assert_eq!(maximum_weight.max_block, Weight::from_parts(20, 10)); + let normal = DispatchInfo { + weight: Weight::from_parts(5, 0), + class: DispatchClass::Normal, + ..Default::default() + }; - let info = DispatchInfo { class: DispatchClass::Normal, ..Default::default() }; - let mandatory = DispatchInfo { class: DispatchClass::Mandatory, ..Default::default() }; - // We have 10 reftime and 5 proof size left over. - let next_weight = crate::ConsumedWeight::new(|class| match class { - DispatchClass::Normal => Weight::from_parts(10, 5), - DispatchClass::Operational => Weight::from_parts(0, 0), - DispatchClass::Mandatory => Weight::zero(), - }); + let mandatory = DispatchInfo { + weight: Weight::from_parts(5, 0), + class: DispatchClass::Mandatory, + ..Default::default() + }; - // Simple checks for the length - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, + // Using 0 length extrinsics. 
+ let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, + all_weight.clone(), + &normal, 0, - &next_weight - )); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, + ) + .unwrap(); + + assert_eq!(consumed.total().saturating_sub(all_weight.total()), normal.weight); + + let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, - 5, - &next_weight - )); - assert_err!( - check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 6, - &next_weight - ), - InvalidTransaction::ExhaustsResources - ); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( + all_weight.clone(), &mandatory, - &maximum_weight, - 6, - &next_weight - )); + 0, + ) + .unwrap(); + assert_eq!(consumed.total().saturating_sub(all_weight.total()), mandatory.weight); - // We have 10 reftime and 0 proof size left over. - let next_weight = crate::ConsumedWeight::new(|class| match class { - DispatchClass::Normal => Weight::from_parts(10, 10), - DispatchClass::Operational => Weight::from_parts(0, 0), - DispatchClass::Mandatory => Weight::zero(), - }); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, + // Using non zero length extrinsics. + let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, - 0, - &next_weight - )); - assert_err!( - check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 1, - &next_weight - ), - InvalidTransaction::ExhaustsResources + all_weight.clone(), + &normal, + 100, + ) + .unwrap(); + // Must account for the len in the proof size + assert_eq!( + consumed.total().saturating_sub(all_weight.total()), + normal.weight.add_proof_size(100) ); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &mandatory, - &maximum_weight, - 1, - &next_weight - )); - // We have 10 reftime and 2 proof size left over. - // Used weight is spread across dispatch classes this time. 
- let next_weight = crate::ConsumedWeight::new(|class| match class { - DispatchClass::Normal => Weight::from_parts(10, 5), - DispatchClass::Operational => Weight::from_parts(0, 3), - DispatchClass::Mandatory => Weight::zero(), - }); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, + let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, - 0, - &next_weight - )); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 2, - &next_weight - )); - assert_err!( - check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 3, - &next_weight - ), - InvalidTransaction::ExhaustsResources - ); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( + all_weight.clone(), &mandatory, - &maximum_weight, - 3, - &next_weight - )); + 100, + ) + .unwrap(); + // Must account for the len in the proof size + assert_eq!( + consumed.total().saturating_sub(all_weight.total()), + mandatory.weight.add_proof_size(100) + ); - // Ref time is over the limit. Should not happen, but we should make sure that it is - // ignored. - let next_weight = crate::ConsumedWeight::new(|class| match class { - DispatchClass::Normal => Weight::from_parts(30, 5), - DispatchClass::Operational => Weight::from_parts(0, 0), - DispatchClass::Mandatory => Weight::zero(), - }); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, + // Using oversized zero length extrinsics. 
+ let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, - 0, - &next_weight - )); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 5, - &next_weight - )); - assert_err!( - check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 6, - &next_weight - ), - InvalidTransaction::ExhaustsResources + all_weight.clone(), + &normal, + 2000, ); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &mandatory, + // errors out + assert_eq!(consumed, Err(InvalidTransaction::ExhaustsResources.into())); + + // Using oversized zero length extrinsics. + let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, - 6, - &next_weight - )); + all_weight.clone(), + &mandatory, + 2000, + ); + // errors out + assert_eq!(consumed, Err(InvalidTransaction::ExhaustsResources.into())); } } From 2f643816d79a76155aec790a35b9b72a5d8bb726 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Mon, 17 Jun 2024 11:31:15 +0800 Subject: [PATCH 31/52] add ref doc for logging practices in FRAME (#4768) --- Cargo.lock | 3 + docs/sdk/Cargo.toml | 2 + docs/sdk/src/reference_docs/frame_logging.rs | 116 ++++++++++++++++++ docs/sdk/src/reference_docs/mod.rs | 3 + docs/sdk/src/reference_docs/umbrella_crate.rs | 5 +- substrate/primitives/api/Cargo.toml | 1 + substrate/primitives/api/src/lib.rs | 1 + 7 files changed, 129 insertions(+), 2 deletions(-) create mode 100644 docs/sdk/src/reference_docs/frame_logging.rs diff --git a/Cargo.lock b/Cargo.lock index 71b98d2cd5c4..4f4e0a988cec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14381,6 +14381,8 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-runtime-interface 24.0.0", + "sp-std 14.0.0", + "sp-tracing 16.0.0", "sp-version", "staging-chain-spec-builder", "staging-node-cli", @@ -19530,6 +19532,7 @@ dependencies = [ name = "sp-api" version = "26.0.0" dependencies = [ + "docify", "hash-db", "log", "parity-scale-codec", diff 
--git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 10c091211671..ee603f8c4946 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -89,6 +89,8 @@ pallet-babe = { path = "../../substrate/frame/babe" } # Primitives sp-io = { path = "../../substrate/primitives/io" } +sp-std = { path = "../../substrate/primitives/std" } +sp-tracing = { path = "../../substrate/primitives/tracing" } sp-runtime-interface = { path = "../../substrate/primitives/runtime-interface" } sp-api = { path = "../../substrate/primitives/api" } sp-core = { path = "../../substrate/primitives/core" } diff --git a/docs/sdk/src/reference_docs/frame_logging.rs b/docs/sdk/src/reference_docs/frame_logging.rs new file mode 100644 index 000000000000..301fa7ef83f8 --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_logging.rs @@ -0,0 +1,116 @@ +//! # FRAME Logging +//! +//! This reference docs briefly explores how to do logging and printing runtimes, mainly +//! FRAME-based. +//! +//! ## Using `println!` +//! +//! To recap, as with standard Rust, you can use `println!` _in your tests_, but it will only print +//! out if executed with `--nocapture`, or if the test panics. +//! +//! ``` +//! fn it_print() { +//! println!("Hello, world!"); +//! } +//! ``` +//! +//! within the pallet, if you want to use the standard `println!`, it needs to be wrapped in +//! [`sp_std::if_std`]. Of course, this means that this print code is only available to you in the +//! `std` compiler flag, and never present in a wasm build. +//! +//! ``` +//! // somewhere in your pallet. This is not a real pallet code. +//! mod pallet { +//! struct Pallet; +//! impl Pallet { +//! fn print() { +//! sp_std::if_std! { +//! println!("Hello, world!"); +//! } +//! } +//! } +//! } +//! ``` +//! +//! ## Using `log` +//! +//! First, ensure you are familiar with the `log` crate. In short, each log statement has: +//! +//! 1. `log-level`, signifying how important it is +//! 2. `log-target`, signifying to which component it belongs. +//! 
+//! Add log statements to your pallet as such: +//! +//! You can add the log crate to the `Cargo.toml` of the pallet. +//! +//! ```text +//! #[dependencies] +//! log = { version = "x.y.z", default-features = false } +//! +//! #[features] +//! std = [ +//! // snip -- other pallets +//! "log/std" +//! ] +//! ``` +//! +//! More conveniently, the `frame` umbrella crate re-exports the log crate as [`frame::log`]. +//! +//! Then, the pallet can use this crate to emit log statements. In this statement, we use the info +//! level, and the target is `pallet-example`. +//! +//! ``` +//! mod pallet { +//! struct Pallet; +//! +//! impl Pallet { +//! fn logs() { +//! frame::log::info!(target: "pallet-example", "Hello, world!"); +//! } +//! } +//! } +//! ``` +//! +//! This will in itself just emit the log messages, **but unless if captured by a logger, they will +//! not go anywhere**. [`sp_api`] provides a handy function to enable the runtime logging: +//! +//! ``` +//! // in your test +//! fn it_also_prints() { +//! sp_api::init_runtime_logger(); +//! // call into your pallet, and now it will print `log` statements. +//! } +//! ``` +//! +//! Alternatively, you can use [`sp_tracing::try_init_simple`]. +//! +//! `info`, `error` and `warn` logs are printed by default, but if you want lower level logs to also +//! be printed, you must to add the following compiler flag: +//! +//! ```text +//! RUST_LOG=pallet-example=trace cargo test +//! ``` +//! +//! ## Enabling Logs in Production +//! +//! All logs from the runtime are emitted by default, but there is a feature flag in [`sp_api`], +//! called `disable-logging`, that can be used to disable all logs in the runtime. This is useful +//! for production chains to reduce the size and overhead of the wasm runtime. +#![doc = docify::embed!("../../substrate/primitives/api/src/lib.rs", init_runtime_logger)] +//! +//! Similar to the above, the proper `RUST_LOG` must also be passed to your compiler flag when +//! compiling the runtime. +//! 
+//! ## Log Target Prefixing +//! +//! Many [`crate::polkadot_sdk::frame_runtime`] pallets emit logs with log target `runtime::`, for example `runtime::system`. This then allows one to run a node with a wasm blob +//! compiled with `LOG_TARGET=runtime=debug`, which enables the log target of all pallets who's log +//! target starts with `runtime`. +//! +//! ## Low Level Primitives +//! +//! Under the hood, logging is another instance of host functions under the hood (as defined in +//! [`crate::reference_docs::wasm_meta_protocol`]). The runtime uses a set of host functions under +//! [`sp_io::logging`] and [`sp_io::misc`] to emit all logs and prints. You typically do not need to +//! use these APIs directly. diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index 51150a558375..688339b7e380 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -93,6 +93,9 @@ pub mod frame_offchain_workers; /// together. pub mod frame_pallet_coupling; +/// Learn about how to do logging in FRAME-based runtimes. +pub mod frame_logging; + /// Learn about the Polkadot Umbrella crate that re-exports all other crates. pub mod umbrella_crate; diff --git a/docs/sdk/src/reference_docs/umbrella_crate.rs b/docs/sdk/src/reference_docs/umbrella_crate.rs index 9751b0ad5ad6..0b3445cfc4bc 100644 --- a/docs/sdk/src/reference_docs/umbrella_crate.rs +++ b/docs/sdk/src/reference_docs/umbrella_crate.rs @@ -28,8 +28,9 @@ //! `node` feature. For docs.rs the manifest contains specific configuration to make it show up //! all re-exports. //! -//! There is a specific `zepter` check in place to ensure that the features of the umbrella are -//! correctly configured. This check is run in CI and locally when running `zepter`. +//! There is a specific [`zepter`](https://github.com/ggwpez/zepter) check in place to ensure that +//! the features of the umbrella are correctly configured. This check is run in CI and locally when +//! 
running `zepter`. //! //! ## Generation //! diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index f48480f398d0..b334880785f2 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -33,6 +33,7 @@ scale-info = { version = "2.11.1", default-features = false, features = [ ] } sp-metadata-ir = { path = "../metadata-ir", default-features = false, optional = true } log = { workspace = true } +docify = { version = "0.2.1" } [dev-dependencies] sp-test-primitives = { path = "../test-primitives" } diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index 20f989c4882e..cd8da8ba2374 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -532,6 +532,7 @@ pub trait ConstructRuntimeApi> { fn construct_runtime_api(call: &C) -> ApiRef; } +#[docify::export] /// Init the [`RuntimeLogger`](sp_runtime::runtime_logger::RuntimeLogger). pub fn init_runtime_logger() { #[cfg(not(feature = "disable-logging"))] From 796890979e5d7d16a522c304376d78eec120f3cb Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 17 Jun 2024 13:46:04 +0200 Subject: [PATCH 32/52] CI: Add stable clobber (#4780) Clobbers the `stable` branch with the `audited` tag as described in the [RELEASE.md](https://github.com/paritytech/polkadot-sdk/blob/master/docs/RELEASE.md#clobbering). Example of the `staging_stable` branch now after force-pushing the `staging_audited` tag for a few times. The `staging_` prefix has now been removed and should be ready for normal use. The only trigger is currently manual, but can easily be set to three months. 
![Screenshot 2024-06-13 at 00 47 43](https://github.com/paritytech/polkadot-sdk/assets/10380170/97e070ad-ce2d-4504-83a0-ad6717b6e73e) --------- Signed-off-by: Oliver Tale-Yazdi --- .github/workflows/release-clobber-stable.yml | 70 ++++++++++++++++++++ .github/workflows/release-srtool.yml | 2 - 2 files changed, 70 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/release-clobber-stable.yml diff --git a/.github/workflows/release-clobber-stable.yml b/.github/workflows/release-clobber-stable.yml new file mode 100644 index 000000000000..643c14daa15b --- /dev/null +++ b/.github/workflows/release-clobber-stable.yml @@ -0,0 +1,70 @@ +name: Clobber Stable + +# This action implements the +# [Clobbering](https://github.com/paritytech/polkadot-sdk/blob/master/docs/RELEASE.md#clobbering) +# process from the release process. It pushes a new commit to the `stable` branch with all the +# current content of the `audited` tag. It does not use a merge commit, but rather 'clobbers' the +# branch with a single commit that contains all the changes. It has a naming scheme of `Clobber with +# audited ($COMMIT)`. +# Currently, the script is only triggered manually, but can be easily changed to a schedule. + +on: + workflow_dispatch: + +permissions: + contents: write + +jobs: + clobber-stable: + runs-on: ubuntu-latest + timeout-minutes: 5 + env: + STABLE: stable + UNSTABLE: master + AUDITED: audited + steps: + - name: Checkout + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Prechecks + run: | + # Properly fetch + git fetch --prune --unshallow origin tag $AUDITED + git fetch origin $STABLE + + # Sanity checks + git checkout -q tags/$AUDITED || (echo "Could not find the '$AUDITED' tag." && exit 1) + COMMIT=$(git rev-parse tags/$AUDITED) + #$(git branch --contains $COMMIT | grep -q $UNSTABLE) || (echo "The '$AUDITED' tag is not on the '$UNSTABLE' branch." 
&& exit 1) + + git config --global user.email "admin@parity.io" + git config --global user.name "Parity Release Team" + + - name: Prepare commit + run: | + git checkout --quiet origin/$STABLE + + # Delete all tracked files in the working directory + git ls-files -z | xargs -0 rm -f + + # Find and delete any empty directories + find . -type d -empty -delete + + git add . 1>/dev/null 2>/dev/null + git commit -qm "Delete all files" + + # Grab the files from the commit + git checkout --quiet tags/$AUDITED -- . + + # Stage, commit, and push the working directory which now matches 'audited' 1:1 + git status + COMMIT=$(git rev-parse --short=10 tags/$AUDITED) + git add . 1>/dev/null 2>/dev/null + git commit --allow-empty --amend -qm "Clobber with $AUDITED ($COMMIT)" + + - name: Push stable branch + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + git log -3 + git push --verbose origin HEAD:$STABLE diff --git a/.github/workflows/release-srtool.yml b/.github/workflows/release-srtool.yml index 95b1846b98e0..69a4bdbdda9a 100644 --- a/.github/workflows/release-srtool.yml +++ b/.github/workflows/release-srtool.yml @@ -6,8 +6,6 @@ env: on: push: - tags: - - "*" branches: - release-v[0-9]+.[0-9]+.[0-9]+* - release-cumulus-v[0-9]+* From d91cbbd453c1d4553d7e3dc8753a2007fc4c5a67 Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Mon, 17 Jun 2024 14:35:15 +0200 Subject: [PATCH 33/52] Impl and use default config for pallet-staking in tests (#4797) --- substrate/frame/babe/src/mock.rs | 19 +----- substrate/frame/beefy/src/mock.rs | 18 +---- substrate/frame/delegated-staking/src/mock.rs | 21 +----- .../frame/delegated-staking/src/tests.rs | 26 +++---- .../test-staking-e2e/src/mock.rs | 11 +-- substrate/frame/fast-unstake/src/mock.rs | 20 +----- substrate/frame/grandpa/src/mock.rs | 15 +---- .../nomination-pools/benchmarking/src/mock.rs | 19 +----- .../test-delegate-stake/src/mock.rs | 18 +---- .../test-transfer-stake/src/mock.rs | 18 +---- 
.../frame/offences/benchmarking/src/mock.rs | 18 +---- substrate/frame/root-offences/src/mock.rs | 15 +---- .../frame/session/benchmarking/src/mock.rs | 18 +---- substrate/frame/staking/src/mock.rs | 8 +-- substrate/frame/staking/src/pallet/mod.rs | 67 ++++++++++++++++++- 15 files changed, 92 insertions(+), 219 deletions(-) diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index 16db40e3cb35..be38e3e7e5db 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -28,7 +28,6 @@ use frame_support::{ traits::{ConstU128, ConstU32, ConstU64, KeyOwnerProofSystem, OnInitialize}, }; use pallet_session::historical as pallet_session_historical; -use pallet_staking::FixedNominationsQuota; use sp_consensus_babe::{AuthorityId, AuthorityPair, Randomness, Slot, VrfSignature}; use sp_core::{ crypto::{KeyTypeId, Pair, VrfSecret}, @@ -133,7 +132,6 @@ pallet_staking_reward_curve::build! { parameter_types! { pub const SessionsPerEra: SessionIndex = 3; pub const BondingDuration: EraIndex = 3; - pub const SlashDeferDuration: EraIndex = 0; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); } @@ -148,35 +146,20 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBounds; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { - type RewardRemainder = (); - type CurrencyToVote = (); - type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type CurrencyBalance = ::Balance; - type Slash = (); - type Reward = (); type SessionsPerEra = SessionsPerEra; type BondingDuration = BondingDuration; - type SlashDeferDuration = SlashDeferDuration; type AdminOrigin = frame_system::EnsureRoot; type SessionInterface = Self; type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; - type MaxExposurePageSize = ConstU32<64>; type 
NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index ceca0fd07b73..35bf172d6063 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -162,35 +162,19 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBoundsOnChain; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { - type RewardRemainder = (); - type CurrencyToVote = (); type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type CurrencyBalance = ::Balance; - type Slash = (); - type Reward = (); - type SessionsPerEra = SessionsPerEra; - type BondingDuration = BondingDuration; - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; type SessionInterface = Self; type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; - type MaxExposurePageSize = ConstU32<64>; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type 
MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { diff --git a/substrate/frame/delegated-staking/src/mock.rs b/substrate/frame/delegated-staking/src/mock.rs index 0991833f8650..811d5739f4e9 100644 --- a/substrate/frame/delegated-staking/src/mock.rs +++ b/substrate/frame/delegated-staking/src/mock.rs @@ -88,7 +88,6 @@ pallet_staking_reward_curve::build! { parameter_types! { pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; - pub static BondingDuration: u32 = 3; pub static ElectionsBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build(); } pub struct OnChainSeqPhragmen; @@ -101,35 +100,17 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBoundsOnChain; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; - type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = ConstU32<1>; - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; - type BondingDuration = BondingDuration; - type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; - type NextNewSession = (); - type HistoryDepth = ConstU32<84>; - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type 
MaxUnlockingChunks = ConstU32<10>; - type MaxControllersInDeprecationBatch = ConstU32<100>; type EventListeners = (Pools, DelegatedStaking); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! { diff --git a/substrate/frame/delegated-staking/src/tests.rs b/substrate/frame/delegated-staking/src/tests.rs index d40539d40ddd..2295f7d0c871 100644 --- a/substrate/frame/delegated-staking/src/tests.rs +++ b/substrate/frame/delegated-staking/src/tests.rs @@ -501,17 +501,17 @@ mod staking_integration { ExtBuilder::default().build_and_execute(|| { start_era(1); let agent = 200; - setup_delegation_stake(agent, 201, (300..350).collect(), 100, 0); + setup_delegation_stake(agent, 201, (300..350).collect(), 320, 0); // verify withdraw not possible yet assert_noop!( - DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 300, 100, 0), + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 300, 320, 0), Error::::NotEnoughFunds ); // fill up unlocking chunks in core staking. - // 10 is the max chunks - for i in 2..=11 { + // 32 is the max chunks + for i in 2..=33 { start_era(i); assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 10)); // no withdrawals from core staking yet. @@ -519,35 +519,35 @@ mod staking_integration { } // another unbond would trigger withdrawal - start_era(12); + start_era(34); assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 10)); - // 8 previous unbonds would be withdrawn as they were already unlocked. Unlocking period - // is 3 eras. - assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 8 * 10); + // 30 previous unbonds would be withdrawn as they were already unlocked. Unlocking + // period is 3 eras. + assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 30 * 10); // release some delegation now. 
assert_ok!(DelegatedStaking::release_delegation( RawOrigin::Signed(agent).into(), 300, - 40, + 160, 0 )); - assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 80 - 40); + assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 300 - 160); // cannot release more than available assert_noop!( - DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 300, 50, 0), + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 300, 141, 0), Error::::NotEnoughFunds ); assert_ok!(DelegatedStaking::release_delegation( RawOrigin::Signed(agent).into(), 300, - 40, + 140, 0 )); - assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(300)), 100 - 80); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(300)), 320 - 300); }); } diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 9c4991513633..bb1bdb314205 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -229,7 +229,6 @@ parameter_types! { pub const SessionsPerEra: sp_staking::SessionIndex = 2; pub static BondingDuration: sp_staking::EraIndex = 28; pub const SlashDeferDuration: sp_staking::EraIndex = 7; // 1/4 the bonding duration. 
- pub HistoryDepth: u32 = 84; } impl pallet_bags_list::Config for Runtime { @@ -285,15 +284,11 @@ const MAX_QUOTA_NOMINATIONS: u32 = 16; /// Disabling factor set explicitly to byzantine threshold pub(crate) const SLASHING_DISABLING_FACTOR: usize = 3; +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = Timestamp; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); // burn slashes - type Reward = (); // rewards are minted from the void type SessionsPerEra = SessionsPerEra; type BondingDuration = BondingDuration; type SlashDeferDuration = SlashDeferDuration; @@ -308,12 +303,10 @@ impl pallet_staking::Config for Runtime { type NominationsQuota = pallet_staking::FixedNominationsQuota; type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = MaxUnlockingChunks; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = HistoryDepth; type EventListeners = Pools; type WeightInfo = pallet_staking::weights::SubstrateWeight; - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; + type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; } impl frame_system::offchain::SendTransactionTypes for Runtime diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs index 63bf533d8ee4..7ce7fee14107 100644 --- a/substrate/frame/fast-unstake/src/mock.rs +++ b/substrate/frame/fast-unstake/src/mock.rs @@ -104,35 +104,17 @@ impl frame_election_provider_support::ElectionProvider for MockElection { } } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; - type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = 
(); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; type BondingDuration = BondingDuration; - type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; - type NextNewSession = (); - type HistoryDepth = ConstU32<84>; - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = MockElection; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! 
{ diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 5642ffe89980..5ba7da7f9fda 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -150,35 +150,22 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBoundsOnChain; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { - type RewardRemainder = (); - type CurrencyToVote = (); - type RuntimeEvent = RuntimeEvent; type Currency = Balances; type CurrencyBalance = ::Balance; - type Slash = (); - type Reward = (); type SessionsPerEra = SessionsPerEra; type BondingDuration = BondingDuration; - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; type SessionInterface = Self; type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; - type MaxExposurePageSize = ConstU32<64>; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs index b9cff7960716..15d9e2c56031 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs @@ -76,36 +76,19 @@ pallet_staking_reward_curve::build! { parameter_types! 
{ pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; - type BondingDuration = ConstU32<3>; - type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; - type NextNewSession = (); - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type MaxUnlockingChunks = ConstU32<32>; - type HistoryDepth = ConstU32<84>; type EventListeners = (Pools, DelegatedStaking); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! { diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs index 0a456503ad81..ed47932a323b 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs @@ -90,36 +90,20 @@ parameter_types! 
{ pub static BondingDuration: u32 = 3; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; - type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; type BondingDuration = BondingDuration; - type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; - type NextNewSession = (); - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; type EventListeners = (Pools, DelegatedStaking); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! { diff --git a/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs b/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs index 570cdea90460..d913c5fe6948 100644 --- a/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs +++ b/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs @@ -82,36 +82,20 @@ parameter_types! 
{ pub static BondingDuration: u32 = 3; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; - type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; type BondingDuration = BondingDuration; - type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; - type NextNewSession = (); - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; type EventListeners = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! 
{ diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index e45d280ba52e..e243ad0e718e 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -124,35 +124,19 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBounds; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { type Currency = Balances; type CurrencyBalance = ::Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; - type BondingDuration = (); type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_im_online::Config for Test { diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index ea7044fb6a34..3c758b91d52f 100644 --- a/substrate/frame/root-offences/src/mock.rs +++ b/substrate/frame/root-offences/src/mock.rs @@ -124,15 +124,11 @@ parameter_types! 
{ pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { type Currency = Balances; type CurrencyBalance = ::Balance; type UnixTime = Timestamp; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); type SessionsPerEra = SessionsPerEra; type SlashDeferDuration = SlashDeferDuration; type AdminOrigin = frame_system::EnsureRoot; @@ -140,19 +136,10 @@ impl pallet_staking::Config for Test { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type HistoryDepth = ConstU32<84>; - type MaxControllersInDeprecationBatch = ConstU32<100>; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_session::historical::Config for Test { diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index b79bae73270e..2aec58cceded 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -129,35 +129,19 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBounds; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { type Currency = Balances; type CurrencyBalance = ::Balance; type UnixTime = 
pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; - type BondingDuration = (); type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl crate::Config for Test {} diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index 6d65500ef907..7e6a87955b08 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -261,19 +261,15 @@ impl OnStakingUpdate for EventListenerMock { // Disabling threshold for `UpToLimitDisablingStrategy` pub(crate) const DISABLING_LIMIT_FACTOR: usize = 3; +#[derive_impl(crate::config_preludes::TestDefaultConfig)] impl crate::pallet::pallet::Config for Test { type Currency = Balances; - type CurrencyBalance = ::Balance; type UnixTime = Timestamp; - type CurrencyToVote = (); type RewardRemainder = RewardRemainderMock; - type RuntimeEvent = RuntimeEvent; - type Slash = (); type Reward = MockReward; type SessionsPerEra = SessionsPerEra; type SlashDeferDuration = SlashDeferDuration; type AdminOrigin = EnsureOneOrRoot; - type BondingDuration = BondingDuration; type 
SessionInterface = Self; type EraPayout = ConvertCurve; type NextNewSession = Session; @@ -288,8 +284,6 @@ impl crate::pallet::pallet::Config for Test { type HistoryDepth = HistoryDepth; type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch; type EventListeners = EventListenerMock; - type BenchmarkingConfig = TestBenchmarkingConfig; - type WeightInfo = (); type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 284a801a0f05..a76e47edf380 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -86,9 +86,10 @@ pub mod pallet { Remove, } - #[pallet::config] + #[pallet::config(with_default)] pub trait Config: frame_system::Config { /// The staking balance. + #[pallet::no_default] type Currency: LockableCurrency< Self::AccountId, Moment = BlockNumberFor, @@ -109,6 +110,7 @@ pub mod pallet { /// /// It is guaranteed to start being called from the first `on_finalize`. Thus value at /// genesis is not used. + #[pallet::no_default] type UnixTime: UnixTime; /// Convert a balance into a number used for election calculation. This must fit into a @@ -117,9 +119,11 @@ pub mod pallet { /// in 128. /// Consequently, the backward convert is used convert the u128s from sp-elections back to a /// [`BalanceOf`]. + #[pallet::no_default_bounds] type CurrencyToVote: sp_staking::currency_to_vote::CurrencyToVote>; /// Something that provides the election functionality. + #[pallet::no_default] type ElectionProvider: ElectionProvider< AccountId = Self::AccountId, BlockNumber = BlockNumberFor, @@ -127,6 +131,7 @@ pub mod pallet { DataProvider = Pallet, >; /// Something that provides the election functionality at genesis. 
+ #[pallet::no_default] type GenesisElectionProvider: ElectionProvider< AccountId = Self::AccountId, BlockNumber = BlockNumberFor, @@ -134,6 +139,7 @@ pub mod pallet { >; /// Something that defines the maximum number of nominations per nominator. + #[pallet::no_default_bounds] type NominationsQuota: NominationsQuota>; /// Number of eras to keep in history. @@ -161,17 +167,21 @@ pub mod pallet { /// Tokens have been minted and are unused for validator-reward. /// See [Era payout](./index.html#era-payout). + #[pallet::no_default_bounds] type RewardRemainder: OnUnbalanced>; /// The overarching event type. + #[pallet::no_default_bounds] type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Handler for the unbalanced reduction when slashing a staker. + #[pallet::no_default_bounds] type Slash: OnUnbalanced>; /// Handler for the unbalanced increment when rewarding a staker. /// NOTE: in most cases, the implementation of `OnUnbalanced` should modify the total /// issuance. + #[pallet::no_default_bounds] type Reward: OnUnbalanced>; /// Number of sessions per era. @@ -192,6 +202,7 @@ pub mod pallet { /// The origin which can manage less critical staking parameters that does not require root. /// /// Supported actions: (1) cancel deferred slash, (2) set minimum commission. + #[pallet::no_default] type AdminOrigin: EnsureOrigin; /// Interface for interacting with a session pallet. @@ -199,10 +210,12 @@ pub mod pallet { /// The payout for validators and the system for the current era. /// See [Era payout](./index.html#era-payout). + #[pallet::no_default] type EraPayout: EraPayout>; /// Something that can estimate the next session change, accurately or as a best effort /// guess. + #[pallet::no_default_bounds] type NextNewSession: EstimateNextNewSession>; /// The maximum size of each `T::ExposurePage`. @@ -230,6 +243,7 @@ pub mod pallet { /// staker. In case of `bags-list`, this always means using `rebag` and `putInFrontOf`. 
/// /// Invariant: what comes out of this list will always be a nominator. + #[pallet::no_default] type VoterList: SortedListProvider; /// WIP: This is a noop as of now, the actual business logic that's described below is going @@ -252,6 +266,7 @@ pub mod pallet { /// validators, they can chill at any point, and their approval stakes will still be /// recorded. This implies that what comes out of iterating this list MIGHT NOT BE AN ACTIVE /// VALIDATOR. + #[pallet::no_default] type TargetList: SortedListProvider>; /// The maximum number of `unlocking` chunks a [`StakingLedger`] can @@ -274,18 +289,66 @@ pub mod pallet { /// receives. /// /// WARNING: this only reports slashing and withdraw events for the time being. + #[pallet::no_default_bounds] type EventListeners: sp_staking::OnStakingUpdate>; - // `DisablingStragegy` controls how validators are disabled + /// `DisablingStragegy` controls how validators are disabled + #[pallet::no_default_bounds] type DisablingStrategy: DisablingStrategy; /// Some parameters of the benchmarking. + #[cfg(feature = "std")] + type BenchmarkingConfig: BenchmarkingConfig; + + #[cfg(not(feature = "std"))] + #[pallet::no_default] type BenchmarkingConfig: BenchmarkingConfig; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } + /// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`]. + pub mod config_preludes { + use super::*; + use frame_support::{derive_impl, parameter_types, traits::ConstU32}; + pub struct TestDefaultConfig; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig, no_aggregated_types)] + impl frame_system::DefaultConfig for TestDefaultConfig {} + + parameter_types! 
{ + pub const SessionsPerEra: SessionIndex = 3; + pub const BondingDuration: EraIndex = 3; + } + + #[frame_support::register_default_impl(TestDefaultConfig)] + impl DefaultConfig for TestDefaultConfig { + #[inject_runtime_type] + type RuntimeEvent = (); + type CurrencyBalance = u128; + type CurrencyToVote = (); + type NominationsQuota = crate::FixedNominationsQuota<16>; + type HistoryDepth = ConstU32<84>; + type RewardRemainder = (); + type Slash = (); + type Reward = (); + type SessionsPerEra = SessionsPerEra; + type BondingDuration = BondingDuration; + type SlashDeferDuration = (); + type SessionInterface = (); + type NextNewSession = (); + type MaxExposurePageSize = ConstU32<64>; + type MaxUnlockingChunks = ConstU32<32>; + type MaxControllersInDeprecationBatch = ConstU32<100>; + type EventListeners = (); + type DisablingStrategy = crate::UpToLimitDisablingStrategy; + #[cfg(feature = "std")] + type BenchmarkingConfig = crate::TestBenchmarkingConfig; + type WeightInfo = (); + } + } + /// The ideal number of active validators. #[pallet::storage] #[pallet::getter(fn validator_count)] From 2e39e052adfe951a3f2b70833111a8026ce3f992 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 17 Jun 2024 15:56:00 +0200 Subject: [PATCH 34/52] Improve pruning CLI documentation (#4810) Closes: https://github.com/paritytech/polkadot-sdk/issues/4801 @andreclaro I hope this makes it more clear from the docs directly. --- .../client/cli/src/params/pruning_params.rs | 41 +++++++++++++------ 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/substrate/client/cli/src/params/pruning_params.rs b/substrate/client/cli/src/params/pruning_params.rs index 25b17b532898..88ae006c638e 100644 --- a/substrate/client/cli/src/params/pruning_params.rs +++ b/substrate/client/cli/src/params/pruning_params.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . 
use crate::error; -use clap::Args; +use clap::{builder::PossibleValue, Args, ValueEnum}; use sc_service::{BlocksPruning, PruningMode}; /// Parameters to define the pruning mode @@ -29,29 +29,24 @@ pub struct PruningParams { /// should be pruned (ie, removed) from the database. /// This setting can only be set on the first creation of the database. Every subsequent run /// will load the pruning mode from the database and will error if the stored mode doesn't - /// match this CLI value. It is fine to drop this CLI flag for subsequent runs. - /// Possible values: - /// - archive: Keep the state of all blocks. - /// - 'archive-canonical' Keep only the state of finalized blocks. - /// - number Keep the state of the last number of finalized blocks. + /// match this CLI value. It is fine to drop this CLI flag for subsequent runs. The only + /// exception is that `` can change between subsequent runs (increasing it will not + /// lead to restoring pruned state). + /// /// [default: 256] - #[arg(alias = "pruning", long, value_name = "PRUNING_MODE")] + #[arg(alias = "pruning", long, value_name = "PRUNING_MODE", value_enum)] pub state_pruning: Option, /// Specify the blocks pruning mode. /// /// This mode specifies when the block's body (including justifications) /// should be pruned (ie, removed) from the database. - /// Possible values: - /// - 'archive' Keep all blocks. - /// - 'archive-canonical' Keep only finalized blocks. - /// - number - /// Keep the last `number` of finalized blocks. 
#[arg( alias = "keep-blocks", long, value_name = "PRUNING_MODE", - default_value = "archive-canonical" + default_value_t = DatabasePruningMode::ArchiveCanonical, + value_enum )] pub blocks_pruning: DatabasePruningMode, } @@ -83,6 +78,26 @@ pub enum DatabasePruningMode { Custom(u32), } +impl ValueEnum for DatabasePruningMode { + fn value_variants<'a>() -> &'a [Self] { + &[Self::Archive, Self::ArchiveCanonical, Self::Custom(0)] + } + + fn to_possible_value(&self) -> Option { + Some(match self { + Self::Archive => PossibleValue::new("archive").help("Keep the data of all blocks."), + Self::ArchiveCanonical => PossibleValue::new("archive-canonical") + .help("Keep only the data of finalized blocks."), + Self::Custom(_) => PossibleValue::new("") + .help("Keep the data of the last of finalized blocks."), + }) + } + + fn from_str(input: &str, _: bool) -> Result { + ::from_str(input) + } +} + impl std::str::FromStr for DatabasePruningMode { type Err = String; From fed508f962f283600b2cc15335a3659efb0ceae9 Mon Sep 17 00:00:00 2001 From: hattizai Date: Mon, 17 Jun 2024 22:13:48 +0800 Subject: [PATCH 35/52] chore: remove unnecessary words (#4796) remove unnecessary words in comments. 
--- docs/sdk/src/guides/your_first_pallet/mod.rs | 6 +++--- substrate/primitives/trie/src/lib.rs | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/sdk/src/guides/your_first_pallet/mod.rs b/docs/sdk/src/guides/your_first_pallet/mod.rs index 0a22b13df814..da4624f5ac2b 100644 --- a/docs/sdk/src/guides/your_first_pallet/mod.rs +++ b/docs/sdk/src/guides/your_first_pallet/mod.rs @@ -626,7 +626,7 @@ pub mod pallet { #[test] fn transfer_works() { StateBuilder::default().build_and_execute(|| { - // given the the initial state, when: + // given the initial state, when: assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(ALICE), BOB, 50)); // then: @@ -648,7 +648,7 @@ pub mod pallet { #[test] fn transfer_from_non_existent_fails() { StateBuilder::default().build_and_execute(|| { - // given the the initial state, when: + // given the initial state, when: assert_err!( Pallet::::transfer(RuntimeOrigin::signed(CHARLIE), ALICE, 10), "NonExistentAccount" @@ -769,7 +769,7 @@ pub mod pallet_v2 { // the final assertion. System::set_block_number(ALICE); - // given the the initial state, when: + // given the initial state, when: assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(ALICE), BOB, 50)); // then: diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index 54f202eda0c9..0c14e3af196d 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -195,11 +195,11 @@ pub type MemoryDB = memory_db::MemoryDB, trie_db::DB /// Reexport from `hash_db`, with genericity set for `Hasher` trait. pub type GenericMemoryDB = memory_db::MemoryDB; -/// Persistent trie database read-access interface for the a given hasher. +/// Persistent trie database read-access interface for a given hasher. pub type TrieDB<'a, 'cache, L> = trie_db::TrieDB<'a, 'cache, L>; /// Builder for creating a [`TrieDB`]. 
pub type TrieDBBuilder<'a, 'cache, L> = trie_db::TrieDBBuilder<'a, 'cache, L>; -/// Persistent trie database write-access interface for the a given hasher. +/// Persistent trie database write-access interface for a given hasher. pub type TrieDBMut<'a, L> = trie_db::TrieDBMut<'a, L>; /// Builder for creating a [`TrieDBMut`]. pub type TrieDBMutBuilder<'a, L> = trie_db::TrieDBMutBuilder<'a, L>; @@ -212,17 +212,17 @@ pub type TrieHash = <::Hash as Hasher>::Out; pub mod trie_types { use super::*; - /// Persistent trie database read-access interface for the a given hasher. + /// Persistent trie database read-access interface for a given hasher. /// /// Read only V1 and V0 are compatible, thus we always use V1. pub type TrieDB<'a, 'cache, H> = super::TrieDB<'a, 'cache, LayoutV1>; /// Builder for creating a [`TrieDB`]. pub type TrieDBBuilder<'a, 'cache, H> = super::TrieDBBuilder<'a, 'cache, LayoutV1>; - /// Persistent trie database write-access interface for the a given hasher. + /// Persistent trie database write-access interface for a given hasher. pub type TrieDBMutV0<'a, H> = super::TrieDBMut<'a, LayoutV0>; /// Builder for creating a [`TrieDBMutV0`]. pub type TrieDBMutBuilderV0<'a, H> = super::TrieDBMutBuilder<'a, LayoutV0>; - /// Persistent trie database write-access interface for the a given hasher. + /// Persistent trie database write-access interface for a given hasher. pub type TrieDBMutV1<'a, H> = super::TrieDBMut<'a, LayoutV1>; /// Builder for creating a [`TrieDBMutV1`]. 
pub type TrieDBMutBuilderV1<'a, H> = super::TrieDBMutBuilder<'a, LayoutV1>; From 6cb3bd23910ec48ab37a3c95a6b03286ff2979bf Mon Sep 17 00:00:00 2001 From: Tom Mi Date: Mon, 17 Jun 2024 18:11:21 +0300 Subject: [PATCH 36/52] Ibp bootnodes for Kusama People (#6) (#4741) * fix rotko's pcollectives bootnode * Update people-kusama.json * Add Dwellir People Kusama bootnode * add Gatotech bootnodes to `people-kusama` * Add Dwellir People Kusama bootnode * Update Amforc bootnodes for Kusama and Polkadot (#4668) --------- Co-authored-by: RadiumBlock Co-authored-by: Jonathan Udd Co-authored-by: Milos Kriz Co-authored-by: tugy <33746108+tugytur@users.noreply.github.com> Co-authored-by: Kutsal Kaan Bilgin Co-authored-by: Petr Mensik Co-authored-by: Tommi --- .../chain-specs/collectives-polkadot.json | 5 ++--- .../parachains/chain-specs/people-kusama.json | 20 ++++++++++++++++++- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index a0d5ddff6ebb..a6ba01ffa394 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -23,9 +23,8 @@ "/dns/polkadot-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWDMFYCNRAQcSRNV7xu2xv8319goSEbSHW4TnXRz6EpPKc", "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", - "/dns/pch13.rotko.net/tcp/33573/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", - "/dns/pch13.rotko.net/tcp/34573/ws/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", - "/dns/pch13.rotko.net/tcp/35573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", + "/dns/pch16.rotko.net/tcp/33576/p2p/12D3KooWKrm3XmuGzJH17Wcn4HRDGsEjLZGDgN77q3ZhwnnQP7y1", + 
"/dns/pch16.rotko.net/tcp/35576/wss/p2p/12D3KooWKrm3XmuGzJH17Wcn4HRDGsEjLZGDgN77q3ZhwnnQP7y1", "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30526/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30528/wss/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", "/dns/boot-polkadot-collectives.luckyfriday.io/tcp/443/wss/p2p/12D3KooWCzifnPooTt4kvTnXT7FTKTymVL7xn7DURQLsS2AKpf6w" diff --git a/cumulus/parachains/chain-specs/people-kusama.json b/cumulus/parachains/chain-specs/people-kusama.json index 00a38b675def..3352cb25a289 100644 --- a/cumulus/parachains/chain-specs/people-kusama.json +++ b/cumulus/parachains/chain-specs/people-kusama.json @@ -8,7 +8,25 @@ "/dns/kusama-people-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWQaqG5TNmDfRWrtH7tMsN7YeqwVkSfoZT4GkemSzezNi1", "/dns/kusama-people-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWKhYoQH9LdSyvY3SVZY9gFf6ZV1bFh6317TRehUP3r5fm", "/dns/people-kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWPjzgKZe5jdG6TY4gwcFq8QxyyhqsYbQo6N29pwGePWLA", - "/dns/people-kusama.bootnode.amforc.com/tcp/30004/p2p/12D3KooWPjzgKZe5jdG6TY4gwcFq8QxyyhqsYbQo6N29pwGePWLA" + "/dns/people-kusama.bootnode.amforc.com/tcp/30004/p2p/12D3KooWPjzgKZe5jdG6TY4gwcFq8QxyyhqsYbQo6N29pwGePWLA", + "/dns/boot.gatotech.network/tcp/33240/p2p/12D3KooWLi9TzaKX4zniJpiM521PnYG4EocpdqjPpJUhXq9QGkRX", + "/dns/boot.gatotech.network/tcp/35240/wss/p2p/12D3KooWLi9TzaKX4zniJpiM521PnYG4EocpdqjPpJUhXq9QGkRX", + "/dns/people-kusama-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWGP1C9iWTHnZyeaSjYZ7LdK8douXWc1n1dBv25XEASHaj", + "/dns/people-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWGP1C9iWTHnZyeaSjYZ7LdK8douXWc1n1dBv25XEASHaj", + "/dns/kppl16.rotko.net/tcp/33756/p2p/12D3KooWSKQwgoydfbN6mNN2aNwdqfkR2ExAnTRs8mmdrPQTtDLo", + "/dns/kppl16.rotko.net/tcp/35756/wss/p2p/12D3KooWSKQwgoydfbN6mNN2aNwdqfkR2ExAnTRs8mmdrPQTtDLo", + 
"/dns/people-kusama-boot-ng.dwellir.com/tcp/30359/p2p/12D3KooWM6T8MMibxLZhhpq6F612CZ4FgnfDSJSkWDMiVUDe1aGb", + "/dns/people-kusama-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWM6T8MMibxLZhhpq6F612CZ4FgnfDSJSkWDMiVUDe1aGb", + "/dns/people-kusama-bootnode.turboflakes.io/tcp/30645/p2p/12D3KooWCR2Q8J2NFFfuofDak4zSgWkuBq7orP96HFaxLgAoDUBV", + "/dns/people-kusama-bootnode.turboflakes.io/tcp/30745/wss/p2p/12D3KooWCR2Q8J2NFFfuofDak4zSgWkuBq7orP96HFaxLgAoDUBV", + "/dns/boot-node.helikon.io/tcp/7510/p2p/12D3KooWM1X4setrMWjwnV8iDkAtYhqFHNkGozdWdq6sawWh5Yhv", + "/dns/boot-node.helikon.io/tcp/7512/wss/p2p/12D3KooWM1X4setrMWjwnV8iDkAtYhqFHNkGozdWdq6sawWh5Yhv", + "/dns/people-kusama.bootnodes.polkadotters.com/tcp/30377/p2p/12D3KooWHy7TAuK6EoVij2tfaeh3KkaEJxhTmumbEom3HfRnSEsp", + "/dns/people-kusama.bootnodes.polkadotters.com/tcp/30379/wss/p2p/12D3KooWHy7TAuK6EoVij2tfaeh3KkaEJxhTmumbEom3HfRnSEsp", + "/dns/boot.metaspan.io/tcp/25068/p2p/12D3KooWDoDLtLvQi8hhFVyubPZhaYuAwSAJrPFtyGWJ2NSfBiyP", + "/dns/boot.metaspan.io/tcp/25069/wss/p2p/12D3KooWDoDLtLvQi8hhFVyubPZhaYuAwSAJrPFtyGWJ2NSfBiyP", + "/dns/ibp-boot-kusama-people.luckyfriday.io/tcp/30342/p2p/12D3KooWM4bRafMH2StfBEQtyj5cMWfGLYbuikCZmvKv9m1MQVPn", + "/dns/ibp-boot-kusama-people.luckyfriday.io/tcp/443/wss/p2p/12D3KooWM4bRafMH2StfBEQtyj5cMWfGLYbuikCZmvKv9m1MQVPn" ], "telemetryEndpoints": null, "protocolId": null, From 5055294521021c0ffa1c449d6793ec9d264e5bd5 Mon Sep 17 00:00:00 2001 From: Florian Franzen Date: Mon, 17 Jun 2024 20:47:36 +0200 Subject: [PATCH 37/52] node-inspect: do not depend on rocksdb (#4783) The crate `sc-cli` otherwise enables the `rocksdb` feature. 
--- substrate/bin/node/inspect/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/bin/node/inspect/Cargo.toml b/substrate/bin/node/inspect/Cargo.toml index 5e4488903bf4..e23a4c4f37e5 100644 --- a/substrate/bin/node/inspect/Cargo.toml +++ b/substrate/bin/node/inspect/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] clap = { version = "4.5.3", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.12" } thiserror = { workspace = true } -sc-cli = { path = "../../../client/cli" } +sc-cli = { path = "../../../client/cli", default-features = false } sc-client-api = { path = "../../../client/api" } sc-service = { path = "../../../client/service", default-features = false } sp-blockchain = { path = "../../../primitives/blockchain" } From 55a13abcd2f67e7fdfc8843f5c4a54798e26a9df Mon Sep 17 00:00:00 2001 From: Kantapat chankasem Date: Tue, 18 Jun 2024 05:30:13 +0700 Subject: [PATCH 38/52] remove pallet::getter usage from pallet-timestamp (#3374) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit this pr is a part of #3326 --------- Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Bastian Köcher --- cumulus/test/runtime/src/lib.rs | 4 ++-- polkadot/runtime/test-runtime/src/lib.rs | 3 ++- prdoc/pr_3374.prdoc | 13 +++++++++++++ .../test-staking-e2e/src/lib.rs | 5 +++-- substrate/frame/timestamp/src/benchmarking.rs | 4 ++-- substrate/frame/timestamp/src/lib.rs | 13 ++++++------- substrate/frame/timestamp/src/tests.rs | 2 +- 7 files changed, 29 insertions(+), 15 deletions(-) create mode 100644 prdoc/pr_3374.prdoc diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 452b3241d0bf..26c6635e1ad3 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -66,7 +66,7 @@ use frame_system::{ pub use pallet_balances::Call as BalancesCall; pub use pallet_glutton::Call as 
GluttonCall; pub use pallet_sudo::Call as SudoCall; -pub use pallet_timestamp::Call as TimestampCall; +pub use pallet_timestamp::{Call as TimestampCall, Now}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; @@ -499,7 +499,7 @@ impl_runtime_apis! { impl crate::GetLastTimestamp for Runtime { fn get_last_timestamp() -> u64 { - Timestamp::now() + Now::::get() } } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 8178639946f8..334c6eb733a1 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -53,6 +53,7 @@ use frame_support::{ }; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_session::historical as session_historical; +use pallet_timestamp::Now; use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; use polkadot_primitives::{ slashing, AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, @@ -1186,7 +1187,7 @@ sp_api::impl_runtime_apis! { impl crate::GetLastTimestamp for Runtime { fn get_last_timestamp() -> u64 { - Timestamp::now() + Now::::get() } } diff --git a/prdoc/pr_3374.prdoc b/prdoc/pr_3374.prdoc new file mode 100644 index 000000000000..76744f778db0 --- /dev/null +++ b/prdoc/pr_3374.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: removed `pallet::getter` from `pallet-timestamp` + +doc: + - audience: Runtime Dev + description: | + This PR removes all the `pallet::getter` usages from `pallet-timestamp`, and updates depdendant runtimes accordingly. + The syntax `StorageItem::::get()` should be used instead. 
+ +crates: + - name: pallet-timestamp \ No newline at end of file diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs index 2b1f1335c6fe..aaffbb6681cd 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs @@ -22,6 +22,7 @@ pub(crate) const LOG_TARGET: &str = "tests::e2e-epm"; use frame_support::{assert_err, assert_noop, assert_ok}; use mock::*; +use pallet_timestamp::Now; use sp_core::Get; use sp_runtime::Perbill; @@ -46,7 +47,7 @@ fn log_current_time() { Session::current_index(), Staking::current_era(), ElectionProviderMultiPhase::current_phase(), - Timestamp::now() + Now::::get() ); } @@ -209,7 +210,7 @@ fn continuous_slashes_below_offending_threshold() { // failed due to election minimum score. if start_next_active_era(pool_state.clone()).is_err() { assert!(ElectionProviderMultiPhase::current_phase().is_emergency()); - break + break; } active_validator_set = Session::validators(); diff --git a/substrate/frame/timestamp/src/benchmarking.rs b/substrate/frame/timestamp/src/benchmarking.rs index 82dfdfa8b312..d8c27b4967af 100644 --- a/substrate/frame/timestamp/src/benchmarking.rs +++ b/substrate/frame/timestamp/src/benchmarking.rs @@ -25,7 +25,7 @@ use frame_support::{ensure, traits::OnFinalize}; use frame_system::RawOrigin; use sp_storage::TrackedStorageKey; -use crate::Pallet as Timestamp; +use crate::{Now, Pallet as Timestamp}; const MAX_TIME: u32 = 100; @@ -42,7 +42,7 @@ benchmarks! 
{ }); }: _(RawOrigin::None, t.into()) verify { - ensure!(Timestamp::::now() == t.into(), "Time was not set."); + ensure!(Now::::get() == t.into(), "Time was not set."); } on_finalize { diff --git a/substrate/frame/timestamp/src/lib.rs b/substrate/frame/timestamp/src/lib.rs index 5269f17eca6b..6a22ab1cd5ef 100644 --- a/substrate/frame/timestamp/src/lib.rs +++ b/substrate/frame/timestamp/src/lib.rs @@ -202,7 +202,6 @@ pub mod pallet { /// The current time for the current block. #[pallet::storage] - #[pallet::getter(fn now)] pub type Now = StorageValue<_, T::Moment, ValueQuery>; /// Whether the timestamp has been updated in this block. @@ -261,7 +260,7 @@ pub mod pallet { pub fn set(origin: OriginFor, #[pallet::compact] now: T::Moment) -> DispatchResult { ensure_none(origin)?; assert!(!DidUpdate::::exists(), "Timestamp must be updated only once in the block"); - let prev = Self::now(); + let prev = Now::::get(); assert!( prev.is_zero() || now >= prev + T::MinimumPeriod::get(), "Timestamp must increment by at least between sequential blocks" @@ -296,7 +295,7 @@ pub mod pallet { .expect("Timestamp inherent data must be provided"); let data = (*inherent_data).saturated_into::(); - let next_time = cmp::max(data, Self::now() + T::MinimumPeriod::get()); + let next_time = cmp::max(data, Now::::get() + T::MinimumPeriod::get()); Some(Call::set { now: next_time }) } @@ -317,7 +316,7 @@ pub mod pallet { .expect("Timestamp inherent data not correctly encoded") .expect("Timestamp inherent data must be provided"); - let minimum = (Self::now() + T::MinimumPeriod::get()).saturated_into::(); + let minimum = (Now::::get() + T::MinimumPeriod::get()).saturated_into::(); if t > *(data + MAX_TIMESTAMP_DRIFT_MILLIS) { Err(InherentError::TooFarInFuture) } else if t < minimum { @@ -339,7 +338,7 @@ impl Pallet { /// NOTE: if this function is called prior to setting the timestamp, /// it will return the timestamp of the previous block. 
pub fn get() -> T::Moment { - Self::now() + Now::::get() } /// Set the timestamp to something in particular. Only used for tests. @@ -356,7 +355,7 @@ impl Time for Pallet { type Moment = T::Moment; fn now() -> Self::Moment { - Self::now() + Now::::get() } } @@ -367,7 +366,7 @@ impl UnixTime for Pallet { fn now() -> core::time::Duration { // now is duration since unix epoch in millisecond as documented in // `sp_timestamp::InherentDataProvider`. - let now = Self::now(); + let now = Now::::get(); sp_std::if_std! { if now == T::Moment::zero() { log::error!( diff --git a/substrate/frame/timestamp/src/tests.rs b/substrate/frame/timestamp/src/tests.rs index cc49d8a3296e..a83855561889 100644 --- a/substrate/frame/timestamp/src/tests.rs +++ b/substrate/frame/timestamp/src/tests.rs @@ -25,7 +25,7 @@ fn timestamp_works() { new_test_ext().execute_with(|| { crate::Now::::put(46); assert_ok!(Timestamp::set(RuntimeOrigin::none(), 69)); - assert_eq!(Timestamp::now(), 69); + assert_eq!(crate::Now::::get(), 69); assert_eq!(Some(69), get_captured_moment()); }); } From 7c847f8db6ab9ca9ef990c4cf61275415c78d106 Mon Sep 17 00:00:00 2001 From: Daan van der Plas <93204684+Daanvdplas@users.noreply.github.com> Date: Tue, 18 Jun 2024 09:51:47 +0200 Subject: [PATCH 39/52] refactor: parachain template (#4684) Update parachain template pallet based on polkadot sdk docs @kianenigma --------- Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- prdoc/pr_4684.prdoc | 13 +++ .../parachain/pallets/template/src/lib.rs | 42 ++++--- .../parachain/pallets/template/src/mock.rs | 45 +++++--- .../parachain/pallets/template/src/weights.rs | 2 +- templates/parachain/runtime/Cargo.toml | 2 +- templates/parachain/runtime/src/lib.rs | 107 +++++++++++------- 6 files changed, 133 insertions(+), 78 deletions(-) create mode 100644 prdoc/pr_4684.prdoc diff --git a/prdoc/pr_4684.prdoc b/prdoc/pr_4684.prdoc new file mode 100644 index 000000000000..b1c429c57822 --- /dev/null +++ 
b/prdoc/pr_4684.prdoc @@ -0,0 +1,13 @@ +title: "Refactor of the parachain template" + +doc: + - audience: Runtime Dev + description: | + Introduce the construct runtime V2 to the parachain template runtime. In addition, url links in the parachain pallet + template now direct to the polkadot sdk docs. + +crates: + - name: pallet-parachain-template + bump: none + - name: parachain-template-runtime + bump: none diff --git a/templates/parachain/pallets/template/src/lib.rs b/templates/parachain/pallets/template/src/lib.rs index 11587d1df426..0420e2b90821 100644 --- a/templates/parachain/pallets/template/src/lib.rs +++ b/templates/parachain/pallets/template/src/lib.rs @@ -1,8 +1,5 @@ #![cfg_attr(not(feature = "std"), no_std)] -/// Edit this file to define custom logic or remove it if it is not needed. -/// Learn more about FRAME and the core library of Substrate FRAME pallets: -/// pub use pallet::*; #[cfg(test)] @@ -16,6 +13,12 @@ pub mod weights; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +// +// +// +// To see a full list of `pallet` macros and their use cases, see: +// +// #[frame_support::pallet] pub mod pallet { use frame_support::{dispatch::DispatchResultWithPostInfo, pallet_prelude::*}; @@ -25,7 +28,9 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// Because this pallet emits events, it depends on the runtime's definition of an event. + /// type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// A type representing the weights required by the dispatchables of this pallet. type WeightInfo: crate::weights::WeightInfo; } @@ -33,24 +38,23 @@ pub mod pallet { #[pallet::pallet] pub struct Pallet(_); - // The pallet's runtime storage items. - // https://docs.substrate.io/v3/runtime/storage + /// The pallet's storage items. 
+ /// + /// #[pallet::storage] - // Learn more about declaring storage items: - // https://docs.substrate.io/v3/runtime/storage#declaring-storage-items pub type Something = StorageValue<_, u32>; - // Pallets use events to inform users when important changes are made. - // https://docs.substrate.io/v3/runtime/events-and-errors + /// Pallets use events to inform users when important changes are made. + /// #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// Event documentation should end with an array that provides descriptive names for event - /// parameters. [something, who] - SomethingStored(u32, T::AccountId), + /// We usually use passive tense for events. + SomethingStored { something: u32, who: T::AccountId }, } - // Errors inform users that something went wrong. + /// Errors inform users that something went wrong. + /// #[pallet::error] pub enum Error { /// Error names should be descriptive. @@ -62,9 +66,10 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet {} - // Dispatchable functions allows users to interact with the pallet and invoke state changes. - // These functions materialize as "extrinsics", which are often compared to transactions. - // Dispatchable functions must be annotated with a weight and must return a DispatchResult. + /// Dispatchable functions allows users to interact with the pallet and invoke state changes. + /// These functions materialize as "extrinsics", which are often compared to transactions. + /// Dispatchable functions must be annotated with a weight and must return a DispatchResult. + /// #[pallet::call] impl Pallet { /// An example dispatchable that takes a singles value as a parameter, writes the value to @@ -74,14 +79,15 @@ pub mod pallet { pub fn do_something(origin: OriginFor, something: u32) -> DispatchResultWithPostInfo { // Check that the extrinsic was signed and get the signer. // This function will return an error if the extrinsic is not signed. 
- // https://docs.substrate.io/v3/runtime/origins + // let who = ensure_signed(origin)?; // Update storage. >::put(something); // Emit an event. - Self::deposit_event(Event::SomethingStored(something, who)); + Self::deposit_event(Event::SomethingStored { something, who }); + // Return a successful DispatchResultWithPostInfo Ok(().into()) } diff --git a/templates/parachain/pallets/template/src/mock.rs b/templates/parachain/pallets/template/src/mock.rs index ebb0598df97b..46e3117596f5 100644 --- a/templates/parachain/pallets/template/src/mock.rs +++ b/templates/parachain/pallets/template/src/mock.rs @@ -1,25 +1,36 @@ -use frame_support::{derive_impl, parameter_types}; -use frame_system as system; -use sp_runtime::BuildStorage; - -type Block = frame_system::mocking::MockBlock; +use frame_support::{derive_impl, weights::constants::RocksDbWeight}; +use frame_system::{mocking::MockBlock, GenesisConfig}; +use sp_runtime::{traits::ConstU64, BuildStorage}; // Configure a mock runtime to test the pallet. -frame_support::construct_runtime!( - pub enum Test - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - TemplateModule: crate::{Pallet, Call, Storage, Event}, - } -); +#[frame_support::runtime] +mod test_runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Test; -parameter_types! 
{ - pub const SS58Prefix: u8 = 42; + #[runtime::pallet_index(0)] + pub type System = frame_system; + #[runtime::pallet_index(1)] + pub type TemplateModule = crate; } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -impl system::Config for Test { - type Block = Block; +impl frame_system::Config for Test { + type Nonce = u64; + type Block = MockBlock; + type BlockHashCount = ConstU64<250>; + type DbWeight = RocksDbWeight; } impl crate::Config for Test { @@ -29,5 +40,5 @@ impl crate::Config for Test { // Build genesis storage according to the mock runtime. pub fn new_test_ext() -> sp_io::TestExternalities { - system::GenesisConfig::::default().build_storage().unwrap().into() + GenesisConfig::::default().build_storage().unwrap().into() } diff --git a/templates/parachain/pallets/template/src/weights.rs b/templates/parachain/pallets/template/src/weights.rs index 7c42936e09f2..5bfe28e8b71e 100644 --- a/templates/parachain/pallets/template/src/weights.rs +++ b/templates/parachain/pallets/template/src/weights.rs @@ -4,7 +4,7 @@ //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev //! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `Alexs-MacBook-Pro-2.local`, CPU: `` +//! HOSTNAME: `_`, CPU: `` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml index 059c79367969..48d9f6912894 100644 --- a/templates/parachain/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -35,7 +35,7 @@ pallet-parachain-template = { path = "../pallets/template", default-features = f frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-executive = { path = "../../../substrate/frame/executive", default-features = false } frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["experimental"] } frame-system = { path = "../../../substrate/frame/system", default-features = false } frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index 987b88af8444..6e6491a19adf 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -7,6 +7,8 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); pub mod apis; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarks; mod configs; mod weights; @@ -22,12 +24,9 @@ use sp_std::prelude::*; use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use frame_support::{ - construct_runtime, - weights::{ - constants::WEIGHT_REF_TIME_PER_SECOND, Weight, WeightToFeeCoefficient, - WeightToFeeCoefficients, WeightToFeePolynomial, - }, +use 
frame_support::weights::{ + constants::WEIGHT_REF_TIME_PER_SECOND, Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, + WeightToFeePolynomial, }; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::{MultiAddress, Perbill, Permill}; @@ -232,43 +231,69 @@ pub fn native_version() -> NativeVersion { } // Create the runtime by composing the FRAME pallets that were previously configured. -construct_runtime!( - pub enum Runtime { - // System support stuff. - System: frame_system = 0, - ParachainSystem: cumulus_pallet_parachain_system = 1, - Timestamp: pallet_timestamp = 2, - ParachainInfo: parachain_info = 3, - - // Monetary stuff. - Balances: pallet_balances = 10, - TransactionPayment: pallet_transaction_payment = 11, - - // Governance - Sudo: pallet_sudo = 15, - - // Collator support. The order of these 4 are important and shall not change. - Authorship: pallet_authorship = 20, - CollatorSelection: pallet_collator_selection = 21, - Session: pallet_session = 22, - Aura: pallet_aura = 23, - AuraExt: cumulus_pallet_aura_ext = 24, - - // XCM helpers. - XcmpQueue: cumulus_pallet_xcmp_queue = 30, - PolkadotXcm: pallet_xcm = 31, - CumulusXcm: cumulus_pallet_xcm = 32, - MessageQueue: pallet_message_queue = 33, - - // Template - TemplatePallet: pallet_parachain_template = 50, - } -); +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + #[runtime::pallet_index(0)] + pub type System = frame_system; + #[runtime::pallet_index(1)] + pub type ParachainSystem = cumulus_pallet_parachain_system; + #[runtime::pallet_index(2)] + pub type Timestamp = pallet_timestamp; + #[runtime::pallet_index(3)] + pub type ParachainInfo = parachain_info; + + // Monetary stuff. 
+ #[runtime::pallet_index(10)] + pub type Balances = pallet_balances; + #[runtime::pallet_index(11)] + pub type TransactionPayment = pallet_transaction_payment; + + // Governance + #[runtime::pallet_index(15)] + pub type Sudo = pallet_sudo; + + // Collator support. The order of these 4 are important and shall not change. + #[runtime::pallet_index(20)] + pub type Authorship = pallet_authorship; + #[runtime::pallet_index(21)] + pub type CollatorSelection = pallet_collator_selection; + #[runtime::pallet_index(22)] + pub type Session = pallet_session; + #[runtime::pallet_index(23)] + pub type Aura = pallet_aura; + #[runtime::pallet_index(24)] + pub type AuraExt = cumulus_pallet_aura_ext; + + // XCM helpers. + #[runtime::pallet_index(30)] + pub type XcmpQueue = cumulus_pallet_xcmp_queue; + #[runtime::pallet_index(31)] + pub type PolkadotXcm = pallet_xcm; + #[runtime::pallet_index(32)] + pub type CumulusXcm = cumulus_pallet_xcm; + #[runtime::pallet_index(33)] + pub type MessageQueue = pallet_message_queue; + + // Template + #[runtime::pallet_index(50)] + pub type TemplatePallet = pallet_parachain_template; +} cumulus_pallet_parachain_system::register_validate_block! { Runtime = Runtime, BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, } - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarks; From 1dc68de8eec934b3c7f35a330f869d1172943da4 Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Tue, 18 Jun 2024 11:57:57 +0300 Subject: [PATCH 40/52] glutton: also increase parachain block length (#4728) Glutton currently is useful mostly for stress testing relay chain validators. It is unusable for testing the collator networking and block announcement and import scenarios. This PR resolves that by improving glutton pallet to also buff up the blocks, up to the runtime configured `BlockLength`. ### How it works Includes an additional inherent in each parachain block. 
The `garbage` argument passed to the inherent is filled with trash data. Its size is computed by applying the newly introduced `block_length` percentage to the maximum block length for mandatory dispatch class. After https://github.com/paritytech/polkadot-sdk/pull/4765 is merged, the length of inherent extrinsic will be added to the total block proof size. The remaining weight is burnt in `on_idle` as configured by the `storage` percentage parameter. TODO: - [x] PRDoc - [x] Readme update - [x] Add tests --------- Signed-off-by: Andrei Sandu --- Cargo.lock | 1 + .../glutton/glutton-westend/src/lib.rs | 1 + prdoc/pr_4728.prdoc | 17 +++++ substrate/bin/node/bench/src/import.rs | 9 ++- .../cli/tests/res/default_genesis_config.json | 1 + substrate/frame/glutton/Cargo.toml | 2 + substrate/frame/glutton/README.md | 7 +- substrate/frame/glutton/src/lib.rs | 76 +++++++++++++++++++ substrate/frame/glutton/src/mock.rs | 8 +- substrate/frame/glutton/src/tests.rs | 45 ++++++++++- 10 files changed, 154 insertions(+), 13 deletions(-) create mode 100644 prdoc/pr_4728.prdoc diff --git a/Cargo.lock b/Cargo.lock index 4f4e0a988cec..113cfa06a84a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10477,6 +10477,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-core", + "sp-inherents", "sp-io", "sp-runtime", "sp-std 14.0.0", diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 910f7569bf95..b8a328c3db69 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -296,6 +296,7 @@ pub type SignedExtra = ( frame_system::CheckGenesis, frame_system::CheckEra, frame_system::CheckNonce, + frame_system::CheckWeight, ); /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = diff --git a/prdoc/pr_4728.prdoc b/prdoc/pr_4728.prdoc new file mode 100644 index 000000000000..1494fbdbb2b9 --- /dev/null +++ b/prdoc/pr_4728.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Glutton - add support for bloating the parachain block length" + +doc: + - audience: [Runtime Dev, Runtime User] + description: | + Introduce a new configuration parameter `block_length` which can be configured via a call to + `set_block_length`. This sets the ratio of the block length that is to be filled with trash. + This is implemented by an inherent that takes trash data as a parameter filling the block length. + +crates: + - name: pallet-glutton + bump: major + - name: glutton-westend-runtime + bump: major diff --git a/substrate/bin/node/bench/src/import.rs b/substrate/bin/node/bench/src/import.rs index 78b280076e0b..e340869dea02 100644 --- a/substrate/bin/node/bench/src/import.rs +++ b/substrate/bin/node/bench/src/import.rs @@ -122,7 +122,8 @@ impl core::Benchmark for ImportBenchmark { match self.block_type { BlockType::RandomTransfersKeepAlive => { // should be 8 per signed extrinsic + 1 per unsigned - // we have 1 unsigned and the rest are signed in the block + // we have 2 unsigned (timestamp and glutton bloat) while the rest are + // signed in the block. 
// those 8 events per signed are: // - transaction paid for the transaction payment // - withdraw (Balances::Withdraw) for charging the transaction fee @@ -135,18 +136,18 @@ impl core::Benchmark for ImportBenchmark { // - extrinsic success assert_eq!( kitchensink_runtime::System::events().len(), - (self.block.extrinsics.len() - 1) * 8 + 1, + (self.block.extrinsics.len() - 2) * 8 + 2, ); }, BlockType::Noop => { assert_eq!( kitchensink_runtime::System::events().len(), // should be 2 per signed extrinsic + 1 per unsigned - // we have 1 unsigned and the rest are signed in the block + // we have 2 unsigned and the rest are signed in the block // those 2 events per signed are: // - deposit event for charging transaction fee // - extrinsic success - (self.block.extrinsics.len() - 1) * 2 + 1, + (self.block.extrinsics.len() - 2) * 2 + 2, ); }, _ => {}, diff --git a/substrate/bin/node/cli/tests/res/default_genesis_config.json b/substrate/bin/node/cli/tests/res/default_genesis_config.json index e21fbb47da8c..d8713764ab21 100644 --- a/substrate/bin/node/cli/tests/res/default_genesis_config.json +++ b/substrate/bin/node/cli/tests/res/default_genesis_config.json @@ -74,6 +74,7 @@ "glutton": { "compute": "0", "storage": "0", + "blockLength": "0", "trashDataCount": 0 }, "assets": { diff --git a/substrate/frame/glutton/Cargo.toml b/substrate/frame/glutton/Cargo.toml index 730c4e70935c..39d2b49b50e5 100644 --- a/substrate/frame/glutton/Cargo.toml +++ b/substrate/frame/glutton/Cargo.toml @@ -27,6 +27,7 @@ sp-core = { path = "../../primitives/core", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-std = { path = "../../primitives/std", default-features = false } +sp-inherents = { path = "../../primitives/inherents", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } @@ -43,6 +44,7 @@ std = [ "pallet-balances/std", 
"scale-info/std", "sp-core/std", + "sp-inherents/std", "sp-io/std", "sp-runtime/std", "sp-std/std", diff --git a/substrate/frame/glutton/README.md b/substrate/frame/glutton/README.md index 89dbe26ec7a9..43642df19104 100644 --- a/substrate/frame/glutton/README.md +++ b/substrate/frame/glutton/README.md @@ -7,6 +7,7 @@ The `Glutton` pallet gets the name from its property to consume vast amounts of resources. It can be used to push para-chains and their relay-chains to the limits. This is good for testing out theoretical limits in a practical way. -The `Glutton` can be set to consume a fraction of the available unused weight of a chain. It accomplishes this by -utilizing the `on_idle` hook and consuming a specific ration of the remaining weight. The rations can be set via -`set_compute` and `set_storage`. Initially the `Glutton` needs to be initialized once with `initialize_pallet`. +The `Glutton` can be set to consume a fraction of the available block length and unused weight of a chain. It +accomplishes this by filling the block length up to a ratio and utilizing the `on_idle` hook to consume a +specific ratio of the remaining weight. The ratios can be set via `set_compute`, `set_storage` and `set_block_length`. +Initially the `Glutton` needs to be initialized once with `initialize_pallet`. diff --git a/substrate/frame/glutton/src/lib.rs b/substrate/frame/glutton/src/lib.rs index 344a70becaeb..5427173b486b 100644 --- a/substrate/frame/glutton/src/lib.rs +++ b/substrate/frame/glutton/src/lib.rs @@ -89,6 +89,11 @@ pub mod pallet { /// The storage limit. storage: FixedU64, }, + /// The block length limit has been updated. + BlockLengthLimitSet { + /// The block length limit. + block_length: FixedU64, + }, } #[pallet::error] @@ -116,6 +121,13 @@ pub mod pallet { #[pallet::storage] pub(crate) type Storage = StorageValue<_, FixedU64, ValueQuery>; + /// The proportion of the `block length` to consume on each block. + /// + /// `1.0` is mapped to `100%`. 
Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to + /// over `1.0` could stall the chain. + #[pallet::storage] + pub(crate) type Length = StorageValue<_, FixedU64, ValueQuery>; + /// Storage map used for wasting proof size. /// /// It contains no meaningful data - hence the name "Trash". The maximal number of entries is @@ -146,6 +158,8 @@ pub mod pallet { pub storage: FixedU64, /// The amount of trash data for wasting proof size. pub trash_data_count: u32, + /// The block length limit. + pub block_length: FixedU64, #[serde(skip)] /// The required configuration field. pub _config: sp_std::marker::PhantomData, @@ -170,6 +184,9 @@ pub mod pallet { assert!(self.storage <= RESOURCE_HARD_LIMIT, "Storage limit is insane"); >::put(self.storage); + + assert!(self.block_length <= RESOURCE_HARD_LIMIT, "Block length limit is insane"); + >::put(self.block_length); } } @@ -208,6 +225,40 @@ pub mod pallet { } } + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + type Error = sp_inherents::MakeFatalError<()>; + + const INHERENT_IDENTIFIER: InherentIdentifier = *b"bloated0"; + + fn create_inherent(_data: &InherentData) -> Option { + let max_block_length = *T::BlockLength::get().max.get(DispatchClass::Mandatory); + let bloat_size = Length::::get().saturating_mul_int(max_block_length) as usize; + let amount_trash = bloat_size / VALUE_SIZE; + let garbage = TrashData::::iter() + .map(|(_k, v)| v) + .collect::>() + .into_iter() + .cycle() + .take(amount_trash) + .collect::>(); + + Some(Call::bloat { garbage }) + } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::bloat { .. }) + } + + fn check_inherent(call: &Self::Call, _: &InherentData) -> Result<(), Self::Error> { + match call { + Call::bloat { .. } => Ok(()), + _ => unreachable!("other calls are not inherents"), + } + } + } + #[pallet::call(weight = T::WeightInfo)] impl Pallet { /// Initialize the pallet. Should be called once, if no genesis state was provided. 
@@ -277,6 +328,31 @@ pub mod pallet { Self::deposit_event(Event::StorageLimitSet { storage }); Ok(()) } + + /// Increase the block size by including the specified garbage bytes. + #[pallet::call_index(3)] + #[pallet::weight((0, DispatchClass::Mandatory))] + pub fn bloat(_origin: OriginFor, _garbage: Vec<[u8; VALUE_SIZE]>) -> DispatchResult { + Ok(()) + } + + /// Set how much of the block length should be filled with trash data on each block. + /// + /// `1.0` means that all block should be filled. If set to `1.0`, storage proof size will + /// be close to zero. + /// + /// Only callable by Root or `AdminOrigin`. + #[pallet::call_index(4)] + #[pallet::weight({1})] + pub fn set_block_length(origin: OriginFor, block_length: FixedU64) -> DispatchResult { + T::AdminOrigin::ensure_origin_or_root(origin)?; + + ensure!(block_length <= RESOURCE_HARD_LIMIT, Error::::InsaneLimit); + Length::::set(block_length); + + Self::deposit_event(Event::BlockLengthLimitSet { block_length }); + Ok(()) + } } impl Pallet { diff --git a/substrate/frame/glutton/src/mock.rs b/substrate/frame/glutton/src/mock.rs index 132ef5cfbcbb..7163d7c46781 100644 --- a/substrate/frame/glutton/src/mock.rs +++ b/substrate/frame/glutton/src/mock.rs @@ -50,10 +50,14 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext } -/// Set the `compute` and `storage` limits. +/// Set the `compute`, `storage` and `block_length` limits. /// /// `1.0` corresponds to `100%`. 
-pub fn set_limits(compute: f64, storage: f64) { +pub fn set_limits(compute: f64, storage: f64, block_length: f64) { assert_ok!(Glutton::set_compute(RuntimeOrigin::root(), FixedU64::from_float(compute))); assert_ok!(Glutton::set_storage(RuntimeOrigin::root(), FixedU64::from_float(storage))); + assert_ok!(Glutton::set_block_length( + RuntimeOrigin::root(), + FixedU64::from_float(block_length) + )); } diff --git a/substrate/frame/glutton/src/tests.rs b/substrate/frame/glutton/src/tests.rs index b72d52727725..81d228f39a93 100644 --- a/substrate/frame/glutton/src/tests.rs +++ b/substrate/frame/glutton/src/tests.rs @@ -123,6 +123,43 @@ fn setting_compute_respects_limit() { }); } +#[test] +fn setting_block_length_works() { + new_test_ext().execute_with(|| { + assert_eq!(Compute::::get(), Zero::zero()); + + assert_ok!(Glutton::set_block_length(RuntimeOrigin::root(), FixedU64::from_float(0.3))); + assert_eq!(Length::::get(), FixedU64::from_float(0.3)); + System::assert_last_event( + Event::BlockLengthLimitSet { block_length: FixedU64::from_float(0.3) }.into(), + ); + + assert_noop!( + Glutton::set_block_length(RuntimeOrigin::signed(1), FixedU64::from_float(0.5)), + DispatchError::BadOrigin + ); + assert_noop!( + Glutton::set_block_length(RuntimeOrigin::none(), FixedU64::from_float(0.5)), + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn setting_block_length_respects_limit() { + new_test_ext().execute_with(|| { + // < 1000% is fine + assert_ok!(Glutton::set_block_length(RuntimeOrigin::root(), FixedU64::from_float(9.99)),); + // == 1000% is fine + assert_ok!(Glutton::set_block_length(RuntimeOrigin::root(), FixedU64::from_u32(10)),); + // > 1000% is not + assert_noop!( + Glutton::set_block_length(RuntimeOrigin::root(), FixedU64::from_float(10.01)), + Error::::InsaneLimit + ); + }); +} + #[test] fn setting_storage_works() { new_test_ext().execute_with(|| { @@ -163,7 +200,7 @@ fn setting_storage_respects_limit() { #[test] fn on_idle_works() { 
new_test_ext().execute_with(|| { - set_limits(One::one(), One::one()); + set_limits(One::one(), One::one(), One::one()); Glutton::on_idle(1, Weight::from_parts(20_000_000, 0)); }); @@ -173,7 +210,7 @@ fn on_idle_works() { #[test] fn on_idle_weight_high_proof_is_close_enough_works() { new_test_ext().execute_with(|| { - set_limits(One::one(), One::one()); + set_limits(One::one(), One::one(), One::one()); let should = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, WEIGHT_PROOF_SIZE_PER_MB * 5); let got = Glutton::on_idle(1, should); @@ -196,7 +233,7 @@ fn on_idle_weight_high_proof_is_close_enough_works() { #[test] fn on_idle_weight_low_proof_is_close_enough_works() { new_test_ext().execute_with(|| { - set_limits(One::one(), One::one()); + set_limits(One::one(), One::one(), One::one()); let should = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, WEIGHT_PROOF_SIZE_PER_KB * 20); let got = Glutton::on_idle(1, should); @@ -224,7 +261,7 @@ fn on_idle_weight_over_unity_is_close_enough_works() { let max_block = Weight::from_parts(500 * WEIGHT_REF_TIME_PER_MILLIS, 5 * WEIGHT_PROOF_SIZE_PER_MB); // But now we tell it to consume more than that. 
- set_limits(1.75, 1.5); + set_limits(1.75, 1.5, 0.0); let want = Weight::from_parts( (1.75 * max_block.ref_time() as f64) as u64, (1.5 * max_block.proof_size() as f64) as u64, From 40677b64c5f15f0bdeb30e9d0a2037c0eec2447d Mon Sep 17 00:00:00 2001 From: Tin Chung <56880684+chungquantin@users.noreply.github.com> Date: Tue, 18 Jun 2024 19:16:08 +0800 Subject: [PATCH 41/52] Remove deprecated treasury pallet calls (#3820) # ISSUE - Link to the issue: https://github.com/paritytech/polkadot-sdk/issues/3800 # Deliverables - [x] remove deprecated calls; (https://github.com/paritytech/polkadot-sdk/pull/3820/commits/d579b673672454d0dc7b5049e5cbbe6077da520b) - [x] set explicit coded indexes for Error and Event enums, remove unused variants and keep the same indexes for the rest; (https://github.com/paritytech/polkadot-sdk/pull/3820/commits/d579b673672454d0dc7b5049e5cbbe6077da520b) - [x] remove unused Config's type parameters; (https://github.com/paritytech/polkadot-sdk/pull/3820/commits/d579b673672454d0dc7b5049e5cbbe6077da520b) - [x] remove irrelevant tests and adopt relevant using old api; (https://github.com/paritytech/polkadot-sdk/pull/3820/commits/d579b673672454d0dc7b5049e5cbbe6077da520b) - [x] remove benchmarks for removed calls; (https://github.com/paritytech/polkadot-sdk/pull/3820/commits/1a3d5f1f96756555ddebd1b898c03464ffffdb25) - [x] prdoc (https://github.com/paritytech/polkadot-sdk/pull/3820/commits/d579b673672454d0dc7b5049e5cbbe6077da520b) - [x] remove deprecated methods from the `treasury/README.md` and add up-to-date dispatchable functions documentation (https://github.com/paritytech/polkadot-sdk/pull/3820/commits/d579b673672454d0dc7b5049e5cbbe6077da520b) - [x] remove deprecated weight functions (https://github.com/paritytech/polkadot-sdk/pull/3820/commits/8f74134b82df9a6df2824bbbe1555667223f1a83) > ### Separated to other issues > - [ ] remove storage items like Proposals and ProposalCount, that are not used anymore Adjust all treasury pallet instances within 
polkadot-sdk - [x] `pallet_bounty`, `tip`, `child_bounties`: https://github.com/openguild-labs/polkadot-sdk/pull/3 - [x] Remove deprecated treasury weight functions used in Westend and Rococo runtime `collective-westend`, `collective-rococo` Add migration for westend and rococo to clean the data from removed storage items - [ ] https://github.com/paritytech/polkadot-sdk/pull/3828 # Test Outcomes Successful tests by running `cargo test --features runtime-benchmarks` ``` running 38 tests test tests::__construct_runtime_integrity_test::runtime_integrity_tests ... ok test benchmarking::benchmarks::bench_check_status ... ok test benchmarking::benchmarks::bench_payout ... ok test benchmarking::benchmarks::bench_spend_local ... ok test tests::accepted_spend_proposal_enacted_on_spend_period ... ok test benchmarking::benchmarks::bench_spend ... ok test tests::accepted_spend_proposal_ignored_outside_spend_period ... ok test benchmarking::benchmarks::bench_void_spend ... ok test benchmarking::benchmarks::bench_remove_approval ... ok test tests::genesis_funding_works ... ok test tests::genesis_config_works ... ok test tests::inexistent_account_works ... ok test tests::minting_works ... ok test tests::check_status_works ... ok test tests::payout_retry_works ... ok test tests::pot_underflow_should_not_diminish ... ok test tests::remove_already_removed_approval_fails ... ok test tests::spend_local_origin_permissioning_works ... ok test tests::spend_valid_from_works ... ok test tests::spend_expires ... ok test tests::spend_works ... ok test tests::test_genesis_config_builds ... ok test tests::spend_payout_works ... ok test tests::spend_local_origin_works ... ok test tests::spend_origin_works ... ok test tests::spending_local_in_batch_respects_max_total ... ok test tests::spending_in_batch_respects_max_total ... ok test tests::try_state_proposals_invariant_2_works ... ok test tests::try_state_proposals_invariant_1_works ... ok test tests::try_state_spends_invariant_2_works ... 
ok test tests::try_state_spends_invariant_1_works ... ok test tests::treasury_account_doesnt_get_deleted ... ok test tests::try_state_spends_invariant_3_works ... ok test tests::unused_pot_should_diminish ... ok test tests::void_spend_works ... ok test tests::try_state_proposals_invariant_3_works ... ok test tests::max_approvals_limited ... ok test benchmarking::benchmarks::bench_on_initialize_proposals ... ok test result: ok. 38 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.08s Doc-tests pallet_treasury running 2 tests test substrate/frame/treasury/src/lib.rs - (line 52) ... ignored test substrate/frame/treasury/src/lib.rs - (line 79) ... ignored test result: ok. 0 passed; 0 failed; 2 ignored; 0 measured; 0 filtered out; finished in 0.00s ``` polkadot address: 19nSqFQorfF2HxD3oBzWM3oCh4SaCRKWt1yvmgaPYGCo71J --- .../collectives-westend/src/fellowship/mod.rs | 26 -- .../src/weights/pallet_treasury.rs | 37 --- polkadot/runtime/common/src/impls.rs | 3 - polkadot/runtime/rococo/src/lib.rs | 6 - .../rococo/src/weights/pallet_treasury.rs | 45 --- polkadot/runtime/westend/src/lib.rs | 6 - .../westend/src/weights/pallet_treasury.rs | 45 --- prdoc/pr_3820.prdoc | 32 +++ substrate/bin/node/runtime/src/lib.rs | 5 - substrate/frame/bounties/src/tests.rs | 199 +------------- substrate/frame/child-bounties/src/tests.rs | 4 - substrate/frame/tips/src/tests.rs | 7 - substrate/frame/treasury/README.md | 14 +- substrate/frame/treasury/src/benchmarking.rs | 78 +----- substrate/frame/treasury/src/lib.rs | 147 +--------- substrate/frame/treasury/src/tests.rs | 256 ++---------------- substrate/frame/treasury/src/weights.rs | 95 +------ 17 files changed, 87 insertions(+), 918 deletions(-) create mode 100644 prdoc/pr_3820.prdoc diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs index 6a4a18207967..6f13c3d9d5de 100644 --- 
a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs @@ -55,8 +55,6 @@ use xcm_builder::{AliasesIntoAccountId32, PayOverXcm}; #[cfg(feature = "runtime-benchmarks")] use crate::impls::benchmarks::{OpenHrmpChannel, PayWithEnsure}; -#[cfg(feature = "runtime-benchmarks")] -use testnet_parachains_constants::westend::currency::DOLLARS; /// The Fellowship members' ranks. pub mod ranks { @@ -270,16 +268,6 @@ parameter_types! { pub SelfParaId: ParaId = ParachainInfo::parachain_id(); } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - // Benchmark bond. Needed to make `propose_spend` work. - pub const TenPercent: Permill = Permill::from_percent(10); - // Benchmark minimum. Needed to make `propose_spend` work. - pub const BenchmarkProposalBondMinimum: Balance = 1 * DOLLARS; - // Benchmark maximum. Needed to make `propose_spend` work. - pub const BenchmarkProposalBondMaximum: Balance = 10 * DOLLARS; -} - /// [`PayOverXcm`] setup to pay the Fellowship Treasury. pub type FellowshipTreasuryPaymaster = PayOverXcm< FellowshipTreasuryInteriorLocation, @@ -302,20 +290,6 @@ impl pallet_treasury::Config for Runtime { // TODO: replace with `NeverEnsure` once polkadot-sdk 1.5 is released. type ApproveOrigin = NeverEnsureOrigin<()>; type OnSlash = (); - #[cfg(not(feature = "runtime-benchmarks"))] - type ProposalBond = HundredPercent; - #[cfg(not(feature = "runtime-benchmarks"))] - type ProposalBondMinimum = MaxBalance; - #[cfg(not(feature = "runtime-benchmarks"))] - type ProposalBondMaximum = MaxBalance; - - #[cfg(feature = "runtime-benchmarks")] - type ProposalBond = TenPercent; - #[cfg(feature = "runtime-benchmarks")] - type ProposalBondMinimum = BenchmarkProposalBondMinimum; - #[cfg(feature = "runtime-benchmarks")] - type ProposalBondMaximum = BenchmarkProposalBondMaximum; - // end. 
type WeightInfo = weights::pallet_treasury::WeightInfo; type PalletId = FellowshipTreasuryPalletId; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_treasury.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_treasury.rs index 58540e646d8c..5c513c3754ce 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_treasury.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_treasury.rs @@ -62,43 +62,6 @@ impl pallet_treasury::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `FellowshipTreasury::ProposalCount` (r:1 w:1) - /// Proof: `FellowshipTreasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `FellowshipTreasury::Proposals` (r:0 w:1) - /// Proof: `FellowshipTreasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - fn propose_spend() -> Weight { - // Proof Size summary in bytes: - // Measured: `143` - // Estimated: `1489` - // Minimum execution time: 264_000_000 picoseconds. - Weight::from_parts(277_000_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `FellowshipTreasury::Proposals` (r:1 w:1) - /// Proof: `FellowshipTreasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn reject_proposal() -> Weight { - // Proof Size summary in bytes: - // Measured: `301` - // Estimated: `3593` - // Minimum execution time: 289_000_000 picoseconds. 
- Weight::from_parts(312_000_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// The range of component `p` is `[0, 99]`. - fn approve_proposal(_p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `FellowshipTreasury::Approvals` (r:1 w:1) /// Proof: `FellowshipTreasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn remove_approval() -> Weight { diff --git a/polkadot/runtime/common/src/impls.rs b/polkadot/runtime/common/src/impls.rs index c913b90b1538..72ece79f1940 100644 --- a/polkadot/runtime/common/src/impls.rs +++ b/polkadot/runtime/common/src/impls.rs @@ -332,9 +332,6 @@ mod tests { type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; type OnSlash = (); - type ProposalBond = (); - type ProposalBondMinimum = (); - type ProposalBondMaximum = (); type SpendPeriod = (); type Burn = (); type BurnDestination = (); diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index ebdcdd0cbed7..abbdca013aed 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -476,9 +476,6 @@ parameter_types! { } parameter_types! 
{ - pub const ProposalBond: Permill = Permill::from_percent(5); - pub const ProposalBondMinimum: Balance = 2000 * CENTS; - pub const ProposalBondMaximum: Balance = 1 * GRAND; pub const SpendPeriod: BlockNumber = 6 * DAYS; pub const Burn: Permill = Permill::from_perthousand(2); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); @@ -505,9 +502,6 @@ impl pallet_treasury::Config for Runtime { type RejectOrigin = EitherOfDiverse, Treasurer>; type RuntimeEvent = RuntimeEvent; type OnSlash = Treasury; - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ProposalBondMinimum; - type ProposalBondMaximum = ProposalBondMaximum; type SpendPeriod = SpendPeriod; type Burn = Burn; type BurnDestination = Society; diff --git a/polkadot/runtime/rococo/src/weights/pallet_treasury.rs b/polkadot/runtime/rococo/src/weights/pallet_treasury.rs index 144e9d5b8723..06246ada72f1 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_treasury.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_treasury.rs @@ -63,51 +63,6 @@ impl pallet_treasury::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - fn propose_spend() -> Weight { - // Proof Size summary in bytes: - // Measured: `143` - // Estimated: `1489` - // Minimum execution time: 354_000_000 picoseconds. 
- Weight::from_parts(376_000_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Treasury Proposals (r:1 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - fn reject_proposal() -> Weight { - // Proof Size summary in bytes: - // Measured: `301` - // Estimated: `3593` - // Minimum execution time: 547_000_000 picoseconds. - Weight::from_parts(550_000_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Treasury Proposals (r:1 w:0) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// The range of component `p` is `[0, 99]`. - fn approve_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `470 + p * (8 ±0)` - // Estimated: `3573` - // Minimum execution time: 104_000_000 picoseconds. 
- Weight::from_parts(121_184_402, 0) - .saturating_add(Weight::from_parts(0, 3573)) - // Standard Error: 42_854 - .saturating_add(Weight::from_parts(153_112, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } /// Storage: Treasury Approvals (r:1 w:1) /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index c8b1826b4767..789e023730b4 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -661,9 +661,6 @@ impl pallet_fast_unstake::Config for Runtime { } parameter_types! { - pub const ProposalBond: Permill = Permill::from_percent(5); - pub const ProposalBondMinimum: Balance = 2000 * CENTS; - pub const ProposalBondMaximum: Balance = 1 * GRAND; pub const SpendPeriod: BlockNumber = 6 * DAYS; pub const Burn: Permill = Permill::from_perthousand(2); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); @@ -690,9 +687,6 @@ impl pallet_treasury::Config for Runtime { type RejectOrigin = EitherOfDiverse, Treasurer>; type RuntimeEvent = RuntimeEvent; type OnSlash = Treasury; - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ProposalBondMinimum; - type ProposalBondMaximum = ProposalBondMaximum; type SpendPeriod = SpendPeriod; type Burn = Burn; type BurnDestination = (); diff --git a/polkadot/runtime/westend/src/weights/pallet_treasury.rs b/polkadot/runtime/westend/src/weights/pallet_treasury.rs index 144e9d5b8723..06246ada72f1 100644 --- a/polkadot/runtime/westend/src/weights/pallet_treasury.rs +++ b/polkadot/runtime/westend/src/weights/pallet_treasury.rs @@ -63,51 +63,6 @@ impl pallet_treasury::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Treasury ProposalCount (r:1 w:1) - /// 
Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - fn propose_spend() -> Weight { - // Proof Size summary in bytes: - // Measured: `143` - // Estimated: `1489` - // Minimum execution time: 354_000_000 picoseconds. - Weight::from_parts(376_000_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Treasury Proposals (r:1 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - fn reject_proposal() -> Weight { - // Proof Size summary in bytes: - // Measured: `301` - // Estimated: `3593` - // Minimum execution time: 547_000_000 picoseconds. - Weight::from_parts(550_000_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Treasury Proposals (r:1 w:0) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// The range of component `p` is `[0, 99]`. - fn approve_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `470 + p * (8 ±0)` - // Estimated: `3573` - // Minimum execution time: 104_000_000 picoseconds. 
- Weight::from_parts(121_184_402, 0) - .saturating_add(Weight::from_parts(0, 3573)) - // Standard Error: 42_854 - .saturating_add(Weight::from_parts(153_112, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } /// Storage: Treasury Approvals (r:1 w:1) /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { diff --git a/prdoc/pr_3820.prdoc b/prdoc/pr_3820.prdoc new file mode 100644 index 000000000000..33e8129df92a --- /dev/null +++ b/prdoc/pr_3820.prdoc @@ -0,0 +1,32 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove deprecated calls from treasury pallet + +doc: + - audience: Runtime User + description: | + This PR removes deprecated calls and relevant tests from `pallet-treasury`. + - Remove deprecated calls `propose_spend`, `reject_proposal`, `approve_proposal`. + - Replace the code flow of `propose_spend` then `approve_proposal` with `spend_local` + - Remove deprecated calls' related weight functions and test cases.
+ - Remove deprecated parameter types: ProposalBond, ProposalBondMaximum, ProposalBondMinimum + - Remove pallet treasury's relevant deprecated code in pallet-tips, pallet-bounties and pallet-child-bounties + +crates: + - name: pallet-treasury + bump: major + - name: pallet-tips + bump: patch + - name: pallet-child-bounties + bump: patch + - name: pallet-bounties + bump: patch + - name: polkadot-runtime-common + bump: patch + - name: rococo-runtime + bump: patch + - name: westend-runtime + bump: patch + - name: collectives-westend-runtime + bump: patch diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 2bddb3a1adef..d5db82cb1fb5 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -1212,8 +1212,6 @@ impl pallet_membership::Config for Runtime { } parameter_types! { - pub const ProposalBond: Permill = Permill::from_percent(5); - pub const ProposalBondMinimum: Balance = 1 * DOLLARS; pub const SpendPeriod: BlockNumber = 1 * DAYS; pub const Burn: Permill = Permill::from_percent(50); pub const TipCountdown: BlockNumber = 1 * DAYS; @@ -1240,9 +1238,6 @@ impl pallet_treasury::Config for Runtime { >; type RuntimeEvent = RuntimeEvent; type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ProposalBondMinimum; - type ProposalBondMaximum = (); type SpendPeriod = SpendPeriod; type Burn = Burn; type BurnDestination = (); diff --git a/substrate/frame/bounties/src/tests.rs b/substrate/frame/bounties/src/tests.rs index 212f0bd29590..205765e9286e 100644 --- a/substrate/frame/bounties/src/tests.rs +++ b/substrate/frame/bounties/src/tests.rs @@ -71,7 +71,6 @@ impl pallet_balances::Config for Test { type AccountStore = System; } parameter_types! 
{ - pub const ProposalBond: Permill = Permill::from_percent(5); pub static Burn: Permill = Permill::from_percent(50); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); pub const TreasuryPalletId2: PalletId = PalletId(*b"py/trsr2"); @@ -88,9 +87,6 @@ impl pallet_treasury::Config for Test { type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); // Just gets burned. @@ -115,9 +111,6 @@ impl pallet_treasury::Config for Test { type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); // Just gets burned. @@ -216,56 +209,12 @@ fn minting_works() { }); } -#[test] -fn spend_proposal_takes_min_deposit() { - new_test_ext().execute_with(|| { - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); - assert_eq!(Balances::free_balance(0), 99); - assert_eq!(Balances::reserved_balance(0), 1); - }); -} - -#[test] -fn spend_proposal_takes_proportional_deposit() { - new_test_ext().execute_with(|| { - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_eq!(Balances::free_balance(0), 95); - assert_eq!(Balances::reserved_balance(0), 5); - }); -} - -#[test] -fn spend_proposal_fails_when_proposer_poor() { - new_test_ext().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(2), 100, 3) - }, - TreasuryError::InsufficientProposersBalance, - ); - }); -} - #[test] fn accepted_spend_proposal_ignored_outside_spend_period() { new_test_ext().execute_with(|| { 
Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 100, 3) }); >::on_initialize(1); assert_eq!(Balances::free_balance(3), 0); @@ -286,112 +235,13 @@ fn unused_pot_should_diminish() { }); } -#[test] -fn rejected_spend_proposal_ignored_on_spend_period() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - - >::on_initialize(2); - assert_eq!(Balances::free_balance(3), 0); - assert_eq!(Treasury::pot(), 50); - }); -} - -#[test] -fn reject_already_rejected_spend_proposal_fails() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - assert_noop!( - { - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }, - TreasuryError::InvalidIndex - ); - }); -} - -#[test] -fn reject_non_existent_spend_proposal_fails() { - new_test_ext().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }, - pallet_treasury::Error::::InvalidIndex - ); - }); -} - -#[test] -fn accept_non_existent_spend_proposal_fails() { - new_test_ext().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }, - TreasuryError::InvalidIndex - ); - }); -} - -#[test] -fn 
accept_already_rejected_spend_proposal_fails() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - assert_noop!( - { - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }, - TreasuryError::InvalidIndex - ); - }); -} - #[test] fn accepted_spend_proposal_enacted_on_spend_period() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 100, 3) }); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 100); @@ -405,14 +255,7 @@ fn pot_underflow_should_not_diminish() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 150, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 150, 3) }); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed @@ -433,26 +276,12 @@ fn treasury_account_doesnt_get_deleted() { assert_eq!(Treasury::pot(), 100); let treasury_balance = Balances::free_balance(&Treasury::account_id()); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), treasury_balance, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 
treasury_balance, 3) }); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), Treasury::pot(), 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 1) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), Treasury::pot(), 3) }); >::on_initialize(4); assert_eq!(Treasury::pot(), 0); // Pot is emptied @@ -475,22 +304,8 @@ fn inexistent_account_works() { assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist assert_eq!(Treasury::pot(), 0); // Pot is empty - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 99, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 1) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 99, 3) }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 1, 3) }); >::on_initialize(2); assert_eq!(Treasury::pot(), 0); // Pot hasn't changed assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed diff --git a/substrate/frame/child-bounties/src/tests.rs b/substrate/frame/child-bounties/src/tests.rs index 38e86c528e5c..be6fd62bb4c5 100644 --- a/substrate/frame/child-bounties/src/tests.rs +++ b/substrate/frame/child-bounties/src/tests.rs @@ -74,7 +74,6 @@ impl pallet_balances::Config for Test { type AccountStore = System; } parameter_types! 
{ - pub const ProposalBond: Permill = Permill::from_percent(5); pub const Burn: Permill = Permill::from_percent(50); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); pub TreasuryAccount: u128 = Treasury::account_id(); @@ -88,9 +87,6 @@ impl pallet_treasury::Config for Test { type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); diff --git a/substrate/frame/tips/src/tests.rs b/substrate/frame/tips/src/tests.rs index 32a31b7fa13a..ad987cc6cd66 100644 --- a/substrate/frame/tips/src/tests.rs +++ b/substrate/frame/tips/src/tests.rs @@ -94,7 +94,6 @@ impl ContainsLengthBound for TenToFourteen { } } parameter_types! { - pub const ProposalBond: Permill = Permill::from_percent(5); pub const Burn: Permill = Permill::from_percent(50); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); pub const TreasuryPalletId2: PalletId = PalletId(*b"py/trsr2"); @@ -109,9 +108,6 @@ impl pallet_treasury::Config for Test { type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); // Just gets burned. @@ -136,9 +132,6 @@ impl pallet_treasury::Config for Test { type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); // Just gets burned. 
diff --git a/substrate/frame/treasury/README.md b/substrate/frame/treasury/README.md index 4945d79d1429..2bd58a9817aa 100644 --- a/substrate/frame/treasury/README.md +++ b/substrate/frame/treasury/README.md @@ -26,6 +26,14 @@ and use the funds to pay developers. ### Dispatchable Functions General spending/proposal protocol: -- `propose_spend` - Make a spending proposal and stake the required deposit. -- `reject_proposal` - Reject a proposal, slashing the deposit. -- `approve_proposal` - Accept the proposal, returning the deposit. +- `spend_local` - Propose and approve a spend of treasury funds, enables the + creation of spends using the native currency of the chain, utilizing the funds + stored in the pot +- `spend` - Propose and approve a spend of treasury funds, allows spending any + asset kind managed by the treasury +- `remove_approval` - Force a previously approved proposal to be removed from + the approval queue +- `payout` - Claim a spend +- `check_status` - Check the status of the spend and remove it from the storage + if processed +- `void_spend` - Void previously approved spend diff --git a/substrate/frame/treasury/src/benchmarking.rs b/substrate/frame/treasury/src/benchmarking.rs index 0b9999e37fbe..63978c94e682 100644 --- a/substrate/frame/treasury/src/benchmarking.rs +++ b/substrate/frame/treasury/src/benchmarking.rs @@ -59,12 +59,12 @@ where const SEED: u32 = 0; -// Create the pre-requisite information needed to create a treasury `propose_spend`. +// Create the pre-requisite information needed to create a treasury `spend_local`. 
fn setup_proposal, I: 'static>( u: u32, ) -> (T::AccountId, BalanceOf, AccountIdLookupOf) { let caller = account("caller", u, SEED); - let value: BalanceOf = T::ProposalBondMinimum::get().saturating_mul(100u32.into()); + let value: BalanceOf = T::Currency::minimum_balance() * 100u32.into(); let _ = T::Currency::make_free_balance_be(&caller, value); let beneficiary = account("beneficiary", u, SEED); let beneficiary_lookup = T::Lookup::unlookup(beneficiary); @@ -73,12 +73,10 @@ fn setup_proposal, I: 'static>( // Create proposals that are approved for use in `on_initialize`. fn create_approved_proposals, I: 'static>(n: u32) -> Result<(), &'static str> { + let origin = T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; for i in 0..n { - let (caller, value, lookup) = setup_proposal::(i); - #[allow(deprecated)] - Treasury::::propose_spend(RawOrigin::Signed(caller).into(), value, lookup)?; - let proposal_id = >::get() - 1; - Approvals::::try_append(proposal_id).unwrap(); + let (_, value, lookup) = setup_proposal::(i); + Treasury::::spend_local(origin.clone(), value, lookup)?; } ensure!(>::get().len() == n as usize, "Not all approved"); Ok(()) @@ -126,71 +124,13 @@ mod benchmarks { Ok(()) } - #[benchmark] - fn propose_spend() -> Result<(), BenchmarkError> { - let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); - // Whitelist caller account from further DB operations. 
- let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - - #[extrinsic_call] - _(RawOrigin::Signed(caller), value, beneficiary_lookup); - - Ok(()) - } - - #[benchmark] - fn reject_proposal() -> Result<(), BenchmarkError> { - let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); - #[allow(deprecated)] - Treasury::::propose_spend( - RawOrigin::Signed(caller).into(), - value, - beneficiary_lookup, - )?; - let proposal_id = Treasury::::proposal_count() - 1; - let reject_origin = - T::RejectOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - - #[extrinsic_call] - _(reject_origin as T::RuntimeOrigin, proposal_id); - - Ok(()) - } - - #[benchmark] - fn approve_proposal( - p: Linear<0, { T::MaxApprovals::get() - 1 }>, - ) -> Result<(), BenchmarkError> { - let approve_origin = - T::ApproveOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - create_approved_proposals::(p)?; - let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); - #[allow(deprecated)] - Treasury::::propose_spend( - RawOrigin::Signed(caller).into(), - value, - beneficiary_lookup, - )?; - let proposal_id = Treasury::::proposal_count() - 1; - - #[extrinsic_call] - _(approve_origin as T::RuntimeOrigin, proposal_id); - - Ok(()) - } - #[benchmark] fn remove_approval() -> Result<(), BenchmarkError> { - let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); - #[allow(deprecated)] - Treasury::::propose_spend( - RawOrigin::Signed(caller).into(), - value, - beneficiary_lookup, - )?; + let origin = + T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + let (_, value, beneficiary_lookup) = setup_proposal::(SEED); + Treasury::::spend_local(origin, value, beneficiary_lookup)?; let proposal_id = Treasury::::proposal_count() - 1; - Approvals::::try_append(proposal_id).unwrap(); let reject_origin = 
T::RejectOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; diff --git a/substrate/frame/treasury/src/lib.rs b/substrate/frame/treasury/src/lib.rs index 1ccd84566432..4677a0e0335c 100644 --- a/substrate/frame/treasury/src/lib.rs +++ b/substrate/frame/treasury/src/lib.rs @@ -218,19 +218,6 @@ pub mod pallet { /// Handler for the unbalanced decrease when slashing for a rejected proposal or bounty. type OnSlash: OnUnbalanced>; - /// Fraction of a proposal's value that should be bonded in order to place the proposal. - /// An accepted proposal gets these back. A rejected proposal does not. - #[pallet::constant] - type ProposalBond: Get; - - /// Minimum amount of funds that should be placed in a deposit for making a proposal. - #[pallet::constant] - type ProposalBondMinimum: Get>; - - /// Maximum amount of funds that should be placed in a deposit for making a proposal. - #[pallet::constant] - type ProposalBondMaximum: Get>>; - /// Period between successive spends. #[pallet::constant] type SpendPeriod: Get>; @@ -363,14 +350,10 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event, I: 'static = ()> { - /// New proposal. - Proposed { proposal_index: ProposalIndex }, /// We have ended a spend period and will now allocate funds. Spending { budget_remaining: BalanceOf }, /// Some funds have been allocated. Awarded { proposal_index: ProposalIndex, award: BalanceOf, account: T::AccountId }, - /// A proposal was rejected; funds were slashed. - Rejected { proposal_index: ProposalIndex, slashed: BalanceOf }, /// Some of our funds have been burnt. Burnt { burnt_funds: BalanceOf }, /// Spending has finished; this is the amount that rolls over until next spend. @@ -408,8 +391,6 @@ pub mod pallet { /// Error for the treasury pallet. #[pallet::error] pub enum Error { - /// Proposer's balance is too low. - InsufficientProposersBalance, /// No proposal, bounty or spend at that index. 
InvalidIndex, /// Too many approvals in the queue. @@ -476,123 +457,6 @@ pub mod pallet { #[pallet::call] impl, I: 'static> Pallet { - /// Put forward a suggestion for spending. - /// - /// ## Dispatch Origin - /// - /// Must be signed. - /// - /// ## Details - /// A deposit proportional to the value is reserved and slashed if the proposal is rejected. - /// It is returned once the proposal is awarded. - /// - /// ### Complexity - /// - O(1) - /// - /// ## Events - /// - /// Emits [`Event::Proposed`] if successful. - #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::propose_spend())] - #[allow(deprecated)] - #[deprecated( - note = "`propose_spend` will be removed in February 2024. Use `spend` instead." - )] - pub fn propose_spend( - origin: OriginFor, - #[pallet::compact] value: BalanceOf, - beneficiary: AccountIdLookupOf, - ) -> DispatchResult { - let proposer = ensure_signed(origin)?; - let beneficiary = T::Lookup::lookup(beneficiary)?; - - let bond = Self::calculate_bond(value); - T::Currency::reserve(&proposer, bond) - .map_err(|_| Error::::InsufficientProposersBalance)?; - - let c = Self::proposal_count(); - >::put(c + 1); - >::insert(c, Proposal { proposer, value, beneficiary, bond }); - - Self::deposit_event(Event::Proposed { proposal_index: c }); - Ok(()) - } - - /// Reject a proposed spend. - /// - /// ## Dispatch Origin - /// - /// Must be [`Config::RejectOrigin`]. - /// - /// ## Details - /// The original deposit will be slashed. - /// - /// ### Complexity - /// - O(1) - /// - /// ## Events - /// - /// Emits [`Event::Rejected`] if successful. - #[pallet::call_index(1)] - #[pallet::weight((T::WeightInfo::reject_proposal(), DispatchClass::Operational))] - #[allow(deprecated)] - #[deprecated( - note = "`reject_proposal` will be removed in February 2024. Use `spend` instead." 
- )] - pub fn reject_proposal( - origin: OriginFor, - #[pallet::compact] proposal_id: ProposalIndex, - ) -> DispatchResult { - T::RejectOrigin::ensure_origin(origin)?; - - let proposal = - >::take(&proposal_id).ok_or(Error::::InvalidIndex)?; - let value = proposal.bond; - let imbalance = T::Currency::slash_reserved(&proposal.proposer, value).0; - T::OnSlash::on_unbalanced(imbalance); - - Self::deposit_event(Event::::Rejected { - proposal_index: proposal_id, - slashed: value, - }); - Ok(()) - } - - /// Approve a proposal. - /// - /// ## Dispatch Origin - /// - /// Must be [`Config::ApproveOrigin`]. - /// - /// ## Details - /// - /// At a later time, the proposal will be allocated to the beneficiary and the original - /// deposit will be returned. - /// - /// ### Complexity - /// - O(1). - /// - /// ## Events - /// - /// No events are emitted from this dispatch. - #[pallet::call_index(2)] - #[pallet::weight((T::WeightInfo::approve_proposal(T::MaxApprovals::get()), DispatchClass::Operational))] - #[allow(deprecated)] - #[deprecated( - note = "`approve_proposal` will be removed in February 2024. Use `spend` instead." - )] - pub fn approve_proposal( - origin: OriginFor, - #[pallet::compact] proposal_id: ProposalIndex, - ) -> DispatchResult { - T::ApproveOrigin::ensure_origin(origin)?; - - ensure!(>::contains_key(proposal_id), Error::::InvalidIndex); - Approvals::::try_append(proposal_id) - .map_err(|_| Error::::TooManyApprovals)?; - Ok(()) - } - /// Propose and approve a spend of treasury funds. /// /// ## Dispatch Origin @@ -794,7 +658,7 @@ pub mod pallet { /// /// ## Dispatch Origin /// - /// Must be signed. + /// Must be signed /// /// ## Details /// @@ -934,15 +798,6 @@ impl, I: 'static> Pallet { T::PalletId::get().into_account_truncating() } - /// The needed bond for a proposal whose spend is `value`. 
- fn calculate_bond(value: BalanceOf) -> BalanceOf { - let mut r = T::ProposalBondMinimum::get().max(T::ProposalBond::get() * value); - if let Some(m) = T::ProposalBondMaximum::get() { - r = r.min(m); - } - r - } - /// Spend some money! returns number of approvals before spend. pub fn spend_funds() -> Weight { let mut total_weight = Weight::zero(); diff --git a/substrate/frame/treasury/src/tests.rs b/substrate/frame/treasury/src/tests.rs index e8b9270cd965..94f5e6b70942 100644 --- a/substrate/frame/treasury/src/tests.rs +++ b/substrate/frame/treasury/src/tests.rs @@ -126,7 +126,6 @@ impl Pay for TestPay { } parameter_types! { - pub const ProposalBond: Permill = Permill::from_percent(5); pub const Burn: Permill = Permill::from_percent(50); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); pub TreasuryAccount: u128 = Treasury::account_id(); @@ -142,6 +141,7 @@ impl frame_support::traits::EnsureOrigin for TestSpendOrigin { frame_system::RawOrigin::Signed(11) => Ok(10), frame_system::RawOrigin::Signed(12) => Ok(20), frame_system::RawOrigin::Signed(13) => Ok(50), + frame_system::RawOrigin::Signed(14) => Ok(500), r => Err(RuntimeOrigin::from(r)), }) } @@ -168,9 +168,6 @@ impl Config for Test { type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); // Just gets burned. 
@@ -285,56 +282,12 @@ fn minting_works() { }); } -#[test] -fn spend_proposal_takes_min_deposit() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); - assert_eq!(Balances::free_balance(0), 99); - assert_eq!(Balances::reserved_balance(0), 1); - }); -} - -#[test] -fn spend_proposal_takes_proportional_deposit() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_eq!(Balances::free_balance(0), 95); - assert_eq!(Balances::reserved_balance(0), 5); - }); -} - -#[test] -fn spend_proposal_fails_when_proposer_poor() { - ExtBuilder::default().build().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(2), 100, 3) - }, - Error::::InsufficientProposersBalance, - ); - }); -} - #[test] fn accepted_spend_proposal_ignored_outside_spend_period() { ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3)); >::on_initialize(1); assert_eq!(Balances::free_balance(3), 0); @@ -355,112 +308,13 @@ fn unused_pot_should_diminish() { }); } -#[test] -fn rejected_spend_proposal_ignored_on_spend_period() { - ExtBuilder::default().build().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - - >::on_initialize(2); - assert_eq!(Balances::free_balance(3), 0); - 
assert_eq!(Treasury::pot(), 50); - }); -} - -#[test] -fn reject_already_rejected_spend_proposal_fails() { - ExtBuilder::default().build().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - assert_noop!( - { - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }, - Error::::InvalidIndex - ); - }); -} - -#[test] -fn reject_non_existent_spend_proposal_fails() { - ExtBuilder::default().build().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }, - Error::::InvalidIndex - ); - }); -} - -#[test] -fn accept_non_existent_spend_proposal_fails() { - ExtBuilder::default().build().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }, - Error::::InvalidIndex - ); - }); -} - -#[test] -fn accept_already_rejected_spend_proposal_fails() { - ExtBuilder::default().build().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - assert_noop!( - { - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }, - Error::::InvalidIndex - ); - }); -} - #[test] fn accepted_spend_proposal_enacted_on_spend_period() { ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); 
+ assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3)); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 100); @@ -474,14 +328,7 @@ fn pot_underflow_should_not_diminish() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 150, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 150, 3)); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed @@ -502,26 +349,12 @@ fn treasury_account_doesnt_get_deleted() { assert_eq!(Treasury::pot(), 100); let treasury_balance = Balances::free_balance(&Treasury::account_id()); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), treasury_balance, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), treasury_balance, 3)); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), Treasury::pot(), 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 1) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), Treasury::pot(), 3)); >::on_initialize(4); assert_eq!(Treasury::pot(), 0); // Pot is emptied @@ -544,22 +377,9 @@ fn inexistent_account_works() { assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist assert_eq!(Treasury::pot(), 0); // Pot is empty - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 99, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); - assert_ok!({ - #[allow(deprecated)] - 
Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 1) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 99, 3)); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 1, 3)); + >::on_initialize(2); assert_eq!(Treasury::pot(), 0); // Pot hasn't changed assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed @@ -601,26 +421,12 @@ fn max_approvals_limited() { Balances::make_free_balance_be(&0, u64::MAX); for _ in 0..::MaxApprovals::get() { - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3)); } // One too many will fail - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); assert_noop!( - { - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }, + Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3), Error::::TooManyApprovals ); }); @@ -631,14 +437,8 @@ fn remove_already_removed_approval_fails() { ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3)); + assert_eq!(Treasury::approvals(), vec![0]); assert_ok!(Treasury::remove_approval(RuntimeOrigin::root(), 0)); assert_eq!(Treasury::approvals(), vec![]); @@ -972,11 +772,9 @@ fn check_status_works() { fn try_state_proposals_invariant_1_works() { ExtBuilder::default().build().execute_with(|| { use frame_support::pallet_prelude::DispatchError::Other; - // Add 
a proposal using `propose_spend` - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); + // Add a proposal and approve using `spend_local` + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 1, 3)); + assert_eq!(Proposals::::iter().count(), 1); assert_eq!(ProposalCount::::get(), 1); // Check invariant 1 holds @@ -995,12 +793,11 @@ fn try_state_proposals_invariant_1_works() { fn try_state_proposals_invariant_2_works() { ExtBuilder::default().build().execute_with(|| { use frame_support::pallet_prelude::DispatchError::Other; - // Add a proposal using `propose_spend` - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); + // Add a proposal and approve using `spend_local` + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 1, 3)); + assert_eq!(Proposals::::iter().count(), 1); + assert_eq!(Approvals::::get().len(), 1); let current_proposal_count = ProposalCount::::get(); assert_eq!(current_proposal_count, 1); // Check invariant 2 holds @@ -1025,17 +822,10 @@ fn try_state_proposals_invariant_2_works() { fn try_state_proposals_invariant_3_works() { ExtBuilder::default().build().execute_with(|| { use frame_support::pallet_prelude::DispatchError::Other; - // Add a proposal using `propose_spend` - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 10, 3) - }); + // Add a proposal and approve using `spend_local` + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 10, 3)); + assert_eq!(Proposals::::iter().count(), 1); - // Approve the proposal - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); assert_eq!(Approvals::::get().len(), 1); // Check invariant 3 holds assert!(Approvals::::get() diff --git a/substrate/frame/treasury/src/weights.rs b/substrate/frame/treasury/src/weights.rs index 82277e2d28f6..8c9c6eb1d0fb 100644 --- a/substrate/frame/treasury/src/weights.rs +++ 
b/substrate/frame/treasury/src/weights.rs @@ -52,9 +52,6 @@ use core::marker::PhantomData; /// Weight functions needed for `pallet_treasury`. pub trait WeightInfo { fn spend_local() -> Weight; - fn propose_spend() -> Weight; - fn reject_proposal() -> Weight; - fn approve_proposal(p: u32, ) -> Weight; fn remove_approval() -> Weight; fn on_initialize_proposals(p: u32, ) -> Weight; fn spend() -> Weight; @@ -81,50 +78,8 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: `Treasury::ProposalCount` (r:1 w:1) - /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Treasury::Proposals` (r:0 w:1) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - fn propose_spend() -> Weight { - // Proof Size summary in bytes: - // Measured: `177` - // Estimated: `1489` - // Minimum execution time: 24_704_000 picoseconds. - Weight::from_parts(25_484_000, 1489) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: `Treasury::Proposals` (r:1 w:1) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn reject_proposal() -> Weight { - // Proof Size summary in bytes: - // Measured: `335` - // Estimated: `3593` - // Minimum execution time: 26_632_000 picoseconds. 
- Weight::from_parts(27_325_000, 3593) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: `Treasury::Proposals` (r:1 w:0) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - /// Storage: `Treasury::Approvals` (r:1 w:1) - /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 99]`. - fn approve_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `504 + p * (8 ±0)` - // Estimated: `3573` - // Minimum execution time: 8_436_000 picoseconds. - Weight::from_parts(11_268_438, 3573) - // Standard Error: 1_039 - .saturating_add(Weight::from_parts(70_903, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Treasury::Approvals` (r:1 w:1) - /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: Treasury Approvals (r:1 w:1) + /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { // Proof Size summary in bytes: // Measured: `161` @@ -232,50 +187,8 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: `Treasury::ProposalCount` (r:1 w:1) - /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Treasury::Proposals` (r:0 w:1) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - fn propose_spend() -> Weight { - // Proof Size summary in bytes: - // Measured: `177` - // Estimated: `1489` - // Minimum execution time: 24_704_000 picoseconds. 
- Weight::from_parts(25_484_000, 1489) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: `Treasury::Proposals` (r:1 w:1) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn reject_proposal() -> Weight { - // Proof Size summary in bytes: - // Measured: `335` - // Estimated: `3593` - // Minimum execution time: 26_632_000 picoseconds. - Weight::from_parts(27_325_000, 3593) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: `Treasury::Proposals` (r:1 w:0) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - /// Storage: `Treasury::Approvals` (r:1 w:1) - /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 99]`. - fn approve_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `504 + p * (8 ±0)` - // Estimated: `3573` - // Minimum execution time: 8_436_000 picoseconds. 
- Weight::from_parts(11_268_438, 3573) - // Standard Error: 1_039 - .saturating_add(Weight::from_parts(70_903, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Treasury::Approvals` (r:1 w:1) - /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: Treasury Approvals (r:1 w:1) + /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { // Proof Size summary in bytes: // Measured: `161` From cc387132facc83214bd3e67f0ae724bf617f0292 Mon Sep 17 00:00:00 2001 From: Tin Chung <56880684+chungquantin@users.noreply.github.com> Date: Tue, 18 Jun 2024 19:30:13 +0800 Subject: [PATCH 42/52] Add set_partial_params dispatchable function (#3843) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # ISSUE - Link to issue: https://github.com/paritytech/polkadot-sdk/issues/3617 # Description > Any set parameter / update config call with multiple arguments should have each argument to be an Option field. Please put this to some best practice document. This allows new update config call does not need to duplicate the fields that does not need to update. It also makes concurrent votes of update call possible, otherwise there will be race condition. It also helps with review such proposal otherwise reviewers need to check the other fields should remain the same. - [ ] Concurrent call & race condition testing - [x] Each argument of the `ParamsType` is an `Option` field. 
Introduce through ```rust pub type PartialParamsOf = ParamsType>::Balance>, Option>, RANK_COUNT>; ``` # Outcome ```rust let params = ParamsType { active_salary: [None; 9], passive_salary: [None; 9], demotion_period: [None, Some(10), None, None, None, None, None, None, None], min_promotion_period: [None; 9], offboard_timeout: Some(1), }; CoreFellowship::set_partial_params(signed(2), Box::new(params.clone())), ``` Test coverage ```diff running 21 tests test tests::unit::__construct_runtime_integrity_test::runtime_integrity_tests ... ok test tests::unit::basic_stuff ... ok test tests::integration::test_genesis_config_builds ... ok test tests::integration::__construct_runtime_integrity_test::runtime_integrity_tests ... ok test tests::unit::auto_demote_offboard_works ... ok test tests::unit::auto_demote_works ... ok test tests::unit::get_salary_works ... ok test tests::unit::active_changing_get_salary_works ... ok test tests::integration::swap_bad_noops ... ok test tests::unit::promote_postpones_auto_demote ... ok test tests::unit::infinite_demotion_period_works ... ok test tests::unit::proof_postpones_auto_demote ... ok test tests::unit::induct_works ... ok test tests::unit::set_params_works ... ok test tests::unit::test_genesis_config_builds ... ok test tests::unit::offboard_works ... ok test tests::unit::sync_works ... ok + test tests::unit::set_partial_params_works ... ok test tests::integration::swap_exhaustive_works ... ok test tests::unit::promote_works ... ok test tests::integration::swap_simple_works ... ok test result: ok. 21 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.01s Doc-tests pallet_core_fellowship running 0 tests test result: ok. 
0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s ``` polkadot address: 19nSqFQorfF2HxD3oBzWM3oCh4SaCRKWt1yvmgaPYGCo71J --------- Co-authored-by: Dónal Murray Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Bastian Köcher --- .../pallet_core_fellowship_ambassador_core.rs | 11 ++++ .../pallet_core_fellowship_fellowship_core.rs | 11 ++++ prdoc/pr_3843.prdoc | 17 ++++++ .../frame/core-fellowship/src/benchmarking.rs | 39 +++++++++++++ substrate/frame/core-fellowship/src/lib.rs | 55 +++++++++++++++++++ .../frame/core-fellowship/src/tests/unit.rs | 34 ++++++++++++ .../frame/core-fellowship/src/weights.rs | 21 +++++++ 7 files changed, 188 insertions(+) create mode 100644 prdoc/pr_3843.prdoc diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs index f40940a8b25f..dbe681f51bb2 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs @@ -58,6 +58,17 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo< .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `AmbassadorCore::Params` (r:0 w:1) + /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + fn set_partial_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_000_000 picoseconds. 
+ Weight::from_parts(11_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } /// Storage: `AmbassadorCore::Member` (r:1 w:1) /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) /// Storage: `AmbassadorCollective::Members` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs index 471ee82ead72..7e6264c0c10d 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs @@ -57,6 +57,17 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo< .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `FellowshipCore::Params` (r:0 w:1) + /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + fn set_partial_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_000_000 picoseconds. 
+ Weight::from_parts(12_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } /// Storage: `FellowshipCore::Member` (r:1 w:1) /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) /// Storage: `FellowshipCollective::Members` (r:1 w:1) diff --git a/prdoc/pr_3843.prdoc b/prdoc/pr_3843.prdoc new file mode 100644 index 000000000000..e01900dcc25b --- /dev/null +++ b/prdoc/pr_3843.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Introduce a new dispatchable function `set_partial_params` in `pallet-core-fellowship` + +doc: + - audience: Runtime Dev + description: | + This PR adds a new dispatchable function `set_partial_params` + to update config with multiple arguments without duplicating the + fields that does not need to update. + +crates: + - name: pallet-core-fellowship + bump: major + - name: collectives-westend-runtime + bump: patch diff --git a/substrate/frame/core-fellowship/src/benchmarking.rs b/substrate/frame/core-fellowship/src/benchmarking.rs index b3ee3ab7d165..2dabab3983d0 100644 --- a/substrate/frame/core-fellowship/src/benchmarking.rs +++ b/substrate/frame/core-fellowship/src/benchmarking.rs @@ -85,6 +85,45 @@ mod benchmarks { Ok(()) } + #[benchmark] + fn set_partial_params() -> Result<(), BenchmarkError> { + let max_rank = T::MaxRank::get().try_into().unwrap(); + + // Set up the initial default state for the Params storage + let params = ParamsType { + active_salary: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + passive_salary: BoundedVec::try_from(vec![10u32.into(); max_rank]).unwrap(), + demotion_period: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + min_promotion_period: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + offboard_timeout: 1u32.into(), + 
}; + CoreFellowship::::set_params(RawOrigin::Root.into(), Box::new(params))?; + + let default_params = Params::::get(); + let expected_params = ParamsType { + active_salary: default_params.active_salary, + passive_salary: BoundedVec::try_from(vec![10u32.into(); max_rank]).unwrap(), + demotion_period: default_params.demotion_period, + min_promotion_period: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + offboard_timeout: 1u32.into(), + }; + + let params_payload = ParamsType { + active_salary: BoundedVec::try_from(vec![None; max_rank]).unwrap(), + passive_salary: BoundedVec::try_from(vec![Some(10u32.into()); max_rank]).unwrap(), + demotion_period: BoundedVec::try_from(vec![None; max_rank]).unwrap(), + min_promotion_period: BoundedVec::try_from(vec![Some(100u32.into()); max_rank]) + .unwrap(), + offboard_timeout: None, + }; + + #[extrinsic_call] + _(RawOrigin::Root, Box::new(params_payload.clone())); + + assert_eq!(Params::::get(), expected_params); + Ok(()) + } + #[benchmark] fn bump_offboard() -> Result<(), BenchmarkError> { set_benchmark_params::()?; diff --git a/substrate/frame/core-fellowship/src/lib.rs b/substrate/frame/core-fellowship/src/lib.rs index 94339b85d052..6f0bb77714d9 100644 --- a/substrate/frame/core-fellowship/src/lib.rs +++ b/substrate/frame/core-fellowship/src/lib.rs @@ -222,6 +222,11 @@ pub mod pallet { pub type ParamsOf = ParamsType<>::Balance, BlockNumberFor, >::MaxRank>; + pub type PartialParamsOf = ParamsType< + Option<>::Balance>, + Option>, + >::MaxRank, + >; pub type MemberStatusOf = MemberStatus>; pub type RankOf = <>::Members as RankedMembers>::Rank; @@ -558,9 +563,59 @@ pub mod pallet { Ok(Pays::No.into()) } + + /// Set the parameters partially. + /// + /// - `origin`: An origin complying with `ParamsOrigin` or root. + /// - `partial_params`: The new parameters for the pallet. + /// + /// This update config with multiple arguments without duplicating + /// the fields that does not need to update (set to None). 
+ #[pallet::weight(T::WeightInfo::set_partial_params())] + #[pallet::call_index(9)] + pub fn set_partial_params( + origin: OriginFor, + partial_params: Box>, + ) -> DispatchResult { + T::ParamsOrigin::ensure_origin_or_root(origin)?; + let params = Params::::mutate(|p| { + Self::set_partial_params_slice(&mut p.active_salary, partial_params.active_salary); + Self::set_partial_params_slice( + &mut p.passive_salary, + partial_params.passive_salary, + ); + Self::set_partial_params_slice( + &mut p.demotion_period, + partial_params.demotion_period, + ); + Self::set_partial_params_slice( + &mut p.min_promotion_period, + partial_params.min_promotion_period, + ); + if let Some(new_offboard_timeout) = partial_params.offboard_timeout { + p.offboard_timeout = new_offboard_timeout; + } + p.clone() + }); + Self::deposit_event(Event::::ParamsChanged { params }); + Ok(()) + } } impl, I: 'static> Pallet { + /// Partially update the base slice with a new slice + /// + /// Only elements in the base slice which has a new value in the new slice will be updated. + pub(crate) fn set_partial_params_slice( + base_slice: &mut BoundedVec>::MaxRank>, + new_slice: BoundedVec, >::MaxRank>, + ) { + for (base_element, new_element) in base_slice.iter_mut().zip(new_slice) { + if let Some(element) = new_element { + *base_element = element; + } + } + } /// Convert a rank into a `0..RANK_COUNT` index suitable for the arrays in Params. /// /// Rank 1 becomes index 0, rank `RANK_COUNT` becomes index `RANK_COUNT - 1`. 
Any rank not diff --git a/substrate/frame/core-fellowship/src/tests/unit.rs b/substrate/frame/core-fellowship/src/tests/unit.rs index 9245e5159a90..5d6d59c5c891 100644 --- a/substrate/frame/core-fellowship/src/tests/unit.rs +++ b/substrate/frame/core-fellowship/src/tests/unit.rs @@ -187,6 +187,40 @@ fn set_params_works() { }); } +#[test] +fn set_partial_params_works() { + new_test_ext().execute_with(|| { + let params = ParamsType { + active_salary: bounded_vec![None; 9], + passive_salary: bounded_vec![None; 9], + demotion_period: bounded_vec![None, Some(10), None, None, None, None, None, None, None], + min_promotion_period: bounded_vec![None; 9], + offboard_timeout: Some(2), + }; + assert_noop!( + CoreFellowship::set_partial_params(signed(2), Box::new(params.clone())), + DispatchError::BadOrigin + ); + assert_ok!(CoreFellowship::set_partial_params(signed(1), Box::new(params))); + + // Update params from the base params value declared in `new_test_ext` + let raw_updated_params = ParamsType { + active_salary: bounded_vec![10, 20, 30, 40, 50, 60, 70, 80, 90], + passive_salary: bounded_vec![1, 2, 3, 4, 5, 6, 7, 8, 9], + demotion_period: bounded_vec![2, 10, 6, 8, 10, 12, 14, 16, 18], + min_promotion_period: bounded_vec![3, 6, 9, 12, 15, 18, 21, 24, 27], + offboard_timeout: 2, + }; + // Updated params stored in Params storage value + let updated_params = Params::::get(); + assert_eq!(raw_updated_params, updated_params); + + System::assert_last_event( + Event::::ParamsChanged { params: updated_params }.into(), + ); + }); +} + #[test] fn induct_works() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/core-fellowship/src/weights.rs b/substrate/frame/core-fellowship/src/weights.rs index 8fad6f585c11..c1042d0ddfaf 100644 --- a/substrate/frame/core-fellowship/src/weights.rs +++ b/substrate/frame/core-fellowship/src/weights.rs @@ -50,6 +50,7 @@ use core::marker::PhantomData; /// Weight functions needed for `pallet_core_fellowship`. 
pub trait WeightInfo { fn set_params() -> Weight; + fn set_partial_params() -> Weight; fn bump_offboard() -> Weight; fn bump_demote() -> Weight; fn set_active() -> Weight; @@ -74,6 +75,16 @@ impl WeightInfo for SubstrateWeight { Weight::from_parts(8_018_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: CoreFellowship Params (r:0 w:1) + /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) + fn set_partial_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_454_000 picoseconds. + Weight::from_parts(9_804_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } /// Storage: `CoreFellowship::Member` (r:1 w:1) /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::Members` (r:1 w:1) @@ -245,6 +256,16 @@ impl WeightInfo for () { Weight::from_parts(8_018_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: CoreFellowship Params (r:0 w:1) + /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) + fn set_partial_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_454_000 picoseconds. 
+ Weight::from_parts(9_804_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } /// Storage: `CoreFellowship::Member` (r:1 w:1) /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::Members` (r:1 w:1) From 029a6562152fd83d4a4d26ec8e177b443c593872 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 18 Jun 2024 13:38:04 +0200 Subject: [PATCH 43/52] Unify `code_at` logic between `CallExecutor` & `Client` (#4618) This unifies the logic between `CallExecutor` and `Client` when it comes to fetching the `code` for a given block. The actual `code` depends on potential overrides/substitutes. Besides that it changes the logic in the lookahead collator on which `ValidationCodeHash` it sends to the validator alongside the `POV`. We are now sending the code hash as found on the relay chain. This is done as the local node could run with an override which is compatible to the validation code on the relay chain, but has a different hash. 
--- .../consensus/aura/src/collators/lookahead.rs | 12 +- .../consensus/aura/src/collators/mod.rs | 2 +- prdoc/pr_4618.prdoc | 20 + .../service/src/client/call_executor.rs | 222 +---------- substrate/client/service/src/client/client.rs | 23 +- .../service/src/client/code_provider.rs | 348 ++++++++++++++++++ substrate/client/service/src/client/mod.rs | 8 +- .../service/src/client/wasm_substitutes.rs | 6 +- substrate/test-utils/client/src/client_ext.rs | 2 +- 9 files changed, 407 insertions(+), 236 deletions(-) create mode 100644 prdoc/pr_4618.prdoc create mode 100644 substrate/client/service/src/client/code_provider.rs diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 09416233ea9b..b6f7b07f55d3 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -363,13 +363,11 @@ where Ok(x) => x, }; - let validation_code_hash = match params.code_hash_provider.code_hash_at(parent_hash) - { - None => { - tracing::error!(target: crate::LOG_TARGET, ?parent_hash, "Could not fetch validation code hash"); - break - }, - Some(v) => v, + let Some(validation_code_hash) = + params.code_hash_provider.code_hash_at(parent_hash) + else { + tracing::error!(target: crate::LOG_TARGET, ?parent_hash, "Could not fetch validation code hash"); + break }; super::check_validation_code_or_log( diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 6e0067d0cedb..0abc034c1ed6 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -64,7 +64,7 @@ async fn check_validation_code_or_log( ?relay_parent, ?local_validation_code_hash, relay_validation_code_hash = ?state, - "Parachain code doesn't match validation code stored in the relay chain state", + "Parachain code doesn't match validation code stored in the 
relay chain state.", ); }, None => { diff --git a/prdoc/pr_4618.prdoc b/prdoc/pr_4618.prdoc new file mode 100644 index 000000000000..3dd0fce81eee --- /dev/null +++ b/prdoc/pr_4618.prdoc @@ -0,0 +1,20 @@ +title: Unify logic for fetching the `:code` of a block + +doc: + - audience: Node Operator + description: | + Fixes an issue on parachains when running with a custom `substitute` of the on chain wasm code + and having replaced the wasm code on the relay chain. The relay chain was rejecting blocks + build this way, because the collator was reporting the actual on chain wasm code hash + to the relay chain. However, the relay chain was expecting the code hash of the wasm code substitute + that was also registered on the relay chain. + - audience: Node Dev + description: | + `Client::code_at` will now use the same `substitute` to determine the code for a given block as it is + done when executing any runtime call. + +crates: + - name: cumulus-client-consensus-aura + bump: minor + - name: sc-service + bump: minor diff --git a/substrate/client/service/src/client/call_executor.rs b/substrate/client/service/src/client/call_executor.rs index 9da4d2192576..1341aa0e7205 100644 --- a/substrate/client/service/src/client/call_executor.rs +++ b/substrate/client/service/src/client/call_executor.rs @@ -16,19 +16,19 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use super::{client::ClientConfig, wasm_override::WasmOverride, wasm_substitutes::WasmSubstitutes}; +use super::{code_provider::CodeProvider, ClientConfig}; use sc_client_api::{ backend, call_executor::CallExecutor, execution_extensions::ExecutionExtensions, HeaderBackend, }; use sc_executor::{RuntimeVersion, RuntimeVersionOf}; use sp_api::ProofRecorder; -use sp_core::traits::{CallContext, CodeExecutor, RuntimeCode}; +use sp_core::traits::{CallContext, CodeExecutor}; use sp_externalities::Extensions; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, HashingFor}, }; -use sp_state_machine::{backend::AsTrieBackend, Ext, OverlayedChanges, StateMachine, StorageProof}; +use sp_state_machine::{backend::AsTrieBackend, OverlayedChanges, StateMachine, StorageProof}; use std::{cell::RefCell, sync::Arc}; /// Call executor that executes methods locally, querying all required @@ -36,8 +36,7 @@ use std::{cell::RefCell, sync::Arc}; pub struct LocalCallExecutor { backend: Arc, executor: E, - wasm_override: Arc>, - wasm_substitutes: WasmSubstitutes, + code_provider: CodeProvider, execution_extensions: Arc>, } @@ -53,81 +52,15 @@ where client_config: ClientConfig, execution_extensions: ExecutionExtensions, ) -> sp_blockchain::Result { - let wasm_override = client_config - .wasm_runtime_overrides - .as_ref() - .map(|p| WasmOverride::new(p.clone(), &executor)) - .transpose()?; - - let wasm_substitutes = WasmSubstitutes::new( - client_config.wasm_runtime_substitutes, - executor.clone(), - backend.clone(), - )?; + let code_provider = CodeProvider::new(&client_config, executor.clone(), backend.clone())?; Ok(LocalCallExecutor { backend, executor, - wasm_override: Arc::new(wasm_override), - wasm_substitutes, + code_provider, execution_extensions: Arc::new(execution_extensions), }) } - - /// Check if local runtime code overrides are enabled and one is available - /// for the given `BlockId`. 
If yes, return it; otherwise return the same - /// `RuntimeCode` instance that was passed. - fn check_override<'a>( - &'a self, - onchain_code: RuntimeCode<'a>, - state: &B::State, - hash: Block::Hash, - ) -> sp_blockchain::Result<(RuntimeCode<'a>, RuntimeVersion)> - where - Block: BlockT, - B: backend::Backend, - { - let on_chain_version = self.on_chain_runtime_version(&onchain_code, state)?; - let code_and_version = if let Some(d) = self.wasm_override.as_ref().as_ref().and_then(|o| { - o.get( - &on_chain_version.spec_version, - onchain_code.heap_pages, - &on_chain_version.spec_name, - ) - }) { - log::debug!(target: "wasm_overrides", "using WASM override for block {}", hash); - d - } else if let Some(s) = - self.wasm_substitutes - .get(on_chain_version.spec_version, onchain_code.heap_pages, hash) - { - log::debug!(target: "wasm_substitutes", "Using WASM substitute for block {:?}", hash); - s - } else { - log::debug!( - target: "wasm_overrides", - "Neither WASM override nor substitute available for block {hash}, using onchain code", - ); - (onchain_code, on_chain_version) - }; - - Ok(code_and_version) - } - - /// Returns the on chain runtime version. 
- fn on_chain_runtime_version( - &self, - code: &RuntimeCode, - state: &B::State, - ) -> sp_blockchain::Result { - let mut overlay = OverlayedChanges::default(); - - let mut ext = Ext::new(&mut overlay, state, None); - - self.executor - .runtime_version(&mut ext, code) - .map_err(|e| sp_blockchain::Error::VersionInvalid(e.to_string())) - } } impl Clone for LocalCallExecutor @@ -138,8 +71,7 @@ where LocalCallExecutor { backend: self.backend.clone(), executor: self.executor.clone(), - wasm_override: self.wasm_override.clone(), - wasm_substitutes: self.wasm_substitutes.clone(), + code_provider: self.code_provider.clone(), execution_extensions: self.execution_extensions.clone(), } } @@ -175,7 +107,7 @@ where let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; + let runtime_code = self.code_provider.maybe_override_code(runtime_code, &state, at_hash)?.0; let mut extensions = self.execution_extensions.extensions(at_hash, at_number); @@ -215,7 +147,7 @@ where let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; + let runtime_code = self.code_provider.maybe_override_code(runtime_code, &state, at_hash)?.0; let mut extensions = extensions.borrow_mut(); match recorder { @@ -263,7 +195,9 @@ where let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - self.check_override(runtime_code, &state, at_hash).map(|(_, v)| v) + self.code_provider + .maybe_override_code(runtime_code, &state, at_hash) + .map(|(_, v)| v) } fn prove_execution( @@ -281,7 +215,7 @@ where let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_backend); let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - let runtime_code = self.check_override(runtime_code, 
&state, at_hash)?.0; + let runtime_code = self.code_provider.maybe_override_code(runtime_code, &state, at_hash)?.0; sp_state_machine::prove_execution_on_trie_backend( trie_backend, @@ -331,133 +265,3 @@ where self.executor.native_version() } } - -#[cfg(test)] -mod tests { - use super::*; - use backend::Backend; - use sc_client_api::in_mem; - use sc_executor::WasmExecutor; - use sp_core::{ - testing::TaskExecutor, - traits::{FetchRuntimeCode, WrappedRuntimeCode}, - }; - use std::collections::HashMap; - use substrate_test_runtime_client::{runtime, GenesisInit}; - - #[test] - fn should_get_override_if_exists() { - let executor = WasmExecutor::default(); - - let overrides = crate::client::wasm_override::dummy_overrides(); - let onchain_code = WrappedRuntimeCode(substrate_test_runtime::wasm_binary_unwrap().into()); - let onchain_code = RuntimeCode { - code_fetcher: &onchain_code, - heap_pages: Some(128), - hash: vec![0, 0, 0, 0], - }; - - let backend = Arc::new(in_mem::Backend::::new()); - - // wasm_runtime_overrides is `None` here because we construct the - // LocalCallExecutor directly later on - let client_config = ClientConfig::default(); - - let genesis_block_builder = crate::GenesisBlockBuilder::new( - &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), - !client_config.no_genesis, - backend.clone(), - executor.clone(), - ) - .expect("Creates genesis block builder"); - - // client is used for the convenience of creating and inserting the genesis block. 
- let _client = - crate::client::new_with_backend::<_, _, runtime::Block, _, runtime::RuntimeApi>( - backend.clone(), - executor.clone(), - genesis_block_builder, - Box::new(TaskExecutor::new()), - None, - None, - client_config, - ) - .expect("Creates a client"); - - let call_executor = LocalCallExecutor { - backend: backend.clone(), - executor: executor.clone(), - wasm_override: Arc::new(Some(overrides)), - wasm_substitutes: WasmSubstitutes::new( - Default::default(), - executor.clone(), - backend.clone(), - ) - .unwrap(), - execution_extensions: Arc::new(ExecutionExtensions::new( - None, - Arc::new(executor.clone()), - )), - }; - - let check = call_executor - .check_override( - onchain_code, - &backend.state_at(backend.blockchain().info().genesis_hash).unwrap(), - backend.blockchain().info().genesis_hash, - ) - .expect("RuntimeCode override") - .0; - - assert_eq!(Some(vec![2, 2, 2, 2, 2, 2, 2, 2]), check.fetch_runtime_code().map(Into::into)); - } - - #[test] - fn returns_runtime_version_from_substitute() { - const SUBSTITUTE_SPEC_NAME: &str = "substitute-spec-name-cool"; - - let executor = WasmExecutor::default(); - - let backend = Arc::new(in_mem::Backend::::new()); - - // Let's only override the `spec_name` for our testing purposes. 
- let substitute = sp_version::embed::embed_runtime_version( - &substrate_test_runtime::WASM_BINARY_BLOATY.unwrap(), - sp_version::RuntimeVersion { - spec_name: SUBSTITUTE_SPEC_NAME.into(), - ..substrate_test_runtime::VERSION - }, - ) - .unwrap(); - - let client_config = crate::client::ClientConfig { - wasm_runtime_substitutes: vec![(0, substitute)].into_iter().collect::>(), - ..Default::default() - }; - - let genesis_block_builder = crate::GenesisBlockBuilder::new( - &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), - !client_config.no_genesis, - backend.clone(), - executor.clone(), - ) - .expect("Creates genesis block builder"); - - // client is used for the convenience of creating and inserting the genesis block. - let client = - crate::client::new_with_backend::<_, _, runtime::Block, _, runtime::RuntimeApi>( - backend.clone(), - executor.clone(), - genesis_block_builder, - Box::new(TaskExecutor::new()), - None, - None, - client_config, - ) - .expect("Creates a client"); - - let version = client.runtime_version_at(client.chain_info().genesis_hash).unwrap(); - - assert_eq!(SUBSTITUTE_SPEC_NAME, &*version.spec_name); - } -} diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 3c25c233775b..2fbcc3ba4f75 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -18,7 +18,10 @@ //! 
Substrate Client -use super::block_rules::{BlockRules, LookupResult as BlockLookupResult}; +use super::{ + block_rules::{BlockRules, LookupResult as BlockLookupResult}, + CodeProvider, +}; use crate::client::notification_pinning::NotificationPinningWorker; use log::{debug, info, trace, warn}; use parking_lot::{Mutex, RwLock}; @@ -57,10 +60,7 @@ use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_core::{ - storage::{ - well_known_keys, ChildInfo, ChildType, PrefixedStorageKey, StorageChild, StorageData, - StorageKey, - }, + storage::{ChildInfo, ChildType, PrefixedStorageKey, StorageChild, StorageData, StorageKey}, traits::{CallContext, SpawnNamed}, }; use sp_runtime::{ @@ -115,6 +115,7 @@ where config: ClientConfig, telemetry: Option, unpin_worker_sender: TracingUnboundedSender>, + code_provider: CodeProvider, _phantom: PhantomData, } @@ -410,6 +411,7 @@ where Block, BlockImportOperation = >::BlockImportOperation, >, + E: Clone, B: 'static, { let info = backend.blockchain().info(); @@ -438,6 +440,7 @@ where ); let unpin_worker = NotificationPinningWorker::new(rx, backend.clone()); spawn_handle.spawn("notification-pinning-worker", None, Box::pin(unpin_worker.run())); + let code_provider = CodeProvider::new(&config, executor.clone(), backend.clone())?; Ok(Client { backend, @@ -453,6 +456,7 @@ where config, telemetry, unpin_worker_sender, + code_provider, _phantom: Default::default(), }) } @@ -475,13 +479,10 @@ where } /// Get the code at a given block. + /// + /// This takes any potential substitutes into account, but ignores overrides. pub fn code_at(&self, hash: Block::Hash) -> sp_blockchain::Result> { - Ok(StorageProvider::storage(self, hash, &StorageKey(well_known_keys::CODE.to_vec()))? 
- .expect( - "None is returned if there's no value stored for the given key;\ - ':code' key is always defined; qed", - ) - .0) + self.code_provider.code_at_ignoring_overrides(hash) } /// Get the RuntimeVersion at a given block. diff --git a/substrate/client/service/src/client/code_provider.rs b/substrate/client/service/src/client/code_provider.rs new file mode 100644 index 000000000000..8ba7766ea65b --- /dev/null +++ b/substrate/client/service/src/client/code_provider.rs @@ -0,0 +1,348 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use super::{client::ClientConfig, wasm_override::WasmOverride, wasm_substitutes::WasmSubstitutes}; +use sc_client_api::backend; +use sc_executor::{RuntimeVersion, RuntimeVersionOf}; +use sp_core::traits::{FetchRuntimeCode, RuntimeCode}; +use sp_runtime::traits::Block as BlockT; +use sp_state_machine::{Ext, OverlayedChanges}; +use std::sync::Arc; + +/// Provider for fetching `:code` of a block. +/// +/// As a node can run with code overrides or substitutes, this will ensure that these are taken into +/// account before returning the actual `code` for a block. 
+pub struct CodeProvider { + backend: Arc, + executor: Arc, + wasm_override: Arc>, + wasm_substitutes: WasmSubstitutes, +} + +impl Clone for CodeProvider { + fn clone(&self) -> Self { + Self { + backend: self.backend.clone(), + executor: self.executor.clone(), + wasm_override: self.wasm_override.clone(), + wasm_substitutes: self.wasm_substitutes.clone(), + } + } +} + +impl CodeProvider +where + Block: BlockT, + Backend: backend::Backend, + Executor: RuntimeVersionOf, +{ + /// Create a new instance. + pub fn new( + client_config: &ClientConfig, + executor: Executor, + backend: Arc, + ) -> sp_blockchain::Result { + let wasm_override = client_config + .wasm_runtime_overrides + .as_ref() + .map(|p| WasmOverride::new(p.clone(), &executor)) + .transpose()?; + + let executor = Arc::new(executor); + + let wasm_substitutes = WasmSubstitutes::new( + client_config.wasm_runtime_substitutes.clone(), + executor.clone(), + backend.clone(), + )?; + + Ok(Self { backend, executor, wasm_override: Arc::new(wasm_override), wasm_substitutes }) + } + + /// Returns the `:code` for the given `block`. + /// + /// This takes into account potential overrides/substitutes. + pub fn code_at_ignoring_overrides(&self, block: Block::Hash) -> sp_blockchain::Result> { + let state = self.backend.state_at(block)?; + + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + let runtime_code = + state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; + + self.maybe_override_code_internal(runtime_code, &state, block, true) + .and_then(|r| { + r.0.fetch_runtime_code().map(Into::into).ok_or_else(|| { + sp_blockchain::Error::Backend("Could not find `:code` in backend.".into()) + }) + }) + } + + /// Maybe override the given `onchain_code`. + /// + /// This takes into account potential overrides/substitutes. 
+ pub fn maybe_override_code<'a>( + &'a self, + onchain_code: RuntimeCode<'a>, + state: &Backend::State, + hash: Block::Hash, + ) -> sp_blockchain::Result<(RuntimeCode<'a>, RuntimeVersion)> { + self.maybe_override_code_internal(onchain_code, state, hash, false) + } + + /// Maybe override the given `onchain_code`. + /// + /// This takes into account potential overrides(depending on `ignore_overrides`)/substitutes. + fn maybe_override_code_internal<'a>( + &'a self, + onchain_code: RuntimeCode<'a>, + state: &Backend::State, + hash: Block::Hash, + ignore_overrides: bool, + ) -> sp_blockchain::Result<(RuntimeCode<'a>, RuntimeVersion)> { + let on_chain_version = self.on_chain_runtime_version(&onchain_code, state)?; + let code_and_version = if let Some(d) = self.wasm_override.as_ref().as_ref().and_then(|o| { + if ignore_overrides { + return None + } + + o.get( + &on_chain_version.spec_version, + onchain_code.heap_pages, + &on_chain_version.spec_name, + ) + }) { + tracing::debug!(target: "code-provider::overrides", block = ?hash, "using WASM override"); + d + } else if let Some(s) = + self.wasm_substitutes + .get(on_chain_version.spec_version, onchain_code.heap_pages, hash) + { + tracing::debug!(target: "code-provider::substitutes", block = ?hash, "Using WASM substitute"); + s + } else { + tracing::debug!( + target: "code-provider", + block = ?hash, + "Neither WASM override nor substitute available, using onchain code", + ); + (onchain_code, on_chain_version) + }; + + Ok(code_and_version) + } + + /// Returns the on chain runtime version. 
+ fn on_chain_runtime_version( + &self, + code: &RuntimeCode, + state: &Backend::State, + ) -> sp_blockchain::Result { + let mut overlay = OverlayedChanges::default(); + + let mut ext = Ext::new(&mut overlay, state, None); + + self.executor + .runtime_version(&mut ext, code) + .map_err(|e| sp_blockchain::Error::VersionInvalid(e.to_string())) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use backend::Backend; + use sc_client_api::{in_mem, HeaderBackend}; + use sc_executor::WasmExecutor; + use sp_core::{ + testing::TaskExecutor, + traits::{FetchRuntimeCode, WrappedRuntimeCode}, + }; + use std::collections::HashMap; + use substrate_test_runtime_client::{runtime, GenesisInit}; + + #[test] + fn no_override_no_substitutes_work() { + let executor = WasmExecutor::default(); + + let code_fetcher = WrappedRuntimeCode(substrate_test_runtime::wasm_binary_unwrap().into()); + let onchain_code = RuntimeCode { + code_fetcher: &code_fetcher, + heap_pages: Some(128), + hash: vec![0, 0, 0, 0], + }; + + let backend = Arc::new(in_mem::Backend::::new()); + + // wasm_runtime_overrides is `None` here because we construct the + // LocalCallExecutor directly later on + let client_config = ClientConfig::default(); + + let genesis_block_builder = crate::GenesisBlockBuilder::new( + &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + !client_config.no_genesis, + backend.clone(), + executor.clone(), + ) + .expect("Creates genesis block builder"); + + // client is used for the convenience of creating and inserting the genesis block. 
+ let _client = + crate::client::new_with_backend::<_, _, runtime::Block, _, runtime::RuntimeApi>( + backend.clone(), + executor.clone(), + genesis_block_builder, + Box::new(TaskExecutor::new()), + None, + None, + client_config.clone(), + ) + .expect("Creates a client"); + + let executor = Arc::new(executor); + + let code_provider = CodeProvider { + backend: backend.clone(), + executor: executor.clone(), + wasm_override: Arc::new(None), + wasm_substitutes: WasmSubstitutes::new(Default::default(), executor, backend.clone()) + .unwrap(), + }; + + let check = code_provider + .maybe_override_code( + onchain_code, + &backend.state_at(backend.blockchain().info().genesis_hash).unwrap(), + backend.blockchain().info().genesis_hash, + ) + .expect("RuntimeCode override") + .0; + + assert_eq!(code_fetcher.fetch_runtime_code(), check.fetch_runtime_code()); + } + + #[test] + fn should_get_override_if_exists() { + let executor = WasmExecutor::default(); + + let overrides = crate::client::wasm_override::dummy_overrides(); + let onchain_code = WrappedRuntimeCode(substrate_test_runtime::wasm_binary_unwrap().into()); + let onchain_code = RuntimeCode { + code_fetcher: &onchain_code, + heap_pages: Some(128), + hash: vec![0, 0, 0, 0], + }; + + let backend = Arc::new(in_mem::Backend::::new()); + + // wasm_runtime_overrides is `None` here because we construct the + // LocalCallExecutor directly later on + let client_config = ClientConfig::default(); + + let genesis_block_builder = crate::GenesisBlockBuilder::new( + &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + !client_config.no_genesis, + backend.clone(), + executor.clone(), + ) + .expect("Creates genesis block builder"); + + // client is used for the convenience of creating and inserting the genesis block. 
+ let _client = + crate::client::new_with_backend::<_, _, runtime::Block, _, runtime::RuntimeApi>( + backend.clone(), + executor.clone(), + genesis_block_builder, + Box::new(TaskExecutor::new()), + None, + None, + client_config.clone(), + ) + .expect("Creates a client"); + + let executor = Arc::new(executor); + + let code_provider = CodeProvider { + backend: backend.clone(), + executor: executor.clone(), + wasm_override: Arc::new(Some(overrides)), + wasm_substitutes: WasmSubstitutes::new(Default::default(), executor, backend.clone()) + .unwrap(), + }; + + let check = code_provider + .maybe_override_code( + onchain_code, + &backend.state_at(backend.blockchain().info().genesis_hash).unwrap(), + backend.blockchain().info().genesis_hash, + ) + .expect("RuntimeCode override") + .0; + + assert_eq!(Some(vec![2, 2, 2, 2, 2, 2, 2, 2]), check.fetch_runtime_code().map(Into::into)); + } + + #[test] + fn returns_runtime_version_from_substitute() { + const SUBSTITUTE_SPEC_NAME: &str = "substitute-spec-name-cool"; + + let executor = WasmExecutor::default(); + + let backend = Arc::new(in_mem::Backend::::new()); + + // Let's only override the `spec_name` for our testing purposes. + let substitute = sp_version::embed::embed_runtime_version( + &substrate_test_runtime::WASM_BINARY_BLOATY.unwrap(), + sp_version::RuntimeVersion { + spec_name: SUBSTITUTE_SPEC_NAME.into(), + ..substrate_test_runtime::VERSION + }, + ) + .unwrap(); + + let client_config = crate::client::ClientConfig { + wasm_runtime_substitutes: vec![(0, substitute)].into_iter().collect::>(), + ..Default::default() + }; + + let genesis_block_builder = crate::GenesisBlockBuilder::new( + &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + !client_config.no_genesis, + backend.clone(), + executor.clone(), + ) + .expect("Creates genesis block builder"); + + // client is used for the convenience of creating and inserting the genesis block. 
+ let client = + crate::client::new_with_backend::<_, _, runtime::Block, _, runtime::RuntimeApi>( + backend.clone(), + executor.clone(), + genesis_block_builder, + Box::new(TaskExecutor::new()), + None, + None, + client_config, + ) + .expect("Creates a client"); + + let version = client.runtime_version_at(client.chain_info().genesis_hash).unwrap(); + + assert_eq!(SUBSTITUTE_SPEC_NAME, &*version.spec_name); + } +} diff --git a/substrate/client/service/src/client/mod.rs b/substrate/client/service/src/client/mod.rs index 0703cc2b47d1..ec77a92f162f 100644 --- a/substrate/client/service/src/client/mod.rs +++ b/substrate/client/service/src/client/mod.rs @@ -47,14 +47,14 @@ mod block_rules; mod call_executor; mod client; +mod code_provider; mod notification_pinning; mod wasm_override; mod wasm_substitutes; -pub use self::{ - call_executor::LocalCallExecutor, - client::{Client, ClientConfig}, -}; +pub use call_executor::LocalCallExecutor; +pub use client::{Client, ClientConfig}; +pub(crate) use code_provider::CodeProvider; #[cfg(feature = "test-helpers")] pub use self::client::{new_in_mem, new_with_backend}; diff --git a/substrate/client/service/src/client/wasm_substitutes.rs b/substrate/client/service/src/client/wasm_substitutes.rs index 70db0ef20f5a..07ca6c960628 100644 --- a/substrate/client/service/src/client/wasm_substitutes.rs +++ b/substrate/client/service/src/client/wasm_substitutes.rs @@ -94,7 +94,7 @@ impl From for sp_blockchain::Error { pub struct WasmSubstitutes { /// spec_version -> WasmSubstitute substitutes: Arc>>, - executor: Executor, + executor: Arc, backend: Arc, } @@ -110,14 +110,14 @@ impl Clone for WasmSubstitutes WasmSubstitutes where - Executor: RuntimeVersionOf + Clone + 'static, + Executor: RuntimeVersionOf, Backend: backend::Backend, Block: BlockT, { /// Create a new instance. 
pub fn new( substitutes: HashMap, Vec>, - executor: Executor, + executor: Arc, backend: Arc, ) -> Result { let substitutes = substitutes diff --git a/substrate/test-utils/client/src/client_ext.rs b/substrate/test-utils/client/src/client_ext.rs index 73581a4f0efa..9dc4739eb795 100644 --- a/substrate/test-utils/client/src/client_ext.rs +++ b/substrate/test-utils/client/src/client_ext.rs @@ -153,7 +153,7 @@ where Self: BlockImport, RA: Send, B: Send + Sync, - E: Send, + E: Send + Sync, { async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); From e1729592a22973a980ac455557d9c559d7b1a567 Mon Sep 17 00:00:00 2001 From: tianyeyouyou <150894831+tianyeyouyou@users.noreply.github.com> Date: Tue, 18 Jun 2024 19:44:31 +0800 Subject: [PATCH 44/52] chore: remove redundant words. (#4653) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit remove redundant words in comments. Co-authored-by: Bastian Köcher Co-authored-by: Oliver Tale-Yazdi --- substrate/primitives/storage/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/substrate/primitives/storage/src/lib.rs b/substrate/primitives/storage/src/lib.rs index 197994f57471..3b9afae4ca07 100644 --- a/substrate/primitives/storage/src/lib.rs +++ b/substrate/primitives/storage/src/lib.rs @@ -293,7 +293,7 @@ impl ChildInfo { } } - /// Return a the full location in the direct parent of + /// Return the full location in the direct parent of /// this trie. pub fn prefixed_storage_key(&self) -> PrefixedStorageKey { match self { @@ -302,7 +302,7 @@ impl ChildInfo { } } - /// Returns a the full location in the direct parent of + /// Returns the full location in the direct parent of /// this trie. 
pub fn into_prefixed_storage_key(self) -> PrefixedStorageKey { match self { From 6daa939bc7c3f26c693a876d5a4b7ea00c6b2d7f Mon Sep 17 00:00:00 2001 From: Javier Bullrich Date: Tue, 18 Jun 2024 15:12:03 +0200 Subject: [PATCH 45/52] Migrated commands to github actions (#4701) Migrated commands individually to work as GitHub actions with a [`workflow_dispatch`](https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#workflow_dispatch) event. This will not disable the command-bot yet, but it's the first step before disabling it. ### Commands migrated - [x] bench-all - [x] bench-overhead - [x] bench - [x] fmt - [x] update-ui Also created an action that will inform users about the new documentation when they comment `bot`. ### Created documentation Created a detailed documentation on how to use this action. Found the documentation [here](https://github.com/paritytech/polkadot-sdk/blob/bullrich/cmd-action/.github/commands-readme.md). --------- Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Co-authored-by: Przemek Rzad --- .github/actions/set-up-gh/action.yml | 36 ++++ .github/command-screnshot.png | Bin 0 -> 31548 bytes .github/commands-readme.md | 199 +++++++++++++++++++ .github/workflows/command-bench-all.yml | 97 +++++++++ .github/workflows/command-bench-overhead.yml | 75 +++++++ .github/workflows/command-bench.yml | 122 ++++++++++++ .github/workflows/command-fmt.yml | 54 +++++ .github/workflows/command-inform.yml | 15 ++ .github/workflows/command-update-ui.yml | 55 +++++ scripts/bench-all.sh | 16 ++ scripts/bench.sh | 117 +++++++++++ scripts/command-utils.sh | 80 ++++++++ scripts/lib/bench-all-cumulus.sh | 139 +++++++++++++ scripts/lib/bench-all-pallet.sh | 96 +++++++++ scripts/lib/bench-all-polkadot.sh | 88 ++++++++ scripts/lib/bench-all-substrate.sh | 148 ++++++++++++++ scripts/lib/bench-overhead.sh | 66 ++++++ scripts/lib/bench-pallet.sh | 178 +++++++++++++++++ scripts/sync.sh | 74 +++++++ 19 files changed, 
1655 insertions(+) create mode 100644 .github/actions/set-up-gh/action.yml create mode 100644 .github/command-screnshot.png create mode 100644 .github/commands-readme.md create mode 100644 .github/workflows/command-bench-all.yml create mode 100644 .github/workflows/command-bench-overhead.yml create mode 100644 .github/workflows/command-bench.yml create mode 100644 .github/workflows/command-fmt.yml create mode 100644 .github/workflows/command-inform.yml create mode 100644 .github/workflows/command-update-ui.yml create mode 100755 scripts/bench-all.sh create mode 100755 scripts/bench.sh create mode 100644 scripts/command-utils.sh create mode 100755 scripts/lib/bench-all-cumulus.sh create mode 100644 scripts/lib/bench-all-pallet.sh create mode 100644 scripts/lib/bench-all-polkadot.sh create mode 100644 scripts/lib/bench-all-substrate.sh create mode 100644 scripts/lib/bench-overhead.sh create mode 100644 scripts/lib/bench-pallet.sh create mode 100755 scripts/sync.sh diff --git a/.github/actions/set-up-gh/action.yml b/.github/actions/set-up-gh/action.yml new file mode 100644 index 000000000000..fc16ce0b2633 --- /dev/null +++ b/.github/actions/set-up-gh/action.yml @@ -0,0 +1,36 @@ +name: 'install gh' +description: 'Install the gh cli in a debian based distro and switches to the PR branch.' 
+inputs: + pr-number: + description: "Number of the PR" + required: true + GH_TOKEN: + description: "GitHub token" + required: true +outputs: + branch: + description: 'Branch name for the PR' + value: ${{ steps.branch.outputs.branch }} +runs: + using: "composite" + steps: + - name: Instal gh cli + shell: bash + # Here it would get the script from previous step + run: | + (type -p wget >/dev/null || (apt update && apt-get install wget -y)) + mkdir -p -m 755 /etc/apt/keyrings + wget -qO- https://cli.github.com/packages/githubcli-archive-keyring.gpg | tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null + chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg + echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null + apt update + apt install gh -y + git config --global --add safe.directory '*' + - run: gh pr checkout ${{ inputs.pr-number }} + shell: bash + env: + GITHUB_TOKEN: ${{ inputs.GH_TOKEN }} + - name: Export branch name + shell: bash + run: echo "branch=$(git rev-parse --abbrev-ref HEAD)" >> "$GITHUB_OUTPUT" + id: branch diff --git a/.github/command-screnshot.png b/.github/command-screnshot.png new file mode 100644 index 0000000000000000000000000000000000000000..1451fabca8b975534778e8321facd261e3b803fb GIT binary patch literal 31548 zcmdSAg;!hK^ZyMLEf(A%K!M`!TA)~qyIU!xxVsfE?oL|V-QC^Y-Q8V&>FvFr_WKt+ z>sjk0D=W!4nX~uI-ZQUxCrD049Qif=YX}GkWJw881qcYJKnMuPFnCz-JD9iUJP;5_ zf+iv&a*`qu_k#}c+=a2Q}gPs?FF=h=3 zrZ{Si4Q83VCMeuUWbrkGdh9^#Cx{;oc;_KdIDvs>zY?i?K1F;MPL;!#-Bkg>b zp3-YY0g7WlS{Fi6h3An@7zqkcLtXd#*4WCGHALCBsg}i9NvhrcnflIFD`>G~aORbX z1JsAS7aqk{`F+E$eu-z207cU3w*X2p;X>FlLmdaLgz?WG^(lF6M#acNW4kONtqX-P z4@*XSUXN3}E0cN{UuhjtAk9r1zpu_iyxX1lYDQAc_|Hz9V|KJbT(! 
zcM1dP{k_g`2})i#MPbK>Yei6n89V2@ZF=l=Z6c6LtvmVMVU#K$hcK9Gy8|X~v}dC0 zZtUzk*I7HA#g|GlVmCZ^I5PkQuFz{!`~;+OuhOQPGMr^E;7kw z@^3R-a`4Z-5)kM9-FE}7Fh3y9bpzD8rXVv1ToE_P%i9zKZQtScdfzDAy&a3F5j}-w zY4>#6Y47(r+is;fc)585KBVfuqtxn?fa7PGgrg7g?u0OaXAb-lOt30Yi$jc3;!(=8 z@o~}6ig@J+K^B3?SKRl&SEl_)vYo6?R$k0YCYL00@cn#vU2D;gY}(^+R}uT%kb`yD zR!B#054Bpx%fr{|)hrh3D!76sFXpg98OfW@>)8uVXVhhqz7B&u>|5VM*BARN-~-bF4+SgUX-n8<HI>b_%^a60i5sj(wHJ{d=PVq48=4h*3M5XOkrmORUqKFHBZT`Z(QOD29^ogcohS+OJ32JiPCR`w z0SO>_SSMMZ1WlB8Ab^?$BQZRNbgP%=6RU#H;d>NPs>lloI2!`zz)N6Br0iS&{Pg); zw6A_)qKR)2kwrGf5Wc|jz|e~Zk5TO6SK=@Ear~ef3*NP_#Ble%HiS@Yvtw2qgfuiT zqkH|KZJZf_gMhY+ru1teimCs|9nEyml>v!0nM@B}?JKP|QYWb4j-1tsv#whnH@Ic5 znswE)04I{?P$6<$^h1O+XcT=qQXi&RIpJKO89Cvb6o1#e;L|XPH~Z*CLC02%lo%C( ztbxHn2SHbz7@a8Vh6aK`qT^I^(T$SwZy2aSO2Qf+=HK(k)C=#ExdRI#qg zp+5Hbg}Tyh9$f?Z2=fT?$Om70NIo>9TQxaDC2#tN!~3Hwp7%=%saXQ@O>#}b?k)Ee zS31bw{bOM@eF~U0GZZ!=x}`%sr0cie4!s@H8tO{oWgjthFpaD%tc0q%)#%dDtdud~ z&o;EP{gWNDfkGyP5BSTTF zsk^@0qg%b3HZ&aXCSg?C?V~Rxt#nv2pYbk}S_ok`z8G!B$mA$nr_$H@xyq%>io`1Q zZ|afiO>^$H`P>tHOKw@t!OkGRp!dFAAA>c5dE+oYWE*8~eI2NXJ1HtGiYmlbW7n|P zJW#hSL@Y8bI++_ab2HDLRa(%hpqOVduP~ok(5V(V(mG-}qB&xi%ec0<7Jj(8CL)I? 
z=MxJQQwp!$`?23@^VTZY-f1`Y0QXpJb9VRECbO$ICUQVLjp%NuZ;HCdq+nu6UL!+T z94quI@zwm?aN>^Eo|`$Fx!ve4cSf>Ui%pp$TCJH?-IkPNsFn8W{QCXW(314J%FgtL z#ai0F&Xz}X-cZPAhR#t4^D0ClET3dTkx#niK2A##4# z96FNS$W+d{I-6~rYQ%99e3tSEZ`G7zaEd!l)k#}UT}v$^%b=RztRSmU?Zqy}XzqXA zH=l4Nz)R!mdMzZQAmh%!&B=ZfA@L!G{%fikpY!}CN?B8_=puKQZGS$sdlXY&HQyUc zVI9S0j>kyL=y?gxNCCQ2>Rh^8CU)!nXOpA}(TVx&1!-NjCv|D)9LNG-owf{dfUMr%#pn$0zStczWv5S=N24ptXQ0;R1jNB-E6Lt#o31LH_tqJ zdD_UBMXl8;y$xZvpH})0>0`mu4EHMij7@UeN-FZ#M$DFHe!AjM_1iPv6nh`$}~mD_DCC`YTLmRRM@)+W_!)Ysce-zH^OVVlMm zJ(V-qE?r5z^xxBTX^}D3T^{( z!UAeYnE4f)m3DT=4JQq|dSJtFh))Db1;$)iHa0inB}L0DjlT`2 z@C@*{KBnB%k?-)2kkq#}jyjDU8HgmCF0`#^dkW4TPcK{bJt<5Wg%1o78S;c_pEcQ> ze{G#&o#C2c&6s@eV9u9#tS+bZW6CmbSYq>XL)l&8gOdF3ZamGtYzb>MX(fm$rsQN6<>V$LLGx2KmLt zoU9}$%{}ej@6u>Ub)jTvrM-EHpqB_2U`wHOR{{Y`o0NvqdT<0W8U^{I;{wkrm5MtoIJzY9-ZXAvUT=jDXC>GWiq62i5*eFJAwl>}9Y4cW^H)hmpcUkPsH>$z=K zAz+PLPjPdma*^%S^85PWoyekKjZpSJl!wI&%zdkyVW3%BF1VuLGE$TLEG-Q|1AYw; z0U2Nd0S$fy34Y*%AK>ES2h@M>Kn4DI_1|mAuwNfO9$UshKnOudioR2Ff;>!xb5nYc z(`EU($uScSvP>Qm6EZeEkVv#k7Jx|B^?kO%AS;k85Ev+dg-JKtRjeili6s|-GHxnN zZY61%g>Y!WmVRCPFRr!J7r)O7xAE|hlZpYX=kZa+FlFYIbS zF|RQYTEgp}1E_&O_;HSi%^F?|kjNA$x@8_M945hL@WHrut;MV=a#~YGW-p2*wx}wO zhC7Ep3PlWnTx1;a4 z8$C-1c}is!P>C@mrIl}3r^-~B{x=6gQ2OFPTw9b1(->NGQZdBwbs*8k21Td>;_r%n}#DC00V_ujR}FF7&%b|2Gm5LTT{68ie(+ zEQqR$wN~&rG%CW#p7Amm-*Y4E1@EU($e1ne!h8~^eqP~!$M$=0w5UjHw=t5E+>7eU zavCFxN=U%%RorvMNsIj7B_R+9m7yYR0#Jye_#dtZiZ}Vo-FGb9J)N+FW2hMc!6w-( zu(%GxbbwGTrF;d(S1U~-x~TeU%0-jZO1)uHIXHu_{!CSb5EllQ(`kg69Bomk+~mS} z3|*?gX5rW49qBG5#?Ai(w(&u@Lf$IpZYjEapKGQzi1E<6^ zs6VHDzq$vHH2d>B2NEhIOBmR*_{Z*Xhuq#k5&ShZaS)Y6Y{Zpm({2Dihd^%WH#Df} zuYn36l1QaOH5uKa5bgjrbRgt^hxaa-9vd+mNhN4`j8Yp_7_-~dI(NPhBjy`lrDX{v zgi#+1_1M*CJ<~syK|udF`*EeG@j#9IrG{TgSbpRk+d6m@3ZzT*XZ!ud{_{w%QGitF zkZIO=)CrVOi4eyw()PTsBZRou+?jvRl_(Wt#?_Cf5GFPRPugD-~K-ra;o~h*TMFITW zN)bVdKEWSovOY@W<50o+@$dU=f@WdUOQKY=1x(;4M*^BcjR*b=m=`n@29pM0eiY9D zB_*-!Z)3{dwbH3b5*SoZpiV3Wm9=>lz4^y+21 z|6Z^Xgx7M%tTRDr)}N18Wq~4y*+cJ?)&c!vVQddOJKTRe^d!0$tS>ej`G5vc0awi2 
zvqJx`Qxm<$e1i~Yh~l^^WsiZ0&+VcmCX+5Y*U5;E-#=Si4gNmtVstVg-YB0 zJ}nVm&3pGmV=yKf?eDkcKeSGLGYM9zb>aTXn~eOIFO5Kt2P783ja~Q+O8jT?uZWm} zc7t9=yAs91|9im9&`xK(G=TMn!)1OxMHGMe026cw<>eVLNtF2S+cSvwa$%`hZ~|d) zdh7oe%iy!Tb17E82w`QyF@NPv`pX>eMzmf);C`L!{~29e@R!>QSW&WV~ zejOuEyC7~*w^WXy;gN=*#bC~WAS|XejEHwD?SVLkWc&FoH8pkpe7xfxoe#*BMznx z>DY2wN%M0zN7>h@d>h-9?hSKcs)&!u(u*LxI84NoI!;v#x|Q7z`IQZa8y zW%=*8a@f?!kbbX#qLkiVP|PgJQ41P|U#y0bs-RHiw)$f?XY1|x=_A0;1!)_#raKII zYGvOhm2C+>#L?~DU907mS+3}$wn#*hC1SJa2`ogCn}qYwY1dbV&$d2FA=PxCqoZfp z?J4jaKG%3Wd6qjY@klPG79PaMD2YqO_NSoE)Y?;PR3=z>=DQ583%=0BrZzbANm8W& z0sP!}+e<_ex}E4aIE+REacC1^MGMt)0|hF1WD7NxI9C&*{jv0>8%@x1C7$sm5z+*l zeQhFeFh)}aj)iY1d0Nbs-dQ9Ix6MpqAHCNJ1o|2 z*g9Pk;URYS=GnLxe9_3qz~6cdh%sZhF$?m3$!!$SH^^4u;Jw1cW7);a({517W`j?I zBk}ks@VLA2(WJ>mhw)~awN-r3s6U37`RgUFvq|S$_v!7!icgQVV;l#^OAT2j71-Bq zgS6zEi?;I%7o0gQ6sUstz5)Tc33t6@7nT39cJOZfLv&{N&7yryYsZKGmcV$SkMN>_9Rnqe^DY)z5bz>e zRVJQkjW5Rw^RA!A*JAgl8gzxu{P;@32pCfdMY zq!vFW@lCyqn~jjp7B>lqC{^-h_=Vs;yH(F@a-2Y7~L62g{3XliHaN?2? zVZUyMv`p)ffbS3@BkqF9AbC;H=@WXoJ$9|AqHY1o&- z=pz4j1@H8Svpx`7ZZs;gudX@W3aTy>Oe_zWB^Iy zO83yynK6Msu8;X#x$^VO6Avwlyr5Z`&0(4S)`DeA{D=T3eR}Veg<7 z6?RmLhXyPAYr+!$Tw2)v{Eh0opBttwmzWz_-)_#P#>lJU(Fch=5B0gzJ5LN{`HTiy zp~5!8Epw?_uo<*^yN$7Kx8ilXu6U;N)fMZtrX`u|_#S*v&22V))PkFHS0Mz(S%T>7 z&9{Iuv`cm4>Nv}0>4E_g&qpH|GU!y(Sx&2o>>-!1H_)B_$OL9K(cPhTxCOpTw`Ydp zA1=0Y_A5;%OV7x^&N>^^8=p#7k*sx~E?EY!n(Oa*CqG=rK{_rUSGe>Eq7JXoH$8U? 
zIP~ItGkmONb`YRY5xm;UWC z;%5~?G8D4yu>caLJ}7HL?0)FzlBRbNP`C2yj41MUrT1u0mYt;ie#SG}Unc7&vcO+& zsyg?r_lIHsFP&k6HV_6@b1yiyb-APR72+uBVOSvf8Poy_lC^F9@Ss0bOlUqd%J6o~ zkYD$JJnQ1&kZPnzh{edZ@!CBejvU=PJ>6Yy%60cYt2wu|`*AE`34QKG528x3Y8*0z zI?bI@EpQK|S0Ww|au5rifO>EIy<5YMH8+PvgAHTDRP89Dz z^uPd=EOR{cT-Q=PQrZr=-AsfMghr!c{S3oF-p2iXVw+<0VAJX1s=k3Z`Wk)cRnkk( zI=trxMnl}$P{$73VGbRlMFPzkJ&!j?B6+yy90dU_k|fw5&J#{e(clTz&J;X@_hi)` z2%p%eG7v(GwCcv8-NUj41R{xLV3G0SB$Zri>3Zl)A64j7ixs#h*ue?JMG)%@ct%E$ zzgc}dvV>AEy&qdzWE*wI75_KOt0RrzfMe!VCG2W^woQ*?(2g_7ol$}dpSLagBENE{ zRHTtgy5jy+rw4O3v*Jbz&?&xwy?8EVxd|Q-81I@a@hn_IhbH@aTS{ti#?xqeV?nK* zAA=&|yZuS6D&#KMa>PyCxNDSUABy)T#o^v*IV)Wv)TwagPu~3K-K&8|L z5%`{F{&-;al{Osvo!6s%1QzlKCRoDhe;TAaW!gDDTR3sUae>Ac{vaU%a9TTVa`hT? zvMqnS?U)fL;3P$#Db~&jHn+3s{yMo4duuvVES;^I^MSG2dMcl2LxpO<%9KJWR6ncC zSxd7Z&g85(-YQ+gOaCy%!Q5>dp}xRjY0WB{#I}Ew*pLcjmyWP~S9?3MawSZl*gC4; zv9+VSojCD>ugLWjJUNNadc>QyZo^FSVT1UiOi{q{3Dh9x3EX6APw zkdr}oqkt3s(m@2IEUb6W+3PCQHKTZ4iUO6Q>6xud-C*pT1-g<@&klcC!J;}8!wL&l zOaY5W=;Y;oOZj&jVkQO3k&yPbJ8)#jkv(axZ5kaLHrH13gOE^aep;1ZhAIYgL1A3 zqD)pd6o1N`dk%(4>-$fG4Q^4L6xIBgvKzez@RVKWi4f0J1YT5|&wr6kK107goDyf? 
zYRPcH|0sR;96)k(%6fj?oR>55TBUPqibkan*rAI{wp3iFgX8U-=(zIWHIH~^)Ntmg zGr%!puefljXG)BF|FkX>fAG`!@Z{!<3Up_N=u=&XP1kIZ_!9|7NzlJsnlAY_nmkAF zZ4&YoKWb8E&D0DhC}e7V_WVo?T7ba~OS`>SYoe|o@N7CqH)!Fmz#1yW(Y1-T$Gt#; zTTDFLL~#Pb3Eos5DvjuLr};1iVv}px`QD?>=x2OE(siw>Hea}f@4P-*xYzo=_Suo# zaM02Y<_(LXYa(^DPW-TTakqfIKdnuchiuPSrvQ&xGTW zfrgg6S^-lPbE)RkQiqyF_Bp|WPwJOgPrpG1osw(fJ7Z*-B3ZsH7JxIS7?kvz5vyk zm8huw-qN2wm(hlg6IR4@&EEYS)@S>0q85W7kgIv_1KQ2`$qrYJ<;1w{3~|jf(!U-{ z;$6EL6GI;L{057U#Hp)Zx;{1(e;RpVy4|Pt%NV*mR{RD&+bLh8DZcNW&%xCG?CY(8 zIZ~tOxGA(`WWo$Wu?I?U)N(k zoIA{^9H}s6zoI{u=5COANOo?`!JO^Ju(s^8`|lJ$6q`YEVVW_|Wy{W3cs^Q*OaWLf z@;6R6mj%*#1pdH}Ks2xw%;RHblz+@{q7d@ljm5n9 z!yAP{06&C6BEvqj{uzK)TFBR;C7B7~e{41Ig@7>#v13b?`~z=v5afU^6kTqAYz2^m ztuV~YnEUw0jQ>xDOelROWG%+>#oMRc*75tBlR4UlYmx|&0F(g5D>WSSKoh8-AOwX< z34Lny#)KGZW#R9b444yqvrfP44itq#qX3X1aaznV@H$Y4lbKE@LK}R8@F=AqnWyce zb~WnQHdm;&lgv{nBlI))cqu>t9u)vRG?^ICVxdfP+G5tME}lVMSn4e*3?J61+Asdy z?gIf$#`i$o-_b(L{r=@gM|eHHGOgrYc~*NyR3XeiQzTRkO(qPqn6DRcB^yPD?|8k{ zFNgVgq3Giq?{DN@BxV#C!IFLGU6;o82iK_@Z`a|Le|7|9+4z;isB$kS{xeyv7~Z8g zfv1E939nW1gt_^(w(^VyMWNovtwIB_3zTRW-^b*5UQ8d|SEM9F2+10F-yqTcL(Atd z;184s6Kucu$5NwIoJeFxfjNIGpQxL^{_V$&3hCb-53dWQPY);q!uabOxwZYeFS2+D zfsR|cjzMEy8_kZSU%@V9hXJqY4Ug)h{x4nk?xoTb7`Qc?2*$m8>cjrUr5U|Tu>xmz z0I=b&RH?;J2qNf%>7j1v2pH4vLVB}yf8Z^#`g9N^5>dzVyCU}Bq< zn4jM8!>^zLy1@Gu33Up5WjeH#02>%c47JT_F0e5+!BXw}>jLQkzbN_-gLun+l;_b1+f9Pn?t#aL}Ew!X*G z8r`)FWC?@WVg4sc*LPZODTInu`IxlWPqKE-*0&$dw=eN*JbJ>;PeoAGVE;v3zkUfD z{7aY5$CG#GX0FW+M;bOV57#!fEdo&y5fSFg64GGK7a0@7Uv7GHEMeX*Uv%UB3JzM3 zPH(F}F1A0G_JHZ--$l(^f~u8t8-iagUDTSHF}m^V)K|Qld8fLqYTi#L%auE#$!x@i z(GZ<$vDzimCCnvZ!P4+%?7tmY3H=A$ScxWx@U~XVZOo6up(l*!d_z`nfcK=4*4Fb; z(&JXY*sVlor_$2+;_nXHn5Y+VC!tG4AwJKclMbA-{;XV{0);vJnK%WeZFxrk>Oi)1 zGIU3THyt~>a<$nKO>ev=7@2_%=c{N{)24Ybp9PAC5uVuu9b+uMDrvlwRpFc~Sw_r# z8P6Kvn^~bZeYifVr~iTx`tRHcOL`HDhT8>W)6-B9dN!IZ)=F02BNWvvCsC7&H>Hmg z^t<0Ik%JeRXvUU%jrhmTjqrXj95OpOznWR;H7${ixVylatrepJ(cx%~%0D|Qs}dMP z5%K5%;ZTXAcG8@sRvhP(CyuIcVNV>cUN*baIKdS1{UF6Pyfy1us3gnL5T|`pq#U?f 
zHd);Lw71-p%m&7B$uokbpN}-~1l)YjPS4h7KM*IGewy+pgRmnu1D!Q%>A6Z$zNeEfY?2c8GX(r4lztJxE3}}pF)Fnj~umXjOLwv)$K6X7p6b(03llxX@IS`|V$6;5R zoY`db3go8oIq#?S%<6kzr>=vaa3}G!3@7W92TXh*_xt5T@?xu{9FPYk*gv9XbO~r< zX_U)nj#Pp*hIwCv>EATGwLa_ZK%K8Vgvtw0G%s)A5^Z-}c>y}2)QS&tfr%XPMgCAQ z#Jqs@IOdd0WW!1Q1l7@44@gcCYWY{m`D1CfXQsQEhW)EjjkCx7opD{Bqac{o|0>t( z9{US6W8=kl>st$wQnPr{qyjG=NSu0~^_XUUK7%~2>pVgUnYDoap>=7)_6O4i&LuSm z*GYDaJq+4B2WE9xL6`wLyDX@`f>>10d$2fu*V49F1D^TxM)0L~>1Hvf3t#j-xQati zgKbm>1gP!tIj=+C-Db==6q7ZYiV-?+-4G@+yXQZ$-SFZG9xLySu4!p6U*cKz?N1fF zibJ-BM`JcRCtx|*sXKpmt;jSQV6|IjY`#_4W>nj?T}n{x(XgJq>Zkp*9}{$bG^6{4 zbncDzs=(bOs{q+3Cd3J)KPz)#m}&FF_J=xCp(J<_f}D@;6Q8EU;ThnG6V6ihqqQF4 zM)XGrisP9xbmJb5n-+8s){v|Nl{dj%0pAdhEh7?n#F7+Vq>wdDa+I~K4t7hKR61Ef zcDYpq-*1(xT}K{bad3~Gt*%{ddX8%5ZkY`yu+t~=l@%#NNOGEzDbaL< zI@e$rv&z~;{52?71(cj|a-P=Y-s|F^ot>>2@~rGdf06FW!qRk30;D-`h#)9o8a zi4~jI3l?K0$DWMgg!%&_kL*h&(>gLvXfKZkjGP52F8$ORg$!z?Lig8oeN;mS;80oy zSJmerk9iXXaLd3OZgO^(xn$P-lSYb1QlWzIJQB=HfvB_3{gwT6DzD={sd`F^=d-hm z21GkRnJUfdo7Oz{kl}sDB1zVbbW=oTfL+-wyhCFWrIqqb<(lE;1sKkcJ{S~hw&=8; zuD1Eqp8&7FV~xxbRD6x4G4QQnka!A!kx|T%p2-2!GB$AyHal989G1*^3MJusygbd> zXw}1W&Lorj{>+OGx^TJNm(1k4l#2N$(*|$0azNN!V^z%hJ^+?-3}APuAs&-jYgW2^ z%{DfND&k$rDpoI(7NRxe$dU&84P%79~dsGsg0mOB%x`l&@c zX9NaP#s(c6+>k$*zyI3B!0;0$eT4vGxe1IdtHA$l05j+z<#j+u<18W=&?vL)))}*y zCm=JbtkL;qPy-C~BGE|!WSTV=kxLEHMmZXhhYT9b&P#?quqMPToG5GGV`=tt!CT&3 zX4i+fU8YW$MkQ|~k*#AwKF*OoD$r)yJX0sqAQIJr@1>AV7b(L{NlwEq>@v3}?H=Dz zw;=M4nl}Px7?8o% zG@-=AY*kTq0FK1{lg!cBBd&LEfFpXE;tVk6qGKv1HdZ2&i_O>RcWGn=E>Y^)lyF5N zul9)!Fc2;A{o~gXT@KH@&+k$gp%HPCI4!rM8A+Mjb{6^Dw7SbSx(9mW#p}k>I@F!% zHYNhXmcE6+=PTxf5gc(JerLT&*MrBZX_WINjVZ(J)n-225^!9$ZorhH%Oqy!T6(QV z7uALs4)hIhEWMXsJ?R@PJ_L7a@84|jN2ls9kbC}2`O#R>#^M<%VU0^yP8aY`k{T{_ z+U%YlE}f07kBbR~R;{_cGdup}1!=#3yAOg@#sruIUpR33O z98y#Hi2+~AI7#LqtPP;0bWU(&X50mg!-|F{`!L++!$=oXkm#5=0VNHvfjUIXFT)L$ zPCc(J22a@Bmdc^b-EG?ukj=3NS-aMgtV)q0aZ#%DiGJn$;7TeIqNc9zeTg0-4(kg; zfgeB}>*nO#(|$aN>a$MnIT&Zd4!H&3=%%Hzzq{)Y2$&GwA(pjJ 
zwa#42HsI!SxsJJsfsWcCljU;Bd3$gNOxhd6P=YLTYQte$p0u%azu|9wfY}Pbx7W#* zp5QIrMR7l+cto0~QExU4#@~;fG1N)D1`g_Vc$w>MWX~b&V%;1SV*$_MT!r{>%c;Sh zQ(G6y0i!LjR= zz;`D$65^X1@AY4tZTEob(^9W`>&CEnr-z@@IO6>T9ba2Sne%rPUsI4rP3{wp;CRQh z+U2CCxtM^_KASr#(%ty)IgtQr!CM8+l~9#ZSG*|lF48#>Uj)YM3|^I*8DFV!(OGzF zyDlL+=-H9x=d7N3pgSzQ#m_41<_#qJ zQ9Vulwn5k&!N-q+iO(ID(NL|b$yj#VSV6T=P2QsRS;N`<5jqu1)&SvCLdGt`B(-98 z<+9rXoj_Z(_Nfht-%X%P{vb!y9+&Tit|vV>=XK*!9P26I&f~l}NTyEvPrD)~_iHB> zB2GUgyfRzTv^b_xq>??yYwy}RmNQi8YOC-YN*jVgXH^Q#`S}r7PzB6Pe=>&J`qM9! zuS4-!;_iu+-s%S2Qo#u@%xEu&1k2aZ!AG_Eq{Rv}xpP*@NJzNyLTlg^onYy8v8|WE zLmRah?=4G{G(F;gJX(aDH#R0XuEml}GLlE69Sf$w9rH`_GbCf%p*b3lLE4N<;t0xN z{$-DaA{KB4?IL2sNvuCe(1l?J#sAKDv_f`(ea7xW|M0wnI8yD2aF=@l7q&;yvAc;! zgHeU$hCMlHW08I!^Ws0*%}+`|&h;H{I>{GWYnO}-tOJfcSB|f0oRc@h)sKE3;tnqF zCD|*6aDsvoUIo%iGeHL_5B}sBv|ng+uF#0F5emz5_!yV$wo^lV{C)8;4lCrK&G}&3 z{Jw4sV5oECg@3PG(pUm0WzSbdeJl4g&@N0~VTB;_=#8{xF<+^EeYix9)vp@y8f*MI z7Ab~vej~0}EhAZN)YCXUm384C%EgVw|2t~I$WJ87LYDy^Qb4 zoPR;UQz|eGcy{l^|3d>>O1iv_>)dbNVmANME}GQr~f2jz(xcx zHK9|ejfkPwR+w0*cQzb{~U}dp1GI0P=LsUmJ z(DXgjo7EsN^yzl}BCwFg@QLy_OUaP=)oI~FV$W9@m*cQpl8JPhTp?~GMSu=~=p+^B zU~a>XH5!6ABRpSj-v zc%Tw&1NDtIxC=G%8dq8WqfnMm`iubY5-|CS0;{u3InAbZ*>20Jwaa~D`%68%XowKBgIn8VzKXi%+~Hy>Szo^hq%2HL zTTn44g(k)v^y*lEG06j=0WHDnj^1P0=``?Z90;N@LTuW;jD$$M8}I)(3?w={gLkP; z7vW-d`F}mze>gMaFN`Z^K>UY21(N>4xRY~cG=JRE3Cx^#{m?IdImhoq2nh|0aeoef zR{VpNS`on*moX;cpDg3I?;8YyF)orlN3zsk7#ExmyZr3r`x6NOkzWWbw_;A|j~S=J z;My9IVw)7Ks{4I-^!+?!FmE?4J;71T*JL z;UMR~0@D8%`Ia-T$opf)svkI6#gQ5m`C}`yD42Zv^CV0EB?c5y_!j{TA3;&$D^3|zWjXNs|I(=z}K z#wR*>FZTr3GC67EO1~;F{_v|-aP2*?O|J=M!1dCI0LGE|8iiT!mQH?iWtHEoTV~LeX*S0K zc@o;I-Qf06VF}XFrw5eO73UE0IQ8c6zYoTawb|+i=ZeakV5*e4g-4%hwbm}+eD3m% zMC3!rSJW=YtDwYeWgDd<=B&KoU!S*DHI=tPVX$0d~{rm|(_wA^gdb zcxqJgMhq^u+tTNs#>H~z`P)qVUX z^RUbr58e{$l<+eJiiq+BHvLHT`x?s`mF_U-xioQ?)rJ#~-fDyl9cSgQ)TEc^y*OK) z7-1tEmX#F5O6_w`yy&^^xS#eN)GQbOWwZ~p1>uMh(_1jwX--=+&4mvNA8#>kRe z@2_6&*nU`^=-Bji;oNtgy|IJ_xB4Vn1UUP^(mkWTl^6JJM0_8K&bzCbt&xg}Vsn2^ 
z?2aoP7&4DV`(wMHys*YhK1W&=LT&Ha+_63z>6#Z4{riE&mCaYiGMkJok2Jwkqjz8- zq~1ZVkP{l$kL_lQhdnCoJQ^ZiDZ(x{CwqV?20JvF3{b|^*8dXRH16y%Zh?4sIN5o< zozliMvzv}MQj>c9@CdX2!6Zu{91Jqrp^0BPu?XB<+7juW%+{BiKRDdoZpdcY#P*MG zmCJmSyqqu|oFtCU%;Xur`p%$`A!eT{IO^Hg*Y~Ub+j=0`mf9Q7%9PybwmIfLg0}4_ zFW2>a(0;x*jil44R6`-;dwX>-6}^E!8DB9X!1|%2d)(-yA1s6*foe%C&sX_A1S+-|6?#pnM3yYp zrO#{af}iGE2J9afNhY3KlRrxE+PyVI(FW^S&iO%@%iAQ|_scztmRI{zEVk1P+e#$N zhlIJyloR02^k%YUi{jH=1}3A9mW*IgqL!71cy!8M;I-Su(am`cx{2q@MR*LIy26Mp z@7-o}CK!Y!p2N|mI}~ZyIL8lH*KQBVT;UDB81Jzp``15ZEt_@^o2l0bW#$HuG?Mgs%?|JwvV5T`LMtTl>8A6f#mMR8{TDOUH7LVNd-?%{5kH_Us`Ig zFtamz?Se43IHBFU#oLos_UU|!7OW0d&Px007(9_=%v*zV0WWxW$$E8T98{=XBjZkq z`l0JPR;EjpVP>KW$(jyW%~^iavqJRvSbOI^$_A?s0a>Hp zA%XAoZY0x?o{W`-R%r9Wpq!7L~A20x8ufRy*(t2rZDT^&E~k{3s`P0>L(=H zpzXssnVM0nzahq{4ooV2yOWm#x(MQ=!lF@%csC+IQLKt(0`R_Beu==pavJ$GrN7w| zzL*2T`?Rri!wa4IuuOP+ORfSG=BqbRL)u{kv^*WV?g_3gdx4}EPrV$m_DMIn%+G?aE7aY5UC;0Mip1lxY;{XR2mEh=u9bp#CQj!i z7iRFEP9hS8t}Vc#TSPk84&m<4jqDT9A`W(4q!v-zp;8zr+u5gldcko*9P&eb=Qshq zaMrFbKH;}o-xz5)T=%4%ZBjX+CX&8GM1pThh^-l_CdClAC+IfE((PXj7+_ zF&6k*Vx`V|S4lXqjx38y)Z>w8cdj5jyjS#bSGDIZP^7ahVg)O}q^#}EGYtLlR{y)} zCnH%wUNy01zIiGJsQ>dPlB(Bb=X3*Yhb9pX`2q zn~>v@drLauQ_CaHThz#gtFWP5s8cPS9RE@#r3G1_`EpRo|;-09Pg1 zp`pIuoFO*SaoK&v<4mZsnaeGc@4DBA(zIF}$w*%wLdDgyz@O*lL@>hKgE0tz6OQWE zPjvtmu#9Qmw?^-kIV_jSSeMgGayVN}ufRy7?~aIf*3^e+w#Bn0L}A%(X0g8htPFj*!nowP z<B0dcNflf zSH7Cz^#E@!Fbm(;Sj%LFaGsB9R$^D(hjPqPDot#wV`xDf_~oC6G(eGfNKGIQ7h6~4 z?EB4W6>!6VaIslCj_*3*k<&JNvSlaj<9oBGV_tQhFA;~bAcabYd%L`;4DoEY5O7M~ zOXMAh7HIu-SpVi&bs!@gd6NE7VhGvp<#w}pZrR3=jQ3iZEj_0hohg-}NwoU^wfB`# zRXyRmfS@2qN+TSQ4(U!wkrI&ZMj8&?Atln?f}$YZod-nelEe-D=pj8v_&oFD#v2hL-2OSWzMV__uY@w znF9u@q~5fr%Mg+VEJ!AP&&qa@>x`?iFsIKZu?}KGMv|Ec7n+opY&z%=*e2wFX;Kqr zuEDWE;Z>#TkR%4ot+C>@Skpse7f-Kg0>RS~ugfZjL8`=w0vKInUTLM}*pDgpIo2;0 z5tF5CUKT;GTJW9o%O@v}=!R#}>O`(gW{W331{&Bi*6g1KWv*~9TQT4~^%D`x{Jb%q zk|P=Z-mL>5*xSY&*{{!=Aqj`*6FhSg9vjEU-gA?E+Pa8878Xo~Vx&u$*I{IK%>7C3 z63TWQ_KbrJ=5;nmbFzo{t`(2n90N_+iYUMKef12hNam8T>^Vy&MuV-6rL%>y;3<}U 
z5}SYcqKEddT%q>dim5U*Tx%Vl0-sTR{V8U@&{mJ9Em*l;jagb{ws!~E@bTk6i*uW) zmyxcN`dTV{kw1C_T}xHYbN@l*AINY^dbq-Qq1~w4TRr&kzI|6fW+kdHth&nw1wUWd zK@wz$Za@n--8z_pR5G8f>-{GaRZ=(G_H*{pFVT%*FA~OY>J)Z)2!&+tNaEh`8b;A;H}zdfw4KdY+0(2~4re&Fn(#k|e4E zTa021QU$~l%f z9o1xL+G*A=JHDV&q|MVk^6N=w4|Q}od%U)aiU?sVdX8-9t>;|j+*=2PbC$jlLOIJB zXqewHj64(_yUG+=Xnsi)#+ltp#1GcLR?9NV`BJL#5>#_vfm>ID@GXVvXFLW~j(o=F zp`8QkOxG8*%zG-TxrjOQwDgF|&##ac+X9=y-m{Fc@N3o5?_AqBzhZH_CKFtT@v@#D zg0)K>a{5==i}Fsbo^KlT&sxKRhORnnZWVX8 z@$W`Gfv%oUmz{)jzkG6>4Mcnp6GaRQEj9-orRGItp`ar5#41i%J6oKFp*r1(j$N&j z)uZYFH9Jz7p>+i9FlOViEAH8uMt*B^BFJcM<;(<{aDrYP%%!e63k+VRC7xM~cUv)L#i3!Ev!9IP!z7Q4(C2xq;`+jLcNH0AUZBPo`uf zQI^_^dWJeVzR&T~;JJWFG4DA+m*n-2&eL2jMOXO(p&nP>AF>hD(_$K2ODT`HXs9B3 zlR7;|=sSs{MdC1XRSL_Np1%)c#Gtr<>yPCWbKE7V+P>)YL07ebMVYEOtG>~yzIYfY z=y7^sTo}^?%8WS3fP;Hxewzy*J7~_Rai4({r@6dbL~Ewz9F_kvuJ)i6`4U6IS{VT! zp`D;lhaJ^^y2c!REH{JI=h05PXXVB5mf3_c&&O*YS>xmhBZLN(B#ab%{>7`sgT3y8 zE{50NT|fv7hK(rp|_x;M@uNo@HV|)3l@YS-GfF^Kl!htAt3BjU`S-r#6%_55>vsrn26Y zjf%oKA7)pT#xuhmImYbOh+A#Np;XH$v&T&rAHCXAq76o+I#T|AS$VdES0mMU1pz8Q zOF!EH^YV6AmkiPB!a~@PE4sI87^il=Cg%=Dl7@u&ke$#6nDr{hFjkn{RbzBATObPp zu}%HSHHPqdAU}1V54r5lB&VE``V*I(lXNSxf-Zw{yD`lw0kLMi86Ix!f$NRtGRL(d zv}fb%<6-eksa)J+%f~)1BSJA8-$u19q7$qx)@3G(oCiEPU4M$9?iW%P97&K9q*$-c zOu>~0Gq4Ftsy|*8a$mX@IkvkTx`N|4XTirr6@2e?FseqKv|)Z@n)EiEpH09EIw~4f zz(|)Dp)f>+uaorV`?Rs04{{O@xZQ~Ppe+uO(UIsZZZceiiJtwWe<%kY$*|#KYCEMF ze^d@iq|0mi9yiz|g(LdHWsd1VT%z|vVf7}3JduDpvyuj%amco^G9p(4tm(0`@Q{U$ zOZu1=3&125!9(;kV={}To6cr#Jjy1?f1PI-q=uSYs)L56z=yZIIb3-);LzM`T- z-v-eoSpYv^cfrWUMFqV#Kqc9Rt~cu=Pgx6GoW%|`N}WJ)hv6HI!o{+$0fMN z>pGj$7K+ z!k1#L&U8s)r^noL?q9<+>}}Z;UJ-|gVLoSBG4A{Xd&PEMGUN#ei&i<7J5x9<#=m@> zH{GX@&pb$IWL703hOC{1(qv-62I02lF1prB9o)Oik3E%^g@4+XwkMSqPmIF!w}rC4 z#o0#eYu24?m}R17Xd)h5Ed-O*OA8qYJB_Y7)aE#9p! 
z)yg+V(F`VfqDAlKbpn6=-k2a5$$g<5D zO6G0)9iNfkvEjSfcfS0Wm#8+3B)mCp-6X{w3GA-)afEE0qD}0Ym4nB*ud*5@J)FXi ze=g3NstC5JTLsf(&j-324R*0w;Bt5$Js+qu^`5S?N4|v7b~Z>@Z5Yo!pc;7wSL+pn zv-&uL)x$YLK`B%XYH4}8ohKNqn_0K0=aT^-)MwZsFEA)l?BXX2xcDL8x^GO|$#3)n zCwuTob~j)W9sr+boSboFX*pIQ=BE0~qvRy_1-`2+*ls=9c4DkF!}?OX1+u{HtmeGx z$Rf?It1VW@@E6(uD;LYEh|Gz2E+O@6uoQH1s-beBI6v~7fATG{3U=Ar>rw%6?PI>^ z;vB(XF3!Y}b*tIxo0_3;95?Y`K`qANLcyTylFMuX<5@tiXL`F7ZLLfnF=&*1LL)q? zP{56f=%=c}cIe1bpmy@#`Fz!THP9Ww6m(54d;Mt4)aA*c5ssIY?GlkV{jA4LrXqJF zv81V9*QONN?1Ot{Ze+%tU2$aVNyM#*sSSR+9X!DH5$%?+yR zB0lWVa49eUj3%D3z7y0eXX^oXcz+n5 zpDpPa*0yP?Y;H83PEs5y(joSUfWwQK*RA2olYf#)({N_ovxqomJK3P2TCDk$&O_-_ zc1mLSg{mThXF0Z8&8l!^C=3t+BWk?{B1h`g7-4IorJM?QDrOugQ7wv;C3e!28M)G< zUe&ds?tZS138AXDQR?|nUwpsyJdh~`fI0bjVt;=LG8ME*=qEmOxPzf?|M*S`AW_NJ z-!y+~O@3jf-vK}=2#pDq`nEs|;^GyaUf3yWo^PQ3Vc?tOUx7UO4F{inq6OS)x- zcXI2+A#zIM{1N`Ogej0JvUH#|8hh zRYdky5E5!*`t+X}E&KqIxL1($AMpyHhd?rvb!5MlKHT-t?5!Z=|LijCr={_i7$kA& z^~#iP{kjv?FkccrM;`3dR=&SiOPt)Vy&aKG^HWJ2qpoaM96hd{=1jV9CFW)5S z{P2k5wvTjne8T6cPOnC*6{T|Ixc5tz(I$w>pjTD#m|0Uin6)Vqy@jCyroW(r*bn`il}jnVZ{sRb$APi6s&<@Z`#GCp#`vstC-oI~sFC3>2?V0+J zZ(-RiX>&>i#CJ#elEd{H9-&sCaXjiVCy8aAjxb=r%qr-$FzMk%)TXujPm&gKB6xEG zb3$_>0uFf|4=ji($S_lkK$XF0I%DzG(!9^#QGxXmPV;`s{mJ{L74ke>G*(CyT&+O{ zAAhvBm*~mhM0j=c{cZhQLU?l`bK;~yJ_A(=o%CCY2tTr_`19p7v2g!?og~h4S8QSY z6HsXNz%6jpRq&3d5BMV4fxd%Wl3Q<$>f2O7MCu0zgv5raJpV}<15!c}_pd_n@cX{7 z^1|z~{1+znYaII_|NesjnQ`eKKJAMrDh13jM=K4syS@Ub(v-j)4%y~XP5B3H|4*_1 z>5%{12)QNv7#jN99A}9Ii;cM_{3msm6p(HT7`P-WQyz6X;}6@jB_3|w-=6YmQ5H)I z{Wr=e{QivZbYR{k0Ba`n zOYa`BqT>Bc93_H4P6a1-&-xy_Ce0`A9-{?d%7;CEMhPsbBfXOVJBT#3^FP_UK%U41 za`(dT72*`(RUu%te0|OMrS|!oJz#|{LepTrTcLq~bTnMW`vX#<@Yp2@q&RYb92Dmi z{xzN>rB|z3nG$ax^=$aITT@Zr)%0m+>R`Tb760ZRM zp2J2A)u*?b8}F$WABQd6a2Z|yIUMASEblCPRwP6+pud!galW!7XAzP1hRsNK|(Q@zYfYs5;1V_4Uz%Go|&z%0z0D<$ju>UPNoHhcmwDZ=;ncQ-~{=Rho zho19`_IrEH{)f@@|3L!t6K76tW9m=owOq~@ZjPQy>`%lcR_!`j-idLinN^- zvDBINVzUWy5j>QJq}S!!cfGPW{H&)IZuJix4L1YKd%LB-frMIh$zh% 
z4SF1mboqH<1`+@#P9cQD;wibWk@aV%BEoV@;b~73Lt}vKOPB}61q|9HJVzK5X~7LW zE0|{&yOUC0VIy1mg&@(Xr_0(W&DpFT56Z$(U@0^2YqXmE%?x>dOBBV*z%18H&VXDw zvlmA?13Fg!ezeAr!PzORUn=sq8MjK-Z7?!BxIi@0-v#h_sC?aO_bJ+X%xLa}x4>SM z-r5C~py0Odfg= zill-xoA~GcnEei}S#h`?dSs*!dv@01?luu*Qs#D=kh^(iXD$n^>s_8GQrP&)r~#{< z7FgiwAEl3b^&(@BOPz>BdF~SgMkO%SK$3Y5>m+u*;&TOmPlGnpU{GWcu!NG3OiOaAYIeto+ zrbcw1SN4kfZl`{$z&{!v#Eh<_ABjsW_K@C9c5ZIbl3?ILaH1cr;)ZSa z^B(RN@BO&gIl|VYz0>@P2myD7i&p+LcXdh6ld^Co)tV#@fmI$}Xw6R`!5q!Lq@c^_ zu){1Ee^LOAL$^>e9egoe9)zc?@8-!-FuGwPbXw64-Ccya77 zYAm`pR`H_UFoe32IJhTFy~>qP#T!SX=}m3HY#p_-&O^3r=MGIJxze+hIX4H|<5xvr zl|HKzzR}(?)~Gd6X_{2|YEI`d7tYT;Zl`;1z58A;`uA+7FsLSC@!jmm1qh_;rTk;o zdc{cXp`7vqY8Aqzg}5OuIsK-g! zim90D?y<1ugy_sKmA$$j5cV)!JFAwyl{T8FSCcD!RZl6=Ds>B-=BbKI6;!TK%6q37 zg-`cd{ENX07_@oyyAJ;JMr^fiygS?4je_HJFse{Y7FpALY{uvhlU{zAMAP(7FO%st zkDih>bgK8Tod>@;+#t)GkO|fmd;tTAwCD1ETq(>no8*}GIePT;_`E6La=DE>agqa3 zx7Pd9NU^3ur{Lkq(&4qR@YyHmFV#CR7u%vm=1uAs)Q4GemAh^pYlS&qtTZ;*CuY_)^4|zaqZ#ACr*v{(XukG zh5cvUa|WD|&MKNT>wcK_W#S{TY4n@9atEuGIw1QLmRJMs%LII6CS+qDa0)e~4x@5$ zKv?es7>~4yCF=e|+^EjnYup_uw{);XJcjmBEvBS3tc%vjCxGc|p ziF?c2L1xM3=LQ?v#NRAj#`QLiRf5k$qWyYs&Zmdq#JWKzJae=&KL73~p7d~x8w*yIC{%PH6@Sc1!@7@!_^9kB> zquxv|;f60`6@B|WlW&jU-U5#~7`!q$7f9yZ({Y2HVqBMxLl-69b)U@^Ka5}rfe0luYJ+@tV)-~H2~!3Z>_v4A8W4{KGq?BZtyit=}X?pEO~ar;ZL_x zULP;=aXn0D$ws`x)~iUz8l#VB3iuQy^u)8RE2*mMN0hih21&T0==T+fCov%VbSF3* zq%V;>E}u0?^$F%R%I^&ol*?Y(l%Ze3s^5%J*V^=im%-+}VU(HRY0HK~lC}WkOSRrs zAS3b`X22fo#*ae<%Fe8E4L?ZEeyy{D^2b%52w)?`(@LJ(A5=JdSO#Jf#5 zvKqn>=;9dd?eN;Emz5sb%yrX|^_&*76063S-t=clU+`rE);;Ci%Cs-7SN0s%?IuMr zUGZnKEVwg?Z!8YD-Fa~AJ5~7msMD3cgrYO6qN2pA{?i z##+1fBrX;3a73#MT5SfO2l@q#MW32*O0m+|hh&|Wa!35s<<`E}ue&#Paqg;!m8qAn z*i^@~*!SafyaYvnXUPEPH3A=`o6l!dlz6V}hLF$mqO^DCs$)*Acg&u1I5^IJ5nYNn ztBC#J?|26TK|FXqy!H3Shb-@6fUk7%_n86VG^03z7~ zRlTy14{+~s=q4~D%^oAZDWk9sApmd4^H(M7Kci?XiNy1y5Wc<)Lsw%~(?MOpvrr!y z-##aV-jl(jGss5Dueq1JC2hU#$~e<7&PQAAtUN^w_K=P^kIbKJS(SFiBboSqG?Xb)B=SQ39!$R(Q+$b{cPH 
z_ahtIahti=1oiO}^rJCX6^OLTt;5k(=~bjl{M^s6-!M!xs~_b==vA6j3T32>yw)7G zee*2V7IxDYM0lslcmaf;lf;+}NI^kQ!e_vPI@IQo1fm?yh8JCG;0U9Dug5dPQNiRR zJ<~0c7RQI7qY4A3+9eklUg93_6So^;`H_RN4ffyGR`KcVgNigR+VHub7+5(Op)r&| zja>P9QwlAuvFz~QdY{v(=*b72cs!Vf-)1|5%jV9B0tS$`^2l2z24)a#qIN$hVM{`&1ys6G$P2N?@ zPwM?PQGdFd(11pjT2_)x;Jig#t#$AYN$lxpd2zs-U5d1|`bS-_kCfHWD(qcj!Y#|w zcicb)23pS6gHS@O8ZLRKS-4eF9R`np&_DD;Ybl zSKn7@7m2E-sJo!42;#3zrnExiX#p-CrpzwI!=4>5cf_;2s&KzVtn7Q>qn=qmQd`_O zz`hjAk*7D);nl*|P;lm@)lB2)yXUcRtzhWesqID7ZFtvYcY!C9HmaKx%&P(yVRXup zx)G?V%yH6l3Z;ovf?45?JiOK&CE-0raFKPy2OHfd7HD*O9;=+1J!(u^DxG2?=$>nD z>@#VNo~a*Z?g_}ib}X-o&yO(l`aYA~1!+NPDDB0%&58gu%UEwgd*Y)ax0-nEDP%ER zk3?Ys*gMt!81gyPne1No5nR0#Jm^#A+QTxFD6SXbwKfw??Y*x=WrS4XGr^_o>#GrE z7_<7hKa+h8vv3!W6Izxe91>o!m*Ji{TQQk#iGo$h>6YJfWs-n)ZkI&&leF9-viAc* zurm8(!e-m6H^n~CpvbY44AckXQV;6FG|CJY@yt~5Q#~N;@7AO~D$Hy}WAt~Ca(Lu= zHZBv#&a9o~u%kt~W!ylygfGDny^&=zoktUh_wT|^J;*gc`g<0e!J(Cnr#AL6UzZm`yTJB1q za*cL?)aG-Ni?Bw!JUyFDdpom9|2*4_;<$ExLh55KHr8~nN zkOBGI1i&&y$h*zAbmw^kA7lU+EL;C?pa%E=2jEZVLjNg(y??7(3kbN|v;2y2Jr422 z_~Qrt!Y$5jm5u~t_*E)U`0qH_$lM55+i$fZON4G6N=!GzHr2p*1`YXP0f+D13Ej~T zGB~OI^l?}+aiJ3^&gYE3GnN5u1{996Uo~l*-Tj!T{6Ls5PeQw6#2+jN0k&-Qz7=)8jfwUr*1H@J_~-2@@fKaRa+6!mgJDTyyDLtmqAft!Ai32n_%6_QPJZiy zhu0N(VYI%Jb;at%*Y_sswv+}Dq0E=V?~w~Job}v2E(6qjD_Uy9Vfz5~nUVB_SE5|4 zu6OgQz+zd%+-WlloQ0Tg8C3mJ_dN^}QAZUA)t*(L=8U-saz1(2?(Y`6U$h_v2X(;6 zA^DjhHiDjs<*91^$1%r5LkMl9r5rKQ9bkN@u?l5l0rs?8MiP{Umw#C;n5g_BM51 zeDpFp{0GvnNU0b)!x{on`EP{Ll>##{Eyh7SV*U-4eGDKMVfbl1gb&D%$CTlbqTlqw zrs`8?x!&TJf%RT{PRZFVpF=Oqg}KQxs1I_bCwb~fsDzNEHW9~``!*cqu^ zJ?6B8qIjTj#mKYffXJboYx`nQtmHUNOt33RzlOx4&mRr}xuCq|6oYhfU)SRpY0PFT zHdqL!>S}aC1ubOjD!|YR8RQET0{OO-A(|q!Tl&OymHF~u-2U?GXAw*g=uln+4jvKn z*IO zHN_;p5z;^Vdrlo+Uv+!~;=6O2*TBJ-F4)2aMgB4gcpE`N4spV&!un&EUl6J>{Gbmn zJL*5X#gMuflaR#MG^cx~2e=U-lE0haM~yndu1^KxA`B{h`n!oheTaU}f~*Juch!Ri zwBb#}Ofge0p=Qf>R<*qLV*?$YL-Itn#qYXp81Ih z1F_oZ3eR7YS~QW32?vVV0&4JtU*Gq;I~k*V3j&1;8#8)b!d*xCso=gyE@2B8x2V?X 
zhJ7)K>E$Te1Z{BttIE+Y?l|r`4j!6Qw-Q+~MGn%{)?Qvwm%1|yQ~fDWLg}PC7>_hA zy%*2MiEGEz$2B9-=*eF@^}`XBe#%Vr2d%OjtURCCNP+h5F!6g2Z53m|i#`8a(|4rX zQp_gL<7*v5-CWG3Hk~6CX3X17R!L%<{)`F{{U?ZV2XV(0pSBYt^97UNThp@&B9xZb zE}R&?@bnz%?JkSBYcHteJ_6km9hB~J0-KqcfmqcPle02c_(ApQwv8!Ow{Sg;WlIa@ zU01*(Qhu=d^8QPGA``UZDsk&=>gz*6Zyo8wzSgFl>z&i5qK3V}a(~UhMS6@Lzf(cG zgNezGPl?1k)(Aq5uBpsXd^(XRobl3H?7wxs3ly}xTjkOp65GwwpQ>&^=>G^ zBNmnV3W0OGyEeeV{g?S2{WFu5q}9i0?XGUTUONt!ECjNTk+qm$l|8JW&C#FNTN@Jt zPn;CI8l#@#KDOQ2sY%}!rZye7O_yB-Cw@XFAdYSmrg#q^SxD`$42jpsXyk+b zMreU~5uaJRqX?x9*$U%I4M#=t4D)0QcrU5y(*#}8CkpKmfwrnAwalFE;#zy1wbplB zP^cx?M@XC`R$+>WsM$mJ*)l^mUk@uy$XgVdpE^8W;nz|k43sDgw$>xZOXBp%T{>xW zKdvfNX;#MF3Bu)ER@Uc>b}vkZ7;5n!5q@(8%j6cTDw|bEO&Rz;jH8>Mm>pMS_`r6f zz?X3{VQJo`%BhYA421bf0P{W_M(7*dI-Wj|?)Ua-K0FKqLuX*i;M1K2jNM*tU6;Lv z>z~udv>_r(NM%0N!;En`n-YiCPx_6Nq8kgMZs?(}WVnHmmQ2NZc}lu@epp)i7=6p@ zC6lIzEDdTFBVzO80mq)7qw{f5xk1W=TS?3~n?dLbS0?5vz5UU)`CFPp=kJ_wQ%Xl7 zmz~UB8{GE&fU93|Gp{3=&xyprr*|HEb56v>2k^CO>uCM`g|qrCW`o5a7FiG^R;3>& ztPDx+j3Q_xxGCu>4Rj|OHm?-K<%?338U2(La{KZYmwHoFXHHi@VK71XxjkcHCmSAS z;k`+$dmj)I>6eR5l1Gx}mips`0+PBI_BJBSE8{owi~O^h_=@7kn>ew#c1j{XNAwe4 zDV>kQG2gEJhe(UeD(O5|dRNNpZ7_k(S_~Jm8Q946(fry#-gv|XudUwBY2D`e+#d9& z+d~A7(3Y1E)pwJJpcdZJek_gj^yevE%1t(1ij(*-l;EW*1FCmdpOjS~S> $GITHUB_OUTPUT + cmd-bench-all: + needs: [set-image] + runs-on: arc-runners-polkadot-sdk-benchmark + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Run bench all + run: | + "./scripts/bench-all.sh" "${{ inputs.benchmark }}" --runtime "${{ inputs.runtime }}" --pallet "${{ inputs.pallet }}" --target_dir "${{ inputs.target_dir }}" + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed ❌

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - run: git pull --rebase + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: cmd-action - ${{ github.workflow }} + branch: ${{ steps.gh.outputs.branch }} + - name: Report succeed + run: gh pr comment ${{ inputs.pr }} --body "

Action completed 🎉🎉

Run by @${{ github.actor }} for ${{ github.workflow }} completed 🎉. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/command-bench-overhead.yml b/.github/workflows/command-bench-overhead.yml new file mode 100644 index 000000000000..735b40102106 --- /dev/null +++ b/.github/workflows/command-bench-overhead.yml @@ -0,0 +1,75 @@ +name: Command Bench Overhead + +on: + workflow_dispatch: + inputs: + pr: + description: Number of the Pull Request + required: true + benchmark: + description: Pallet benchmark + type: choice + required: true + options: + - default + - substrate + - cumulus + runtime: + description: Runtime + type: choice + options: + - rococo + - westend + - asset-hub-rococo + - asset-hub-westend + target_dir: + description: Target directory + type: choice + options: + - polkadot + - substrate + - cumulus + +jobs: + set-image: + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cmd-bench-overhead: + needs: [set-image] + runs-on: arc-runners-polkadot-sdk-benchmark + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Run bench overhead + run: | + "./scripts/bench.sh" "${{ inputs.benchmark }}" --subcommand "overhead" --runtime "${{ inputs.runtime }}" --target_dir "${{ inputs.target_dir }}" + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed ❌

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - run: git pull --rebase + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: cmd-action - ${{ github.workflow }} + branch: ${{ steps.gh.outputs.branch }} + - name: Report succeed + run: gh pr comment ${{ inputs.pr }} --body "

Action completed 🎉🎉

Run by @${{ github.actor }} for ${{ github.workflow }} completed 🎉. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/command-bench.yml b/.github/workflows/command-bench.yml new file mode 100644 index 000000000000..0ff166be48c1 --- /dev/null +++ b/.github/workflows/command-bench.yml @@ -0,0 +1,122 @@ +name: Command Bench + +on: + workflow_dispatch: + inputs: + pr: + description: Number of the Pull Request + required: true + benchmark: + description: Pallet benchmark + type: choice + required: true + options: + - substrate-pallet + - polkadot-pallet + - cumulus-assets + - cumulus-collectives + - cumulus-coretime + - cumulus-bridge-hubs + - cumulus-contracts + - cumulus-glutton + - cumulus-starters + - cumulus-people + - cumulus-testing + subcommand: + description: Subcommand + type: choice + required: true + options: + - pallet + - xcm + runtime: + description: Runtime + type: choice + options: + - dev + - rococo + - westend + - asset-hub-westend + - asset-hub-rococo + - collectives-westend + - coretime-rococo + - coretime-westend + - bridge-hub-rococo + - bridge-hub-westend + - contracts-rococo + - glutton-westend + - glutton-westend-dev-1300 + - seedling + - shell + - people-westend + - people-rococo + - penpal + - rococo-parachain + pallet: + description: Pallet + type: string + default: pallet_name + target_dir: + description: Target directory + type: choice + options: + - substrate + - polkadot + - cumulus + runtime_dir: + description: Runtime directory + type: choice + options: + - people + - collectives + - coretime + - bridge-hubs + - contracts + - glutton + - starters + - testing + + +jobs: + set-image: + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cmd-bench: + needs: [set-image] + 
runs-on: arc-runners-polkadot-sdk-benchmark + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Run bench + run: | + "./scripts/bench.sh" "${{ inputs.benchmark }}" --runtime "${{ inputs.runtime }}" --pallet "${{ inputs.pallet }}" --target_dir "${{ inputs.target_dir }}" --subcommand "${{ inputs.subcommand }}" --runtime_dir "${{ inputs.runtime_dir }}" + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed ❌

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - run: git pull --rebase + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: cmd-action - ${{ github.workflow }} + branch: ${{ steps.gh.outputs.branch }} + - name: Report succeed + run: gh pr comment ${{ inputs.pr }} --body "

Action completed 🎉🎉

Run by @${{ github.actor }} for ${{ github.workflow }} completed 🎉. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/command-fmt.yml b/.github/workflows/command-fmt.yml new file mode 100644 index 000000000000..d415007d9383 --- /dev/null +++ b/.github/workflows/command-fmt.yml @@ -0,0 +1,54 @@ +name: Command FMT + +on: + workflow_dispatch: + inputs: + pr: + description: Number of the Pull Request + required: true + +jobs: + set-image: + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cmd-fmt: + needs: [set-image] + runs-on: ubuntu-latest + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Run FMT + run: | + # format toml. + # since paritytech/ci-unified:bullseye-1.73.0-2023-11-01-v20231204 includes taplo-cli + taplo format --config .config/taplo.toml + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed ❌

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - run: git pull --rebase + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: cmd-action - ${{ github.workflow }} + branch: ${{ steps.gh.outputs.branch }} + - name: Report succeed + run: gh pr comment ${{ inputs.pr }} --body "

Action completed 🎉🎉

Run by @${{ github.actor }} for ${{ github.workflow }} completed 🎉. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/command-inform.yml b/.github/workflows/command-inform.yml new file mode 100644 index 000000000000..1c7323c998df --- /dev/null +++ b/.github/workflows/command-inform.yml @@ -0,0 +1,15 @@ +name: Inform of new command action + +on: + issue_comment: + types: [created] + +jobs: + comment: + runs-on: ubuntu-latest + steps: + - name: Inform that the new command exist + if: ${{ github.event.issue.pull_request && startsWith(github.event.comment.body, 'bot ') }} + run: gh pr comment ${{ github.event.issue.number }} --body 'We are migrating this bot to be a GitHub Action

Please, see the documentation on how to use it' + env: + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/command-update-ui.yml b/.github/workflows/command-update-ui.yml new file mode 100644 index 000000000000..9b9c45c5c0b9 --- /dev/null +++ b/.github/workflows/command-update-ui.yml @@ -0,0 +1,55 @@ +name: Command Update UI + +on: + workflow_dispatch: + inputs: + pr: + description: Number of the Pull Request + required: true + rust-version: + description: Version of rust. Example 1.70 + required: false + +jobs: + set-image: + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cmd-update-ui: + needs: [set-image] + runs-on: arc-runners-polkadot-sdk-beefy + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Run update-ui + run: | + "./scripts/update-ui-tests.sh" "${{ inputs.rust-version }}" + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed ❌

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - run: git pull --rebase + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: cmd-action - ${{ github.workflow }} + branch: ${{ steps.gh.outputs.branch }} + - name: Report succeed + run: gh pr comment ${{ inputs.pr }} --body "

Action completed 🎉🎉

Run by @${{ github.actor }} for ${{ github.workflow }} completed 🎉. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} diff --git a/scripts/bench-all.sh b/scripts/bench-all.sh new file mode 100755 index 000000000000..e5512e26bbad --- /dev/null +++ b/scripts/bench-all.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -eu -o pipefail +shopt -s inherit_errexit +shopt -s globstar + +. "$(realpath "$(dirname "${BASH_SOURCE[0]}")/command-utils.sh")" + +get_arg optional --pallet "$@" +PALLET="${out:-""}" + +if [[ ! -z "$PALLET" ]]; then + . "$(dirname "${BASH_SOURCE[0]}")/lib/bench-all-pallet.sh" "$@" +else + . "$(dirname "${BASH_SOURCE[0]}")/bench.sh" --subcommand=all "$@" +fi diff --git a/scripts/bench.sh b/scripts/bench.sh new file mode 100755 index 000000000000..2f4ef7ec6a14 --- /dev/null +++ b/scripts/bench.sh @@ -0,0 +1,117 @@ +#!/bin/bash +# Initially based on https://github.com/paritytech/bench-bot/blob/cd3b2943d911ae29e41fe6204788ef99c19412c3/bench.js + +# Most external variables used in this script, such as $GH_CONTRIBUTOR, are +# related to https://github.com/paritytech/try-runtime-bot + +# This script relies on $GITHUB_TOKEN which is probably a protected GitLab CI +# variable; if this assumption holds true, it is implied that this script should +# be ran only on protected pipelines + +set -eu -o pipefail +shopt -s inherit_errexit + +# realpath allows to reuse the current +BENCH_ROOT_DIR=$(realpath "$(dirname "${BASH_SOURCE[0]}")") + +. "$(realpath "$(dirname "${BASH_SOURCE[0]}")/command-utils.sh")" + +repository_name="$(basename "$PWD")" + +get_arg optional --target_dir "$@" +target_dir="${out:-""}" + +get_arg optional --noexit "$@" +noexit="${out:-""}" + +output_path="." 
+ +profile="production" + +if [[ "$repository_name" == "polkadot-sdk" ]]; then + output_path="./$target_dir" +fi + +cargo_run_benchmarks="cargo run --quiet --profile=${profile}" + +echo "Repository: $repository_name" +echo "Target Dir: $target_dir" +echo "Output Path: $output_path" + +cargo_run() { + echo "Running $cargo_run_benchmarks" "${args[@]}" + + # if not patched with PATCH_something=123 then use --locked + if [[ -z "${BENCH_PATCHED:-}" ]]; then + cargo_run_benchmarks+=" --locked" + fi + + $cargo_run_benchmarks "${args[@]}" +} + + +main() { + + # Remove the "github" remote since the same repository might be reused by a + # GitLab runner, therefore the remote might already exist from a previous run + # in case it was not cleaned up properly for some reason + &>/dev/null git remote remove github || : + + tmp_dirs=() + cleanup() { + exit_code=$? + # Clean up the "github" remote at the end since it contains the + # $GITHUB_TOKEN secret, which is only available for protected pipelines on + # GitLab + &>/dev/null git remote remove github || : + rm -rf "${tmp_dirs[@]}" + echo "Done, exit: $exit_code" + exit $exit_code + } + + # avoid exit if --noexit is passed + if [ -z "$noexit" ]; then + trap cleanup EXIT + fi + + # set -x + + get_arg required --subcommand "$@" + local subcommand="${out:-""}" + + case "$subcommand" in + runtime|pallet|xcm) + echo 'Running bench_pallet' + . "$BENCH_ROOT_DIR/lib/bench-pallet.sh" "$@" + ;; + overhead) + echo 'Running bench_overhead' + . "$BENCH_ROOT_DIR/lib/bench-overhead.sh" "$@" + ;; + all) + echo "Running all-$target_dir" + . 
"$BENCH_ROOT_DIR/lib/bench-all-${target_dir}.sh" "$@" + ;; + *) + die "Invalid subcommand $subcommand to process_args" + ;; + esac + + # set +x + + # in case we used diener to patch some dependency during benchmark execution, + # revert the patches so that they're not included in the diff + git checkout --quiet HEAD Cargo.toml + + # Save the generated weights to GitLab artifacts in case commit+push fails + echo "Showing weights diff for command" + git diff -P | tee -a "${ARTIFACTS_DIR}/weights.patch" + echo "Wrote weights patch to \"${ARTIFACTS_DIR}/weights.patch\"" + + + # instead of using `cargo run --locked`, we allow the Cargo files to be updated + # but avoid committing them. It is so `cmd_runner_apply_patches` can work + git restore --staged Cargo.* +} + +main "$@" diff --git a/scripts/command-utils.sh b/scripts/command-utils.sh new file mode 100644 index 000000000000..252e4c86480e --- /dev/null +++ b/scripts/command-utils.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash + +if [ "${LOADED_UTILS_SH:-}" ]; then + return +else + export LOADED_UTILS_SH=true +fi + +export ARTIFACTS_DIR="$PWD/.git/.artifacts" + +die() { + if [ "${1:-}" ]; then + >&2 echo "$1" + fi + exit 1 +} + +get_arg() { + local arg_type="$1" + shift + + local is_required + case "$arg_type" in + required|required-many) + is_required=true + ;; + optional|optional-many) ;; + *) + die "Invalid is_required argument \"$2\" in get_arg" + ;; + esac + + local has_many_values + if [ "${arg_type: -6}" == "-many" ]; then + has_many_values=true + fi + + local option_arg="$1" + shift + + local args=("$@") + + unset out + out=() + + local get_next_arg + for arg in "${args[@]}"; do + if [ "${get_next_arg:-}" ]; then + out+=("$arg") + unset get_next_arg + if [ ! "${has_many_values:-}" ]; then + break + fi + # --foo=bar (get the value after '=') + elif [ "${arg:0:$(( ${#option_arg} + 1 ))}" == "$option_arg=" ]; then + out+=("${arg:$(( ${#option_arg} + 1 ))}") + if [ ! 
"${has_many_values:-}" ]; then + break + fi + # --foo bar (get the next argument) + elif [ "$arg" == "$option_arg" ]; then + get_next_arg=true + fi + done + + # arg list ended with --something but no argument was provided next + if [ "${get_next_arg:-}" ]; then + die "Expected argument after \"${args[-1]}"\" + fi + + if [ "${out[0]:-}" ]; then + if [ ! "${has_many_values:-}" ]; then + out="${out[0]}" + fi + elif [ "${is_required:-}" ]; then + die "Argument $option_arg is required, but was not found" + else + unset out + fi +} diff --git a/scripts/lib/bench-all-cumulus.sh b/scripts/lib/bench-all-cumulus.sh new file mode 100755 index 000000000000..f4c2a35c6b6b --- /dev/null +++ b/scripts/lib/bench-all-cumulus.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash +# originally moved from https://github.com/paritytech/cumulus/blob/445f9277ab55b4d930ced4fbbb38d27c617c6658/scripts/benchmarks-ci.sh + +# default RUST_LOG is warn, but could be overridden +export RUST_LOG="${RUST_LOG:-error}" + +THIS_DIR=$(dirname "${BASH_SOURCE[0]}") +. "$THIS_DIR/../command-utils.sh" + +POLKADOT_PARACHAIN="./target/$profile/polkadot-parachain" + +run_cumulus_bench() { + local artifactsDir="$ARTIFACTS_DIR" + local category=$1 + local runtimeName=$2 + local paraId=${3:-} + + local benchmarkOutput="$output_path/parachains/runtimes/$category/$runtimeName/src/weights" + local benchmarkRuntimeChain + if [[ ! -z "$paraId" ]]; then + benchmarkRuntimeChain="${runtimeName}-dev-$paraId" + else + benchmarkRuntimeChain="$runtimeName-dev" + fi + + local benchmarkMetadataOutputDir="$artifactsDir/$runtimeName" + mkdir -p "$benchmarkMetadataOutputDir" + + # Load all pallet names in an array. + echo "[+] Listing pallets for runtime $runtimeName for chain: $benchmarkRuntimeChain ..." 
+ local pallets=($( + $POLKADOT_PARACHAIN benchmark pallet --list --chain="${benchmarkRuntimeChain}" |\ + tail -n+2 |\ + cut -d',' -f1 |\ + sort |\ + uniq + )) + + if [ ${#pallets[@]} -ne 0 ]; then + echo "[+] Benchmarking ${#pallets[@]} pallets for runtime $runtimeName for chain: $benchmarkRuntimeChain, pallets:" + for pallet in "${pallets[@]}"; do + echo " [+] $pallet" + done + else + echo "$runtimeName pallet list not found in benchmarks-ci.sh" + exit 1 + fi + + for pallet in "${pallets[@]}"; do + # (by default) do not choose output_file, like `pallet_assets.rs` because it does not work for multiple instances + # `benchmark pallet` command will decide the output_file name if there are multiple instances + local output_file="" + local extra_args="" + # a little hack for pallet_xcm_benchmarks - we want to force custom implementation for XcmWeightInfo + if [[ "$pallet" == "pallet_xcm_benchmarks::generic" ]] || [[ "$pallet" == "pallet_xcm_benchmarks::fungible" ]]; then + output_file="xcm/${pallet//::/_}.rs" + extra_args="--template=$output_path/templates/xcm-bench-template.hbs" + fi + $POLKADOT_PARACHAIN benchmark pallet \ + $extra_args \ + --chain="${benchmarkRuntimeChain}" \ + --wasm-execution=compiled \ + --pallet="$pallet" \ + --no-storage-info \ + --no-median-slopes \ + --no-min-squares \ + --extrinsic='*' \ + --steps=50 \ + --repeat=20 \ + --json \ + --header="$output_path/file_header.txt" \ + --output="${benchmarkOutput}/${output_file}" >> "$benchmarkMetadataOutputDir/${pallet//::/_}_benchmark.json" + done +} + + +echo "[+] Compiling benchmarks..." 
+cargo build --profile $profile --locked --features=runtime-benchmarks -p polkadot-parachain-bin + +# Run benchmarks for all pallets of a given runtime if runtime argument provided +get_arg optional --runtime "$@" +runtime="${out:-""}" + +if [[ $runtime ]]; then + paraId="" + case "$runtime" in + asset-*) + category="assets" + ;; + collectives-*) + category="collectives" + ;; + coretime-*) + category="coretime" + ;; + bridge-*) + category="bridge-hubs" + ;; + contracts-*) + category="contracts" + ;; + people-*) + category="people" + ;; + glutton-*) + category="glutton" + paraId="1300" + ;; + *) + echo "Unknown runtime: $runtime" + exit 1 + ;; + esac + + run_cumulus_bench $category $runtime $paraId + +else # run all + # Assets + run_cumulus_bench assets asset-hub-rococo + run_cumulus_bench assets asset-hub-westend + + # Collectives + run_cumulus_bench collectives collectives-westend + + # Coretime + run_cumulus_bench coretime coretime-rococo + run_cumulus_bench coretime coretime-westend + + # People + run_cumulus_bench people people-rococo + run_cumulus_bench people people-westend + + # Bridge Hubs + run_cumulus_bench bridge-hubs bridge-hub-rococo + run_cumulus_bench bridge-hubs bridge-hub-westend + + # Glutton + run_cumulus_bench glutton glutton-westend 1300 +fi diff --git a/scripts/lib/bench-all-pallet.sh b/scripts/lib/bench-all-pallet.sh new file mode 100644 index 000000000000..e6908045ddbd --- /dev/null +++ b/scripts/lib/bench-all-pallet.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +set -eu -o pipefail +shopt -s inherit_errexit +shopt -s globstar + +. "$(dirname "${BASH_SOURCE[0]}")/../command-utils.sh" + +get_arg required --pallet "$@" +PALLET="${out:-""}" + +REPO_NAME="$(basename "$PWD")" +BASE_COMMAND="$(dirname "${BASH_SOURCE[0]}")/../../bench/bench.sh --noexit=true --subcommand=pallet" + +WEIGHT_FILE_PATHS=( $(find . 
-type f -name "${PALLET}.rs" -path "**/weights/*" | sed 's|^\./||g') ) + +# convert pallet_ranked_collective to ranked-collective +CLEAN_PALLET=$(echo $PALLET | sed 's/pallet_//g' | sed 's/_/-/g') + +# add substrate pallet weights to a list +SUBSTRATE_PALLET_PATH=$(ls substrate/frame/$CLEAN_PALLET/src/weights.rs || :) +if [ ! -z "${SUBSTRATE_PALLET_PATH}" ]; then + WEIGHT_FILE_PATHS+=("$SUBSTRATE_PALLET_PATH") +fi + +# add trappist pallet weights to a list +TRAPPIST_PALLET_PATH=$(ls pallet/$CLEAN_PALLET/src/weights.rs || :) +if [ ! -z "${TRAPPIST_PALLET_PATH}" ]; then + WEIGHT_FILE_PATHS+=("$TRAPPIST_PALLET_PATH") +fi + +COMMANDS=() + +if [ "${#WEIGHT_FILE_PATHS[@]}" -eq 0 ]; then + echo "No weights files found for pallet: $PALLET" + exit 1 +else + echo "Found weights files for pallet: $PALLET" +fi + +for f in ${WEIGHT_FILE_PATHS[@]}; do + echo "- $f" + # f examples: + # cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_balances.rs + # polkadot/runtime/rococo/src/weights/pallet_balances.rs + # runtime/trappist/src/weights/pallet_assets.rs + TARGET_DIR=$(echo $f | cut -d'/' -f 1) + + if [ "$REPO_NAME" == "polkadot-sdk" ]; then + case $TARGET_DIR in + cumulus) + TYPE=$(echo $f | cut -d'/' -f 2) + # Example: cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_balances.rs + if [ "$TYPE" == "parachains" ]; then + RUNTIME=$(echo $f | cut -d'/' -f 5) + RUNTIME_DIR=$(echo $f | cut -d'/' -f 4) + COMMANDS+=("$BASE_COMMAND --runtime=$RUNTIME --runtime_dir=$RUNTIME_DIR --target_dir=$TARGET_DIR --pallet=$PALLET") + fi + ;; + polkadot) + # Example: polkadot/runtime/rococo/src/weights/pallet_balances.rs + RUNTIME=$(echo $f | cut -d'/' -f 3) + COMMANDS+=("$BASE_COMMAND --runtime=$RUNTIME --target_dir=$TARGET_DIR --pallet=$PALLET") + ;; + substrate) + # Example: substrate/frame/contracts/src/weights.rs + COMMANDS+=("$BASE_COMMAND --target_dir=$TARGET_DIR --runtime=dev --pallet=$PALLET") + ;; + *) + echo "Unknown dir: $TARGET_DIR" + exit 1 + 
;; + esac + fi + + if [ "$REPO_NAME" == "trappist" ]; then + case $TARGET_DIR in + runtime) + TYPE=$(echo $f | cut -d'/' -f 2) + if [ "$TYPE" == "trappist" || "$TYPE" == "stout" ]; then + # Example: runtime/trappist/src/weights/pallet_assets.rs + COMMANDS+=("$BASE_COMMAND --target_dir=trappist --runtime=$TYPE --pallet=$PALLET") + fi + ;; + *) + echo "Unknown dir: $TARGET_DIR" + exit 1 + ;; + esac + fi +done + +for cmd in "${COMMANDS[@]}"; do + echo "Running command: $cmd" + . $cmd +done diff --git a/scripts/lib/bench-all-polkadot.sh b/scripts/lib/bench-all-polkadot.sh new file mode 100644 index 000000000000..ac52e00140e3 --- /dev/null +++ b/scripts/lib/bench-all-polkadot.sh @@ -0,0 +1,88 @@ +#!/bin/bash + +# Runs all benchmarks for all pallets, for a given runtime, provided by $1 +# Should be run on a reference machine to gain accurate benchmarks +# current reference machine: https://github.com/paritytech/polkadot/pull/6508/files +# original source: https://github.com/paritytech/polkadot/blob/b9842c4b52f6791fef6c11ecd020b22fe614f041/scripts/run_all_benches.sh + +get_arg required --runtime "$@" +runtime="${out:-""}" + +# default RUST_LOG is error, but could be overridden +export RUST_LOG="${RUST_LOG:-error}" + +echo "[+] Compiling benchmarks..." +cargo build --profile $profile --locked --features=runtime-benchmarks -p polkadot + +POLKADOT_BIN="./target/$profile/polkadot" + +# Update the block and extrinsic overhead weights. +echo "[+] Benchmarking block and extrinsic overheads..." +OUTPUT=$( + $POLKADOT_BIN benchmark overhead \ + --chain="${runtime}-dev" \ + --wasm-execution=compiled \ + --weight-path="$output_path/runtime/${runtime}/constants/src/weights/" \ + --warmup=10 \ + --repeat=100 \ + --header="$output_path/file_header.txt" +) +if [ $? -ne 0 ]; then + echo "$OUTPUT" >> "$ERR_FILE" + echo "[-] Failed to benchmark the block and extrinsic overheads. Error written to $ERR_FILE; continuing..." +fi + + +# Load all pallet names in an array. 
+PALLETS=($( + $POLKADOT_BIN benchmark pallet --list --chain="${runtime}-dev" |\ + tail -n+2 |\ + cut -d',' -f1 |\ + sort |\ + uniq +)) + +echo "[+] Benchmarking ${#PALLETS[@]} pallets for runtime $runtime" + +# Define the error file. +ERR_FILE="${ARTIFACTS_DIR}/benchmarking_errors.txt" +# Delete the error file before each run. +rm -f $ERR_FILE + +# Benchmark each pallet. +for PALLET in "${PALLETS[@]}"; do + echo "[+] Benchmarking $PALLET for $runtime"; + + output_file="" + if [[ $PALLET == *"::"* ]]; then + # translates e.g. "pallet_foo::bar" to "pallet_foo_bar" + output_file="${PALLET//::/_}.rs" + fi + + OUTPUT=$( + $POLKADOT_BIN benchmark pallet \ + --chain="${runtime}-dev" \ + --steps=50 \ + --repeat=20 \ + --no-storage-info \ + --no-median-slopes \ + --no-min-squares \ + --pallet="$PALLET" \ + --extrinsic="*" \ + --execution=wasm \ + --wasm-execution=compiled \ + --header="$output_path/file_header.txt" \ + --output="$output_path/runtime/${runtime}/src/weights/${output_file}" 2>&1 + ) + if [ $? -ne 0 ]; then + echo "$OUTPUT" >> "$ERR_FILE" + echo "[-] Failed to benchmark $PALLET. Error written to $ERR_FILE; continuing..." + fi +done + +# Check if the error file exists. +if [ -f "$ERR_FILE" ]; then + echo "[-] Some benchmarks failed. See: $ERR_FILE" +else + echo "[+] All benchmarks passed." +fi diff --git a/scripts/lib/bench-all-substrate.sh b/scripts/lib/bench-all-substrate.sh new file mode 100644 index 000000000000..eeb18cdd8bbb --- /dev/null +++ b/scripts/lib/bench-all-substrate.sh @@ -0,0 +1,148 @@ +#!/usr/bin/env bash + +# This file is part of Substrate. +# Copyright (C) 2022 Parity Technologies (UK) Ltd. +# SPDX-License-Identifier: Apache-2.0 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script has three parts which all use the Substrate runtime: +# - Pallet benchmarking to update the pallet weights +# - Overhead benchmarking for the Extrinsic and Block weights +# - Machine benchmarking +# +# Should be run on a reference machine to gain accurate benchmarks +# current reference machine: https://github.com/paritytech/substrate/pull/5848 + +# Original source: https://github.com/paritytech/substrate/blob/ff9921a260a67e3a71f25c8b402cd5c7da787a96/scripts/run_all_benchmarks.sh +# Fail if any sub-command in a pipe fails, not just the last one. +set -o pipefail +# Fail on undeclared variables. +set -u +# Fail if any sub-command fails. +set -e +# Fail on traps. +# set -E + +# default RUST_LOG is warn, but could be overridden +export RUST_LOG="${RUST_LOG:-error}" + +echo "[+] Compiling Substrate benchmarks..." +cargo build --profile=$profile --locked --features=runtime-benchmarks -p staging-node-cli + +# The executable to use. +SUBSTRATE="./target/$profile/substrate-node" + +# Manually exclude some pallets. +EXCLUDED_PALLETS=( + # Helper pallets + "pallet_election_provider_support_benchmarking" + # Pallets without automatic benchmarking + "pallet_babe" + "pallet_grandpa" + "pallet_mmr" + "pallet_offences" + # Only used for testing, does not need real weights. + "frame_benchmarking_pallet_pov" + "pallet_example_tasks" + "pallet_example_basic" + "pallet_example_split" + "pallet_example_kitchensink" + "pallet_example_mbm" + "tasks_example" +) + +# Load all pallet names in an array. 
+ALL_PALLETS=($( + $SUBSTRATE benchmark pallet --list --chain=dev |\ + tail -n+2 |\ + cut -d',' -f1 |\ + sort |\ + uniq +)) + +# Define the error file. +ERR_FILE="${ARTIFACTS_DIR}/benchmarking_errors.txt" + +# Delete the error file before each run. +rm -f "$ERR_FILE" + +mkdir -p "$(dirname "$ERR_FILE")" + +# Update the block and extrinsic overhead weights. +echo "[+] Benchmarking block and extrinsic overheads..." +OUTPUT=$( + $SUBSTRATE benchmark overhead \ + --chain=dev \ + --wasm-execution=compiled \ + --weight-path="$output_path/frame/support/src/weights/" \ + --header="$output_path/HEADER-APACHE2" \ + --warmup=10 \ + --repeat=100 2>&1 +) +if [ $? -ne 0 ]; then + echo "$OUTPUT" >> "$ERR_FILE" + echo "[-] Failed to benchmark the block and extrinsic overheads. Error written to $ERR_FILE; continuing..." +fi + +echo "[+] Benchmarking ${#ALL_PALLETS[@]} Substrate pallets and excluding ${#EXCLUDED_PALLETS[@]}." + +echo "[+] Excluded pallets ${EXCLUDED_PALLETS[@]}" +echo "[+] ------ " +echo "[+] Whole list pallets ${ALL_PALLETS[@]}" + +# Benchmark each pallet. +for PALLET in "${ALL_PALLETS[@]}"; do + FOLDER="$(echo "${PALLET#*_}" | tr '_' '-')"; + WEIGHT_FILE="$output_path/frame/${FOLDER}/src/weights.rs" + + # Skip the pallet if it is in the excluded list. + + if [[ " ${EXCLUDED_PALLETS[@]} " =~ " ${PALLET} " ]]; then + echo "[+] Skipping $PALLET as it is in the excluded list." + continue + fi + + echo "[+] Benchmarking $PALLET with weight file $WEIGHT_FILE"; + + set +e # Disable exit on error for the benchmarking of the pallets + OUTPUT=$( + $SUBSTRATE benchmark pallet \ + --chain=dev \ + --steps=50 \ + --repeat=20 \ + --pallet="$PALLET" \ + --no-storage-info \ + --no-median-slopes \ + --no-min-squares \ + --extrinsic="*" \ + --wasm-execution=compiled \ + --heap-pages=4096 \ + --output="$WEIGHT_FILE" \ + --header="$output_path/HEADER-APACHE2" \ + --template="$output_path/.maintain/frame-weight-template.hbs" 2>&1 + ) + if [ $? 
-ne 0 ]; then + echo -e "$PALLET: $OUTPUT\n" >> "$ERR_FILE" + echo "[-] Failed to benchmark $PALLET. Error written to $ERR_FILE; continuing..." + fi + set -e # Re-enable exit on error +done + + +# Check if the error file exists. +if [ -s "$ERR_FILE" ]; then + echo "[-] Some benchmarks failed. See: $ERR_FILE" + exit 1 +else + echo "[+] All benchmarks passed." +fi diff --git a/scripts/lib/bench-overhead.sh b/scripts/lib/bench-overhead.sh new file mode 100644 index 000000000000..c4cca8b4c128 --- /dev/null +++ b/scripts/lib/bench-overhead.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +THIS_DIR=$(dirname "${BASH_SOURCE[0]}") +. "$THIS_DIR/../command-utils.sh" + +bench_overhead_common_args=( + -- + benchmark + overhead + --wasm-execution=compiled + --warmup=10 + --repeat=100 +) +bench_overhead() { + local args + case "$target_dir" in + substrate) + args=( + --bin=substrate + "${bench_overhead_common_args[@]}" + --header="$output_path/HEADER-APACHE2" + --weight-path="$output_path/frame/support/src/weights" + --chain="dev" + ) + ;; + polkadot) + get_arg required --runtime "$@" + local runtime="${out:-""}" + args=( + --bin=polkadot + "${bench_overhead_common_args[@]}" + --header="$output_path/file_header.txt" + --weight-path="$output_path/runtime/$runtime/constants/src/weights" + --chain="$runtime-dev" + ) + ;; + cumulus) + get_arg required --runtime "$@" + local runtime="${out:-""}" + args=( + -p=polkadot-parachain-bin + "${bench_overhead_common_args[@]}" + --header="$output_path/file_header.txt" + --weight-path="$output_path/parachains/runtimes/assets/$runtime/src/weights" + --chain="$runtime" + ) + ;; + trappist) + get_arg required --runtime "$@" + local runtime="${out:-""}" + args=( + "${bench_overhead_common_args[@]}" + --header="$output_path/templates/file_header.txt" + --weight-path="$output_path/runtime/$runtime/src/weights" + --chain="$runtime-dev" + ) + ;; + *) + die "Target Dir \"$target_dir\" is not supported in bench_overhead" + ;; + esac + + cargo_run "${args[@]}" +} + 
+bench_overhead "$@" diff --git a/scripts/lib/bench-pallet.sh b/scripts/lib/bench-pallet.sh new file mode 100644 index 000000000000..15eac31e3a45 --- /dev/null +++ b/scripts/lib/bench-pallet.sh @@ -0,0 +1,178 @@ +#!/bin/bash + +THIS_DIR=$(dirname "${BASH_SOURCE[0]}") +. "$THIS_DIR/../command-utils.sh" + +bench_pallet_common_args=( + -- + benchmark + pallet + --steps=50 + --repeat=20 + --extrinsic="*" + --wasm-execution=compiled + --heap-pages=4096 + --json-file="${ARTIFACTS_DIR}/bench.json" +) +bench_pallet() { + get_arg required --subcommand "$@" + local subcommand="${out:-""}" + + get_arg required --runtime "$@" + local runtime="${out:-""}" + + get_arg required --pallet "$@" + local pallet="${out:-""}" + + local args + case "$target_dir" in + substrate) + args=( + --features=runtime-benchmarks + --manifest-path="$output_path/bin/node/cli/Cargo.toml" + "${bench_pallet_common_args[@]}" + --pallet="$pallet" + --chain="$runtime" + ) + + case "$subcommand" in + pallet) + # Translates e.g. "pallet_foo::bar" to "pallet_foo_bar" + local output_dir="${pallet//::/_}" + + # Substrate benchmarks are output to the "frame" directory but they aren't + # named exactly after the $pallet argument. 
For example: + # - When $pallet == pallet_balances, the output folder is frame/balances + # - When $pallet == frame_benchmarking, the output folder is frame/benchmarking + # The common pattern we infer from those examples is that we should remove + # the prefix + if [[ "$output_dir" =~ ^[A-Za-z]*[^A-Za-z](.*)$ ]]; then + output_dir="${BASH_REMATCH[1]}" + fi + + # We also need to translate '_' to '-' due to the folders' naming + # conventions + output_dir="${output_dir//_/-}" + + args+=( + --header="$output_path/HEADER-APACHE2" + --output="$output_path/frame/$output_dir/src/weights.rs" + --template="$output_path/.maintain/frame-weight-template.hbs" + ) + ;; + *) + die "Subcommand $subcommand is not supported for $target_dir in bench_pallet" + ;; + esac + ;; + polkadot) + # For backward compatibility: replace "-dev" with "" + runtime=${runtime/-dev/} + + local weights_dir="$output_path/runtime/${runtime}/src/weights" + + args=( + --bin=polkadot + --features=runtime-benchmarks + "${bench_pallet_common_args[@]}" + --pallet="$pallet" + --chain="${runtime}-dev" + ) + + case "$subcommand" in + pallet) + args+=( + --header="$output_path/file_header.txt" + --output="${weights_dir}/" + ) + ;; + xcm) + args+=( + --header="$output_path/file_header.txt" + --template="$output_path/xcm/pallet-xcm-benchmarks/template.hbs" + --output="${weights_dir}/xcm/" + ) + ;; + *) + die "Subcommand $subcommand is not supported for $target_dir in bench_pallet" + ;; + esac + ;; + cumulus) + get_arg required --runtime_dir "$@" + local runtime_dir="${out:-""}" + local chain="$runtime" + + # to support specifying parachain id from runtime name (e.g. ["glutton-westend", "glutton-westend-dev-1300"]) + # If runtime ends with "-dev" or "-dev-\d+", leave as it is, otherwise concat "-dev" at the end of $chain + if [[ ! 
"$runtime" =~ -dev(-[0-9]+)?$ ]]; then + chain="${runtime}-dev" + fi + + # replace "-dev" or "-dev-\d+" with "" for runtime + runtime=$(echo "$runtime" | sed 's/-dev.*//g') + + args=( + -p=polkadot-parachain-bin + --features=runtime-benchmarks + "${bench_pallet_common_args[@]}" + --pallet="$pallet" + --chain="${chain}" + --header="$output_path/file_header.txt" + ) + + case "$subcommand" in + pallet) + args+=( + --output="$output_path/parachains/runtimes/$runtime_dir/$runtime/src/weights/" + ) + ;; + xcm) + mkdir -p "$output_path/parachains/runtimes/$runtime_dir/$runtime/src/weights/xcm" + args+=( + --template="$output_path/templates/xcm-bench-template.hbs" + --output="$output_path/parachains/runtimes/$runtime_dir/$runtime/src/weights/xcm/" + ) + ;; + *) + die "Subcommand $subcommand is not supported for $target_dir in bench_pallet" + ;; + esac + ;; + trappist) + local weights_dir="$output_path/runtime/$runtime/src/weights" + + args=( + --features=runtime-benchmarks + "${bench_pallet_common_args[@]}" + --pallet="$pallet" + --chain="${runtime}-dev" + --header="$output_path/templates/file_header.txt" + ) + + case "$subcommand" in + pallet) + args+=( + --output="${weights_dir}/" + ) + ;; + xcm) + args+=( + --template="$output_path/templates/xcm-bench-template.hbs" + --output="${weights_dir}/xcm/" + ) + ;; + *) + die "Subcommand $subcommand is not supported for $target_dir in bench_pallet" + ;; + esac + ;; + *) + die "Repository $target_dir is not supported in bench_pallet" + ;; + esac + + cargo_run "${args[@]}" +} + +bench_pallet "$@" diff --git a/scripts/sync.sh b/scripts/sync.sh new file mode 100755 index 000000000000..b5d8a5219937 --- /dev/null +++ b/scripts/sync.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash + +set -eu -o pipefail + +. 
"$(realpath "$(dirname "${BASH_SOURCE[0]}")/command-utils.sh")" + + +# Function to check syncing status +check_syncing() { + # Send the system_health request and parse the isSyncing field + RESPONSE=$(curl -sSX POST http://127.0.0.1:9944 \ + --header 'Content-Type: application/json' \ + --data-raw '{"jsonrpc": "2.0", "method": "system_health", "params": [], "id": "1"}') + + # Check for errors in the curl command + if [ $? -ne 0 ]; then + echo "Error: Unable to send request to Polkadot node" + fi + + IS_SYNCING=$(echo $RESPONSE | jq -r '.result.isSyncing') + + # Check for errors in the jq command or missing field in the response + if [ $? -ne 0 ] || [ "$IS_SYNCING" == "null" ]; then + echo "Error: Unable to parse sync status from response" + fi + + # Return the isSyncing value + echo $IS_SYNCING +} + +main() { + get_arg required --chain "$@" + local chain="${out:-""}" + + get_arg required --type "$@" + local type="${out:-""}" + + export RUST_LOG="${RUST_LOG:-remote-ext=debug,runtime=trace}" + + cargo build --release + + cp "./target/release/polkadot" ./polkadot-bin + + # Start sync. + # "&" runs the process in the background + # "> /dev/tty" redirects the output of the process to the terminal + ./polkadot-bin --sync="$type" --chain="$chain" > "$ARTIFACTS_DIR/sync.log" 2>&1 & + + # Get the PID of process + POLKADOT_SYNC_PID=$! + + sleep 10 + + # Poll the node every 100 seconds until syncing is complete + while :; do + SYNC_STATUS="$(check_syncing)" + if [ "$SYNC_STATUS" == "true" ]; then + echo "Node is still syncing..." + sleep 100 + elif [ "$SYNC_STATUS" == "false" ]; then + echo "Node sync is complete!" 
+ kill "$POLKADOT_SYNC_PID" # Stop the Polkadot node process once syncing is complete + exit 0 # Success + elif [[ "$SYNC_STATUS" = Error:* ]]; then + echo "$SYNC_STATUS" + exit 1 # Error + else + echo "Unknown error: $SYNC_STATUS" + exit 1 # Unknown error + fi + done +} + +main "$@" From 739c37bfd6df30fac0ffb9b491ee2495e1753054 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Wed, 19 Jun 2024 12:58:29 +0300 Subject: [PATCH 46/52] Fix core sharing and make use of scheduling_lookahead (#4724) Implements most of https://github.com/paritytech/polkadot-sdk/issues/1797 Core sharing (two parachains or more marachains scheduled on the same core with the same `PartsOf57600` value) was not working correctly. The expected behaviour is to have Backed and Included event in each block for the paras sharing the core and the paras should take turns. E.g. for two cores we expect: Backed(a); Included(a)+Backed(b); Included(b)+Backed(a); etc. Instead of this each block contains just one event and there are a lot of gaps (blocks w/o events) during the session. Core sharing should also work when collators are building collations ahead of time TODOs: - [x] Add a zombienet test verifying that the behaviour mentioned above works. 
- [x] prdoc --------- Co-authored-by: alindima --- .gitlab/pipeline/zombienet/polkadot.yml | 17 ++ Cargo.lock | 1 + polkadot/node/core/backing/src/lib.rs | 88 ++++++----- polkadot/node/core/backing/src/tests/mod.rs | 45 +++++- .../src/tests/prospective_parachains.rs | 20 +++ .../core/prospective-parachains/Cargo.toml | 1 + .../core/prospective-parachains/src/lib.rs | 73 +++++---- .../core/prospective-parachains/src/tests.rs | 145 +++++++++++++----- .../src/collator_side/mod.rs | 30 ++-- .../src/collator_side/tests/mod.rs | 57 +++++-- .../tests/prospective_parachains.rs | 89 +---------- .../src/validator_side/collation.rs | 6 +- .../src/validator_side/mod.rs | 74 ++++----- .../src/validator_side/tests/mod.rs | 38 ++++- .../tests/prospective_parachains.rs | 20 +++ .../statement-distribution/src/v2/mod.rs | 42 ++--- polkadot/node/subsystem-util/src/vstaging.rs | 15 +- .../src/runtime_api_impl/vstaging.rs | 8 +- polkadot/runtime/parachains/src/scheduler.rs | 3 + polkadot/zombienet_tests/assign-core.js | 48 ++++++ .../0001-basic-3cores-6s-blocks.zndsl | 4 +- ...stic-scaling-doesnt-break-parachains.zndsl | 4 +- .../elastic_scaling/assign-core.js | 40 +---- .../functional/0015-coretime-shared-core.toml | 44 ++++++ .../0015-coretime-shared-core.zndsl | 16 ++ .../functional/0015-force-register-paras.js | 63 ++++++++ .../zombienet_tests/functional/assign-core.js | 1 + prdoc/pr_4724.prdoc | 24 +++ 28 files changed, 675 insertions(+), 341 deletions(-) create mode 100644 polkadot/zombienet_tests/assign-core.js mode change 100644 => 120000 polkadot/zombienet_tests/elastic_scaling/assign-core.js create mode 100644 polkadot/zombienet_tests/functional/0015-coretime-shared-core.toml create mode 100644 polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl create mode 100644 polkadot/zombienet_tests/functional/0015-force-register-paras.js create mode 120000 polkadot/zombienet_tests/functional/assign-core.js create mode 100644 prdoc/pr_4724.prdoc diff --git 
a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index b158cbe0b5aa..90251082077c 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -162,6 +162,9 @@ zombienet-polkadot-elastic-scaling-0001-basic-3cores-6s-blocks: - .zombienet-polkadot-common variables: FORCED_INFRA_INSTANCE: "spot-iops" + before_script: + - !reference [.zombienet-polkadot-common, before_script] + - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/elastic_scaling script: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh --local-dir="${LOCAL_DIR}/elastic_scaling" @@ -170,6 +173,9 @@ zombienet-polkadot-elastic-scaling-0001-basic-3cores-6s-blocks: zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains: extends: - .zombienet-polkadot-common + before_script: + - !reference [.zombienet-polkadot-common, before_script] + - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/elastic_scaling script: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh --local-dir="${LOCAL_DIR}/elastic_scaling" @@ -199,6 +205,17 @@ zombienet-polkadot-functional-0014-chunk-fetching-network-compatibility: --local-dir="${LOCAL_DIR}/functional" --test="0014-chunk-fetching-network-compatibility.zndsl" +zombienet-polkadot-functional-0015-coretime-shared-core: + extends: + - .zombienet-polkadot-common + before_script: + - !reference [.zombienet-polkadot-common, before_script] + - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/functional + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0015-coretime-shared-core.zndsl" + zombienet-polkadot-smoke-0001-parachains-smoke-test: extends: - .zombienet-polkadot-common diff --git a/Cargo.lock b/Cargo.lock index 113cfa06a84a..bbb785a618a8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13209,6 +13209,7 @@ dependencies = [ 
"polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rstest", "sc-keystore", "sp-application-crypto", "sp-core", diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 38e8a93bb048..1bda81c5197e 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -102,6 +102,7 @@ use polkadot_node_subsystem_util::{ runtime::{ self, prospective_parachains_mode, request_min_backing_votes, ProspectiveParachainsMode, }, + vstaging::{fetch_claim_queue, ClaimQueueSnapshot}, Validator, }; use polkadot_primitives::{ @@ -212,8 +213,6 @@ struct PerRelayParentState { parent: Hash, /// Session index. session_index: SessionIndex, - /// The `ParaId` assigned to the local validator at this relay parent. - assigned_para: Option, /// The `CoreIndex` assigned to the local validator at this relay parent. assigned_core: Option, /// The candidates that are backed by enough validators in their group, by hash. @@ -233,8 +232,11 @@ struct PerRelayParentState { /// If true, we're appending extra bits in the BackedCandidate validator indices bitfield, /// which represent the assigned core index. True if ElasticScalingMVP is enabled. inject_core_index: bool, - /// The core states for all cores. - cores: Vec, + /// The number of cores. + n_cores: u32, + /// Claim queue state. If the runtime API is not available, it'll be populated with info from + /// availability cores. + claim_queue: ClaimQueueSnapshot, /// The validator index -> group mapping at this relay parent. validator_to_group: Arc>>, /// The associated group rotation information. @@ -1004,20 +1006,19 @@ macro_rules! 
try_runtime_api { fn core_index_from_statement( validator_to_group: &IndexedVec>, group_rotation_info: &GroupRotationInfo, - cores: &[CoreState], + n_cores: u32, + claim_queue: &ClaimQueueSnapshot, statement: &SignedFullStatementWithPVD, ) -> Option { let compact_statement = statement.as_unchecked(); let candidate_hash = CandidateHash(*compact_statement.unchecked_payload().candidate_hash()); - let n_cores = cores.len(); - gum::trace!( target:LOG_TARGET, ?group_rotation_info, ?statement, ?validator_to_group, - n_cores = ?cores.len(), + n_cores, ?candidate_hash, "Extracting core index from statement" ); @@ -1029,7 +1030,7 @@ fn core_index_from_statement( ?group_rotation_info, ?statement, ?validator_to_group, - n_cores = ?cores.len() , + n_cores, ?candidate_hash, "Invalid validator index: {:?}", statement_validator_index @@ -1038,37 +1039,25 @@ fn core_index_from_statement( }; // First check if the statement para id matches the core assignment. - let core_index = group_rotation_info.core_for_group(*group_index, n_cores); + let core_index = group_rotation_info.core_for_group(*group_index, n_cores as _); - if core_index.0 as usize > n_cores { + if core_index.0 > n_cores { gum::warn!(target: LOG_TARGET, ?candidate_hash, ?core_index, n_cores, "Invalid CoreIndex"); return None } if let StatementWithPVD::Seconded(candidate, _pvd) = statement.payload() { let candidate_para_id = candidate.descriptor.para_id; - let assigned_para_id = match &cores[core_index.0 as usize] { - CoreState::Free => { - gum::debug!(target: LOG_TARGET, ?candidate_hash, "Invalid CoreIndex, core is not assigned to any para_id"); - return None - }, - CoreState::Occupied(occupied) => - if let Some(next) = &occupied.next_up_on_available { - next.para_id - } else { - return None - }, - CoreState::Scheduled(scheduled) => scheduled.para_id, - }; + let mut assigned_paras = claim_queue.iter_claims_for_core(&core_index); - if assigned_para_id != candidate_para_id { + if !assigned_paras.any(|id| id == 
&candidate_para_id) { gum::debug!( target: LOG_TARGET, ?candidate_hash, ?core_index, - ?assigned_para_id, + assigned_paras = ?claim_queue.iter_claims_for_core(&core_index).collect::>(), ?candidate_para_id, - "Invalid CoreIndex, core is assigned to a different para_id" + "Invalid CoreIndex, core is not assigned to this para_id" ); return None } @@ -1129,6 +1118,8 @@ async fn construct_per_relay_parent_state( Error::UtilError(TryFrom::try_from(e).expect("the conversion is infallible; qed")) })?; + let maybe_claim_queue = try_runtime_api!(fetch_claim_queue(ctx.sender(), parent).await); + let signing_context = SigningContext { parent_hash: parent, session_index }; let validator = match Validator::construct( &validators, @@ -1153,31 +1144,35 @@ async fn construct_per_relay_parent_state( let mut groups = HashMap::>::new(); let mut assigned_core = None; - let mut assigned_para = None; + + let has_claim_queue = maybe_claim_queue.is_some(); + let mut claim_queue = maybe_claim_queue.unwrap_or_default().0; for (idx, core) in cores.iter().enumerate() { - let core_para_id = match core { - CoreState::Scheduled(scheduled) => scheduled.para_id, - CoreState::Occupied(occupied) => - if mode.is_enabled() { + let core_index = CoreIndex(idx as _); + + if !has_claim_queue { + match core { + CoreState::Scheduled(scheduled) => + claim_queue.insert(core_index, [scheduled.para_id].into_iter().collect()), + CoreState::Occupied(occupied) if mode.is_enabled() => { // Async backing makes it legal to build on top of // occupied core. 
if let Some(next) = &occupied.next_up_on_available { - next.para_id + claim_queue.insert(core_index, [next.para_id].into_iter().collect()) } else { continue } - } else { - continue }, - CoreState::Free => continue, - }; + _ => continue, + }; + } else if !claim_queue.contains_key(&core_index) { + continue + } - let core_index = CoreIndex(idx as _); let group_index = group_rotation_info.group_for_core(core_index, n_cores); if let Some(g) = validator_groups.get(group_index.0 as usize) { if validator.as_ref().map_or(false, |v| g.contains(&v.index())) { - assigned_para = Some(core_para_id); assigned_core = Some(core_index); } groups.insert(core_index, g.clone()); @@ -1212,7 +1207,6 @@ async fn construct_per_relay_parent_state( parent, session_index, assigned_core, - assigned_para, backed: HashSet::new(), table: Table::new(table_config), table_context, @@ -1221,7 +1215,8 @@ async fn construct_per_relay_parent_state( fallbacks: HashMap::new(), minimum_backing_votes, inject_core_index, - cores, + n_cores: cores.len() as u32, + claim_queue: ClaimQueueSnapshot::from(claim_queue), validator_to_group: validator_to_group.clone(), group_rotation_info, })) @@ -1674,7 +1669,8 @@ async fn import_statement( let core = core_index_from_statement( &rp_state.validator_to_group, &rp_state.group_rotation_info, - &rp_state.cores, + rp_state.n_cores, + &rp_state.claim_queue, statement, ) .ok_or(Error::CoreIndexUnavailable)?; @@ -2098,12 +2094,14 @@ async fn handle_second_message( return Ok(()) } + let assigned_paras = rp_state.assigned_core.and_then(|core| rp_state.claim_queue.0.get(&core)); + // Sanity check that candidate is from our assignment. 
- if Some(candidate.descriptor().para_id) != rp_state.assigned_para { + if !matches!(assigned_paras, Some(paras) if paras.contains(&candidate.descriptor().para_id)) { gum::debug!( target: LOG_TARGET, our_assignment_core = ?rp_state.assigned_core, - our_assignment_para = ?rp_state.assigned_para, + our_assignment_paras = ?assigned_paras, collation = ?candidate.descriptor().para_id, "Subsystem asked to second for para outside of our assignment", ); @@ -2113,7 +2111,7 @@ async fn handle_second_message( gum::debug!( target: LOG_TARGET, our_assignment_core = ?rp_state.assigned_core, - our_assignment_para = ?rp_state.assigned_para, + our_assignment_paras = ?assigned_paras, collation = ?candidate.descriptor().para_id, "Current assignments vs collation", ); diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index bb23c7fbeb24..5f2bc7e18424 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -42,7 +42,10 @@ use sp_application_crypto::AppCrypto; use sp_keyring::Sr25519Keyring; use sp_keystore::Keystore; use sp_tracing as _; -use std::{collections::HashMap, time::Duration}; +use std::{ + collections::{BTreeMap, HashMap, VecDeque}, + time::Duration, +}; mod prospective_parachains; @@ -75,6 +78,7 @@ pub(crate) struct TestState { validator_groups: (Vec>, GroupRotationInfo), validator_to_group: IndexedVec>, availability_cores: Vec, + claim_queue: BTreeMap>, head_data: HashMap, signing_context: SigningContext, relay_parent: Hash, @@ -130,6 +134,10 @@ impl Default for TestState { CoreState::Scheduled(ScheduledCore { para_id: chain_b, collator: None }), ]; + let mut claim_queue = BTreeMap::new(); + claim_queue.insert(CoreIndex(0), [chain_a].into_iter().collect()); + claim_queue.insert(CoreIndex(1), [chain_b].into_iter().collect()); + let mut head_data = HashMap::new(); head_data.insert(chain_a, HeadData(vec![4, 5, 6])); head_data.insert(chain_b, HeadData(vec![5, 6, 7])); @@ 
-153,6 +161,7 @@ impl Default for TestState { validator_groups: (validator_groups, group_rotation_info), validator_to_group, availability_cores, + claim_queue, head_data, validation_data, signing_context, @@ -338,6 +347,26 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS tx.send(Ok(test_state.disabled_validators.clone())).unwrap(); } ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok( + test_state.claim_queue.clone() + )).unwrap(); + } + ); } async fn assert_validation_requests( @@ -730,11 +759,16 @@ fn get_backed_candidate_preserves_order() { // Assign the second core to the same para as the first one. test_state.availability_cores[1] = CoreState::Scheduled(ScheduledCore { para_id: test_state.chain_ids[0], collator: None }); + *test_state.claim_queue.get_mut(&CoreIndex(1)).unwrap() = + [test_state.chain_ids[0]].into_iter().collect(); // Add another availability core for paraid 2. 
test_state.availability_cores.push(CoreState::Scheduled(ScheduledCore { para_id: test_state.chain_ids[1], collator: None, })); + test_state + .claim_queue + .insert(CoreIndex(2), [test_state.chain_ids[1]].into_iter().collect()); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; @@ -1103,7 +1137,8 @@ fn extract_core_index_from_statement_works() { let core_index_1 = core_index_from_statement( &test_state.validator_to_group, &test_state.validator_groups.1, - &test_state.availability_cores, + test_state.availability_cores.len() as _, + &test_state.claim_queue.clone().into(), &signed_statement_1, ) .unwrap(); @@ -1113,7 +1148,8 @@ fn extract_core_index_from_statement_works() { let core_index_2 = core_index_from_statement( &test_state.validator_to_group, &test_state.validator_groups.1, - &test_state.availability_cores, + test_state.availability_cores.len() as _, + &test_state.claim_queue.clone().into(), &signed_statement_2, ); @@ -1123,7 +1159,8 @@ fn extract_core_index_from_statement_works() { let core_index_3 = core_index_from_statement( &test_state.validator_to_group, &test_state.validator_groups.1, - &test_state.availability_cores, + test_state.availability_cores.len() as _, + &test_state.claim_queue.clone().into(), &signed_statement_3, ) .unwrap(); diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index 74490c84eb18..15bc0b4a1139 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -212,6 +212,26 @@ async fn activate_leaf( tx.send(Ok(Vec::new())).unwrap(); } ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + ) if parent == hash => { + 
tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) + ) if parent == hash => { + tx.send(Ok( + test_state.claim_queue.clone() + )).unwrap(); + } + ); } } diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index f3193153be89..b9573ee98519 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -32,3 +32,4 @@ sc-keystore = { path = "../../../../substrate/client/keystore" } sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } sp-keyring = { path = "../../../../substrate/primitives/keyring" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } +rstest = "0.18.2" diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index d5bb5ff76ba8..e4b6deffdf4a 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -44,6 +44,7 @@ use polkadot_node_subsystem_util::{ inclusion_emulator::{Constraints, RelayChainBlockInfo}, request_session_index_for_child, runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, + vstaging::fetch_claim_queue, }; use polkadot_primitives::{ async_backing::CandidatePendingAvailability, BlockNumber, CandidateHash, @@ -870,37 +871,51 @@ async fn fetch_backing_state( async fn fetch_upcoming_paras( ctx: &mut Context, relay_parent: Hash, -) -> JfyiErrorResult> { - let (tx, rx) = oneshot::channel(); - - // This'll have to get more sophisticated with parathreads, - // but for now we can just use the `AvailabilityCores`. 
- ctx.send_message(RuntimeApiMessage::Request( - relay_parent, - RuntimeApiRequest::AvailabilityCores(tx), - )) - .await; - - let cores = rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??; - let mut upcoming = HashSet::new(); - for core in cores { - match core { - CoreState::Occupied(occupied) => { - if let Some(next_up_on_available) = occupied.next_up_on_available { - upcoming.insert(next_up_on_available.para_id); - } - if let Some(next_up_on_time_out) = occupied.next_up_on_time_out { - upcoming.insert(next_up_on_time_out.para_id); +) -> JfyiErrorResult> { + Ok(match fetch_claim_queue(ctx.sender(), relay_parent).await? { + Some(claim_queue) => { + // Runtime supports claim queue - use it + claim_queue + .iter_all_claims() + .flat_map(|(_, paras)| paras.into_iter()) + .copied() + .collect() + }, + None => { + // fallback to availability cores - remove this branch once claim queue is released + // everywhere + let (tx, rx) = oneshot::channel(); + ctx.send_message(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::AvailabilityCores(tx), + )) + .await; + + let cores = rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??; + + let mut upcoming = HashSet::with_capacity(cores.len()); + for core in cores { + match core { + CoreState::Occupied(occupied) => { + // core sharing won't work optimally with this branch because the collations + // can't be prepared in advance. 
+ if let Some(next_up_on_available) = occupied.next_up_on_available { + upcoming.insert(next_up_on_available.para_id); + } + if let Some(next_up_on_time_out) = occupied.next_up_on_time_out { + upcoming.insert(next_up_on_time_out.para_id); + } + }, + CoreState::Scheduled(scheduled) => { + upcoming.insert(scheduled.para_id); + }, + CoreState::Free => {}, } - }, - CoreState::Scheduled(scheduled) => { - upcoming.insert(scheduled.para_id); - }, - CoreState::Free => {}, - } - } + } - Ok(upcoming.into_iter().collect()) + upcoming + }, + }) } // Fetch ancestors in descending order, up to the amount requested. diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index d2fc3cbd3623..221fbf4c4e60 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -26,11 +26,15 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ async_backing::{AsyncBackingParams, BackingState, Constraints, InboundHrmpLimitations}, - CommittedCandidateReceipt, HeadData, Header, PersistedValidationData, ScheduledCore, + CommittedCandidateReceipt, CoreIndex, HeadData, Header, PersistedValidationData, ScheduledCore, ValidationCodeHash, }; use polkadot_primitives_test_helpers::make_candidate; -use std::sync::Arc; +use rstest::rstest; +use std::{ + collections::{BTreeMap, VecDeque}, + sync::Arc, +}; use test_helpers::mock::new_leaf; const ALLOWED_ANCESTRY_LEN: u32 = 3; @@ -70,7 +74,8 @@ fn dummy_constraints( } struct TestState { - availability_cores: Vec, + claim_queue: BTreeMap>, + runtime_api_version: u32, validation_code_hash: ValidationCodeHash, } @@ -79,13 +84,23 @@ impl Default for TestState { let chain_a = ParaId::from(1); let chain_b = ParaId::from(2); - let availability_cores = vec![ - CoreState::Scheduled(ScheduledCore { para_id: chain_a, collator: None }), - 
CoreState::Scheduled(ScheduledCore { para_id: chain_b, collator: None }), - ]; + let mut claim_queue = BTreeMap::new(); + claim_queue.insert(CoreIndex(0), [chain_a].into_iter().collect()); + claim_queue.insert(CoreIndex(1), [chain_b].into_iter().collect()); + let validation_code_hash = Hash::repeat_byte(42).into(); - Self { availability_cores, validation_code_hash } + Self { + validation_code_hash, + claim_queue, + runtime_api_version: RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT, + } + } +} + +impl TestState { + fn set_runtime_api_version(&mut self, version: u32) { + self.runtime_api_version = version; } } @@ -227,12 +242,39 @@ async fn handle_leaf_activation( assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) ) if parent == *hash => { - tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + tx.send( + Ok(test_state.runtime_api_version) + ).unwrap(); } ); + if test_state.runtime_api_version < RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + ) if parent == *hash => { + tx.send(Ok(test_state.claim_queue.values().map(|paras| CoreState::Scheduled( + ScheduledCore { + para_id: *paras.front().unwrap(), + collator: None + } + )).collect())).unwrap(); + } + ); + } else { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) + ) if parent == *hash => { + tx.send(Ok(test_state.claim_queue.clone())).unwrap(); + } + ); + } + send_block_header(virtual_overseer, *hash, *number).await; // Check that subsystem job issues a request for ancestors. 
@@ -277,14 +319,16 @@ async fn handle_leaf_activation( ); } - for _ in 0..test_state.availability_cores.len() { + let paras: HashSet<_> = test_state.claim_queue.values().flatten().collect(); + + for _ in 0..paras.len() { let message = virtual_overseer.recv().await; // Get the para we are working with since the order is not deterministic. - let para_id = match message { + let para_id = match &message { AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, RuntimeApiRequest::ParaBackingState(p_id, _), - )) => p_id, + )) => *p_id, _ => panic!("received unexpected message {:?}", message), }; @@ -505,9 +549,18 @@ fn should_do_no_work_if_async_backing_disabled_for_leaf() { // - Two for the same leaf A (one for parachain 1 and one for parachain 2) // - One for leaf B on parachain 1 // - One for leaf C on parachain 2 +// Also tests a claim queue size larger than 1. #[test] fn introduce_candidates_basic() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); + + let chain_a = ParaId::from(1); + let chain_b = ParaId::from(2); + let mut claim_queue = BTreeMap::new(); + claim_queue.insert(CoreIndex(0), [chain_a, chain_b].into_iter().collect()); + + test_state.claim_queue = claim_queue; + let view = test_harness(|mut virtual_overseer| async move { // Leaf A let leaf_a = TestLeaf { @@ -2032,9 +2085,15 @@ fn check_pvd_query() { // Test simultaneously activating and deactivating leaves, and simultaneously deactivating // multiple leaves. -#[test] -fn correctly_updates_leaves() { - let test_state = TestState::default(); +// This test is parametrised with the runtime api version. For versions that don't support the claim +// queue API, we check that av-cores are used. 
+#[rstest] +#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)] +#[case(8)] +fn correctly_updates_leaves(#[case] runtime_api_version: u32) { + let mut test_state = TestState::default(); + test_state.set_runtime_api_version(runtime_api_version); + let view = test_harness(|mut virtual_overseer| async move { // Leaf A let leaf_a = TestLeaf { @@ -2140,15 +2199,12 @@ fn correctly_updates_leaves() { fn persists_pending_availability_candidate() { let mut test_state = TestState::default(); let para_id = ParaId::from(1); - test_state.availability_cores = test_state - .availability_cores + test_state.claim_queue = test_state + .claim_queue .into_iter() - .filter(|core| match core { - CoreState::Scheduled(scheduled_core) => scheduled_core.para_id == para_id, - _ => false, - }) + .filter(|(_, paras)| matches!(paras.front(), Some(para) if para == ¶_id)) .collect(); - assert_eq!(test_state.availability_cores.len(), 1); + assert_eq!(test_state.claim_queue.len(), 1); test_harness(|mut virtual_overseer| async move { let para_head = HeadData(vec![1, 2, 3]); @@ -2237,18 +2293,15 @@ fn persists_pending_availability_candidate() { } #[test] -fn backwards_compatible() { +fn backwards_compatible_with_non_async_backing_params() { let mut test_state = TestState::default(); let para_id = ParaId::from(1); - test_state.availability_cores = test_state - .availability_cores + test_state.claim_queue = test_state + .claim_queue .into_iter() - .filter(|core| match core { - CoreState::Scheduled(scheduled_core) => scheduled_core.para_id == para_id, - _ => false, - }) + .filter(|(_, paras)| matches!(paras.front(), Some(para) if para == ¶_id)) .collect(); - assert_eq!(test_state.availability_cores.len(), 1); + assert_eq!(test_state.claim_queue.len(), 1); test_harness(|mut virtual_overseer| async move { let para_head = HeadData(vec![1, 2, 3]); @@ -2350,20 +2403,30 @@ fn uses_ancestry_only_within_session() { .await; assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - 
RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) - ) if parent == hash => { - tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len - })).unwrap(); } - ); + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::AsyncBackingParams(tx) + )) if parent == hash => { + tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len})).unwrap(); + }); assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + ) if parent == hash => { + tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) ) if parent == hash => { - tx.send(Ok(Vec::new())).unwrap(); + tx.send(Ok(BTreeMap::new())).unwrap(); } ); diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index 80a85420b392..5c201542eb56 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -51,6 +51,7 @@ use polkadot_node_subsystem_util::{ get_availability_cores, get_group_rotation_info, prospective_parachains_mode, ProspectiveParachainsMode, RuntimeInfo, }, + vstaging::fetch_claim_queue, TimeoutExt, }; use polkadot_primitives::{ @@ -579,22 +580,27 @@ async fn determine_cores( let cores = get_availability_cores(sender, relay_parent).await?; let n_cores = cores.len(); let mut assigned_cores = Vec::new(); + let maybe_claim_queue = fetch_claim_queue(sender, relay_parent).await?; for (idx, core) in cores.iter().enumerate() { - let core_para_id = match 
core { - CoreState::Scheduled(scheduled) => Some(scheduled.para_id), - CoreState::Occupied(occupied) => - if relay_parent_mode.is_enabled() { - // With async backing we don't care about the core state, - // it is only needed for figuring our validators group. - Some(occupied.candidate_descriptor.para_id) - } else { - None - }, - CoreState::Free => None, + let core_is_scheduled = match maybe_claim_queue { + Some(ref claim_queue) => { + // Runtime supports claim queue - use it. + claim_queue + .iter_claims_for_core(&CoreIndex(idx as u32)) + .any(|para| para == ¶_id) + }, + None => match core { + CoreState::Scheduled(scheduled) if scheduled.para_id == para_id => true, + CoreState::Occupied(occupied) if relay_parent_mode.is_enabled() => + // With async backing we don't care about the core state, + // it is only needed for figuring our validators group. + occupied.next_up_on_available.as_ref().map(|c| c.para_id) == Some(para_id), + _ => false, + }, }; - if core_para_id == Some(para_id) { + if core_is_scheduled { assigned_cores.push(CoreIndex::from(idx as u32)); } } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index a13e99df4ab4..13601ca7a005 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -16,7 +16,11 @@ use super::*; -use std::{collections::HashSet, sync::Arc, time::Duration}; +use std::{ + collections::{BTreeMap, HashSet, VecDeque}, + sync::Arc, + time::Duration, +}; use assert_matches::assert_matches; use futures::{executor, future, Future}; @@ -66,7 +70,7 @@ struct TestState { group_rotation_info: GroupRotationInfo, validator_peer_id: Vec, relay_parent: Hash, - availability_cores: Vec, + claim_queue: BTreeMap>, local_peer_id: PeerId, collator_pair: CollatorPair, session_index: SessionIndex, @@ -105,8 +109,9 @@ impl Default for TestState { let 
group_rotation_info = GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 100, now: 1 }; - let availability_cores = - vec![CoreState::Scheduled(ScheduledCore { para_id, collator: None }), CoreState::Free]; + let mut claim_queue = BTreeMap::new(); + claim_queue.insert(CoreIndex(0), [para_id].into_iter().collect()); + claim_queue.insert(CoreIndex(1), VecDeque::new()); let relay_parent = Hash::random(); @@ -133,7 +138,7 @@ impl Default for TestState { group_rotation_info, validator_peer_id, relay_parent, - availability_cores, + claim_queue, local_peer_id, collator_pair, session_index: 1, @@ -147,17 +152,14 @@ impl TestState { pub fn with_elastic_scaling() -> Self { let mut state = Self::default(); let para_id = state.para_id; - state - .availability_cores - .push(CoreState::Scheduled(ScheduledCore { para_id, collator: None })); - state - .availability_cores - .push(CoreState::Scheduled(ScheduledCore { para_id, collator: None })); + + state.claim_queue.insert(CoreIndex(2), [para_id].into_iter().collect()); + state.claim_queue.insert(CoreIndex(3), [para_id].into_iter().collect()); state } fn current_group_validator_indices(&self) -> &[ValidatorIndex] { - let core_num = self.availability_cores.len(); + let core_num = self.claim_queue.len(); let GroupIndex(group_idx) = self.group_rotation_info.group_for_core(CoreIndex(0), core_num); &self.session_info.validator_groups.get(GroupIndex::from(group_idx)).unwrap() } @@ -395,7 +397,36 @@ async fn distribute_collation_with_receipt( RuntimeApiRequest::AvailabilityCores(tx) )) => { assert_eq!(relay_parent, _relay_parent); - tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + tx.send(Ok(test_state.claim_queue.values().map(|paras| + if let Some(para) = paras.front() { + CoreState::Scheduled(ScheduledCore { para_id: *para, collator: None }) + } else { + CoreState::Free + } + ).collect())).unwrap(); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + 
AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _relay_parent, + RuntimeApiRequest::Version(tx) + )) => { + assert_eq!(relay_parent, _relay_parent); + tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap(); + } + ); + + // obtain the claim queue schedule. + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _relay_parent, + RuntimeApiRequest::ClaimQueue(tx) + )) => { + assert_eq!(relay_parent, _relay_parent); + tx.send(Ok(test_state.claim_queue.clone())).unwrap(); } ); diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index 0a0a85fb1f27..ea8fdb0e04fb 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -19,7 +19,7 @@ use super::*; use polkadot_node_subsystem::messages::ChainApiMessage; -use polkadot_primitives::{AsyncBackingParams, Header, OccupiedCore}; +use polkadot_primitives::{AsyncBackingParams, Header}; const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; @@ -665,90 +665,3 @@ fn advertise_and_send_collation_by_hash() { }, ) } - -/// Tests that collator distributes collation built on top of occupied core. 
-#[test] -fn advertise_core_occupied() { - let mut test_state = TestState::default(); - let candidate = - TestCandidateBuilder { para_id: test_state.para_id, ..Default::default() }.build(); - test_state.availability_cores[0] = CoreState::Occupied(OccupiedCore { - next_up_on_available: None, - occupied_since: 0, - time_out_at: 0, - next_up_on_time_out: None, - availability: BitVec::default(), - group_responsible: GroupIndex(0), - candidate_hash: candidate.hash(), - candidate_descriptor: candidate.descriptor, - }); - - let local_peer_id = test_state.local_peer_id; - let collator_pair = test_state.collator_pair.clone(); - - test_harness( - local_peer_id, - collator_pair, - ReputationAggregator::new(|_| true), - |mut test_harness| async move { - let virtual_overseer = &mut test_harness.virtual_overseer; - - let head_a = Hash::from_low_u64_be(128); - let head_a_num: u32 = 64; - - // Grandparent of head `a`. - let head_b = Hash::from_low_u64_be(130); - - // Set collating para id. - overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) - .await; - // Activated leaf is `a`, but the collation will be based on `b`. - update_view(virtual_overseer, vec![(head_a, head_a_num)], 1).await; - - let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; - let candidate = TestCandidateBuilder { - para_id: test_state.para_id, - relay_parent: head_b, - pov_hash: pov.hash(), - ..Default::default() - } - .build(); - let candidate_hash = candidate.hash(); - distribute_collation_with_receipt( - virtual_overseer, - &test_state, - head_b, - true, - candidate, - pov, - Hash::zero(), - ) - .await; - - let validators = test_state.current_group_validator_authority_ids(); - let peer_ids = test_state.current_group_validator_peer_ids(); - - connect_peer( - virtual_overseer, - peer_ids[0], - CollationVersion::V2, - Some(validators[0].clone()), - ) - .await; - expect_declare_msg_v2(virtual_overseer, &test_state, &peer_ids[0]).await; - // Peer is aware of the leaf. 
- send_peer_view_change(virtual_overseer, &peer_ids[0], vec![head_a]).await; - - // Collation is advertised. - expect_advertise_collation_msg( - virtual_overseer, - &peer_ids[0], - head_b, - Some(vec![candidate_hash]), - ) - .await; - - test_harness - }, - ) -} diff --git a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs index 001df1fb3da9..96ffe9f13db3 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs @@ -270,7 +270,7 @@ impl Collations { // We don't need to fetch any other collation when we already have seconded one. CollationStatus::Seconded => None, CollationStatus::Waiting => - if !self.is_seconded_limit_reached(relay_parent_mode) { + if self.is_seconded_limit_reached(relay_parent_mode) { None } else { self.waiting_queue.pop_front() @@ -280,7 +280,7 @@ impl Collations { } } - /// Checks the limit of seconded candidates for a given para. + /// Checks the limit of seconded candidates. 
pub(super) fn is_seconded_limit_reached( &self, relay_parent_mode: ProspectiveParachainsMode, @@ -293,7 +293,7 @@ impl Collations { } else { 1 }; - self.seconded_count < seconded_limit + self.seconded_count >= seconded_limit } } diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index 9f037a983e51..f5c9726f3f6a 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -19,7 +19,7 @@ use futures::{ }; use futures_timer::Delay; use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, + collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, future::Future, time::{Duration, Instant}, }; @@ -51,6 +51,7 @@ use polkadot_node_subsystem_util::{ backing_implicit_view::View as ImplicitView, reputation::{ReputationAggregator, REPUTATION_CHANGE_INTERVAL}, runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, + vstaging::fetch_claim_queue, }; use polkadot_primitives::{ CandidateHash, CollatorId, CoreState, Hash, HeadData, Id as ParaId, OccupiedCoreAssumption, @@ -362,8 +363,8 @@ impl PeerData { #[derive(Debug)] struct GroupAssignments { - /// Current assignment. - current: Option, + /// Current assignments. 
+ current: Vec, } struct PerRelayParent { @@ -376,7 +377,7 @@ impl PerRelayParent { fn new(mode: ProspectiveParachainsMode) -> Self { Self { prospective_parachains_mode: mode, - assignment: GroupAssignments { current: None }, + assignment: GroupAssignments { current: vec![] }, collations: Collations::default(), } } @@ -491,34 +492,34 @@ where .await .map_err(Error::CancelledAvailabilityCores)??; - let para_now = match polkadot_node_subsystem_util::signing_key_and_index(&validators, keystore) - .and_then(|(_, index)| polkadot_node_subsystem_util::find_validator_group(&groups, index)) - { - Some(group) => { - let core_now = rotation_info.core_for_group(group, cores.len()); - - cores.get(core_now.0 as usize).and_then(|c| match c { - CoreState::Occupied(core) if relay_parent_mode.is_enabled() => Some(core.para_id()), - CoreState::Scheduled(core) => Some(core.para_id), - CoreState::Occupied(_) | CoreState::Free => None, - }) - }, - None => { - gum::trace!(target: LOG_TARGET, ?relay_parent, "Not a validator"); - - return Ok(()) - }, + let core_now = if let Some(group) = + polkadot_node_subsystem_util::signing_key_and_index(&validators, keystore).and_then( + |(_, index)| polkadot_node_subsystem_util::find_validator_group(&groups, index), + ) { + rotation_info.core_for_group(group, cores.len()) + } else { + gum::trace!(target: LOG_TARGET, ?relay_parent, "Not a validator"); + return Ok(()) }; - // This code won't work well, if at all for on-demand parachains. For on-demand we'll - // have to be aware of which core the on-demand claim is going to be multiplexed - // onto. The on-demand claim will also have a known collator, and we should always - // allow an incoming connection from that collator. If not even connecting to them - // directly. - // - // However, this'll work fine for parachains, as each parachain gets a dedicated - // core. 
- if let Some(para_id) = para_now.as_ref() { + let paras_now = match fetch_claim_queue(sender, relay_parent).await.map_err(Error::Runtime)? { + // Runtime supports claim queue - use it + // + // `relay_parent_mode` is not examined here because if the runtime supports claim queue + // then it supports async backing params too (`ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT` + // < `CLAIM_QUEUE_RUNTIME_REQUIREMENT`). + Some(mut claim_queue) => claim_queue.0.remove(&core_now), + // Claim queue is not supported by the runtime - use availability cores instead. + None => cores.get(core_now.0 as usize).and_then(|c| match c { + CoreState::Occupied(core) if relay_parent_mode.is_enabled() => + core.next_up_on_available.as_ref().map(|c| [c.para_id].into_iter().collect()), + CoreState::Scheduled(core) => Some([core.para_id].into_iter().collect()), + CoreState::Occupied(_) | CoreState::Free => None, + }), + } + .unwrap_or_else(|| VecDeque::new()); + + for para_id in paras_now.iter() { let entry = current_assignments.entry(*para_id).or_default(); *entry += 1; if *entry == 1 { @@ -531,7 +532,7 @@ where } } - *group_assignment = GroupAssignments { current: para_now }; + *group_assignment = GroupAssignments { current: paras_now.into_iter().collect() }; Ok(()) } @@ -542,7 +543,7 @@ fn remove_outgoing( ) { let GroupAssignments { current, .. } = per_relay_parent.assignment; - if let Some(cur) = current { + for cur in current { if let Entry::Occupied(mut occupied) = current_assignments.entry(cur) { *occupied.get_mut() -= 1; if *occupied.get() == 0 { @@ -857,7 +858,8 @@ async fn process_incoming_peer_message( peer_id = ?origin, ?collator_id, ?para_id, - "Declared as collator for unneeded para", + "Declared as collator for unneeded para. Current assignments: {:?}", + &state.current_assignments ); modify_reputation( @@ -1089,7 +1091,7 @@ where peer_data.collating_para().ok_or(AdvertisementError::UndeclaredCollator)?; // Check if this is assigned to us. 
- if assignment.current.map_or(true, |id| id != collator_para_id) { + if !assignment.current.contains(&collator_para_id) { return Err(AdvertisementError::InvalidAssignment) } @@ -1105,7 +1107,7 @@ where ) .map_err(AdvertisementError::Invalid)?; - if !per_relay_parent.collations.is_seconded_limit_reached(relay_parent_mode) { + if per_relay_parent.collations.is_seconded_limit_reached(relay_parent_mode) { return Err(AdvertisementError::SecondedLimitReached) } @@ -1197,7 +1199,7 @@ where }); let collations = &mut per_relay_parent.collations; - if !collations.is_seconded_limit_reached(relay_parent_mode) { + if collations.is_seconded_limit_reached(relay_parent_mode) { gum::trace!( target: LOG_TARGET, peer_id = ?peer_id, diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs index 3f4459d8e65d..44e25efd4dfc 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -21,7 +21,12 @@ use sc_network::ProtocolName; use sp_core::{crypto::Pair, Encode}; use sp_keyring::Sr25519Keyring; use sp_keystore::Keystore; -use std::{iter, sync::Arc, time::Duration}; +use std::{ + collections::{BTreeMap, VecDeque}, + iter, + sync::Arc, + time::Duration, +}; use polkadot_node_network_protocol::{ our_view, @@ -37,7 +42,7 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::{reputation::add_reputation, TimeoutExt}; use polkadot_primitives::{ - CandidateReceipt, CollatorPair, CoreState, GroupIndex, GroupRotationInfo, HeadData, + CandidateReceipt, CollatorPair, CoreIndex, CoreState, GroupIndex, GroupRotationInfo, HeadData, OccupiedCore, PersistedValidationData, ScheduledCore, ValidatorId, ValidatorIndex, }; use polkadot_primitives_test_helpers::{ @@ -71,6 +76,7 @@ struct TestState { validator_groups: Vec>, 
group_rotation_info: GroupRotationInfo, cores: Vec, + claim_queue: BTreeMap>, } impl Default for TestState { @@ -104,7 +110,7 @@ impl Default for TestState { CoreState::Scheduled(ScheduledCore { para_id: chain_ids[0], collator: None }), CoreState::Free, CoreState::Occupied(OccupiedCore { - next_up_on_available: None, + next_up_on_available: Some(ScheduledCore { para_id: chain_ids[1], collator: None }), occupied_since: 0, time_out_at: 1, next_up_on_time_out: None, @@ -120,6 +126,11 @@ impl Default for TestState { }), ]; + let mut claim_queue = BTreeMap::new(); + claim_queue.insert(CoreIndex(0), [chain_ids[0]].into_iter().collect()); + claim_queue.insert(CoreIndex(1), VecDeque::new()); + claim_queue.insert(CoreIndex(2), [chain_ids[1]].into_iter().collect()); + Self { chain_ids, relay_parent, @@ -128,6 +139,7 @@ impl Default for TestState { validator_groups, group_rotation_info, cores, + claim_queue, } } } @@ -264,6 +276,26 @@ async fn respond_to_core_info_queries( let _ = tx.send(Ok(test_state.cores.clone())); } ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::Version(tx), + )) => { + let _ = tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::ClaimQueue(tx), + )) => { + let _ = tx.send(Ok(test_state.claim_queue.clone())); + } + ); } /// Assert that the next message is a `CandidateBacking(Second())`. 
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index 178dcb85e035..472731b506ab 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -72,6 +72,26 @@ async fn assert_assign_incoming( tx.send(Ok(test_state.cores.clone())).unwrap(); } ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::Version(tx), + )) if parent == hash => { + let _ = tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::ClaimQueue(tx), + )) if parent == hash => { + let _ = tx.send(Ok(test_state.claim_queue.clone())); + } + ); } /// Handle a view update. diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 73416b193bbe..2bb9c82c6a6f 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -195,8 +195,8 @@ struct ActiveValidatorState { index: ValidatorIndex, // our validator group group: GroupIndex, - // the assignment of our validator group, if any. - assignment: Option, + // the assignments of our validator group, if any. + assignments: Vec, // the 'direct-in-group' communication at this relay-parent. 
cluster_tracker: ClusterTracker, } @@ -740,8 +740,8 @@ fn find_active_validator_state( let our_group = groups.by_validator_index(validator_index)?; let core_index = group_rotation_info.core_for_group(our_group, availability_cores.len()); - let para_assigned_to_core = if let Some(claim_queue) = maybe_claim_queue { - claim_queue.get_claim_for(core_index, 0) + let paras_assigned_to_core = if let Some(claim_queue) = maybe_claim_queue { + claim_queue.iter_claims_for_core(&core_index).copied().collect() } else { availability_cores .get(core_index.0 as usize) @@ -753,6 +753,8 @@ fn find_active_validator_state( .map(|scheduled_core| scheduled_core.para_id), CoreState::Free | CoreState::Occupied(_) => None, }) + .into_iter() + .collect() }; let group_validators = groups.get(our_group)?.to_owned(); @@ -760,7 +762,7 @@ fn find_active_validator_state( active: Some(ActiveValidatorState { index: validator_index, group: our_group, - assignment: para_assigned_to_core, + assignments: paras_assigned_to_core, cluster_tracker: ClusterTracker::new(group_validators, seconding_limit) .expect("group is non-empty because we are in it; qed"), }), @@ -1162,10 +1164,10 @@ pub(crate) async fn share_local_statement( None => return Ok(()), }; - let (local_index, local_assignment, local_group) = + let (local_index, local_assignments, local_group) = match per_relay_parent.active_validator_state() { None => return Err(JfyiError::InvalidShare), - Some(l) => (l.index, l.assignment, l.group), + Some(l) => (l.index, &l.assignments, l.group), }; // Two possibilities: either the statement is `Seconded` or we already @@ -1203,7 +1205,7 @@ pub(crate) async fn share_local_statement( return Err(JfyiError::InvalidShare) } - if local_assignment != Some(expected_para) || relay_parent != expected_relay_parent { + if !local_assignments.contains(&expected_para) || relay_parent != expected_relay_parent { return Err(JfyiError::InvalidShare) } @@ -2144,12 +2146,11 @@ async fn determine_groups_per_para( let n_cores = 
availability_cores.len(); // Determine the core indices occupied by each para at the current relay parent. To support - // on-demand parachains we also consider the core indices at next block if core has a candidate - // pending availability. - let para_core_indices: Vec<_> = if let Some(claim_queue) = maybe_claim_queue { + // on-demand parachains we also consider the core indices at next blocks. + let schedule: HashMap> = if let Some(claim_queue) = maybe_claim_queue { claim_queue - .iter_claims_at_depth(0) - .map(|(core_index, para)| (para, core_index)) + .iter_all_claims() + .map(|(core_index, paras)| (*core_index, paras.iter().copied().collect())) .collect() } else { availability_cores @@ -2157,12 +2158,12 @@ async fn determine_groups_per_para( .enumerate() .filter_map(|(index, core)| match core { CoreState::Scheduled(scheduled_core) => - Some((scheduled_core.para_id, CoreIndex(index as u32))), + Some((CoreIndex(index as u32), vec![scheduled_core.para_id])), CoreState::Occupied(occupied_core) => if max_candidate_depth >= 1 { - occupied_core - .next_up_on_available - .map(|scheduled_core| (scheduled_core.para_id, CoreIndex(index as u32))) + occupied_core.next_up_on_available.map(|scheduled_core| { + (CoreIndex(index as u32), vec![scheduled_core.para_id]) + }) } else { None }, @@ -2173,9 +2174,12 @@ async fn determine_groups_per_para( let mut groups_per_para = HashMap::new(); // Map from `CoreIndex` to `GroupIndex` and collect as `HashMap`. 
- for (para, core_index) in para_core_indices { + for (core_index, paras) in schedule { let group_index = group_rotation_info.group_for_core(core_index, n_cores); - groups_per_para.entry(para).or_insert_with(Vec::new).push(group_index) + + for para in paras { + groups_per_para.entry(para).or_insert_with(Vec::new).push(group_index); + } } groups_per_para diff --git a/polkadot/node/subsystem-util/src/vstaging.rs b/polkadot/node/subsystem-util/src/vstaging.rs index b166a54f75c4..b6cd73f412b3 100644 --- a/polkadot/node/subsystem-util/src/vstaging.rs +++ b/polkadot/node/subsystem-util/src/vstaging.rs @@ -31,7 +31,7 @@ const LOG_TARGET: &'static str = "parachain::subsystem-util-vstaging"; /// A snapshot of the runtime claim queue at an arbitrary relay chain block. #[derive(Default)] -pub struct ClaimQueueSnapshot(BTreeMap>); +pub struct ClaimQueueSnapshot(pub BTreeMap>); impl From>> for ClaimQueueSnapshot { fn from(claim_queue_snapshot: BTreeMap>) -> Self { @@ -56,6 +56,19 @@ impl ClaimQueueSnapshot { .iter() .filter_map(move |(core_index, paras)| Some((*core_index, *paras.get(depth)?))) } + + /// Returns an iterator over all claims on the given core. + pub fn iter_claims_for_core( + &self, + core_index: &CoreIndex, + ) -> impl Iterator + '_ { + self.0.get(core_index).map(|c| c.iter()).into_iter().flatten() + } + + /// Returns an iterator over the whole claim queue. 
+ pub fn iter_all_claims(&self) -> impl Iterator)> + '_ { + self.0.iter() + } } // TODO: https://github.com/paritytech/polkadot-sdk/issues/1940 diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index 62e96e9fbb05..f4e3db185fea 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -28,10 +28,10 @@ use sp_std::{ pub fn claim_queue() -> BTreeMap> { let now = >::block_number() + One::one(); - // This explicit update is only strictly required for session boundaries: - // - // At the end of a session we clear the claim queues: Without this update call, nothing would be - // scheduled to the client. + // This is needed so that the claim queue always has the right size (equal to + // scheduling_lookahead). Otherwise, if a candidate is backed in the same block where the + // previous candidate is included, the claim queue will have already pop()-ed the next item + // from the queue and the length would be `scheduling_lookahead - 1`. >::free_cores_and_fill_claim_queue(Vec::new(), now); let config = configuration::ActiveConfig::::get(); // Extra sanity, config should already never be smaller than 1: diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index 33b4d849c490..d7fe5c06863c 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -351,6 +351,9 @@ impl Pallet { } /// Note that the given cores have become occupied. Update the claim queue accordingly. + /// This will not push a new entry onto the claim queue, so the length after this call will be + /// the expected length - 1. The claim_queue runtime API will take care of adding another entry + /// here, to ensure the right lookahead. 
pub(crate) fn occupied( now_occupied: BTreeMap, ) -> BTreeMap { diff --git a/polkadot/zombienet_tests/assign-core.js b/polkadot/zombienet_tests/assign-core.js new file mode 100644 index 000000000000..5ddb86930f5a --- /dev/null +++ b/polkadot/zombienet_tests/assign-core.js @@ -0,0 +1,48 @@ +async function run(nodeName, networkInfo, args) { + const wsUri = networkInfo.nodesByName[nodeName].wsUri; + const api = await zombie.connect(wsUri); + + let core = Number(args[0]); + + let assignments = []; + + for (let i = 1; i < args.length; i += 2) { + let [para, parts] = [args[i], args[i + 1]]; + + console.log(`Assigning para ${para} to core ${core}`); + + assignments.push( + [{ task: para }, parts] + ); + } + await zombie.util.cryptoWaitReady(); + + // account to submit tx + const keyring = new zombie.Keyring({ type: "sr25519" }); + const alice = keyring.addFromUri("//Alice"); + + await new Promise(async (resolve, reject) => { + const unsub = await api.tx.sudo + .sudo(api.tx.coretime.assignCore(core, 0, assignments, null)) + .signAndSend(alice, ({ status, isError }) => { + if (status.isInBlock) { + console.log( + `Transaction included at blockhash ${status.asInBlock}`, + ); + } else if (status.isFinalized) { + console.log( + `Transaction finalized at blockHash ${status.asFinalized}`, + ); + unsub(); + return resolve(); + } else if (isError) { + console.log(`Transaction error`); + reject(`Transaction error`); + } + }); + }); + + return 0; +} + +module.exports = { run }; diff --git a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl index d624cbaf9df6..d47ef8f415f7 100644 --- a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl +++ b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl @@ -11,8 +11,8 @@ elastic-validator-4: reports node_roles is 4 # Register 2 extra cores to this some-parachain. 
-elastic-validator-0: js-script ./assign-core.js with "2000,0" return is 0 within 600 seconds -elastic-validator-0: js-script ./assign-core.js with "2000,1" return is 0 within 600 seconds +elastic-validator-0: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds +elastic-validator-0: js-script ./assign-core.js with "1,2000,57600" return is 0 within 600 seconds # Wait for 20 relay chain blocks elastic-validator-0: reports substrate_block_height{status="best"} is at least 20 within 600 seconds diff --git a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl index 900a3befbc6f..7ba896e1c903 100644 --- a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl +++ b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl @@ -11,8 +11,8 @@ validator: reports substrate_block_height{status="finalized"} is at least 10 wit validator: parachain 2000 block height is at least 10 within 200 seconds # Register the second core assigned to this parachain. 
-alice: js-script ./assign-core.js with "2000,0" return is 0 within 600 seconds -alice: js-script ./assign-core.js with "2000,1" return is 0 within 600 seconds +alice: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds +alice: js-script ./assign-core.js with "1,2000,57600" return is 0 within 600 seconds validator: reports substrate_block_height{status="finalized"} is at least 35 within 100 seconds diff --git a/polkadot/zombienet_tests/elastic_scaling/assign-core.js b/polkadot/zombienet_tests/elastic_scaling/assign-core.js deleted file mode 100644 index add63b6d3085..000000000000 --- a/polkadot/zombienet_tests/elastic_scaling/assign-core.js +++ /dev/null @@ -1,39 +0,0 @@ -async function run(nodeName, networkInfo, args) { - const wsUri = networkInfo.nodesByName[nodeName].wsUri; - const api = await zombie.connect(wsUri); - - let para = Number(args[0]); - let core = Number(args[1]); - console.log(`Assigning para ${para} to core ${core}`); - - await zombie.util.cryptoWaitReady(); - - // account to submit tx - const keyring = new zombie.Keyring({ type: "sr25519" }); - const alice = keyring.addFromUri("//Alice"); - - await new Promise(async (resolve, reject) => { - const unsub = await api.tx.sudo - .sudo(api.tx.coretime.assignCore(core, 0, [[{ task: para }, 57600]], null)) - .signAndSend(alice, ({ status, isError }) => { - if (status.isInBlock) { - console.log( - `Transaction included at blockhash ${status.asInBlock}`, - ); - } else if (status.isFinalized) { - console.log( - `Transaction finalized at blockHash ${status.asFinalized}`, - ); - unsub(); - return resolve(); - } else if (isError) { - console.log(`Transaction error`); - reject(`Transaction error`); - } - }); - }); - - return 0; -} - -module.exports = { run }; diff --git a/polkadot/zombienet_tests/elastic_scaling/assign-core.js b/polkadot/zombienet_tests/elastic_scaling/assign-core.js new file mode 120000 index 000000000000..eeb6402c06f5 --- /dev/null +++ 
b/polkadot/zombienet_tests/elastic_scaling/assign-core.js @@ -0,0 +1 @@ +../assign-core.js \ No newline at end of file diff --git a/polkadot/zombienet_tests/functional/0015-coretime-shared-core.toml b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.toml new file mode 100644 index 000000000000..fed30e0db053 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.toml @@ -0,0 +1,44 @@ +[settings] +timeout = 1000 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] + max_candidate_depth = 3 + allowed_ancestry_len = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 + lookahead = 2 + num_cores = 4 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] + needed_approvals = 3 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +command = "polkadot" + + [[relaychain.node_groups]] + name = "validator" + args = ["-lruntime=debug,parachain=debug,parachain::backing=trace,parachain::collator-protocol=trace,parachain::prospective-parachains=trace,runtime::parachains::scheduler=trace,runtime::inclusion-inherent=trace,runtime::inclusion=trace" ] + count = 4 + +{% for id in range(2000,2004) %} +[[parachains]] +id = {{id}} +register_para = false +onboard_as_parachain = false +add_to_genesis = false +chain = "glutton-westend-local-{{id}}" + [parachains.genesis.runtimeGenesis.patch.glutton] + compute = "50000000" + storage = "2500000000" + trashDataCount = 5120 + + [parachains.collator] + name = "collator-{{id}}" + image = "{{CUMULUS_IMAGE}}" + command = "polkadot-parachain" + args = ["-lparachain=debug"] + +{% endfor %} diff --git a/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl new file mode 100644 index 000000000000..b8b8887df857 --- /dev/null +++ 
b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl @@ -0,0 +1,16 @@ +Description: CT shared core test +Network: ./0015-coretime-shared-core.toml +Creds: config + +validator: reports node_roles is 4 + +# register paras 2 by 2 to speed up the test. registering all at once will exceed the weight limit. +validator-0: js-script ./0015-force-register-paras.js with "2000,2001" return is 0 within 600 seconds +validator-0: js-script ./0015-force-register-paras.js with "2002,2003" return is 0 within 600 seconds +# assign core 0 to be shared by all paras. +validator-0: js-script ./assign-core.js with "0,2000,14400,2001,14400,2002,14400,2003,14400" return is 0 within 600 seconds + +collator-2000: reports block height is at least 6 within 200 seconds +collator-2001: reports block height is at least 6 within 50 seconds +collator-2002: reports block height is at least 6 within 50 seconds +collator-2003: reports block height is at least 6 within 50 seconds diff --git a/polkadot/zombienet_tests/functional/0015-force-register-paras.js b/polkadot/zombienet_tests/functional/0015-force-register-paras.js new file mode 100644 index 000000000000..f82163b01105 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0015-force-register-paras.js @@ -0,0 +1,63 @@ +async function run(nodeName, networkInfo, args) { + const init = networkInfo.nodesByName[nodeName]; + let wsUri = init.wsUri; + let userDefinedTypes = init.userDefinedTypes; + const api = await zombie.connect(wsUri, userDefinedTypes); + + // account to submit tx + const keyring = new zombie.Keyring({ type: "sr25519" }); + const alice = keyring.addFromUri("//Alice"); + + let calls = []; + + for (let i = 0; i < args.length; i++) { + let para = args[i]; + const sec = networkInfo.nodesByName["collator-" + para]; + const api_collator = await zombie.connect(sec.wsUri, sec.userDefinedTypes); + + await zombie.util.cryptoWaitReady(); + + // Get the genesis header and the validation code of the parachain + const 
genesis_header = await api_collator.rpc.chain.getHeader(); + const validation_code = await api_collator.rpc.state.getStorage("0x3A636F6465"); + + calls.push( + api.tx.paras.addTrustedValidationCode(validation_code.toHex()) + ); + calls.push( + api.tx.registrar.forceRegister( + alice.address, + 0, + Number(para), + genesis_header.toHex(), + validation_code.toHex(), + ) + ); + } + + const sudo_batch = api.tx.sudo.sudo(api.tx.utility.batch(calls)); + + await new Promise(async (resolve, reject) => { + const unsub = await sudo_batch + .signAndSend(alice, ({ status, isError }) => { + if (status.isInBlock) { + console.log( + `Transaction included at blockhash ${status.asInBlock}`, + ); + } else if (status.isFinalized) { + console.log( + `Transaction finalized at blockHash ${status.asFinalized}`, + ); + unsub(); + return resolve(); + } else if (isError) { + console.log(`Transaction error`); + reject(`Transaction error`); + } + }); + }); + + return 0; +} + +module.exports = { run }; diff --git a/polkadot/zombienet_tests/functional/assign-core.js b/polkadot/zombienet_tests/functional/assign-core.js new file mode 120000 index 000000000000..eeb6402c06f5 --- /dev/null +++ b/polkadot/zombienet_tests/functional/assign-core.js @@ -0,0 +1 @@ +../assign-core.js \ No newline at end of file diff --git a/prdoc/pr_4724.prdoc b/prdoc/pr_4724.prdoc new file mode 100644 index 000000000000..3723c2a70246 --- /dev/null +++ b/prdoc/pr_4724.prdoc @@ -0,0 +1,24 @@ +title: Fix core sharing and make use of scheduling_lookahead during backing + +doc: + - audience: Node Dev + description: | + Core sharing (two or more parachains scheduled on the same core with interlaced assignments) was not working correctly. + Adds the necessary fixes to the backing subsystems. Moreover, adds support for backing collations which are built + and advertised ahead of time (with up to `scheduling_lookahead` relay chain blocks in advance). 
+ +crates: + - name: polkadot-node-core-backing + bump: patch + - name: polkadot-node-core-prospective-parachains + bump: patch + - name: polkadot-collator-protocol + bump: patch + - name: polkadot-statement-distribution + bump: patch + - name: polkadot-node-subsystem-util + bump: minor + - name: polkadot-runtime-parachains + bump: none + - name: polkadot + bump: none From 4389aafb7f5f85b6fe7199ef7c428d09b2e89191 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Wed, 19 Jun 2024 15:49:35 +0200 Subject: [PATCH 47/52] Update bridges zombienet tests relay version (#4821) --- docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile index 196ba861f503..e17952ccee80 100644 --- a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile +++ b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile @@ -1,7 +1,7 @@ # this image is built on top of existing Zombienet image ARG ZOMBIENET_IMAGE # this image uses substrate-relay image built elsewhere -ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v1.5.0 +ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v1.6.4 # metadata ARG VCS_REF From 9f09169e1518b623d00968337cdaf55f5eff7b56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 19 Jun 2024 17:10:33 +0200 Subject: [PATCH 48/52] Fix CLI pruning params (#4836) `ValueEnum` is apparently not using the `from_str`... 
Closes: https://github.com/paritytech/polkadot-sdk/issues/4828 --- .../client/cli/src/params/pruning_params.rs | 81 +++++++++++++------ 1 file changed, 56 insertions(+), 25 deletions(-) diff --git a/substrate/client/cli/src/params/pruning_params.rs b/substrate/client/cli/src/params/pruning_params.rs index 88ae006c638e..6b7b0e7ffa99 100644 --- a/substrate/client/cli/src/params/pruning_params.rs +++ b/substrate/client/cli/src/params/pruning_params.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use crate::error; -use clap::{builder::PossibleValue, Args, ValueEnum}; +use clap::Args; use sc_service::{BlocksPruning, PruningMode}; /// Parameters to define the pruning mode @@ -30,23 +30,38 @@ pub struct PruningParams { /// This setting can only be set on the first creation of the database. Every subsequent run /// will load the pruning mode from the database and will error if the stored mode doesn't /// match this CLI value. It is fine to drop this CLI flag for subsequent runs. The only - /// exception is that `` can change between subsequent runs (increasing it will not + /// exception is that `NUMBER` can change between subsequent runs (increasing it will not /// lead to restoring pruned state). /// + /// Possible values: + /// + /// - archive: Keep the data of all blocks. + /// + /// - archive-canonical: Keep only the data of finalized blocks. + /// + /// - NUMBER: Keep the data of the last NUMBER of finalized blocks. + /// /// [default: 256] - #[arg(alias = "pruning", long, value_name = "PRUNING_MODE", value_enum)] + #[arg(alias = "pruning", long, value_name = "PRUNING_MODE")] pub state_pruning: Option, /// Specify the blocks pruning mode. /// /// This mode specifies when the block's body (including justifications) /// should be pruned (ie, removed) from the database. + /// + /// Possible values: + /// + /// - archive: Keep the data of all blocks. + /// + /// - archive-canonical: Keep only the data of finalized blocks. 
+ /// + /// - NUMBER: Keep the data of the last NUMBER of finalized blocks. #[arg( alias = "keep-blocks", long, value_name = "PRUNING_MODE", - default_value_t = DatabasePruningMode::ArchiveCanonical, - value_enum + default_value = "archive-canonical" )] pub blocks_pruning: DatabasePruningMode, } @@ -78,26 +93,6 @@ pub enum DatabasePruningMode { Custom(u32), } -impl ValueEnum for DatabasePruningMode { - fn value_variants<'a>() -> &'a [Self] { - &[Self::Archive, Self::ArchiveCanonical, Self::Custom(0)] - } - - fn to_possible_value(&self) -> Option { - Some(match self { - Self::Archive => PossibleValue::new("archive").help("Keep the data of all blocks."), - Self::ArchiveCanonical => PossibleValue::new("archive-canonical") - .help("Keep only the data of finalized blocks."), - Self::Custom(_) => PossibleValue::new("") - .help("Keep the data of the last of finalized blocks."), - }) - } - - fn from_str(input: &str, _: bool) -> Result { - ::from_str(input) - } -} - impl std::str::FromStr for DatabasePruningMode { type Err = String; @@ -132,3 +127,39 @@ impl Into for DatabasePruningMode { } } } + +#[cfg(test)] +mod tests { + use super::*; + use clap::Parser; + + #[derive(Parser)] + struct Cli { + #[clap(flatten)] + pruning: PruningParams, + } + + #[test] + fn pruning_params_parse_works() { + let Cli { pruning } = + Cli::parse_from(["", "--state-pruning=1000", "--blocks-pruning=1000"]); + + assert!(matches!(pruning.state_pruning, Some(DatabasePruningMode::Custom(1000)))); + assert!(matches!(pruning.blocks_pruning, DatabasePruningMode::Custom(1000))); + + let Cli { pruning } = + Cli::parse_from(["", "--state-pruning=archive", "--blocks-pruning=archive"]); + + assert!(matches!(dbg!(pruning.state_pruning), Some(DatabasePruningMode::Archive))); + assert!(matches!(pruning.blocks_pruning, DatabasePruningMode::Archive)); + + let Cli { pruning } = Cli::parse_from([ + "", + "--state-pruning=archive-canonical", + "--blocks-pruning=archive-canonical", + ]); + + 
assert!(matches!(dbg!(pruning.state_pruning), Some(DatabasePruningMode::ArchiveCanonical))); + assert!(matches!(pruning.blocks_pruning, DatabasePruningMode::ArchiveCanonical)); + } +} From 6c857609a9425902d6dfe5445afb16c6b23ad86c Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Wed, 19 Jun 2024 18:20:11 +0200 Subject: [PATCH 49/52] rpc server: add `health/readiness endpoint` (#4802) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previous attempt https://github.com/paritytech/substrate/pull/14314 Close #4443 Ideally, we should move /health and /health/readiness to the prometheus server but because it's was quite easy to implement on the RPC server and that RPC server already exposes /health. Manual tests on a polkadot node syncing: ```bash ➜ polkadot-sdk (na-fix-4443) ✗ curl -v localhost:9944/health * Host localhost:9944 was resolved. * IPv6: ::1 * IPv4: 127.0.0.1 * Trying [::1]:9944... * connect to ::1 port 9944 from ::1 port 55024 failed: Connection refused * Trying 127.0.0.1:9944... * Connected to localhost (127.0.0.1) port 9944 > GET /health HTTP/1.1 > Host: localhost:9944 > User-Agent: curl/8.5.0 > Accept: */* > < HTTP/1.1 200 OK < content-type: application/json; charset=utf-8 < content-length: 53 < date: Fri, 14 Jun 2024 16:12:23 GMT < * Connection #0 to host localhost left intact {"peers":0,"isSyncing":false,"shouldHavePeers":false}% ➜ polkadot-sdk (na-fix-4443) ✗ curl -v localhost:9944/health/readiness * Host localhost:9944 was resolved. * IPv6: ::1 * IPv4: 127.0.0.1 * Trying [::1]:9944... * connect to ::1 port 9944 from ::1 port 54328 failed: Connection refused * Trying 127.0.0.1:9944... 
* Connected to localhost (127.0.0.1) port 9944 > GET /health/readiness HTTP/1.1 > Host: localhost:9944 > User-Agent: curl/8.5.0 > Accept: */* > < HTTP/1.1 500 Internal Server Error < content-type: application/json; charset=utf-8 < content-length: 0 < date: Fri, 14 Jun 2024 16:12:36 GMT < * Connection #0 to host localhost left intact ``` //cc @BulatSaif you may be interested in this.. --------- Co-authored-by: Bastian Köcher --- Cargo.lock | 1 + prdoc/pr_4802.prdoc | 16 ++ substrate/client/rpc-servers/Cargo.toml | 1 + substrate/client/rpc-servers/src/lib.rs | 10 +- .../client/rpc-servers/src/middleware/mod.rs | 2 + .../rpc-servers/src/middleware/node_health.rs | 199 ++++++++++++++++++ 6 files changed, 223 insertions(+), 6 deletions(-) create mode 100644 prdoc/pr_4802.prdoc create mode 100644 substrate/client/rpc-servers/src/middleware/node_health.rs diff --git a/Cargo.lock b/Cargo.lock index bbb785a618a8..cb4c25ae998c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17846,6 +17846,7 @@ dependencies = [ "ip_network", "jsonrpsee", "log", + "serde", "serde_json", "substrate-prometheus-endpoint", "tokio", diff --git a/prdoc/pr_4802.prdoc b/prdoc/pr_4802.prdoc new file mode 100644 index 000000000000..5757c4cbae18 --- /dev/null +++ b/prdoc/pr_4802.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add `health/readiness endpoint` to the rpc server + +doc: + - audience: Node Operator + description: | + Add `/health/readiness endpoint` to the rpc server which returns HTTP status code 200 if the chain is synced + and can connect to the rest of the network otherwise status code 500 is returned. 
+ The endpoint can be reached by performing a HTTP GET request to the + endpoint such as `$ curl /health/readiness` + +crates: + - name: sc-rpc-server + bump: patch diff --git a/substrate/client/rpc-servers/Cargo.toml b/substrate/client/rpc-servers/Cargo.toml index 7837c852a1c9..19369e295fc4 100644 --- a/substrate/client/rpc-servers/Cargo.toml +++ b/substrate/client/rpc-servers/Cargo.toml @@ -25,6 +25,7 @@ ip_network = "0.4.1" jsonrpsee = { version = "0.22", features = ["server"] } log = { workspace = true, default-features = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } +serde = { workspace = true } serde_json = { workspace = true, default-features = true } tokio = { version = "1.22.0", features = ["parking_lot"] } tower = { version = "0.4.13", features = ["util"] } diff --git a/substrate/client/rpc-servers/src/lib.rs b/substrate/client/rpc-servers/src/lib.rs index ba1fcf5e3677..647a7ca7e435 100644 --- a/substrate/client/rpc-servers/src/lib.rs +++ b/substrate/client/rpc-servers/src/lib.rs @@ -32,12 +32,10 @@ use hyper::{ service::{make_service_fn, service_fn}, }; use jsonrpsee::{ - server::{ - middleware::http::ProxyGetRequestLayer, stop_channel, ws, PingConfig, StopHandle, - TowerServiceBuilder, - }, + server::{stop_channel, ws, PingConfig, StopHandle, TowerServiceBuilder}, Methods, RpcModule, }; +use middleware::NodeHealthProxyLayer; use tokio::net::TcpListener; use tower::Service; use utils::{build_rpc_api, format_cors, get_proxy_ip, host_filtering, try_into_cors}; @@ -132,8 +130,8 @@ where let http_middleware = tower::ServiceBuilder::new() .option_layer(host_filter) - // Proxy `GET /health` requests to internal `system_health` method. - .layer(ProxyGetRequestLayer::new("/health", "system_health")?) + // Proxy `GET /health, /health/readiness` requests to the internal `system_health` method. 
+ .layer(NodeHealthProxyLayer::default()) .layer(try_into_cors(cors)?); let mut builder = jsonrpsee::server::Server::builder() diff --git a/substrate/client/rpc-servers/src/middleware/mod.rs b/substrate/client/rpc-servers/src/middleware/mod.rs index 88ed8b2f4335..0a14be4dacf5 100644 --- a/substrate/client/rpc-servers/src/middleware/mod.rs +++ b/substrate/client/rpc-servers/src/middleware/mod.rs @@ -32,9 +32,11 @@ use jsonrpsee::{ }; mod metrics; +mod node_health; mod rate_limit; pub use metrics::*; +pub use node_health::*; pub use rate_limit::*; const MAX_JITTER: Duration = Duration::from_millis(50); diff --git a/substrate/client/rpc-servers/src/middleware/node_health.rs b/substrate/client/rpc-servers/src/middleware/node_health.rs new file mode 100644 index 000000000000..d68ec14cb8fe --- /dev/null +++ b/substrate/client/rpc-servers/src/middleware/node_health.rs @@ -0,0 +1,199 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Middleware for handling `/health` and `/health/readiness` endpoints. 
+ +use std::{ + error::Error, + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + +use futures::future::FutureExt; +use http::{HeaderValue, Method, StatusCode, Uri}; +use hyper::Body; +use jsonrpsee::types::{Response as RpcResponse, ResponseSuccess as RpcResponseSuccess}; +use tower::Service; + +const RPC_SYSTEM_HEALTH_CALL: &str = r#"{"jsonrpc":"2.0","method":"system_health","id":0}"#; +const HEADER_VALUE_JSON: HeaderValue = HeaderValue::from_static("application/json; charset=utf-8"); + +/// Layer that applies [`NodeHealthProxy`] which +/// proxies `/health` and `/health/readiness` endpoints. +#[derive(Debug, Clone, Default)] +pub struct NodeHealthProxyLayer; + +impl tower::Layer for NodeHealthProxyLayer { + type Service = NodeHealthProxy; + + fn layer(&self, service: S) -> Self::Service { + NodeHealthProxy::new(service) + } +} + +/// Middleware that proxies `/health` and `/health/readiness` endpoints. +pub struct NodeHealthProxy(S); + +impl NodeHealthProxy { + /// Creates a new [`NodeHealthProxy`]. + pub fn new(service: S) -> Self { + Self(service) + } +} + +impl tower::Service> for NodeHealthProxy +where + S: Service, Response = http::Response>, + S::Response: 'static, + S::Error: Into> + 'static, + S::Future: Send + 'static, +{ + type Response = S::Response; + type Error = Box; + type Future = + Pin> + Send + 'static>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.0.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, mut req: http::Request) -> Self::Future { + let maybe_intercept = InterceptRequest::from_http(&req); + + // Modify the request and proxy it to `system_health` + if let InterceptRequest::Health | InterceptRequest::Readiness = maybe_intercept { + // RPC methods are accessed with `POST`. + *req.method_mut() = Method::POST; + // Precautionary remove the URI. 
+ *req.uri_mut() = Uri::from_static("/"); + + // Requests must have the following headers: + req.headers_mut().insert(http::header::CONTENT_TYPE, HEADER_VALUE_JSON); + req.headers_mut().insert(http::header::ACCEPT, HEADER_VALUE_JSON); + + // Adjust the body to reflect the method call. + req = req.map(|_| Body::from(RPC_SYSTEM_HEALTH_CALL)); + } + + // Call the inner service and get a future that resolves to the response. + let fut = self.0.call(req); + + async move { + let res = fut.await.map_err(|err| err.into())?; + + Ok(match maybe_intercept { + InterceptRequest::Deny => + http_response(StatusCode::METHOD_NOT_ALLOWED, Body::empty()), + InterceptRequest::No => res, + InterceptRequest::Health => { + let health = parse_rpc_response(res.into_body()).await?; + http_ok_response(serde_json::to_string(&health)?) + }, + InterceptRequest::Readiness => { + let health = parse_rpc_response(res.into_body()).await?; + if (!health.is_syncing && health.peers > 0) || !health.should_have_peers { + http_ok_response(Body::empty()) + } else { + http_internal_error() + } + }, + }) + } + .boxed() + } +} + +// NOTE: This is duplicated here to avoid dependency to the `RPC API`. +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +struct Health { + /// Number of connected peers + pub peers: usize, + /// Is the node syncing + pub is_syncing: bool, + /// Should this node have any peers + /// + /// Might be false for local chains or when running without discovery. 
+ pub should_have_peers: bool, +} + +fn http_ok_response>(body: S) -> hyper::Response { + http_response(StatusCode::OK, body) +} + +fn http_response>( + status_code: StatusCode, + body: S, +) -> hyper::Response { + hyper::Response::builder() + .status(status_code) + .header(http::header::CONTENT_TYPE, HEADER_VALUE_JSON) + .body(body.into()) + .expect("Header is valid; qed") +} + +fn http_internal_error() -> hyper::Response { + http_response(hyper::StatusCode::INTERNAL_SERVER_ERROR, Body::empty()) +} + +async fn parse_rpc_response(body: Body) -> Result> { + let bytes = hyper::body::to_bytes(body).await?; + + let raw_rp = serde_json::from_slice::>(&bytes)?; + let rp = RpcResponseSuccess::::try_from(raw_rp)?; + + Ok(rp.result) +} + +/// Whether the request should be treated as ordinary RPC call or be modified. +enum InterceptRequest { + /// Proxy `/health` to `system_health`. + Health, + /// Checks if node has at least one peer and is not doing major syncing. + /// + /// Returns HTTP status code 200 on success otherwise HTTP status code 500 is returned. + Readiness, + /// Treat as an ordinary RPC call and don't modify the request or response. + No, + /// Deny health or readiness calls that are not HTTP GET requests. + /// + /// Returns HTTP status code 405. + Deny, +} + +impl InterceptRequest { + fn from_http(req: &http::Request) -> InterceptRequest { + match req.uri().path() { + "/health" => + if req.method() == http::Method::GET { + InterceptRequest::Health + } else { + InterceptRequest::Deny + }, + "/health/readiness" => + if req.method() == http::Method::GET { + InterceptRequest::Readiness + } else { + InterceptRequest::Deny + }, + // Forward all other requests to the RPC server. 
+ _ => InterceptRequest::No, + } + } +} From 74decbbdf22a7b109209448307563c6f3d62abac Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 20 Jun 2024 08:56:56 +0000 Subject: [PATCH 50/52] Bump curve25519-dalek from 4.1.2 to 4.1.3 (#4824) Bumps [curve25519-dalek](https://github.com/dalek-cryptography/curve25519-dalek) from 4.1.2 to 4.1.3.
Commits
  • 5312a03 curve: Bump version to 4.1.3 (#660)
  • b4f9e4d SECURITY: fix timing variability in backend/serial/u32/scalar.rs (#661)
  • 415892a SECURITY: fix timing variability in backend/serial/u64/scalar.rs (#659)
  • 56bf398 Updates license field to valid SPDX format (#647)
  • 9252fa5 Mitigate check-cfg until MSRV 1.77 (#652)
  • 1efe6a9 Fix a minor typo in signing.rs (#649)
  • cc3421a Indicate that the rand_core feature is required (#641)
  • 858c4ca Address new nightly clippy unnecessary qualifications (#639)
  • 31ccb67 Remove platforms in favor using CARGO_CFG_TARGET_POINTER_WIDTH (#636)
  • 19c7f4a Fix new nightly redundant import lint warns (#638)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=curve25519-dalek&package-manager=cargo&previous-version=4.1.2&new-version=4.1.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/paritytech/polkadot-sdk/network/alerts).
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 19 +++++++++---------- .../primitives/statement-store/Cargo.toml | 2 +- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cb4c25ae998c..5f3e3c3603e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4536,16 +4536,15 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.2" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "platforms", "rustc_version 0.4.0", "subtle 2.5.0", "zeroize", @@ -5018,7 +5017,7 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ - "curve25519-dalek 4.1.2", + "curve25519-dalek 4.1.3", "ed25519 2.2.2", "rand_core 0.6.4", "serde", @@ -5033,7 +5032,7 @@ version = "4.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d9ce6874da5d4415896cd45ffbc4d1cfc0c4f9c079427bd870742c30f2f65a9" dependencies = [ - "curve25519-dalek 4.1.2", + "curve25519-dalek 4.1.3", "ed25519 2.2.2", "hashbrown 0.14.3", "hex", @@ -8479,7 +8478,7 @@ dependencies = [ "bitflags 1.3.2", "blake2 0.10.6", "c2-chacha", - "curve25519-dalek 4.1.2", + "curve25519-dalek 4.1.3", "either", "hashlink", "lioness", @@ -18339,7 +18338,7 @@ dependencies = [ "aead", "arrayref", "arrayvec 0.7.4", - "curve25519-dalek 4.1.2", + "curve25519-dalek 4.1.3", "getrandom_or_panic", "merlin", "rand_core 0.6.4", @@ -19085,7 +19084,7 @@ dependencies = [ "aes-gcm", "blake2 0.10.6", "chacha20poly1305", - "curve25519-dalek 4.1.2", + "curve25519-dalek 4.1.3", "rand_core 0.6.4", 
"ring 0.17.7", "rustc_version 0.4.0", @@ -20356,7 +20355,7 @@ name = "sp-statement-store" version = "10.0.0" dependencies = [ "aes-gcm", - "curve25519-dalek 4.1.2", + "curve25519-dalek 4.1.3", "ed25519-dalek 2.1.1", "hkdf", "parity-scale-codec", @@ -23781,7 +23780,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" dependencies = [ - "curve25519-dalek 4.1.2", + "curve25519-dalek 4.1.3", "rand_core 0.6.4", "serde", "zeroize", diff --git a/substrate/primitives/statement-store/Cargo.toml b/substrate/primitives/statement-store/Cargo.toml index bb893b25dc44..60919b7439ea 100644 --- a/substrate/primitives/statement-store/Cargo.toml +++ b/substrate/primitives/statement-store/Cargo.toml @@ -30,7 +30,7 @@ thiserror = { optional = true, workspace = true } # ECIES dependencies ed25519-dalek = { version = "2.1", optional = true } x25519-dalek = { version = "2.0", optional = true, features = ["static_secrets"] } -curve25519-dalek = { version = "4.1.1", optional = true } +curve25519-dalek = { version = "4.1.3", optional = true } aes-gcm = { version = "0.10", optional = true } hkdf = { version = "0.12.0", optional = true } sha2 = { version = "0.10.7", optional = true } From a23abb17232107275089040a33ff38e6a801e648 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 21 Jun 2024 09:23:19 +0200 Subject: [PATCH 51/52] Bump ws from 8.16.0 to 8.17.1 in /bridges/testing/framework/utils/generate_hex_encoded_call (#4825) Bumps [ws](https://github.com/websockets/ws) from 8.16.0 to 8.17.1.
Release notes

Sourced from ws's releases.

8.17.1

Bug fixes

  • Fixed a DoS vulnerability (#2231).

A request with a number of headers exceeding the [server.maxHeadersCount][] threshold could be used to crash a ws server.

const http = require('http');
const WebSocket = require('ws');

const wss = new WebSocket.Server({ port: 0 }, function () { const chars = "!#$%&'*+-.0123456789abcdefghijklmnopqrstuvwxyz^_`|~".split(''); const headers = {}; let count = 0;

for (let i = 0; i < chars.length; i++) { if (count === 2000) break;

for (let j = 0; j < chars.length; j++) {
  const key = chars[i] + chars[j];
  headers[key] = 'x';

  if (++count === 2000) break;
}

}

headers.Connection = 'Upgrade'; headers.Upgrade = 'websocket'; headers['Sec-WebSocket-Key'] = 'dGhlIHNhbXBsZSBub25jZQ=='; headers['Sec-WebSocket-Version'] = '13';

const request = http.request({ headers: headers, host: '127.0.0.1', port: wss.address().port });

request.end(); });

The vulnerability was reported by Ryan LaPointe in websockets/ws#2230.

In vulnerable versions of ws, the issue can be mitigated in the following ways:

  1. Reduce the maximum allowed length of the request headers using the [--max-http-header-size=size][] and/or the [maxHeaderSize][] options so that no more headers than the server.maxHeadersCount limit can be sent.

... (truncated)

Commits
  • 3c56601 [dist] 8.17.1
  • e55e510 [security] Fix crash when the Upgrade header cannot be read (#2231)
  • 6a00029 [test] Increase code coverage
  • ddfe4a8 [perf] Reduce the amount of crypto.randomFillSync() calls
  • b73b118 [dist] 8.17.0
  • 29694a5 [test] Use the highWaterMark variable
  • 934c9d6 [ci] Test on node 22
  • 1817bac [ci] Do not test on node 21
  • 96c9b3d [major] Flip the default value of allowSynchronousEvents (#2221)
  • e5f32c7 [fix] Emit at most one event per event loop iteration (#2218)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=ws&package-manager=npm_and_yarn&previous-version=8.16.0&new-version=8.17.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/paritytech/polkadot-sdk/network/alerts).
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Branislav Kontur --- .../utils/generate_hex_encoded_call/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json b/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json index b2dddaa19ed1..ca3abcc528cf 100644 --- a/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json +++ b/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json @@ -736,9 +736,9 @@ } }, "node_modules/ws": { - "version": "8.16.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.16.0.tgz", - "integrity": "sha512-HS0c//TP7Ina87TfiPUz1rQzMhHrl/SG2guqRcTOIUYD2q8uhUdNHZYJUaQ8aTGPzCh+c6oawMKW35nFl1dxyQ==", + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", "engines": { "node": ">=10.0.0" }, From b301218db8785c6d425ca9a9ef90daa80780f2ce Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Fri, 21 Jun 2024 11:33:33 +0200 Subject: [PATCH 52/52] [ci] Change storage type for forklift in GHA (#4850) PR changes forklift authentication to gcs cc https://github.com/paritytech/ci_cd/issues/987 --- .forklift/config-gitlab.toml | 33 +++++++++++++++++++ .forklift/config.toml | 10 ++---- .github/review-bot.yml | 2 ++ .github/workflows/check-runtime-migration.yml | 7 ---- .github/workflows/tests-linux-stable.yml | 18 +++------- .github/workflows/tests.yml | 18 +++------- .gitlab-ci.yml | 3 +- 7 files changed, 50 insertions(+), 41 deletions(-) create mode 100644 .forklift/config-gitlab.toml diff --git a/.forklift/config-gitlab.toml b/.forklift/config-gitlab.toml new file mode 100644 index 000000000000..ab3b2729a46d --- 
/dev/null +++ b/.forklift/config-gitlab.toml @@ -0,0 +1,33 @@ +[compression] +type = "zstd" + +[compression.zstd] +compressionLevel = 3 + +[general] +jobNameVariable = "CI_JOB_NAME" +jobsBlackList = [] +logLevel = "warn" +threadsCount = 6 + +[cache] +extraEnv = ["RUNTIME_METADATA_HASH"] + +[metrics] +enabled = true +pushEndpoint = "placeholder" + +[metrics.extraLabels] +environment = "production" +job_name = "$CI_JOB_NAME" +project_name = "$CI_PROJECT_PATH" + +[storage] +type = "s3" + +[storage.s3] +accessKeyId = "placeholder" +bucketName = "placeholder" +concurrency = 10 +endpointUrl = "placeholder" +secretAccessKey = "placeholder" diff --git a/.forklift/config.toml b/.forklift/config.toml index ab3b2729a46d..6f8eed8882ea 100644 --- a/.forklift/config.toml +++ b/.forklift/config.toml @@ -23,11 +23,7 @@ job_name = "$CI_JOB_NAME" project_name = "$CI_PROJECT_PATH" [storage] -type = "s3" +type = "gcs" -[storage.s3] -accessKeyId = "placeholder" -bucketName = "placeholder" -concurrency = 10 -endpointUrl = "placeholder" -secretAccessKey = "placeholder" +[storage.gcs] +bucketName = "parity-ci-forklift" diff --git a/.github/review-bot.yml b/.github/review-bot.yml index ed719cefec8b..adbc480c6ba1 100644 --- a/.github/review-bot.yml +++ b/.github/review-bot.yml @@ -9,6 +9,7 @@ rules: - ^\.gitlab/.* - ^\.config/nextest.toml - ^\.cargo/.* + - ^\.forklift/.* exclude: - ^\.gitlab/pipeline/zombienet.* type: "or" @@ -33,6 +34,7 @@ rules: - ^docker/.* - ^\.github/.* - ^\.gitlab/.* + - ^\.forklift/.* - ^\.config/nextest.toml - ^\.cargo/.* minApprovals: 2 diff --git a/.github/workflows/check-runtime-migration.yml b/.github/workflows/check-runtime-migration.yml index 671673c02c09..33da5a8ecd59 100644 --- a/.github/workflows/check-runtime-migration.yml +++ b/.github/workflows/check-runtime-migration.yml @@ -11,13 +11,6 @@ concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -env: - FORKLIFT_storage_s3_bucketName: ${{ 
secrets.FORKLIFT_storage_s3_bucketName }} - FORKLIFT_storage_s3_accessKeyId: ${{ secrets.FORKLIFT_storage_s3_accessKeyId }} - FORKLIFT_storage_s3_secretAccessKey: ${{ secrets.FORKLIFT_storage_s3_secretAccessKey }} - FORKLIFT_storage_s3_endpointUrl: ${{ secrets.FORKLIFT_storage_s3_endpointUrl }} - FORKLIFT_metrics_pushEndpoint: ${{ secrets.FORKLIFT_metrics_pushEndpoint }} - jobs: set-image: # GitHub Actions allows using 'env' in a container context. diff --git a/.github/workflows/tests-linux-stable.yml b/.github/workflows/tests-linux-stable.yml index 5fdfabc437fe..6f2ac87c3efb 100644 --- a/.github/workflows/tests-linux-stable.yml +++ b/.github/workflows/tests-linux-stable.yml @@ -12,15 +12,7 @@ concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -env: - FORKLIFT_storage_s3_bucketName: ${{ secrets.FORKLIFT_storage_s3_bucketName }} - FORKLIFT_storage_s3_accessKeyId: ${{ secrets.FORKLIFT_storage_s3_accessKeyId }} - FORKLIFT_storage_s3_secretAccessKey: ${{ secrets.FORKLIFT_storage_s3_secretAccessKey }} - FORKLIFT_storage_s3_endpointUrl: ${{ secrets.FORKLIFT_storage_s3_endpointUrl }} - FORKLIFT_metrics_pushEndpoint: ${{ secrets.FORKLIFT_metrics_pushEndpoint }} - jobs: - changes: permissions: pull-requests: read @@ -31,7 +23,7 @@ jobs: # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 # This workaround sets the container image for each job using 'set-image' job output. 
needs: changes - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: ubuntu-latest outputs: IMAGE: ${{ steps.set_image.outputs.IMAGE }} @@ -40,10 +32,10 @@ jobs: uses: actions/checkout@v4 - id: set_image run: cat .github/env >> $GITHUB_OUTPUT - + test-linux-stable-int: needs: [set-image, changes] - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy timeout-minutes: 30 container: @@ -60,11 +52,11 @@ jobs: uses: actions/checkout@v4 - name: script run: WASM_BUILD_NO_COLOR=1 time forklift cargo test -p staging-node-cli --release --locked -- --ignored - + # https://github.com/paritytech/ci_cd/issues/864 test-linux-stable-runtime-benchmarks: needs: [set-image, changes] - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy timeout-minutes: 30 container: diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 293acadc4e6a..0c1447cba33a 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -11,15 +11,7 @@ concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -env: - FORKLIFT_storage_s3_bucketName: ${{ secrets.FORKLIFT_storage_s3_bucketName }} - FORKLIFT_storage_s3_accessKeyId: ${{ secrets.FORKLIFT_storage_s3_accessKeyId }} - FORKLIFT_storage_s3_secretAccessKey: ${{ secrets.FORKLIFT_storage_s3_secretAccessKey }} - FORKLIFT_storage_s3_endpointUrl: ${{ secrets.FORKLIFT_storage_s3_endpointUrl }} - FORKLIFT_metrics_pushEndpoint: ${{ secrets.FORKLIFT_metrics_pushEndpoint }} - jobs: - changes: permissions: pull-requests: read @@ -40,7 +32,7 @@ jobs: quick-benchmarks: needs: [set-image, changes] - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy timeout-minutes: 30 container: @@ -55,11 +47,11 @@ jobs: uses: actions/checkout@v4 - name: 
script run: time forklift cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet - + # cf https://github.com/paritytech/polkadot-sdk/issues/1652 test-syscalls: needs: [set-image, changes] - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy timeout-minutes: 30 container: @@ -81,10 +73,10 @@ jobs: # - if [[ "$CI_JOB_STATUS" == "failed" ]]; then # printf "The x86_64 syscalls used by the worker binaries have changed. Please review if this is expected and update polkadot/scripts/list-syscalls/*-worker-syscalls as needed.\n"; # fi - + cargo-check-all-benches: needs: [set-image, changes] - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy timeout-minutes: 30 container: diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 73a8c52c448f..7f2babc6bd47 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -120,7 +120,8 @@ default: .forklift-cache: before_script: - mkdir ~/.forklift - - cp .forklift/config.toml ~/.forklift/config.toml + - cp .forklift/config-gitlab.toml ~/.forklift/config.toml + - cat .forklift/config-gitlab.toml > .forklift/config.toml - > if [ "$FORKLIFT_BYPASS" != "true" ]; then echo "FORKLIFT_BYPASS not set";