diff --git a/Cargo.lock b/Cargo.lock index 05800e3468718..5d0268328e0e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14731,7 +14731,6 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-statement-table", - "rstest", "sc-keystore", "schnellru", "sp-application-crypto 30.0.0", @@ -14906,8 +14905,6 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", - "rstest", - "schnellru", "sp-application-crypto 30.0.0", "sp-keystore 0.34.0", "thiserror 1.0.65", diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index be829a84ee6ea..a234bdd3046ff 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -33,7 +33,6 @@ futures = { features = ["thread-pool"], workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } polkadot-primitives = { workspace = true, features = ["test"] } polkadot-primitives-test-helpers = { workspace = true } -rstest = { workspace = true } sc-keystore = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index f5bc346131dc8..1127a58653d67 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -107,7 +107,6 @@ use polkadot_node_subsystem_util::{ }; use polkadot_parachain_primitives::primitives::IsSystem; use polkadot_primitives::{ - node_features::FeatureIndex, vstaging::{ BackedCandidate, CandidateReceiptV2 as CandidateReceipt, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, @@ -234,9 +233,6 @@ struct PerRelayParentState { fallbacks: HashMap, /// The minimum backing votes threshold. 
minimum_backing_votes: u32, - /// If true, we're appending extra bits in the BackedCandidate validator indices bitfield, - /// which represent the assigned core index. True if ElasticScalingMVP is enabled. - inject_core_index: bool, /// The number of cores. n_cores: u32, /// Claim queue state. If the runtime API is not available, it'll be populated with info from @@ -616,7 +612,6 @@ fn table_attested_to_backed( ValidatorSignature, >, table_context: &TableContext, - inject_core_index: bool, ) -> Option { let TableAttestedCandidate { candidate, validity_votes, group_id: core_index } = attested; @@ -655,7 +650,7 @@ fn table_attested_to_backed( .map(|(pos_in_votes, _pos_in_group)| validity_votes[pos_in_votes].clone()) .collect(), validator_indices, - inject_core_index.then_some(core_index), + Some(core_index), )) } @@ -1161,16 +1156,11 @@ async fn construct_per_relay_parent_state( let node_features = per_session_cache.node_features(session_index, parent, ctx.sender()).await; let node_features = try_runtime_api!(node_features); - let inject_core_index = node_features - .get(FeatureIndex::ElasticScalingMVP as usize) - .map(|b| *b) - .unwrap_or(false); - let executor_params = per_session_cache.executor_params(session_index, parent, ctx.sender()).await; let executor_params = try_runtime_api!(executor_params); - gum::debug!(target: LOG_TARGET, inject_core_index, ?parent, "New state"); + gum::debug!(target: LOG_TARGET, ?parent, "New state"); let (validator_groups, group_rotation_info) = try_runtime_api!(groups); @@ -1241,7 +1231,6 @@ async fn construct_per_relay_parent_state( awaiting_validation: HashSet::new(), fallbacks: HashMap::new(), minimum_backing_votes, - inject_core_index, n_cores: validator_groups.len() as u32, claim_queue: ClaimQueueSnapshot::from(claim_queue), validator_to_group, @@ -1676,11 +1665,7 @@ async fn post_import_statement_actions( // `HashSet::insert` returns true if the thing wasn't in there already. 
if rp_state.backed.insert(candidate_hash) { - if let Some(backed) = table_attested_to_backed( - attested, - &rp_state.table_context, - rp_state.inject_core_index, - ) { + if let Some(backed) = table_attested_to_backed(attested, &rp_state.table_context) { let para_id = backed.candidate().descriptor.para_id(); gum::debug!( target: LOG_TARGET, @@ -2157,13 +2142,7 @@ fn handle_get_backable_candidates_message( &rp_state.table_context, rp_state.minimum_backing_votes, ) - .and_then(|attested| { - table_attested_to_backed( - attested, - &rp_state.table_context, - rp_state.inject_core_index, - ) - }); + .and_then(|attested| table_attested_to_backed(attested, &rp_state.table_context)); if let Some(backed_candidate) = maybe_backed_candidate { backed diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 1a5fbeda100c8..4be8124c66d19 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -27,7 +27,6 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers::mock::new_leaf; use polkadot_primitives::{ - node_features, vstaging::{CoreState, MutateDescriptorV2, OccupiedCore}, BlockNumber, CandidateDescriptor, GroupRotationInfo, HeadData, Header, PersistedValidationData, ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, @@ -37,7 +36,6 @@ use polkadot_primitives_test_helpers::{ dummy_committed_candidate_receipt_v2, dummy_hash, validator_pubkeys, }; use polkadot_statement_table::v2::Misbehavior; -use rstest::rstest; use sp_application_crypto::AppCrypto; use sp_keyring::Sr25519Keyring; use sp_keystore::Keystore; @@ -780,19 +778,9 @@ fn backing_second_works() { } // Test that the candidate reaches quorum successfully. 
-#[rstest] -#[case(true)] -#[case(false)] -fn backing_works(#[case] elastic_scaling_mvp: bool) { +#[test] +fn backing_works() { let mut test_state = TestState::default(); - if elastic_scaling_mvp { - test_state - .node_features - .resize((node_features::FeatureIndex::ElasticScalingMVP as u8 + 1) as usize, false); - test_state - .node_features - .set(node_features::FeatureIndex::ElasticScalingMVP as u8 as usize, true); - } test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; @@ -917,12 +905,9 @@ fn backing_works(#[case] elastic_scaling_mvp: bool) { assert_eq!(candidates[0].validity_votes().len(), 3); let (validator_indices, maybe_core_index) = - candidates[0].validator_indices_and_core_index(elastic_scaling_mvp); - if elastic_scaling_mvp { - assert_eq!(maybe_core_index.unwrap(), CoreIndex(0)); - } else { - assert!(maybe_core_index.is_none()); - } + candidates[0].validator_indices_and_core_index(true); + + assert_eq!(maybe_core_index.unwrap(), CoreIndex(0)); assert_eq!( validator_indices, @@ -941,13 +926,6 @@ fn backing_works(#[case] elastic_scaling_mvp: bool) { #[test] fn get_backed_candidate_preserves_order() { let mut test_state = TestState::default(); - test_state - .node_features - .resize((node_features::FeatureIndex::ElasticScalingMVP as u8 + 1) as usize, false); - test_state - .node_features - .set(node_features::FeatureIndex::ElasticScalingMVP as u8 as usize, true); - // Set a single validator as the first validator group. It simplifies the test. test_state.validator_groups.0[0] = vec![ValidatorIndex(2)]; // Add another validator group for the third core. 
@@ -1575,8 +1553,11 @@ fn backing_works_while_validation_ongoing() { .validity_votes() .contains(&ValidityAttestation::Explicit(signed_c.signature().clone()))); assert_eq!( - candidates[0].validator_indices_and_core_index(false), - (bitvec::bitvec![u8, bitvec::order::Lsb0; 1, 0, 1, 1].as_bitslice(), None) + candidates[0].validator_indices_and_core_index(true), + ( + bitvec::bitvec![u8, bitvec::order::Lsb0; 1, 0, 1, 1].as_bitslice(), + Some(CoreIndex(0)) + ) ); virtual_overseer @@ -2225,7 +2206,7 @@ fn candidate_backing_reorders_votes() { group_id: core_idx, }; - let backed = table_attested_to_backed(attested, &table_context, false).unwrap(); + let backed = table_attested_to_backed(attested, &table_context).unwrap(); let expected_bitvec = { let mut validator_indices = BitVec::::with_capacity(6); @@ -2243,8 +2224,8 @@ fn candidate_backing_reorders_votes() { vec![fake_attestation(1).into(), fake_attestation(3).into(), fake_attestation(5).into()]; assert_eq!( - backed.validator_indices_and_core_index(false), - (expected_bitvec.as_bitslice(), None) + backed.validator_indices_and_core_index(true), + (expected_bitvec.as_bitslice(), Some(CoreIndex(10))) ); assert_eq!(backed.validity_votes(), expected_attestations); } diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index a3880d5a0f136..ec1430af6c04b 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -21,7 +21,6 @@ polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } -schnellru = { workspace = true } thiserror = { workspace = true } [dev-dependencies] @@ -30,5 +29,3 @@ polkadot-primitives = { workspace = true, features = ["test"] } polkadot-primitives-test-helpers = { workspace = true } 
sp-application-crypto = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } - -rstest = { workspace = true } diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index b36988c9f1d92..f8d053c22d55f 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -24,7 +24,6 @@ use futures::{ channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered, FutureExt, }; use futures_timer::Delay; -use schnellru::{ByLength, LruMap}; use polkadot_node_subsystem::{ messages::{ @@ -34,14 +33,10 @@ use polkadot_node_subsystem::{ overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; -use polkadot_node_subsystem_util::{ - request_availability_cores, request_node_features, request_session_index_for_child, TimeoutExt, -}; +use polkadot_node_subsystem_util::{request_availability_cores, TimeoutExt}; use polkadot_primitives::{ - node_features::FeatureIndex, vstaging::{BackedCandidate, CoreState}, - CandidateHash, CoreIndex, Hash, Id as ParaId, SessionIndex, SignedAvailabilityBitfield, - ValidatorIndex, + CandidateHash, CoreIndex, Hash, Id as ParaId, SignedAvailabilityBitfield, ValidatorIndex, }; use std::collections::{BTreeMap, HashMap}; @@ -74,25 +69,18 @@ impl ProvisionerSubsystem { } } -/// Per-session info we need for the provisioner subsystem. -pub struct PerSession { - elastic_scaling_mvp: bool, -} - /// A per-relay-parent state for the provisioning subsystem. 
pub struct PerRelayParent { leaf: ActivatedLeaf, - elastic_scaling_mvp: bool, signed_bitfields: Vec, is_inherent_ready: bool, awaiting_inherent: Vec>, } impl PerRelayParent { - fn new(leaf: ActivatedLeaf, per_session: &PerSession) -> Self { + fn new(leaf: ActivatedLeaf) -> Self { Self { leaf, - elastic_scaling_mvp: per_session.elastic_scaling_mvp, signed_bitfields: Vec::new(), is_inherent_ready: false, awaiting_inherent: Vec::new(), @@ -120,17 +108,10 @@ impl ProvisionerSubsystem { async fn run(mut ctx: Context, metrics: Metrics) -> FatalResult<()> { let mut inherent_delays = InherentDelays::new(); let mut per_relay_parent = HashMap::new(); - let mut per_session = LruMap::new(ByLength::new(2)); loop { - let result = run_iteration( - &mut ctx, - &mut per_relay_parent, - &mut per_session, - &mut inherent_delays, - &metrics, - ) - .await; + let result = + run_iteration(&mut ctx, &mut per_relay_parent, &mut inherent_delays, &metrics).await; match result { Ok(()) => break, @@ -145,7 +126,6 @@ async fn run(mut ctx: Context, metrics: Metrics) -> FatalResult<()> { async fn run_iteration( ctx: &mut Context, per_relay_parent: &mut HashMap, - per_session: &mut LruMap, inherent_delays: &mut InherentDelays, metrics: &Metrics, ) -> Result<(), Error> { @@ -155,7 +135,7 @@ async fn run_iteration( // Map the error to ensure that the subsystem exits when the overseer is gone. match from_overseer.map_err(Error::OverseerExited)? 
{ FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => - handle_active_leaves_update(ctx.sender(), update, per_relay_parent, per_session, inherent_delays).await?, + handle_active_leaves_update(update, per_relay_parent, inherent_delays).await?, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), FromOrchestra::Communication { msg } => { @@ -184,10 +164,8 @@ async fn run_iteration( } async fn handle_active_leaves_update( - sender: &mut impl overseer::ProvisionerSenderTrait, update: ActiveLeavesUpdate, per_relay_parent: &mut HashMap, - per_session: &mut LruMap, inherent_delays: &mut InherentDelays, ) -> Result<(), Error> { gum::trace!(target: LOG_TARGET, "Handle ActiveLeavesUpdate"); @@ -196,27 +174,9 @@ async fn handle_active_leaves_update( } if let Some(leaf) = update.activated { - let session_index = request_session_index_for_child(leaf.hash, sender) - .await - .await - .map_err(Error::CanceledSessionIndex)??; - if per_session.get(&session_index).is_none() { - let elastic_scaling_mvp = request_node_features(leaf.hash, session_index, sender) - .await - .await - .map_err(Error::CanceledNodeFeatures)?? 
- .get(FeatureIndex::ElasticScalingMVP as usize) - .map(|b| *b) - .unwrap_or(false); - - per_session.insert(session_index, PerSession { elastic_scaling_mvp }); - } - - let session_info = per_session.get(&session_index).expect("Just inserted"); - gum::trace!(target: LOG_TARGET, leaf_hash=?leaf.hash, "Adding delay"); let delay_fut = Delay::new(PRE_PROPOSE_TIMEOUT).map(move |_| leaf.hash).boxed(); - per_relay_parent.insert(leaf.hash, PerRelayParent::new(leaf, session_info)); + per_relay_parent.insert(leaf.hash, PerRelayParent::new(leaf)); inherent_delays.push(delay_fut); } @@ -272,8 +232,6 @@ async fn send_inherent_data_bg( ) -> Result<(), Error> { let leaf = per_relay_parent.leaf.clone(); let signed_bitfields = per_relay_parent.signed_bitfields.clone(); - let elastic_scaling_mvp = per_relay_parent.elastic_scaling_mvp; - let mut sender = ctx.sender().clone(); let bg = async move { @@ -285,19 +243,13 @@ async fn send_inherent_data_bg( "Sending inherent data in background." ); - let send_result = send_inherent_data( - &leaf, - &signed_bitfields, - elastic_scaling_mvp, - return_senders, - &mut sender, - &metrics, - ) // Make sure call is not taking forever: - .timeout(SEND_INHERENT_DATA_TIMEOUT) - .map(|v| match v { - Some(r) => r, - None => Err(Error::SendInherentDataTimeout), - }); + let send_result = + send_inherent_data(&leaf, &signed_bitfields, return_senders, &mut sender, &metrics) // Make sure call is not taking forever: + .timeout(SEND_INHERENT_DATA_TIMEOUT) + .map(|v| match v { + Some(r) => r, + None => Err(Error::SendInherentDataTimeout), + }); match send_result.await { Err(err) => { @@ -383,7 +335,6 @@ type CoreAvailability = BitVec; async fn send_inherent_data( leaf: &ActivatedLeaf, bitfields: &[SignedAvailabilityBitfield], - elastic_scaling_mvp: bool, return_senders: Vec>, from_job: &mut impl overseer::ProvisionerSenderTrait, metrics: &Metrics, @@ -420,9 +371,7 @@ async fn send_inherent_data( "Selected bitfields" ); - let candidates = - 
select_candidates(&availability_cores, &bitfields, elastic_scaling_mvp, leaf, from_job) - .await?; + let candidates = select_candidates(&availability_cores, &bitfields, leaf, from_job).await?; gum::trace!( target: LOG_TARGET, @@ -533,7 +482,6 @@ fn select_availability_bitfields( /// based on core states. async fn request_backable_candidates( availability_cores: &[CoreState], - elastic_scaling_mvp: bool, bitfields: &[SignedAvailabilityBitfield], relay_parent: &ActivatedLeaf, sender: &mut impl overseer::ProvisionerSenderTrait, @@ -595,11 +543,6 @@ async fn request_backable_candidates( for (para_id, core_count) in scheduled_cores_per_para { let para_ancestors = ancestors.remove(¶_id).unwrap_or_default(); - // If elastic scaling MVP is disabled, only allow one candidate per parachain. - if !elastic_scaling_mvp && core_count > 1 { - continue - } - let response = get_backable_candidates( relay_parent.hash, para_id, @@ -630,7 +573,6 @@ async fn request_backable_candidates( async fn select_candidates( availability_cores: &[CoreState], bitfields: &[SignedAvailabilityBitfield], - elastic_scaling_mvp: bool, leaf: &ActivatedLeaf, sender: &mut impl overseer::ProvisionerSenderTrait, ) -> Result, Error> { @@ -641,15 +583,8 @@ async fn select_candidates( "before GetBackedCandidates" ); - let selected_candidates = request_backable_candidates( - availability_cores, - elastic_scaling_mvp, - bitfields, - leaf, - sender, - ) - .await?; - + let selected_candidates = + request_backable_candidates(availability_cores, bitfields, leaf, sender).await?; gum::debug!(target: LOG_TARGET, ?selected_candidates, "Got backable candidates"); // now get the backed candidates corresponding to these candidate receipts diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs index 4667f44d65a03..d9e7240a4e0b7 100644 --- a/polkadot/node/core/provisioner/src/tests.rs +++ b/polkadot/node/core/provisioner/src/tests.rs @@ -263,7 +263,6 @@ mod select_candidates { 
BlockNumber, CandidateCommitments, PersistedValidationData, }; use polkadot_primitives_test_helpers::{dummy_candidate_descriptor_v2, dummy_hash}; - use rstest::rstest; use std::ops::Not; use CoreState::{Free, Scheduled}; @@ -661,7 +660,6 @@ mod select_candidates { select_candidates( &[], &[], - false, &new_leaf(Default::default(), BLOCK_UNDER_PRODUCTION - 1), &mut tx, ) @@ -740,7 +738,6 @@ mod select_candidates { let result = select_candidates( &mock_cores, &[], - false, &new_leaf(Default::default(), BLOCK_UNDER_PRODUCTION - 1), &mut tx, ) @@ -843,7 +840,6 @@ mod select_candidates { let result = select_candidates( &mock_cores, &[], - true, &new_leaf(Default::default(), BLOCK_UNDER_PRODUCTION - 1), &mut tx, ) @@ -858,11 +854,8 @@ mod select_candidates { }, ) } - - #[rstest] - #[case(true)] - #[case(false)] - fn one_core_per_para(#[case] elastic_scaling_mvp: bool) { + #[test] + fn one_core_per_para() { let mock_cores = mock_availability_cores_one_per_para(); // why those particular indices? see the comments on mock_availability_cores() @@ -888,7 +881,6 @@ mod select_candidates { let result = select_candidates( &mock_cores, &[], - elastic_scaling_mvp, &new_leaf(Default::default(), BLOCK_UNDER_PRODUCTION - 1), &mut tx, ) @@ -965,56 +957,6 @@ mod select_candidates { let result = select_candidates( &mock_cores, &[], - true, - &new_leaf(Default::default(), BLOCK_UNDER_PRODUCTION - 1), - &mut tx, - ) - .await - .unwrap(); - - assert_eq!(result.len(), expected_candidates_clone.len()); - result.into_iter().for_each(|c| { - assert!( - expected_candidates_clone - .iter() - .any(|c2| c.candidate().corresponds_to(&c2.receipt())), - "Failed to find candidate: {:?}", - c, - ) - }); - }, - ) - } - - #[test] - fn multiple_cores_per_para_elastic_scaling_mvp_disabled() { - let mock_cores = mock_availability_cores_multiple_per_para(); - - // why those particular indices? 
see the comments on mock_availability_cores() - let expected_candidates: Vec<_> = vec![1, 4, 7, 8, 10]; - - let (candidates, expected_candidates) = - make_candidates(mock_cores.len(), expected_candidates); - - let mut required_ancestors: HashMap, Ancestors> = HashMap::new(); - required_ancestors.insert( - vec![candidates[4]], - vec![CandidateHash(Hash::from_low_u64_be(41))].into_iter().collect(), - ); - required_ancestors.insert( - vec![candidates[8]], - vec![CandidateHash(Hash::from_low_u64_be(81))].into_iter().collect(), - ); - - let mock_cores_clone = mock_cores.clone(); - let expected_candidates_clone = expected_candidates.clone(); - test_harness( - |r| mock_overseer(r, mock_cores_clone, expected_candidates, required_ancestors), - |mut tx: TestSubsystemSender| async move { - let result = select_candidates( - &mock_cores, - &[], - false, &new_leaf(Default::default(), BLOCK_UNDER_PRODUCTION - 1), &mut tx, ) @@ -1076,7 +1018,6 @@ mod select_candidates { let result = select_candidates( &mock_cores, &[], - false, &new_leaf(Default::default(), BLOCK_UNDER_PRODUCTION - 1), &mut tx, ) diff --git a/polkadot/node/test/service/src/chain_spec.rs b/polkadot/node/test/service/src/chain_spec.rs index ef83c4795dc68..f17998bfa8ff6 100644 --- a/polkadot/node/test/service/src/chain_spec.rs +++ b/polkadot/node/test/service/src/chain_spec.rs @@ -111,10 +111,13 @@ fn polkadot_testnet_genesis( const ENDOWMENT: u128 = 1_000_000 * DOTS; const STASH: u128 = 100 * DOTS; - // Prepare node features with V2 receipts enabled. + // Prepare node features with V2 receipts + // and elastic scaling enabled. 
let mut node_features = NodeFeatures::new(); - node_features.resize(node_features::FeatureIndex::CandidateReceiptV2 as usize + 1, false); + node_features.resize(node_features::FeatureIndex::FirstUnassigned as usize + 1, false); + node_features.set(node_features::FeatureIndex::CandidateReceiptV2 as u8 as usize, true); + node_features.set(node_features::FeatureIndex::ElasticScalingMVP as u8 as usize, true); serde_json::json!({ "balances": { diff --git a/prdoc/pr_7286.prdoc b/prdoc/pr_7286.prdoc new file mode 100644 index 0000000000000..4788bf5a8d15f --- /dev/null +++ b/prdoc/pr_7286.prdoc @@ -0,0 +1,11 @@ +title: Remove node-side feature flag checks for Elastic Scaling MVP +doc: + - audience: Node Dev + description: | + This PR removes node-side conditional checks for FeatureIndex::ElasticScalingMVP; elastic + scaling is now always enabled by default. This simplifies the backing and provisioner logic. +crates: + - name: polkadot-node-core-backing + bump: patch + - name: polkadot-node-core-provisioner + bump: major