From 0d13d7b398f6159821c99c8e625a5e3110484865 Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 5 Mar 2026 14:18:44 +0100 Subject: [PATCH 01/52] Fix issue #11272 https://github.com/paritytech/polkadot-sdk/issues/11272 --- polkadot/node/collation-generation/src/lib.rs | 2 +- .../node/collation-generation/src/tests.rs | 2 +- polkadot/node/core/backing/src/lib.rs | 67 +++-- polkadot/node/core/backing/src/tests/mod.rs | 137 +++++++++- .../node/core/candidate-validation/src/lib.rs | 48 +--- .../core/candidate-validation/src/tests.rs | 78 +----- .../dispute-coordinator/src/initialized.rs | 32 +-- .../src/fragment_chain/mod.rs | 14 +- .../src/fragment_chain/tests.rs | 209 +++++--------- .../core/prospective-parachains/src/lib.rs | 55 ++-- .../core/prospective-parachains/src/tests.rs | 20 +- polkadot/node/core/provisioner/src/tests.rs | 4 +- polkadot/node/core/pvf/common/src/execute.rs | 6 +- polkadot/node/core/pvf/src/execute/queue.rs | 3 - polkadot/node/core/pvf/src/host.rs | 1 - polkadot/node/core/pvf/tests/it/main.rs | 1 - .../src/variants/suggest_garbage_candidate.rs | 4 +- .../src/collator_side/mod.rs | 23 +- .../src/collator_side/tests/mod.rs | 2 +- .../src/validator_side/collation.rs | 11 +- .../src/validator_side/error.rs | 3 - .../src/validator_side/mod.rs | 94 +++---- .../src/validator_side/tests/mod.rs | 11 +- .../tests/prospective_parachains.rs | 66 +---- .../collation_manager/mod.rs | 1 - .../peer_manager/mod.rs | 2 +- .../src/validator_side_experimental/tests.rs | 21 +- .../dispute-distribution/src/receiver/mod.rs | 14 +- .../src/sender/send_task.rs | 16 +- .../statement-distribution/src/error.rs | 3 - .../statement-distribution/src/v2/mod.rs | 63 +---- .../statement-distribution/src/v2/requests.rs | 13 +- .../src/v2/tests/requests.rs | 6 + polkadot/primitives/src/lib.rs | 2 +- polkadot/primitives/src/v9/mod.rs | 258 ++++++++++++++++-- polkadot/primitives/test-helpers/src/lib.rs | 64 ++--- .../runtime/parachains/src/inclusion/tests.rs | 2 +- 
.../parachains/src/paras_inherent/mod.rs | 52 ++-- .../parachains/src/paras_inherent/tests.rs | 23 +- 39 files changed, 676 insertions(+), 757 deletions(-) diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs index 568237a1d313e..6a9aa2c361e33 100644 --- a/polkadot/node/collation-generation/src/lib.rs +++ b/polkadot/node/collation-generation/src/lib.rs @@ -665,7 +665,7 @@ async fn construct_and_distribute_receipt( let ccr = CommittedCandidateReceiptV2 { descriptor, commitments: commitments.clone() }; - ccr.parse_ump_signals(&transposed_claim_queue, scheduling_parent.is_some()) + ccr.parse_ump_signals(&transposed_claim_queue) .map_err(Error::CandidateReceiptCheck)?; ccr.to_plain() diff --git a/polkadot/node/collation-generation/src/tests.rs b/polkadot/node/collation-generation/src/tests.rs index 8022d57c3867b..a306b3b9d7c01 100644 --- a/polkadot/node/collation-generation/src/tests.rs +++ b/polkadot/node/collation-generation/src/tests.rs @@ -556,7 +556,7 @@ fn approved_peer_signal() { assert_eq!(descriptor.persisted_validation_data_hash(), expected_pvd.hash()); assert_eq!(descriptor.para_head(), dummy_head_data().hash()); assert_eq!(descriptor.validation_code_hash(), validation_code_hash); - assert_eq!(descriptor.version(true), CandidateDescriptorVersion::V3); + assert_eq!(descriptor.version(), CandidateDescriptorVersion::V3); } ); diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 0abe9c207a8c7..fb624bd87076b 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -450,6 +450,18 @@ struct State { background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, /// The handle to the keystore used for signing. keystore: KeystorePtr, + /// Monotonic flag: set to `true` once any activated leaf has the V3 candidate + /// descriptor feature enabled. Once set, never unset. 
Used for V3 gating checks + /// in backing — if V3 was never seen, reject V3 candidates and candidates where + /// old/new version detection disagrees. + /// + /// Note: In theory a reorg could revert a leaf where V3 was enabled, making this + /// flag temporarily inaccurate. This is acceptable because: + /// 1. The runtime performs the same check and is always correct. + /// 2. The worst case is the backer signs a statement the runtime later rejects — the candidate + /// simply won't be included, no slashing occurs. + /// 3. This is an extremely short-lived edge case during reorgs. + v3_ever_seen: bool, } impl State { @@ -464,6 +476,7 @@ impl State { per_session_cache: PerSessionCache::default(), background_validation_tx, keystore, + v3_ever_seen: false, } } } @@ -1053,6 +1066,9 @@ async fn handle_active_leaves_update( .await?; if let Some(per) = per { + if !state.v3_ever_seen && FeatureIndex::CandidateReceiptV3.is_set(&per.node_features) { + state.v3_ever_seen = true; + } state.per_scheduling_parent.insert(maybe_new, per); } } @@ -1380,8 +1396,7 @@ async fn get_executor_params( sp_state: &PerSchedulingParentState, sender: &mut impl overseer::SubsystemSender, ) -> Result, Error> { - let v3_enabled = FeatureIndex::CandidateReceiptV3.is_set(&sp_state.node_features); - let session = descriptor.session_index(v3_enabled).unwrap_or(sp_state.session_index); + let session = descriptor.session_index().unwrap_or(sp_state.session_index); per_session_cache .executor_params(session, sp_state.parent, sender) .await @@ -1856,8 +1871,7 @@ async fn kick_off_validation_work( candidate_hash, pov_hash: attesting.pov_hash, }; - let v3_enabled = FeatureIndex::CandidateReceiptV3.is_set(&sp_state.node_features); - let scheduling_parent = attesting.candidate.descriptor().scheduling_parent(v3_enabled); + let scheduling_parent = attesting.candidate.descriptor().scheduling_parent(); background_validate_and_make_available( ctx, @@ -1909,6 +1923,19 @@ async fn maybe_validate_and_import( return 
Ok(()); } + // Version consistency + V3 gating for Seconded statements (shared logic). + if let StatementWithPVD::Seconded(receipt, _) = statement.payload() { + if let Err(reason) = receipt.descriptor.check_version_acceptance(state.v3_ever_seen) { + gum::debug!( + target: LOG_TARGET, + ?scheduling_parent, + "Not importing Seconded statement: {}", + reason, + ); + return Ok(()); + } + } + let res = import_statement(ctx, sp_state, &mut state.per_candidate, &statement).await; // if we get an Error::RejectedByProspectiveParachains, @@ -2032,8 +2059,7 @@ async fn validate_and_second( ); let bg_sender = ctx.sender().clone(); - let v3_enabled = FeatureIndex::CandidateReceiptV3.is_set(&sp_state.node_features); - let scheduling_parent = candidate.descriptor.scheduling_parent(v3_enabled); + let scheduling_parent = candidate.descriptor.scheduling_parent(); background_validate_and_make_available( ctx, sp_state, @@ -2067,7 +2093,6 @@ async fn handle_second_message( let _timer = metrics.time_process_second(); let candidate_hash = candidate.hash(); - let relay_parent = candidate.descriptor().relay_parent(); if candidate.descriptor().persisted_validation_data_hash() != persisted_validation_data.hash() { gum::warn!( @@ -2079,25 +2104,19 @@ async fn handle_second_message( return Ok(()); } - // First, determine v3_enabled by checking any available relay parent state - // (we need this to extract scheduling_parent correctly) - // Note: We use the relay parent for node feature detection, while later we use the scheduling - // parent. This is fine because: - // - // - We assume the node feature gets enabled and not disabled again. - // - The scheduling parent is never older than the relay parent. - // - // Thus if the feature was enabled at the relay parent, it will also be enabled at the - // scheduling parent. If it was not, it does not matter because then we have scheduling_parent - // == relay_parent. 
- let v3_enabled = state - .per_scheduling_parent - .get(&relay_parent) - .map(|sp_state| FeatureIndex::CandidateReceiptV3.is_set(&sp_state.node_features)) - .unwrap_or(false); + // Version consistency + V3 gating (shared logic from primitives). + if let Err(reason) = candidate.descriptor().check_version_acceptance(state.v3_ever_seen) { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + "Not seconding candidate: {}", + reason, + ); + return Ok(()); + } // The signing context should use scheduling_parent (for V1/V2, this equals relay_parent) - let scheduling_parent = candidate.descriptor().scheduling_parent(v3_enabled); + let scheduling_parent = candidate.descriptor().scheduling_parent(); // Look up the PerSchedulingParentState using scheduling_parent - this is where we'll sign let sp_state = match state.per_scheduling_parent.get_mut(&scheduling_parent) { diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 2850b43799bb2..3704bbd1fbb7b 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -32,8 +32,8 @@ use polkadot_primitives::{ PersistedValidationData, ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, }; use polkadot_primitives_test_helpers::{ - dummy_candidate_receipt_bad_sig, dummy_collator, dummy_collator_signature, - dummy_committed_candidate_receipt_v2, dummy_hash, validator_pubkeys, CandidateDescriptor, + dummy_candidate_receipt_bad_sig, dummy_committed_candidate_receipt_v2, dummy_hash, + validator_pubkeys, }; use polkadot_statement_table::v2::Misbehavior; use sp_application_crypto::AppCrypto; @@ -236,18 +236,17 @@ struct TestCandidateBuilder { impl TestCandidateBuilder { fn build(self) -> CommittedCandidateReceipt { CommittedCandidateReceipt { - descriptor: CandidateDescriptor { - para_id: self.para_id, - pov_hash: self.pov_hash, - relay_parent: self.relay_parent, - erasure_root: self.erasure_root, - collator: dummy_collator(), - 
signature: dummy_collator_signature(), - para_head: self.head_data.hash(), - validation_code_hash: ValidationCode(self.validation_code).hash(), - persisted_validation_data_hash: self.persisted_validation_data_hash, - } - .into(), + descriptor: polkadot_primitives::CandidateDescriptorV2::new( + self.para_id, + self.relay_parent, + CoreIndex(0), + 1, // session_index (matches TestState default) + self.persisted_validation_data_hash, + self.pov_hash, + self.erasure_root, + self.head_data.hash(), + ValidationCode(self.validation_code).hash(), + ), commitments: CandidateCommitments { head_data: self.head_data, upward_messages: Default::default(), @@ -4195,3 +4194,113 @@ fn occupied_core_assignment() { virtual_overseer }); } + +// Test that an ambiguous candidate (version consistency check fails) is silently rejected +// when sent via CandidateBackingMessage::Second. +#[test] +fn ambiguous_candidate_rejected_on_second() { + let mut test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + + let pov_hash = pov.hash(); + let mut candidate = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + + // Make the candidate ambiguous: set scheduling_parent to non-zero while keeping + // version=0. Old rules see V1 (non-zero scheduling_parent), new rules see V2. 
+ candidate.descriptor.set_scheduling_parent(Hash::repeat_byte(0xAB)); + + let second = CandidateBackingMessage::Second { + scheduling_parent: test_state.relay_parent, + candidate: candidate.to_plain(), + pvd: pvd.clone(), + pov: pov.clone(), + }; + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + // The candidate should be silently rejected — no validation work issued. + assert_matches!(virtual_overseer.recv().timeout(Duration::from_secs(1)).await, None); + + virtual_overseer + }); +} + +// Test that an ambiguous candidate (version consistency check fails) is silently rejected +// when received as a Seconded statement from another validator via +// CandidateBackingMessage::Statement. +#[test] +fn ambiguous_candidate_rejected_on_statement() { + let mut test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + + let pov_hash = pov.hash(); + let mut candidate = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + + // Make the candidate ambiguous: set scheduling_parent to non-zero while keeping + // version=0. Old rules see V1 (non-zero scheduling_parent), new rules see V2. + candidate.descriptor.set_scheduling_parent(Hash::repeat_byte(0xAB)); + + // Sign as a Seconded statement from another validator. 
+ let public = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[2].to_seed()), + ) + .expect("Insert key into keystore"); + + let signed_statement = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate.clone(), pvd.clone()), + &test_state.signing_context, + ValidatorIndex(2), + &public.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let statement = CandidateBackingMessage::Statement { + scheduling_parent: test_state.relay_parent, + statement: signed_statement, + }; + + virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + + // The candidate should be silently rejected — no validation work issued. + assert_matches!(virtual_overseer.recv().timeout(Duration::from_secs(1)).await, None); + + virtual_overseer + }); +} diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 75fd1692d05c1..8982098662b94 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -49,7 +49,6 @@ use polkadot_primitives::{ DEFAULT_APPROVAL_EXECUTION_TIMEOUT, DEFAULT_BACKING_EXECUTION_TIMEOUT, DEFAULT_LENIENT_PREPARATION_TIMEOUT, DEFAULT_PRECHECK_PREPARATION_TIMEOUT, }, - node_features::FeatureIndex, transpose_claim_queue, AuthorityDiscoveryId, CandidateCommitments, CandidateDescriptorV2 as CandidateDescriptor, CandidateEvent, CandidateReceiptV2 as CandidateReceipt, @@ -207,39 +206,6 @@ where return; }; - let v3_enabled = - match util::request_node_features(relay_parent, session_index, &mut sender) - .await - .await - { - Ok(Ok(features)) => FeatureIndex::CandidateReceiptV3.is_set(&features), - Ok(Err(e)) => { - gum::warn!( - target: LOG_TARGET, - ?relay_parent, - ?session_index, - err = ?e, - "Failed to fetch node features from runtime" - ); - let _ = response_sender - .send(Err(ValidationFailed("Node features not 
available".to_string()))); - return; - }, - Err(e) => { - gum::warn!( - target: LOG_TARGET, - ?relay_parent, - ?session_index, - err = ?e, - "Failed to fetch node features, oneshot canceled" - ); - let _ = response_sender.send(Err(ValidationFailed( - "Node features request canceled".to_string(), - ))); - return; - }, - }; - // This will return a default value for the limit if runtime API is not available. // however we still error out if there is a weird runtime API error. let Ok(validation_code_bomb_limit) = util::runtime::fetch_validation_code_bomb_limit( @@ -264,7 +230,7 @@ where // Claim queue is scheduling context — fetch it from the scheduling_parent. // For V1/V2, scheduling_parent() returns relay_parent. - let scheduling_parent = candidate_receipt.descriptor.scheduling_parent(v3_enabled); + let scheduling_parent = candidate_receipt.descriptor.scheduling_parent(); let maybe_claim_queue = claim_queue(scheduling_parent, &mut sender).await; // Fetch the scheduling session index for validating the descriptor's @@ -300,7 +266,6 @@ where exec_kind, &metrics, maybe_claim_queue, - v3_enabled, validation_code_bomb_limit, ) .await; @@ -921,7 +886,6 @@ async fn validate_candidate_exhaustive( exec_kind: PvfExecKind, metrics: &Metrics, maybe_claim_queue: Option, - v3_enabled: bool, validation_code_bomb_limit: u32, ) -> Result { let _timer = metrics.time_validate_candidate_exhaustive(); @@ -942,7 +906,7 @@ async fn validate_candidate_exhaustive( // check is left for later when we actually can: https://github.com/paritytech/polkadot-sdk/issues/11182 // TODO: Properly check session index in the runtime: // https://github.com/paritytech/polkadot-sdk/issues/11033 - match (exec_kind, candidate_receipt.descriptor.scheduling_session(v3_enabled)) { + match (exec_kind, candidate_receipt.descriptor.scheduling_session()) { ( PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_), Some(scheduling_session), @@ -973,7 +937,6 @@ async fn validate_candidate_exhaustive( pov: 
pov.clone(), executor_params: executor_params.clone(), exec_timeout: pvf_exec_timeout(&executor_params, exec_kind.into()), - v3_enabled, }; let result = match exec_kind { @@ -1120,10 +1083,9 @@ async fn validate_candidate_exhaustive( return Err(ValidationFailed(error.into())); }; - if let Err(err) = committed_candidate_receipt.parse_ump_signals( - &transpose_claim_queue(claim_queue.0), - v3_enabled, - ) { + if let Err(err) = committed_candidate_receipt + .parse_ump_signals(&transpose_claim_queue(claim_queue.0)) + { gum::warn!( target: LOG_TARGET, candidate_hash = ?candidate_receipt.hash(), diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index bf8d8ee867611..cfeb686c2186a 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -554,7 +554,6 @@ fn candidate_validation_ok_is_ok(#[case] v2_descriptor: bool) { PvfExecKind::Backing(dummy_hash()), &Default::default(), Some(ClaimQueueSnapshot(cq)), - false, VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -644,7 +643,6 @@ fn invalid_session_or_ump_signals() { exec_kind, &Default::default(), Default::default(), - true, // v3_enabled VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -669,7 +667,6 @@ fn invalid_session_or_ump_signals() { exec_kind, &Default::default(), Some(Default::default()), - true, // v3_enabled VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -694,7 +691,6 @@ fn invalid_session_or_ump_signals() { exec_kind, &Default::default(), Default::default(), - true, // v3_enabled VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -728,7 +724,6 @@ fn invalid_session_or_ump_signals() { exec_kind, &Default::default(), Some(ClaimQueueSnapshot(cq.clone())), - true, // v3_enabled VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -758,7 +753,7 @@ fn invalid_session_or_ump_signals() { perform_basic_checks(&descriptor, validation_data.max_pov_size, &pov, &validation_code.hash()) .unwrap(); - 
assert_eq!(descriptor.version(true), CandidateDescriptorVersion::V1); + assert_eq!(descriptor.version(), CandidateDescriptorVersion::V1); let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; for exec_kind in @@ -775,7 +770,6 @@ fn invalid_session_or_ump_signals() { exec_kind, &Default::default(), Some(Default::default()), - true, // v3_enabled VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -800,7 +794,6 @@ fn invalid_session_or_ump_signals() { exec_kind, &Default::default(), Default::default(), - true, // v3_enabled VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -851,7 +844,6 @@ fn invalid_session_or_ump_signals() { exec_kind, &Default::default(), Some(ClaimQueueSnapshot(cq.clone())), - true, // v3_enabled VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -879,7 +871,6 @@ fn invalid_session_or_ump_signals() { exec_kind, &Default::default(), Some(ClaimQueueSnapshot(cq.clone())), - true, // v3_enabled VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -897,9 +888,8 @@ fn invalid_session_or_ump_signals() { #[test] /// Tests V3 candidate descriptor validation: -/// - V3 descriptor with UMP signals and v3_enabled=true is valid -/// - V3 descriptor without UMP signals and v3_enabled=true is invalid (NoUMPSignalWithV3Descriptor) -/// - V3 descriptor with v3_enabled=false is invalid (UnknownVersion) +/// - V3 descriptor with UMP signals is valid +/// - V3 descriptor without UMP signals is invalid (NoUMPSignalWithV3Descriptor) fn v3_descriptor_validation() { let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; @@ -923,10 +913,10 @@ fn v3_descriptor_validation() { scheduling_parent, ); - // Verify it's detected as V3 when v3_enabled=true - assert_eq!(descriptor.version(true), CandidateDescriptorVersion::V3); - // When v3_enabled=false, V3 descriptors (with non-zero scheduling_parent) are detected as V1 - assert_eq!(descriptor.version(false), CandidateDescriptorVersion::V1); + // Verify it's detected as V3 + 
assert_eq!(descriptor.version(), CandidateDescriptorVersion::V3); + // Under old rules, V3 (non-zero scheduling_parent) is detected as V1 + assert_eq!(descriptor.version_old_rules(), CandidateDescriptorVersion::V1); // Validation result WITH UMP signals (required for V3) let mut validation_result_with_signals = WasmValidationResult { @@ -974,7 +964,7 @@ fn v3_descriptor_validation() { let mut cq = BTreeMap::new(); let _ = cq.insert(CoreIndex(0), vec![ParaId::from(1_u32)].into()); - // Test 1: V3 descriptor + UMP signals + v3_enabled=true => Valid + // Test 1: V3 descriptor + UMP signals => Valid { let candidate_receipt = CandidateReceipt { descriptor: descriptor.clone(), @@ -994,7 +984,6 @@ fn v3_descriptor_validation() { PvfExecKind::Backing(dummy_hash()), &Default::default(), Some(ClaimQueueSnapshot(cq.clone())), - true, // v3_enabled VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -1002,7 +991,7 @@ fn v3_descriptor_validation() { assert_matches!(result, ValidationResult::Valid(_, _)); } - // Test 2: V3 descriptor + NO UMP signals + v3_enabled=true => Invalid + // Test 2: V3 descriptor + NO UMP signals => Invalid // (NoUMPSignalWithV3Descriptor) { let candidate_receipt = CandidateReceipt { @@ -1023,7 +1012,6 @@ fn v3_descriptor_validation() { PvfExecKind::Backing(dummy_hash()), &Default::default(), Some(ClaimQueueSnapshot(cq.clone())), - true, // v3_enabled VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -1036,42 +1024,7 @@ fn v3_descriptor_validation() { ); } - // Test 3: V3 descriptor + v3_enabled=false => Invalid (UMPSignalWithV1Descriptor) - // When v3_enabled=false, a V3 descriptor (with non-zero scheduling_parent) is detected as V1 - { - let candidate_receipt = CandidateReceipt { - descriptor: descriptor.clone(), - commitments_hash: commitments_with_signals.hash(), - }; - - let result = executor::block_on(validate_candidate_exhaustive( - 1, - MockValidateCandidateBackend::with_hardcoded_result(Ok( - validation_result_with_signals.clone() - )), - 
validation_data.clone(), - validation_code.clone(), - candidate_receipt, - Arc::new(pov.clone()), - ExecutorParams::default(), - PvfExecKind::Backing(dummy_hash()), - &Default::default(), - Some(ClaimQueueSnapshot(cq.clone())), - false, // v3_enabled=false: V3 descriptor detected as V1 - VALIDATION_CODE_BOMB_LIMIT, - )) - .unwrap(); - - // V3 detected as V1 when v3_enabled=false, rejected because V1 forbids UMP signals - assert_matches!( - result, - ValidationResult::Invalid(InvalidCandidate::InvalidUMPSignals( - CommittedCandidateReceiptError::UMPSignalWithV1Descriptor - )) - ); - } - - // Test 4: V3 descriptor with scheduling_session_offset > 0, mismatched expected + // Test 3: V3 descriptor with scheduling_session_offset > 0, mismatched expected // scheduling session => InvalidSessionIndex { let mut desc = descriptor.clone(); @@ -1097,7 +1050,6 @@ fn v3_descriptor_validation() { PvfExecKind::Backing(dummy_hash()), &Default::default(), Some(ClaimQueueSnapshot(cq.clone())), - true, VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -1131,7 +1083,6 @@ fn v3_descriptor_validation() { PvfExecKind::Backing(dummy_hash()), &Default::default(), Some(ClaimQueueSnapshot(cq.clone())), - true, VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -1164,7 +1115,6 @@ fn v3_descriptor_validation() { exec_kind, &Default::default(), Some(ClaimQueueSnapshot(cq.clone())), - true, VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -1216,7 +1166,6 @@ fn candidate_validation_bad_return_is_invalid() { PvfExecKind::Backing(dummy_hash()), &Default::default(), Default::default(), - true, // v3_enabled VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -1302,7 +1251,6 @@ fn candidate_validation_one_ambiguous_error_is_valid() { PvfExecKind::Approval, &Default::default(), Default::default(), - true, // v3_enabled VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -1347,7 +1295,6 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() { PvfExecKind::Approval, &Default::default(), Default::default(), - true, // 
v3_enabled VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -1466,7 +1413,6 @@ fn candidate_validation_retry_on_error_helper( exec_kind, &Default::default(), Default::default(), - true, // v3_enabled VALIDATION_CODE_BOMB_LIMIT, )); } @@ -1513,7 +1459,6 @@ fn candidate_validation_timeout_is_internal_error() { PvfExecKind::Backing(dummy_hash()), &Default::default(), Default::default(), - true, // v3_enabled VALIDATION_CODE_BOMB_LIMIT, )); @@ -1564,7 +1509,6 @@ fn candidate_validation_commitment_hash_mismatch_is_invalid() { PvfExecKind::Backing(dummy_hash()), &Default::default(), Default::default(), - true, // v3_enabled VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -1618,7 +1562,6 @@ fn candidate_validation_code_mismatch_is_invalid() { PvfExecKind::Backing(dummy_hash()), &Default::default(), Default::default(), - true, // v3_enabled VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -1681,7 +1624,6 @@ fn compressed_code_works() { PvfExecKind::Backing(dummy_hash()), &Default::default(), Some(Default::default()), - true, // v3_enabled VALIDATION_CODE_BOMB_LIMIT, )); diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index 95d960c00a5c6..643c92655e20e 100644 --- a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -44,10 +44,9 @@ use polkadot_node_subsystem_util::{ ControlledValidatorIndices, }; use polkadot_primitives::{ - node_features::FeatureIndex, slashing, BlockNumber, CandidateHash, - CandidateReceiptV2 as CandidateReceipt, CompactStatement, DisputeStatement, - DisputeStatementSet, Hash, ScrapedOnChainVotes, SessionIndex, ValidDisputeStatementKind, - ValidatorId, ValidatorIndex, + slashing, BlockNumber, CandidateHash, CandidateReceiptV2 as CandidateReceipt, CompactStatement, + DisputeStatement, DisputeStatementSet, Hash, ScrapedOnChainVotes, SessionIndex, + ValidDisputeStatementKind, ValidatorId, ValidatorIndex, }; use 
schnellru::{LruMap, UnlimitedCompact}; @@ -606,32 +605,11 @@ impl Initialized { for (candidate_receipt, backers) in backing_validators_per_candidate { let relay_parent = candidate_receipt.descriptor.relay_parent(); - // First, fetch session info for the message session to get node_features - let extended_session_info = match self - .runtime_info - .get_session_info_by_index(ctx.sender(), relay_parent, session) - .await - { - Ok(info) => info, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?session, - ?err, - "Could not retrieve session info from RuntimeInfo", - ); - return Ok(()); - }, - }; - - let v3_enabled = - FeatureIndex::CandidateReceiptV3.is_set(&extended_session_info.node_features); - // For V2/V3: Get scheduling session and parent from descriptor // For V1: These methods return None/relay_parent, fall back to message session let scheduling_session = - candidate_receipt.descriptor.scheduling_session(v3_enabled).unwrap_or(session); - let scheduling_parent = candidate_receipt.descriptor.scheduling_parent(v3_enabled); + candidate_receipt.descriptor.scheduling_session().unwrap_or(session); + let scheduling_parent = candidate_receipt.descriptor.scheduling_parent(); // Backing validators are from the scheduling context // Fetch session info using scheduling_parent as the runtime API context diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index eb4fa70cbb5f8..36c79f4def023 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -212,14 +212,12 @@ impl CandidateStorage { candidate_hash: CandidateHash, candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, - v3_enabled: bool, ) -> Result<(), Error> { let entry = CandidateEntry::new( candidate_hash, candidate, persisted_validation_data, CandidateState::Backed, - v3_enabled, 
)?; self.add_candidate_entry(entry) @@ -377,15 +375,8 @@ impl CandidateEntry { candidate_hash: CandidateHash, candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, - v3_enabled: bool, ) -> Result { - Self::new( - candidate_hash, - candidate, - persisted_validation_data, - CandidateState::Seconded, - v3_enabled, - ) + Self::new(candidate_hash, candidate, persisted_validation_data, CandidateState::Seconded) } pub fn hash(&self) -> CandidateHash { @@ -397,7 +388,6 @@ impl CandidateEntry { candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, state: CandidateState, - v3_enabled: bool, ) -> Result { let para_id = candidate.descriptor.para_id(); if persisted_validation_data.hash() != candidate.descriptor.persisted_validation_data_hash() @@ -413,7 +403,7 @@ impl CandidateEntry { } let relay_parent = candidate.descriptor.relay_parent(); - let scheduling_parent = candidate.descriptor.scheduling_parent(v3_enabled); + let scheduling_parent = candidate.descriptor.scheduling_parent(); Ok(Self { candidate_hash, diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index 5f33c37dac3a2..6cfcb8a30a643 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -18,7 +18,7 @@ use super::*; use assert_matches::assert_matches; use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations; use polkadot_primitives::{ - BlockNumber, CandidateCommitments, CandidateDescriptorV2, HeadData, Id as ParaId, + BlockNumber, CandidateCommitments, CandidateDescriptorV2, CoreIndex, HeadData, Id as ParaId, MutateDescriptorV2, }; use polkadot_primitives_test_helpers as test_helpers; @@ -122,26 +122,36 @@ impl CandidateBuilder { max_pov_size: 1_000_000, }; - let descriptor: CandidateDescriptorV2 = CandidateDescriptor 
{ - para_id: self.para_id, - relay_parent: self.relay_parent, - collator: test_helpers::dummy_collator(), - persisted_validation_data_hash: persisted_validation_data.hash(), - pov_hash: Hash::repeat_byte(1), - erasure_root: Hash::repeat_byte(1), - signature: test_helpers::zero_collator_signature(), - para_head: self.para_head.hash(), - validation_code_hash: Hash::repeat_byte(42).into(), - } - .into(); - let descriptor = if let Some(scheduling_parent) = self.scheduling_parent { - let mut d = descriptor; - d.set_version(1); - d.set_scheduling_parent(scheduling_parent); - d + // V3 descriptors must be constructed directly (not via V1→V2 conversion) + // because the conversion puts collator bytes into reserved1, which would + // cause v3_version() to misdetect the descriptor as V1. + test_helpers::make_valid_candidate_descriptor_v3( + self.para_id, + self.relay_parent, + CoreIndex(0), + 1, + persisted_validation_data.hash(), + Hash::repeat_byte(1), + Hash::repeat_byte(42), + self.para_head.hash(), + Hash::repeat_byte(1), + scheduling_parent, + ) } else { - descriptor + let d: CandidateDescriptorV2 = CandidateDescriptor { + para_id: self.para_id, + relay_parent: self.relay_parent, + collator: test_helpers::dummy_collator(), + persisted_validation_data_hash: persisted_validation_data.hash(), + pov_hash: Hash::repeat_byte(1), + erasure_root: Hash::repeat_byte(1), + signature: test_helpers::zero_collator_signature(), + para_head: self.para_head.hash(), + validation_code_hash: Hash::repeat_byte(42).into(), + } + .into(); + d }; let candidate = CommittedCandidateReceipt { @@ -303,12 +313,11 @@ fn candidate_storage_methods() { candidate.clone(), wrong_pvd.clone(), CandidateState::Seconded, - false ), Err(CandidateEntryError::PersistedValidationDataMismatch) ); assert_matches!( - CandidateEntry::new_seconded(candidate_hash, candidate.clone(), wrong_pvd, false), + CandidateEntry::new_seconded(candidate_hash, candidate.clone(), wrong_pvd), 
Err(CandidateEntryError::PersistedValidationDataMismatch) ); // Zero-length cycle. @@ -319,7 +328,7 @@ fn candidate_storage_methods() { pvd.parent_head = HeadData(vec![1; 10]); candidate.descriptor.set_persisted_validation_data_hash(pvd.hash()); assert_matches!( - CandidateEntry::new_seconded(candidate_hash, candidate, pvd, false), + CandidateEntry::new_seconded(candidate_hash, candidate, pvd), Err(CandidateEntryError::ZeroLengthCycle) ); } @@ -334,7 +343,6 @@ fn candidate_storage_methods() { candidate.clone(), pvd.clone(), CandidateState::Seconded, - false, ) .unwrap(); storage.add_candidate_entry(candidate_entry.clone()).unwrap(); @@ -378,7 +386,7 @@ fn candidate_storage_methods() { assert_eq!(storage.head_data_by_hash(&parent_head_hash), None); storage - .add_pending_availability_candidate(candidate_hash, candidate.clone(), pvd, false) + .add_pending_availability_candidate(candidate_hash, candidate.clone(), pvd) .unwrap(); assert!(storage.contains(&candidate_hash)); @@ -400,7 +408,7 @@ fn candidate_storage_methods() { .build(); let candidate_hash_2 = candidate_2.hash(); let candidate_entry_2 = - CandidateEntry::new_seconded(candidate_hash_2, candidate_2, pvd_2, false).unwrap(); + CandidateEntry::new_seconded(candidate_hash_2, candidate_2, pvd_2).unwrap(); storage.add_candidate_entry(candidate_entry_2).unwrap(); assert_eq!( @@ -479,14 +487,9 @@ fn test_populate_and_check_potential() { .hrmp_watermark(relay_parent_x_info.number) .build(); let candidate_a_hash = candidate_a.hash(); - let candidate_a_entry = CandidateEntry::new( - candidate_a_hash, - candidate_a, - pvd_a.clone(), - CandidateState::Backed, - false, - ) - .unwrap(); + let candidate_a_entry = + CandidateEntry::new(candidate_a_hash, candidate_a, pvd_a.clone(), CandidateState::Backed) + .unwrap(); storage.add_candidate_entry(candidate_a_entry.clone()).unwrap(); let (pvd_b, candidate_b) = CandidateBuilder::new(para_id, relay_parent_y_info.hash) .relay_parent_number(relay_parent_y_info.number) @@ -496,8 
+499,7 @@ fn test_populate_and_check_potential() { .build(); let candidate_b_hash = candidate_b.hash(); let candidate_b_entry = - CandidateEntry::new(candidate_b_hash, candidate_b, pvd_b, CandidateState::Backed, false) - .unwrap(); + CandidateEntry::new(candidate_b_hash, candidate_b, pvd_b, CandidateState::Backed).unwrap(); storage.add_candidate_entry(candidate_b_entry.clone()).unwrap(); let (pvd_c, candidate_c) = CandidateBuilder::new(para_id, relay_parent_z_info.hash) .relay_parent_number(relay_parent_z_info.number) @@ -507,8 +509,7 @@ fn test_populate_and_check_potential() { .build(); let candidate_c_hash = candidate_c.hash(); let candidate_c_entry = - CandidateEntry::new(candidate_c_hash, candidate_c, pvd_c, CandidateState::Backed, false) - .unwrap(); + CandidateEntry::new(candidate_c_hash, candidate_c, pvd_c, CandidateState::Backed).unwrap(); storage.add_candidate_entry(candidate_c_entry.clone()).unwrap(); // Candidate A doesn't adhere to the base constraints. @@ -750,7 +751,6 @@ fn test_populate_and_check_potential() { wrong_candidate_c, wrong_pvd_c, CandidateState::Backed, - false, ) .unwrap(); modified_storage.add_candidate_entry(wrong_candidate_c_entry.clone()).unwrap(); @@ -794,7 +794,6 @@ fn test_populate_and_check_potential() { wrong_candidate_c, wrong_pvd_c, CandidateState::Backed, - false, ) .unwrap(); modified_storage.add_candidate_entry(wrong_candidate_c_entry.clone()).unwrap(); @@ -833,7 +832,6 @@ fn test_populate_and_check_potential() { unconnected_candidate_c, unconnected_pvd_c, CandidateState::Backed, - false, ) .unwrap(); modified_storage @@ -880,7 +878,6 @@ fn test_populate_and_check_potential() { modified_candidate_a, modified_pvd_a, CandidateState::Backed, - false, ) .unwrap(), ) @@ -918,7 +915,6 @@ fn test_populate_and_check_potential() { wrong_candidate_c, wrong_pvd_c, CandidateState::Backed, - false, ) .unwrap(); modified_storage.add_candidate_entry(wrong_candidate_c_entry.clone()).unwrap(); @@ -1080,8 +1076,7 @@ fn 
test_populate_and_check_potential() { .build(); let candidate_d_hash = candidate_d.hash(); let candidate_d_entry = - CandidateEntry::new(candidate_d_hash, candidate_d, pvd_d, CandidateState::Backed, false) - .unwrap(); + CandidateEntry::new(candidate_d_hash, candidate_d, pvd_d, CandidateState::Backed).unwrap(); assert!(populate_chain_from_previous_storage(&relay_chain_scope, &scope, &storage) .can_add_candidate_as_potential(&relay_chain_scope, &candidate_d_entry) .is_ok()); @@ -1096,7 +1091,7 @@ fn test_populate_and_check_potential() { .build(); let candidate_f_hash = candidate_f.hash(); let candidate_f_entry = - CandidateEntry::new(candidate_f_hash, candidate_f, pvd_f, CandidateState::Seconded, false) + CandidateEntry::new(candidate_f_hash, candidate_f, pvd_f, CandidateState::Seconded) .unwrap(); assert!(populate_chain_from_previous_storage(&relay_chain_scope, &scope, &storage) .can_add_candidate_as_potential(&relay_chain_scope, &candidate_f_entry) @@ -1112,7 +1107,7 @@ fn test_populate_and_check_potential() { .build(); let candidate_a1_hash = candidate_a1.hash(); let candidate_a1_entry = - CandidateEntry::new(candidate_a1_hash, candidate_a1, pvd_a1, CandidateState::Backed, false) + CandidateEntry::new(candidate_a1_hash, candidate_a1, pvd_a1, CandidateState::Backed) .unwrap(); // Candidate A1 is created so that its hash is greater than the candidate A hash. 
assert_eq!(fork_selection_rule(&candidate_a_hash, &candidate_a1_hash), Ordering::Less); @@ -1133,14 +1128,9 @@ fn test_populate_and_check_potential() { .hrmp_watermark(relay_parent_x_info.number) .build(); let candidate_b1_hash = candidate_b1.hash(); - let candidate_b1_entry = CandidateEntry::new( - candidate_b1_hash, - candidate_b1, - pvd_b1, - CandidateState::Seconded, - false, - ) - .unwrap(); + let candidate_b1_entry = + CandidateEntry::new(candidate_b1_hash, candidate_b1, pvd_b1, CandidateState::Seconded) + .unwrap(); assert!(populate_chain_from_previous_storage(&relay_chain_scope, &scope, &storage) .can_add_candidate_as_potential(&relay_chain_scope, &candidate_b1_entry) .is_ok()); @@ -1156,7 +1146,7 @@ fn test_populate_and_check_potential() { .build(); let candidate_c1_hash = candidate_c1.hash(); let candidate_c1_entry = - CandidateEntry::new(candidate_c1_hash, candidate_c1, pvd_c1, CandidateState::Backed, false) + CandidateEntry::new(candidate_c1_hash, candidate_c1, pvd_c1, CandidateState::Backed) .unwrap(); assert!(populate_chain_from_previous_storage(&relay_chain_scope, &scope, &storage) .can_add_candidate_as_potential(&relay_chain_scope, &candidate_c1_entry) @@ -1172,14 +1162,9 @@ fn test_populate_and_check_potential() { .hrmp_watermark(relay_parent_x_info.number) .build(); let candidate_c2_hash = candidate_c2.hash(); - let candidate_c2_entry = CandidateEntry::new( - candidate_c2_hash, - candidate_c2, - pvd_c2, - CandidateState::Seconded, - false, - ) - .unwrap(); + let candidate_c2_entry = + CandidateEntry::new(candidate_c2_hash, candidate_c2, pvd_c2, CandidateState::Seconded) + .unwrap(); assert!(populate_chain_from_previous_storage(&relay_chain_scope, &scope, &storage) .can_add_candidate_as_potential(&relay_chain_scope, &candidate_c2_entry) .is_ok()); @@ -1193,14 +1178,9 @@ fn test_populate_and_check_potential() { .hrmp_watermark(relay_parent_x_info.number) .build(); let candidate_a2_hash = candidate_a2.hash(); - let candidate_a2_entry = 
CandidateEntry::new( - candidate_a2_hash, - candidate_a2, - pvd_a2, - CandidateState::Seconded, - false, - ) - .unwrap(); + let candidate_a2_entry = + CandidateEntry::new(candidate_a2_hash, candidate_a2, pvd_a2, CandidateState::Seconded) + .unwrap(); // Candidate A2 is created so that its hash is greater than the candidate A hash. assert_eq!(fork_selection_rule(&candidate_a2_hash, &candidate_a_hash), Ordering::Less); @@ -1219,7 +1199,7 @@ fn test_populate_and_check_potential() { .build(); let candidate_b2_hash = candidate_b2.hash(); let candidate_b2_entry = - CandidateEntry::new(candidate_b2_hash, candidate_b2, pvd_b2, CandidateState::Backed, false) + CandidateEntry::new(candidate_b2_hash, candidate_b2, pvd_b2, CandidateState::Backed) .unwrap(); assert!(populate_chain_from_previous_storage(&relay_chain_scope, &scope, &storage) .can_add_candidate_as_potential(&relay_chain_scope, &candidate_b2_entry) @@ -1280,37 +1260,27 @@ fn test_populate_and_check_potential() { let (pvd_c3, candidate_c3) = CandidateBuilder::new(para_id, relay_parent_y_info.hash) .relay_parent_number(relay_parent_y_info.number) .parent_head(vec![0xb4].into()) - .para_head(vec![0xc2].into()) + .para_head(vec![0xc3].into()) .hrmp_watermark(relay_parent_y_info.number) .build(); let candidate_c3_hash = candidate_c3.hash(); - let candidate_c3_entry = CandidateEntry::new( - candidate_c3_hash, - candidate_c3, - pvd_c3, - CandidateState::Seconded, - false, - ) - .unwrap(); + let candidate_c3_entry = + CandidateEntry::new(candidate_c3_hash, candidate_c3, pvd_c3, CandidateState::Seconded) + .unwrap(); // Candidate C4. let (pvd_c4, candidate_c4) = CandidateBuilder::new(para_id, relay_parent_y_info.hash) .relay_parent_number(relay_parent_y_info.number) .parent_head(vec![0xb4].into()) - .para_head(vec![0xc3].into()) + .para_head(vec![0xc2].into()) .hrmp_watermark(relay_parent_y_info.number) .build(); let candidate_c4_hash = candidate_c4.hash(); // C4 should have a lower candidate hash than C3. 
assert_eq!(fork_selection_rule(&candidate_c4_hash, &candidate_c3_hash), Ordering::Less); - let candidate_c4_entry = CandidateEntry::new( - candidate_c4_hash, - candidate_c4, - pvd_c4, - CandidateState::Seconded, - false, - ) - .unwrap(); + let candidate_c4_entry = + CandidateEntry::new(candidate_c4_hash, candidate_c4, pvd_c4, CandidateState::Seconded) + .unwrap(); let mut storage = storage.clone(); storage.add_candidate_entry(candidate_c3_entry).unwrap(); @@ -1350,14 +1320,8 @@ fn test_populate_and_check_potential() { let candidate_e_hash = candidate_e.hash(); storage .add_candidate_entry( - CandidateEntry::new( - candidate_e_hash, - candidate_e, - pvd_e, - CandidateState::Seconded, - false, - ) - .unwrap(), + CandidateEntry::new(candidate_e_hash, candidate_e, pvd_e, CandidateState::Seconded) + .unwrap(), ) .unwrap(); @@ -1511,13 +1475,8 @@ fn test_find_ancestor_path_and_find_backable_chain() { for (pvd, candidate) in candidates.iter() { storage .add_candidate_entry( - CandidateEntry::new_seconded( - candidate.hash(), - candidate.clone(), - pvd.clone(), - false, - ) - .unwrap(), + CandidateEntry::new_seconded(candidate.hash(), candidate.clone(), pvd.clone()) + .unwrap(), ) .unwrap(); } @@ -1715,14 +1674,8 @@ fn test_v3_scheduling_parent_validation() { .hrmp_watermark(relay_parent_x_info.number) .build(); let candidate_hash = candidate.hash(); - let candidate_entry = CandidateEntry::new( - candidate_hash, - candidate, - pvd, - CandidateState::Backed, - true, // v3_enabled - ) - .unwrap(); + let candidate_entry = + CandidateEntry::new(candidate_hash, candidate, pvd, CandidateState::Backed).unwrap(); let (relay_chain_scope, scope) = make_scope( relay_parent_z_info.clone(), @@ -1750,14 +1703,8 @@ fn test_v3_scheduling_parent_validation() { .hrmp_watermark(relay_parent_x_info.number) .build(); let candidate_hash = candidate.hash(); - let candidate_entry = CandidateEntry::new( - candidate_hash, - candidate, - pvd, - CandidateState::Backed, - true, // v3_enabled - ) - 
.unwrap(); + let candidate_entry = + CandidateEntry::new(candidate_hash, candidate, pvd, CandidateState::Backed).unwrap(); let (relay_chain_scope, scope) = make_scope( relay_parent_z_info.clone(), @@ -1784,14 +1731,8 @@ fn test_v3_scheduling_parent_validation() { .hrmp_watermark(relay_parent_x_info.number) .build(); let candidate_hash = candidate.hash(); - let candidate_entry = CandidateEntry::new( - candidate_hash, - candidate, - pvd, - CandidateState::Backed, - true, // v3_enabled - ) - .unwrap(); + let candidate_entry = + CandidateEntry::new(candidate_hash, candidate, pvd, CandidateState::Backed).unwrap(); let (relay_chain_scope, scope) = make_scope( relay_parent_z_info.clone(), @@ -1819,14 +1760,8 @@ fn test_v3_scheduling_parent_validation() { .hrmp_watermark(relay_parent_x_info.number) .build(); let candidate_hash = candidate.hash(); - let candidate_entry = CandidateEntry::new( - candidate_hash, - candidate, - pvd, - CandidateState::Backed, - true, // v3_enabled - ) - .unwrap(); + let candidate_entry = + CandidateEntry::new(candidate_hash, candidate, pvd, CandidateState::Backed).unwrap(); // Verify the entry correctly tracks both parents assert_eq!(candidate_entry.relay_parent(), relay_parent_x); diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 589b8a897d9f2..117041f486f29 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -45,14 +45,13 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::{ backing_implicit_view::BlockInfoProspectiveParachains as BlockInfo, inclusion_emulator::{Constraints, RelayChainBlockInfo}, - request_backing_constraints, request_candidates_pending_availability, request_node_features, + request_backing_constraints, request_candidates_pending_availability, request_session_index_for_child, runtime::{fetch_claim_queue, fetch_scheduling_lookahead}, }; use 
polkadot_primitives::{ - node_features::FeatureIndex, transpose_claim_queue, CandidateHash, - CommittedCandidateReceiptV2 as CommittedCandidateReceipt, Hash, Header, Id as ParaId, - NodeFeatures, PersistedValidationData, + transpose_claim_queue, CandidateHash, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, + Hash, Header, Id as ParaId, PersistedValidationData, }; use crate::{ @@ -78,8 +77,6 @@ struct RelayBlockViewData { // The relay chain scope containing the relay parent and its allowed ancestors. // This is shared across all paras for this relay parent. relay_chain_scope: fragment_chain::RelayChainScope, - // The node features active at this relay parent. - node_features: NodeFeatures, } struct View { @@ -239,10 +236,6 @@ async fn handle_active_leaves_update( .await .map_err(JfyiError::RuntimeApiRequestCanceled)??; - let node_features = request_node_features(hash, session_index, ctx.sender()) - .await - .await - .map_err(JfyiError::RuntimeApiRequestCanceled)??; let ancestry_len = fetch_scheduling_lookahead(hash, session_index, ctx.sender()) .await? .saturating_sub(1); @@ -283,8 +276,6 @@ async fn handle_active_leaves_update( }, }; - let v3_enabled = FeatureIndex::CandidateReceiptV3.is_set(&node_features); - let mut fragment_chains = HashMap::new(); for (para, claims_by_depth) in transposed_claim_queue.iter() { // Find constraints and pending availability candidates. 
@@ -319,7 +310,6 @@ async fn handle_active_leaves_update( candidate_hash, c.candidate, c.persisted_validation_data, - v3_enabled, ); match res { @@ -410,7 +400,7 @@ async fn handle_active_leaves_update( } view.per_relay_parent - .insert(hash, RelayBlockViewData { fragment_chains, relay_chain_scope, node_features }); + .insert(hash, RelayBlockViewData { fragment_chains, relay_chain_scope }); view.active_leaves.insert(hash); } @@ -563,30 +553,21 @@ async fn handle_introduce_seconded_candidate( } = request; let candidate_hash = candidate.hash(); - let candidate_relay_parent = candidate.descriptor.relay_parent(); - - // Get v3_enabled from the node_features of the candidate's relay_parent - let v3_enabled = view - .per_relay_parent - .get(&candidate_relay_parent) - .map(|rp_data| FeatureIndex::CandidateReceiptV3.is_set(&rp_data.node_features)) - .unwrap_or(false); - - let candidate_entry = - match CandidateEntry::new_seconded(candidate_hash, candidate, pvd, v3_enabled) { - Ok(candidate) => candidate, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - para_id = ?para, - "Cannot add seconded candidate: {}", - err - ); - let _ = tx.send(false); - return; - }, - }; + let candidate_entry = match CandidateEntry::new_seconded(candidate_hash, candidate, pvd) { + Ok(candidate) => candidate, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + para_id = ?para, + "Cannot add seconded candidate: {}", + err + ); + + let _ = tx.send(false); + return; + }, + }; let mut added = Vec::with_capacity(view.per_relay_parent.len()); let mut para_scheduled = false; diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index 40dd204ce7f53..910e5387a4e2e 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -29,7 +29,7 @@ use polkadot_primitives::{ BackingState, CandidatePendingAvailability, Constraints, InboundHrmpLimitations, }, BlockNumber, 
CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreIndex, HeadData, - Header, MutateDescriptorV2, NodeFeatures, PersistedValidationData, ValidationCodeHash, + Header, MutateDescriptorV2, PersistedValidationData, ValidationCodeHash, DEFAULT_SCHEDULING_LOOKAHEAD, }; use polkadot_primitives_test_helpers::make_candidate; @@ -266,15 +266,6 @@ async fn handle_leaf_activation( } ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::NodeFeatures(session_index, tx)) - ) if parent == *hash && session_index == 1 => { - tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); - } - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( @@ -2951,15 +2942,6 @@ fn uses_ancestry_only_within_session() { } ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::NodeFeatures(session_index, tx)) - ) if parent == hash && session_index == session => { - tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); - } - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs index c64e9534188a5..92f9cebd4593c 100644 --- a/polkadot/node/core/provisioner/src/tests.rs +++ b/polkadot/node/core/provisioner/src/tests.rs @@ -564,7 +564,7 @@ mod select_candidates { expected.sort_by_key(|c| c.candidate().descriptor.para_id()); let mut candidates_iter = expected.iter().map(|candidate| BackableCandidateRef { candidate_hash: candidate.hash(), - scheduling_parent: candidate.descriptor().scheduling_parent(true), + scheduling_parent: candidate.descriptor().scheduling_parent(), }); while let Some(from_job) = receiver.next().await { @@ -602,7 +602,7 @@ mod select_candidates { candidate_hash: candidate.hash(), scheduling_parent: candidate .descriptor() - .scheduling_parent(true), + .scheduling_parent(), }) .collect(), 
) diff --git a/polkadot/node/core/pvf/common/src/execute.rs b/polkadot/node/core/pvf/common/src/execute.rs index 10288dbd51fee..3d76bba26536c 100644 --- a/polkadot/node/core/pvf/common/src/execute.rs +++ b/polkadot/node/core/pvf/common/src/execute.rs @@ -42,8 +42,6 @@ pub struct ValidationContext { pub executor_params: ExecutorParams, /// Execution timeout pub exec_timeout: Duration, - /// Whether V3 features are enabled - pub v3_enabled: bool, } impl ValidationContext { @@ -54,12 +52,12 @@ impl ValidationContext { /// Get the scheduling parent hash from the candidate descriptor pub fn scheduling_parent(&self) -> Hash { - self.candidate_receipt.descriptor.scheduling_parent(self.v3_enabled) + self.candidate_receipt.descriptor.scheduling_parent() } /// Get the candidate descriptor version pub fn descriptor_version(&self) -> CandidateDescriptorVersion { - self.candidate_receipt.descriptor.version(self.v3_enabled) + self.candidate_receipt.descriptor.version() } /// Convert to an ExecuteRequest for sending to the worker. 
diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index bfb1497081bb9..aff047ae5857c 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -929,7 +929,6 @@ mod tests { pov, executor_params: ExecutorParams::default(), exec_timeout: Duration::from_secs(10), - v3_enabled: false, }; ExecuteJob { @@ -1103,7 +1102,6 @@ mod tests { pov: Arc::new(PoV { block_data: BlockData(Vec::new()) }), executor_params: ExecutorParams::default(), exec_timeout: Duration::from_secs(1), - v3_enabled: false, }; let relevant_job = ExecuteJob { artifact: ArtifactPathId { @@ -1125,7 +1123,6 @@ mod tests { pov: Arc::new(PoV { block_data: BlockData(Vec::new()) }), executor_params: ExecutorParams::default(), exec_timeout: Duration::from_secs(1), - v3_enabled: false, }; let expired_job = ExecuteJob { artifact: ArtifactPathId { diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index d7cb04291ab10..a0d619f727f8d 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -1244,7 +1244,6 @@ pub(crate) mod tests { pov, executor_params: ExecutorParams::default(), exec_timeout: TEST_EXECUTION_TIMEOUT, - v3_enabled: false, } } diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index 3595fef5d423b..ab37ba7fac408 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -124,7 +124,6 @@ impl TestHost { pov: Arc::new(pov), executor_params: executor_params.clone(), exec_timeout: TEST_EXECUTION_TIMEOUT, - v3_enabled: false, }; self.host diff --git a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs index ebf23f2eed001..b1cd8f2bc7608 100644 --- a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs +++ b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs @@ 
-215,8 +215,8 @@ where descriptor: CandidateDescriptorV2::new( candidate.descriptor.para_id(), relay_parent, - candidate.descriptor.core_index(false).unwrap_or(CoreIndex(0)), - candidate.descriptor.session_index(false).unwrap_or(0), + candidate.descriptor.core_index().unwrap_or(CoreIndex(0)), + candidate.descriptor.session_index().unwrap_or(0), validation_data_hash, pov_hash, erasure_root, diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index 0279394c5fd4d..ca9357e388407 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -480,26 +480,8 @@ async fn distribute_collation( ) .await; - // Step 1: Extract execution relay_parent to lookup node features and get v3_enabled - let relay_parent = receipt.descriptor.relay_parent(); - let v3_enabled = match state.per_scheduling_parent.get(&relay_parent) { - Some(sp_state) => sp_state.v3_enabled, - None => { - gum::warn!( - target: LOG_TARGET, - para_id = %id, - ?relay_parent, - ?candidate_hash, - "Dropping candidate: candidate relay parent is out of our view", - ); - return Ok(()); - }, - }; - - // Step 2: Extract scheduling_parent using v3_enabled - let scheduling_parent = receipt.descriptor.scheduling_parent(v3_enabled); + let scheduling_parent = receipt.descriptor.scheduling_parent(); - // Step 3: Lookup the ACTUAL per_relay_parent state using scheduling_parent let per_scheduling_parent = match state.per_scheduling_parent.get_mut(&scheduling_parent) { Some(per_scheduling_parent) => per_scheduling_parent, None => { @@ -959,8 +941,7 @@ async fn advertise_collation( } // Get the candidate descriptor version from the receipt - let candidate_descriptor_version = - collation.receipt.descriptor.version(per_scheduling_parent.v3_enabled); + let candidate_descriptor_version = collation.receipt.descriptor.version(); gum::debug!( target: 
LOG_TARGET, diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index 8b08d92b66879..bb08863e01fc9 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -428,7 +428,7 @@ async fn distribute_collation_with_receipt( pov: pov.clone(), parent_head_data: HeadData(vec![1, 2, 3]), result_sender: None, - core_index: candidate.descriptor.core_index(false).unwrap(), + core_index: candidate.descriptor.core_index().unwrap(), }, ) .await; diff --git a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs index 82298ca95fab1..6f7101f0c42d9 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs @@ -86,12 +86,10 @@ pub struct FetchedCollation { impl FetchedCollation { /// Create a new `FetchedCollation` from a candidate receipt. - /// - /// Requires `v3_enabled` to correctly extract the scheduling parent from V3 descriptors. 
- pub fn new(receipt: &CandidateReceipt, v3_enabled: bool) -> Self { + pub fn new(receipt: &CandidateReceipt) -> Self { let descriptor = receipt.descriptor(); Self { - scheduling_parent: descriptor.scheduling_parent(v3_enabled), + scheduling_parent: descriptor.scheduling_parent(), para_id: descriptor.para_id(), candidate_hash: receipt.hash(), } @@ -182,7 +180,6 @@ pub fn fetched_collation_sanity_check( fetched: &CandidateReceipt, persisted_validation_data: &PersistedValidationData, maybe_parent_head_and_hash: Option<(HeadData, Hash)>, - v3_enabled: bool, ) -> Result<(), SecondingError> { if persisted_validation_data.hash() != fetched.descriptor().persisted_validation_data_hash() { return Err(SecondingError::PersistedValidationDataMismatch); @@ -195,7 +192,7 @@ pub fn fetched_collation_sanity_check( return Err(SecondingError::CandidateHashMismatch); } - if advertised.scheduling_parent != fetched.descriptor.scheduling_parent(v3_enabled) { + if advertised.scheduling_parent != fetched.descriptor.scheduling_parent() { return Err(SecondingError::SchedulingParentMismatch); } @@ -206,7 +203,7 @@ pub fn fetched_collation_sanity_check( // For V3 protocol advertisements, verify the fetched descriptor version matches the advertised // one. 
if let Some(advertised_version) = &advertised.advertised_descriptor_version { - let fetched_version = fetched.descriptor.version(v3_enabled); + let fetched_version = fetched.descriptor.version(); if advertised_version != &fetched_version { return Err(SecondingError::DescriptorVersionMismatch( *advertised_version, diff --git a/polkadot/node/network/collator-protocol/src/validator_side/error.rs b/polkadot/node/network/collator-protocol/src/validator_side/error.rs index ac3f3ad74a84a..8dc2ac36251fd 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/error.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/error.rs @@ -42,9 +42,6 @@ pub enum Error { #[error("Response receiver for claim queue request cancelled")] CancelledClaimQueue(oneshot::Canceled), - #[error("Response receiver for node features request cancelled")] - CancelledNodeFeatures(oneshot::Canceled), - #[error("No state for the relay parent")] RelayParentStateNotFound, diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index 08f67a3e84e2a..799a8292a8bb8 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -165,12 +165,11 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::{ backing_implicit_view::View as ImplicitView, reputation::{ReputationAggregator, REPUTATION_CHANGE_INTERVAL}, - request_claim_queue, request_node_features, request_session_index_for_child, + request_claim_queue, request_session_index_for_child, }; use polkadot_primitives::{ - node_features, CandidateDescriptorV2, CandidateDescriptorVersion, CandidateHash, CollatorId, - CoreIndex, Hash, HeadData, Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, - SessionIndex, + CandidateDescriptorV2, CandidateDescriptorVersion, CandidateHash, CollatorId, CoreIndex, Hash, + HeadData, Id as ParaId, 
OccupiedCoreAssumption, PersistedValidationData, SessionIndex, }; use super::{modify_reputation, tick_stream, LOG_TARGET}; @@ -542,7 +541,6 @@ impl RelayParentHoldOffState { /// State tracked for each scheduling parent in the implicit view. struct PerSchedulingParent { collations: Collations, - v3_enabled: bool, /// The core index assigned to this validator at this scheduling parent's block height. /// Used to look up the relevant claim queue from the leaf. current_core: CoreIndex, @@ -678,18 +676,13 @@ impl State { .filter(|fc| fc.scheduling_parent == *scheduling_parent && fc.para_id == *para_id) .count(); - let v3_enabled = self - .per_scheduling_parent - .get(scheduling_parent) - .map_or(false, |sp| sp.v3_enabled); - let blocked_from_seconding = self.blocked_from_seconding.values().fold(0, |acc, blocked_collations| { acc + blocked_collations .iter() .filter(|pc| { pc.candidate_receipt.descriptor.para_id() == *para_id && - pc.candidate_receipt.descriptor.scheduling_parent(v3_enabled) == + pc.candidate_receipt.descriptor.scheduling_parent() == *scheduling_parent }) .count() @@ -733,24 +726,23 @@ fn is_scheduling_parent_in_implicit_view<'a>( }) } -async fn construct_per_relay_parent( +async fn construct_per_scheduling_parent( sender: &mut Sender, assigned_cores: &mut HashMap, keystore: &KeystorePtr, - relay_parent: Hash, - v3_enabled: bool, + scheduling_parent: Hash, session_index: SessionIndex, ) -> Result> where Sender: CollatorProtocolSenderTrait, { - let validators = polkadot_node_subsystem_util::request_validators(relay_parent, sender) + let validators = polkadot_node_subsystem_util::request_validators(scheduling_parent, sender) .await .await .map_err(Error::CancelledActiveValidators)??; let (groups, rotation_info) = - polkadot_node_subsystem_util::request_validator_groups(relay_parent, sender) + polkadot_node_subsystem_util::request_validator_groups(scheduling_parent, sender) .await .await .map_err(Error::CancelledValidatorGroups)??; @@ -761,7 +753,7 @@ where 
) { rotation_info.core_for_group(group, groups.len()) } else { - gum::trace!(target: LOG_TARGET, ?relay_parent, "Not a validator"); + gum::trace!(target: LOG_TARGET, ?scheduling_parent, "Not a validator"); return Ok(None); }; @@ -771,7 +763,7 @@ where if *entry == 1 { gum::debug!( target: LOG_TARGET, - ?relay_parent, + ?scheduling_parent, ?core_now, "Assigned to core", ); @@ -781,7 +773,6 @@ where Ok(Some(PerSchedulingParent { collations, - v3_enabled, current_core: core_now, session_index, ah_held_off_advertisements: RelayParentHoldOffState::NotStarted, @@ -1908,25 +1899,17 @@ where .await .map_err(Error::CancelledSessionIndex)??; - let node_features = request_node_features(*leaf, session_index, sender) - .await - .await - .map_err(Error::CancelledNodeFeatures)??; - - let v3_enabled = node_features::FeatureIndex::CandidateReceiptV3.is_set(&node_features); - // Fetch claim queue for this leaf (used for both construction and validation) let leaf_claim_queue = request_claim_queue(*leaf, sender) .await .await .map_err(Error::CancelledClaimQueue)??; - let Some(per_relay_parent) = construct_per_relay_parent( + let Some(per_scheduling_parent) = construct_per_scheduling_parent( sender, &mut state.assigned_cores, keystore, *leaf, - v3_enabled, session_index, ) .await? @@ -1934,7 +1917,7 @@ where continue; }; - state.per_scheduling_parent.insert(*leaf, per_relay_parent); + state.per_scheduling_parent.insert(*leaf, per_scheduling_parent); state.leaf_claim_queues.insert(*leaf, leaf_claim_queue); state @@ -1948,19 +1931,18 @@ where state.implicit_view.known_allowed_relay_parents_under(leaf).unwrap_or_default(); for block_hash in allowed_ancestry { if let Entry::Vacant(entry) = state.per_scheduling_parent.entry(*block_hash) { - // Safe to use the same v3_enabled config for the allowed relay parents as well - // as the same session index since they must be in the same session. 
- if let Some(per_relay_parent) = construct_per_relay_parent( + // Safe to use the same session index for the allowed scheduling parents as well + // since they must be in the same session. + if let Some(pers_scheduling_parent) = construct_per_scheduling_parent( sender, &mut state.assigned_cores, keystore, *block_hash, - v3_enabled, session_index, ) .await? { - entry.insert(per_relay_parent); + entry.insert(pers_scheduling_parent); } } } @@ -2192,9 +2174,7 @@ async fn process_msg( }; let output_head_data = receipt.commitments.head_data.clone(); let output_head_data_hash = receipt.descriptor.para_head(); - let v3_enabled = - state.per_scheduling_parent.get(&parent).map_or(false, |rp| rp.v3_enabled); - let fetched_collation = FetchedCollation::new(&receipt.to_plain(), v3_enabled); + let fetched_collation = FetchedCollation::new(&receipt.to_plain()); if let Some(CollationEvent { collator_id, pending_collation, .. }) = state.fetched_candidates.remove(&fetched_collation) { @@ -2262,9 +2242,7 @@ async fn process_msg( parent_head_data_hash: candidate_receipt.descriptor.para_head(), }); - let v3_enabled = - state.per_scheduling_parent.get(&parent).map_or(false, |rp| rp.v3_enabled); - let fetched_collation = FetchedCollation::new(&candidate_receipt, v3_enabled); + let fetched_collation = FetchedCollation::new(&candidate_receipt); let candidate_hash = fetched_collation.candidate_hash; let id = match state.fetched_candidates.entry(fetched_collation) { Entry::Occupied(entry) @@ -2606,26 +2584,25 @@ async fn kick_off_seconding( let scheduling_parent = collation_event.pending_collation.scheduling_parent; let para_id = collation_event.pending_collation.para_id; - let (v3_enabled, per_scheduling_parent) = - match state.per_scheduling_parent.get_mut(&scheduling_parent) { - Some(state) => (state.v3_enabled, state), - None => { - // Relay parent went out of view, not an error. 
- gum::trace!( - target: LOG_TARGET, - relay_parent = ?scheduling_parent, - "Fetched collation for a parent out of view", - ); - return Ok(false); - }, - }; + let per_scheduling_parent = match state.per_scheduling_parent.get_mut(&scheduling_parent) { + Some(state) => state, + None => { + // Relay parent went out of view, not an error. + gum::trace!( + target: LOG_TARGET, + relay_parent = ?scheduling_parent, + "Fetched collation for a parent out of view", + ); + return Ok(false); + }, + }; // Sanity check of the candidate receipt version. descriptor_version_sanity_check(candidate_receipt.descriptor(), per_scheduling_parent)?; let collations = &mut per_scheduling_parent.collations; - let fetched_collation = FetchedCollation::new(&candidate_receipt, v3_enabled); + let fetched_collation = FetchedCollation::new(&candidate_receipt); if let Entry::Vacant(entry) = state.fetched_candidates.entry(fetched_collation) { collation_event.pending_collation.commitments_hash = Some(candidate_receipt.commitments_hash); @@ -2708,7 +2685,6 @@ async fn kick_off_seconding( &candidate_receipt, &pvd, maybe_parent_head.and_then(|head| maybe_parent_head_hash.map(|hash| (head, hash))), - v3_enabled, )?; ctx.send_message(CandidateBackingMessage::Second { @@ -3029,20 +3005,19 @@ fn get_next_collation_to_fetch( // Sanity check the candidate descriptor version using individual parameters. 
pub fn descriptor_version_sanity_check_with_params( descriptor: &CandidateDescriptorV2, - v3_enabled: bool, expected_core: CoreIndex, expected_session: SessionIndex, ) -> std::result::Result<(), SecondingError> { - match descriptor.version(v3_enabled) { + match descriptor.version() { CandidateDescriptorVersion::V1 => Ok(()), CandidateDescriptorVersion::V2 | CandidateDescriptorVersion::V3 => { - if let Some(core_index) = descriptor.core_index(v3_enabled) { + if let Some(core_index) = descriptor.core_index() { if core_index != expected_core { return Err(SecondingError::InvalidCoreIndex(core_index.0, expected_core.0)); } } - if let Some(session_index) = descriptor.session_index(v3_enabled) { + if let Some(session_index) = descriptor.session_index() { if session_index != expected_session { return Err(SecondingError::InvalidSessionIndex( session_index, @@ -3064,7 +3039,6 @@ fn descriptor_version_sanity_check( ) -> std::result::Result<(), SecondingError> { descriptor_version_sanity_check_with_params( descriptor, - per_scheduling_parent.v3_enabled, per_scheduling_parent.current_core, per_scheduling_parent.session_index, ) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs index 32d5c49721732..8385306a98893 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -41,9 +41,8 @@ use polkadot_node_subsystem::messages::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::{reputation::add_reputation, TimeoutExt}; use polkadot_primitives::{ - node_features, CandidateReceiptV2 as CandidateReceipt, CollatorPair, CoreIndex, - GroupRotationInfo, HeadData, NodeFeatures, PersistedValidationData, ValidatorId, - ValidatorIndex, + CandidateReceiptV2 as CandidateReceipt, CollatorPair, CoreIndex, GroupRotationInfo, HeadData, + 
PersistedValidationData, ValidatorId, ValidatorIndex, }; use polkadot_primitives_test_helpers::{dummy_candidate_receipt_bad_sig, dummy_hash}; @@ -72,7 +71,6 @@ struct TestState { group_rotation_info: GroupRotationInfo, claim_queue: BTreeMap>, scheduling_lookahead: u32, - node_features: NodeFeatures, session_index: SessionIndex, // Used by `update_view` to keep track of latest requested ancestor last_known_block: Option, @@ -117,10 +115,6 @@ impl Default for TestState { .collect(), ); - let mut node_features = NodeFeatures::EMPTY; - node_features.resize(node_features::FeatureIndex::CandidateReceiptV2 as usize + 1, false); - node_features.set(node_features::FeatureIndex::CandidateReceiptV2 as u8 as usize, true); - Self { chain_ids: Self::CHAIN_IDS.map(|id| ParaId::from(id)).to_vec(), relay_parent, @@ -130,7 +124,6 @@ impl Default for TestState { group_rotation_info, claim_queue, scheduling_lookahead, - node_features, session_index: 1, last_known_block: None, } diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index a8c9555ea5111..3bfe5aa735f84 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -98,16 +98,6 @@ pub(super) async fn update_view( } ); - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::NodeFeatures(_, tx) - )) => { - tx.send(Ok(test_state.node_features.clone())).unwrap(); - } - ); - // handle_our_view_change fetches claim queue for the leaf // (stored in leaf_claim_queues for the new offset-based validation) assert_matches!( @@ -1802,24 +1792,11 @@ fn child_blocked_from_seconding_by_parent(#[case] valid_parent: bool) { } #[rstest] -#[case(true, false)] // V3 enabled, not 
crafted -#[case(false, false)] // V3 disabled, not crafted (detected as V1) -#[case(false, true)] // V3 disabled, crafted with non-zero reserved (detected as Unknown) -fn v3_descriptor(#[case] v3_feature_enabled: bool, #[case] crafted_unknown: bool) { +#[case(false)] // V3 descriptor accepted (version detection is now self-contained) +#[case(true)] // Unknown version descriptor rejected +fn v3_descriptor(#[case] crafted_unknown: bool) { let mut test_state = TestState::default(); - if v3_feature_enabled { - // Enable V3 feature for case_1 - test_state - .node_features - .resize(node_features::FeatureIndex::CandidateReceiptV3 as usize + 1, false); - test_state - .node_features - .set(node_features::FeatureIndex::CandidateReceiptV3 as u8 as usize, true); - } else { - test_state.node_features = NodeFeatures::EMPTY; - } - test_harness(ReputationAggregator::new(|_| true), HashSet::new(), |test_harness| async move { let TestHarness { mut virtual_overseer, keystore } = test_harness; @@ -1851,18 +1828,12 @@ fn v3_descriptor(#[case] v3_feature_enabled: bool, #[case] crafted_unknown: bool committed_candidate.descriptor.set_session_index(test_state.session_index); if crafted_unknown { - // Case 3: Create a crafted descriptor that will be detected as Unknown when - // v3_enabled=false. Set version field to 1 but keep scheduling_parent as zero. - // Since scheduling_parent is zero, old_v1_detected doesn't trigger (no backward - // compat). Then v2_version() checks the version field: version=1 is not recognized - // when v3_enabled=false (only version=0 is valid), so it returns Unknown. - committed_candidate.descriptor.set_version(1); - // Don't set scheduling_parent - keep it as default (zero) + // Create a descriptor with an unrecognized version field (version=2). + // version=0 is V2, version=1 is V3, anything else is Unknown. 
+ committed_candidate.descriptor.set_version(2); } else { - // Cases 1 & 2: Normal V3 descriptor - // Make it a V3 descriptor by setting version field to 1 + // Normal V3 descriptor: version=1 with scheduling_parent set committed_candidate.descriptor.set_version(1); - // Set scheduling_parent to head_b (which is in active leaves) committed_candidate.descriptor.set_scheduling_parent(head_b); } @@ -1909,8 +1880,7 @@ fn v3_descriptor(#[case] v3_feature_enabled: bool, #[case] crafted_unknown: bool .expect("Sending response should succeed"); if crafted_unknown { - // Case 3: V3 disabled with crafted descriptor (zero reserved fields, non-zero version) - // Should be rejected as Unknown version + // Unknown version descriptor should be rejected assert_matches!( overseer_recv(&mut virtual_overseer).await, AllMessages::NetworkBridgeTx( @@ -1920,26 +1890,8 @@ fn v3_descriptor(#[case] v3_feature_enabled: bool, #[case] crafted_unknown: bool assert_eq!(rep.value, COST_REPORT_BAD.cost_or_benefit()); } ); - } else if v3_feature_enabled { - // Case 1: V3 is enabled, descriptor should be detected as V3 and accepted - assert_candidate_backing_second( - &mut virtual_overseer, - head_b, - test_state.chain_ids[0], - &pov, - CollationVersion::V2, - ) - .await; - - send_seconded_statement(&mut virtual_overseer, keystore.clone(), &committed_candidate) - .await; - - assert_collation_seconded(&mut virtual_overseer, head_b, peer_a, CollationVersion::V2) - .await; } else { - // Case 2: V3 is disabled, a real V3 descriptor (with non-zero scheduling_parent) - // should be detected as V1 due to backwards compatibility. - // The old reserved fields have non-zero values, which triggers old_v1_detected. 
+ // V3 descriptor accepted by collator-protocol (V3 gating is done in backing) assert_candidate_backing_second( &mut virtual_overseer, head_b, diff --git a/polkadot/node/network/collator-protocol/src/validator_side_experimental/collation_manager/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side_experimental/collation_manager/mod.rs index a3ac82a74a779..0e67e47bb7da3 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side_experimental/collation_manager/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side_experimental/collation_manager/mod.rs @@ -480,7 +480,6 @@ impl CollationManager { // Sanity check of the candidate receipt version. if let Err(err) = descriptor_version_sanity_check_with_params( fetched_collation.candidate_receipt.descriptor(), - false, // v3_enabled - experimental module doesn't support V3 yet per_rp.core_index, per_rp.session_index, ) { diff --git a/polkadot/node/network/collator-protocol/src/validator_side_experimental/peer_manager/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side_experimental/peer_manager/mod.rs index 27c3d9171f63e..cd4e9af8d1589 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side_experimental/peer_manager/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side_experimental/peer_manager/mod.rs @@ -517,7 +517,7 @@ async fn extract_reputation_bumps_on_new_finalized_block false, _ => true, }; diff --git a/polkadot/node/network/collator-protocol/src/validator_side_experimental/tests.rs b/polkadot/node/network/collator-protocol/src/validator_side_experimental/tests.rs index cfbc4b3c5fcff..8ec7897dbeb14 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side_experimental/tests.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side_experimental/tests.rs @@ -46,10 +46,9 @@ use polkadot_node_subsystem::messages::{ use polkadot_node_subsystem_test_helpers::{mock::new_leaf, sender_receiver, 
TestSubsystemSender}; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::{ - node_features::FeatureIndex, ApprovedPeerId, BlockNumber, - CandidateReceiptV2 as CandidateReceipt, + ApprovedPeerId, BlockNumber, CandidateReceiptV2 as CandidateReceipt, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreIndex, GroupRotationInfo, Hash, - HeadData, Header, Id as ParaId, MutateDescriptorV2, NodeFeatures, OccupiedCoreAssumption, + HeadData, Header, Id as ParaId, MutateDescriptorV2, OccupiedCoreAssumption, PersistedValidationData, SessionIndex, SigningContext, UMPSignal, ValidatorId, ValidatorIndex, UMP_SEPARATOR, }; @@ -399,16 +398,6 @@ impl TestState { ))) .unwrap(); }, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - rp, - RuntimeApiRequest::NodeFeatures(s_index, tx), - )) => { - let session_index = self.rp_info.get(&rp).unwrap().session_index; - assert_eq!(session_index, s_index); - let mut node_features = NodeFeatures::EMPTY; - node_features.resize(FeatureIndex::FirstUnassigned as usize, false); - tx.send(Ok(node_features)).unwrap(); - }, AllMessages::RuntimeApi(RuntimeApiMessage::Request( rp, RuntimeApiRequest::ClaimQueue(tx), @@ -2372,9 +2361,9 @@ async fn test_collation_response_out_of_view() { } // TODO(https://github.com/paritytech/polkadot-sdk/issues/10883?issue=paritytech%7Cpolkadot-sdk%7C11084): Add -// test_v3_descriptor_without_feature_enabled — verify V3 descriptors are rejected when v3_enabled -// is false. The previous test_v2_descriptor_without_feature_enabled was removed because V2 is now -// always enabled. +// test for V3 descriptor validation. The previous test_v2_descriptor_without_feature_enabled was +// removed because V2 is now always enabled. The v3_enabled parameter has been removed from +// descriptor methods. 
#[rstest] #[tokio::test] diff --git a/polkadot/node/network/dispute-distribution/src/receiver/mod.rs b/polkadot/node/network/dispute-distribution/src/receiver/mod.rs index 8d8c618d1d61a..686b9f4f94da0 100644 --- a/polkadot/node/network/dispute-distribution/src/receiver/mod.rs +++ b/polkadot/node/network/dispute-distribution/src/receiver/mod.rs @@ -44,7 +44,6 @@ use polkadot_node_subsystem::{ overseer, }; use polkadot_node_subsystem_util::{runtime, runtime::RuntimeInfo}; -use polkadot_primitives::node_features::FeatureIndex; use crate::{ metrics::{FAILED, SUCCEEDED}, @@ -326,19 +325,8 @@ where let IncomingRequest { peer, payload, pending_response } = incoming; // For disputes, we need session info from the scheduling context - // First get a reference relay parent to fetch node features - let relay_parent = payload.0.candidate_receipt.descriptor.relay_parent(); - - let session_info_for_features = self - .runtime - .get_session_info_by_index(&mut self.sender, relay_parent, payload.0.session_index) - .await?; - let v3_enabled = - FeatureIndex::CandidateReceiptV3.is_set(&session_info_for_features.node_features); - // Use scheduling_parent to fetch the session info for dispute validators - let scheduling_parent = - payload.0.candidate_receipt.descriptor.scheduling_parent(v3_enabled); + let scheduling_parent = payload.0.candidate_receipt.descriptor.scheduling_parent(); let info = self .runtime diff --git a/polkadot/node/network/dispute-distribution/src/sender/send_task.rs b/polkadot/node/network/dispute-distribution/src/sender/send_task.rs index d7941f1dad292..e9a1a405789d7 100644 --- a/polkadot/node/network/dispute-distribution/src/sender/send_task.rs +++ b/polkadot/node/network/dispute-distribution/src/sender/send_task.rs @@ -29,8 +29,7 @@ use polkadot_node_network_protocol::{ use polkadot_node_subsystem::{messages::NetworkBridgeTxMessage, overseer}; use polkadot_node_subsystem_util::{metrics, nesting_sender::NestingSender, runtime::RuntimeInfo}; use 
polkadot_primitives::{ - node_features::FeatureIndex, AuthorityDiscoveryId, CandidateHash, Hash, SessionIndex, - ValidatorIndex, + AuthorityDiscoveryId, CandidateHash, Hash, SessionIndex, ValidatorIndex, }; use super::error::{FatalError, Result}; @@ -236,19 +235,8 @@ impl SendTask { active_sessions: &HashMap, ) -> Result> { // For disputes, we need session info from the scheduling context - // First get a reference relay parent to fetch node features - let relay_parent = self.request.0.candidate_receipt.descriptor.relay_parent(); - - // Get node features to determine v3_enabled - let session_info_for_features = runtime - .get_session_info_by_index(ctx.sender(), relay_parent, self.request.0.session_index) - .await?; - let v3_enabled = - FeatureIndex::CandidateReceiptV3.is_set(&session_info_for_features.node_features); - // Use scheduling_parent to fetch the session info for dispute validators - let scheduling_parent = - self.request.0.candidate_receipt.descriptor.scheduling_parent(v3_enabled); + let scheduling_parent = self.request.0.candidate_receipt.descriptor.scheduling_parent(); // Retrieve all authorities which participated in the parachain consensus of the session // in which the candidate was backed (scheduling session). 
diff --git a/polkadot/node/network/statement-distribution/src/error.rs b/polkadot/node/network/statement-distribution/src/error.rs index a4307a497398d..ee37db9afdae8 100644 --- a/polkadot/node/network/statement-distribution/src/error.rs +++ b/polkadot/node/network/statement-distribution/src/error.rs @@ -80,9 +80,6 @@ pub enum Error { #[error("Fetching minimum backing votes failed {0:?}")] FetchMinimumBackingVotes(RuntimeApiError), - #[error("Fetching node features failed {0:?}")] - FetchNodeFeatures(RuntimeApiError), - #[error("Attempted to share statement when not a validator or not assigned")] InvalidShare, diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 44344676d138f..228eeff9d289f 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -44,13 +44,13 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util::{ backing_implicit_view::View as ImplicitView, reputation::ReputationAggregator, - request_min_backing_votes, request_node_features, runtime::ClaimQueueSnapshot, + request_min_backing_votes, runtime::ClaimQueueSnapshot, }; use polkadot_primitives::{ - node_features::FeatureIndex, transpose_claim_queue, AuthorityDiscoveryId, CandidateHash, - CompactStatement, CoreIndex, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, IndexedVec, - NodeFeatures, SessionIndex, SessionInfo, SignedStatement, SigningContext, TransposedClaimQueue, - UncheckedSignedStatement, ValidatorId, ValidatorIndex, + transpose_claim_queue, AuthorityDiscoveryId, CandidateHash, CompactStatement, CoreIndex, + GroupIndex, GroupRotationInfo, Hash, Id as ParaId, IndexedVec, SessionIndex, SessionInfo, + SignedStatement, SigningContext, TransposedClaimQueue, UncheckedSignedStatement, ValidatorId, + ValidatorIndex, }; use sp_keystore::KeystorePtr; @@ -222,17 +222,10 @@ struct PerSessionState { // getting the 
topology from the gossip-support subsystem grid_view: Option, local_validator: Option, - // Node features for this session - node_features: NodeFeatures, } impl PerSessionState { - fn new( - session_info: SessionInfo, - keystore: &KeystorePtr, - backing_threshold: u32, - node_features: NodeFeatures, - ) -> Self { + fn new(session_info: SessionInfo, keystore: &KeystorePtr, backing_threshold: u32) -> Self { let groups = Groups::new(session_info.validator_groups.clone(), backing_threshold); let mut authority_lookup = HashMap::new(); for (i, ad) in session_info.discovery_keys.iter().cloned().enumerate() { @@ -245,14 +238,7 @@ impl PerSessionState { ) .map(|(_, index)| LocalValidatorIndex::Active(index)); - PerSessionState { - session_info, - groups, - authority_lookup, - grid_view: None, - local_validator, - node_features, - } + PerSessionState { session_info, groups, authority_lookup, grid_view: None, local_validator } } fn supply_topology( @@ -288,11 +274,6 @@ impl PerSessionState { fn is_not_validator(&self) -> bool { self.grid_view.is_some() && self.local_validator.is_none() } - - /// Returns `true` if v3 candidate receipts are enabled - fn v3_enabled(&self) -> bool { - FeatureIndex::CandidateReceiptV3.is_set(&self.node_features) - } } pub(crate) struct State { @@ -602,17 +583,8 @@ async fn handle_active_leaf_update( .await .map_err(JfyiError::RuntimeApiUnavailable)? .map_err(JfyiError::FetchMinimumBackingVotes)?; - let node_features = request_node_features(new_relay_parent, session_index, ctx.sender()) - .await - .await - .map_err(JfyiError::RuntimeApiUnavailable)? 
- .map_err(JfyiError::FetchNodeFeatures)?; - let mut per_session_state = PerSessionState::new( - session_info, - &state.keystore, - minimum_backing_votes, - node_features, - ); + let mut per_session_state = + PerSessionState::new(session_info, &state.keystore, minimum_backing_votes); if let Some(topology) = state.unused_topologies.remove(&session_index) { per_session_state.supply_topology(&topology.topology, topology.local_index); } @@ -2165,17 +2137,11 @@ async fn fragment_chain_update_inner( { let confirmed_candidate = state.candidates.get_confirmed(&candidate_hash); - // Get the session for the relay parent to determine v3_enabled. - // We need this to correctly extract the scheduling_parent from the descriptor. - let relay_parent = receipt.descriptor.relay_parent(); - let session_via_relay_parent = - state.per_scheduling_parent.get(&relay_parent).map(|rp| rp.session); - - let per_session = - session_via_relay_parent.and_then(|session| state.per_session.get(&session)); - let v3_enabled = per_session.map_or(false, |ps| ps.v3_enabled()); - - let scheduling_parent = receipt.descriptor.scheduling_parent(v3_enabled); + let scheduling_parent = receipt.descriptor.scheduling_parent(); + let per_session = state + .per_scheduling_parent + .get(&scheduling_parent) + .and_then(|p| state.per_session.get(&p.session)); let prs = state.per_scheduling_parent.get_mut(&scheduling_parent); if let (Some(confirmed), Some(prs), Some(per_session)) = @@ -3023,7 +2989,6 @@ pub(crate) async fn handle_response( }, disabled_mask, &scheduling_parent_state.transposed_cq, - per_session.v3_enabled(), ); for (peer, rep) in res.reputation_changes { diff --git a/polkadot/node/network/statement-distribution/src/v2/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs index c8a90f3f5cfd2..09f336d241414 100644 --- a/polkadot/node/network/statement-distribution/src/v2/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs @@ -568,7 +568,6 @@ impl 
UnhandledResponse { allowed_para_lookup: impl Fn(ParaId, GroupIndex) -> bool, disabled_mask: BitVec, transposed_cq: &TransposedClaimQueue, - v3_enabled: bool, ) -> ResponseValidationOutput { let UnhandledResponse { response: TaggedResponse { identifier, requested_peer, props, response }, @@ -655,7 +654,6 @@ impl UnhandledResponse { allowed_para_lookup, disabled_mask, transposed_cq, - v3_enabled, ); if let CandidateRequestStatus::Complete { .. } = output.request_status { @@ -677,7 +675,6 @@ fn validate_complete_response( allowed_para_lookup: impl Fn(ParaId, GroupIndex) -> bool, disabled_mask: BitVec, transposed_cq: &TransposedClaimQueue, - v3_enabled: bool, ) -> ResponseValidationOutput { let RequestProperties { backing_threshold, mut unwanted_mask } = props; @@ -730,7 +727,7 @@ fn validate_complete_response( let candidate_hash = response.candidate_receipt.hash(); // Validate the ump signals. - if let Err(err) = response.candidate_receipt.parse_ump_signals(transposed_cq, v3_enabled) { + if let Err(err) = response.candidate_receipt.parse_ump_signals(transposed_cq) { gum::debug!( target: LOG_TARGET, ?candidate_hash, @@ -743,8 +740,7 @@ fn validate_complete_response( // Check if `session_index` of relay parent matches candidate descriptor // `session_index`. 
- if let Some(candidate_session_index) = - response.candidate_receipt.descriptor.session_index(v3_enabled) + if let Some(candidate_session_index) = response.candidate_receipt.descriptor.session_index() { if candidate_session_index != session { gum::debug!( @@ -1133,7 +1129,6 @@ mod tests { allowed_para_lookup, disabled_mask.clone(), &Default::default(), - false, ); assert_eq!( output, @@ -1174,7 +1169,6 @@ mod tests { allowed_para_lookup, disabled_mask, &Default::default(), - false, ); assert_eq!( output, @@ -1260,7 +1254,6 @@ mod tests { allowed_para_lookup, disabled_mask, &Default::default(), - false, ); assert_eq!( output, @@ -1343,7 +1336,6 @@ mod tests { allowed_para_lookup, disabled_mask, &Default::default(), - false, ); assert_eq!( output, @@ -1483,7 +1475,6 @@ mod tests { allowed_para_lookup, disabled_mask.clone(), &Default::default(), - false, ); // First request served successfully diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index 1a563146b65fe..d3a9b63c11dea 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -63,6 +63,12 @@ fn cluster_peer_allowed_to_send_incomplete_statements(#[case] use_v3_descriptor: if use_v3_descriptor { candidate.descriptor.set_version(1); candidate.descriptor.set_scheduling_parent(relay_parent); + // V3 descriptors require UMP signals. 
+ candidate.commitments.upward_messages.force_push(UMP_SEPARATOR); + candidate + .commitments + .upward_messages + .force_push(UMPSignal::SelectCore(CoreSelector(0), ClaimQueueOffset(0)).encode()); } let candidate_hash = candidate.hash(); diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 5fc012eec40d1..d712cd5e0bc08 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -64,7 +64,7 @@ pub use v9::{ UncheckedSignedAvailabilityBitfields, UncheckedSignedStatement, UpgradeGoAhead, UpgradeRestriction, UpwardMessage, ValidDisputeStatementKind, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, - ValidityError, ASSIGNMENT_KEY_TYPE_ID, DEFAULT_CLAIM_QUEUE_OFFSET, + ValidityError, VersionCheckError, ASSIGNMENT_KEY_TYPE_ID, DEFAULT_CLAIM_QUEUE_OFFSET, DEFAULT_SCHEDULING_LOOKAHEAD, LEGACY_MIN_BACKING_VOTES, LOWEST_PUBLIC_ID, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, MIN_CODE_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, PARACHAINS_INHERENT_IDENTIFIER, PARACHAIN_KEY_TYPE_ID, diff --git a/polkadot/primitives/src/v9/mod.rs b/polkadot/primitives/src/v9/mod.rs index c3845270d906c..312e32f1d30f6 100644 --- a/polkadot/primitives/src/v9/mod.rs +++ b/polkadot/primitives/src/v9/mod.rs @@ -1852,6 +1852,27 @@ pub enum CandidateDescriptorVersion { Unknown, } +/// Error returned by [`CandidateDescriptorV2::check_version_acceptance`]. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum VersionCheckError { + /// Old-style and new-style version detection disagree, and this is not the + /// expected V3 disagreement (old rules → V1, new rules → V3) with V3 enabled. + Inconsistency, + /// The descriptor is V3 but the V3 feature is not enabled. 
+ V3NotEnabled, +} + +impl core::fmt::Display for VersionCheckError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + Self::Inconsistency => { + write!(f, "Descriptor version detection inconsistency (old vs new rules disagree)") + }, + Self::V3NotEnabled => write!(f, "V3 candidate descriptor but V3 feature not enabled"), + } + } +} + /// A unique descriptor of the candidate receipt. #[derive(PartialEq, Eq, Clone, Encode, Decode, DecodeWithMemTracking, TypeInfo)] pub struct CandidateDescriptorV2 { @@ -1947,17 +1968,71 @@ impl> CandidateDescriptorV2 { /// /// # Arguments /// - /// * `v3_enabled` - Whether the V3 candidate descriptor version is enabled - /// via node features. When `true`, the function will properly detect and - /// return V3 descriptors. When `false`, the function preserves pre-V3 - /// behavior for backwards compatibility - see explanation above. - pub fn version(&self, v3_enabled: bool) -> CandidateDescriptorVersion { - if v3_enabled { - self.v3_version() - } else { - // Preserve pre v3 behavior exactly: - self.v2_version() + /// Detect the version of the candidate descriptor. + /// + /// Always uses the relaxed (v3-capable) detection logic. This means + /// version detection is self-contained and does not require knowing + /// whether the V3 node feature is enabled. + /// + /// The safety invariant is maintained by the runtime and backing + /// subsystem: they reject candidates where `version()` and + /// `version_old_rules()` disagree when V3 is not yet enabled, and + /// reject V3 candidates outright when V3 is not enabled. This ensures + /// that any on-chain candidate has an unambiguous version, so approval + /// checkers and dispute participants never need to look up node features. + pub fn version(&self) -> CandidateDescriptorVersion { + self.v3_version() + } + + /// Detect the version using the pre-V3 (stricter) rules. 
+ /// + /// Under these rules, all reserved fields, `scheduling_parent`, and + /// `scheduling_session_offset` must be zero for a descriptor to be + /// considered V2. Any non-zero value in those fields causes V1 + /// detection. V3 descriptors appear as V1 under these rules. + /// + /// Used together with `version()` in consistency checks: if the two + /// methods disagree, the candidate is ambiguous and must be rejected + /// when V3 is not enabled. + pub fn version_old_rules(&self) -> CandidateDescriptorVersion { + self.v2_version() + } + + /// Returns `true` if the old-style and new-style version detection agree. + /// + /// When V3 is not enabled, both runtime and backing must reject candidates + /// where this returns `false`, preventing ambiguous candidates from landing + /// on-chain. Once V3 is enabled, disagreement is expected for V3 candidates + /// (old rules see V1, new rules see V3) and this check is skipped. + pub fn check_version_consistency(&self) -> bool { + self.version() == self.version_old_rules() + } + + /// Validates that the descriptor version is acceptable given whether V3 is enabled. + /// + /// This is the single source of truth for version gating logic, used by both + /// the runtime (`check_descriptor_version_and_signals`) and the backing subsystem. + /// + /// Checks two things: + /// 1. Old-style and new-style version detection must agree, unless the candidate is V3 and V3 + /// is enabled (the expected disagreement: old rules see V1, new rules see V3). + /// 2. V3 candidates are rejected when V3 is not enabled. + pub fn check_version_acceptance(&self, v3_enabled: bool) -> Result<(), VersionCheckError> { + let version = self.version(); + + // Version consistency: old and new detection must agree, unless this is the + // expected V3 disagreement (old rules → V1, new rules → V3) with V3 enabled. 
+ let is_expected_v3_disagreement = version == CandidateDescriptorVersion::V3 && v3_enabled; + if !self.check_version_consistency() && !is_expected_v3_disagreement { + return Err(VersionCheckError::Inconsistency); } + + // V3 gating: reject V3 candidates before the feature is enabled. + if version == CandidateDescriptorVersion::V3 && !v3_enabled { + return Err(VersionCheckError::V3NotEnabled); + } + + Ok(()) } fn v2_version(&self) -> CandidateDescriptorVersion { @@ -2038,11 +2113,9 @@ impl> CandidateDescriptorV2 { } /// Returns the collator id if this is a v1 `CandidateDescriptor` - /// - /// Note: This method assumes v3_enabled = false and is only for test code. #[cfg(feature = "test")] pub fn collator(&self) -> Option { - if self.version(false) == CandidateDescriptorVersion::V1 { + if self.version() == CandidateDescriptorVersion::V1 { Some(self.rebuild_collator_field()) } else { None @@ -2072,11 +2145,9 @@ impl> CandidateDescriptorV2 { } /// Returns the collator signature of `V1` candidate descriptors, `None` otherwise. - /// - /// Note: This method assumes v3_enabled = false and is only for test code. #[cfg(feature = "test")] pub fn signature(&self) -> Option { - if self.version(false) == CandidateDescriptorVersion::V1 { + if self.version() == CandidateDescriptorVersion::V1 { return Some(self.rebuild_signature_field()); } @@ -2084,8 +2155,8 @@ impl> CandidateDescriptorV2 { } /// Returns the `core_index` of `V2` and `V3` candidate descriptors, `None` for `V1`. - pub fn core_index(&self, v3_enabled: bool) -> Option { - if self.version(v3_enabled) == CandidateDescriptorVersion::V1 { + pub fn core_index(&self) -> Option { + if self.version() == CandidateDescriptorVersion::V1 { return None; } @@ -2093,8 +2164,8 @@ impl> CandidateDescriptorV2 { } /// Returns the `session_index` of `V2` and `V3` candidate descriptors, `None` for `V1`. 
- pub fn session_index(&self, v3_enabled: bool) -> Option { - if self.version(v3_enabled) == CandidateDescriptorVersion::V1 { + pub fn session_index(&self) -> Option { + if self.version() == CandidateDescriptorVersion::V1 { return None; } @@ -2106,8 +2177,8 @@ impl> CandidateDescriptorV2 { /// /// On v1 and v2 this function will return the relay parent as under these versions the relay /// parent is also the scheduling parent. - pub fn scheduling_parent(&self, v3_enabled: bool) -> H { - match self.version(v3_enabled) { + pub fn scheduling_parent(&self) -> H { + match self.version() { CandidateDescriptorVersion::V1 => self.relay_parent, CandidateDescriptorVersion::V2 => self.relay_parent, CandidateDescriptorVersion::V3 => self.scheduling_parent, @@ -2121,8 +2192,8 @@ impl> CandidateDescriptorV2 { /// On v1: Return None. /// On v2: Return the session index as it equals the scheduling session on v2. /// On v3: Return the provided scheduling session index. - pub fn scheduling_session(&self, v3_enabled: bool) -> Option { - match self.version(v3_enabled) { + pub fn scheduling_session(&self) -> Option { + match self.version() { CandidateDescriptorVersion::V1 => None, CandidateDescriptorVersion::V2 => Some(self.session_index), CandidateDescriptorVersion::V3 => { @@ -2664,15 +2735,18 @@ impl> CommittedCandidateReceiptV2 { /// Params: /// - `cores_per_para` is a claim queue snapshot at the candidate's relay parent, stored as /// a mapping between `ParaId` and the cores assigned per depth. - /// - `v3_enabled` - whether V3 candidate descriptors are enabled via node features. + /// + /// NOTE: This must only be called in the runtime and backing - never in approval voting nor + /// disputes! At least not as long as nodes exist which don't understand v3 candidate + /// descriptors. Not checking there is fine, because it is checked by the runtime - if it can be + /// disputed, it has been checked already! 
pub fn parse_ump_signals( &self, cores_per_para: &TransposedClaimQueue, - v3_enabled: bool, ) -> Result { let signals = self.commitments.ump_signals()?; - match self.descriptor.version(v3_enabled) { + match self.descriptor.version() { CandidateDescriptorVersion::V1 => { // If the parachain runtime started sending ump signals, v1 descriptors are no // longer allowed. @@ -2689,7 +2763,8 @@ impl> CommittedCandidateReceiptV2 { }, _ if signals.is_empty() => { // V3 and above require UMP signals. - // This is technically changed behavior, but can't be triggered without v3 enabled. + // This is technically changed behavior, but this is fine as it is only checked in + // the runtime and in backing! return Err(CommittedCandidateReceiptError::NoUMPSignalWithV3Descriptor); }, _ => {}, @@ -3116,4 +3191,131 @@ pub mod tests { assert!(zero_b.leading_zeros() >= zero_u.leading_zeros()); } + + fn make_v2_descriptor() -> CandidateDescriptorV2 { + CandidateDescriptorV2::new( + Id::from(1u32), + Hash::repeat_byte(1), + CoreIndex(0), + 1, + Hash::repeat_byte(2), + Hash::repeat_byte(3), + Hash::repeat_byte(4), + Hash::repeat_byte(5), + ValidationCodeHash::from(Hash::repeat_byte(6)), + ) + } + + fn make_v3_descriptor() -> CandidateDescriptorV2 { + CandidateDescriptorV2::new_v3( + Id::from(1u32), + Hash::repeat_byte(1), + CoreIndex(0), + 1, + Hash::repeat_byte(2), + Hash::repeat_byte(3), + Hash::repeat_byte(4), + Hash::repeat_byte(5), + ValidationCodeHash::from(Hash::repeat_byte(6)), + Hash::repeat_byte(7), // scheduling_parent + ) + } + + #[test] + fn check_version_acceptance_v1_consistent() { + // A V1 descriptor (created from old-style with non-zero collator fields) + // Both old and new rules agree → passes regardless of v3_enabled. + let mut desc = make_v2_descriptor(); + // Put non-zero bytes in first 16 bytes of reserved1 to trigger V1 in both + // old and new detection. 
+ desc.reserved1[0] = 0xFF; + + assert_eq!(desc.version(), CandidateDescriptorVersion::V1); + assert_eq!(desc.version_old_rules(), CandidateDescriptorVersion::V1); + assert!(desc.check_version_consistency()); + + assert!(desc.check_version_acceptance(false).is_ok()); + assert!(desc.check_version_acceptance(true).is_ok()); + } + + #[test] + fn check_version_acceptance_v2_consistent() { + // A clean V2 descriptor: both rules agree → passes always. + let desc = make_v2_descriptor(); + + assert_eq!(desc.version(), CandidateDescriptorVersion::V2); + assert_eq!(desc.version_old_rules(), CandidateDescriptorVersion::V2); + assert!(desc.check_version_consistency()); + + assert!(desc.check_version_acceptance(false).is_ok()); + assert!(desc.check_version_acceptance(true).is_ok()); + } + + #[test] + fn check_version_acceptance_v3_when_enabled() { + // V3 descriptor with v3_enabled=true → passes. + let desc = make_v3_descriptor(); + + assert_eq!(desc.version(), CandidateDescriptorVersion::V3); + assert_eq!(desc.version_old_rules(), CandidateDescriptorVersion::V1); + assert!(!desc.check_version_consistency()); + + assert!(desc.check_version_acceptance(true).is_ok()); + } + + #[test] + fn check_version_acceptance_v3_when_disabled() { + // V3 descriptor with v3_enabled=false → rejected. + // The consistency check fires first (old rules see V1, new rules see V3, + // and V3 disagreement is not expected when v3_enabled=false). + let desc = make_v3_descriptor(); + + assert_eq!(desc.version(), CandidateDescriptorVersion::V3); + assert_eq!(desc.check_version_acceptance(false), Err(VersionCheckError::Inconsistency)); + } + + #[test] + fn check_version_acceptance_ambiguous_rejected() { + // Craft descriptor where old rules see V1, new rules see V2. + // reserved1[16..24] non-zero, reserved1[0..16] all zero, version=0. 
+ let mut desc = make_v2_descriptor(); + desc.reserved1[16] = 0xFF; // triggers old V1 check but not new + + assert_eq!(desc.version(), CandidateDescriptorVersion::V2); + assert_eq!(desc.version_old_rules(), CandidateDescriptorVersion::V1); + assert!(!desc.check_version_consistency()); + + // Rejected regardless of v3_enabled. + assert_eq!(desc.check_version_acceptance(false), Err(VersionCheckError::Inconsistency)); + assert_eq!(desc.check_version_acceptance(true), Err(VersionCheckError::Inconsistency)); + } + + #[test] + fn check_version_consistency_v3_expected_disagreement() { + // V3 descriptor: version() returns V3, version_old_rules() returns V1. + // check_version_consistency() is false — but this is expected. + let desc = make_v3_descriptor(); + + assert_eq!(desc.version(), CandidateDescriptorVersion::V3); + assert_eq!(desc.version_old_rules(), CandidateDescriptorVersion::V1); + assert!(!desc.check_version_consistency()); + // Accepted when V3 is enabled. + assert!(desc.check_version_acceptance(true).is_ok()); + } + + #[test] + fn check_version_acceptance_ambiguous_scheduling_parent_nonzero() { + // Descriptor with scheduling_parent non-zero but version=0. + // Old rules: V1 (scheduling_parent non-zero triggers old_v1_detected). + // New rules: V2 (only checks reserved1[0..16], which is zero). 
+ let mut desc = make_v2_descriptor(); + desc.scheduling_parent = Hash::repeat_byte(0xAB); + + assert_eq!(desc.version(), CandidateDescriptorVersion::V2); + assert_eq!(desc.version_old_rules(), CandidateDescriptorVersion::V1); + assert!(!desc.check_version_consistency()); + + assert_eq!(desc.check_version_acceptance(false), Err(VersionCheckError::Inconsistency)); + assert_eq!(desc.check_version_acceptance(true), Err(VersionCheckError::Inconsistency)); + } } diff --git a/polkadot/primitives/test-helpers/src/lib.rs b/polkadot/primitives/test-helpers/src/lib.rs index 56110a4de99a7..20b0992ff5d1f 100644 --- a/polkadot/primitives/test-helpers/src/lib.rs +++ b/polkadot/primitives/test-helpers/src/lib.rs @@ -446,8 +446,14 @@ pub fn dummy_validator() -> ValidatorId { } /// Create a meaningless collator id. +/// +/// Byte 8 is set to 1 so that when V1 descriptors are converted to V2 layout, +/// `reserved1[0]` (mapped from `collator[8]`) is non-zero, allowing `v3_version()` +/// to correctly detect the descriptor as V1. pub fn dummy_collator() -> CollatorId { - CollatorId::from(sr25519::Public::default()) + let mut bytes = [0u8; 32]; + bytes[8] = 1; + CollatorId::from(sr25519::Public::from_raw(bytes)) } /// Create a meaningless collator signature. It is important to not be 0, as we'd confuse @@ -778,7 +784,7 @@ mod candidate_receipt_tests { // We get same candidate hash. 
assert_eq!(old_ccr.hash(), new_ccr.hash()); - assert_eq!(new_ccr.descriptor.version(false), CandidateDescriptorVersion::V1); + assert_eq!(new_ccr.descriptor.version_old_rules(), CandidateDescriptorVersion::V1); assert_eq!(old_ccr.descriptor.collator, new_ccr.descriptor.collator().unwrap()); assert_eq!(old_ccr.descriptor.signature, new_ccr.descriptor.signature().unwrap()); } @@ -786,7 +792,7 @@ mod candidate_receipt_tests { #[test] fn invalid_version_descriptor() { let mut new_ccr = dummy_committed_candidate_receipt_v2(Hash::default()); - assert_eq!(new_ccr.descriptor.version(false), CandidateDescriptorVersion::V2); + assert_eq!(new_ccr.descriptor.version_old_rules(), CandidateDescriptorVersion::V2); // Put some unknown version. new_ccr.descriptor.set_version(100); @@ -794,9 +800,9 @@ mod candidate_receipt_tests { let new_ccr: CommittedCandidateReceiptV2 = Decode::decode(&mut new_ccr.encode().as_slice()).unwrap(); - assert_eq!(new_ccr.descriptor.version(false), CandidateDescriptorVersion::Unknown); + assert_eq!(new_ccr.descriptor.version_old_rules(), CandidateDescriptorVersion::Unknown); assert_eq!( - new_ccr.parse_ump_signals(&std::collections::BTreeMap::new(), false), + new_ccr.parse_ump_signals(&std::collections::BTreeMap::new()), Err(CommittedCandidateReceiptError::UnknownVersion(100)) ); } @@ -834,7 +840,7 @@ mod candidate_receipt_tests { let v2_ccr: CommittedCandidateReceiptV2 = Decode::decode(&mut encoded_ccr.as_slice()).unwrap(); - assert_eq!(v2_ccr.descriptor.core_index(false), Some(CoreIndex(123))); + assert_eq!(v2_ccr.descriptor.core_index(), Some(CoreIndex(123))); let mut cq = BTreeMap::new(); cq.insert( @@ -842,7 +848,7 @@ mod candidate_receipt_tests { vec![new_ccr.descriptor.para_id(), new_ccr.descriptor.para_id()].into(), ); - assert!(new_ccr.parse_ump_signals(&transpose_claim_queue(cq), false).is_ok()); + assert!(new_ccr.parse_ump_signals(&transpose_claim_queue(cq)).is_ok()); assert_eq!(new_ccr.hash(), v2_ccr.hash()); } @@ -870,17 +876,17 @@ mod 
candidate_receipt_tests { let v1_ccr: CommittedCandidateReceiptV2 = Decode::decode(&mut encoded_ccr.as_slice()).unwrap(); - assert_eq!(v1_ccr.descriptor.version(false), CandidateDescriptorVersion::V1); + assert_eq!(v1_ccr.descriptor.version_old_rules(), CandidateDescriptorVersion::V1); assert!(!v1_ccr.commitments.ump_signals().unwrap().is_empty()); let mut cq = BTreeMap::new(); cq.insert(CoreIndex(0), vec![v1_ccr.descriptor.para_id()].into()); cq.insert(CoreIndex(1), vec![v1_ccr.descriptor.para_id()].into()); - assert_eq!(v1_ccr.descriptor.core_index(false), None); + assert_eq!(v1_ccr.descriptor.core_index(), None); assert_eq!( - v1_ccr.parse_ump_signals(&transpose_claim_queue(cq), false), + v1_ccr.parse_ump_signals(&transpose_claim_queue(cq)), Err(CommittedCandidateReceiptError::UMPSignalWithV1Descriptor) ); } @@ -900,7 +906,7 @@ mod candidate_receipt_tests { // Since collator sig and id are zeroed, it means that the descriptor uses format // version 2. Should still pass checks without core selector. - assert!(new_ccr.parse_ump_signals(&transpose_claim_queue(cq), false).is_ok()); + assert!(new_ccr.parse_ump_signals(&transpose_claim_queue(cq)).is_ok()); let mut cq = BTreeMap::new(); cq.insert(CoreIndex(0), vec![new_ccr.descriptor.para_id()].into()); @@ -908,7 +914,7 @@ mod candidate_receipt_tests { // Passes even if 2 cores are assigned, because elastic scaling MVP could still inject the // core index in the `BackedCandidate`. - assert!(new_ccr.parse_ump_signals(&transpose_claim_queue(cq), false).is_ok()); + assert!(new_ccr.parse_ump_signals(&transpose_claim_queue(cq)).is_ok()); // Adding collator signature should make it decode as v1. 
old_ccr.descriptor.signature = dummy_collator_signature(); @@ -924,7 +930,7 @@ mod candidate_receipt_tests { assert_eq!(new_ccr.descriptor.signature(), Some(old_ccr.descriptor.signature)); assert_eq!(new_ccr.descriptor.collator(), Some(old_ccr.descriptor.collator)); - assert_eq!(new_ccr.descriptor.core_index(false), None); + assert_eq!(new_ccr.descriptor.core_index(), None); assert_eq!(new_ccr.descriptor.para_id(), ParaId::new(1000)); assert_eq!(old_ccr_hash, new_ccr.hash()); @@ -954,18 +960,12 @@ mod candidate_receipt_tests { new_ccr.commitments.upward_messages.force_push(vec![0u8; 256]); new_ccr.commitments.upward_messages.force_push(vec![0xff; 256]); - assert_eq!( - new_ccr.parse_ump_signals(&cq, false), - Ok(CandidateUMPSignals::dummy(None, None)) - ); + assert_eq!(new_ccr.parse_ump_signals(&cq), Ok(CandidateUMPSignals::dummy(None, None))); // separator new_ccr.commitments.upward_messages.force_push(UMP_SEPARATOR); - assert_eq!( - new_ccr.parse_ump_signals(&cq, false), - Ok(CandidateUMPSignals::dummy(None, None)) - ); + assert_eq!(new_ccr.parse_ump_signals(&cq), Ok(CandidateUMPSignals::dummy(None, None))); // CoreIndex commitment { @@ -976,7 +976,7 @@ mod candidate_receipt_tests { .force_push(UMPSignal::SelectCore(CoreSelector(0), ClaimQueueOffset(1)).encode()); assert_eq!( - new_ccr.parse_ump_signals(&cq, false), + new_ccr.parse_ump_signals(&cq), Ok(CandidateUMPSignals::dummy(Some((CoreSelector(0), ClaimQueueOffset(1))), None)) ); } @@ -991,7 +991,7 @@ mod candidate_receipt_tests { .force_push(UMPSignal::ApprovedPeer(vec![1, 2, 3].try_into().unwrap()).encode()); assert_eq!( - new_ccr.parse_ump_signals(&cq, false), + new_ccr.parse_ump_signals(&cq), Ok(CandidateUMPSignals::dummy(None, Some(vec![1, 2, 3].try_into().unwrap()))) ); @@ -1003,7 +1003,7 @@ mod candidate_receipt_tests { .force_push(UMPSignal::SelectCore(CoreSelector(0), ClaimQueueOffset(1)).encode()); assert_eq!( - new_ccr.parse_ump_signals(&cq, false), + new_ccr.parse_ump_signals(&cq), 
Ok(CandidateUMPSignals::dummy( Some((CoreSelector(0), ClaimQueueOffset(1))), Some(vec![1, 2, 3].try_into().unwrap()) @@ -1022,7 +1022,7 @@ mod candidate_receipt_tests { .force_push(UMPSignal::ApprovedPeer(vec![1, 2, 3].try_into().unwrap()).encode()); assert_eq!( - new_ccr.parse_ump_signals(&cq, false), + new_ccr.parse_ump_signals(&cq), Ok(CandidateUMPSignals::dummy( Some((CoreSelector(0), ClaimQueueOffset(1))), Some(vec![1, 2, 3].try_into().unwrap()) @@ -1053,7 +1053,7 @@ mod candidate_receipt_tests { // No signals can be decoded. assert_eq!( - new_ccr.parse_ump_signals(&cq, false), + new_ccr.parse_ump_signals(&cq), Err(CommittedCandidateReceiptError::UmpSignalDecode) ); assert_eq!( @@ -1084,13 +1084,13 @@ mod candidate_receipt_tests { let cq = transpose_claim_queue(cq); assert_eq!( - new_ccr.parse_ump_signals(&cq, false), + new_ccr.parse_ump_signals(&cq), Ok(CandidateUMPSignals::dummy(None, Some(vec![1, 2, 3].try_into().unwrap()))) ); new_ccr.descriptor.set_core_index(CoreIndex(1)); assert_eq!( - new_ccr.parse_ump_signals(&cq, false), + new_ccr.parse_ump_signals(&cq), Err(CommittedCandidateReceiptError::InvalidCoreIndex) ); new_ccr.descriptor.set_core_index(CoreIndex(0)); @@ -1102,14 +1102,14 @@ mod candidate_receipt_tests { // No assignments. assert_eq!( - new_ccr.parse_ump_signals(&transpose_claim_queue(Default::default()), false), + new_ccr.parse_ump_signals(&transpose_claim_queue(Default::default())), Err(CommittedCandidateReceiptError::NoAssignment) ); // Mismatch between descriptor index and commitment. 
new_ccr.descriptor.set_core_index(CoreIndex(1)); assert_eq!( - new_ccr.parse_ump_signals(&cq, false), + new_ccr.parse_ump_signals(&cq), Err(CommittedCandidateReceiptError::CoreIndexMismatch { descriptor: CoreIndex(1), commitments: CoreIndex(0), @@ -1132,7 +1132,7 @@ mod candidate_receipt_tests { .force_push(UMPSignal::ApprovedPeer(vec![4, 5].try_into().unwrap()).encode()); assert_eq!( - new_ccr.parse_ump_signals(&cq, false), + new_ccr.parse_ump_signals(&cq), Err(CommittedCandidateReceiptError::DuplicateUMPSignal) ); @@ -1153,7 +1153,7 @@ mod candidate_receipt_tests { .force_push(UMPSignal::ApprovedPeer(vec![1, 2, 3].try_into().unwrap()).encode()); assert_eq!( - new_ccr.parse_ump_signals(&cq, false), + new_ccr.parse_ump_signals(&cq), Err(CommittedCandidateReceiptError::TooManyUMPSignals) ); } diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 1642868dce032..d8ddff536811e 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -332,7 +332,7 @@ impl TestCandidateBuilder { }, }; - if ccr.descriptor.version(false) == CandidateDescriptorVersion::V2 { + if ccr.descriptor.version() == CandidateDescriptorVersion::V2 { ccr.commitments.upward_messages.force_push(UMP_SEPARATOR); ccr.commitments.upward_messages.force_push( diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 88b101e0543c8..8731f6893153b 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -924,18 +924,22 @@ pub(crate) fn sanitize_bitfields( /// Perform required checks for given candidate receipt. /// -/// Returns `true` if candidate descriptor is version 1. +/// Returns `true` if the candidate passes all version and signal checks. 
/// -/// Otherwise returns `false` if: -/// - version 2 descriptors are not allowed -/// - the core index in descriptor doesn't match the one computed from the commitments -/// - the `SelectCore` signal does not refer to a core at the top of claim queue +/// Returns `false` if: +/// - the descriptor version is unknown +/// - version consistency check fails (old/new detection rules disagree unexpectedly) +/// - version 3 descriptors are present but v3 is not enabled +/// - the relay parent or scheduling parent is not in the allowed relay parents +/// - UMP signal parsing fails +/// - for V2/V3: the core index in descriptor doesn't match the one computed from the commitments, +/// or the `SelectCore` signal does not refer to a core at the top of claim queue fn check_descriptor_version_and_signals( candidate: &BackedCandidate, allowed_relay_parents: &AllowedRelayParentsTracker>, v3_enabled: bool, ) -> bool { - let descriptor_version = candidate.descriptor().version(v3_enabled); + let descriptor_version = candidate.descriptor().version(); if descriptor_version == CandidateDescriptorVersion::Unknown { log::debug!( @@ -947,6 +951,18 @@ fn check_descriptor_version_and_signals( return false; } + // Version consistency + V3 gating (shared logic from primitives). + if let Err(reason) = candidate.descriptor().check_version_acceptance(v3_enabled) { + log::debug!( + target: LOG_TARGET, + "{}. Dropping candidate {:?} for paraid {:?}.", + reason, + candidate.candidate().hash(), + candidate.descriptor().para_id() + ); + return false; + } + // Check relay_parent exists in allowed relay parents (execution context). // Needed for all versions to access relay chain state. let relay_parent = candidate.descriptor().relay_parent(); @@ -968,7 +984,7 @@ fn check_descriptor_version_and_signals( // movement of scheduling_parent is primarily a censorship resistance concern, handled // by the collator protocol's active leaf check. 
The relay chain only requires validity // (i.e., the scheduling_parent is in allowed relay parents). - let scheduling_parent = candidate.descriptor().scheduling_parent(v3_enabled); + let scheduling_parent = candidate.descriptor().scheduling_parent(); let Some((sp_info, _)) = allowed_relay_parents.acquire_info(scheduling_parent, None) else { log::debug!( target: LOG_TARGET, @@ -982,7 +998,7 @@ fn check_descriptor_version_and_signals( // UMP signals check uses scheduling parent's claim queue. // For V1/V2: scheduling_parent == relay_parent, so uses same claim queue as before. // For V3: uses the claim queue from the scheduling_parent. - if let Err(err) = candidate.candidate().parse_ump_signals(&sp_info.claim_queue, v3_enabled) { + if let Err(err) = candidate.candidate().parse_ump_signals(&sp_info.claim_queue) { log::debug!( target: LOG_TARGET, "UMP signal check failed: {:?}. Dropping candidate {:?} for paraid {:?}.", @@ -1001,7 +1017,7 @@ fn check_descriptor_version_and_signals( // For V2/V3: Check scheduling session matches current session. // For V2: scheduling_session() returns session_index (relay parent session). // For V3: scheduling_session() returns scheduling_session_index. - let Some(scheduling_session) = candidate.descriptor().scheduling_session(v3_enabled) else { + let Some(scheduling_session) = candidate.descriptor().scheduling_session() else { log::debug!( target: LOG_TARGET, "Invalid V2/V3 candidate receipt {:?} for paraid {:?}, missing scheduling session.", @@ -1011,7 +1027,7 @@ fn check_descriptor_version_and_signals( return false; }; - let Some(session_index) = candidate.descriptor().session_index(v3_enabled) else { + let Some(session_index) = candidate.descriptor().session_index() else { log::debug!( target: LOG_TARGET, "Invalid V2/V3 candidate receipt {:?} for paraid {:?}, missing session index.", @@ -1112,12 +1128,8 @@ fn sanitize_backed_candidates( // Map candidates to scheduled cores. 
Filter out any unscheduled candidates along with their // descendants. - let mut backed_candidates_with_core = map_candidates_to_cores::( - &allowed_relay_parents, - scheduled, - candidates_per_para, - v3_enabled, - ); + let mut backed_candidates_with_core = + map_candidates_to_cores::(&allowed_relay_parents, scheduled, candidates_per_para); // Filter out backing statements from disabled validators. If by that we render a candidate with // less backing votes than required, filter that candidate also. As all the other filtering @@ -1448,7 +1460,6 @@ fn map_candidates_to_cores>, mut scheduled: BTreeMap>, candidates: BTreeMap>>, - v3_enabled: bool, ) -> BTreeMap, CoreIndex)>> { let mut backed_candidates_with_core = BTreeMap::new(); @@ -1494,9 +1505,7 @@ fn map_candidates_to_cores(allowed_relay_parents, &candidate, v3_enabled) - { + if let Some(core_index) = get_core_index::(allowed_relay_parents, &candidate) { if scheduled_cores.remove(&core_index) { temp_backed_candidates.push((candidate, core_index)); } else { @@ -1544,12 +1553,11 @@ fn map_candidates_to_cores( allowed_relay_parents: &AllowedRelayParentsTracker>, candidate: &BackedCandidate, - v3_enabled: bool, ) -> Option { candidate .candidate() .descriptor - .core_index(v3_enabled) + .core_index() .or_else(|| get_injected_core_index::(allowed_relay_parents, &candidate)) } diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index 80c6c6f399137..8c5e3c64df0d8 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -1770,7 +1770,7 @@ mod enter { // Verify all candidates have V3 descriptors (version=1) for candidate in ¶_inherent_data.backed_candidates { - assert_eq!(candidate.descriptor().version(true), CandidateDescriptorVersion::V3); + assert_eq!(candidate.descriptor().version(), CandidateDescriptorVersion::V3); } let mut inherent_data = InherentData::new(); @@ 
-1784,7 +1784,7 @@ mod enter { // Verify the filtered candidates are still V3 for candidate in &filtered.backed_candidates { - assert_eq!(candidate.descriptor().version(true), CandidateDescriptorVersion::V3); + assert_eq!(candidate.descriptor().version(), CandidateDescriptorVersion::V3); } }); } @@ -1850,10 +1850,8 @@ mod enter { }); } - // Test that V3 descriptors with UMP signals are rejected when CandidateReceiptV3 is NOT - // enabled. When v3_enabled=false, V3 descriptors (with non-zero scheduling_parent) are - // detected as V1. Since V1 forbids UMP signals and V3 requires them, valid V3 candidates are - // rejected as invalid V1 (UMPSignalWithV1Descriptor). This protects old nodes from slashing. + // Test that V3 descriptors are rejected when CandidateReceiptV3 is NOT enabled. + // The runtime's consistency check and V3 gating reject these candidates. #[test] fn v3_descriptors_rejected_as_v1_when_disabled() { let config = default_config(); @@ -1885,10 +1883,13 @@ mod enter { // Verify descriptor version detection behavior for candidate in ¶_inherent_data.backed_candidates { - // With v3_enabled=true, we correctly see V3 - assert_eq!(candidate.descriptor().version(true), CandidateDescriptorVersion::V3); - // With v3_enabled=false, V3 (non-zero scheduling_parent) is detected as V1 - assert_eq!(candidate.descriptor().version(false), CandidateDescriptorVersion::V1); + // version() always uses relaxed (v3) logic + assert_eq!(candidate.descriptor().version(), CandidateDescriptorVersion::V3); + // Under old rules, V3 (non-zero scheduling_parent) is detected as V1 + assert_eq!( + candidate.descriptor().version_old_rules(), + CandidateDescriptorVersion::V1 + ); } let mut inherent_data = InherentData::new(); @@ -2287,7 +2288,7 @@ mod enter { descriptor: CandidateDescriptorV2::new( backed_candidate.descriptor().para_id(), backed_candidate.descriptor().relay_parent(), - backed_candidate.descriptor().core_index(false).unwrap(), + 
backed_candidate.descriptor().core_index().unwrap(), 100, backed_candidate.descriptor().persisted_validation_data_hash(), backed_candidate.descriptor().pov_hash(), From 72c73654237ae65a2c5587326d5e1cdbc6d506c8 Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 5 Mar 2026 19:17:05 +0100 Subject: [PATCH 02/52] Create pr_11290.prdoc --- prdoc/pr_11290.prdoc | 58 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 prdoc/pr_11290.prdoc diff --git a/prdoc/pr_11290.prdoc b/prdoc/pr_11290.prdoc new file mode 100644 index 0000000000000..71d30d1eba233 --- /dev/null +++ b/prdoc/pr_11290.prdoc @@ -0,0 +1,58 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Make candidate descriptor version detection self-contained + +doc: + - audience: Node Dev + description: | + Removes the `v3_enabled: bool` parameter from `CandidateDescriptorV2::version()` and all + accessor methods (`core_index`, `session_index`, `scheduling_parent`, `scheduling_session`), + making version detection self-contained. Previously, version detection depended on a node + feature lookup from the relay parent, which could produce mismatches between backers (who + derive the feature from a recent leaf) and approval checkers / dispute participants (who may + not be able to determine the feature from an old relay parent). This mismatch could lead to + disputes and backer slashing. + + The fix moves version gating responsibility to two places: + - **Runtime**: `check_descriptor_version_and_signals()` rejects ambiguous candidates + (where old-style and new-style version detection disagree) and V3 candidates before the + feature is enabled. + - **Backing subsystem**: Adds defense-in-depth `check_version_acceptance()` checks at the + signing boundary, covering both the seconding path and the statement-import path. 
+ + Downstream consumers (dispute coordinator, dispute distribution, statement distribution, + collator protocol) no longer need to look up node features for version detection, since any + candidate that reaches them on-chain was already validated by the runtime. + +crates: + - name: polkadot-primitives + bump: major + - name: polkadot-primitives-test-helpers + bump: major + - name: polkadot-node-core-backing + bump: major + - name: polkadot-node-core-candidate-validation + bump: major + - name: polkadot-node-core-pvf + bump: patch + - name: polkadot-node-core-pvf-common + bump: major + - name: polkadot-node-core-prospective-parachains + bump: major + - name: polkadot-node-core-dispute-coordinator + bump: patch + - name: polkadot-node-core-provisioner + bump: patch + - name: polkadot-collator-protocol + bump: major + - name: polkadot-statement-distribution + bump: major + - name: polkadot-dispute-distribution + bump: patch + - name: polkadot-node-collation-generation + bump: patch + - name: polkadot-runtime-parachains + bump: major + - name: polkadot-test-malus + bump: patch From 6eb1aadef28a545c90fe7b2430ea17cf5985aa1b Mon Sep 17 00:00:00 2001 From: eskimor Date: Sat, 7 Mar 2026 05:55:40 +0100 Subject: [PATCH 03/52] Add V3 transition support for PVF validation - Add v3_seen flag and transition-safe accessors used by PVF - Add version_for_approval_dispute on CandidateDescriptorV2 - Add scheduling_parent_for_approval_dispute - and scheduling_session_for_approval_dispute - Thread v3_seen through ValidationContext and related PVF paths - Update tests to use CandidateReceiptV2 and the new transition logic --- .../node/core/candidate-validation/src/lib.rs | 421 +++++++++++------- .../core/candidate-validation/src/tests.rs | 237 ++++++---- .../dispute-coordinator/src/initialized.rs | 60 ++- polkadot/node/core/pvf/common/src/execute.rs | 21 +- polkadot/node/core/pvf/src/execute/queue.rs | 18 +- polkadot/node/core/pvf/src/host.rs | 9 +- 
polkadot/node/core/pvf/tests/it/main.rs | 5 +- polkadot/primitives/src/v9/mod.rs | 73 ++- 8 files changed, 575 insertions(+), 269 deletions(-) diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 8982098662b94..a7e6a83975fc2 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -39,7 +39,7 @@ use polkadot_node_subsystem::{ SubsystemSender, }; use polkadot_node_subsystem_util::{ - self as util, + self as util, request_node_features, runtime::{fetch_scheduling_lookahead, ClaimQueueSnapshot}, }; use polkadot_overseer::{ActivatedLeaf, ActiveLeavesUpdate}; @@ -49,6 +49,7 @@ use polkadot_primitives::{ DEFAULT_APPROVAL_EXECUTION_TIMEOUT, DEFAULT_BACKING_EXECUTION_TIMEOUT, DEFAULT_LENIENT_PREPARATION_TIMEOUT, DEFAULT_PRECHECK_PREPARATION_TIMEOUT, }, + node_features::FeatureIndex, transpose_claim_queue, AuthorityDiscoveryId, CandidateCommitments, CandidateDescriptorV2 as CandidateDescriptor, CandidateEvent, CandidateReceiptV2 as CandidateReceipt, @@ -170,10 +171,78 @@ where } } +/// Fetch the validation code bomb limit for a candidate. +/// +/// NOTE: This method is fetching state from the scheduling parent. Fetching state for the +/// scheduling or relay parent of a candidate is not sound in disputes! This is necessary as of now +/// though, as the provided runtime API does not allow fetching for older sessions. For the time +/// being, we at least use the scheduling parent as this is more likely to still be around than the +/// relay parent. +/// +/// For what session to pick (to be fetched via an active leaf, not scheduling nor relay parent): In +/// principle both the scheduling session and the execution session would be sensible choices here +/// for fetching the limit, all that matters is that we have consensus among validators. For +/// parachain block confidence, decreasing the value would be problematic in both cases. 
For
+/// increased values, all that matters is consensus.
+async fn fetch_bomb_limit(
+ candidate_descriptor: &CandidateDescriptor,
+ exec_kind: PvfExecKind,
+ v3_ever_seen: bool,
+ sender: &mut Sender,
+) -> Result
+where
+ Sender: SubsystemSender,
+{
+ // For approval/dispute, use the transition-safe scheduling parent
+ // to match old backer behavior before V3 is confirmed enabled.
+ // Backing uses its own v3_ever_seen + check_version_acceptance() gate.
+ // NOTE: As noted above, even looking at the scheduling parent in disputes context should be
+ // suspicious normally!
+ let scheduling_parent = match exec_kind {
+ PvfExecKind::Approval | PvfExecKind::Dispute => {
+ candidate_descriptor.scheduling_parent_for_approval_dispute(v3_ever_seen)
+ },
+ _ => candidate_descriptor.scheduling_parent(),
+ };
+
+ let scheduling_session =
+ match candidate_descriptor.scheduling_session_for_approval_dispute(v3_ever_seen) {
+ Some(session) => session,
+ None => {
+ // NOTE: This is depending on scheduling parent state to still be around!
+ let Some(session) = get_session_index(sender, scheduling_parent).await else {
+ return Err("Cannot fetch session index from the runtime".into());
+ };
+ session
+ },
+ };
+
+ // Returns a default value if the runtime API is not available for this session,
+ // but errors on unexpected runtime API failures.
+ // NOTE: This is depending on scheduling parent state to still be around!
+ util::runtime::fetch_validation_code_bomb_limit(scheduling_parent, scheduling_session, sender)
+ .await
+ .map_err(|_| "Cannot fetch validation code bomb limit from the runtime".into())
+}
+
+/// Data only needed during backing validation. These are additional strictness
+/// checks that backing performs but approval/dispute can (and need to) skip, because the
+/// runtime also validates them at inclusion time. These depend on chain state data for the
+/// scheduling or even the relay parent to still be around. Which is not a valid assumption in
+/// disputes.
+struct BackingExtras { + /// Claim queue snapshot for UMP signal validation. + claim_queue: ClaimQueueSnapshot, + /// Session index independently fetched from runtime at scheduling_parent, + /// used to verify the descriptor's scheduling_session claim. + expected_scheduling_session: SessionIndex, +} + fn handle_validation_message( mut sender: S, validation_host: ValidationHost, metrics: Metrics, + v3_ever_seen: bool, msg: CandidateValidationMessage, ) -> Pin + Send>> where @@ -191,57 +260,41 @@ where .. } => async move { let _timer = metrics.time_validate_from_exhaustive(); - let relay_parent = candidate_receipt.descriptor.relay_parent(); - - let Some(session_index) = get_session_index(&mut sender, relay_parent).await else { - let error = "cannot fetch session index from the runtime"; - gum::warn!( - target: LOG_TARGET, - ?relay_parent, - error, - ); - - let _ = response_sender - .send(Err(ValidationFailed("Session index not found".to_string()))); - return; - }; - // This will return a default value for the limit if runtime API is not available. - // however we still error out if there is a weird runtime API error. 
- let Ok(validation_code_bomb_limit) = util::runtime::fetch_validation_code_bomb_limit( - relay_parent, - session_index, - &mut sender, - ) - .await - else { - let error = "cannot fetch validation code bomb limit from the runtime"; - gum::warn!( - target: LOG_TARGET, - ?relay_parent, - error, - ); - - let _ = response_sender.send(Err(ValidationFailed( - "Validation code bomb limit not available".to_string(), - ))); - return; - }; + let validation_code_bomb_limit = + match fetch_bomb_limit(&candidate_receipt.descriptor, exec_kind, v3_ever_seen, &mut sender) + .await + { + Ok(limit) => limit, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + scheduling_parent = ?candidate_receipt.descriptor.scheduling_parent(), + ?err, + "Failed to fetch validation code bomb limit", + ); + let _ = response_sender.send(Err(ValidationFailed(err))); + return; + }, + }; - // Claim queue is scheduling context — fetch it from the scheduling_parent. - // For V1/V2, scheduling_parent() returns relay_parent. - let scheduling_parent = candidate_receipt.descriptor.scheduling_parent(); - let maybe_claim_queue = claim_queue(scheduling_parent, &mut sender).await; + // --- Backing-only extras --- + // Stricter checks that backing performs but approval/dispute can + // skip, because the runtime also validates them at inclusion time. + let backing_extras = match exec_kind { + PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_) => { + let scheduling_parent = candidate_receipt.descriptor.scheduling_parent(); + + let Some(claim_queue) = claim_queue(scheduling_parent, &mut sender).await + else { + let _ = response_sender + .send(Err(ValidationFailed("Claim queue not available".to_string()))); + return; + }; - // Fetch the scheduling session index for validating the descriptor's - // scheduling_session claim. For V1/V2 scheduling_parent == - // relay_parent so we reuse session_index. 
- let scheduling_session_index = if scheduling_parent == relay_parent { - session_index - } else { - match get_session_index(&mut sender, scheduling_parent).await { - Some(idx) => idx, - None => { + let Some(expected_scheduling_session) = + get_session_index(&mut sender, scheduling_parent).await + else { gum::warn!( target: LOG_TARGET, ?scheduling_parent, @@ -251,12 +304,14 @@ where "Scheduling session index not found".to_string(), ))); return; - }, - } + }; + + Some(BackingExtras { claim_queue, expected_scheduling_session }) + }, + _ => None, }; let res = validate_candidate_exhaustive( - scheduling_session_index, validation_host, validation_data, validation_code, @@ -265,8 +320,9 @@ where executor_params, exec_kind, &metrics, - maybe_claim_queue, validation_code_bomb_limit, + v3_ever_seen, + backing_extras, ) .await; @@ -361,7 +417,7 @@ async fn run( ctx.spawn_blocking("pvf-validation-host", task.boxed())?; let mut tasks = FuturesUnordered::new(); - let mut prepare_state = PrepareValidationState::default(); + let mut state = State::default(); loop { loop { @@ -374,13 +430,13 @@ async fn run( keystore.clone(), &mut validation_host, update, - &mut prepare_state, + &mut state, ).await }, Ok(FromOrchestra::Signal(OverseerSignal::BlockFinalized(..))) => {}, Ok(FromOrchestra::Signal(OverseerSignal::Conclude)) => return Ok(()), Ok(FromOrchestra::Communication { msg }) => { - let task = handle_validation_message(ctx.sender().clone(), validation_host.clone(), metrics.clone(), msg); + let task = handle_validation_message(ctx.sender().clone(), validation_host.clone(), metrics.clone(), state.v3_ever_seen, msg); tasks.push(task); if tasks.len() >= TASK_LIMIT { break @@ -415,8 +471,32 @@ async fn run( } } -struct PrepareValidationState { +/// Top-level subsystem state, owning session tracking, V3 transition detection, +/// and PVF preparation bookkeeping. +struct State { + /// Current session index, tracked across active leaf updates. 
session_index: Option, + /// Monotonic flag: set to `true` once any activated leaf has the V3 candidate + /// descriptor node feature enabled. Once set, never unset. + /// Used to determine whether approval/dispute validation should trust + /// `version()` (V3-capable) or fall back to `version_old_rules()`. + /// See `CandidateDescriptorV2::version_for_approval_dispute` for the safety argument. + v3_ever_seen: bool, + /// PVF preparation state (proactive pre-compilation for next session). + pvf_prep: PvfPrepState, +} + +impl Default for State { + fn default() -> Self { + Self { session_index: None, v3_ever_seen: false, pvf_prep: PvfPrepState::default() } + } +} + +/// State for proactive PVF preparation. +/// +/// Tracks whether we're a next-session authority and which code hashes we've already +/// sent to the PVF host. +struct PvfPrepState { is_next_session_authority: bool, // PVF host won't prepare the same code hash twice, so here we just avoid extra communication already_prepared_code_hashes: HashSet, @@ -424,10 +504,9 @@ struct PrepareValidationState { per_block_limit: usize, } -impl Default for PrepareValidationState { +impl Default for PvfPrepState { fn default() -> Self { Self { - session_index: None, is_next_session_authority: false, already_prepared_code_hashes: HashSet::new(), per_block_limit: 1, @@ -435,35 +514,71 @@ impl Default for PrepareValidationState { } } +/// Check if the V3 candidate descriptor node feature is enabled at the given +/// session. Returns `true` if the feature is set. 
+async fn check_v3_feature( + sender: &mut Sender, + relay_parent: Hash, + session_index: SessionIndex, +) -> bool +where + Sender: SubsystemSender, +{ + if let Ok(Ok(features)) = request_node_features(relay_parent, session_index, sender).await.await + { + if FeatureIndex::CandidateReceiptV3.is_set(&features) { + gum::info!( + target: LOG_TARGET, + ?session_index, + "CandidateReceiptV3 node feature detected, \ + switching to V3-aware approval/dispute validation", + ); + return true; + } + } + false +} + async fn handle_active_leaves_update( sender: &mut Sender, keystore: KeystorePtr, validation_host: &mut impl ValidationBackend, update: ActiveLeavesUpdate, - prepare_state: &mut PrepareValidationState, + state: &mut State, ) where Sender: SubsystemSender + SubsystemSender, { - let maybe_session_index = update_active_leaves(sender, validation_host, update.clone()).await; + update_active_leaves_validation_backend(sender, validation_host, update.clone()).await; - if let Some(activated) = update.activated { - let maybe_new_session_index = match (prepare_state.session_index, maybe_session_index) { - (Some(existing_index), Some(new_index)) => { - (new_index > existing_index).then_some(new_index) - }, - (None, Some(new_index)) => Some(new_index), - _ => None, - }; - maybe_prepare_validation( - sender, - keystore.clone(), - validation_host, - activated, - prepare_state, - maybe_new_session_index, - ) - .await; + let Some(activated) = update.activated else { return }; + let maybe_session_index = get_session_index(sender, activated.hash).await; + + // Detect session change + let new_session = match (state.session_index, maybe_session_index) { + (Some(old), Some(new)) => (new > old).then_some(new), + (None, Some(new)) => Some(new), + _ => None, + }; + + state.session_index = new_session.or(state.session_index); + + // V3 feature detection on session change + if !state.v3_ever_seen { + if let Some(session_index) = new_session { + state.v3_ever_seen = check_v3_feature(sender, 
activated.hash, session_index).await; + } } + + // Proactive PVF preparation + maybe_prepare_validation( + sender, + keystore.clone(), + validation_host, + activated, + &mut state.pvf_prep, + new_session, + ) + .await; } async fn maybe_prepare_validation( @@ -471,34 +586,28 @@ async fn maybe_prepare_validation( keystore: KeystorePtr, validation_backend: &mut impl ValidationBackend, leaf: ActivatedLeaf, - state: &mut PrepareValidationState, - new_session_index: Option, + pvf_prep: &mut PvfPrepState, + new_session: Option, ) where Sender: SubsystemSender, { - if new_session_index.is_some() { - state.session_index = new_session_index; - state.already_prepared_code_hashes.clear(); - state.is_next_session_authority = check_next_session_authority( - sender, - keystore, - leaf.hash, - state.session_index.expect("qed: just checked above"), - ) - .await; + if let Some(new_session_index) = new_session { + pvf_prep.already_prepared_code_hashes.clear(); + pvf_prep.is_next_session_authority = + check_next_session_authority(sender, keystore, leaf.hash, new_session_index).await; } // On every active leaf check candidates and prepare PVFs our node doesn't have yet. 
- if state.is_next_session_authority { + if pvf_prep.is_next_session_authority { let code_hashes = prepare_pvfs_for_backed_candidates( sender, validation_backend, leaf.hash, - &state.already_prepared_code_hashes, - state.per_block_limit, + &pvf_prep.already_prepared_code_hashes, + pvf_prep.per_block_limit, ) .await; - state.already_prepared_code_hashes.extend(code_hashes.unwrap_or_default()); + pvf_prep.already_prepared_code_hashes.extend(code_hashes.unwrap_or_default()); } } @@ -688,23 +797,18 @@ where Some(processed_code_hashes) } -async fn update_active_leaves( +async fn update_active_leaves_validation_backend( sender: &mut Sender, validation_backend: &mut impl ValidationBackend, update: ActiveLeavesUpdate, -) -> Option -where +) where Sender: SubsystemSender + SubsystemSender, { - let maybe_new_leaf = if let Some(activated) = &update.activated { - get_session_index(sender, activated.hash) - .await - .map(|index| (activated.hash, index)) + let ancestors = if let Some(ref activated) = update.activated { + get_block_ancestors(sender, activated.hash).await } else { - None + vec![] }; - - let ancestors = get_block_ancestors(sender, maybe_new_leaf).await; if let Err(err) = validation_backend.update_active_leaves(update, ancestors).await { gum::warn!( target: LOG_TARGET, @@ -712,31 +816,32 @@ where "cannot update active leaves in validation backend", ); }; - - maybe_new_leaf.map(|l| l.1) } -async fn get_block_ancestors( - sender: &mut Sender, - maybe_new_leaf: Option<(Hash, SessionIndex)>, -) -> Vec +/// Get list of still valid scheduling parents for the given leaf. 
+/// +/// TODO: This function does not take into account session boundaries, which leads to wasted effort: +/// https://github.com/paritytech/polkadot-sdk/issues/11301 +async fn get_block_ancestors(sender: &mut Sender, leaf: Hash) -> Vec where Sender: SubsystemSender + SubsystemSender, { - let Some((scheduling_parent, session_index)) = maybe_new_leaf else { return vec![] }; - let scheduling_lookahead = - match fetch_scheduling_lookahead(scheduling_parent, session_index, sender).await { - Ok(scheduling_lookahead) => scheduling_lookahead, - res => { - gum::warn!(target: LOG_TARGET, ?res, "Failed to request scheduling lookahead"); - return vec![]; - }, - }; + let Some(session_index) = get_session_index(sender, leaf).await else { + gum::warn!(target: LOG_TARGET, ?leaf, "Failed to request session index for leaf."); + return vec![]; + }; + let scheduling_lookahead = match fetch_scheduling_lookahead(leaf, session_index, sender).await { + Ok(scheduling_lookahead) => scheduling_lookahead, + res => { + gum::warn!(target: LOG_TARGET, ?res, "Failed to request scheduling lookahead"); + return vec![]; + }, + }; let (tx, rx) = oneshot::channel(); sender .send_message(ChainApiMessage::Ancestors { - hash: scheduling_parent, + hash: leaf, // Subtract 1 from the claim queue length, as it includes current `scheduling_parent`. 
k: scheduling_lookahead.saturating_sub(1) as usize, response_channel: tx, @@ -876,7 +981,6 @@ where } async fn validate_candidate_exhaustive( - expected_scheduling_session_index: SessionIndex, mut validation_backend: impl ValidationBackend + Send, persisted_validation_data: PersistedValidationData, validation_code: ValidationCode, @@ -885,12 +989,12 @@ async fn validate_candidate_exhaustive( executor_params: ExecutorParams, exec_kind: PvfExecKind, metrics: &Metrics, - maybe_claim_queue: Option, validation_code_bomb_limit: u32, + v3_seen: bool, + backing_extras: Option, ) -> Result { let _timer = metrics.time_validate_candidate_exhaustive(); let validation_code_hash = validation_code.hash(); - let relay_parent = candidate_receipt.descriptor.relay_parent(); let para_id = candidate_receipt.descriptor.para_id(); let candidate_hash = candidate_receipt.hash(); @@ -902,21 +1006,19 @@ async fn validate_candidate_exhaustive( "About to validate a candidate.", ); - // Validate the scheduling session during backing. The relay parent session - // check is left for later when we actually can: https://github.com/paritytech/polkadot-sdk/issues/11182 + // Backing-only: verify the descriptor's scheduling_session claim against + // the session index independently fetched from the runtime. + // The relay parent session check is left for later: + // https://github.com/paritytech/polkadot-sdk/issues/11182 // TODO: Properly check session index in the runtime: // https://github.com/paritytech/polkadot-sdk/issues/11033 - match (exec_kind, candidate_receipt.descriptor.scheduling_session()) { - ( - PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_), - Some(scheduling_session), - ) => { - if scheduling_session != expected_scheduling_session_index { + if let Some(BackingExtras { expected_scheduling_session, .. 
}) = &backing_extras { + if let Some(scheduling_session) = candidate_receipt.descriptor.scheduling_session() { + if scheduling_session != *expected_scheduling_session { return Ok(ValidationResult::Invalid(InvalidCandidate::InvalidSessionIndex)); } - }, - (_, _) => {}, - }; + } + } if let Err(e) = perform_basic_checks( &candidate_receipt.descriptor, @@ -937,6 +1039,7 @@ async fn validate_candidate_exhaustive( pov: pov.clone(), executor_params: executor_params.clone(), exec_timeout: pvf_exec_timeout(&executor_params, exec_kind.into()), + v3_seen, }; let result = match exec_kind { @@ -1068,37 +1171,21 @@ async fn validate_candidate_exhaustive( // invalid. Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch)) } else { - match exec_kind { - // Core selectors are optional for V2 descriptors, but we still check the - // descriptor core index. - PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_) => { - let Some(claim_queue) = maybe_claim_queue else { - let error = "cannot fetch the claim queue from the runtime"; - gum::warn!( - target: LOG_TARGET, - ?relay_parent, - error - ); - - return Err(ValidationFailed(error.into())); - }; - - if let Err(err) = committed_candidate_receipt - .parse_ump_signals(&transpose_claim_queue(claim_queue.0)) - { - gum::warn!( - target: LOG_TARGET, - candidate_hash = ?candidate_receipt.hash(), - "Invalid UMP signals: {}", - err - ); - return Ok(ValidationResult::Invalid( - InvalidCandidate::InvalidUMPSignals(err), - )); - } - }, - // No checks for approvals and disputes - _ => {}, + // Backing-only: validate UMP signals against the claim queue. + if let Some(BackingExtras { claim_queue, .. 
}) = &backing_extras { + if let Err(err) = committed_candidate_receipt + .parse_ump_signals(&transpose_claim_queue(claim_queue.0.clone())) + { + gum::warn!( + target: LOG_TARGET, + candidate_hash = ?candidate_receipt.hash(), + "Invalid UMP signals: {}", + err + ); + return Ok(ValidationResult::Invalid( + InvalidCandidate::InvalidUMPSignals(err), + )); + } } Ok(ValidationResult::Valid( @@ -1247,6 +1334,10 @@ trait ValidationBackend { async fn heads_up(&mut self, active_pvfs: Vec) -> Result<(), String>; + /// Inform the backend about active leaf changes + /// + /// Ancestors provided should match the still valid scheduling parents (implicit view) as of the + /// activated leaf. This is used for pruning queued jobs which became obsolete. async fn update_active_leaves( &mut self, update: ActiveLeavesUpdate, diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index cfeb686c2186a..6467849411b03 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -30,7 +30,7 @@ use polkadot_node_subsystem_test_helpers::{make_subsystem_context, TestSubsystem use polkadot_node_subsystem_util::reexports::SubsystemContext; use polkadot_overseer::ActivatedLeaf; use polkadot_primitives::{ - CandidateDescriptorV2, CandidateDescriptorVersion, ClaimQueueOffset, + CandidateDescriptorV2, CandidateDescriptorVersion, ClaimQueueOffset, NodeFeatures, CommittedCandidateReceiptError, CoreIndex, CoreSelector, GroupIndex, HeadData, Id as ParaId, MutateDescriptorV2, OccupiedCoreAssumption, SessionInfo, UMPSignal, UpwardMessage, ValidatorId, DEFAULT_SCHEDULING_LOOKAHEAD, UMP_SEPARATOR, @@ -544,7 +544,6 @@ fn candidate_validation_ok_is_ok(#[case] v2_descriptor: bool) { let _ = cq.insert(CoreIndex(1), vec![1.into(), 1.into()].into()); let v = executor::block_on(validate_candidate_exhaustive( - 1, 
MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), validation_data.clone(), validation_code, @@ -553,8 +552,12 @@ fn candidate_validation_ok_is_ok(#[case] v2_descriptor: bool) { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - Some(ClaimQueueSnapshot(cq)), VALIDATION_CODE_BOMB_LIMIT, + false, + Some(BackingExtras { + claim_queue: ClaimQueueSnapshot(cq), + expected_scheduling_session: 1, + }), )) .unwrap(); @@ -633,7 +636,6 @@ fn invalid_session_or_ump_signals() { [PvfExecKind::Backing(dummy_hash()), PvfExecKind::BackingSystemParas(dummy_hash())] { let err = executor::block_on(validate_candidate_exhaustive( - 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -642,8 +644,12 @@ fn invalid_session_or_ump_signals() { ExecutorParams::default(), exec_kind, &Default::default(), - Default::default(), VALIDATION_CODE_BOMB_LIMIT, + false, + Some(BackingExtras { + claim_queue: Default::default(), + expected_scheduling_session: 1, + }), )) .unwrap(); @@ -657,7 +663,6 @@ fn invalid_session_or_ump_signals() { [PvfExecKind::Backing(dummy_hash()), PvfExecKind::BackingSystemParas(dummy_hash())] { let result = executor::block_on(validate_candidate_exhaustive( - 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -666,8 +671,12 @@ fn invalid_session_or_ump_signals() { ExecutorParams::default(), exec_kind, &Default::default(), - Some(Default::default()), VALIDATION_CODE_BOMB_LIMIT, + false, + Some(BackingExtras { + claim_queue: Default::default(), + expected_scheduling_session: 1, + }), )) .unwrap(); assert_matches!( @@ -681,7 +690,6 @@ fn invalid_session_or_ump_signals() { // Validation doesn't fail for approvals and disputes, core/session index is not checked. 
for exec_kind in [PvfExecKind::Approval, PvfExecKind::Dispute] { let v = executor::block_on(validate_candidate_exhaustive( - 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -690,8 +698,9 @@ fn invalid_session_or_ump_signals() { ExecutorParams::default(), exec_kind, &Default::default(), - Default::default(), VALIDATION_CODE_BOMB_LIMIT, + false, + None, )) .unwrap(); @@ -714,7 +723,6 @@ fn invalid_session_or_ump_signals() { [PvfExecKind::Backing(dummy_hash()), PvfExecKind::BackingSystemParas(dummy_hash())] { let v = executor::block_on(validate_candidate_exhaustive( - 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -723,8 +731,12 @@ fn invalid_session_or_ump_signals() { ExecutorParams::default(), exec_kind, &Default::default(), - Some(ClaimQueueSnapshot(cq.clone())), VALIDATION_CODE_BOMB_LIMIT, + false, + Some(BackingExtras { + claim_queue: ClaimQueueSnapshot(cq.clone()), + expected_scheduling_session: 1, + }), )) .unwrap(); @@ -760,7 +772,6 @@ fn invalid_session_or_ump_signals() { [PvfExecKind::Backing(dummy_hash()), PvfExecKind::BackingSystemParas(dummy_hash())] { let result = executor::block_on(validate_candidate_exhaustive( - 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -769,8 +780,12 @@ fn invalid_session_or_ump_signals() { ExecutorParams::default(), exec_kind, &Default::default(), - Some(Default::default()), VALIDATION_CODE_BOMB_LIMIT, + false, + Some(BackingExtras { + claim_queue: Default::default(), + expected_scheduling_session: 1, + }), )) .unwrap(); assert_matches!( @@ -784,7 +799,6 @@ fn invalid_session_or_ump_signals() { // Validation doesn't fail for approvals and disputes, ump signals are not checked. 
for exec_kind in [PvfExecKind::Approval, PvfExecKind::Dispute] { let v = executor::block_on(validate_candidate_exhaustive( - 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -793,8 +807,9 @@ fn invalid_session_or_ump_signals() { ExecutorParams::default(), exec_kind, &Default::default(), - Default::default(), VALIDATION_CODE_BOMB_LIMIT, + false, + None, )) .unwrap(); @@ -834,7 +849,6 @@ fn invalid_session_or_ump_signals() { [PvfExecKind::Backing(dummy_hash()), PvfExecKind::BackingSystemParas(dummy_hash())] { let v = executor::block_on(validate_candidate_exhaustive( - 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -843,8 +857,12 @@ fn invalid_session_or_ump_signals() { ExecutorParams::default(), exec_kind, &Default::default(), - Some(ClaimQueueSnapshot(cq.clone())), VALIDATION_CODE_BOMB_LIMIT, + false, + Some(BackingExtras { + claim_queue: ClaimQueueSnapshot(cq.clone()), + expected_scheduling_session: 1, + }), )) .unwrap(); @@ -861,7 +879,6 @@ fn invalid_session_or_ump_signals() { // Validation also doesn't fail for approvals and disputes. 
for exec_kind in [PvfExecKind::Approval, PvfExecKind::Dispute] { let v = executor::block_on(validate_candidate_exhaustive( - 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -870,8 +887,9 @@ fn invalid_session_or_ump_signals() { ExecutorParams::default(), exec_kind, &Default::default(), - Some(ClaimQueueSnapshot(cq.clone())), VALIDATION_CODE_BOMB_LIMIT, + false, + None, )) .unwrap(); @@ -972,7 +990,6 @@ fn v3_descriptor_validation() { }; let result = executor::block_on(validate_candidate_exhaustive( - 1, MockValidateCandidateBackend::with_hardcoded_result(Ok( validation_result_with_signals.clone() )), @@ -983,8 +1000,12 @@ fn v3_descriptor_validation() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - Some(ClaimQueueSnapshot(cq.clone())), VALIDATION_CODE_BOMB_LIMIT, + false, + Some(BackingExtras { + claim_queue: ClaimQueueSnapshot(cq.clone()), + expected_scheduling_session: 1, + }), )) .unwrap(); @@ -1000,7 +1021,6 @@ fn v3_descriptor_validation() { }; let result = executor::block_on(validate_candidate_exhaustive( - 1, MockValidateCandidateBackend::with_hardcoded_result(Ok( validation_result_no_signals.clone() )), @@ -1011,8 +1031,12 @@ fn v3_descriptor_validation() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - Some(ClaimQueueSnapshot(cq.clone())), VALIDATION_CODE_BOMB_LIMIT, + false, + Some(BackingExtras { + claim_queue: ClaimQueueSnapshot(cq.clone()), + expected_scheduling_session: 1, + }), )) .unwrap(); @@ -1036,9 +1060,8 @@ fn v3_descriptor_validation() { commitments_hash: commitments_with_signals.hash(), }; - // Pass expected_scheduling_session_index=1, but descriptor claims 2 + // Pass expected_scheduling_session=1, but descriptor claims 2 let result = executor::block_on(validate_candidate_exhaustive( - 1, MockValidateCandidateBackend::with_hardcoded_result(Ok( 
validation_result_with_signals.clone() )), @@ -1049,8 +1072,12 @@ fn v3_descriptor_validation() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - Some(ClaimQueueSnapshot(cq.clone())), VALIDATION_CODE_BOMB_LIMIT, + false, + Some(BackingExtras { + claim_queue: ClaimQueueSnapshot(cq.clone()), + expected_scheduling_session: 1, + }), )) .unwrap(); @@ -1069,9 +1096,8 @@ fn v3_descriptor_validation() { commitments_hash: commitments_with_signals.hash(), }; - // Pass expected_scheduling_session_index=2 matching descriptor's claim + // Pass expected_scheduling_session=2 matching descriptor's claim let result = executor::block_on(validate_candidate_exhaustive( - 2, MockValidateCandidateBackend::with_hardcoded_result(Ok( validation_result_with_signals.clone() )), @@ -1082,8 +1108,12 @@ fn v3_descriptor_validation() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - Some(ClaimQueueSnapshot(cq.clone())), VALIDATION_CODE_BOMB_LIMIT, + false, + Some(BackingExtras { + claim_queue: ClaimQueueSnapshot(cq.clone()), + expected_scheduling_session: 2, + }), )) .unwrap(); @@ -1103,7 +1133,6 @@ fn v3_descriptor_validation() { for exec_kind in [PvfExecKind::Approval, PvfExecKind::Dispute] { let result = executor::block_on(validate_candidate_exhaustive( - 1, // mismatched, but should be ignored for non-backing MockValidateCandidateBackend::with_hardcoded_result(Ok( validation_result_with_signals.clone(), )), @@ -1114,8 +1143,9 @@ fn v3_descriptor_validation() { ExecutorParams::default(), exec_kind, &Default::default(), - Some(ClaimQueueSnapshot(cq.clone())), VALIDATION_CODE_BOMB_LIMIT, + false, + None, // No backing extras: session/UMP checks are skipped )) .unwrap(); @@ -1154,7 +1184,6 @@ fn candidate_validation_bad_return_is_invalid() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let v = executor::block_on(validate_candidate_exhaustive( - 1, 
MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( WasmInvalidCandidate::HardTimeout, ))), @@ -1165,8 +1194,9 @@ fn candidate_validation_bad_return_is_invalid() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - Default::default(), VALIDATION_CODE_BOMB_LIMIT, + false, + None, // Backing extras not needed: test exercises error path )) .unwrap(); @@ -1238,7 +1268,6 @@ fn candidate_validation_one_ambiguous_error_is_valid() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; let v = executor::block_on(validate_candidate_exhaustive( - 1, MockValidateCandidateBackend::with_hardcoded_result_list(vec![ Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), Ok(validation_result), @@ -1250,8 +1279,9 @@ fn candidate_validation_one_ambiguous_error_is_valid() { ExecutorParams::default(), PvfExecKind::Approval, &Default::default(), - Default::default(), VALIDATION_CODE_BOMB_LIMIT, + false, + None, )) .unwrap(); @@ -1282,7 +1312,6 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let v = executor::block_on(validate_candidate_exhaustive( - 1, MockValidateCandidateBackend::with_hardcoded_result_list(vec![ Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), @@ -1294,8 +1323,9 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() { ExecutorParams::default(), PvfExecKind::Approval, &Default::default(), - Default::default(), VALIDATION_CODE_BOMB_LIMIT, + false, + None, )) .unwrap(); @@ -1403,7 +1433,6 @@ fn candidate_validation_retry_on_error_helper( let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; return executor::block_on(validate_candidate_exhaustive( - 1, 
MockValidateCandidateBackend::with_hardcoded_result_list(mock_errors), validation_data, validation_code, @@ -1412,8 +1441,9 @@ fn candidate_validation_retry_on_error_helper( ExecutorParams::default(), exec_kind, &Default::default(), - Default::default(), VALIDATION_CODE_BOMB_LIMIT, + false, + None, // Tests error/retry paths, backing extras not needed )); } @@ -1447,7 +1477,6 @@ fn candidate_validation_timeout_is_internal_error() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let v = executor::block_on(validate_candidate_exhaustive( - 1, MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( WasmInvalidCandidate::HardTimeout, ))), @@ -1458,8 +1487,9 @@ fn candidate_validation_timeout_is_internal_error() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - Default::default(), VALIDATION_CODE_BOMB_LIMIT, + false, + None, // Backing extras not needed: test exercises error path )); assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))); @@ -1499,7 +1529,6 @@ fn candidate_validation_commitment_hash_mismatch_is_invalid() { }; let result = executor::block_on(validate_candidate_exhaustive( - 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), validation_data, validation_code, @@ -1508,8 +1537,9 @@ fn candidate_validation_commitment_hash_mismatch_is_invalid() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - Default::default(), VALIDATION_CODE_BOMB_LIMIT, + false, + None, // Backing extras not needed: test exercises commitments mismatch path )) .unwrap(); @@ -1550,7 +1580,6 @@ fn candidate_validation_code_mismatch_is_invalid() { let (_ctx, _ctx_handle) = make_subsystem_context::(pool.clone()); let v = executor::block_on(validate_candidate_exhaustive( - 1, MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( WasmInvalidCandidate::HardTimeout, ))), @@ 
-1561,8 +1590,9 @@ fn candidate_validation_code_mismatch_is_invalid() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - Default::default(), VALIDATION_CODE_BOMB_LIMIT, + false, + None, // Backing extras not needed: test exercises code mismatch path )) .unwrap(); @@ -1614,7 +1644,6 @@ fn compressed_code_works() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; let v = executor::block_on(validate_candidate_exhaustive( - 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), validation_data, validation_code, @@ -1623,8 +1652,12 @@ fn compressed_code_works() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - Some(Default::default()), VALIDATION_CODE_BOMB_LIMIT, + false, + Some(BackingExtras { + claim_queue: Default::default(), + expected_scheduling_session: 1, + }), )); assert_matches!(v, Ok(ValidationResult::Valid(_, _))); @@ -1936,7 +1969,7 @@ fn maybe_prepare_validation_golden_path() { let mut backend = MockHeadsUp::default(); let activated_hash = Hash::random(); let update = dummy_active_leaves_update(activated_hash); - let mut state = PrepareValidationState::default(); + let mut state = State::default(); let check_fut = handle_active_leaves_update(ctx.sender(), keystore, &mut backend, update, &mut state); @@ -1944,6 +1977,13 @@ fn maybe_prepare_validation_golden_path() { let test_fut = async move { assert_new_active_leaf_messages(&mut ctx_handle, 1).await; + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, tx))) => { + let _ = tx.send(Ok(NodeFeatures::new())); + } + ); + assert_matches!( ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::Authorities(tx))) => { @@ -2010,7 +2050,7 @@ fn maybe_prepare_validation_golden_path() { assert_eq!(backend.heads_up_call_count.load(Ordering::SeqCst), 1); 
assert!(state.session_index.is_some()); - assert!(state.is_next_session_authority); + assert!(state.pvf_prep.is_next_session_authority); } #[test] @@ -2022,11 +2062,7 @@ fn maybe_prepare_validation_checkes_authority_once_per_session() { let mut backend = MockHeadsUp::default(); let activated_hash = Hash::random(); let update = dummy_active_leaves_update(activated_hash); - let mut state = PrepareValidationState { - session_index: Some(1), - is_next_session_authority: false, - ..Default::default() - }; + let mut state = State { session_index: Some(1), ..Default::default() }; let check_fut = handle_active_leaves_update(ctx.sender(), keystore, &mut backend, update, &mut state); @@ -2038,7 +2074,7 @@ fn maybe_prepare_validation_checkes_authority_once_per_session() { assert_eq!(backend.heads_up_call_count.load(Ordering::SeqCst), 0); assert!(state.session_index.is_some()); - assert!(!state.is_next_session_authority); + assert!(!state.pvf_prep.is_next_session_authority); } #[test] @@ -2050,10 +2086,15 @@ fn maybe_prepare_validation_resets_state_on_a_new_session() { let mut backend = MockHeadsUp::default(); let activated_hash = Hash::random(); let update = dummy_active_leaves_update(activated_hash); - let mut state = PrepareValidationState { + let mut state = State { session_index: Some(1), - is_next_session_authority: true, - already_prepared_code_hashes: HashSet::from_iter(vec![ValidationCode(vec![0; 16]).hash()]), + pvf_prep: PvfPrepState { + is_next_session_authority: true, + already_prepared_code_hashes: HashSet::from_iter(vec![ + ValidationCode(vec![0; 16]).hash(), + ]), + ..Default::default() + }, ..Default::default() }; @@ -2063,6 +2104,13 @@ fn maybe_prepare_validation_resets_state_on_a_new_session() { let test_fut = async move { assert_new_active_leaf_messages(&mut ctx_handle, 2).await; + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, tx))) => { + let _ = 
tx.send(Ok(NodeFeatures::new())); + } + ); + assert_matches!( ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::Authorities(tx))) => { @@ -2084,8 +2132,8 @@ fn maybe_prepare_validation_resets_state_on_a_new_session() { assert_eq!(backend.heads_up_call_count.load(Ordering::SeqCst), 0); assert_eq!(state.session_index.unwrap(), 2); - assert!(!state.is_next_session_authority); - assert!(state.already_prepared_code_hashes.is_empty()); + assert!(!state.pvf_prep.is_next_session_authority); + assert!(state.pvf_prep.already_prepared_code_hashes.is_empty()); } #[test] @@ -2097,7 +2145,7 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_no_new_session_and_not_a_va let mut backend = MockHeadsUp::default(); let activated_hash = Hash::random(); let update = dummy_active_leaves_update(activated_hash); - let mut state = PrepareValidationState { session_index: Some(1), ..Default::default() }; + let mut state = State { session_index: Some(1), ..Default::default() }; let check_fut = handle_active_leaves_update(ctx.sender(), keystore, &mut backend, update, &mut state); @@ -2109,7 +2157,7 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_no_new_session_and_not_a_va assert_eq!(backend.heads_up_call_count.load(Ordering::SeqCst), 0); assert!(state.session_index.is_some()); - assert!(!state.is_next_session_authority); + assert!(!state.pvf_prep.is_next_session_authority); } #[test] @@ -2121,9 +2169,9 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_no_new_session_but_a_valida let mut backend = MockHeadsUp::default(); let activated_hash = Hash::random(); let update = dummy_active_leaves_update(activated_hash); - let mut state = PrepareValidationState { + let mut state = State { session_index: Some(1), - is_next_session_authority: true, + pvf_prep: PvfPrepState { is_next_session_authority: true, ..Default::default() }, ..Default::default() }; @@ -2184,7 +2232,7 @@ fn 
maybe_prepare_validation_does_not_prepare_pvfs_if_no_new_session_but_a_valida assert_eq!(backend.heads_up_call_count.load(Ordering::SeqCst), 1); assert!(state.session_index.is_some()); - assert!(state.is_next_session_authority); + assert!(state.pvf_prep.is_next_session_authority); } #[test] @@ -2196,7 +2244,7 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_not_a_validator_in_the_next let mut backend = MockHeadsUp::default(); let activated_hash = Hash::random(); let update = dummy_active_leaves_update(activated_hash); - let mut state = PrepareValidationState::default(); + let mut state = State::default(); let check_fut = handle_active_leaves_update(ctx.sender(), keystore, &mut backend, update, &mut state); @@ -2204,6 +2252,13 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_not_a_validator_in_the_next let test_fut = async move { assert_new_active_leaf_messages(&mut ctx_handle, 1).await; + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, tx))) => { + let _ = tx.send(Ok(NodeFeatures::new())); + } + ); + assert_matches!( ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::Authorities(tx))) => { @@ -2225,7 +2280,7 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_not_a_validator_in_the_next assert_eq!(backend.heads_up_call_count.load(Ordering::SeqCst), 0); assert!(state.session_index.is_some()); - assert!(!state.is_next_session_authority); + assert!(!state.pvf_prep.is_next_session_authority); } #[test] @@ -2237,7 +2292,7 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_a_validator_in_the_current_ let mut backend = MockHeadsUp::default(); let activated_hash = Hash::random(); let update = dummy_active_leaves_update(activated_hash); - let mut state = PrepareValidationState::default(); + let mut state = State::default(); let check_fut = handle_active_leaves_update(ctx.sender(), keystore, &mut backend, update, &mut 
state); @@ -2245,6 +2300,13 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_a_validator_in_the_current_ let test_fut = async move { assert_new_active_leaf_messages(&mut ctx_handle, 1).await; + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, tx))) => { + let _ = tx.send(Ok(NodeFeatures::new())); + } + ); + assert_matches!( ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::Authorities(tx))) => { @@ -2266,7 +2328,7 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_a_validator_in_the_current_ assert_eq!(backend.heads_up_call_count.load(Ordering::SeqCst), 0); assert!(state.session_index.is_some()); - assert!(!state.is_next_session_authority); + assert!(!state.pvf_prep.is_next_session_authority); } #[test] @@ -2278,7 +2340,10 @@ fn maybe_prepare_validation_prepares_a_limited_number_of_pvfs() { let mut backend = MockHeadsUp::default(); let activated_hash = Hash::random(); let update = dummy_active_leaves_update(activated_hash); - let mut state = PrepareValidationState { per_block_limit: 2, ..Default::default() }; + let mut state = State { + pvf_prep: PvfPrepState { per_block_limit: 2, ..Default::default() }, + ..Default::default() + }; let check_fut = handle_active_leaves_update(ctx.sender(), keystore, &mut backend, update, &mut state); @@ -2286,6 +2351,13 @@ fn maybe_prepare_validation_prepares_a_limited_number_of_pvfs() { let test_fut = async move { assert_new_active_leaf_messages(&mut ctx_handle, 1).await; + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, tx))) => { + let _ = tx.send(Ok(NodeFeatures::new())); + } + ); + assert_matches!( ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::Authorities(tx))) => { @@ -2359,8 +2431,8 @@ fn 
maybe_prepare_validation_prepares_a_limited_number_of_pvfs() { assert_eq!(backend.heads_up_call_count.load(Ordering::SeqCst), 1); assert!(state.session_index.is_some()); - assert!(state.is_next_session_authority); - assert_eq!(state.already_prepared_code_hashes.len(), 2); + assert!(state.pvf_prep.is_next_session_authority); + assert_eq!(state.pvf_prep.already_prepared_code_hashes.len(), 2); } #[test] @@ -2372,14 +2444,17 @@ fn maybe_prepare_validation_does_not_prepare_already_prepared_pvfs() { let mut backend = MockHeadsUp::default(); let activated_hash = Hash::random(); let update = dummy_active_leaves_update(activated_hash); - let mut state = PrepareValidationState { + let mut state = State { session_index: Some(1), - is_next_session_authority: true, - per_block_limit: 2, - already_prepared_code_hashes: HashSet::from_iter(vec![ - ValidationCode(vec![0; 16]).hash(), - ValidationCode(vec![1; 16]).hash(), - ]), + pvf_prep: PvfPrepState { + is_next_session_authority: true, + per_block_limit: 2, + already_prepared_code_hashes: HashSet::from_iter(vec![ + ValidationCode(vec![0; 16]).hash(), + ValidationCode(vec![1; 16]).hash(), + ]), + }, + ..Default::default() }; let check_fut = @@ -2444,6 +2519,6 @@ fn maybe_prepare_validation_does_not_prepare_already_prepared_pvfs() { assert_eq!(backend.heads_up_call_count.load(Ordering::SeqCst), 1); assert!(state.session_index.is_some()); - assert!(state.is_next_session_authority); - assert_eq!(state.already_prepared_code_hashes.len(), 3); + assert!(state.pvf_prep.is_next_session_authority); + assert_eq!(state.pvf_prep.already_prepared_code_hashes.len(), 3); } diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index 643c92655e20e..85d26157d0155 100644 --- a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -44,9 +44,10 @@ use polkadot_node_subsystem_util::{ 
ControlledValidatorIndices, }; use polkadot_primitives::{ - slashing, BlockNumber, CandidateHash, CandidateReceiptV2 as CandidateReceipt, CompactStatement, - DisputeStatement, DisputeStatementSet, Hash, ScrapedOnChainVotes, SessionIndex, - ValidDisputeStatementKind, ValidatorId, ValidatorIndex, + node_features::FeatureIndex, slashing, BlockNumber, CandidateHash, + CandidateReceiptV2 as CandidateReceipt, CompactStatement, DisputeStatement, + DisputeStatementSet, Hash, ScrapedOnChainVotes, SessionIndex, ValidDisputeStatementKind, + ValidatorId, ValidatorIndex, }; use schnellru::{LruMap, UnlimitedCompact}; @@ -120,6 +121,12 @@ pub(crate) struct Initialized { /// `CHAIN_IMPORT_MAX_BATCH_SIZE` and put the rest here for later processing. chain_import_backlog: VecDeque, metrics: Metrics, + /// Monotonic flag: set to `true` once any activated leaf has the V3 candidate + /// descriptor node feature enabled. Once set, never unset. + /// Used to determine whether scraped on-chain votes should use V3 descriptor + /// semantics or fall back to old rules. + /// See `CandidateDescriptorV2::version_for_approval_dispute` for the safety argument. + v3_ever_seen: bool, } #[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)] @@ -153,6 +160,7 @@ impl Initialized { participation_receiver, chain_import_backlog: VecDeque::new(), metrics, + v3_ever_seen: false, } } @@ -364,7 +372,7 @@ impl Initialized { self.offchain_disabled_validators.prune_old(prune_up_to); }, Ok(_) => { /* no new session => nothing to cache */ }, - Err(err) => { + Err(ref err) => { gum::debug!( target: LOG_TARGET, ?err, @@ -373,6 +381,33 @@ impl Initialized { }, } + // Check for the V3 node feature after the session caching loop, + // so get_session_info_by_index hits the LRU cache (no extra runtime + // round-trip). This runs on every activated leaf while !v3_ever_seen, + // because on startup the session is already cached but v3_ever_seen + // starts as false. 
+ // TODO: This is not sufficient - we skip the check on the _very_ first leaf before + // initialized! + if !self.v3_ever_seen { + if let Ok(idx) = session_idx { + if let Ok(info) = self + .runtime_info + .get_session_info_by_index(ctx.sender(), new_leaf.hash, idx) + .await + { + if FeatureIndex::CandidateReceiptV3.is_set(&info.node_features) { + gum::info!( + target: LOG_TARGET, + session_idx = idx, + "CandidateReceiptV3 node feature detected in \ + dispute-coordinator", + ); + self.v3_ever_seen = true; + } + } + } + } + let ScrapedUpdates { unapplied_slashes, on_chain_votes, .. } = scraped_updates; self.process_unapplied_slashes(ctx, new_leaf.hash, unapplied_slashes).await; @@ -603,13 +638,20 @@ impl Initialized { // Scraped on-chain backing votes for the candidates with // the new active leaf as if we received them via gossip. for (candidate_receipt, backers) in backing_validators_per_candidate { + // TODO: NO RELAY PARENT! let relay_parent = candidate_receipt.descriptor.relay_parent(); - // For V2/V3: Get scheduling session and parent from descriptor - // For V1: These methods return None/relay_parent, fall back to message session - let scheduling_session = - candidate_receipt.descriptor.scheduling_session().unwrap_or(session); - let scheduling_parent = candidate_receipt.descriptor.scheduling_parent(); + // Use transition-safe descriptor methods for scheduling context. + // Before the V3 node feature is seen, these fall back to old-rules + // behavior to match old backers and prevent slashing. + // See `CandidateDescriptorV2::version_for_approval_dispute`. 
+ let scheduling_session = candidate_receipt + .descriptor + .scheduling_session_for_approval_dispute(self.v3_ever_seen) + .unwrap_or(session); + let scheduling_parent = candidate_receipt + .descriptor + .scheduling_parent_for_approval_dispute(self.v3_ever_seen); // Backing validators are from the scheduling context // Fetch session info using scheduling_parent as the runtime API context diff --git a/polkadot/node/core/pvf/common/src/execute.rs b/polkadot/node/core/pvf/common/src/execute.rs index 3d76bba26536c..a278447598e56 100644 --- a/polkadot/node/core/pvf/common/src/execute.rs +++ b/polkadot/node/core/pvf/common/src/execute.rs @@ -42,6 +42,13 @@ pub struct ValidationContext { pub executor_params: ExecutorParams, /// Execution timeout pub exec_timeout: Duration, + /// Whether the `CandidateReceiptV3` node feature has ever been seen enabled. + /// + /// During the V3 transition period, this flag determines whether to trust + /// `descriptor.version()` or fall back to `descriptor.version_old_rules()` + /// for approval/dispute validations. + /// See `CandidateDescriptorV2::version_for_approval_dispute`. + pub v3_seen: bool, } impl ValidationContext { @@ -50,14 +57,20 @@ impl ValidationContext { self.candidate_receipt.descriptor.relay_parent() } - /// Get the scheduling parent hash from the candidate descriptor + /// Get the scheduling parent hash, using transition-safe logic. + // TODO: This is using _for_approval_dispute, but is also used in backing context. + // Might be fine, but: + // 1. Definitely needs a renaming then. + // 2. We should remove the special casing in the other cases then too. pub fn scheduling_parent(&self) -> Hash { - self.candidate_receipt.descriptor.scheduling_parent() + self.candidate_receipt + .descriptor + .scheduling_parent_for_approval_dispute(self.v3_seen) } - /// Get the candidate descriptor version + /// Get the effective candidate descriptor version, using transition-safe logic.
pub fn descriptor_version(&self) -> CandidateDescriptorVersion { - self.candidate_receipt.descriptor.version() + self.candidate_receipt.descriptor.version_for_approval_dispute(self.v3_seen) } /// Convert to an ExecuteRequest for sending to the worker. diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index aff047ae5857c..e2dba55e0ed1c 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -903,7 +903,9 @@ mod tests { use polkadot_node_core_pvf_common::execute::ValidationContext; use polkadot_node_primitives::{BlockData, PoV}; use polkadot_node_subsystem_test_helpers::mock::new_leaf; - use polkadot_primitives::{ExecutorParams, PersistedValidationData}; + use polkadot_primitives::{ + CandidateReceiptV2 as CandidateReceipt, ExecutorParams, PersistedValidationData, + }; use polkadot_primitives_test_helpers::dummy_candidate_receipt; use sp_core::H256; use std::sync::Arc; @@ -923,12 +925,14 @@ mod tests { let pov = Arc::new(PoV { block_data: BlockData(b"pov".to_vec()) }); let candidate_receipt = dummy_candidate_receipt(H256::default()); + let candidate_receipt: CandidateReceipt = candidate_receipt.into(); let validation_context = ValidationContext { - candidate_receipt: candidate_receipt.into(), + candidate_receipt, pvd, pov, executor_params: ExecutorParams::default(), exec_timeout: Duration::from_secs(10), + v3_seen: false, }; ExecuteJob { @@ -1096,12 +1100,15 @@ mod tests { assert_eq!(queue.unscheduled.unscheduled.values().map(|x| x.len()).sum::(), 0); let mut result_rxs = vec![]; let (result_tx, _result_rx) = oneshot::channel(); + let relevant_candidate: CandidateReceipt = + dummy_candidate_receipt(relevant_relay_parent).into(); let relevant_validation_context = ValidationContext { - candidate_receipt: dummy_candidate_receipt(relevant_relay_parent).into(), + candidate_receipt: relevant_candidate, pvd: Arc::new(PersistedValidationData::default()), pov: 
Arc::new(PoV { block_data: BlockData(Vec::new()) }), executor_params: ExecutorParams::default(), exec_timeout: Duration::from_secs(1), + v3_seen: false, }; let relevant_job = ExecuteJob { artifact: ArtifactPathId { @@ -1117,12 +1124,15 @@ mod tests { queue.unscheduled.add(relevant_job, Priority::Backing); for _ in 0..10 { let (result_tx, result_rx) = oneshot::channel(); + let expired_candidate: CandidateReceipt = + dummy_candidate_receipt(old_relay_parent).into(); let expired_validation_context = ValidationContext { - candidate_receipt: dummy_candidate_receipt(old_relay_parent).into(), + candidate_receipt: expired_candidate, pvd: Arc::new(PersistedValidationData::default()), pov: Arc::new(PoV { block_data: BlockData(Vec::new()) }), executor_params: ExecutorParams::default(), exec_timeout: Duration::from_secs(1), + v3_seen: false, }; let expired_job = ExecuteJob { artifact: ArtifactPathId { diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index a0d619f727f8d..39298b07dedc7 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -1035,7 +1035,9 @@ pub(crate) mod tests { use futures::future::BoxFuture; use polkadot_node_core_pvf_common::execute::ValidationContext; use polkadot_node_primitives::{BlockData, PoV}; - use polkadot_primitives::{ExecutorParams, PersistedValidationData}; + use polkadot_primitives::{ + CandidateReceiptV2 as CandidateReceipt, ExecutorParams, PersistedValidationData, + }; use polkadot_primitives_test_helpers::dummy_candidate_receipt; use sp_core::H256; use std::sync::Arc; @@ -1238,12 +1240,15 @@ pub(crate) mod tests { pvd: Arc, pov: Arc, ) -> ValidationContext { + let candidate_receipt: CandidateReceipt = + dummy_candidate_receipt(H256::default()).into(); ValidationContext { - candidate_receipt: dummy_candidate_receipt(H256::default()).into(), + candidate_receipt, pvd, pov, executor_params: ExecutorParams::default(), exec_timeout: TEST_EXECUTION_TIMEOUT, + v3_seen: false, } 
} diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index ab37ba7fac408..a9bb1484ee116 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -118,12 +118,15 @@ impl TestHost { ) -> Result { let (result_tx, result_rx) = futures::channel::oneshot::channel(); + let candidate_receipt: polkadot_primitives::CandidateReceiptV2 = + dummy_candidate_receipt(relay_parent).into(); let validation_context = ValidationContext { - candidate_receipt: dummy_candidate_receipt(relay_parent).into(), + candidate_receipt, pvd: Arc::new(pvd), pov: Arc::new(pov), executor_params: executor_params.clone(), exec_timeout: TEST_EXECUTION_TIMEOUT, + v3_seen: false, }; self.host diff --git a/polkadot/primitives/src/v9/mod.rs b/polkadot/primitives/src/v9/mod.rs index 312e32f1d30f6..8f8af88428a67 100644 --- a/polkadot/primitives/src/v9/mod.rs +++ b/polkadot/primitives/src/v9/mod.rs @@ -1977,9 +1977,15 @@ impl> CandidateDescriptorV2 { /// The safety invariant is maintained by the runtime and backing /// subsystem: they reject candidates where `version()` and /// `version_old_rules()` disagree when V3 is not yet enabled, and - /// reject V3 candidates outright when V3 is not enabled. This ensures - /// that any on-chain candidate has an unambiguous version, so approval - /// checkers and dispute participants never need to look up node features. + /// reject V3 candidates outright when V3 is not enabled. + /// + /// During the V3 transition, approval checkers, dispute participants, + /// and on-chain vote scrapers must use [`Self::version_for_approval_dispute`] + /// (and the corresponding `scheduling_parent_for_approval_dispute` / + /// `scheduling_session_for_approval_dispute`) instead of `version()` + /// directly. This ensures they match old backer semantics before the V3 + /// node feature is confirmed enabled. See those methods for the full + /// safety argument. 
pub fn version(&self) -> CandidateDescriptorVersion { self.v3_version() } @@ -2202,6 +2208,67 @@ impl> CandidateDescriptorV2 { CandidateDescriptorVersion::Unknown => None, } } + + /// Version for use in approval checking, disputes, and on-chain vote scraping + /// during the V3 transition period. + /// + /// Before the `CandidateReceiptV3` node feature is observed on any leaf, + /// uses [`Self::version_old_rules`] to match old backer behavior. After + /// the feature is seen, trusts [`Self::version`]. + /// + /// This prevents slashing honest old backers when a malicious collator crafts + /// a pseudo-V3 descriptor that old nodes interpret as V1 but new nodes would + /// interpret as V3 (different PVF inputs → dispute → 100% slash). + /// + /// Safety argument: The node feature can only be enabled well after the runtime + /// upgrade that adds `check_version_acceptance()` protection at inclusion time. + /// Once the feature is seen on any leaf, the runtime has long been rejecting + /// pseudo-V3 candidates, so no ambiguous candidates can exist on-chain. + /// + /// Only needed during the V3 transition. Once V3 is universally deployed, + /// callers can switch to [`Self::version`] directly. + pub fn version_for_approval_dispute( + &self, + v3_ever_seen: bool, + ) -> CandidateDescriptorVersion { + if v3_ever_seen { + self.version() + } else { + self.version_old_rules() + } + } + + /// Scheduling parent for use in approval checking, disputes, and on-chain + /// vote scraping during the V3 transition period. + /// + /// See [`Self::version_for_approval_dispute`] for the safety argument. + pub fn scheduling_parent_for_approval_dispute(&self, v3_ever_seen: bool) -> H + where + H: Copy, + { + match self.version_for_approval_dispute(v3_ever_seen) { + CandidateDescriptorVersion::V3 => self.scheduling_parent, + _ => self.relay_parent, + } + } + + /// Scheduling session for use in approval checking, disputes, and on-chain + /// vote scraping during the V3 transition period. 
+ /// + /// See [`Self::version_for_approval_dispute`] for the safety argument. + pub fn scheduling_session_for_approval_dispute( + &self, + v3_ever_seen: bool, + ) -> Option { + match self.version_for_approval_dispute(v3_ever_seen) { + CandidateDescriptorVersion::V1 => None, + CandidateDescriptorVersion::V2 => Some(self.session_index), + CandidateDescriptorVersion::V3 => { + Some(self.session_index.saturating_add(self.scheduling_session_offset as _)) + }, + CandidateDescriptorVersion::Unknown => None, + } + } } impl core::fmt::Debug for CandidateDescriptorV2 From 3de0a616081bad03d3d5f3115ea8479618f6505c Mon Sep 17 00:00:00 2001 From: eskimor Date: Mon, 9 Mar 2026 22:42:31 +0100 Subject: [PATCH 04/52] Fixes towards the final design. --- .../node/core/candidate-validation/src/lib.rs | 49 +++++++++---------- .../dispute-coordinator/src/initialized.rs | 4 +- polkadot/node/core/pvf/common/src/execute.rs | 4 +- polkadot/primitives/src/v9/mod.rs | 34 ++++++------- 4 files changed, 44 insertions(+), 47 deletions(-) diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index a7e6a83975fc2..5b60d3b2701de 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -193,20 +193,13 @@ async fn fetch_bomb_limit( where Sender: SubsystemSender, { - // For approval/dispute, use the transition-safe scheduling parent - // to match old backer behavior before V3 is confirmed enabled. - // Backing uses its own v3_ever_seen + check_version_acceptance() gate. // NOTE: As noted above, even looking at the scheduling parent in disputes context should be // suspicious normally! 
- let scheduling_parent = match exec_kind { - PvfExecKind::Approval | PvfExecKind::Dispute => { - candidate_descriptor.scheduling_parent_for_approval_dispute(v3_ever_seen) - }, - _ => candidate_descriptor.scheduling_parent(), - }; + let scheduling_parent = + candidate_descriptor.scheduling_parent_for_candidate_validation(v3_ever_seen); let scheduling_session = - match candidate_descriptor.scheduling_session_for_approval_dispute(v3_ever_seen) { + match candidate_descriptor.scheduling_session_for_candidate_validation(v3_ever_seen) { Some(session) => session, None => { // NOTE: This is depending on scheduling parent state to still be around! @@ -261,22 +254,26 @@ where } => async move { let _timer = metrics.time_validate_from_exhaustive(); - let validation_code_bomb_limit = - match fetch_bomb_limit(&candidate_receipt.descriptor, exec_kind, v3_ever_seen, &mut sender) - .await - { - Ok(limit) => limit, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - scheduling_parent = ?candidate_receipt.descriptor.scheduling_parent(), - ?err, - "Failed to fetch validation code bomb limit", - ); - let _ = response_sender.send(Err(ValidationFailed(err))); - return; - }, - }; + let validation_code_bomb_limit = match fetch_bomb_limit( + &candidate_receipt.descriptor, + exec_kind, + v3_ever_seen, + &mut sender, + ) + .await + { + Ok(limit) => limit, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + scheduling_parent = ?candidate_receipt.descriptor.scheduling_parent(), + ?err, + "Failed to fetch validation code bomb limit", + ); + let _ = response_sender.send(Err(ValidationFailed(err))); + return; + }, + }; // --- Backing-only extras --- // Stricter checks that backing performs but approval/dispute can diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index 85d26157d0155..3d0df23b36a06 100644 --- a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ 
b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -647,11 +647,11 @@ impl Initialized { // See `CandidateDescriptorV2::version_for_approval_dispute`. let scheduling_session = candidate_receipt .descriptor - .scheduling_session_for_approval_dispute(self.v3_ever_seen) + .scheduling_session_for_candidate_validation(self.v3_ever_seen) .unwrap_or(session); let scheduling_parent = candidate_receipt .descriptor - .scheduling_parent_for_approval_dispute(self.v3_ever_seen); + .scheduling_parent_for_candidate_validation(self.v3_ever_seen); // Backing validators are from the scheduling context // Fetch session info using scheduling_parent as the runtime API context diff --git a/polkadot/node/core/pvf/common/src/execute.rs b/polkadot/node/core/pvf/common/src/execute.rs index a278447598e56..a4ab0e4be65cd 100644 --- a/polkadot/node/core/pvf/common/src/execute.rs +++ b/polkadot/node/core/pvf/common/src/execute.rs @@ -65,12 +65,12 @@ impl ValidationContext { pub fn scheduling_parent(&self) -> Hash { self.candidate_receipt .descriptor - .scheduling_parent_for_approval_dispute(self.v3_seen) + .scheduling_parent_for_candidate_validation(self.v3_seen) } /// Get the effective candidate descriptor version, using transition-safe logic. pub fn descriptor_version(&self) -> CandidateDescriptorVersion { - self.candidate_receipt.descriptor.version_for_approval_dispute(self.v3_seen) + self.candidate_receipt.descriptor.version_for_candidate_validation(self.v3_seen) } /// Convert to an ExecuteRequest for sending to the worker. 
diff --git a/polkadot/primitives/src/v9/mod.rs b/polkadot/primitives/src/v9/mod.rs index 8f8af88428a67..b6353dca86fef 100644 --- a/polkadot/primitives/src/v9/mod.rs +++ b/polkadot/primitives/src/v9/mod.rs @@ -2063,14 +2063,14 @@ impl> CandidateDescriptorV2 { impl CandidateDescriptorV2 { fn v3_version(&self) -> CandidateDescriptorVersion { - // Reduce checked bits for v1 signficiantly to make more bytes easier + // Reduce checked bits for v1 significantly to make more bytes easier // usable in future upgrades. 16 bytes is 32 hexadecimal digits which // must all be 0 by accident to cause any issues. Bitcoin hardest // difficulty so far has been 24 digits/12 bytes // // Impact if it still happened would also be fairly minimal: We would // drop a parachain block, which is not a big deal on v1, where we are - // not aiming for perfect block confidence yet.. + // not aiming for perfect block confidence yet. let new_v1_detected = self.reserved1[0..16] != [0u8; 16]; if new_v1_detected { @@ -2209,10 +2209,11 @@ impl> CandidateDescriptorV2 { } } - /// Version for use in approval checking, disputes, and on-chain vote scraping + /// Version for use in candidate validation + /// (approval checking, disputes, and on-chain vote scraping) /// during the V3 transition period. /// - /// Before the `CandidateReceiptV3` node feature is observed on any leaf, + /// Before the `CandidateReceiptV3` node feature is observed on a finalized block, /// uses [`Self::version_old_rules`] to match old backer behavior. After /// the feature is seen, trusts [`Self::version`]. /// @@ -2220,14 +2221,15 @@ impl> CandidateDescriptorV2 { /// a pseudo-V3 descriptor that old nodes interpret as V1 but new nodes would /// interpret as V3 (different PVF inputs → dispute → 100% slash). /// - /// Safety argument: The node feature can only be enabled well after the runtime - /// upgrade that adds `check_version_acceptance()` protection at inclusion time.
- /// Once the feature is seen on any leaf, the runtime has long been rejecting - /// pseudo-V3 candidates, so no ambiguous candidates can exist on-chain. + /// Safety argument: The node feature can only be enabled well after the runtime upgrade that + /// adds `check_version_acceptance()` protection at inclusion time. Once the feature is seen on + /// any leaf, the runtime has long been upgraded and already rejecting pseudo-V3 candidates + /// (candidates that are valid v1 under the old rules, but are v3 without UMP signals under the + /// new rules), so no ambiguous candidates can exist on-chain. /// /// Only needed during the V3 transition. Once V3 is universally deployed, /// callers can switch to [`Self::version`] directly. - pub fn version_for_approval_dispute( + pub fn version_for_candidate_validation( &self, v3_ever_seen: bool, ) -> CandidateDescriptorVersion { @@ -2238,29 +2240,27 @@ impl> CandidateDescriptorV2 { } } - /// Scheduling parent for use in approval checking, disputes, and on-chain - /// vote scraping during the V3 transition period. + /// Scheduling parent for use in candidate validation. /// /// See [`Self::version_for_approval_dispute`] for the safety argument. - pub fn scheduling_parent_for_approval_dispute(&self, v3_ever_seen: bool) -> H + pub fn scheduling_parent_for_candidate_validation(&self, v3_ever_seen: bool) -> H where H: Copy, { - match self.version_for_approval_dispute(v3_ever_seen) { + match self.version_for_candidate_validation(v3_ever_seen) { CandidateDescriptorVersion::V3 => self.scheduling_parent, _ => self.relay_parent, } } - /// Scheduling session for use in approval checking, disputes, and on-chain - /// vote scraping during the V3 transition period. + /// Scheduling session for use in candidate validation. /// /// See [`Self::version_for_approval_dispute`] for the safety argument.
- pub fn scheduling_session_for_approval_dispute( + pub fn scheduling_session_for_candidate_validation( &self, v3_ever_seen: bool, ) -> Option { - match self.version_for_approval_dispute(v3_ever_seen) { + match self.version_for_candidate_validation(v3_ever_seen) { CandidateDescriptorVersion::V1 => None, CandidateDescriptorVersion::V2 => Some(self.session_index), CandidateDescriptorVersion::V3 => { From ca9e0dbb4c7eeb768ae16a6bd2670343231c7d1a Mon Sep 17 00:00:00 2001 From: eskimor Date: Tue, 10 Mar 2026 00:18:30 +0100 Subject: [PATCH 05/52] Use finalized blocks for node feature detection in backing --- polkadot/node/core/backing/src/lib.rs | 61 ++++++++++++++----- .../node/core/candidate-validation/src/lib.rs | 39 ++++++------ .../dispute-coordinator/src/initialized.rs | 2 +- polkadot/primitives/src/v9/mod.rs | 18 +++--- 4 files changed, 73 insertions(+), 47 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index fb624bd87076b..dda63e535b6c9 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -450,17 +450,16 @@ struct State { background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, /// The handle to the keystore used for signing. keystore: KeystorePtr, - /// Monotonic flag: set to `true` once any activated leaf has the V3 candidate - /// descriptor feature enabled. Once set, never unset. Used for V3 gating checks - /// in backing — if V3 was never seen, reject V3 candidates and candidates where - /// old/new version detection disagrees. + /// Monotonic flag: set to `true` once a **finalized** block is observed whose + /// session has the `CandidateReceiptV3` node feature enabled. Once set, never + /// unset. Used for V3 gating checks in backing — if V3 was never seen, reject + /// V3 candidates and candidates where old/new version detection disagrees. 
/// - /// Note: In theory a reorg could revert a leaf where V3 was enabled, making this - /// flag temporarily inaccurate. This is acceptable because: - /// 1. The runtime performs the same check and is always correct. - /// 2. The worst case is the backer signs a statement the runtime later rejects — the candidate - /// simply won't be included, no slashing occurs. - /// 3. This is an extremely short-lived edge case during reorgs. + /// Backing uses finalized blocks (rather than any active leaf) to ensure that + /// new backers do not start producing V3 candidates before a supermajority of + /// validators (including approval checkers) are aware of the feature. Approval + /// and dispute validation use active leaves instead, so they always transition + /// *before* backing does — the safe direction. v3_ever_seen: bool, } @@ -534,7 +533,11 @@ async fn run_iteration( state, ).await?; } - FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {} + FromOrchestra::Signal(OverseerSignal::BlockFinalized(hash, _number)) => { + if !state.v3_ever_seen { + check_v3_on_finalized(&mut *ctx, state, hash).await?; + } + } FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), FromOrchestra::Communication { msg } => { handle_communication(&mut *ctx, state, msg, metrics).await?; @@ -1066,9 +1069,6 @@ async fn handle_active_leaves_update( .await?; if let Some(per) = per { - if !state.v3_ever_seen && FeatureIndex::CandidateReceiptV3.is_set(&per.node_features) { - state.v3_ever_seen = true; - } state.per_scheduling_parent.insert(maybe_new, per); } } @@ -1076,6 +1076,39 @@ async fn handle_active_leaves_update( Ok(()) } +/// Check whether the `CandidateReceiptV3` node feature is enabled at the session +/// of the given finalized block. Sets `state.v3_ever_seen` if so. 
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn check_v3_on_finalized( + ctx: &mut Context, + state: &mut State, + finalized_hash: Hash, +) -> Result<(), Error> { + let session_index = request_session_index_for_child(finalized_hash, ctx.sender()) + .await + .await + .map_err(runtime::Error::from)? + .map_err(runtime::Error::from)?; + + let node_features = state + .per_session_cache + .node_features(session_index, finalized_hash, ctx.sender()) + .await + .map_err(runtime::Error::from)?; + + if FeatureIndex::CandidateReceiptV3.is_set(&node_features) { + gum::info!( + target: LOG_TARGET, + ?session_index, + "CandidateReceiptV3 node feature detected in finalized block, \ + enabling V3 candidate support", + ); + state.v3_ever_seen = true; + } + + Ok(()) +} + macro_rules! try_runtime_api { ($x: expr) => { match $x { diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 5b60d3b2701de..d31156e1343c4 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -186,7 +186,6 @@ where /// increased values, all that matters is consensus. 
async fn fetch_bomb_limit( candidate_descriptor: &CandidateDescriptor, - exec_kind: PvfExecKind, v3_ever_seen: bool, sender: &mut Sender, ) -> Result @@ -254,26 +253,22 @@ where } => async move { let _timer = metrics.time_validate_from_exhaustive(); - let validation_code_bomb_limit = match fetch_bomb_limit( - &candidate_receipt.descriptor, - exec_kind, - v3_ever_seen, - &mut sender, - ) - .await - { - Ok(limit) => limit, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - scheduling_parent = ?candidate_receipt.descriptor.scheduling_parent(), - ?err, - "Failed to fetch validation code bomb limit", - ); - let _ = response_sender.send(Err(ValidationFailed(err))); - return; - }, - }; + let validation_code_bomb_limit = + match fetch_bomb_limit(&candidate_receipt.descriptor, v3_ever_seen, &mut sender) + .await + { + Ok(limit) => limit, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + scheduling_parent = ?candidate_receipt.descriptor.scheduling_parent(), + ?err, + "Failed to fetch validation code bomb limit", + ); + let _ = response_sender.send(Err(ValidationFailed(err))); + return; + }, + }; // --- Backing-only extras --- // Stricter checks that backing performs but approval/dispute can @@ -477,7 +472,7 @@ struct State { /// descriptor node feature enabled. Once set, never unset. /// Used to determine whether approval/dispute validation should trust /// `version()` (V3-capable) or fall back to `version_old_rules()`. - /// See `CandidateDescriptorV2::version_for_approval_dispute` for the safety argument. + /// See `CandidateDescriptorV2::version_for_candidate_validation` for the safety argument. v3_ever_seen: bool, /// PVF preparation state (proactive pre-compilation for next session). 
pvf_prep: PvfPrepState, diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index 3d0df23b36a06..b96948b2c2282 100644 --- a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -125,7 +125,7 @@ pub(crate) struct Initialized { /// descriptor node feature enabled. Once set, never unset. /// Used to determine whether scraped on-chain votes should use V3 descriptor /// semantics or fall back to old rules. - /// See `CandidateDescriptorV2::version_for_approval_dispute` for the safety argument. + /// See `CandidateDescriptorV2::version_for_candidate_validation` for the safety argument. v3_ever_seen: bool, } diff --git a/polkadot/primitives/src/v9/mod.rs b/polkadot/primitives/src/v9/mod.rs index b6353dca86fef..8f8538142ceb9 100644 --- a/polkadot/primitives/src/v9/mod.rs +++ b/polkadot/primitives/src/v9/mod.rs @@ -2209,23 +2209,21 @@ impl> CandidateDescriptorV2 { } } - /// Version for use in candidate validation + /// Version for use in candidate validation during the V3 transition period. /// - /// during the V3 transition period. - /// - /// Before the `CandidateReceiptV3` node feature is observed on a finalized block, - /// uses [`Self::version_old_rules`] to match old backer behavior. After - /// the feature is seen, trusts [`Self::version`]. + /// Before the `CandidateReceiptV3` node feature is observed, uses + /// [`Self::version_old_rules`] to match old backer behavior. After the feature + /// is seen, trusts [`Self::version`]. /// /// This prevents slashing honest old backers when a malicious collator crafts /// a pseudo-V3 descriptor that old nodes interpret as V1 but new nodes would /// interpret as V3 (different PVF inputs → dispute → 100% slash). /// /// Safety argument: The node feature can only be enabled well after the runtime upgrade that - /// adds `check_version_acceptance()` protection at inclusion time. 
Once the feature is seen on - /// any leaf, the runtime has long been upgraded and already rejecting pseudo-V3 candidates - /// (candidates that are valid v1 under the old rules, but are v3 without UMP signals under the - /// new rules), so no ambiguous candidates can exist on-chain. + /// adds `check_version_acceptance()` protection at inclusion time. Once the feature is seen, + /// the runtime has long been upgraded and already rejecting pseudo-V3 candidates (candidates + /// that are valid v1 under the old rules, but are v3 without UMP signals under the new + /// rules), so no ambiguous candidates can exist on-chain. /// /// Only needed during the V3 transition. Once V3 is universally deployed, /// callers can switch to [`Self::version`] directly. From a6be54f2bdfa70556ed6298c478e36dfa041ea0f Mon Sep 17 00:00:00 2001 From: eskimor Date: Tue, 10 Mar 2026 09:44:08 +0100 Subject: [PATCH 06/52] Handle first leaf properly in dispute coordinator --- .../dispute-coordinator/src/initialized.rs | 30 ++- .../core/dispute-coordinator/src/tests.rs | 194 +++++++++++++++++- 2 files changed, 214 insertions(+), 10 deletions(-) diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index b96948b2c2282..cdc5975b450ae 100644 --- a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -206,6 +206,32 @@ impl Initialized { if let Some(InitialData { participations, votes: on_chain_votes, leaf: first_leaf }) = initial_data.take() { + // Check V3 on the first leaf *before* processing on-chain votes. + // Session info is already cached from handle_startup, so this hits the LRU. + // Without this, v3_ever_seen would still be false when process_chain_import_backlog + // runs, causing V3 candidates to be misinterpreted as V1. 
+ if !self.v3_ever_seen { + if let Ok(info) = self + .runtime_info + .get_session_info_by_index( + ctx.sender(), + first_leaf.hash, + self.highest_session_seen, + ) + .await + { + if FeatureIndex::CandidateReceiptV3.is_set(&info.node_features) { + gum::info!( + target: LOG_TARGET, + session_idx = self.highest_session_seen, + "CandidateReceiptV3 node feature detected on first leaf in \ + dispute-coordinator", + ); + self.v3_ever_seen = true; + } + } + } + for (priority, request) in participations { self.participation.queue_participation(ctx, priority, request).await?; } @@ -386,8 +412,8 @@ impl Initialized { // round-trip). This runs on every activated leaf while !v3_ever_seen, // because on startup the session is already cached but v3_ever_seen // starts as false. - // TODO: This is not sufficient - we skip the check on the _very_ first leaf before - // initialized! + // Note: The very first leaf is handled separately in run_until_error + // before process_chain_import_backlog. if !self.v3_ever_seen { if let Ok(idx) = session_idx { if let Ok(info) = self diff --git a/polkadot/node/core/dispute-coordinator/src/tests.rs b/polkadot/node/core/dispute-coordinator/src/tests.rs index 94068030458ea..3dad43e873c6b 100644 --- a/polkadot/node/core/dispute-coordinator/src/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/tests.rs @@ -65,6 +65,7 @@ use polkadot_primitives::{ GroupIndex, Hash, HeadData, Header, IndexedVec, MultiDisputeStatementSet, MutateDescriptorV2, NodeFeatures, ScrapedOnChainVotes, SessionIndex, SessionInfo, SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, + ValidityAttestation, }; use polkadot_primitives_test_helpers::{ dummy_candidate_receipt_v2_bad_sig, dummy_digest, dummy_hash, @@ -180,6 +181,11 @@ struct TestState { last_block: Hash, // last session the subsystem knows about. known_session: Option, + /// When true, node features will include `CandidateReceiptV3` during session caching. 
+ v3_node_features: bool, + /// Optional on-chain votes to return from `FetchOnChainVotes` on the first leaf. + /// When `Some`, the backing_validators_per_candidate from this will be used instead of empty. + initial_on_chain_votes: Option, } impl Default for TestState { @@ -249,6 +255,8 @@ impl Default for TestState { block_num_to_header, last_block, known_session: None, + v3_node_features: false, + initial_on_chain_votes: None, } } } @@ -364,7 +372,16 @@ impl TestState { AllMessages::RuntimeApi( RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) ) => { - si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + let features = if self.v3_node_features { + use polkadot_primitives::node_features::FeatureIndex; + let mut f = NodeFeatures::new(); + f.resize(FeatureIndex::CandidateReceiptV3 as usize + 1, false); + f.set(FeatureIndex::CandidateReceiptV3 as usize, true); + f + } else { + NodeFeatures::EMPTY + }; + si_tx.send(Ok(features)).unwrap(); } ); } @@ -397,13 +414,13 @@ impl TestState { _new_leaf, RuntimeApiRequest::FetchOnChainVotes(tx), )) => { - // add some `BackedCandidates` or resolved disputes here as needed - tx.send(Ok(Some(ScrapedOnChainVotes { - session, - backing_validators_per_candidate: Vec::default(), - disputes: MultiDisputeStatementSet::default(), - }))) - .unwrap(); + let votes = + self.initial_on_chain_votes.take().unwrap_or(ScrapedOnChainVotes { + session, + backing_validators_per_candidate: Vec::default(), + disputes: MultiDisputeStatementSet::default(), + }); + tx.send(Ok(Some(votes))).unwrap(); }, AllMessages::RuntimeApi(RuntimeApiMessage::Request( _new_leaf, @@ -642,6 +659,39 @@ fn make_valid_candidate_receipt() -> CandidateReceipt { make_another_valid_candidate_receipt(dummy_hash()) } +/// Create a V3 candidate receipt with a distinct scheduling_parent. +/// +/// V3 candidates have `version=1`, `reserved1[0..16]` all zeros, and `scheduling_parent` +/// different from `relay_parent`. 
Built from raw to avoid dummy collator bytes polluting +/// the reserved1 field (which would cause version detection to return V1). +fn make_v3_candidate_receipt( + relay_parent: Hash, + scheduling_parent: Hash, + session_index: SessionIndex, +) -> CandidateReceipt { + use polkadot_primitives::CandidateDescriptorV2; + let descriptor = CandidateDescriptorV2::new_from_raw( + 0.into(), // para_id + relay_parent, // relay_parent + 1, // version = V3 + 0, // core_index + session_index, // session_index + 0, // scheduling_session_offset + [0u8; 24], // reserved1 + dummy_hash(), // persisted_validation_data_hash + dummy_hash(), // pov_hash + dummy_hash(), // erasure_root + scheduling_parent, // scheduling_parent + [0u8; 32], // reserved2 + dummy_hash(), // para_head + polkadot_primitives_test_helpers::dummy_validation_code().hash(), // validation_code_hash + ); + CandidateReceipt { + descriptor, + commitments_hash: CandidateCommitments::default().hash(), + } +} + fn make_invalid_candidate_receipt() -> CandidateReceipt { dummy_candidate_receipt_v2_bad_sig(Default::default(), Some(Default::default())) } @@ -652,6 +702,69 @@ fn make_another_valid_candidate_receipt(relay_parent: Hash) -> CandidateReceipt candidate_receipt } +impl TestState { + /// Create a backing `ValidityAttestation` for a candidate, signed with the given + /// `scheduling_parent` as the signing context's parent hash. + /// + /// This mirrors how real backers sign: they use the scheduling parent (not relay + /// parent) in the signing context. 
+ fn make_backing_attestation( + &self, + candidate_hash: CandidateHash, + validator_index: ValidatorIndex, + session: SessionIndex, + scheduling_parent: Hash, + ) -> ValidityAttestation { + let keystore = self.master_keystore.clone() as KeystorePtr; + let validator_id = self.validators[validator_index.0 as usize].public().into(); + let context = + SigningContext { session_index: session, parent_hash: scheduling_parent }; + + let statement = SignedFullStatement::sign( + &keystore, + Statement::Valid(candidate_hash), + &context, + validator_index, + &validator_id, + ) + .unwrap() + .unwrap(); + + let sig = statement.signature().clone(); + ValidityAttestation::Explicit(sig) + } +} + +/// Create on-chain votes with a V3 candidate that has backing attestations signed +/// with the real scheduling_parent. +fn make_v3_on_chain_votes( + test_state: &TestState, + session: SessionIndex, + relay_parent: Hash, + scheduling_parent: Hash, +) -> ScrapedOnChainVotes { + let candidate_receipt = + make_v3_candidate_receipt(relay_parent, scheduling_parent, session); + let candidate_hash = candidate_receipt.hash(); + + // Create a valid backing attestation signed with scheduling_parent + let attestation = test_state.make_backing_attestation( + candidate_hash, + ValidatorIndex(0), // Alice + session, + scheduling_parent, + ); + + ScrapedOnChainVotes { + session, + backing_validators_per_candidate: vec![( + candidate_receipt, + vec![(ValidatorIndex(0), attestation)], + )], + disputes: MultiDisputeStatementSet::default(), + } +} + // Generate a `CandidateBacked` event from a `CandidateReceipt`. The rest is dummy data. fn make_candidate_backed_event(candidate_receipt: CandidateReceipt) -> CandidateEvent { CandidateEvent::CandidateBacked( @@ -4724,3 +4837,68 @@ fn disputes_unactivated_when_all_raising_parties_disabled() { }) }); } + +/// Regression test: V3 candidate on the very first leaf must be handled correctly. 
+/// +/// Before the fix, `Initialized` was created with `v3_ever_seen = false` and +/// `process_chain_import_backlog` (which processes on-chain backing votes from the +/// first leaf) ran before `process_active_leaves_update` could set `v3_ever_seen`. +/// This caused V3 candidates to be misinterpreted as V1, using `relay_parent` instead +/// of `scheduling_parent` in the backing signature context — breaking the debug_assert. +#[test] +fn v3_candidate_on_first_leaf_is_detected_correctly() { + let mut test_state = TestState::default(); + test_state.v3_node_features = true; + + // Add two more blocks after the genesis (which is created in `default()`) + let h1 = Header { + parent_hash: test_state.last_block, + number: 1, + digest: dummy_digest(), + state_root: dummy_hash(), + extrinsics_root: dummy_hash(), + }; + let h1_hash = h1.hash(); + test_state.headers.insert(h1_hash, h1); + test_state.block_num_to_header.insert(1, h1_hash); + test_state.last_block = h1_hash; + + let h2 = Header { + parent_hash: test_state.last_block, + number: 2, + digest: dummy_digest(), + state_root: dummy_hash(), + extrinsics_root: dummy_hash(), + }; + let h2_hash = h2.hash(); + test_state.headers.insert(h2_hash, h2); + test_state.block_num_to_header.insert(2, h2_hash); + test_state.last_block = h2_hash; + + let session = 1; + // Use a relay_parent from the test state and a distinct scheduling_parent + let relay_parent = h1_hash; + let scheduling_parent = Hash::repeat_byte(0xBB); + + // Prepare V3 on-chain votes with backing attestation signed using scheduling_parent + test_state.initial_on_chain_votes = Some(make_v3_on_chain_votes( + &test_state, + session, + relay_parent, + scheduling_parent, + )); + + test_state.resume(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + // Process all initial leaves — this will feed the V3 on-chain votes + // through process_chain_import_backlog on the first leaf. 
+ // With the fix, v3_ever_seen is set before processing on-chain votes, + // so scheduling_parent is used correctly in signature verification. + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + + test_state + }) + }); +} From 7c28da26ef0d9a66ca11e41afd16402b99fd31e0 Mon Sep 17 00:00:00 2001 From: eskimor Date: Tue, 10 Mar 2026 10:11:00 +0100 Subject: [PATCH 07/52] Add test for subsequent leaves too. --- .../core/dispute-coordinator/src/tests.rs | 139 ++++++++++++++++-- 1 file changed, 125 insertions(+), 14 deletions(-) diff --git a/polkadot/node/core/dispute-coordinator/src/tests.rs b/polkadot/node/core/dispute-coordinator/src/tests.rs index 3dad43e873c6b..86b794d4ff594 100644 --- a/polkadot/node/core/dispute-coordinator/src/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/tests.rs @@ -4838,19 +4838,12 @@ fn disputes_unactivated_when_all_raising_parties_disabled() { }); } -/// Regression test: V3 candidate on the very first leaf must be handled correctly. -/// -/// Before the fix, `Initialized` was created with `v3_ever_seen = false` and -/// `process_chain_import_backlog` (which processes on-chain backing votes from the -/// first leaf) ran before `process_active_leaves_update` could set `v3_ever_seen`. -/// This caused V3 candidates to be misinterpreted as V1, using `relay_parent` instead -/// of `scheduling_parent` in the backing signature context — breaking the debug_assert. -#[test] -fn v3_candidate_on_first_leaf_is_detected_correctly() { +/// Set up a `TestState` with two extra blocks (h1, h2) and V3 on-chain votes ready. +/// Returns `(test_state, relay_parent, scheduling_parent)`. 
+fn setup_v3_test_state() -> (TestState, Hash, Hash) { let mut test_state = TestState::default(); - test_state.v3_node_features = true; - // Add two more blocks after the genesis (which is created in `default()`) + // Add two blocks after genesis (which is created in `default()`) let h1 = Header { parent_hash: test_state.last_block, number: 1, @@ -4875,12 +4868,25 @@ fn v3_candidate_on_first_leaf_is_detected_correctly() { test_state.block_num_to_header.insert(2, h2_hash); test_state.last_block = h2_hash; - let session = 1; - // Use a relay_parent from the test state and a distinct scheduling_parent let relay_parent = h1_hash; let scheduling_parent = Hash::repeat_byte(0xBB); - // Prepare V3 on-chain votes with backing attestation signed using scheduling_parent + (test_state, relay_parent, scheduling_parent) +} + +/// Regression test: V3 candidate on the very first leaf must be handled correctly. +/// +/// Before the fix, `Initialized` was created with `v3_ever_seen = false` and +/// `process_chain_import_backlog` (which processes on-chain backing votes from the +/// first leaf) ran before `process_active_leaves_update` could set `v3_ever_seen`. +/// This caused V3 candidates to be misinterpreted as V1, using `relay_parent` instead +/// of `scheduling_parent` in the backing signature context — breaking the debug_assert. +#[test] +fn v3_candidate_on_first_leaf_is_detected_correctly() { + let (mut test_state, relay_parent, scheduling_parent) = setup_v3_test_state(); + test_state.v3_node_features = true; + + let session = 1; test_state.initial_on_chain_votes = Some(make_v3_on_chain_votes( &test_state, session, @@ -4902,3 +4908,108 @@ fn v3_candidate_on_first_leaf_is_detected_correctly() { }) }); } + +/// Test: V3 node feature activated on a subsequent leaf (not the first) is detected +/// correctly via `process_active_leaves_update`. +/// +/// Starts with V3 OFF (session 1), then activates a new leaf at session 2 with V3 ON. 
+/// The V3 on-chain votes on that subsequent leaf must be processed with the correct +/// signing context (scheduling_parent, not relay_parent). +#[test] +fn v3_candidate_on_subsequent_leaf_is_detected_correctly() { + let (test_state, relay_parent, scheduling_parent) = setup_v3_test_state(); + // V3 is OFF on startup — the first leaf uses session 1 without V3. + assert!(!test_state.v3_node_features); + + let startup_session = 1; + + test_state.resume(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + test_state.handle_resume_sync(&mut virtual_overseer, startup_session).await; + + // Now enable V3 node features for session 2. + test_state.v3_node_features = true; + let new_session = 2; + + // Prepare V3 on-chain votes that will be returned on the next leaf's + // FetchOnChainVotes query (via handle_sync_queries' FetchOnChainVotes arm). + test_state.initial_on_chain_votes = Some(make_v3_on_chain_votes( + &test_state, + new_session, + relay_parent, + scheduling_parent, + )); + + // Activate a leaf at session 2. handle_sync_queries will handle the + // scraper messages and SessionIndexForChild, but NOT the session caching + // for the new session (since known_session is already Some). + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + new_session, + 3, + Vec::new(), + ) + .await; + + // Manually handle session caching messages for the new session. + // The production code's process_active_leaves_update caches sessions + // [highest_session_seen+1 ..= new_session], which is just session 2. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionInfo(session_index, tx), + )) => { + assert_eq!(session_index, new_session); + let _ = tx.send(Ok(Some(test_state.session_info()))); + } + ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionExecutorParams(session_index, tx), + )) => { + assert_eq!(session_index, new_session); + let _ = tx.send(Ok(Some(ExecutorParams::default()))); + } + ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + // Return V3-enabled features — this is what triggers v3_ever_seen + // in the production code's subsequent-leaf check. + use polkadot_primitives::node_features::FeatureIndex; + let mut f = NodeFeatures::new(); + f.resize(FeatureIndex::CandidateReceiptV3 as usize + 1, false); + f.set(FeatureIndex::CandidateReceiptV3 as usize, true); + si_tx.send(Ok(f)).unwrap(); + } + ); + + // Handle DisabledValidators request from CandidateEnvironment::new + // during process_chain_import_backlog's vote import. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::DisabledValidators(tx), + )) => { + tx.send(Ok(Vec::new())).unwrap(); + } + ); + + // If v3_ever_seen was NOT set, process_chain_import_backlog would use + // relay_parent instead of scheduling_parent for signature verification, + // causing a debug_assert failure. The test passing means the subsequent- + // leaf V3 detection works correctly. 
+ + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + + test_state + }) + }); +} From 27e441afb46409b97cffbc5d8340c83d7858d695 Mon Sep 17 00:00:00 2001 From: eskimor Date: Tue, 10 Mar 2026 14:31:59 +0100 Subject: [PATCH 08/52] candidate validation tests. --- .../core/candidate-validation/src/tests.rs | 159 ++++++++++++++++++ polkadot/primitives/src/v9/mod.rs | 38 +++++ 2 files changed, 197 insertions(+) diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 6467849411b03..de6097985b5ed 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -1958,6 +1958,15 @@ async fn assert_new_active_leaf_messages( let _ = response_channel.send(Ok((0..(lookahead_value - 1)).into_iter().map(|i| Hash::from_low_u64_be(i as u64)).collect())); } ); + + // Second SessionIndexForChild — from handle_active_leaves_update's own + // get_session_index call (separate from the one in update_active_leaves_validation_backend). + assert_matches!( + recv_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx))) => { + let _ = tx.send(Ok(expected_session_index)); + } + ); } #[test] @@ -2522,3 +2531,153 @@ fn maybe_prepare_validation_does_not_prepare_already_prepared_pvfs() { assert!(state.pvf_prep.is_next_session_authority); assert_eq!(state.pvf_prep.already_prepared_code_hashes.len(), 3); } + +/// Verify that a V3 descriptor is interpreted differently depending on `v3_ever_seen`. +/// +/// Before V3 activation: old rules apply — V3 descriptors appear as V1, so +/// `scheduling_parent` falls back to `relay_parent`. +/// +/// After V3 activation: new rules apply — V3 descriptors are correctly identified, +/// so `scheduling_parent` returns the real scheduling parent from the descriptor. 
+/// +/// Verify that `handle_active_leaves_update` correctly detects V3 node features on +/// session changes and sets `v3_ever_seen` accordingly. +/// +/// Scenario: +/// 1. First leaf at session 1, V3 OFF → `v3_ever_seen` stays false +/// 2. Second leaf at session 2, V3 ON → `v3_ever_seen` becomes true +/// 3. Third leaf at session 2 (same session) → no re-check (monotonic flag) +#[test] +fn v3_feature_detected_on_session_change() { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool); + + let keystore = alice_keystore(); + let mut backend = MockHeadsUp::default(); + let mut state = State::default(); + + // --- Leaf 1: session 1, V3 feature NOT enabled --- + let leaf1_hash = Hash::repeat_byte(0x01); + let update1 = dummy_active_leaves_update(leaf1_hash); + + let check_fut = + handle_active_leaves_update(ctx.sender(), keystore.clone(), &mut backend, update1, &mut state); + + let test_fut = async move { + // Standard leaf activation messages + assert_new_active_leaf_messages(&mut ctx_handle, 1).await; + + // NodeFeatures request — return EMPTY (no V3) + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::NodeFeatures(_, tx), + )) => { + let _ = tx.send(Ok(NodeFeatures::new())); + } + ); + + // Authorities (PVF prep) — return empty so we skip PVF prep + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::Authorities(tx), + )) => { + let _ = tx.send(Ok(vec![])); + } + ); + + // SessionInfo (check_next_session_authority always fetches this) + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionInfo(idx, tx), + )) => { + assert_eq!(idx, 1); + let _ = tx.send(Ok(Some(dummy_session_info(vec![])))); + } + ); + + ctx_handle + }; + + let (test_fut, check_fut) = (test_fut, check_fut); + let (mut ctx_handle, _) 
= executor::block_on(future::join(test_fut, check_fut)); + + assert_eq!(state.session_index, Some(1)); + assert!(!state.v3_ever_seen, "V3 should not be detected yet"); + + // --- Leaf 2: session 2, V3 feature ENABLED --- + let leaf2_hash = Hash::repeat_byte(0x02); + let update2 = dummy_active_leaves_update(leaf2_hash); + + let check_fut = + handle_active_leaves_update(ctx.sender(), keystore.clone(), &mut backend, update2, &mut state); + + let test_fut = async move { + assert_new_active_leaf_messages(&mut ctx_handle, 2).await; + + // NodeFeatures request — return V3 ENABLED + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::NodeFeatures(_, tx), + )) => { + let mut features = NodeFeatures::new(); + features.resize(FeatureIndex::CandidateReceiptV3 as usize + 1, false); + features.set(FeatureIndex::CandidateReceiptV3 as usize, true); + let _ = tx.send(Ok(features)); + } + ); + + // Authorities + SessionInfo for PVF prep + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::Authorities(tx), + )) => { + let _ = tx.send(Ok(vec![])); + } + ); + + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionInfo(idx, tx), + )) => { + assert_eq!(idx, 2); + let _ = tx.send(Ok(Some(dummy_session_info(vec![])))); + } + ); + + ctx_handle + }; + + let (mut ctx_handle, _) = executor::block_on(future::join(test_fut, check_fut)); + + assert_eq!(state.session_index, Some(2)); + assert!(state.v3_ever_seen, "V3 should be detected now"); + + // --- Leaf 3: same session 2, no new session → no V3 re-check --- + let leaf3_hash = Hash::repeat_byte(0x03); + let update3 = dummy_active_leaves_update(leaf3_hash); + + let check_fut = + handle_active_leaves_update(ctx.sender(), keystore, &mut backend, update3, &mut state); + + let test_fut = async move { + // Same session — only the 
standard leaf messages, no NodeFeatures query + assert_new_active_leaf_messages(&mut ctx_handle, 2).await; + }; + + executor::block_on(future::join(test_fut, check_fut)); + + assert_eq!(state.session_index, Some(2)); + assert!(state.v3_ever_seen, "V3 flag is monotonic — stays true"); +} diff --git a/polkadot/primitives/src/v9/mod.rs b/polkadot/primitives/src/v9/mod.rs index 8f8538142ceb9..f454432dadf32 100644 --- a/polkadot/primitives/src/v9/mod.rs +++ b/polkadot/primitives/src/v9/mod.rs @@ -3368,6 +3368,44 @@ pub mod tests { assert!(desc.check_version_acceptance(true).is_ok()); } + #[test] + fn v3_feature_activation_changes_descriptor_interpretation() { + let desc = make_v3_descriptor(); + + // Sanity: the descriptor IS V3 under new rules but looks like V1 under old rules. + assert_eq!(desc.version(), CandidateDescriptorVersion::V3); + assert_eq!(desc.version_old_rules(), CandidateDescriptorVersion::V1); + + // Before V3 activation: descriptor is treated as V1 — relay_parent is used. + assert_eq!( + desc.version_for_candidate_validation(false), + CandidateDescriptorVersion::V1, + ); + assert_eq!( + desc.scheduling_parent_for_candidate_validation(false), + Hash::repeat_byte(1), // relay_parent + ); + assert_eq!( + desc.scheduling_session_for_candidate_validation(false), + None, + "V1 has no embedded session — must be fetched from runtime", + ); + + // After V3 activation: descriptor is correctly identified as V3. + assert_eq!( + desc.version_for_candidate_validation(true), + CandidateDescriptorVersion::V3, + ); + assert_eq!( + desc.scheduling_parent_for_candidate_validation(true), + Hash::repeat_byte(7), // scheduling_parent + ); + assert_eq!( + desc.scheduling_session_for_candidate_validation(true), + Some(1), // session_index from descriptor, offset=0 + ); + } + #[test] fn check_version_acceptance_ambiguous_scheduling_parent_nonzero() { // Descriptor with scheduling_parent non-zero but version=0. 
From 1574e03ea40190178b992594121d9be67a3dbfb9 Mon Sep 17 00:00:00 2001 From: eskimor Date: Tue, 10 Mar 2026 15:15:34 +0100 Subject: [PATCH 09/52] Good chunk of relay parent cleanup in disputes (More to come) --- .../core/dispute-coordinator/src/import.rs | 13 +++++-- .../dispute-coordinator/src/initialized.rs | 36 ++++++++++--------- .../node/core/dispute-coordinator/src/lib.rs | 6 ++-- .../src/participation/queues/mod.rs | 20 +++++------ 4 files changed, 42 insertions(+), 33 deletions(-) diff --git a/polkadot/node/core/dispute-coordinator/src/import.rs b/polkadot/node/core/dispute-coordinator/src/import.rs index 8861d719eedfb..6ad42a7a26a5a 100644 --- a/polkadot/node/core/dispute-coordinator/src/import.rs +++ b/polkadot/node/core/dispute-coordinator/src/import.rs @@ -65,20 +65,27 @@ impl<'a> CandidateEnvironment<'a> { ctx: &mut Context, runtime_info: &'a mut RuntimeInfo, session_index: SessionIndex, - relay_parent: Hash, + scheduling_parent: Hash, disabled_offchain: impl IntoIterator, controlled_indices: &mut ControlledValidatorIndices, ) -> Option> { + // We use the scheduling parent here to have consensus on disabled state among validators. + // If this fetch fails because e.g. we have never seen the fork of the candidate, not + // seeing the disabled state is acceptable, because we have spam protection for these + // cases in place anyways. let disabled_onchain = runtime_info - .get_disabled_validators(ctx.sender(), relay_parent) + .get_disabled_validators(ctx.sender(), scheduling_parent) .await .unwrap_or_else(|err| { gum::info!(target: LOG_TARGET, ?err, "Failed to get disabled validators"); Vec::new() }); + // Using the scheduling parent here is fine, because we warm the cache on active leaves + // update, thus this call will succeed even if the scheduling parent's state is not + // available. 
let (session, executor_params) = match runtime_info - .get_session_info_by_index(ctx.sender(), relay_parent, session_index) + .get_session_info_by_index(ctx.sender(), scheduling_parent, session_index) .await { Ok(extended_session_info) => { diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index cdc5975b450ae..5272fe7baa759 100644 --- a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -458,7 +458,7 @@ impl Initialized { async fn process_unapplied_slashes( &mut self, ctx: &mut Context, - relay_parent: Hash, + leaf: Hash, unapplied_slashes: Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>, ) { for (session_index, candidate_hash, pending) in unapplied_slashes { @@ -567,7 +567,7 @@ impl Initialized { let res = submit_report_dispute_lost( ctx.sender(), - relay_parent, + leaf, dispute_proof, key_ownership_proof, ) @@ -664,9 +664,6 @@ impl Initialized { // Scraped on-chain backing votes for the candidates with // the new active leaf as if we received them via gossip. for (candidate_receipt, backers) in backing_validators_per_candidate { - // TODO: NO RELAY PARENT! - let relay_parent = candidate_receipt.descriptor.relay_parent(); - // Use transition-safe descriptor methods for scheduling context. // Before the V3 node feature is seen, these fall back to old-rules // behavior to match old backers and prevent slashing. 
@@ -702,7 +699,7 @@ impl Initialized { gum::trace!( target: LOG_TARGET, ?candidate_hash, - ?relay_parent, + ?scheduling_parent, "Importing backing votes from chain for candidate" ); let statements = backers @@ -772,13 +769,13 @@ impl Initialized { match import_result { ImportStatementsResult::ValidImport => gum::trace!( target: LOG_TARGET, - ?relay_parent, + ?scheduling_parent, ?session, "Imported backing votes from chain" ), ImportStatementsResult::InvalidImport => gum::warn!( target: LOG_TARGET, - ?relay_parent, + ?scheduling_parent, ?session, "Attempted import of on-chain backing votes failed" ), @@ -1026,18 +1023,21 @@ impl Initialized { let candidate_hash = candidate_receipt.hash(); let votes_in_db = overlay_db.load_candidate_votes(session, &candidate_hash)?; - let relay_parent = match &candidate_receipt { - MaybeCandidateReceipt::Provides(candidate_receipt) => { - candidate_receipt.descriptor().relay_parent() - }, + let scheduling_parent = match &candidate_receipt { + MaybeCandidateReceipt::Provides(candidate_receipt) => candidate_receipt + .descriptor() + .scheduling_parent_for_candidate_validation(self.v3_ever_seen), MaybeCandidateReceipt::AssumeBackingVotePresent(candidate_hash) => match &votes_in_db { - Some(votes) => votes.candidate_receipt.descriptor().relay_parent(), + Some(votes) => votes + .candidate_receipt + .descriptor() + .scheduling_parent_for_candidate_validation(self.v3_ever_seen), None => { gum::warn!( target: LOG_TARGET, session, ?candidate_hash, - "Cannot obtain relay parent without `CandidateReceipt` available!" + "Cannot obtain scheduling parent without `CandidateReceipt` available!" 
); return Ok(ImportStatementsResult::InvalidImport); }, @@ -1048,7 +1048,7 @@ ctx, &mut self.runtime_info, session, - relay_parent, + scheduling_parent, self.offchain_disabled_validators.iter(session), &mut self.controlled_validator_indices, ) @@ -1374,7 +1374,7 @@ } // Notify ChainSelection if a dispute has concluded against a candidate. ChainSelection - // will need to mark the candidate's relay parent as reverted. + // will need to mark the blocks which included the candidate as reverted. if import_result.has_fresh_byzantine_threshold_against() { let blocks_including = self.scraper.get_blocks_including_candidate(&candidate_hash); for (parent_block_number, parent_block_hash) in &blocks_including { @@ -1523,7 +1523,9 @@ ctx, &mut self.runtime_info, session, - candidate_receipt.descriptor.relay_parent(), + candidate_receipt + .descriptor + .scheduling_parent_for_candidate_validation(self.v3_ever_seen), self.offchain_disabled_validators.iter(session), &mut self.controlled_validator_indices, ) diff --git a/polkadot/node/core/dispute-coordinator/src/lib.rs b/polkadot/node/core/dispute-coordinator/src/lib.rs index bb5189fb6e17d..58b4e3a9ad189 100644 --- a/polkadot/node/core/dispute-coordinator/src/lib.rs +++ b/polkadot/node/core/dispute-coordinator/src/lib.rs @@ -78,9 +78,9 @@ use initialized::{InitialData, Initialized}; /// If we have seen a candidate included somewhere, we should treat it as priority and will be able /// to provide an ordering for participation. Thus a dispute for a candidate where we can get some /// ordering is high-priority (we know it is a valid dispute) and those can be ordered by -/// `participation` based on `relay_parent` block number and other metrics, so each validator will -/// participate in disputes in a similar order, which ensures we will be resolving disputes, even -/// under heavy load.
+/// `participation` based on `scheduling_parent` block number and other metrics, so each validator +/// will participate in disputes in a similar order, which ensures we will be resolving disputes, +/// even under heavy load. mod scraping; use scraping::ChainScraper; diff --git a/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs b/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs index d935e17b77404..4babc73c4b69f 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs @@ -376,7 +376,7 @@ struct CandidateComparator { /// just using the lowest `BlockNumber` of all available including blocks - the problem is, /// that is not stable. If a new fork appears after the fact, we would start ordering the same /// candidate differently, which would result in the same candidate getting queued twice. - relay_parent_block_number: Option, + scheduling_parent_block_number: Option, /// By adding the `CandidateHash`, we can guarantee a unique ordering across candidates with /// the same relay parent block number. Candidates without `relay_parent_block_number` are /// ordered by the `candidate_hash` (and treated with the lowest priority, as already @@ -390,7 +390,7 @@ impl CandidateComparator { /// Useful for testing. #[cfg(test)] pub fn new_dummy(block_number: Option, candidate_hash: CandidateHash) -> Self { - Self { relay_parent_block_number: block_number, candidate_hash } + Self { scheduling_parent_block_number: block_number, candidate_hash } } /// Create a candidate comparator for a given candidate. 
@@ -412,12 +412,12 @@ impl CandidateComparator { gum::warn!( target: LOG_TARGET, candidate_hash = ?candidate_hash, - "Candidate's relay_parent could not be found via chain API - `CandidateComparator` \ + "Candidate's scheduling_parent could not be found via chain API - `CandidateComparator` \ with an empty relay parent block number will be provided!" ); } - Ok(CandidateComparator { relay_parent_block_number: n, candidate_hash }) + Ok(CandidateComparator { scheduling_parent_block_number: n, candidate_hash }) } } @@ -437,13 +437,13 @@ impl PartialOrd for CandidateComparator { impl Ord for CandidateComparator { fn cmp(&self, other: &Self) -> Ordering { - return match (self.relay_parent_block_number, other.relay_parent_block_number) { + return match (self.scheduling_parent_block_number, other.scheduling_parent_block_number) { (None, None) => { // No relay parents for both -> compare hashes self.candidate_hash.cmp(&other.candidate_hash) }, - (Some(self_relay_parent_block_num), Some(other_relay_parent_block_num)) => { - match self_relay_parent_block_num.cmp(&other_relay_parent_block_num) { + (Some(self_scheduling_parent_block_num), Some(other_scheduling_parent_block_num)) => { + match self_scheduling_parent_block_num.cmp(&other_scheduling_parent_block_num) { // if the relay parent is the same for both -> compare hashes Ordering::Equal => self.candidate_hash.cmp(&other.candidate_hash), // if not - return the result from comparing the relay parent block numbers @@ -451,7 +451,7 @@ impl Ord for CandidateComparator { } }, (Some(_), None) => { - // Candidates with known relay parents are always with priority + // Candidates with known scheduling parents are always with priority Ordering::Less }, (None, Some(_)) => { @@ -464,10 +464,10 @@ impl Ord for CandidateComparator { async fn get_block_number( sender: &mut impl overseer::DisputeCoordinatorSenderTrait, - relay_parent: Hash, + block_hash: Hash, ) -> FatalResult> { let (tx, rx) = oneshot::channel(); - 
sender.send_message(ChainApiMessage::BlockNumber(relay_parent, tx)).await; + sender.send_message(ChainApiMessage::BlockNumber(block_hash, tx)).await; rx.await .map_err(|_| FatalError::ChainApiSenderDropped)? .map_err(FatalError::ChainApiAncestors) From 316a25f6f0a470334f02e489f80ff2a83514e4b4 Mon Sep 17 00:00:00 2001 From: eskimor Date: Tue, 10 Mar 2026 15:20:15 +0100 Subject: [PATCH 10/52] Basic test for v3 backing gating. --- polkadot/node/core/backing/src/tests/mod.rs | 146 ++++++++++++++++++++ 1 file changed, 146 insertions(+) diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 3704bbd1fbb7b..738c1c600973f 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -4304,3 +4304,149 @@ fn ambiguous_candidate_rejected_on_statement() { virtual_overseer }); } + +// Test that version acceptance filtering behaves correctly before and after V3 activation: +// - Ambiguous candidates (old rules ≠ new rules in an unexpected way) are always rejected. +// - V3 candidates are rejected before activation but accepted after. +#[test] +fn version_acceptance_before_and_after_v3_activation() { + let mut test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + let pov_hash = pov.hash(); + + // --- Before V3 activation --- + + // 1. Ambiguous candidate: rejected (Inconsistency). + // scheduling_parent non-zero + version=0 → old rules see V1, new rules see V2. 
+ let mut ambiguous = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + ambiguous.descriptor.set_scheduling_parent(Hash::repeat_byte(0xAB)); + + virtual_overseer + .send(FromOrchestra::Communication { + msg: CandidateBackingMessage::Second { + scheduling_parent: test_state.relay_parent, + candidate: ambiguous.to_plain(), + pvd: pvd.clone(), + pov: pov.clone(), + }, + }) + .await; + assert_matches!(virtual_overseer.recv().timeout(Duration::from_secs(1)).await, None); + + // 2. V3 candidate: rejected (V3NotEnabled). + let v3_candidate = CommittedCandidateReceipt { + descriptor: CandidateDescriptorV2::new_v3( + test_state.chain_ids[0], + test_state.relay_parent, + CoreIndex(0), + 1, + pvd.hash(), + pov_hash, + make_erasure_root(&test_state, pov.clone(), pvd.clone()), + expected_head_data.hash(), + ValidationCode(validation_code.0.clone()).hash(), + test_state.relay_parent, // scheduling_parent = relay_parent + ), + commitments: CandidateCommitments { + head_data: expected_head_data.clone(), + ..Default::default() + }, + }; + + virtual_overseer + .send(FromOrchestra::Communication { + msg: CandidateBackingMessage::Second { + scheduling_parent: test_state.relay_parent, + candidate: v3_candidate.to_plain(), + pvd: pvd.clone(), + pov: pov.clone(), + }, + }) + .await; + assert_matches!(virtual_overseer.recv().timeout(Duration::from_secs(1)).await, None); + + // --- Activate V3 via BlockFinalized with a new session --- + // Use session 2 so NodeFeatures isn't cached (session 1 was cached with V3=off). 
+ let finalized_hash = Hash::repeat_byte(0xFF); + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::BlockFinalized(finalized_hash, 1))) + .await; + + // check_v3_on_finalized: SessionIndexForChild → session 2 + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::SessionIndexForChild(tx), + )) if parent == finalized_hash => { + tx.send(Ok(2)).unwrap(); + } + ); + + // NodeFeatures for session 2 → V3 enabled + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::NodeFeatures(session_index, tx), + )) if parent == finalized_hash && session_index == 2 => { + let mut features = NodeFeatures::new(); + features.resize(FeatureIndex::CandidateReceiptV3 as usize + 1, false); + features.set(FeatureIndex::CandidateReceiptV3 as usize, true); + tx.send(Ok(features)).unwrap(); + } + ); + + // --- After V3 activation --- + + // 3. Ambiguous candidate: STILL rejected (Inconsistency is always fatal). + virtual_overseer + .send(FromOrchestra::Communication { + msg: CandidateBackingMessage::Second { + scheduling_parent: test_state.relay_parent, + candidate: ambiguous.to_plain(), + pvd: pvd.clone(), + pov: pov.clone(), + }, + }) + .await; + assert_matches!(virtual_overseer.recv().timeout(Duration::from_secs(1)).await, None); + + // 4. V3 candidate: NOW ACCEPTED — passes check_version_acceptance. + // It proceeds past the version check into the normal seconding flow. + virtual_overseer + .send(FromOrchestra::Communication { + msg: CandidateBackingMessage::Second { + scheduling_parent: test_state.relay_parent, + candidate: v3_candidate.to_plain(), + pvd: pvd.clone(), + pov: pov.clone(), + }, + }) + .await; + + // Any message arriving means it passed the version acceptance check. + // (Before V3 activation, the same candidate produced a timeout above.) 
+ assert!( + virtual_overseer.recv().timeout(Duration::from_secs(1)).await.is_some(), + "V3 candidate should pass version acceptance after activation" + ); + + virtual_overseer + }); +} From e4751a2004cbc1b74a1973104c77f9dbb418ee7d Mon Sep 17 00:00:00 2001 From: eskimor Date: Tue, 10 Mar 2026 15:22:32 +0100 Subject: [PATCH 11/52] More dispute scheduling parent fixes --- polkadot/node/core/dispute-coordinator/src/import.rs | 8 ++++---- .../node/core/dispute-coordinator/src/initialized.rs | 11 +++++++++-- .../dispute-coordinator/src/participation/mod.rs | 6 ++++-- .../src/participation/queues/mod.rs | 12 +++++++++--- .../dispute-coordinator/src/participation/tests.rs | 2 +- 5 files changed, 27 insertions(+), 12 deletions(-) diff --git a/polkadot/node/core/dispute-coordinator/src/import.rs b/polkadot/node/core/dispute-coordinator/src/import.rs index 6ad42a7a26a5a..f971687a4c699 100644 --- a/polkadot/node/core/dispute-coordinator/src/import.rs +++ b/polkadot/node/core/dispute-coordinator/src/import.rs @@ -69,10 +69,10 @@ impl<'a> CandidateEnvironment<'a> { disabled_offchain: impl IntoIterator, controlled_indices: &mut ControlledValidatorIndices, ) -> Option> { - /// We use the scheduling parent here to have consensus on disabled state among validators. - /// If this fetch fails because e.g. we have never seen the fork of the candidate, not - /// seeing the disabled state is acceptable, because we have spam protection for these - /// cases in place anyways. + // We use the scheduling parent here to have consensus on disabled state among validators. + // If this fetch fails because e.g. we have never seen the fork of the candidate, not + // seeing the disabled state is acceptable, because we have spam protection for these + // cases in place anyways. 
let disabled_onchain = runtime_info .get_disabled_validators(ctx.sender(), scheduling_parent) .await diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index 5272fe7baa759..827c59a74e9ec 100644 --- a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -233,7 +233,9 @@ impl Initialized { } for (priority, request) in participations { - self.participation.queue_participation(ctx, priority, request).await?; + self.participation + .queue_participation(ctx, priority, request, self.v3_ever_seen) + .await?; } let mut overlay_db = OverlayedBackend::new(backend); @@ -341,7 +343,11 @@ impl Initialized { self.scraper.process_active_leaves_update(ctx.sender(), &update).await?; log_error( self.participation - .bump_to_priority_for_candidates(ctx, &scraped_updates.included_receipts) + .bump_to_priority_for_candidates( + ctx, + &scraped_updates.included_receipts, + self.v3_ever_seen, + ) .await, )?; self.participation.process_active_leaves_update(ctx, &update).await?; @@ -1270,6 +1276,7 @@ impl Initialized { env.executor_params().clone(), request_timer, ), + self.v3_ever_seen, ) .await; log_error(r)?; diff --git a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs index ee74347bf0d72..c398130b7fa9c 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs @@ -161,6 +161,7 @@ impl Participation { ctx: &mut Context, priority: ParticipationPriority, mut req: ParticipationRequest, + v3_ever_seen: bool, ) -> Result<()> { // Participation already running - we can ignore that request, discarding its timer: if self.running_participations.contains(req.candidate_hash()) { @@ -175,7 +176,7 @@ impl Participation { } } // Out of capacity/no recent block yet - queue: - 
self.queue.queue(ctx.sender(), priority, req).await + self.queue.queue(ctx.sender(), priority, req, v3_ever_seen).await } /// Message from a worker task was received - get the outcome. @@ -230,9 +231,10 @@ impl Participation { &mut self, ctx: &mut Context, included_receipts: &Vec, + v3_ever_seen: bool, ) -> Result<()> { for receipt in included_receipts { - self.queue.prioritize_if_present(ctx.sender(), receipt).await?; + self.queue.prioritize_if_present(ctx.sender(), receipt, v3_ever_seen).await?; } Ok(()) } diff --git a/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs b/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs index 4babc73c4b69f..3a6b2e7aaf66c 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs @@ -197,8 +197,10 @@ impl Queues { sender: &mut impl overseer::DisputeCoordinatorSenderTrait, priority: ParticipationPriority, req: ParticipationRequest, + v3_ever_seen: bool, ) -> Result<()> { - let comparator = CandidateComparator::new(sender, &req.candidate_receipt).await?; + let comparator = + CandidateComparator::new(sender, &req.candidate_receipt, v3_ever_seen).await?; self.queue_with_comparator(comparator, priority, req)?; Ok(()) @@ -224,8 +226,9 @@ impl Queues { &mut self, sender: &mut impl overseer::DisputeCoordinatorSenderTrait, receipt: &CandidateReceipt, + v3_ever_seen: bool, ) -> Result<()> { - let comparator = CandidateComparator::new(sender, receipt).await?; + let comparator = CandidateComparator::new(sender, receipt, v3_ever_seen).await?; self.prioritize_with_comparator(comparator)?; Ok(()) } @@ -404,9 +407,12 @@ impl CandidateComparator { pub async fn new( sender: &mut impl overseer::DisputeCoordinatorSenderTrait, candidate: &CandidateReceipt, + v3_ever_seen: bool, ) -> FatalResult { let candidate_hash = candidate.hash(); - let n = get_block_number(sender, 
candidate.descriptor().relay_parent()).await?; + let scheduling_parent = + candidate.descriptor().scheduling_parent_for_candidate_validation(v3_ever_seen); + let n = get_block_number(sender, scheduling_parent).await?; if n.is_none() { gum::warn!( diff --git a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs index 23f7984965b39..eb8d5d704d21a 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs @@ -77,7 +77,7 @@ async fn participate_with_commitments_hash( ParticipationRequest::new(candidate_receipt, session, Default::default(), request_timer); participation - .queue_participation(ctx, ParticipationPriority::BestEffort, req) + .queue_participation(ctx, ParticipationPriority::BestEffort, req, false) .await } From 3efd6252dcd0b0fed26c8aa3cc2fe32f29d54e76 Mon Sep 17 00:00:00 2001 From: eskimor Date: Tue, 10 Mar 2026 15:32:58 +0100 Subject: [PATCH 12/52] Improve backing test --- polkadot/node/core/backing/src/tests/mod.rs | 228 ++++++++++++++++---- 1 file changed, 190 insertions(+), 38 deletions(-) diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 738c1c600973f..54431a24c1ce9 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -4305,11 +4305,72 @@ fn ambiguous_candidate_rejected_on_statement() { }); } -// Test that version acceptance filtering behaves correctly before and after V3 activation: +/// Helper: send a BlockFinalized signal that activates V3 node features. +/// +/// Uses session 2 (uncached) so the subsystem fetches fresh NodeFeatures with V3 enabled. 
+async fn activate_v3_via_block_finalized(virtual_overseer: &mut VirtualOverseer) { + let finalized_hash = Hash::repeat_byte(0xFF); + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::BlockFinalized(finalized_hash, 1))) + .await; + + // check_v3_on_finalized: SessionIndexForChild → session 2 + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::SessionIndexForChild(tx), + )) if parent == finalized_hash => { + tx.send(Ok(2)).unwrap(); + } + ); + + // NodeFeatures for session 2 → V3 enabled + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::NodeFeatures(session_index, tx), + )) if parent == finalized_hash && session_index == 2 => { + let mut features = NodeFeatures::new(); + features.resize(FeatureIndex::CandidateReceiptV3 as usize + 1, false); + features.set(FeatureIndex::CandidateReceiptV3 as usize, true); + tx.send(Ok(features)).unwrap(); + } + ); +} + +/// Helper: sign a Seconded statement as validator 2 for use with CandidateBackingMessage::Statement. +fn sign_seconded_statement( + test_state: &TestState, + candidate: &CommittedCandidateReceipt, + pvd: &PersistedValidationData, +) -> SignedFullStatementWithPVD { + let public = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[2].to_seed()), + ) + .expect("Insert key into keystore"); + + SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate.clone(), pvd.clone()), + &test_state.signing_context, + ValidatorIndex(2), + &public.into(), + ) + .ok() + .flatten() + .expect("should be signed") +} + +// Test that version acceptance filtering behaves correctly before and after V3 activation +// via the CandidateBackingMessage::Second path (local seconding). // - Ambiguous candidates (old rules ≠ new rules in an unexpected way) are always rejected. 
// - V3 candidates are rejected before activation but accepted after. #[test] -fn version_acceptance_before_and_after_v3_activation() { +fn version_acceptance_before_and_after_v3_activation_on_second() { let mut test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; @@ -4360,7 +4421,7 @@ fn version_acceptance_before_and_after_v3_activation() { make_erasure_root(&test_state, pov.clone(), pvd.clone()), expected_head_data.hash(), ValidationCode(validation_code.0.clone()).hash(), - test_state.relay_parent, // scheduling_parent = relay_parent + test_state.relay_parent, ), commitments: CandidateCommitments { head_data: expected_head_data.clone(), @@ -4380,37 +4441,8 @@ fn version_acceptance_before_and_after_v3_activation() { .await; assert_matches!(virtual_overseer.recv().timeout(Duration::from_secs(1)).await, None); - // --- Activate V3 via BlockFinalized with a new session --- - // Use session 2 so NodeFeatures isn't cached (session 1 was cached with V3=off). 
- let finalized_hash = Hash::repeat_byte(0xFF); - virtual_overseer - .send(FromOrchestra::Signal(OverseerSignal::BlockFinalized(finalized_hash, 1))) - .await; - - // check_v3_on_finalized: SessionIndexForChild → session 2 - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - parent, - RuntimeApiRequest::SessionIndexForChild(tx), - )) if parent == finalized_hash => { - tx.send(Ok(2)).unwrap(); - } - ); - - // NodeFeatures for session 2 → V3 enabled - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - parent, - RuntimeApiRequest::NodeFeatures(session_index, tx), - )) if parent == finalized_hash && session_index == 2 => { - let mut features = NodeFeatures::new(); - features.resize(FeatureIndex::CandidateReceiptV3 as usize + 1, false); - features.set(FeatureIndex::CandidateReceiptV3 as usize, true); - tx.send(Ok(features)).unwrap(); - } - ); + // --- Activate V3 --- + activate_v3_via_block_finalized(&mut virtual_overseer).await; // --- After V3 activation --- @@ -4440,11 +4472,131 @@ fn version_acceptance_before_and_after_v3_activation() { }) .await; - // Any message arriving means it passed the version acceptance check. + // The candidate passed version acceptance and proceeds to validation — + // executor params are fetched as part of the validation setup. // (Before V3 activation, the same candidate produced a timeout above.) - assert!( - virtual_overseer.recv().timeout(Duration::from_secs(1)).await.is_some(), - "V3 candidate should pass version acceptance after activation" + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionExecutorParams(_, _), + )) + ); + + virtual_overseer + }); +} + +// Same test as above but via the CandidateBackingMessage::Statement path +// (receiving a Seconded statement from another validator). 
+#[test] +fn version_acceptance_before_and_after_v3_activation_on_statement() { + let mut test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + let pov_hash = pov.hash(); + + // Ambiguous candidate + let mut ambiguous = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + ambiguous.descriptor.set_scheduling_parent(Hash::repeat_byte(0xAB)); + + // V3 candidate + let v3_candidate = CommittedCandidateReceipt { + descriptor: CandidateDescriptorV2::new_v3( + test_state.chain_ids[0], + test_state.relay_parent, + CoreIndex(0), + 1, + pvd.hash(), + pov_hash, + make_erasure_root(&test_state, pov.clone(), pvd.clone()), + expected_head_data.hash(), + ValidationCode(validation_code.0.clone()).hash(), + test_state.relay_parent, + ), + commitments: CandidateCommitments { + head_data: expected_head_data.clone(), + ..Default::default() + }, + }; + + // --- Before V3 activation --- + + // 1. Ambiguous candidate via Statement: rejected. + let stmt = sign_seconded_statement(&test_state, &ambiguous, &pvd); + virtual_overseer + .send(FromOrchestra::Communication { + msg: CandidateBackingMessage::Statement { + scheduling_parent: test_state.relay_parent, + statement: stmt, + }, + }) + .await; + assert_matches!(virtual_overseer.recv().timeout(Duration::from_secs(1)).await, None); + + // 2. V3 candidate via Statement: rejected (V3NotEnabled). 
+ let stmt = sign_seconded_statement(&test_state, &v3_candidate, &pvd); + virtual_overseer + .send(FromOrchestra::Communication { + msg: CandidateBackingMessage::Statement { + scheduling_parent: test_state.relay_parent, + statement: stmt, + }, + }) + .await; + assert_matches!(virtual_overseer.recv().timeout(Duration::from_secs(1)).await, None); + + // --- Activate V3 --- + activate_v3_via_block_finalized(&mut virtual_overseer).await; + + // --- After V3 activation --- + + // 3. Ambiguous candidate via Statement: STILL rejected. + let stmt = sign_seconded_statement(&test_state, &ambiguous, &pvd); + virtual_overseer + .send(FromOrchestra::Communication { + msg: CandidateBackingMessage::Statement { + scheduling_parent: test_state.relay_parent, + statement: stmt, + }, + }) + .await; + assert_matches!(virtual_overseer.recv().timeout(Duration::from_secs(1)).await, None); + + // 4. V3 candidate via Statement: NOW ACCEPTED. + let stmt = sign_seconded_statement(&test_state, &v3_candidate, &pvd); + virtual_overseer + .send(FromOrchestra::Communication { + msg: CandidateBackingMessage::Statement { + scheduling_parent: test_state.relay_parent, + statement: stmt, + }, + }) + .await; + + // The candidate passed version acceptance and is forwarded to + // prospective parachains for import — this is the first step of the + // Statement path after the version check. + // (Before V3 activation, the same candidate produced a timeout above.) 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate(_, _), + ) ); virtual_overseer From 52c63746dab321596418d2dc9bf03931004122c1 Mon Sep 17 00:00:00 2001 From: eskimor Date: Tue, 10 Mar 2026 15:33:10 +0100 Subject: [PATCH 13/52] Doc fix --- polkadot/node/core/dispute-coordinator/src/import.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/core/dispute-coordinator/src/import.rs b/polkadot/node/core/dispute-coordinator/src/import.rs index f971687a4c699..15753ba8d9dd4 100644 --- a/polkadot/node/core/dispute-coordinator/src/import.rs +++ b/polkadot/node/core/dispute-coordinator/src/import.rs @@ -51,7 +51,7 @@ pub struct CandidateEnvironment<'a> { executor_params: &'a ExecutorParams, /// Validator indices controlled by this node. controlled_indices: HashSet, - /// Indices of on-chain disabled validators at the `relay_parent` combined + /// Indices of on-chain disabled validators at the `scheduling_parent` combined /// with the off-chain state. 
disabled_indices: HashSet, } From 12361cc376b28b29eaa25b9c0860c4b547aca692 Mon Sep 17 00:00:00 2001 From: eskimor Date: Tue, 10 Mar 2026 15:49:48 +0100 Subject: [PATCH 14/52] Fmt --- polkadot/node/core/backing/src/tests/mod.rs | 3 +- .../core/candidate-validation/src/tests.rs | 51 +++++++-------- .../core/dispute-coordinator/src/tests.rs | 63 +++++++------------ polkadot/node/core/pvf/src/host.rs | 3 +- polkadot/primitives/src/v9/mod.rs | 10 +-- 5 files changed, 52 insertions(+), 78 deletions(-) diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 54431a24c1ce9..a9a5d3e9920ef 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -4340,7 +4340,8 @@ async fn activate_v3_via_block_finalized(virtual_overseer: &mut VirtualOverseer) ); } -/// Helper: sign a Seconded statement as validator 2 for use with CandidateBackingMessage::Statement. +/// Helper: sign a Seconded statement as validator 2 for use with +/// CandidateBackingMessage::Statement. 
fn sign_seconded_statement( test_state: &TestState, candidate: &CommittedCandidateReceipt, diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index de6097985b5ed..49a17388f9ba3 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -30,10 +30,10 @@ use polkadot_node_subsystem_test_helpers::{make_subsystem_context, TestSubsystem use polkadot_node_subsystem_util::reexports::SubsystemContext; use polkadot_overseer::ActivatedLeaf; use polkadot_primitives::{ - CandidateDescriptorV2, CandidateDescriptorVersion, ClaimQueueOffset, NodeFeatures, + CandidateDescriptorV2, CandidateDescriptorVersion, ClaimQueueOffset, CommittedCandidateReceiptError, CoreIndex, CoreSelector, GroupIndex, HeadData, Id as ParaId, - MutateDescriptorV2, OccupiedCoreAssumption, SessionInfo, UMPSignal, UpwardMessage, ValidatorId, - DEFAULT_SCHEDULING_LOOKAHEAD, UMP_SEPARATOR, + MutateDescriptorV2, NodeFeatures, OccupiedCoreAssumption, SessionInfo, UMPSignal, + UpwardMessage, ValidatorId, DEFAULT_SCHEDULING_LOOKAHEAD, UMP_SEPARATOR, }; use polkadot_primitives_test_helpers::{ dummy_collator, dummy_collator_signature, dummy_hash, make_valid_candidate_descriptor, @@ -554,10 +554,7 @@ fn candidate_validation_ok_is_ok(#[case] v2_descriptor: bool) { &Default::default(), VALIDATION_CODE_BOMB_LIMIT, false, - Some(BackingExtras { - claim_queue: ClaimQueueSnapshot(cq), - expected_scheduling_session: 1, - }), + Some(BackingExtras { claim_queue: ClaimQueueSnapshot(cq), expected_scheduling_session: 1 }), )) .unwrap(); @@ -646,10 +643,7 @@ fn invalid_session_or_ump_signals() { &Default::default(), VALIDATION_CODE_BOMB_LIMIT, false, - Some(BackingExtras { - claim_queue: Default::default(), - expected_scheduling_session: 1, - }), + Some(BackingExtras { claim_queue: Default::default(), expected_scheduling_session: 1 }), )) .unwrap(); @@ -673,10 +667,7 @@ fn 
invalid_session_or_ump_signals() { &Default::default(), VALIDATION_CODE_BOMB_LIMIT, false, - Some(BackingExtras { - claim_queue: Default::default(), - expected_scheduling_session: 1, - }), + Some(BackingExtras { claim_queue: Default::default(), expected_scheduling_session: 1 }), )) .unwrap(); assert_matches!( @@ -782,10 +773,7 @@ fn invalid_session_or_ump_signals() { &Default::default(), VALIDATION_CODE_BOMB_LIMIT, false, - Some(BackingExtras { - claim_queue: Default::default(), - expected_scheduling_session: 1, - }), + Some(BackingExtras { claim_queue: Default::default(), expected_scheduling_session: 1 }), )) .unwrap(); assert_matches!( @@ -1654,10 +1642,7 @@ fn compressed_code_works() { &Default::default(), VALIDATION_CODE_BOMB_LIMIT, false, - Some(BackingExtras { - claim_queue: Default::default(), - expected_scheduling_session: 1, - }), + Some(BackingExtras { claim_queue: Default::default(), expected_scheduling_session: 1 }), )); assert_matches!(v, Ok(ValidationResult::Valid(_, _))); @@ -2100,7 +2085,7 @@ fn maybe_prepare_validation_resets_state_on_a_new_session() { pvf_prep: PvfPrepState { is_next_session_authority: true, already_prepared_code_hashes: HashSet::from_iter(vec![ - ValidationCode(vec![0; 16]).hash(), + ValidationCode(vec![0; 16]).hash() ]), ..Default::default() }, @@ -2560,8 +2545,13 @@ fn v3_feature_detected_on_session_change() { let leaf1_hash = Hash::repeat_byte(0x01); let update1 = dummy_active_leaves_update(leaf1_hash); - let check_fut = - handle_active_leaves_update(ctx.sender(), keystore.clone(), &mut backend, update1, &mut state); + let check_fut = handle_active_leaves_update( + ctx.sender(), + keystore.clone(), + &mut backend, + update1, + &mut state, + ); let test_fut = async move { // Standard leaf activation messages @@ -2614,8 +2604,13 @@ fn v3_feature_detected_on_session_change() { let leaf2_hash = Hash::repeat_byte(0x02); let update2 = dummy_active_leaves_update(leaf2_hash); - let check_fut = - 
handle_active_leaves_update(ctx.sender(), keystore.clone(), &mut backend, update2, &mut state); + let check_fut = handle_active_leaves_update( + ctx.sender(), + keystore.clone(), + &mut backend, + update2, + &mut state, + ); let test_fut = async move { assert_new_active_leaf_messages(&mut ctx_handle, 2).await; diff --git a/polkadot/node/core/dispute-coordinator/src/tests.rs b/polkadot/node/core/dispute-coordinator/src/tests.rs index 86b794d4ff594..2b714e9c55a2d 100644 --- a/polkadot/node/core/dispute-coordinator/src/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/tests.rs @@ -414,12 +414,11 @@ impl TestState { _new_leaf, RuntimeApiRequest::FetchOnChainVotes(tx), )) => { - let votes = - self.initial_on_chain_votes.take().unwrap_or(ScrapedOnChainVotes { - session, - backing_validators_per_candidate: Vec::default(), - disputes: MultiDisputeStatementSet::default(), - }); + let votes = self.initial_on_chain_votes.take().unwrap_or(ScrapedOnChainVotes { + session, + backing_validators_per_candidate: Vec::default(), + disputes: MultiDisputeStatementSet::default(), + }); tx.send(Ok(Some(votes))).unwrap(); }, AllMessages::RuntimeApi(RuntimeApiMessage::Request( @@ -671,25 +670,22 @@ fn make_v3_candidate_receipt( ) -> CandidateReceipt { use polkadot_primitives::CandidateDescriptorV2; let descriptor = CandidateDescriptorV2::new_from_raw( - 0.into(), // para_id - relay_parent, // relay_parent - 1, // version = V3 - 0, // core_index - session_index, // session_index - 0, // scheduling_session_offset - [0u8; 24], // reserved1 - dummy_hash(), // persisted_validation_data_hash - dummy_hash(), // pov_hash - dummy_hash(), // erasure_root - scheduling_parent, // scheduling_parent - [0u8; 32], // reserved2 - dummy_hash(), // para_head + 0.into(), // para_id + relay_parent, // relay_parent + 1, // version = V3 + 0, // core_index + session_index, // session_index + 0, /* scheduling_session_offset */ + [0u8; 24], // reserved1 + dummy_hash(), // persisted_validation_data_hash + 
dummy_hash(), // pov_hash + dummy_hash(), // erasure_root + scheduling_parent, // scheduling_parent + [0u8; 32], // reserved2 + dummy_hash(), // para_head polkadot_primitives_test_helpers::dummy_validation_code().hash(), // validation_code_hash ); - CandidateReceipt { - descriptor, - commitments_hash: CandidateCommitments::default().hash(), - } + CandidateReceipt { descriptor, commitments_hash: CandidateCommitments::default().hash() } } fn make_invalid_candidate_receipt() -> CandidateReceipt { @@ -717,8 +713,7 @@ impl TestState { ) -> ValidityAttestation { let keystore = self.master_keystore.clone() as KeystorePtr; let validator_id = self.validators[validator_index.0 as usize].public().into(); - let context = - SigningContext { session_index: session, parent_hash: scheduling_parent }; + let context = SigningContext { session_index: session, parent_hash: scheduling_parent }; let statement = SignedFullStatement::sign( &keystore, @@ -743,8 +738,7 @@ fn make_v3_on_chain_votes( relay_parent: Hash, scheduling_parent: Hash, ) -> ScrapedOnChainVotes { - let candidate_receipt = - make_v3_candidate_receipt(relay_parent, scheduling_parent, session); + let candidate_receipt = make_v3_candidate_receipt(relay_parent, scheduling_parent, session); let candidate_hash = candidate_receipt.hash(); // Create a valid backing attestation signed with scheduling_parent @@ -4887,12 +4881,8 @@ fn v3_candidate_on_first_leaf_is_detected_correctly() { test_state.v3_node_features = true; let session = 1; - test_state.initial_on_chain_votes = Some(make_v3_on_chain_votes( - &test_state, - session, - relay_parent, - scheduling_parent, - )); + test_state.initial_on_chain_votes = + Some(make_v3_on_chain_votes(&test_state, session, relay_parent, scheduling_parent)); test_state.resume(|mut test_state, mut virtual_overseer| { Box::pin(async move { @@ -4944,12 +4934,7 @@ fn v3_candidate_on_subsequent_leaf_is_detected_correctly() { // scraper messages and SessionIndexForChild, but NOT the session caching 
// for the new session (since known_session is already Some). test_state - .activate_leaf_at_session( - &mut virtual_overseer, - new_session, - 3, - Vec::new(), - ) + .activate_leaf_at_session(&mut virtual_overseer, new_session, 3, Vec::new()) .await; // Manually handle session caching messages for the new session. diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 39298b07dedc7..46d1b8761b90e 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -1240,8 +1240,7 @@ pub(crate) mod tests { pvd: Arc, pov: Arc, ) -> ValidationContext { - let candidate_receipt: CandidateReceipt = - dummy_candidate_receipt(H256::default()).into(); + let candidate_receipt: CandidateReceipt = dummy_candidate_receipt(H256::default()).into(); ValidationContext { candidate_receipt, pvd, diff --git a/polkadot/primitives/src/v9/mod.rs b/polkadot/primitives/src/v9/mod.rs index f454432dadf32..e829aae355bc2 100644 --- a/polkadot/primitives/src/v9/mod.rs +++ b/polkadot/primitives/src/v9/mod.rs @@ -3377,10 +3377,7 @@ pub mod tests { assert_eq!(desc.version_old_rules(), CandidateDescriptorVersion::V1); // Before V3 activation: descriptor is treated as V1 — relay_parent is used. - assert_eq!( - desc.version_for_candidate_validation(false), - CandidateDescriptorVersion::V1, - ); + assert_eq!(desc.version_for_candidate_validation(false), CandidateDescriptorVersion::V1,); assert_eq!( desc.scheduling_parent_for_candidate_validation(false), Hash::repeat_byte(1), // relay_parent @@ -3392,10 +3389,7 @@ pub mod tests { ); // After V3 activation: descriptor is correctly identified as V3. 
- assert_eq!( - desc.version_for_candidate_validation(true), - CandidateDescriptorVersion::V3, - ); + assert_eq!(desc.version_for_candidate_validation(true), CandidateDescriptorVersion::V3,); assert_eq!( desc.scheduling_parent_for_candidate_validation(true), Hash::repeat_byte(7), // scheduling_parent From 96f02f306f1f99dfd97d63d55a42a656da319fdf Mon Sep 17 00:00:00 2001 From: eskimor Date: Tue, 10 Mar 2026 16:04:13 +0100 Subject: [PATCH 15/52] Naming & prdocs update --- polkadot/node/core/approval-voting/src/lib.rs | 19 ++++++++----------- prdoc/pr_11290.prdoc | 13 +++++++++++-- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index c9ad3123b300b..f8117fc617dad 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -850,17 +850,17 @@ impl CurrentlyCheckingSet { async fn get_extended_session_info<'a, Sender>( runtime_info: &'a mut RuntimeInfo, sender: &mut Sender, - relay_parent: Hash, + block_hash: Hash, ) -> Option<&'a ExtendedSessionInfo> where Sender: SubsystemSender, { - match runtime_info.get_session_info(sender, relay_parent).await { + match runtime_info.get_session_info(sender, block_hash).await { Ok(extended_info) => Some(&extended_info), Err(_) => { gum::debug!( target: LOG_TARGET, - ?relay_parent, + ?block_hash, "Can't obtain SessionInfo or ExecutorParams" ); None @@ -871,22 +871,19 @@ where async fn get_extended_session_info_by_index<'a, Sender>( runtime_info: &'a mut RuntimeInfo, sender: &mut Sender, - relay_parent: Hash, + block_hash: Hash, session_index: SessionIndex, ) -> Option<&'a ExtendedSessionInfo> where Sender: SubsystemSender, { - match runtime_info - .get_session_info_by_index(sender, relay_parent, session_index) - .await - { + match runtime_info.get_session_info_by_index(sender, block_hash, session_index).await { Ok(extended_info) => Some(&extended_info), Err(_) => { 
gum::debug!( target: LOG_TARGET, session = session_index, - ?relay_parent, + ?block_hash, "Can't obtain SessionInfo or ExecutorParams" ); None @@ -897,13 +894,13 @@ where async fn get_session_info_by_index<'a, Sender>( runtime_info: &'a mut RuntimeInfo, sender: &mut Sender, - relay_parent: Hash, + block_hash: Hash, session_index: SessionIndex, ) -> Option<&'a SessionInfo> where Sender: SubsystemSender, { - get_extended_session_info_by_index(runtime_info, sender, relay_parent, session_index) + get_extended_session_info_by_index(runtime_info, sender, block_hash, session_index) .await .map(|extended_info| &extended_info.session_info) } diff --git a/prdoc/pr_11290.prdoc b/prdoc/pr_11290.prdoc index 71d30d1eba233..3e1c962f2c237 100644 --- a/prdoc/pr_11290.prdoc +++ b/prdoc/pr_11290.prdoc @@ -1,7 +1,7 @@ # Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 # See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json -title: Make candidate descriptor version detection self-contained +title: Fix issue #11272 - Make candidate descriptor version detection self-contained doc: - audience: Node Dev @@ -25,6 +25,15 @@ doc: collator protocol) no longer need to look up node features for version detection, since any candidate that reaches them on-chain was already validated by the runtime. + Additional changes: + - **Candidate validation**: V3 feature detection is based on session changes via + `handle_active_leaves_update`. Uses `scheduling_parent_for_candidate_validation(v3_ever_seen)` + and `version_for_candidate_validation(v3_ever_seen)` to transition safely. + - **Dispute coordinator**: V3 feature detection on both first and subsequent leaves. + Uses `scheduling_parent_for_candidate_validation(v3_ever_seen)` in dispute participation + queue ordering (`CandidateComparator`), so V3 candidates are ordered by their scheduling + parent's block number rather than relay parent. 
+ crates: - name: polkadot-primitives bump: major @@ -41,7 +50,7 @@ crates: - name: polkadot-node-core-prospective-parachains bump: major - name: polkadot-node-core-dispute-coordinator - bump: patch + bump: minor - name: polkadot-node-core-provisioner bump: patch - name: polkadot-collator-protocol From bd0ed0261a199541174e903a401780054fccfdcd Mon Sep 17 00:00:00 2001 From: eskimor Date: Tue, 10 Mar 2026 16:46:01 +0100 Subject: [PATCH 16/52] More relay_parent -> scheduling_parent --- polkadot/node/network/protocol/src/lib.rs | 4 +- .../src/v2/candidates.rs | 8 +- .../statement-distribution/src/v2/mod.rs | 153 ++++++++++-------- .../src/v2/tests/grid.rs | 34 ++-- .../src/v2/tests/requests.rs | 14 +- .../subsystem-bench/src/lib/statement/mod.rs | 2 +- 6 files changed, 115 insertions(+), 100 deletions(-) diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs index ffe10188479df..ce93fc9af9987 100644 --- a/polkadot/node/network/protocol/src/lib.rs +++ b/polkadot/node/network/protocol/src/lib.rs @@ -738,8 +738,8 @@ pub mod v3 { /// of the statements backing it. #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] pub struct BackedCandidateManifest { - /// The relay-parent of the candidate. - pub relay_parent: Hash, + /// The scheduling-parent of the candidate. + pub scheduling_parent: Hash, /// The hash of the candidate. pub candidate_hash: CandidateHash, /// The group index backing the candidate at the relay-parent. 
diff --git a/polkadot/node/network/statement-distribution/src/v2/candidates.rs b/polkadot/node/network/statement-distribution/src/v2/candidates.rs index 1c85c90da8e8a..ada499bca1847 100644 --- a/polkadot/node/network/statement-distribution/src/v2/candidates.rs +++ b/polkadot/node/network/statement-distribution/src/v2/candidates.rs @@ -102,7 +102,7 @@ impl Candidates { match entry { CandidateState::Confirmed(ref c) => { - if c.relay_parent() != claimed_relay_parent { + if c.scheduling_parent() != claimed_relay_parent { return Err(BadAdvertisement); } @@ -337,7 +337,7 @@ impl Candidates { }; self.candidates.retain(|c_hash, state| match state { CandidateState::Confirmed(ref mut c) => { - if !relay_parent_live(&c.relay_parent()) { + if !relay_parent_live(&c.scheduling_parent()) { remove_parent_claims(*c_hash, c.parent_head_data_hash(), c.para_id()); false } else { @@ -531,8 +531,8 @@ pub struct ConfirmedCandidate { impl ConfirmedCandidate { /// Get the relay-parent of the candidate. - pub fn relay_parent(&self) -> Hash { - self.receipt.descriptor.relay_parent() + pub fn scheduling_parent(&self) -> Hash { + self.receipt.descriptor.scheduling_parent() } /// Get the para-id of the candidate. 
diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 228eeff9d289f..107f8a57c87e6 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -999,7 +999,7 @@ async fn send_pending_grid_messages( match kind { grid::ManifestKind::Full => { let manifest = protocol_v3::BackedCandidateManifest { - relay_parent, + scheduling_parent, candidate_hash, group_index, para_id: confirmed_candidate.para_id(), @@ -1102,12 +1102,12 @@ async fn send_pending_grid_messages( pub(crate) async fn share_local_statement( ctx: &mut Context, state: &mut State, - relay_parent: Hash, + scheduling_parent: Hash, statement: SignedFullStatementWithPVD, reputation: &mut ReputationAggregator, metrics: &Metrics, ) -> JfyiErrorResult<()> { - let per_scheduling_parent = match state.per_scheduling_parent.get_mut(&relay_parent) { + let per_scheduling_parent = match state.per_scheduling_parent.get_mut(&scheduling_parent) { None => return Err(JfyiError::InvalidShare), Some(x) => x, }; @@ -1133,11 +1133,12 @@ pub(crate) async fn share_local_statement( // have the candidate. Sanity: check the para-id is valid. 
let expected = match statement.payload() { FullStatementWithPVD::Seconded(ref c, _) => { - Some((c.descriptor.para_id(), c.descriptor.relay_parent())) - }, - FullStatementWithPVD::Valid(hash) => { - state.candidates.get_confirmed(&hash).map(|c| (c.para_id(), c.relay_parent())) + Some((c.descriptor.para_id(), c.descriptor.scheduling_parent())) }, + FullStatementWithPVD::Valid(hash) => state + .candidates + .get_confirmed(&hash) + .map(|c| (c.para_id(), c.scheduling_parent())), }; let is_seconded = match statement.payload() { @@ -1145,7 +1146,7 @@ pub(crate) async fn share_local_statement( FullStatementWithPVD::Valid(_) => false, }; - let (expected_para, expected_relay_parent) = match expected { + let (expected_para, expected_scheduling_parent) = match expected { None => return Err(JfyiError::InvalidShare), Some(x) => x, }; @@ -1167,7 +1168,9 @@ pub(crate) async fn share_local_statement( return Err(JfyiError::InvalidShare); } - if !local_assignments.contains(&expected_para) || relay_parent != expected_relay_parent { + if !local_assignments.contains(&expected_para) || + scheduling_parent != expected_scheduling_parent + { return Err(JfyiError::InvalidShare); } @@ -1224,7 +1227,7 @@ pub(crate) async fn share_local_statement( // send the compact version of the statement to any peers which need it. circulate_statement( ctx, - relay_parent, + scheduling_parent, per_scheduling_parent, per_session, &state.candidates, @@ -1266,7 +1269,7 @@ enum DirectTargetKind { #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] async fn circulate_statement( ctx: &mut Context, - relay_parent: Hash, + scheduling_parent: Hash, scheduling_parent_state: &mut PerSchedulingParentState, per_session: &PerSessionState, candidates: &Candidates, @@ -1346,14 +1349,16 @@ async fn circulate_statement( for (target, authority_id, kind) in targets { // Find peer ID based on authority ID, and also filter to connected. 
let peer_id: (PeerId, ProtocolVersion) = match authorities.get(&authority_id) { - Some(p) if peers.get(p).map_or(false, |p| p.knows_relay_parent(&relay_parent)) => ( - *p, - peers - .get(p) - .expect("Qed, can't fail because it was checked above") - .protocol_version - .into(), - ), + Some(p) if peers.get(p).map_or(false, |p| p.knows_relay_parent(&scheduling_parent)) => { + ( + *p, + peers + .get(p) + .expect("Qed, can't fail because it was checked above") + .protocol_version + .into(), + ) + }, None | Some(_) => continue, }; @@ -1401,7 +1406,7 @@ async fn circulate_statement( ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( statement_to_v3_peers, ValidationProtocols::V3(protocol_v3::StatementDistributionMessage::Statement( - relay_parent, + scheduling_parent, statement.as_unchecked().clone(), )) .into(), @@ -1414,10 +1419,10 @@ async fn circulate_statement( fn check_statement_signature( session_index: SessionIndex, validators: &IndexedVec, - relay_parent: Hash, + scheduling_parent: Hash, statement: UncheckedSignedStatement, ) -> std::result::Result { - let signing_context = SigningContext { session_index, parent_hash: relay_parent }; + let signing_context = SigningContext { session_index, parent_hash: scheduling_parent }; validators .get(statement.unchecked_validator_index()) @@ -1451,7 +1456,7 @@ async fn handle_incoming_statement( ctx: &mut Context, state: &mut State, peer: PeerId, - relay_parent: Hash, + scheduling_parent: Hash, statement: UncheckedSignedStatement, reputation: &mut ReputationAggregator, metrics: &Metrics, @@ -1465,7 +1470,7 @@ async fn handle_incoming_statement( }; // Ensure we know the scheduling parent. 
- let per_scheduling_parent = match state.per_scheduling_parent.get_mut(&relay_parent) { + let per_scheduling_parent = match state.per_scheduling_parent.get_mut(&scheduling_parent) { None => { modify_reputation( reputation, @@ -1496,7 +1501,7 @@ async fn handle_incoming_statement( if per_scheduling_parent.is_disabled(&statement.unchecked_validator_index()) { gum::debug!( target: LOG_TARGET, - ?relay_parent, + ?scheduling_parent, validator_index = ?statement.unchecked_validator_index(), "Ignoring a statement from disabled validator." ); @@ -1564,7 +1569,7 @@ async fn handle_incoming_statement( active.zip(cluster_sender_index) { match handle_cluster_statement( - relay_parent, + scheduling_parent, &mut active.cluster_tracker, per_scheduling_parent.session, &per_session.session_info, @@ -1600,7 +1605,7 @@ async fn handle_incoming_statement( if let Some((grid_sender_index, validator_knows_statement)) = grid_sender_index { if !validator_knows_statement { match handle_grid_statement( - relay_parent, + scheduling_parent, &mut local_validator.grid_tracker, per_scheduling_parent.session, &per_session, @@ -1642,7 +1647,7 @@ async fn handle_incoming_statement( let res = state.candidates.insert_unconfirmed( peer, candidate_hash, - relay_parent, + scheduling_parent, originator_group, None, ); @@ -1664,10 +1669,11 @@ async fn handle_incoming_statement( if !is_confirmed { // If the candidate is not confirmed, note that we should attempt // to request it from the given peer. - let mut request_entry = - state - .request_manager - .get_or_insert(relay_parent, candidate_hash, originator_group); + let mut request_entry = state.request_manager.get_or_insert( + scheduling_parent, + candidate_hash, + originator_group, + ); request_entry.add_peer(peer); @@ -1685,7 +1691,7 @@ async fn handle_incoming_statement( // sanity: should never happen. 
gum::warn!( target: LOG_TARGET, - ?relay_parent, + ?scheduling_parent, validator_index = ?originator_index, "Error - accepted message from unknown validator." ); @@ -1713,7 +1719,7 @@ async fn handle_incoming_statement( ctx, candidate_hash, originator_group, - &relay_parent, + &scheduling_parent, &mut *per_scheduling_parent, confirmed, per_session, @@ -1724,7 +1730,7 @@ async fn handle_incoming_statement( // We always circulate statements at this point. circulate_statement( ctx, - relay_parent, + scheduling_parent, per_scheduling_parent, per_session, &state.candidates, @@ -1745,7 +1751,7 @@ async fn handle_incoming_statement( /// if successful, this returns a checked signed statement if it should be imported /// or otherwise an error indicating a reputational fault. fn handle_cluster_statement( - relay_parent: Hash, + scheduling_parent: Hash, cluster_tracker: &mut ClusterTracker, session: SessionIndex, session_info: &SessionInfo, @@ -1774,12 +1780,15 @@ fn handle_cluster_statement( }; // Ensure the statement is correctly signed. - let checked_statement = - match check_statement_signature(session, &session_info.validators, relay_parent, statement) - { - Ok(s) => s, - Err(_) => return Err(COST_INVALID_SIGNATURE), - }; + let checked_statement = match check_statement_signature( + session, + &session_info.validators, + scheduling_parent, + statement, + ) { + Ok(s) => s, + Err(_) => return Err(COST_INVALID_SIGNATURE), + }; cluster_tracker.note_received( cluster_sender_index, @@ -1796,7 +1805,7 @@ fn handle_cluster_statement( /// if successful, this returns a checked signed statement if it should be imported /// or otherwise an error indicating a reputational fault. 
fn handle_grid_statement( - relay_parent: Hash, + scheduling_parent: Hash, grid_tracker: &mut GridTracker, session: SessionIndex, per_session: &PerSessionState, @@ -1807,7 +1816,7 @@ fn handle_grid_statement( let checked_statement = match check_statement_signature( session, &per_session.session_info.validators, - relay_parent, + scheduling_parent, statement, ) { Ok(s) => s, @@ -1900,7 +1909,7 @@ async fn provide_candidate_to_grid( None => return, }; - let relay_parent = confirmed_candidate.relay_parent(); + let scheduling_parent = confirmed_candidate.scheduling_parent(); let group_index = confirmed_candidate.group_index(); let grid_view = match per_session.grid_view { @@ -1921,7 +1930,7 @@ async fn provide_candidate_to_grid( gum::warn!( target: LOG_TARGET, ?candidate_hash, - ?relay_parent, + ?scheduling_parent, ?group_index, session = scheduling_parent_state.session, "Handled backed candidate with unknown group?", @@ -1947,7 +1956,7 @@ async fn provide_candidate_to_grid( ); let manifest = protocol_v3::BackedCandidateManifest { - relay_parent, + scheduling_parent, candidate_hash, group_index, para_id: confirmed_candidate.para_id(), @@ -1967,7 +1976,7 @@ async fn provide_candidate_to_grid( let p = match connected_validator_peer(authorities, per_session, v) { None => continue, Some(p) => { - if peers.get(&p).map_or(false, |d| d.knows_relay_parent(&relay_parent)) { + if peers.get(&p).map_or(false, |d| d.knows_relay_parent(&scheduling_parent)) { (p, peers.get(&p).expect("Qed, was checked above").protocol_version.into()) } else { continue; @@ -1989,7 +1998,7 @@ async fn provide_candidate_to_grid( post_statements.extend( post_acknowledgement_statement_messages( v, - relay_parent, + scheduling_parent, &mut local_validator.grid_tracker, &scheduling_parent_state.statement_store, &per_session.groups, @@ -2221,7 +2230,7 @@ async fn handle_incoming_manifest_common<'a, Context>( per_session: &'a HashMap, candidates: &mut Candidates, candidate_hash: CandidateHash, - relay_parent: 
Hash, + scheduling_parent: Hash, para_id: ParaId, mut manifest_summary: grid::ManifestSummary, manifest_kind: grid::ManifestKind, @@ -2230,7 +2239,7 @@ async fn handle_incoming_manifest_common<'a, Context>( // 1. sanity checks: peer is connected, relay-parent in state, para ID matches group index. let peer_state = peers.get(&peer)?; - let scheduling_parent_state = match per_scheduling_parent.get_mut(&relay_parent) { + let scheduling_parent_state = match per_scheduling_parent.get_mut(&scheduling_parent) { None => { modify_reputation( reputation, @@ -2349,7 +2358,7 @@ async fn handle_incoming_manifest_common<'a, Context>( if let Err(BadAdvertisement) = candidates.insert_unconfirmed( peer, candidate_hash, - relay_parent, + scheduling_parent, group_index, Some((claimed_parent_hash, para_id)), ) { @@ -2375,7 +2384,7 @@ async fn handle_incoming_manifest_common<'a, Context>( /// This notes the messages as sent within the grid state. fn post_acknowledgement_statement_messages( recipient: ValidatorIndex, - relay_parent: Hash, + scheduling_parent: Hash, grid_tracker: &mut GridTracker, statement_store: &StatementStore, groups: &Groups, @@ -2402,7 +2411,7 @@ fn post_acknowledgement_statement_messages( match peer.1.into() { ValidationVersion::V3 => messages.push(ValidationProtocols::V3( protocol_v3::StatementDistributionMessage::Statement( - relay_parent, + scheduling_parent, statement.as_unchecked().clone(), ) .into(), @@ -2437,7 +2446,7 @@ async fn handle_incoming_manifest( &state.per_session, &mut state.candidates, manifest.candidate_hash, - manifest.relay_parent, + manifest.scheduling_parent, manifest.para_id, grid::ManifestSummary { claimed_parent_hash: manifest.parent_head_data_hash, @@ -2491,7 +2500,7 @@ async fn handle_incoming_manifest( sender_index, &per_session.groups, scheduling_parent_state, - manifest.relay_parent, + manifest.scheduling_parent, manifest.group_index, manifest.candidate_hash, local_knowledge, @@ -2511,7 +2520,11 @@ async fn handle_incoming_manifest( 
state .request_manager - .get_or_insert(manifest.relay_parent, manifest.candidate_hash, manifest.group_index) + .get_or_insert( + manifest.scheduling_parent, + manifest.candidate_hash, + manifest.group_index, + ) .add_peer(peer); } } @@ -2523,7 +2536,7 @@ fn acknowledgement_and_statement_messages( validator_index: ValidatorIndex, groups: &Groups, scheduling_parent_state: &mut PerSchedulingParentState, - relay_parent: Hash, + scheduling_parent: Hash, group_index: GroupIndex, candidate_hash: CandidateHash, local_knowledge: StatementFilter, @@ -2557,7 +2570,7 @@ fn acknowledgement_and_statement_messages( let statement_messages = post_acknowledgement_statement_messages( validator_index, - relay_parent, + scheduling_parent, &mut local_validator.grid_tracker, &scheduling_parent_state.statement_store, &groups, @@ -2593,9 +2606,11 @@ async fn handle_incoming_acknowledgement( ); let candidate_hash = acknowledgement.candidate_hash; - let (relay_parent, parent_head_data_hash, group_index, para_id) = { + let (scheduling_parent, parent_head_data_hash, group_index, para_id) = { match state.candidates.get_confirmed(&candidate_hash) { - Some(c) => (c.relay_parent(), c.parent_head_data_hash(), c.group_index(), c.para_id()), + Some(c) => { + (c.scheduling_parent(), c.parent_head_data_hash(), c.group_index(), c.para_id()) + }, None => { modify_reputation( reputation, @@ -2617,7 +2632,7 @@ async fn handle_incoming_acknowledgement( &state.per_session, &mut state.candidates, candidate_hash, - relay_parent, + scheduling_parent, para_id, grid::ManifestSummary { claimed_parent_hash: parent_head_data_hash, @@ -2642,7 +2657,7 @@ async fn handle_incoming_acknowledgement( let messages = post_acknowledgement_statement_messages( sender_index, - relay_parent, + scheduling_parent, &mut local_validator.grid_tracker, &scheduling_parent_state.statement_store, &per_session.groups, @@ -2693,7 +2708,7 @@ pub(crate) async fn handle_backed_candidate_message( }; let scheduling_parent_state = - match 
state.per_scheduling_parent.get_mut(&confirmed.relay_parent()) { + match state.per_scheduling_parent.get_mut(&confirmed.scheduling_parent()) { None => return, Some(s) => s, }; @@ -2739,10 +2754,10 @@ async fn send_cluster_candidate_statements( ctx: &mut Context, state: &mut State, candidate_hash: CandidateHash, - relay_parent: Hash, + scheduling_parent: Hash, metrics: &Metrics, ) { - let scheduling_parent_state = match state.per_scheduling_parent.get_mut(&relay_parent) { + let scheduling_parent_state = match state.per_scheduling_parent.get_mut(&scheduling_parent) { None => return, Some(s) => s, }; @@ -2776,7 +2791,7 @@ async fn send_cluster_candidate_statements( for statement in statements { circulate_statement( ctx, - relay_parent, + scheduling_parent, scheduling_parent_state, per_session, &state.candidates, @@ -3115,7 +3130,7 @@ pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) { }; let scheduling_parent_state = - match state.per_scheduling_parent.get_mut(&confirmed.relay_parent()) { + match state.per_scheduling_parent.get_mut(&confirmed.scheduling_parent()) { None => return, Some(s) => s, }; @@ -3238,7 +3253,7 @@ pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) { gum::info!( target: LOG_TARGET, ?candidate_hash, - relay_parent = ?confirmed.relay_parent(), + relay_parent = ?confirmed.scheduling_parent(), ?group_index, "Dropping a request from a grid peer because the backing threshold is no longer met." 
); diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs index 48fcdb357484f..8c8cc4d093e88 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs @@ -208,7 +208,7 @@ fn backed_candidate_leads_to_advertisement() { ) => { assert_eq!(peers, vec![peer_c]); assert_eq!(manifest, BackedCandidateManifest { - relay_parent, + scheduling_parent, candidate_hash, group_index: local_group_index, para_id: local_para, @@ -314,7 +314,7 @@ fn received_advertisement_before_confirmation_leads_to_request() { // Receive an advertisement from C on an unconfirmed candidate. { let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: other_group, para_id: other_para, @@ -443,7 +443,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { let candidate_hash = candidate.hash(); let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: other_group, para_id: other_para, @@ -751,7 +751,7 @@ fn received_acknowledgements_for_locally_confirmed() { ) => { assert_eq!(peers, vec![peer_c]); assert_eq!(manifest, BackedCandidateManifest { - relay_parent, + scheduling_parent, candidate_hash, group_index: local_group, para_id: local_para, @@ -831,7 +831,7 @@ fn received_acknowledgements_for_externally_confirmed() { let candidate_hash = candidate.hash(); let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: other_group, para_id: other_para, @@ -987,7 +987,7 @@ fn received_advertisement_after_confirmation_before_backing() { send_new_topology(&mut overseer, state.make_dummy_topology()).await; let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: 
other_group, para_id: other_para, @@ -1164,7 +1164,7 @@ fn additional_statements_are_shared_after_manifest_exchange() { // Receive an advertisement from C. { let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: other_group, para_id: other_para, @@ -1321,7 +1321,7 @@ fn additional_statements_are_shared_after_manifest_exchange() { // Receive a manifest about the same candidate from peer D. Contains different statements. { let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: other_group, para_id: other_para, @@ -1555,7 +1555,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; let expected_manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: local_group_index, para_id: local_para, @@ -1774,7 +1774,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { ) => { assert_eq!(peers, vec![peer_c]); assert_eq!(manifest, BackedCandidateManifest { - relay_parent, + scheduling_parent, candidate_hash, group_index: local_group_index, para_id: local_para, @@ -1874,7 +1874,7 @@ fn inner_grid_statements_imported_to_backing(groups_for_first_para: usize) { // Receive an advertisement from C. 
{ let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: other_group, para_id: other_para, @@ -2092,7 +2092,7 @@ fn advertisements_rejected_from_incorrect_peers() { send_new_topology(&mut overseer, state.make_dummy_topology()).await; let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: other_group, para_id: other_para, @@ -2204,7 +2204,7 @@ fn manifest_rejected_with_unknown_relay_parent() { send_new_topology(&mut overseer, state.make_dummy_topology()).await; let manifest = BackedCandidateManifest { - relay_parent: unknown_parent, + scheduling_parent: unknown_parent, candidate_hash, group_index: other_group, para_id: other_para, @@ -2294,7 +2294,7 @@ fn manifest_rejected_when_not_a_validator() { send_new_topology(&mut overseer, state.make_dummy_topology()).await; let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: other_group, para_id: other_para, @@ -2389,7 +2389,7 @@ fn manifest_rejected_when_group_does_not_match_para() { send_new_topology(&mut overseer, state.make_dummy_topology()).await; let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: other_group, para_id: other_para, @@ -2492,7 +2492,7 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { send_new_topology(&mut overseer, state.make_dummy_topology()).await; let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: other_group, para_id: other_para, @@ -2664,7 +2664,7 @@ fn inactive_local_participates_in_grid() { // Receive an advertisement from A. 
let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: group_idx, para_id: para, diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index d3a9b63c11dea..d330f896e2f6a 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -282,7 +282,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { // Peer C advertises candidate 1. { let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash: candidate_hash_1, group_index: other_group, para_id: other_para, @@ -354,7 +354,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { // Peer C advertises candidate 2. { let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash: candidate_hash_2, group_index: other_group, para_id: other_para, @@ -431,7 +431,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { // would fail with "Un-requested Statement In Response". 
{ let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash: candidate_hash_3, group_index: other_group, para_id: other_para, @@ -532,7 +532,7 @@ fn peer_reported_for_not_enough_statements() { send_new_topology(&mut overseer, state.make_dummy_topology()).await; let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: other_group, para_id: other_para, @@ -2413,7 +2413,7 @@ fn local_node_respects_statement_mask() { ) => { assert_eq!(peers, vec![peer_c]); assert_eq!(manifest, BackedCandidateManifest { - relay_parent, + scheduling_parent, candidate_hash, group_index: local_group_index, para_id: local_para, @@ -2546,7 +2546,7 @@ fn should_delay_before_retrying_dropped_requests() { // Send a request about a candidate. { let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash: candidate_hash_1, group_index: other_group, para_id: other_para, @@ -2592,7 +2592,7 @@ fn should_delay_before_retrying_dropped_requests() { // We still send requests about different candidates as per usual. 
{ let manifest = BackedCandidateManifest { - relay_parent, + scheduling_parent: relay_parent, candidate_hash: candidate_hash_2, group_index: other_group, para_id: other_para, diff --git a/polkadot/node/subsystem-bench/src/lib/statement/mod.rs b/polkadot/node/subsystem-bench/src/lib/statement/mod.rs index 85542006c90ac..bc08d6759c626 100644 --- a/polkadot/node/subsystem-bench/src/lib/statement/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/statement/mod.rs @@ -376,7 +376,7 @@ pub async fn benchmark_statement_distribution( .unwrap() .hash(); let manifest = BackedCandidateManifest { - relay_parent: block_info.hash, + scheduling_parent: block_info.hash, candidate_hash, group_index: GroupIndex(group_index as u32), para_id: Id::new(group_index as u32 + 1), From 220b3c1e48845246190d634b35d0cb1e36b99024 Mon Sep 17 00:00:00 2001 From: eskimor Date: Tue, 10 Mar 2026 21:24:06 +0100 Subject: [PATCH 17/52] WIP: more relay_parent -> scheduling_parent (Mostly statement distribution) --- polkadot/node/core/backing/src/lib.rs | 37 +++++++++++-------- .../src/fragment_chain/mod.rs | 9 ++++- polkadot/node/core/pvf/src/execute/queue.rs | 4 +- .../src/validator_side/mod.rs | 2 + .../src/v2/candidates.rs | 22 +++++------ .../statement-distribution/src/v2/mod.rs | 9 +++-- .../statement-distribution/src/v2/requests.rs | 16 ++++---- .../src/v2/tests/grid.rs | 6 +-- .../src/v2/tests/requests.rs | 2 +- 9 files changed, 62 insertions(+), 45 deletions(-) diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index dda63e535b6c9..c5db5e6aaa0de 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -246,7 +246,7 @@ struct PerSchedulingParentState { struct PerCandidateState { persisted_validation_data: PersistedValidationData, seconded_locally: bool, - relay_parent: Hash, + scheduling_parent: Hash, } /// A cache for storing data per-session to reduce repeated @@ -766,7 +766,8 @@ async fn 
request_candidate_validation( ) -> Result { let (tx, rx) = oneshot::channel(); let is_system = candidate_receipt.descriptor.para_id().is_system(); - let relay_parent = candidate_receipt.descriptor.relay_parent(); + // PVF execution uses this for pruning obsolete jobs - we need the scheduling parent here: + let scheduling_parent = candidate_receipt.descriptor.scheduling_parent(); sender .send_message(CandidateValidationMessage::ValidateFromExhaustive { @@ -776,9 +777,9 @@ async fn request_candidate_validation( pov, executor_params, exec_kind: if is_system { - PvfExecKind::BackingSystemParas(relay_parent) + PvfExecKind::BackingSystemParas(scheduling_parent) } else { - PvfExecKind::Backing(relay_parent) + PvfExecKind::Backing(scheduling_parent) }, response_sender: tx, }) @@ -1016,7 +1017,7 @@ async fn handle_active_leaves_update( // are known. state .per_candidate - .retain(|_, pc| state.per_scheduling_parent.contains_key(&pc.relay_parent)); + .retain(|_, pc| state.per_scheduling_parent.contains_key(&pc.scheduling_parent)); // Get relay parents which might be fresh but might be known already // that are explicit or implicit from the new active leaf. @@ -1309,14 +1310,18 @@ async fn seconding_sanity_check( let mut leaves_for_seconding = Vec::new(); let mut responses = FuturesOrdered::>>::new(); - let candidate_relay_parent = hypothetical_candidate.relay_parent(); + // Scheduling context: the scheduling parent determines which leaves this candidate + // can be seconded under (it must be in the allowed ancestry). + let candidate_scheduling_parent = hypothetical_candidate.scheduling_parent(); let candidate_hash = hypothetical_candidate.candidate_hash(); for head in implicit_view.leaves() { - // Check that the candidate relay parent is allowed for para, skip the - // leaf otherwise. + // Check that the candidate scheduling parent is allowed under this leaf. 
let allowed_parents_for_para = implicit_view.known_allowed_relay_parents_under(head); - if !allowed_parents_for_para.unwrap_or_default().contains(&candidate_relay_parent) { + if !allowed_parents_for_para + .unwrap_or_default() + .contains(&candidate_scheduling_parent) + { continue; } @@ -1388,13 +1393,13 @@ async fn handle_can_second_request( request: CanSecondRequest, tx: oneshot::Sender, ) { - let relay_parent = request.candidate_scheduling_parent; - let response = if state.per_scheduling_parent.get(&relay_parent).is_some() { + let scheduling_parent = request.candidate_scheduling_parent; + let response = if state.per_scheduling_parent.get(&scheduling_parent).is_some() { let hypothetical_candidate = HypotheticalCandidate::Incomplete { candidate_hash: request.candidate_hash, candidate_para: request.candidate_para_id, parent_head_data_hash: request.parent_head_data_hash, - candidate_relay_parent: relay_parent, + candidate_scheduling_parent: scheduling_parent, }; let result = @@ -1505,14 +1510,13 @@ async fn handle_validated_candidate_command( let candidate_hash = candidate.hash(); gum::debug!( target: LOG_TARGET, - relay_parent = ?candidate.descriptor().relay_parent(), + scheduling_parent = ?candidate.descriptor().scheduling_parent(), ?candidate_hash, "Attempted to second candidate but was rejected by prospective parachains", ); - // Ensure the collator is reported. ctx.send_message(CollatorProtocolMessage::Invalid( - candidate.descriptor().relay_parent(), + candidate.descriptor().scheduling_parent(), candidate, )) .await; @@ -1711,7 +1715,8 @@ async fn import_statement( persisted_validation_data: pvd.clone(), // This is set after importing when seconding locally. seconded_locally: false, - relay_parent: candidate.descriptor.relay_parent(), + // Scheduling context: used for cleanup against per_scheduling_parent. 
+ scheduling_parent: candidate.descriptor.scheduling_parent(), }, ); } diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 36c79f4def023..bf70792d936b1 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -634,6 +634,7 @@ struct FragmentNode { } impl FragmentNode { + /// Execution context: the relay parent determines PVD, constraints, and message state. fn relay_parent(&self) -> Hash { self.fragment.relay_parent().hash } @@ -643,6 +644,7 @@ impl From<&FragmentNode> for CandidateEntry { fn from(node: &FragmentNode) -> Self { // We don't need to perform the checks done in `CandidateEntry::new()`, since a // `FragmentNode` always comes from a `CandidateEntry` + // Execution context: preserves relay parent for constraint validation. let relay_parent = node.relay_parent(); Self { candidate_hash: node.candidate_hash, @@ -1020,6 +1022,8 @@ impl FragmentChain { // The value returned may not be valid if we want to add a candidate pending availability, which // may have a relay parent which is out of scope. Special handling is needed in that case. // `None` is returned if the candidate's relay parent info cannot be found. + // Execution context: the relay parent determines constraint progression + // (HRMP watermark, DMP advancement must not regress). fn earliest_relay_parent( &self, relay_chain_scope: &RelayChainScope, @@ -1110,6 +1114,7 @@ impl FragmentChain { relay_chain_scope: &RelayChainScope, candidate: &impl HypotheticalOrConcreteCandidate, ) -> Result<(), Error> { + // TODO: This still needs to be untangled! 
let relay_parent = candidate.relay_parent(); let parent_head_hash = candidate.parent_head_data_hash(); @@ -1181,6 +1186,8 @@ impl FragmentChain { .base_constraints .apply_modifications(&parent_candidate.cumulative_modifications) .map_err(Error::ComputeConstraints)?, + // Execution context: relay parent block number for HRMP + // watermark and DMP constraint checking. relay_chain_scope .ancestor(&parent_candidate.relay_parent()) .map(|rp| rp.number), @@ -1496,7 +1503,7 @@ impl FragmentChain { // Update the cumulative constraint modifications. cumulative_modifications.stack(fragment.constraint_modifications()); - // Update the earliest rp + // Execution context: track earliest relay parent for constraint validation. earliest_rp = fragment.relay_parent().clone(); let node = FragmentNode { diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index e2dba55e0ed1c..e778e926aadf4 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -316,12 +316,12 @@ impl Queue { .iter() .enumerate() .filter_map(|(index, job)| { - let relay_parent = match job.exec_kind { + let scheduling_parent = match job.exec_kind { PvfExecKind::Backing(x) | PvfExecKind::BackingSystemParas(x) => x, _ => return None, }; let in_active_fork = self.active_leaves.iter().any(|(hash, ancestors)| { - *hash == relay_parent || ancestors.contains(&relay_parent) + *hash == scheduling_parent || ancestors.contains(&scheduling_parent) }); if in_active_fork { None diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index 799a8292a8bb8..cbd9dbd95bac7 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -2625,6 +2625,8 @@ async fn kick_off_seconding( (pvd, maybe_parent_head_data, Some(parent_head_data_hash)) }, 
(CollationVersion::V1, _) => { + // Execution context: relay parent for fetching PVD from relay chain state. + // TODO: We will need to use the new runtime API here: let pvd = request_persisted_validation_data( ctx.sender(), candidate_receipt.descriptor().relay_parent(), diff --git a/polkadot/node/network/statement-distribution/src/v2/candidates.rs b/polkadot/node/network/statement-distribution/src/v2/candidates.rs index ada499bca1847..c0027f82a6c65 100644 --- a/polkadot/node/network/statement-distribution/src/v2/candidates.rs +++ b/polkadot/node/network/statement-distribution/src/v2/candidates.rs @@ -154,7 +154,7 @@ impl Candidates { assigned_group: GroupIndex, ) -> Option { let parent_hash = persisted_validation_data.parent_head.hash(); - let relay_parent = candidate_receipt.descriptor.relay_parent(); + let scheduling_parent = candidate_receipt.descriptor.scheduling_parent(); let para_id = candidate_receipt.descriptor.para_id(); let prev_state = self.candidates.insert( @@ -185,7 +185,7 @@ impl Candidates { let mut reckoning = PostConfirmationReckoning::default(); for (leaf_hash, x) in u.unconfirmed_importable_under { - if x.relay_parent == relay_parent && + if x.scheduling_parent == scheduling_parent && x.parent_hash == parent_hash && x.para_id == para_id { @@ -209,7 +209,7 @@ impl Candidates { } } - if claims.check(relay_parent, assigned_group, parent_hash, para_id) { + if claims.check(scheduling_parent, assigned_group, parent_hash, para_id) { reckoning.correct.insert(peer); } else { reckoning.incorrect.insert(peer); @@ -256,10 +256,10 @@ impl Candidates { candidate_hash, candidate_para, parent_head_data_hash, - candidate_relay_parent, + candidate_scheduling_parent, } => { let u = UnconfirmedImportable { - relay_parent: *candidate_relay_parent, + scheduling_parent: *candidate_scheduling_parent, parent_hash: *parent_head_data_hash, para_id: *candidate_para, }; @@ -404,7 +404,7 @@ impl CandidateClaims { // properties of an unconfirmed but hypothetically importable 
candidate. #[derive(Debug, Hash, PartialEq, Eq)] struct UnconfirmedImportable { - relay_parent: Hash, + scheduling_parent: Hash, parent_hash: Hash, para_id: ParaId, } @@ -477,7 +477,7 @@ impl UnconfirmedCandidate { }); self.unconfirmed_importable_under - .retain(|(l, props)| leaves.contains(l) && relay_parent_live(&props.relay_parent)); + .retain(|(l, props)| leaves.contains(l) && relay_parent_live(&props.scheduling_parent)); } fn extend_hypotheticals( @@ -497,7 +497,7 @@ impl UnconfirmedCandidate { candidate_hash, candidate_para: *para_id, parent_head_data_hash: *parent_head_hash, - candidate_relay_parent: *relay_parent, + candidate_scheduling_parent: *relay_parent, }); } } @@ -1263,19 +1263,19 @@ mod tests { candidate_hash: candidate_hash_b, candidate_para: 1.into(), parent_head_data_hash: candidate_head_data_hash_a, - candidate_relay_parent: relay_hash, + candidate_scheduling_parent: relay_hash, }; let hypothetical_c = HypotheticalCandidate::Incomplete { candidate_hash: candidate_hash_c, candidate_para: 1.into(), parent_head_data_hash: candidate_head_data_hash_a, - candidate_relay_parent: relay_hash, + candidate_scheduling_parent: relay_hash, }; let hypothetical_d = HypotheticalCandidate::Incomplete { candidate_hash: candidate_hash_d, candidate_para: 1.into(), parent_head_data_hash: candidate_head_data_hash_b, - candidate_relay_parent: relay_hash, + candidate_scheduling_parent: relay_hash, }; let hypotheticals = candidates.frontier_hypotheticals(Some((relay_hash, 1.into()))); diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 107f8a57c87e6..b56c1639d6375 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -954,7 +954,7 @@ async fn send_pending_cluster_statements( #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] async fn send_pending_grid_messages( ctx: &mut Context, 
- relay_parent: Hash, + scheduling_parent: Hash, peer_id: &(PeerId, ValidationVersion), peer_validator_id: ValidatorIndex, groups: &Groups, @@ -1037,7 +1037,7 @@ async fn send_pending_grid_messages( peer_validator_id, groups, scheduling_parent_state, - relay_parent, + scheduling_parent, group_index, candidate_hash, local_knowledge, @@ -1067,7 +1067,7 @@ async fn send_pending_grid_messages( .filter_map(|(originator, compact)| { let res = pending_statement_network_message( &scheduling_parent_state.statement_store, - relay_parent, + scheduling_parent, peer_id, originator, compact.clone(), @@ -2825,11 +2825,12 @@ async fn apply_post_confirmation( let candidate_hash = post_confirmation.hypothetical.candidate_hash(); state.request_manager.remove_for(candidate_hash); + // Scheduling context: look up per_scheduling_parent state for statement circulation. send_cluster_candidate_statements( ctx, state, candidate_hash, - post_confirmation.hypothetical.relay_parent(), + post_confirmation.hypothetical.scheduling_parent(), metrics, ) .await; diff --git a/polkadot/node/network/statement-distribution/src/v2/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs index 09f336d241414..6b6021a6b33f1 100644 --- a/polkadot/node/network/statement-distribution/src/v2/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs @@ -72,8 +72,8 @@ use std::{ /// anything other than the candidate hash. #[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct CandidateIdentifier { - /// The relay-parent this candidate is ostensibly under. - pub relay_parent: Hash, + /// The scheduling-parent this candidate is ostensibly under. + pub scheduling_parent: Hash, /// The hash of the candidate. 
pub candidate_hash: CandidateHash, /// The index of the group claiming to be assigned to the candidate's @@ -184,7 +184,8 @@ impl RequestManager { candidate_hash: CandidateHash, group_index: GroupIndex, ) -> Entry<'_> { - let identifier = CandidateIdentifier { relay_parent, candidate_hash, group_index }; + let identifier = + CandidateIdentifier { scheduling_parent: relay_parent, candidate_hash, group_index }; let (candidate, fresh) = match self.requests.entry(identifier.clone()) { HEntry::Occupied(e) => (e.into_mut(), false), @@ -245,7 +246,7 @@ impl RequestManager { // Remove from `by_priority` and `requests`. self.by_priority.retain(|(_priority, id)| { - let retain = relay_parent != id.relay_parent; + let retain = relay_parent != id.scheduling_parent; if !retain { self.requests.remove(id); candidate_hashes.insert(id.candidate_hash); @@ -257,7 +258,7 @@ impl RequestManager { for candidate_hash in candidate_hashes { match self.unique_identifiers.entry(candidate_hash) { HEntry::Occupied(mut entry) => { - entry.get_mut().retain(|id| relay_parent != id.relay_parent); + entry.get_mut().retain(|id| relay_parent != id.scheduling_parent); if entry.get().is_empty() { entry.remove(); } @@ -703,7 +704,8 @@ fn validate_complete_response( // sanity-check candidate response. 
// note: roughly ascending cost of operations { - if response.candidate_receipt.descriptor.relay_parent() != identifier.relay_parent { + if response.candidate_receipt.descriptor.scheduling_parent() != identifier.scheduling_parent + { return invalid_candidate_output(COST_INVALID_RESPONSE); } @@ -766,7 +768,7 @@ fn validate_complete_response( let index_in_group = |v: ValidatorIndex| group.iter().position(|x| &v == x); let signing_context = - SigningContext { parent_hash: identifier.relay_parent, session_index: session }; + SigningContext { parent_hash: identifier.scheduling_parent, session_index: session }; for unchecked_statement in response.statements.into_iter().take(group.len() * 2) { // ensure statement is from a validator in the group. diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs index 8c8cc4d093e88..d95fedc7239ee 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs @@ -208,7 +208,7 @@ fn backed_candidate_leads_to_advertisement() { ) => { assert_eq!(peers, vec![peer_c]); assert_eq!(manifest, BackedCandidateManifest { - scheduling_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: local_group_index, para_id: local_para, @@ -751,7 +751,7 @@ fn received_acknowledgements_for_locally_confirmed() { ) => { assert_eq!(peers, vec![peer_c]); assert_eq!(manifest, BackedCandidateManifest { - scheduling_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: local_group, para_id: local_para, @@ -1774,7 +1774,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { ) => { assert_eq!(peers, vec![peer_c]); assert_eq!(manifest, BackedCandidateManifest { - scheduling_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: local_group_index, para_id: local_para, diff --git 
a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index d330f896e2f6a..3ceab87795cd2 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -2413,7 +2413,7 @@ fn local_node_respects_statement_mask() { ) => { assert_eq!(peers, vec![peer_c]); assert_eq!(manifest, BackedCandidateManifest { - scheduling_parent, + scheduling_parent: relay_parent, candidate_hash, group_index: local_group_index, para_id: local_para, From 699edaef113adcbc1df2a8c5b33aa939ed74270d Mon Sep 17 00:00:00 2001 From: eskimor Date: Tue, 10 Mar 2026 23:16:03 +0100 Subject: [PATCH 18/52] More relay_parent -> scheduling_parent Get rid of HypotheticalOrConcreteCandidate --- .../src/fragment_chain/mod.rs | 219 +++++++++--------- .../src/fragment_chain/tests.rs | 12 +- .../core/prospective-parachains/src/lib.rs | 33 ++- .../statement-distribution/src/v2/mod.rs | 22 +- polkadot/node/subsystem-types/src/messages.rs | 20 +- .../src/inclusion_emulator/mod.rs | 58 +---- 6 files changed, 170 insertions(+), 194 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index bf70792d936b1..a9d9da77a2e04 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -130,12 +130,11 @@ use super::LOG_TARGET; use polkadot_node_subsystem::messages::{Ancestors, BackableCandidateRef}; use polkadot_node_subsystem_util::inclusion_emulator::{ self, validate_commitments, ConstraintModifications, Constraints, Fragment, - HypotheticalOrConcreteCandidate, ProspectiveCandidate, RelayChainBlockInfo, + ProspectiveCandidate, RelayChainBlockInfo, }; use polkadot_primitives::{ - BlockNumber, CandidateCommitments, CandidateHash, - 
CommittedCandidateReceiptV2 as CommittedCandidateReceipt, Hash, HeadData, Id as ParaId, - PersistedValidationData, ValidationCodeHash, + BlockNumber, CandidateHash, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, Hash, + HeadData, Id as ParaId, PersistedValidationData, }; use thiserror::Error; @@ -423,48 +422,6 @@ impl CandidateEntry { } } -impl HypotheticalOrConcreteCandidate for CandidateEntry { - fn commitments(&self) -> Option<&CandidateCommitments> { - Some(&self.candidate.commitments) - } - - fn persisted_validation_data(&self) -> Option<&PersistedValidationData> { - Some(&self.candidate.persisted_validation_data) - } - - fn validation_code_hash(&self) -> Option { - Some(self.candidate.validation_code_hash) - } - - fn parent_head_data_hash(&self) -> Hash { - self.parent_head_data_hash - } - - fn output_head_data_hash(&self) -> Option { - Some(self.output_head_data_hash) - } - - /// Get the relay parent hash (execution context). - /// - /// For V3 candidates, this determines execution context and can be older than - /// scheduling_parent. For V1/V2 candidates, this is the same as - /// scheduling_parent. - fn relay_parent(&self) -> Hash { - self.relay_parent - } - - fn candidate_hash(&self) -> CandidateHash { - self.candidate_hash - } - - /// Get the scheduling parent hash. - /// - /// For V3 candidates, this is the scheduling parent (used for backing group selection). - /// For V1/V2 candidates, this equals the relay parent. - fn scheduling_parent(&self) -> Hash { - self.scheduling_parent - } -} /// A candidate existing on-chain but pending availability, for special treatment /// in the [`Scope`]. 
@@ -891,9 +848,9 @@ impl FragmentChain { pub fn can_add_candidate_as_potential( &self, relay_chain_scope: &RelayChainScope, - candidate: &impl HypotheticalOrConcreteCandidate, + candidate: &CandidateEntry, ) -> Result<(), Error> { - let candidate_hash = candidate.candidate_hash(); + let candidate_hash = candidate.candidate_hash; if self.chain.contains(&candidate_hash) || self.unconnected.contains(&candidate_hash) { return Err(Error::CandidateAlreadyKnown); @@ -902,6 +859,30 @@ impl FragmentChain { self.check_potential(relay_chain_scope, candidate) } + /// Lightweight check for whether a hypothetical candidate (possibly incomplete) could be added + /// to this chain. Only performs checks that don't require the relay parent or full candidate + /// data: scheduling parent in scope, fork checks, cycle checks. + pub fn can_add_candidate_as_potential_hypothetical( + &self, + relay_chain_scope: &RelayChainScope, + scheduling_parent: Hash, + candidate_hash: CandidateHash, + parent_head_hash: Hash, + output_head_hash: Option, + ) -> Result<(), Error> { + if self.chain.contains(&candidate_hash) || self.unconnected.contains(&candidate_hash) { + return Err(Error::CandidateAlreadyKnown); + } + + self.check_potential_lightweight( + relay_chain_scope, + scheduling_parent, + candidate_hash, + parent_head_hash, + output_head_hash, + ) + } + /// Try adding a seconded candidate, if the candidate has potential. It will never be added to /// the chain directly in the seconded state, it will only be part of the unconnected storage. pub fn try_adding_seconded_candidate( @@ -1106,38 +1087,19 @@ impl FragmentChain { Ok(()) } - // Checks the potential of a candidate to be added to the chain now or in the future. - // It works both with concrete candidates for which we have the full PVD and committed receipt, - // but also does some more basic checks for incomplete candidates (before even fetching them). 
- fn check_potential( + // Lightweight potential check using only scheduling_parent and parent/output head hashes. + // Used for hypothetical (possibly incomplete) candidates where we don't have the relay parent + // or full candidate data. Checks: scheduling parent in scope, zero-length cycle, fork rules, + // cycle/invalid tree checks. + fn check_potential_lightweight( &self, relay_chain_scope: &RelayChainScope, - candidate: &impl HypotheticalOrConcreteCandidate, + scheduling_parent: Hash, + candidate_hash: CandidateHash, + parent_head_hash: Hash, + output_head_hash: Option, ) -> Result<(), Error> { - // TODO: This still needs to be untangled! - let relay_parent = candidate.relay_parent(); - let parent_head_hash = candidate.parent_head_data_hash(); - - // trivial 0-length cycle. - if let Some(output_head_hash) = candidate.output_head_data_hash() { - if parent_head_hash == output_head_hash { - return Err(Error::ZeroLengthCycle); - } - } - - // Check if the relay parent is in scope. - let Some(relay_parent) = relay_chain_scope.ancestor(&relay_parent) else { - return Err(Error::RelayParentNotInScope( - relay_parent, - relay_chain_scope.earliest_relay_parent().hash, - )); - }; - - // For V3 candidates, also check if the scheduling parent is in scope. - // The scheduling parent determines the backing group and must be within the implicit view. - // For V1/V2 candidates, scheduling_parent equals relay_parent, so this is redundant but - // harmless. - let scheduling_parent = candidate.scheduling_parent(); + // Check if the scheduling parent is in scope. if relay_chain_scope.ancestor(&scheduling_parent).is_none() { return Err(Error::SchedulingParentNotInScope( scheduling_parent, @@ -1145,14 +1107,11 @@ impl FragmentChain { )); } - // Check if the relay parent moved backwards from the latest candidate pending availability. 
- let earliest_rp_of_pending_availability = - self.earliest_relay_parent_pending_availability(relay_chain_scope); - if relay_parent.number < earliest_rp_of_pending_availability.number { - return Err(Error::RelayParentPrecedesCandidatePendingAvailability( - relay_parent.hash, - earliest_rp_of_pending_availability.hash, - )); + // Trivial 0-length cycle. + if let Some(output_head_hash) = output_head_hash { + if parent_head_hash == output_head_hash { + return Err(Error::ZeroLengthCycle); + } } // If it's a fork with a backed candidate in the current chain. @@ -1164,11 +1123,57 @@ impl FragmentChain { // If the candidate is backed and in the current chain, accept only a candidate // according to the fork selection rule. - if fork_selection_rule(other_candidate, &candidate.candidate_hash()) == Ordering::Less { + if fork_selection_rule(other_candidate, &candidate_hash) == Ordering::Less { return Err(Error::ForkChoiceRule(*other_candidate)); } } + // Check for cycles or invalid tree transitions. + if let Some(ref output_head_hash) = output_head_hash { + self.check_cycles_or_invalid_tree(output_head_hash)?; + } + + Ok(()) + } + + // Full potential check for concrete candidates (CandidateEntry). Performs all lightweight + // checks plus relay-parent-dependent validation: relay parent in scope, constraint checking, + // min relay parent number checks. + fn check_potential( + &self, + relay_chain_scope: &RelayChainScope, + candidate: &CandidateEntry, + ) -> Result<(), Error> { + let parent_head_hash = candidate.parent_head_data_hash; + + // Run the lightweight checks first. + self.check_potential_lightweight( + relay_chain_scope, + candidate.scheduling_parent, + candidate.candidate_hash, + parent_head_hash, + Some(candidate.output_head_data_hash), + )?; + + // Execution context: check if the relay parent is in scope. 
+ let relay_parent_hash = candidate.relay_parent; + let Some(relay_parent) = relay_chain_scope.ancestor(&relay_parent_hash) else { + return Err(Error::RelayParentNotInScope( + relay_parent_hash, + relay_chain_scope.earliest_relay_parent().hash, + )); + }; + + // Check if the relay parent moved backwards from the latest candidate pending availability. + let earliest_rp_of_pending_availability = + self.earliest_relay_parent_pending_availability(relay_chain_scope); + if relay_parent.number < earliest_rp_of_pending_availability.number { + return Err(Error::RelayParentPrecedesCandidatePendingAvailability( + relay_parent.hash, + earliest_rp_of_pending_availability.hash, + )); + } + // Try seeing if the parent candidate is in the current chain or if it is the latest // included candidate. If so, get the constraints the candidate must satisfy. let (is_unconnected, constraints, maybe_min_relay_parent_number) = @@ -1200,37 +1205,29 @@ impl FragmentChain { (true, self.scope.base_constraints.clone(), None) }; - // Check for cycles or invalid tree transitions. - if let Some(ref output_head_hash) = candidate.output_head_data_hash() { - self.check_cycles_or_invalid_tree(output_head_hash)?; - } + // Check against constraints. + let commitments = &candidate.candidate.commitments; + let pvd = &candidate.candidate.persisted_validation_data; + let validation_code_hash = candidate.candidate.validation_code_hash; - // Check against constraints if we have a full concrete candidate. - if let (Some(commitments), Some(pvd), Some(validation_code_hash)) = ( - candidate.commitments(), - candidate.persisted_validation_data(), - candidate.validation_code_hash(), - ) { - if is_unconnected { - // If the parent is not yet part of the chain, we can check the commitments only - // if we have the full candidate. 
- return validate_commitments( - &self.scope.base_constraints, - &relay_parent, - commitments, - &validation_code_hash, - ) - .map_err(Error::CheckAgainstConstraints); - } - Fragment::check_against_constraints( + if is_unconnected { + // If the parent is not yet part of the chain, we can only check the commitments. + return validate_commitments( + &self.scope.base_constraints, &relay_parent, - &constraints, commitments, &validation_code_hash, - pvd, ) - .map_err(Error::CheckAgainstConstraints)?; + .map_err(Error::CheckAgainstConstraints); } + Fragment::check_against_constraints( + &relay_parent, + &constraints, + commitments, + &validation_code_hash, + pvd, + ) + .map_err(Error::CheckAgainstConstraints)?; if relay_parent.number < constraints.min_relay_parent_number { return Err(Error::RelayParentMovedBackwards); diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index 6cfcb8a30a643..c46770f617667 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -550,7 +550,7 @@ fn test_populate_and_check_potential() { assert_eq!(chain.unconnected_len(), 0); assert_matches!( chain.can_add_candidate_as_potential(&relay_chain_scope, &candidate_a_entry), - Err(Error::RelayParentNotInScope(_, _)) + Err(Error::SchedulingParentNotInScope(_, _)) ); // However, if taken independently, both B and C still have potential, since we // don't know that A doesn't. @@ -700,7 +700,7 @@ fn test_populate_and_check_potential() { assert_matches!( chain.can_add_candidate_as_potential(&relay_chain_scope, &candidate_a_entry), - Err(Error::RelayParentNotInScope(_, _)) + Err(Error::SchedulingParentNotInScope(_, _)) ); // However, if taken independently, both B and C still have potential, since we // don't know that A doesn't. 
@@ -722,11 +722,11 @@ fn test_populate_and_check_potential() { assert_matches!( chain.can_add_candidate_as_potential(&relay_chain_scope, &candidate_a_entry), - Err(Error::RelayParentNotInScope(_, _)) + Err(Error::SchedulingParentNotInScope(_, _)) ); assert_matches!( chain.can_add_candidate_as_potential(&relay_chain_scope, &candidate_b_entry), - Err(Error::RelayParentNotInScope(_, _)) + Err(Error::SchedulingParentNotInScope(_, _)) ); // However, if taken independently, C still has potential, since we // don't know that A and B don't @@ -1764,8 +1764,8 @@ fn test_v3_scheduling_parent_validation() { CandidateEntry::new(candidate_hash, candidate, pvd, CandidateState::Backed).unwrap(); // Verify the entry correctly tracks both parents - assert_eq!(candidate_entry.relay_parent(), relay_parent_x); - assert_eq!(candidate_entry.scheduling_parent(), relay_parent_y); + assert_eq!(candidate_entry.relay_parent, relay_parent_x); + assert_eq!(candidate_entry.scheduling_parent, relay_parent_y); storage.add_candidate_entry(candidate_entry).unwrap(); diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 117041f486f29..bbb7511343f86 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -819,8 +819,37 @@ fn answer_hypothetical_membership_request( let para_id = &candidate.candidate_para(); let Some(fragment_chain) = leaf_view.fragment_chains.get(para_id) else { continue }; - let res = fragment_chain - .can_add_candidate_as_potential(&leaf_view.relay_chain_scope, candidate); + let res = match candidate { + HypotheticalCandidate::Complete { + candidate_hash, + ref receipt, + ref persisted_validation_data, + } => { + // For complete candidates, build a CandidateEntry and run the full + // potential check including constraint validation. 
+ let entry = fragment_chain::CandidateEntry::new_seconded( + *candidate_hash, + (**receipt).clone(), + persisted_validation_data.clone(), + ); + match entry { + Ok(entry) => fragment_chain + .can_add_candidate_as_potential( + &leaf_view.relay_chain_scope, + &entry, + ), + Err(_) => continue, + } + }, + HypotheticalCandidate::Incomplete { .. } => + fragment_chain.can_add_candidate_as_potential_hypothetical( + &leaf_view.relay_chain_scope, + candidate.scheduling_parent(), + candidate.candidate_hash(), + candidate.parent_head_data_hash(), + candidate.output_head_data_hash(), + ), + }; match res { Err(FragmentChainError::CandidateAlreadyKnown) | Ok(()) => { membership.push(*active_leaf); diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index b56c1639d6375..71e386cd64093 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -2849,7 +2849,8 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St let peer_advertised = |identifier: &CandidateIdentifier, peer: &_| { let peer_data = peers.get(peer)?; - let scheduling_parent_state = state.per_scheduling_parent.get(&identifier.relay_parent)?; + let scheduling_parent_state = + state.per_scheduling_parent.get(&identifier.scheduling_parent)?; let per_session = state.per_session.get(&scheduling_parent_state.session)?; let local_validator = scheduling_parent_state.local_validator.as_ref()?; @@ -2877,9 +2878,9 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St None }; let request_props = |identifier: &CandidateIdentifier| { - let &CandidateIdentifier { relay_parent, group_index, .. } = identifier; + let &CandidateIdentifier { scheduling_parent, group_index, .. 
} = identifier; - let scheduling_parent_state = state.per_scheduling_parent.get(&relay_parent)?; + let scheduling_parent_state = state.per_scheduling_parent.get(&scheduling_parent)?; let per_session = state.per_session.get(&scheduling_parent_state.session)?; let group = per_session.groups.get(group_index)?; let seconding_limit = @@ -2961,7 +2962,7 @@ pub(crate) async fn handle_response( reputation: &mut ReputationAggregator, metrics: &Metrics, ) { - let &requests::CandidateIdentifier { relay_parent, candidate_hash, group_index } = + let &requests::CandidateIdentifier { scheduling_parent, candidate_hash, group_index } = response.candidate_identifier(); let peer = *response.requested_peer(); @@ -2973,10 +2974,11 @@ pub(crate) async fn handle_response( ); let post_confirmation = { - let scheduling_parent_state = match state.per_scheduling_parent.get_mut(&relay_parent) { - None => return, - Some(s) => s, - }; + let scheduling_parent_state = + match state.per_scheduling_parent.get_mut(&scheduling_parent) { + None => return, + Some(s) => s, + }; let per_session = match state.per_session.get(&scheduling_parent_state.session) { None => return, @@ -3073,7 +3075,7 @@ pub(crate) async fn handle_response( return; } - let scheduling_parent_state = match state.per_scheduling_parent.get_mut(&relay_parent) { + let scheduling_parent_state = match state.per_scheduling_parent.get_mut(&scheduling_parent) { None => return, Some(s) => s, }; @@ -3087,7 +3089,7 @@ pub(crate) async fn handle_response( ctx, candidate_hash, group_index, - &relay_parent, + &scheduling_parent, scheduling_parent_state, confirmed, per_session, diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 49d221ac04e54..91f38958191fa 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -295,7 +295,7 @@ pub enum CollatorProtocolMessage { /// We recommended a particular candidate to be seconded, but it 
was invalid; penalize the /// collator. /// - /// The hash is the relay parent. + /// The hash is the scheduling parent. Invalid(Hash, CandidateReceipt), /// The candidate we recommended to be seconded was validated successfully. /// @@ -1324,8 +1324,8 @@ pub enum HypotheticalCandidate { candidate_para: ParaId, /// The claimed head-data hash of the candidate. parent_head_data_hash: Hash, - /// The claimed relay parent of the candidate. - candidate_relay_parent: Hash, + /// The claimed scheduling parent of the candidate. + candidate_scheduling_parent: Hash, }, } @@ -1358,14 +1358,18 @@ impl HypotheticalCandidate { } } - /// Get candidate's relay parent. - pub fn relay_parent(&self) -> Hash { + /// Get candidate's scheduling parent. + /// + /// For `Complete` candidates, this is the scheduling parent from the descriptor + /// (which equals relay_parent for V1/V2 descriptors). + /// For `Incomplete` candidates, this is the claimed scheduling parent. + pub fn scheduling_parent(&self) -> Hash { match *self { HypotheticalCandidate::Complete { ref receipt, .. } => { - receipt.descriptor.relay_parent() + receipt.descriptor.scheduling_parent() }, - HypotheticalCandidate::Incomplete { candidate_relay_parent, .. } => { - candidate_relay_parent + HypotheticalCandidate::Incomplete { candidate_scheduling_parent, .. } => { + candidate_scheduling_parent }, } } diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs index 5d86eda84baed..edbb1086ddd21 100644 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs +++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -80,10 +80,9 @@ /// /// That means a few blocks of execution time lost, which is not a big deal for code upgrades /// in practice at most once every few weeks. 
-use polkadot_node_subsystem::messages::HypotheticalCandidate; use polkadot_primitives::{ async_backing::Constraints as PrimitiveConstraints, skip_ump_signals, BlockNumber, - CandidateCommitments, CandidateHash, Hash, HeadData, Id as ParaId, PersistedValidationData, + CandidateCommitments, Hash, HeadData, Id as ParaId, PersistedValidationData, UpgradeRestriction, ValidationCodeHash, }; use std::{collections::HashMap, sync::Arc}; @@ -794,61 +793,6 @@ fn validate_against_constraints( .map_err(FragmentValidityError::OutputsInvalid) } -/// Trait for a hypothetical or concrete candidate, as needed when assessing the validity of a -/// potential candidate. -pub trait HypotheticalOrConcreteCandidate { - /// Return a reference to the candidate commitments, if present. - fn commitments(&self) -> Option<&CandidateCommitments>; - /// Return a reference to the persisted validation data, if present. - fn persisted_validation_data(&self) -> Option<&PersistedValidationData>; - /// Return a reference to the validation code hash, if present. - fn validation_code_hash(&self) -> Option; - /// Return the parent head hash. - fn parent_head_data_hash(&self) -> Hash; - /// Return the output head hash, if present. - fn output_head_data_hash(&self) -> Option; - /// Return the relay parent hash. - fn relay_parent(&self) -> Hash; - /// Return the candidate hash. - fn candidate_hash(&self) -> CandidateHash; - /// Return the scheduling parent hash. - /// - /// For V3 candidates, this may differ from relay_parent. - /// For V1/V2 candidates and hypothetical candidates, this defaults to relay_parent. 
- fn scheduling_parent(&self) -> Hash { - self.relay_parent() - } -} - -impl HypotheticalOrConcreteCandidate for HypotheticalCandidate { - fn commitments(&self) -> Option<&CandidateCommitments> { - self.commitments() - } - - fn persisted_validation_data(&self) -> Option<&PersistedValidationData> { - self.persisted_validation_data() - } - - fn validation_code_hash(&self) -> Option { - self.validation_code_hash() - } - - fn parent_head_data_hash(&self) -> Hash { - self.parent_head_data_hash() - } - - fn output_head_data_hash(&self) -> Option { - self.output_head_data_hash() - } - - fn relay_parent(&self) -> Hash { - self.relay_parent() - } - - fn candidate_hash(&self) -> CandidateHash { - self.candidate_hash() - } -} #[cfg(test)] mod tests { From 4e52c0ceaeafca68465145f97172f1e4d748076d Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 11 Mar 2026 08:06:47 +0100 Subject: [PATCH 19/52] Use a recent block for executor params fetch --- polkadot/node/core/approval-voting/src/lib.rs | 61 ++++++++----------- .../node/core/approval-voting/src/tests.rs | 41 ------------- 2 files changed, 25 insertions(+), 77 deletions(-) diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index f8117fc617dad..77dd7a4a17d12 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -847,27 +847,6 @@ impl CurrentlyCheckingSet { } } -async fn get_extended_session_info<'a, Sender>( - runtime_info: &'a mut RuntimeInfo, - sender: &mut Sender, - block_hash: Hash, -) -> Option<&'a ExtendedSessionInfo> -where - Sender: SubsystemSender, -{ - match runtime_info.get_session_info(sender, block_hash).await { - Ok(extended_info) => Some(&extended_info), - Err(_) => { - gum::debug!( - target: LOG_TARGET, - ?block_hash, - "Can't obtain SessionInfo or ExecutorParams" - ); - None - }, - } -} - async fn get_extended_session_info_by_index<'a, Sender>( runtime_info: &'a mut RuntimeInfo, sender: &mut 
Sender, @@ -1920,14 +1899,18 @@ async fn distribution_messages_for_activation>( }; if let Some((cert, val_index, tranche)) = maybe_cert { - let ExtendedSessionInfo { ref executor_params, .. } = match get_extended_session_info( - session_info_provider, - sender, - candidate_entry.candidate_receipt().descriptor().relay_parent(), - ) - .await - { - Some(i) => i, - None => return Ok(actions), - }; + // Use relay_block (the relay chain block) rather than the candidate's relay_parent + // for the runtime API query. The relay_parent may reference an old/finalized block + // whose state is already pruned, but executor params are session-buffered so any + // recent relay chain block in the same session will return the same result. + let ExtendedSessionInfo { ref executor_params, .. } = + match get_extended_session_info_by_index( + session_info_provider, + sender, + relay_block, + block_entry.session(), + ) + .await + { + Some(i) => i, + None => return Ok(actions), + }; let indirect_cert = IndirectAssignmentCertV2 { block_hash: relay_block, validator: val_index, cert }; diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index db85f365dba3c..23325302d4f49 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -2963,13 +2963,6 @@ fn subsystem_validate_approvals_cache() { assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot))); clock.inner.lock().wakeup_all(slot_to_tick(slot)); - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(rx), )) => { - rx.send(Ok(1u32.into())).unwrap(); - } - ); - futures_timer::Delay::new(Duration::from_millis(200)).await; clock.inner.lock().wakeup_all(slot_to_tick(slot + 2)); @@ -4090,13 +4083,6 @@ fn test_approval_is_sent_on_max_approval_coalesce_count() { 
assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot))); clock.inner.lock().wakeup_all(slot_to_tick(slot)); - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(rx), )) => { - rx.send(Ok(1u32.into())).unwrap(); - } - ); - futures_timer::Delay::new(Duration::from_millis(200)).await; clock.inner.lock().wakeup_all(slot_to_tick(slot + 2)); @@ -4398,13 +4384,6 @@ fn test_approval_is_sent_on_max_approval_coalesce_wait() { assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot))); clock.inner.lock().wakeup_all(slot_to_tick(slot)); - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(rx), )) => { - rx.send(Ok(1u32.into())).unwrap(); - } - ); - futures_timer::Delay::new(Duration::from_millis(200)).await; clock.inner.lock().wakeup_all(slot_to_tick(slot + 2)); @@ -4519,13 +4498,6 @@ async fn setup_overseer_with_two_blocks_each_with_one_assignment_triggered( assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot))); clock.inner.lock().wakeup_all(slot_to_tick(slot)); - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(rx), )) => { - rx.send(Ok(1u32.into())).unwrap(); - } - ); - futures_timer::Delay::new(Duration::from_millis(200)).await; clock.inner.lock().wakeup_all(slot_to_tick(slot + 2)); @@ -4629,13 +4601,6 @@ async fn setup_overseer_with_blocks_with_two_assignments_triggered( assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot))); clock.inner.lock().wakeup_all(slot_to_tick(slot)); - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(rx), )) => { - rx.send(Ok(1u32.into())).unwrap(); - } - ); - 
futures_timer::Delay::new(Duration::from_millis(200)).await; clock.inner.lock().wakeup_all(slot_to_tick(slot + 2)); @@ -5621,12 +5586,6 @@ fn subsystem_launches_missed_assignments_on_restart() { } ); - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(rx), )) => { - rx.send(Ok(1u32.into())).unwrap(); - } - ); assert_matches!( overseer_recv(&mut virtual_overseer).await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( From 7d93ea75aaffcd4c40195d52427e712916e582a0 Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 11 Mar 2026 09:32:43 +0100 Subject: [PATCH 20/52] Properly fix executor params fetch in approval voting --- polkadot/node/core/approval-voting/src/lib.rs | 34 ++++++++++++------- 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 77dd7a4a17d12..d635fbea1302a 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -1899,18 +1899,24 @@ async fn distribution_messages_for_activation>( }; if let Some((cert, val_index, tranche)) = maybe_cert { - // Use relay_block (the relay chain block) rather than the candidate's relay_parent - // for the runtime API query. The relay_parent may reference an old/finalized block - // whose state is already pruned, but executor params are session-buffered so any - // recent relay chain block in the same session will return the same result. + // Executor params are session-buffered, so we use relay_block (the including relay + // block) for the runtime API query — its state is guaranteed available. The session + // index comes from the candidate descriptor (relay_parent's session), falling back + // to the including block's session for V1 descriptors. 
+ let session = candidate_receipt + .descriptor + .session_index() + .unwrap_or(block_entry.session()); let ExtendedSessionInfo { ref executor_params, .. } = match get_extended_session_info_by_index( session_info_provider, sender, relay_block, - block_entry.session(), + session, ) .await { From dfcb539cdb9da053d803e62ee159ea3b62033da6 Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 11 Mar 2026 10:42:22 +0100 Subject: [PATCH 21/52] Fix CI --- polkadot/node/core/approval-voting/src/lib.rs | 5 +---- .../prospective-parachains/src/fragment_chain/mod.rs | 1 - polkadot/node/core/prospective-parachains/src/lib.rs | 9 +++------ .../node/network/statement-distribution/src/v2/mod.rs | 10 +++++----- .../node/subsystem-util/src/inclusion_emulator/mod.rs | 1 - 5 files changed, 9 insertions(+), 17 deletions(-) diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index d635fbea1302a..13410f68b729e 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -3378,10 +3378,7 @@ async fn process_wakeup>( // block) for the runtime API query — its state is guaranteed available. The session // index comes from the candidate descriptor (relay_parent's session), falling back // to the including block's session for V1 descriptors. - let session = candidate_receipt - .descriptor - .session_index() - .unwrap_or(block_entry.session()); + let session = candidate_receipt.descriptor.session_index().unwrap_or(block_entry.session()); let ExtendedSessionInfo { ref executor_params, .. 
} = match get_extended_session_info_by_index( session_info_provider, diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index a9d9da77a2e04..26177dadda68c 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -422,7 +422,6 @@ impl CandidateEntry { } } - /// A candidate existing on-chain but pending availability, for special treatment /// in the [`Scope`]. #[derive(Debug, Clone)] diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index bbb7511343f86..59eb68b78b8c3 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -834,15 +834,12 @@ fn answer_hypothetical_membership_request( ); match entry { Ok(entry) => fragment_chain - .can_add_candidate_as_potential( - &leaf_view.relay_chain_scope, - &entry, - ), + .can_add_candidate_as_potential(&leaf_view.relay_chain_scope, &entry), Err(_) => continue, } }, - HypotheticalCandidate::Incomplete { .. } => - fragment_chain.can_add_candidate_as_potential_hypothetical( + HypotheticalCandidate::Incomplete { .. 
} => fragment_chain + .can_add_candidate_as_potential_hypothetical( &leaf_view.relay_chain_scope, candidate.scheduling_parent(), candidate.candidate_hash(), diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 71e386cd64093..62bceee4ba480 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -2974,11 +2974,11 @@ pub(crate) async fn handle_response( ); let post_confirmation = { - let scheduling_parent_state = - match state.per_scheduling_parent.get_mut(&scheduling_parent) { - None => return, - Some(s) => s, - }; + let scheduling_parent_state = match state.per_scheduling_parent.get_mut(&scheduling_parent) + { + None => return, + Some(s) => s, + }; let per_session = match state.per_session.get(&scheduling_parent_state.session) { None => return, diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs index edbb1086ddd21..705d13b9393e6 100644 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs +++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -793,7 +793,6 @@ fn validate_against_constraints( .map_err(FragmentValidityError::OutputsInvalid) } - #[cfg(test)] mod tests { use super::*; From 88433d13d72b59c3a4d225d3b5531d6cb7516024 Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 11 Mar 2026 10:44:38 +0100 Subject: [PATCH 22/52] Update prdoc --- prdoc/pr_11290.prdoc | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/prdoc/pr_11290.prdoc b/prdoc/pr_11290.prdoc index 3e1c962f2c237..2b6fd27745951 100644 --- a/prdoc/pr_11290.prdoc +++ b/prdoc/pr_11290.prdoc @@ -33,6 +33,16 @@ doc: Uses `scheduling_parent_for_candidate_validation(v3_ever_seen)` in dispute participation queue ordering (`CandidateComparator`), so V3 candidates are ordered by their scheduling parent's block number rather 
than relay parent. + - **Prospective parachains**: Removes the `HypotheticalOrConcreteCandidate` trait. Splits + `check_potential` into a lightweight check (using only scheduling_parent, no relay_parent + needed) and a full constraint check (for concrete `CandidateEntry` with relay_parent). + `HypotheticalCandidate::Incomplete` no longer pretends to have a relay_parent. + - **Subsystem util**: Removes the `HypotheticalOrConcreteCandidate` trait definition and its + `HypotheticalCandidate` impl from the inclusion emulator. + - **Approval voting**: Fixes executor params fetching to use the candidate descriptor's + `session_index()` (the relay_parent's session) but use a recent block for the fetch itself. + - **Statement distribution**: Fixes remaining `relay_parent` → `scheduling_parent` variable + references in request dispatch and response handling. crates: - name: polkadot-primitives @@ -63,5 +73,15 @@ crates: bump: patch - name: polkadot-runtime-parachains bump: major + - name: polkadot-node-subsystem-types + bump: major + - name: polkadot-node-subsystem-util + bump: major + - name: polkadot-node-core-approval-voting + bump: major + - name: polkadot-node-network-protocol + bump: patch + - name: polkadot-subsystem-bench + bump: patch - name: polkadot-test-malus bump: patch From aa60a2e56c727957b3558fbd735eb229a7cb33dc Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 11 Mar 2026 11:09:47 +0100 Subject: [PATCH 23/52] More relay_parent -> scheduling_parent --- .../src/v2/candidates.rs | 8 +- .../statement-distribution/src/v2/mod.rs | 74 +++++++++++-------- .../statement-distribution/src/v2/requests.rs | 5 +- 3 files changed, 48 insertions(+), 39 deletions(-) diff --git a/polkadot/node/network/statement-distribution/src/v2/candidates.rs b/polkadot/node/network/statement-distribution/src/v2/candidates.rs index c0027f82a6c65..fd2cb64dd619c 100644 --- a/polkadot/node/network/statement-distribution/src/v2/candidates.rs +++ 
b/polkadot/node/network/statement-distribution/src/v2/candidates.rs @@ -88,7 +88,7 @@ impl Candidates { &mut self, peer: PeerId, candidate_hash: CandidateHash, - claimed_relay_parent: Hash, + claimed_scheduling_parent: Hash, claimed_group_index: GroupIndex, claimed_parent_hash_and_id: Option<(Hash, ParaId)>, ) -> Result<(), BadAdvertisement> { @@ -102,7 +102,7 @@ impl Candidates { match entry { CandidateState::Confirmed(ref c) => { - if c.scheduling_parent() != claimed_relay_parent { + if c.scheduling_parent() != claimed_scheduling_parent { return Err(BadAdvertisement); } @@ -124,7 +124,7 @@ impl Candidates { c.add_claims( peer, CandidateClaims { - relay_parent: claimed_relay_parent, + relay_parent: claimed_scheduling_parent, group_index: claimed_group_index, parent_hash_and_id: claimed_parent_hash_and_id, }, @@ -564,7 +564,7 @@ impl ConfirmedCandidate { } /// Get the group index of the assigned group. Note that this is in the context - /// of the state of the chain at the candidate's relay parent and its para-id. + /// of the state of the chain at the candidate's scheduling parent and its para-id. pub fn group_index(&self) -> GroupIndex { self.assigned_group } diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 62bceee4ba480..67586e8131e7c 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -533,19 +533,22 @@ pub(crate) async fn handle_network_update( async fn handle_active_leaf_update( ctx: &mut Context, state: &mut State, - new_relay_parent: Hash, + new_scheduling_parent: Hash, ) -> JfyiErrorResult<()> { let disabled_validators: HashSet<_> = - polkadot_node_subsystem_util::request_disabled_validators(new_relay_parent, ctx.sender()) - .await - .await - .map_err(JfyiError::RuntimeApiUnavailable)? - .map_err(JfyiError::FetchDisabledValidators)? 
- .into_iter() - .collect(); + polkadot_node_subsystem_util::request_disabled_validators( + new_scheduling_parent, + ctx.sender(), + ) + .await + .await + .map_err(JfyiError::RuntimeApiUnavailable)? + .map_err(JfyiError::FetchDisabledValidators)? + .into_iter() + .collect(); let session_index = polkadot_node_subsystem_util::request_session_index_for_child( - new_relay_parent, + new_scheduling_parent, ctx.sender(), ) .await @@ -555,7 +558,7 @@ async fn handle_active_leaf_update( if !state.per_session.contains_key(&session_index) { let session_info = polkadot_node_subsystem_util::request_session_info( - new_relay_parent, + new_scheduling_parent, session_index, ctx.sender(), ) @@ -568,7 +571,7 @@ async fn handle_active_leaf_update( None => { gum::warn!( target: LOG_TARGET, - relay_parent = ?new_relay_parent, + scheduling_parent = ?new_scheduling_parent, "No session info available for current session" ); @@ -578,7 +581,7 @@ async fn handle_active_leaf_update( }; let minimum_backing_votes = - request_min_backing_votes(new_relay_parent, session_index, ctx.sender()) + request_min_backing_votes(new_scheduling_parent, session_index, ctx.sender()) .await .await .map_err(JfyiError::RuntimeApiUnavailable)? @@ -599,7 +602,7 @@ async fn handle_active_leaf_update( if !disabled_validators.is_empty() { gum::debug!( target: LOG_TARGET, - relay_parent = ?new_relay_parent, + scheduling_parent = ?new_scheduling_parent, ?session_index, ?disabled_validators, "Disabled validators detected" @@ -607,7 +610,7 @@ async fn handle_active_leaf_update( } let group_rotation_info = - polkadot_node_subsystem_util::request_validator_groups(new_relay_parent, ctx.sender()) + polkadot_node_subsystem_util::request_validator_groups(new_scheduling_parent, ctx.sender()) .await .await .map_err(JfyiError::RuntimeApiUnavailable)? 
@@ -615,7 +618,7 @@ async fn handle_active_leaf_update( .1; let claim_queue = ClaimQueueSnapshot( - polkadot_node_subsystem_util::request_claim_queue(new_relay_parent, ctx.sender()) + polkadot_node_subsystem_util::request_claim_queue(new_scheduling_parent, ctx.sender()) .await .await .map_err(JfyiError::RuntimeApiUnavailable)? @@ -640,7 +643,7 @@ async fn handle_active_leaf_update( let transposed_cq = transpose_claim_queue(claim_queue.0); state.per_scheduling_parent.insert( - new_relay_parent, + new_scheduling_parent, PerSchedulingParentState { local_validator, statement_store: StatementStore::new(&per_session.groups), @@ -668,18 +671,18 @@ pub(crate) async fn handle_active_leaves_update( .await .map_err(JfyiError::ActivateLeafFailure)?; - let new_relay_parents = + let new_scheduling_parents = state.implicit_view.all_allowed_relay_parents().cloned().collect::>(); - for new_relay_parent in new_relay_parents.iter().cloned() { - if state.per_scheduling_parent.contains_key(&new_relay_parent) { + for new_scheduling_parent in new_scheduling_parents.iter().cloned() { + if state.per_scheduling_parent.contains_key(&new_scheduling_parent) { continue; } - if let Err(err) = handle_active_leaf_update(ctx, state, new_relay_parent).await { + if let Err(err) = handle_active_leaf_update(ctx, state, new_scheduling_parent).await { gum::warn!( target: LOG_TARGET, - relay_parent = ?new_relay_parent, + scheduling_parent = ?new_scheduling_parent, error = ?err, "Failed to handle active leaf update" ); @@ -701,16 +704,22 @@ pub(crate) async fn handle_active_leaves_update( { let mut update_peers = Vec::new(); for (peer, peer_state) in state.peers.iter_mut() { - let fresh = peer_state.reconcile_active_leaf(activated.hash, &new_relay_parents); + let fresh = peer_state.reconcile_active_leaf(activated.hash, &new_scheduling_parents); if !fresh.is_empty() { update_peers.push((*peer, fresh)); } } for (peer, fresh) in update_peers { - for fresh_relay_parent in fresh { - 
send_peer_messages_for_relay_parent(ctx, state, peer, fresh_relay_parent, metrics) - .await; + for fresh_scheduling_parent in fresh { + send_peer_messages_for_scheduling_parent( + ctx, + state, + peer, + fresh_scheduling_parent, + metrics, + ) + .await; } } } @@ -803,8 +812,9 @@ async fn handle_peer_view_update( peer_data.update_view(new_view, &state.implicit_view) }; - for new_relay_parent in fresh_implicit { - send_peer_messages_for_relay_parent(ctx, state, peer, new_relay_parent, metrics).await; + for new_scheduling_parent in fresh_implicit { + send_peer_messages_for_scheduling_parent(ctx, state, peer, new_scheduling_parent, metrics) + .await; } } @@ -830,11 +840,11 @@ fn find_validator_ids<'a>( /// This function is designed to be cheap and not to send duplicate messages in repeated /// cases. #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn send_peer_messages_for_relay_parent( +async fn send_peer_messages_for_scheduling_parent( ctx: &mut Context, state: &mut State, peer: PeerId, - relay_parent: Hash, + scheduling_parent: Hash, metrics: &Metrics, ) { let peer_data = match state.peers.get_mut(&peer) { @@ -842,7 +852,7 @@ async fn send_peer_messages_for_relay_parent( Some(p) => p, }; - let scheduling_parent_state = match state.per_scheduling_parent.get_mut(&relay_parent) { + let scheduling_parent_state = match state.per_scheduling_parent.get_mut(&scheduling_parent) { None => return, Some(s) => s, }; @@ -862,7 +872,7 @@ async fn send_peer_messages_for_relay_parent( { send_pending_cluster_statements( ctx, - relay_parent, + scheduling_parent, &(peer, peer_data.protocol_version), validator_id, &mut active.cluster_tracker, @@ -875,7 +885,7 @@ async fn send_peer_messages_for_relay_parent( send_pending_grid_messages( ctx, - relay_parent, + scheduling_parent, &(peer, peer_data.protocol_version), validator_id, &per_session_state.groups, diff --git a/polkadot/node/network/statement-distribution/src/v2/requests.rs 
b/polkadot/node/network/statement-distribution/src/v2/requests.rs index 6b6021a6b33f1..979181260b640 100644 --- a/polkadot/node/network/statement-distribution/src/v2/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs @@ -180,12 +180,11 @@ impl RequestManager { /// manager doesn't store this request already. pub fn get_or_insert( &mut self, - relay_parent: Hash, + scheduling_parent: Hash, candidate_hash: CandidateHash, group_index: GroupIndex, ) -> Entry<'_> { - let identifier = - CandidateIdentifier { scheduling_parent: relay_parent, candidate_hash, group_index }; + let identifier = CandidateIdentifier { scheduling_parent, candidate_hash, group_index }; let (candidate, fresh) = match self.requests.entry(identifier.clone()) { HEntry::Occupied(e) => (e.into_mut(), false), From b1d72957175c26f371b4f467a50d3084fe570a66 Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 11 Mar 2026 13:02:31 +0100 Subject: [PATCH 24/52] Cleanup + fix dispute distribution --- .../core/dispute-coordinator/src/import.rs | 5 +- .../dispute-coordinator/src/initialized.rs | 2 +- polkadot/node/core/pvf/common/src/execute.rs | 8 +-- .../network/dispute-distribution/src/lib.rs | 60 +++++++++++++++++-- .../dispute-distribution/src/receiver/mod.rs | 40 +++++++++++-- .../dispute-distribution/src/sender/mod.rs | 2 + .../src/sender/send_task.rs | 18 ++++-- polkadot/node/network/protocol/src/lib.rs | 10 ++-- .../src/v2/candidates.rs | 24 ++++---- .../statement-distribution/src/v2/mod.rs | 4 +- .../statement-distribution/src/v2/requests.rs | 14 ++--- polkadot/primitives/src/v9/mod.rs | 14 ++--- 12 files changed, 145 insertions(+), 56 deletions(-) diff --git a/polkadot/node/core/dispute-coordinator/src/import.rs b/polkadot/node/core/dispute-coordinator/src/import.rs index 15753ba8d9dd4..58110193b0119 100644 --- a/polkadot/node/core/dispute-coordinator/src/import.rs +++ b/polkadot/node/core/dispute-coordinator/src/import.rs @@ -70,9 +70,8 @@ impl<'a> CandidateEnvironment<'a> 
{ controlled_indices: &mut ControlledValidatorIndices, ) -> Option> { // We use the scheduling parent here to have consensus on disabled state among validators. - // If this fetch fails because e.g. we have never seen the fork of the candidate, not - // seeing the disabled state is acceptable, because we have spam protection for these - // cases in place anyways. + // If this fetch fails because e.g. we have never seen the fork of the candidate, we are + // still fine, because we have spam protection for these cases in place anyways. let disabled_onchain = runtime_info .get_disabled_validators(ctx.sender(), scheduling_parent) .await diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index 827c59a74e9ec..2e6a82e4b3b24 100644 --- a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -673,7 +673,7 @@ impl Initialized { // Use transition-safe descriptor methods for scheduling context. // Before the V3 node feature is seen, these fall back to old-rules // behavior to match old backers and prevent slashing. - // See `CandidateDescriptorV2::version_for_approval_dispute`. + // See `CandidateDescriptorV2::version_for_candidate_validation`. let scheduling_session = candidate_receipt .descriptor .scheduling_session_for_candidate_validation(self.v3_ever_seen) diff --git a/polkadot/node/core/pvf/common/src/execute.rs b/polkadot/node/core/pvf/common/src/execute.rs index a4ab0e4be65cd..aebb934e4dcf3 100644 --- a/polkadot/node/core/pvf/common/src/execute.rs +++ b/polkadot/node/core/pvf/common/src/execute.rs @@ -47,7 +47,7 @@ pub struct ValidationContext { /// During the V3 transition period, this flag determines whether to trust /// `descriptor.version()` or fall back to `descriptor.version_old_rules()` /// for approval/dispute validations. - /// See `CandidateDescriptorV2::version_for_approval_dispute`. 
+ /// See `CandidateDescriptorV2::version_for_candidate_validation`. pub v3_seen: bool, } @@ -58,10 +58,8 @@ impl ValidationContext { } /// Get the scheduling parent hash, using transition-safe logic. - // TODO: This is using _for_approval_dispute, but is also used in backing context. - // Might be fine, but: - // 1. Definitely needs a renaming then. - // 2. We should remove the special casing in tho other cases then too. + // Note: This uses _for_candidate_validation which is the transition-safe version. + // It is used in both backing and approval/dispute contexts. pub fn scheduling_parent(&self) -> Hash { self.candidate_receipt .descriptor diff --git a/polkadot/node/network/dispute-distribution/src/lib.rs b/polkadot/node/network/dispute-distribution/src/lib.rs index 574d6c3ae25b5..04c99a7f3763e 100644 --- a/polkadot/node/network/dispute-distribution/src/lib.rs +++ b/polkadot/node/network/dispute-distribution/src/lib.rs @@ -24,12 +24,18 @@ //! The sender is responsible for getting our vote out, see `sender`. The receiver handles //! incoming [`DisputeRequest`](v1::DisputeRequest)s and offers spam protection, see `receiver`. 
-use std::{ + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; use futures::{channel::mpsc, FutureExt, StreamExt, TryFutureExt}; use polkadot_node_network_protocol::authority_discovery::AuthorityDiscovery; -use polkadot_node_subsystem_util::nesting_sender::NestingSender; +use polkadot_node_subsystem_util::{nesting_sender::NestingSender, request_node_features}; use sp_keystore::KeystorePtr; use polkadot_node_network_protocol::request_response::{incoming::IncomingRequestReceiver, v1}; @@ -38,6 +44,7 @@ use polkadot_node_subsystem::{ messages::DisputeDistributionMessage, overseer, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; +use polkadot_primitives::node_features::FeatureIndex; use polkadot_node_subsystem_util::{runtime, runtime::RuntimeInfo}; /// ## The sender [`DisputeSender`] @@ -132,6 +139,11 @@ pub struct DisputeDistributionSubsystem { /// Metrics for this subsystem. metrics: Metrics, + + /// Monotonic flag: set to `true` once any activated leaf has the `CandidateReceiptV3` + /// node feature enabled. Shared with the receiver task via `Arc`. + /// See `CandidateDescriptorV2::version_for_candidate_validation` for the safety argument. + v3_ever_seen: Arc<AtomicBool>, } #[overseer::subsystem(DisputeDistribution, error = SubsystemError, prefix = self::overseer)] @@ -176,6 +188,7 @@ where req_receiver: Some(req_receiver), authority_discovery, metrics, + v3_ever_seen: Arc::new(AtomicBool::new(false)), } } @@ -188,6 +201,7 @@ where .expect("Must be provided on `new` and we take ownership here. 
qed."), self.authority_discovery.clone(), self.metrics.clone(), + self.v3_ever_seen.clone(), ); ctx.spawn("disputes-receiver", receiver.run().boxed()) .map_err(FatalError::SpawnTask)?; @@ -240,8 +254,41 @@ where ) -> Result { match signal { OverseerSignal::Conclude => return Ok(SignalResult::Conclude), - OverseerSignal::ActiveLeaves(update) => { - self.disputes_sender.update_leaves(ctx, &mut self.runtime, update).await?; + OverseerSignal::ActiveLeaves(ref update) => { + // Detect V3 node feature on activated leaves (same approach as + // dispute-coordinator and candidate-validation). + if !self.v3_ever_seen.load(Ordering::Relaxed) { + if let Some(ref activated) = update.activated { + if let Ok(session_index) = self + .runtime + .get_session_index_for_child(ctx.sender(), activated.hash) + .await + { + if let Ok(Ok(features)) = request_node_features( + activated.hash, + session_index, + ctx.sender(), + ) + .await + .await + { + if FeatureIndex::CandidateReceiptV3.is_set(&features) { + gum::info!( + target: LOG_TARGET, + ?session_index, + "CandidateReceiptV3 node feature detected in \ + dispute-distribution", + ); + self.v3_ever_seen.store(true, Ordering::Relaxed); + } + } + } + } + } + + self.disputes_sender + .update_leaves(ctx, &mut self.runtime, update.clone()) + .await?; }, OverseerSignal::BlockFinalized(_, _) => {}, }; @@ -256,7 +303,10 @@ where ) -> Result<()> { match msg { DisputeDistributionMessage::SendDispute(dispute_msg) => { - self.disputes_sender.start_sender(ctx, &mut self.runtime, dispute_msg).await? + let v3_ever_seen = self.v3_ever_seen.load(Ordering::Relaxed); + self.disputes_sender + .start_sender(ctx, &mut self.runtime, dispute_msg, v3_ever_seen) + .await? 
}, } Ok(()) diff --git a/polkadot/node/network/dispute-distribution/src/receiver/mod.rs b/polkadot/node/network/dispute-distribution/src/receiver/mod.rs index 686b9f4f94da0..5a74a834a7a33 100644 --- a/polkadot/node/network/dispute-distribution/src/receiver/mod.rs +++ b/polkadot/node/network/dispute-distribution/src/receiver/mod.rs @@ -16,6 +16,10 @@ use std::{ pin::Pin, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, task::{Context, Poll}, time::Duration, }; @@ -113,6 +117,11 @@ pub struct DisputesReceiver { /// Log received requests. metrics: Metrics, + + /// Shared monotonic flag for V3 candidate descriptor detection. + /// Updated by the main subsystem task on active leaf updates. + /// See `CandidateDescriptorV2::version_for_candidate_validation` for the safety argument. + v3_ever_seen: Arc<AtomicBool>, } /// Messages as handled by this receiver internally. @@ -154,6 +163,7 @@ where receiver: IncomingRequestReceiver, authority_discovery: AD, metrics: Metrics, + v3_ever_seen: Arc<AtomicBool>, ) -> Self { let runtime = RuntimeInfo::new_with_config(runtime::Config { keystore: None, @@ -168,6 +178,7 @@ where authority_discovery, pending_imports: FuturesUnordered::new(), metrics, + v3_ever_seen, } } @@ -324,10 +335,31 @@ where ) -> Result<()> { let IncomingRequest { peer, payload, pending_response } = incoming; - // For disputes, we need session info from the scheduling context - // Use scheduling_parent to fetch the session info for dispute validators - let scheduling_parent = payload.0.candidate_receipt.descriptor.scheduling_parent(); - + // For disputes, we need session info from the scheduling context. + // Use the transition-safe method to match old backer semantics before V3 is confirmed. + let v3_ever_seen = self.v3_ever_seen.load(Ordering::Relaxed); + let scheduling_parent = payload + .0 + .candidate_receipt + .descriptor + .scheduling_parent_for_candidate_validation(v3_ever_seen); + + // The scheduling parent may not be a block we have ever seen (e.g. 
disputes + // about candidates on forks we never imported), so we cannot rely on its state + // being available for runtime API queries. + // + // This is fine because `get_session_info_by_index` has two cache layers: + // + // 1. A local LRU cache in this subsystem's `RuntimeInfo`, keyed by session index. Once + // populated by a prior call from this receiver, the scheduling parent hash is + // irrelevant. Thus on a dispute storm, the cache will be warm (the actual threat + // scenario). + // + // 2. More importantly, the runtime API subsystem itself caches session info by session + // index. The dispute coordinator and dispute distribution sender both query session info + // on every active leaf, warming that global cache for all sessions within the dispute + // window. When our local cache misses, the runtime API subsystem can still serve the + // response from its cache even if the scheduling parent block's state is unavailable. let info = self .runtime .get_session_info_by_index(&mut self.sender, scheduling_parent, payload.0.session_index) diff --git a/polkadot/node/network/dispute-distribution/src/sender/mod.rs b/polkadot/node/network/dispute-distribution/src/sender/mod.rs index 235355fc37ce4..7c21241e876bd 100644 --- a/polkadot/node/network/dispute-distribution/src/sender/mod.rs +++ b/polkadot/node/network/dispute-distribution/src/sender/mod.rs @@ -127,6 +127,7 @@ impl DisputeSender { ctx: &mut Context, runtime: &mut RuntimeInfo, msg: DisputeMessage, + v3_ever_seen: bool, ) -> Result<()> { let req: DisputeRequest = msg.into(); let candidate_hash = req.0.candidate_receipt.hash(); @@ -145,6 +146,7 @@ impl DisputeSender { NestingSender::new(self.tx.clone(), DisputeSenderMessage::TaskFinish), req, &self.metrics, + v3_ever_seen, ) .await?; vacant.insert(send_task); diff --git a/polkadot/node/network/dispute-distribution/src/sender/send_task.rs b/polkadot/node/network/dispute-distribution/src/sender/send_task.rs index e9a1a405789d7..87655c3b3d709 100644 --- 
a/polkadot/node/network/dispute-distribution/src/sender/send_task.rs +++ b/polkadot/node/network/dispute-distribution/src/sender/send_task.rs @@ -59,6 +59,10 @@ pub struct SendTask { /// Sender to be cloned for tasks. tx: NestingSender, + + /// Whether the V3 candidate descriptor feature has been observed. + /// Used for transition-safe scheduling parent extraction. + v3_ever_seen: bool, } /// Status of a particular vote/statement delivery to a particular validator. @@ -113,9 +117,10 @@ impl SendTask { tx: NestingSender, request: DisputeRequest, metrics: &Metrics, + v3_ever_seen: bool, ) -> Result { let mut send_task = - Self { request, deliveries: HashMap::new(), has_failed_sends: false, tx }; + Self { request, deliveries: HashMap::new(), has_failed_sends: false, tx, v3_ever_seen }; send_task.refresh_sends(ctx, runtime, active_sessions, metrics).await?; Ok(send_task) } @@ -234,9 +239,14 @@ impl SendTask { runtime: &mut RuntimeInfo, active_sessions: &HashMap, ) -> Result> { - // For disputes, we need session info from the scheduling context - // Use scheduling_parent to fetch the session info for dispute validators - let scheduling_parent = self.request.0.candidate_receipt.descriptor.scheduling_parent(); + // For disputes, we need session info from the scheduling context. + // Use the transition-safe method to match old backer semantics before V3 is confirmed. + let scheduling_parent = self + .request + .0 + .candidate_receipt + .descriptor + .scheduling_parent_for_candidate_validation(self.v3_ever_seen); // Retrieve all authorities which participated in the parachain consensus of the session // in which the candidate was backed (scheduling session). 
diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs index ce93fc9af9987..9d6896eda08ac 100644 --- a/polkadot/node/network/protocol/src/lib.rs +++ b/polkadot/node/network/protocol/src/lib.rs @@ -742,7 +742,7 @@ pub mod v3 { pub scheduling_parent: Hash, /// The hash of the candidate. pub candidate_hash: CandidateHash, - /// The group index backing the candidate at the relay-parent. + /// The group index backing the candidate at the scheduling-parent. pub group_index: GroupIndex, /// The para ID of the candidate. It is illegal for this to /// be a para ID which is not assigned to the group indicated @@ -751,12 +751,12 @@ pub mod v3 { /// The head-data corresponding to the candidate. pub parent_head_data_hash: Hash, /// A statement filter which indicates which validators in the - /// para's group at the relay-parent have validated this candidate + /// para's group at the scheduling-parent have validated this candidate /// and issued statements about it, to the advertiser's knowledge. /// /// This MUST have exactly the minimum amount of bytes /// necessary to represent the number of validators in the assigned - /// backing group as-of the relay-parent. + /// backing group as-of the scheduling-parent. pub statement_knowledge: StatementFilter, } @@ -766,12 +766,12 @@ pub mod v3 { /// The hash of the candidate. pub candidate_hash: CandidateHash, /// A statement filter which indicates which validators in the - /// para's group at the relay-parent have validated this candidate + /// para's group at the scheduling-parent have validated this candidate /// and issued statements about it, to the advertiser's knowledge. /// /// This MUST have exactly the minimum amount of bytes /// necessary to represent the number of validators in the assigned - /// backing group as-of the relay-parent. + /// backing group as-of the scheduling-parent. 
pub statement_knowledge: StatementFilter, } diff --git a/polkadot/node/network/statement-distribution/src/v2/candidates.rs b/polkadot/node/network/statement-distribution/src/v2/candidates.rs index fd2cb64dd619c..3468304e98af3 100644 --- a/polkadot/node/network/statement-distribution/src/v2/candidates.rs +++ b/polkadot/node/network/statement-distribution/src/v2/candidates.rs @@ -124,7 +124,7 @@ impl Candidates { c.add_claims( peer, CandidateClaims { - relay_parent: claimed_scheduling_parent, + scheduling_parent: claimed_scheduling_parent, group_index: claimed_group_index, parent_hash_and_id: claimed_parent_hash_and_id, }, @@ -378,8 +378,8 @@ enum CandidateState { /// Claims made alongside the advertisement of a candidate. #[derive(Debug, Clone, PartialEq, Eq, Hash)] struct CandidateClaims { - /// The relay-parent committed to by the candidate. - relay_parent: Hash, + /// The scheduling-parent committed to by the candidate. + scheduling_parent: Hash, /// The group index assigned to this candidate. group_index: GroupIndex, /// The hash of the parent head-data and the ParaId. This is optional, @@ -390,12 +390,12 @@ struct CandidateClaims { impl CandidateClaims { fn check( &self, - relay_parent: Hash, + scheduling_parent: Hash, group_index: GroupIndex, parent_hash: Hash, para_id: ParaId, ) -> bool { - self.relay_parent == relay_parent && + self.scheduling_parent == scheduling_parent && self.group_index == group_index && self.parent_hash_and_id.map_or(true, |p| p == (parent_hash, para_id)) } @@ -429,9 +429,9 @@ impl UnconfirmedCandidate { // memory consumption is bounded in the same way. 
if let Some(parent_claims) = claims.parent_hash_and_id { let sub_claims = self.parent_claims.entry(parent_claims).or_default(); - match sub_claims.iter().position(|x| x.0 == claims.relay_parent) { + match sub_claims.iter().position(|x| x.0 == claims.scheduling_parent) { Some(p) => sub_claims[p].1 += 1, - None => sub_claims.push((claims.relay_parent, 1)), + None => sub_claims.push((claims.scheduling_parent, 1)), } } self.claims.push((peer, claims)); @@ -452,12 +452,12 @@ impl UnconfirmedCandidate { relay_parent_live: impl Fn(&Hash) -> bool, ) { self.claims.retain(|c| { - if relay_parent_live(&c.1.relay_parent) { + if relay_parent_live(&c.1.scheduling_parent) { true } else { if let Some(parent_claims) = c.1.parent_hash_and_id { if let Entry::Occupied(mut e) = self.parent_claims.entry(parent_claims) { - if let Some(p) = e.get().iter().position(|x| x.0 == c.1.relay_parent) { + if let Some(p) = e.get().iter().position(|x| x.0 == c.1.scheduling_parent) { let sub_claims = e.get_mut(); sub_claims[p].1 -= 1; if sub_claims[p].1 == 0 { @@ -491,13 +491,13 @@ impl UnconfirmedCandidate { v: &mut Vec, i: impl IntoIterator)>, ) { - for ((parent_head_hash, para_id), possible_relay_parents) in i { - for (relay_parent, _rc) in possible_relay_parents { + for ((parent_head_hash, para_id), possible_scheduling_parents) in i { + for (scheduling_parent, _rc) in possible_scheduling_parents { v.push(HypotheticalCandidate::Incomplete { candidate_hash, candidate_para: *para_id, parent_head_data_hash: *parent_head_hash, - candidate_scheduling_parent: *relay_parent, + candidate_scheduling_parent: *scheduling_parent, }); } } diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 67586e8131e7c..8bcf2fa371a93 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -772,7 +772,7 @@ pub(crate) fn handle_deactivate_leaves(state: &mut 
State, leaves: &[Hash]) { }); // clean up requests related to this scheduling parent. - state.request_manager.remove_by_relay_parent(*leaf); + state.request_manager.remove_by_scheduling_parent(*leaf); } } @@ -3266,7 +3266,7 @@ pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) { gum::info!( target: LOG_TARGET, ?candidate_hash, - relay_parent = ?confirmed.scheduling_parent(), + scheduling_parent = ?confirmed.scheduling_parent(), ?group_index, "Dropping a request from a grid peer because the backing threshold is no longer met." ); diff --git a/polkadot/node/network/statement-distribution/src/v2/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs index 979181260b640..1401de50ae2e9 100644 --- a/polkadot/node/network/statement-distribution/src/v2/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs @@ -240,12 +240,12 @@ impl RequestManager { } /// Remove based on relay-parent. - pub fn remove_by_relay_parent(&mut self, relay_parent: Hash) { + pub fn remove_by_scheduling_parent(&mut self, scheduling_parent: Hash) { let mut candidate_hashes = HashSet::new(); // Remove from `by_priority` and `requests`. 
self.by_priority.retain(|(_priority, id)| { - let retain = relay_parent != id.scheduling_parent; + let retain = scheduling_parent != id.scheduling_parent; if !retain { self.requests.remove(id); candidate_hashes.insert(id.candidate_hash); @@ -257,7 +257,7 @@ impl RequestManager { for candidate_hash in candidate_hashes { match self.unique_identifiers.entry(candidate_hash) { HEntry::Occupied(mut entry) => { - entry.get_mut().retain(|id| relay_parent != id.scheduling_parent); + entry.get_mut().retain(|id| scheduling_parent != id.scheduling_parent); if entry.get().is_empty() { entry.remove(); } @@ -966,7 +966,7 @@ mod tests { assert_eq!(request_manager.by_priority.len(), 6); assert_eq!(request_manager.unique_identifiers.len(), 5); - request_manager.remove_by_relay_parent(parent_a); + request_manager.remove_by_scheduling_parent(parent_a); assert_eq!(request_manager.requests.len(), 3); assert_eq!(request_manager.by_priority.len(), 3); @@ -977,7 +977,7 @@ mod tests { // Duplicate hash should still be there (under a different parent). assert!(request_manager.unique_identifiers.contains_key(&duplicate_hash)); - request_manager.remove_by_relay_parent(parent_b); + request_manager.remove_by_scheduling_parent(parent_b); assert_eq!(request_manager.requests.len(), 1); assert_eq!(request_manager.by_priority.len(), 1); @@ -986,7 +986,7 @@ mod tests { assert!(!request_manager.unique_identifiers.contains_key(&candidate_b1)); assert!(!request_manager.unique_identifiers.contains_key(&candidate_b2)); - request_manager.remove_by_relay_parent(parent_c); + request_manager.remove_by_scheduling_parent(parent_c); assert!(request_manager.requests.is_empty()); assert!(request_manager.by_priority.is_empty()); @@ -1227,7 +1227,7 @@ mod tests { } // Garbage collect based on relay parent. - request_manager.remove_by_relay_parent(relay_parent); + request_manager.remove_by_scheduling_parent(relay_parent); // Try to validate response. 
{ diff --git a/polkadot/primitives/src/v9/mod.rs b/polkadot/primitives/src/v9/mod.rs index e829aae355bc2..08e7ced27f8bb 100644 --- a/polkadot/primitives/src/v9/mod.rs +++ b/polkadot/primitives/src/v9/mod.rs @@ -1980,9 +1980,9 @@ impl> CandidateDescriptorV2 { /// reject V3 candidates outright when V3 is not enabled. /// /// During the V3 transition, approval checkers, dispute participants, - /// and on-chain vote scrapers must use [`Self::version_for_approval_dispute`] - /// (and the corresponding `scheduling_parent_for_approval_dispute` / - /// `scheduling_session_for_approval_dispute`) instead of `version()` + /// and on-chain vote scrapers must use [`Self::version_for_candidate_validation`] + /// (and the corresponding `scheduling_parent_for_candidate_validation` / + /// `scheduling_session_for_candidate_validation`) instead of `version()` /// directly. This ensures they match old backer semantics before the V3 /// node feature is confirmed enabled. See those methods for the full /// safety argument. @@ -2070,7 +2070,7 @@ impl CandidateDescriptorV2 { // // Impact if it still happened would also be fairly minimal: We would // drop a parachain block, which is not a big deal on v1, where we are - // not aiming for perfect block confidence yet. + // not aiming for perfect block confidence. let new_v1_detected = self.reserved1[0..16] != [0u8; 16]; if new_v1_detected { @@ -2240,7 +2240,7 @@ impl> CandidateDescriptorV2 { /// Scheduling parent for use in candidate validation. /// - /// See [`Self::version_for_approval_dispute`] for the safety argument. + /// See [`Self::version_for_candidate_validation`] for the safety argument. pub fn scheduling_parent_for_candidate_validation(&self, v3_ever_seen: bool) -> H where H: Copy, @@ -2253,7 +2253,7 @@ impl> CandidateDescriptorV2 { /// Scheduling session for use candidate validation. /// - /// See [`Self::version_for_approval_dispute`] for the safety argument. 
+ /// See [`Self::version_for_candidate_validation`] for the safety argument. pub fn scheduling_session_for_candidate_validation( &self, v3_ever_seen: bool, @@ -2828,8 +2828,6 @@ impl> CommittedCandidateReceiptV2 { }, _ if signals.is_empty() => { // V3 and above require UMP signals. - // This is technically changed behavior, but this is fine as it is only checked in - // the runtime and in backing! return Err(CommittedCandidateReceiptError::NoUMPSignalWithV3Descriptor); }, _ => {}, From 12bd8ed4e82c9b49a784b377b6865bb731ae90a1 Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 11 Mar 2026 13:51:57 +0100 Subject: [PATCH 25/52] Fix test --- .../network/dispute-distribution/src/tests/mod.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/polkadot/node/network/dispute-distribution/src/tests/mod.rs b/polkadot/node/network/dispute-distribution/src/tests/mod.rs index 14ffcb52a41e3..3867553b86a88 100644 --- a/polkadot/node/network/dispute-distribution/src/tests/mod.rs +++ b/polkadot/node/network/dispute-distribution/src/tests/mod.rs @@ -755,6 +755,18 @@ async fn activate_leaf( } ); + // The V3 feature detection in handle_signals sends a NodeFeatures request + // right after SessionIndexForChild. 
+ assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::NodeFeatures(_, tx) + )) => { + tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); + if let Some(session_info) = new_session { assert_matches!( handle.recv().await, From 66d8384c14bffbe5fb5da0a8c36346c81a3d5fb0 Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 11 Mar 2026 14:25:06 +0100 Subject: [PATCH 26/52] Fmt fixes --- .../node/network/dispute-distribution/src/lib.rs | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/polkadot/node/network/dispute-distribution/src/lib.rs b/polkadot/node/network/dispute-distribution/src/lib.rs index 04c99a7f3763e..f9fe9b610c7e5 100644 --- a/polkadot/node/network/dispute-distribution/src/lib.rs +++ b/polkadot/node/network/dispute-distribution/src/lib.rs @@ -44,8 +44,8 @@ use polkadot_node_subsystem::{ messages::DisputeDistributionMessage, overseer, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; -use polkadot_primitives::node_features::FeatureIndex; use polkadot_node_subsystem_util::{runtime, runtime::RuntimeInfo}; +use polkadot_primitives::node_features::FeatureIndex; /// ## The sender [`DisputeSender`] /// @@ -264,13 +264,10 @@ where .get_session_index_for_child(ctx.sender(), activated.hash) .await { - if let Ok(Ok(features)) = request_node_features( - activated.hash, - session_index, - ctx.sender(), - ) - .await - .await + if let Ok(Ok(features)) = + request_node_features(activated.hash, session_index, ctx.sender()) + .await + .await { if FeatureIndex::CandidateReceiptV3.is_set(&features) { gum::info!( From 80aa1049a0c8cfe1108e60887c952312438f60be Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 11 Mar 2026 14:56:41 +0100 Subject: [PATCH 27/52] Fix prdoc version bump --- prdoc/pr_11290.prdoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/prdoc/pr_11290.prdoc b/prdoc/pr_11290.prdoc index 2b6fd27745951..32b1aa99460be 100644 --- 
a/prdoc/pr_11290.prdoc +++ b/prdoc/pr_11290.prdoc @@ -68,7 +68,7 @@ crates: - name: polkadot-statement-distribution bump: major - name: polkadot-dispute-distribution - bump: patch + bump: major - name: polkadot-node-collation-generation bump: patch - name: polkadot-runtime-parachains @@ -80,7 +80,7 @@ crates: - name: polkadot-node-core-approval-voting bump: major - name: polkadot-node-network-protocol - bump: patch + bump: major - name: polkadot-subsystem-bench bump: patch - name: polkadot-test-malus From 7da2f5163b37212cb896d1a4964933e886c38526 Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 11 Mar 2026 17:04:50 +0100 Subject: [PATCH 28/52] Fix fmt --- .../network/collator-protocol/src/validator_side/mod.rs | 6 +++--- .../src/validator_side/tests/prospective_parachains.rs | 6 +----- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index 406f3788bf9d6..9139a82c37c20 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -168,9 +168,9 @@ use polkadot_node_subsystem_util::{ request_claim_queue, request_session_index_for_child, }; use polkadot_primitives::{ - CandidateDescriptorV2, CandidateDescriptorVersion, CandidateHash, CollatorId, - CoreIndex, Hash, HeadData, Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, - SessionIndex, RELAY_CHAIN_SLOT_DURATION_MILLIS, + CandidateDescriptorV2, CandidateDescriptorVersion, CandidateHash, CollatorId, CoreIndex, Hash, + HeadData, Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, SessionIndex, + RELAY_CHAIN_SLOT_DURATION_MILLIS, }; use sp_consensus_babe::digests::CompatibleDigestItem; use sp_consensus_slots::SlotDuration; diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs 
b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index 47c59278c703a..e1976cd9489ed 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -1858,10 +1858,7 @@ fn child_blocked_from_seconding_by_parent(#[case] valid_parent: bool) { #[case(false, CollationVersion::V2)] // V3 descriptor via V2 protocol → rejected (wrong protocol) #[case(true, CollationVersion::V1)] // Crafted unknown descriptor via V1 → rejected #[case(true, CollationVersion::V2)] // Crafted unknown descriptor via V2 → rejected -fn v3_descriptor( - #[case] crafted_unknown: bool, - #[case] collation_version: CollationVersion, -) { +fn v3_descriptor(#[case] crafted_unknown: bool, #[case] collation_version: CollationVersion) { let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), HashSet::new(), |test_harness| async move { @@ -2001,7 +1998,6 @@ fn v3_descriptor( fn v3_scheduling_parent_rejected_on_stalled_relay_chain() { let mut test_state = TestState::default(); - test_harness(ReputationAggregator::new(|_| true), HashSet::new(), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; From 52d54bc1bc004d8f5632bc4213ef6b1f4dd300f7 Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 11 Mar 2026 17:31:00 +0100 Subject: [PATCH 29/52] Remove now unused v3_enabled. 
--- .../collator-protocol/src/collator_side/mod.rs | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index 02f8f194ca8c1..025dcd98a31e4 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -47,7 +47,6 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::{ backing_implicit_view::View as ImplicitView, reputation::{ReputationAggregator, REPUTATION_CHANGE_INTERVAL}, - request_node_features, runtime::{ fetch_claim_queue, get_candidate_events, get_group_rotation_info, ClaimQueueSnapshot, RuntimeInfo, @@ -55,7 +54,7 @@ use polkadot_node_subsystem_util::{ TimeoutExt, }; use polkadot_primitives::{ - node_features, AuthorityDiscoveryId, BlockNumber, CandidateEvent, CandidateHash, + AuthorityDiscoveryId, BlockNumber, CandidateEvent, CandidateHash, CandidateReceiptV2 as CandidateReceipt, CollatorPair, CoreIndex, Hash, HeadData, Id as ParaId, SessionIndex, }; @@ -295,8 +294,6 @@ struct PerSchedulingParent { block_number: Option, /// The session index of this relay parent. session_index: SessionIndex, - /// Whether v3 candidate receipts are enabled. 
- v3_enabled: bool, } impl PerSchedulingParent { @@ -329,22 +326,12 @@ impl PerSchedulingParent { validator_groups.insert(*core, group); } - let node_features = request_node_features(block_hash, session_index, ctx.sender()) - .await - .await - .ok() - .and_then(|r| r.ok()) - .unwrap_or_default(); - - let v3_enabled = node_features::FeatureIndex::CandidateReceiptV3.is_set(&node_features); - Ok(Self { validator_group: validator_groups, collations: HashMap::new(), assignments, block_number, session_index, - v3_enabled, }) } } From 55135a7cb7643d0e04c5ddaaf557e6c1862d6fac Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 12 Mar 2026 12:36:46 +0100 Subject: [PATCH 30/52] Fmt --- .../parachains/src/paras_inherent/mod.rs | 46 ++++++++----------- 1 file changed, 19 insertions(+), 27 deletions(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index c211403a4e092..f6322e5e302bc 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -977,11 +977,7 @@ fn check_descriptor_version_and_signals( // Needed for all versions to access relay chain state. 
let relay_parent = candidate.descriptor().relay_parent(); - let session_index = - candidate - .descriptor() - .session_index() - .unwrap_or(current_session_index); + let session_index = candidate.descriptor().session_index().unwrap_or(current_session_index); if shared::Pallet::::get_relay_parent_info(session_index, relay_parent).is_none() { log::debug!( @@ -1086,14 +1082,14 @@ fn check_descriptor_version_and_signals( /// **Phase 1: Sanitization** (`paras_inherent`, this module) /// - `check_descriptor_version_and_signals`: version gating, relay/scheduling parent validity, /// session restrictions, UMP signals, core index from signals (V2/V3) -/// - `filter_unchained_candidates`: dependency ordering, relay parent bounds, PVD hash, -/// validation code hash, para head match (via `verify_backed_candidate`) +/// - `filter_unchained_candidates`: dependency ordering, relay parent bounds, PVD hash, validation +/// code hash, para head match (via `verify_backed_candidate`) /// - `map_candidates_to_cores`: core assignment mapping, core index from descriptor/injection /// - `filter_backed_statements_from_disabled_validators`: disabled validator filtering /// /// **Phase 2: Processing** (`inclusion::process_candidates`) -/// - `verify_backed_candidate`: relay parent lookup (using session from descriptor), -/// PVD hash, validation code hash, para head match +/// - `verify_backed_candidate`: relay parent lookup (using session from descriptor), PVD hash, +/// validation code hash, para head match /// - Scheduling parent lookup for group assignment /// - Backing vote count and signature verification /// - State updates (pending availability, head data, etc.) 
@@ -1442,10 +1438,7 @@ fn filter_unchained_candidates::new(Some(*latest_relay_parent)); - match check_ctx.verify_backed_candidate( - candidate.candidate(), - latest_head_data.clone(), - ) { + match check_ctx.verify_backed_candidate(candidate.candidate(), latest_head_data.clone()) { Ok(relay_parent_block_number) => { para_latest_context.insert( para_id, @@ -1593,20 +1586,19 @@ fn get_injected_core_index block_num, - None => { - log::debug!( - target: LOG_TARGET, - "Scheduling parent {:?} for candidate {:?} is not in the allowed scheduling parents.", - candidate.descriptor().scheduling_parent(), - candidate.candidate().hash(), - ); - return None; - }, - }; + let scheduling_parent_block_number = + match allowed_scheduling_parents.acquire_info(candidate.descriptor().scheduling_parent()) { + Some((_, block_num)) => block_num, + None => { + log::debug!( + target: LOG_TARGET, + "Scheduling parent {:?} for candidate {:?} is not in the allowed scheduling parents.", + candidate.descriptor().scheduling_parent(), + candidate.candidate().hash(), + ); + return None; + }, + }; // Get the backing group of the candidate backed at `core_idx`. 
let group_idx = match scheduler::Pallet::::group_assigned_to_core( From e66a664e7a19a29be3052be3e2f0a39101339e30 Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 18 Mar 2026 12:44:26 +0100 Subject: [PATCH 31/52] Review remarks --- .../node/core/candidate-validation/src/lib.rs | 4 -- .../core/dispute-coordinator/src/tests.rs | 8 +--- .../src/validator_side/mod.rs | 4 +- polkadot/primitives/src/lib.rs | 2 +- polkadot/primitives/src/v9/mod.rs | 39 +++++++------------ 5 files changed, 19 insertions(+), 38 deletions(-) diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index d31156e1343c4..11853884c9696 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -1000,10 +1000,6 @@ async fn validate_candidate_exhaustive( // Backing-only: verify the descriptor's scheduling_session claim against // the session index independently fetched from the runtime. - // The relay parent session check is left for later: - // https://github.com/paritytech/polkadot-sdk/issues/11182 - // TODO: Properly check session index in the runtime: - // https://github.com/paritytech/polkadot-sdk/issues/11033 if let Some(BackingExtras { expected_scheduling_session, .. }) = &backing_extras { if let Some(scheduling_session) = candidate_receipt.descriptor.scheduling_session() { if scheduling_session != *expected_scheduling_session { diff --git a/polkadot/node/core/dispute-coordinator/src/tests.rs b/polkadot/node/core/dispute-coordinator/src/tests.rs index 2b714e9c55a2d..a0f0bc6669d0a 100644 --- a/polkadot/node/core/dispute-coordinator/src/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/tests.rs @@ -4868,13 +4868,7 @@ fn setup_v3_test_state() -> (TestState, Hash, Hash) { (test_state, relay_parent, scheduling_parent) } -/// Regression test: V3 candidate on the very first leaf must be handled correctly. 
-/// -/// Before the fix, `Initialized` was created with `v3_ever_seen = false` and -/// `process_chain_import_backlog` (which processes on-chain backing votes from the -/// first leaf) ran before `process_active_leaves_update` could set `v3_ever_seen`. -/// This caused V3 candidates to be misinterpreted as V1, using `relay_parent` instead -/// of `scheduling_parent` in the backing signature context — breaking the debug_assert. +/// V3 candidate on the very first leaf must be handled correctly. #[test] fn v3_candidate_on_first_leaf_is_detected_correctly() { let (mut test_state, relay_parent, scheduling_parent) = setup_v3_test_state(); diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index 9139a82c37c20..a163e5813d1df 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -2702,8 +2702,8 @@ async fn kick_off_seconding( (pvd, maybe_parent_head_data, Some(parent_head_data_hash)) }, (CollationVersion::V1, _) => { - // Execution context: relay parent for fetching PVD from relay chain state. - // TODO: We will need to use the new runtime API here: + // V1 protocol only carries V1 descriptors (enforced by + // descriptor_version_sanity_check), so the legacy PVD API is correct. 
let pvd = request_persisted_validation_data( ctx.sender(), candidate_receipt.descriptor().relay_parent(), diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 7cb2ed286d98b..1167cd9e22c6e 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -64,7 +64,7 @@ pub use v9::{ UncheckedSignedAvailabilityBitfields, UncheckedSignedStatement, UpgradeGoAhead, UpgradeRestriction, UpwardMessage, ValidDisputeStatementKind, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, - ValidityError, VersionCheckError, ASSIGNMENT_KEY_TYPE_ID, DEFAULT_CLAIM_QUEUE_OFFSET, + ValidityError, CandidateDescriptorVersionCheckError, ASSIGNMENT_KEY_TYPE_ID, DEFAULT_CLAIM_QUEUE_OFFSET, DEFAULT_SCHEDULING_LOOKAHEAD, LEGACY_MIN_BACKING_VOTES, LOWEST_PUBLIC_ID, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, MIN_CODE_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, PARACHAINS_INHERENT_IDENTIFIER, PARACHAIN_KEY_TYPE_ID, diff --git a/polkadot/primitives/src/v9/mod.rs b/polkadot/primitives/src/v9/mod.rs index a4626c49d07d1..1168e40b84dd5 100644 --- a/polkadot/primitives/src/v9/mod.rs +++ b/polkadot/primitives/src/v9/mod.rs @@ -1858,25 +1858,20 @@ pub enum CandidateDescriptorVersion { /// Error returned by [`CandidateDescriptorV2::check_version_acceptance`]. #[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum VersionCheckError { +#[cfg_attr(feature = "std", derive(thiserror::Error))] +pub enum CandidateDescriptorVersionCheckError { /// Old-style and new-style version detection disagree, and this is not the /// expected V3 disagreement (old rules → V1, new rules → V3) with V3 enabled. + #[cfg_attr( + feature = "std", + error("Descriptor version detection inconsistency (old vs new rules disagree)") + )] Inconsistency, /// The descriptor is V3 but the V3 feature is not enabled. 
+ #[cfg_attr(feature = "std", error("V3 candidate descriptor but V3 feature not enabled"))] V3NotEnabled, } -impl core::fmt::Display for VersionCheckError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - Self::Inconsistency => { - write!(f, "Descriptor version detection inconsistency (old vs new rules disagree)") - }, - Self::V3NotEnabled => write!(f, "V3 candidate descriptor but V3 feature not enabled"), - } - } -} - /// A unique descriptor of the candidate receipt. #[derive(PartialEq, Eq, Clone, Encode, Decode, DecodeWithMemTracking, TypeInfo)] pub struct CandidateDescriptorV2 { @@ -1966,10 +1961,6 @@ impl> CandidateDescriptorV2 { /// some actually unused bytes are available (don't affect the v1 version /// check). /// - /// # Arguments - /// - /// Detect the version of the candidate descriptor. - /// /// Always uses the relaxed (v3-capable) detection logic. This means /// version detection is self-contained and does not require knowing /// whether the V3 node feature is enabled. @@ -2023,19 +2014,19 @@ impl> CandidateDescriptorV2 { /// 1. Old-style and new-style version detection must agree, unless the candidate is V3 and V3 /// is enabled (the expected disagreement: old rules see V1, new rules see V3). /// 2. V3 candidates are rejected when V3 is not enabled. - pub fn check_version_acceptance(&self, v3_enabled: bool) -> Result<(), VersionCheckError> { + pub fn check_version_acceptance(&self, v3_enabled: bool) -> Result<(), CandidateDescriptorVersionCheckError> { let version = self.version(); // Version consistency: old and new detection must agree, unless this is the // expected V3 disagreement (old rules → V1, new rules → V3) with V3 enabled. 
let is_expected_v3_disagreement = version == CandidateDescriptorVersion::V3 && v3_enabled; if !self.check_version_consistency() && !is_expected_v3_disagreement { - return Err(VersionCheckError::Inconsistency); + return Err(CandidateDescriptorVersionCheckError::Inconsistency); } // V3 gating: reject V3 candidates before the feature is enabled. if version == CandidateDescriptorVersion::V3 && !v3_enabled { - return Err(VersionCheckError::V3NotEnabled); + return Err(CandidateDescriptorVersionCheckError::V3NotEnabled); } Ok(()) @@ -3334,7 +3325,7 @@ pub mod tests { let desc = make_v3_descriptor(); assert_eq!(desc.version(), CandidateDescriptorVersion::V3); - assert_eq!(desc.check_version_acceptance(false), Err(VersionCheckError::Inconsistency)); + assert_eq!(desc.check_version_acceptance(false), Err(CandidateDescriptorVersionCheckError::Inconsistency)); } #[test] @@ -3349,8 +3340,8 @@ pub mod tests { assert!(!desc.check_version_consistency()); // Rejected regardless of v3_enabled. - assert_eq!(desc.check_version_acceptance(false), Err(VersionCheckError::Inconsistency)); - assert_eq!(desc.check_version_acceptance(true), Err(VersionCheckError::Inconsistency)); + assert_eq!(desc.check_version_acceptance(false), Err(CandidateDescriptorVersionCheckError::Inconsistency)); + assert_eq!(desc.check_version_acceptance(true), Err(CandidateDescriptorVersionCheckError::Inconsistency)); } #[test] @@ -3410,7 +3401,7 @@ pub mod tests { assert_eq!(desc.version_old_rules(), CandidateDescriptorVersion::V1); assert!(!desc.check_version_consistency()); - assert_eq!(desc.check_version_acceptance(false), Err(VersionCheckError::Inconsistency)); - assert_eq!(desc.check_version_acceptance(true), Err(VersionCheckError::Inconsistency)); + assert_eq!(desc.check_version_acceptance(false), Err(CandidateDescriptorVersionCheckError::Inconsistency)); + assert_eq!(desc.check_version_acceptance(true), Err(CandidateDescriptorVersionCheckError::Inconsistency)); } } From 
55862a29e617cd3c422daed9b74ed1b9f185f6bf Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 18 Mar 2026 17:44:37 +0100 Subject: [PATCH 32/52] Check relay parent session. --- .../node/core/candidate-validation/src/lib.rs | 300 ++++++++++++------ polkadot/node/primitives/src/lib.rs | 6 +- 2 files changed, 215 insertions(+), 91 deletions(-) diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 11853884c9696..f41b3b2ee8300 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -217,17 +217,117 @@ where .map_err(|_| "Cannot fetch validation code bomb limit from the runtime".into()) } -/// Data only needed during backing validation. These are additional strictness -/// checks that backing performs but approval/dispute can (and need to) skip, because the -/// runtime also validates them at inclusion time. These depend on chain state date for the -/// scheduling or even the relay parent to still be around. Which is not a valid assumption in -/// disputes. -struct BackingExtras { - /// Claim queue snapshot for UMP signal validation. - claim_queue: ClaimQueueSnapshot, - /// Session index independently fetched from runtime at scheduling_parent, - /// used to verify the descriptor's scheduling_session claim. - expected_scheduling_session: SessionIndex, +/// Output of [`pre_validate_candidate`]: data needed by PVF execution and +/// post-validation. +struct PreValidationOutput { + /// Validation code bomb limit for PVF preparation. + validation_code_bomb_limit: u32, + /// Claim queue for backing-only UMP signal post-validation. `None` for + /// approval/dispute. + claim_queue: Option, +} + +/// Errors from [`pre_validate_candidate`]. +enum PreValidationError { + /// The candidate is definitively invalid. + Invalid(InvalidCandidate), + /// A runtime API call failed — cannot determine validity. 
+ RuntimeError(String), +} + +/// Pre-validate a candidate before PVF execution. +/// +/// Performs all checks that don't require running the PVF: +/// - Fetch validation code bomb limit from the runtime +/// - Basic checks: PoV size, PoV hash, validation code hash +/// - Backing-only (skipped for approval/dispute): +/// - Scheduling session matches runtime +/// - Relay parent valid in claimed session (v16+ `AllowedRelayParentInfo` API) +/// - Claim queue fetch +/// +/// Backing-only checks are skipped for approval/dispute because the runtime +/// validates them at inclusion time and the chain state they depend on may not +/// be available in disputes. +async fn pre_validate_candidate( + sender: &mut Sender, + candidate_receipt: &CandidateReceipt, + persisted_validation_data: &PersistedValidationData, + pov: &PoV, + validation_code_hash: &ValidationCodeHash, + exec_kind: PvfExecKind, + v3_ever_seen: bool, +) -> Result +where + Sender: SubsystemSender, +{ + let validation_code_bomb_limit = + fetch_bomb_limit(&candidate_receipt.descriptor, v3_ever_seen, sender) + .await + .map_err(PreValidationError::RuntimeError)?; + + if let Err(e) = perform_basic_checks( + &candidate_receipt.descriptor, + persisted_validation_data.max_pov_size, + pov, + validation_code_hash, + ) { + return Err(PreValidationError::Invalid(e)); + } + + let claim_queue = match exec_kind { + PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_) => { + let scheduling_parent = candidate_receipt.descriptor.scheduling_parent(); + + // Verify scheduling session.
+ let expected_scheduling_session = + get_session_index(sender, scheduling_parent).await.ok_or_else(|| { + PreValidationError::RuntimeError( + "Scheduling session index not found".to_string(), + ) + })?; + + if let Some(scheduling_session) = candidate_receipt.descriptor.scheduling_session() { + if scheduling_session != expected_scheduling_session { + return Err(PreValidationError::Invalid( + InvalidCandidate::InvalidSchedulingSession, + )); + } + } + + // Verify relay parent is valid in the claimed session (v16+ API). + if let Some(session_index) = candidate_receipt.descriptor.session_index() { + match check_relay_parent_in_session( + sender, + scheduling_parent, + session_index, + candidate_receipt.descriptor.relay_parent(), + ) + .await + { + Ok(()) => {}, + // Safe to skip: on old runtimes cross-session relay parents don't + // exist, and the scheduling session check above already covers the + // relay parent session (scheduling_parent == relay_parent). + Err(CheckRelayParentSessionError::NotSupported) => {}, + Err(CheckRelayParentSessionError::NotFound) => + return Err(PreValidationError::Invalid( + InvalidCandidate::InvalidRelayParentSession, + )), + Err(CheckRelayParentSessionError::RuntimeError(err)) => + return Err(PreValidationError::RuntimeError(err)), + } + } + + let cq = claim_queue(scheduling_parent, sender).await.ok_or_else(|| { + PreValidationError::RuntimeError("Claim queue not available".to_string()) + })?; + + Some(cq) + }, + _ => None, + }; + + Ok(PreValidationOutput { validation_code_bomb_limit, claim_queue }) } fn handle_validation_message( @@ -253,57 +353,31 @@ where } => async move { let _timer = metrics.time_validate_from_exhaustive(); - let validation_code_bomb_limit = - match fetch_bomb_limit(&candidate_receipt.descriptor, v3_ever_seen, &mut sender) - .await - { - Ok(limit) => limit, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - scheduling_parent = ?candidate_receipt.descriptor.scheduling_parent(), - ?err, - "Failed to fetch 
validation code bomb limit", - ); - let _ = response_sender.send(Err(ValidationFailed(err))); - return; - }, - }; - - // --- Backing-only extras --- - // Stricter checks that backing performs but approval/dispute can - // skip, because the runtime also validates them at inclusion time. - let backing_extras = match exec_kind { - PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_) => { - let scheduling_parent = candidate_receipt.descriptor.scheduling_parent(); - - let Some(claim_queue) = claim_queue(scheduling_parent, &mut sender).await - else { - let _ = response_sender - .send(Err(ValidationFailed("Claim queue not available".to_string()))); - return; - }; - - let Some(expected_scheduling_session) = - get_session_index(&mut sender, scheduling_parent).await - else { - gum::warn!( - target: LOG_TARGET, - ?scheduling_parent, - "Cannot fetch scheduling session index from the runtime", - ); - let _ = response_sender.send(Err(ValidationFailed( - "Scheduling session index not found".to_string(), - ))); - return; - }; - - Some(BackingExtras { claim_queue, expected_scheduling_session }) + // Phase 1: Pre-validation — cheap checks, fail fast before PVF. + let pre = match pre_validate_candidate( + &mut sender, + &candidate_receipt, + &validation_data, + &pov, + &validation_code.hash(), + exec_kind, + v3_ever_seen, + ) + .await + { + Ok(pre) => pre, + Err(PreValidationError::Invalid(e)) => { + let _ = response_sender.send(Ok(ValidationResult::Invalid(e))); + return; + }, + Err(PreValidationError::RuntimeError(err)) => { + let _ = response_sender.send(Err(ValidationFailed(err))); + return; }, - _ => None, }; - let res = validate_candidate_exhaustive( + // Phase 2: PVF execution + output validation. 
+ let res = validate_candidate( validation_host, validation_data, validation_code, @@ -312,9 +386,8 @@ where executor_params, exec_kind, &metrics, - validation_code_bomb_limit, v3_ever_seen, - backing_extras, + pre, ) .await; @@ -621,6 +694,71 @@ where Some(session_index) } +enum CheckRelayParentSessionError { + /// The `AllowedRelayParentInfo` runtime API (v16+) is not supported. + NotSupported, + /// The relay parent was not found in the claimed session. + NotFound, + /// An unexpected runtime API error occurred. + RuntimeError(String), +} + +/// Check that the relay parent is known to the runtime in the claimed session. +/// +/// Uses the `AllowedRelayParentInfo` runtime API (v16+) called at some +/// `recent_block` (recent enough to have state available). We cannot query +/// state at the relay parent directly because it may be old and pruned. +async fn check_relay_parent_in_session( + sender: &mut Sender, + recent_block: Hash, + claimed_session: SessionIndex, + relay_parent: Hash, +) -> Result<(), CheckRelayParentSessionError> +where + Sender: SubsystemSender, +{ + let rx = util::request_from_runtime(recent_block, sender, |tx| { + RuntimeApiRequest::AllowedRelayParentInfo(claimed_session, relay_parent, tx) + }) + .await; + + match rx.await { + Ok(Ok(Some(_))) => Ok(()), + Ok(Ok(None)) => Err(CheckRelayParentSessionError::NotFound), + Ok(Err(RuntimeApiError::NotSupported { .. 
})) => { + gum::debug!( + target: LOG_TARGET, + ?recent_block, + "AllowedRelayParentInfo API not supported", + ); + Err(CheckRelayParentSessionError::NotSupported) + }, + Ok(Err(err)) => { + gum::warn!( + target: LOG_TARGET, + ?recent_block, + ?relay_parent, + ?err, + "Error calling AllowedRelayParentInfo runtime API", + ); + Err(CheckRelayParentSessionError::RuntimeError(format!( + "AllowedRelayParentInfo runtime API error: {err}" + ))) + }, + Err(_) => { + gum::warn!( + target: LOG_TARGET, + ?recent_block, + ?relay_parent, + "AllowedRelayParentInfo request cancelled", + ); + Err(CheckRelayParentSessionError::RuntimeError( + "AllowedRelayParentInfo request cancelled".into(), + )) + }, + } +} + // Returns true if the node is an authority in the next session. async fn check_next_session_authority( sender: &mut Sender, @@ -972,7 +1110,14 @@ where } } -async fn validate_candidate_exhaustive( +/// Execute a PVF and validate the candidate's output. +/// +/// Assumes all pre-validation ([`pre_validate_candidate`]) has already passed. +/// Handles: +/// 1. PVF execution (backing: single attempt; approval/dispute: with retry) +/// 2. Post-validation: para_head hash, commitments hash +/// 3. 
Backing-only post-validation: UMP signal validation against claim queue +async fn validate_candidate( mut validation_backend: impl ValidationBackend + Send, persisted_validation_data: PersistedValidationData, validation_code: ValidationCode, @@ -981,43 +1126,20 @@ async fn validate_candidate_exhaustive( executor_params: ExecutorParams, exec_kind: PvfExecKind, metrics: &Metrics, - validation_code_bomb_limit: u32, v3_seen: bool, - backing_extras: Option, + pre: PreValidationOutput, ) -> Result { let _timer = metrics.time_validate_candidate_exhaustive(); - let validation_code_hash = validation_code.hash(); let para_id = candidate_receipt.descriptor.para_id(); let candidate_hash = candidate_receipt.hash(); gum::debug!( target: LOG_TARGET, - ?validation_code_hash, ?candidate_hash, ?para_id, "About to validate a candidate.", ); - // Backing-only: verify the descriptor's scheduling_session claim against - // the session index independently fetched from the runtime. - if let Some(BackingExtras { expected_scheduling_session, .. 
}) = &backing_extras { - if let Some(scheduling_session) = candidate_receipt.descriptor.scheduling_session() { - if scheduling_session != *expected_scheduling_session { - return Ok(ValidationResult::Invalid(InvalidCandidate::InvalidSessionIndex)); - } - } - } - - if let Err(e) = perform_basic_checks( - &candidate_receipt.descriptor, - persisted_validation_data.max_pov_size, - &pov, - &validation_code_hash, - ) { - gum::debug!(target: LOG_TARGET, ?para_id, ?candidate_hash, "Invalid candidate (basic checks)"); - return Ok(ValidationResult::Invalid(e)); - } - let persisted_validation_data = Arc::new(persisted_validation_data); // Create the validation context shared by both backing and approval/dispute paths @@ -1040,7 +1162,7 @@ async fn validate_candidate_exhaustive( executor_params, prep_timeout, PrepareJobKind::Compilation, - validation_code_bomb_limit, + pre.validation_code_bomb_limit, ); validation_backend.validate_candidate(pvf, validation_context, exec_kind).await @@ -1052,7 +1174,7 @@ async fn validate_candidate_exhaustive( validation_context, PVF_APPROVAL_EXECUTION_RETRY_DELAY, exec_kind, - validation_code_bomb_limit, + pre.validation_code_bomb_limit, ) .await }, @@ -1160,7 +1282,7 @@ async fn validate_candidate_exhaustive( Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch)) } else { // Backing-only: validate UMP signals against the claim queue. - if let Some(BackingExtras { claim_queue, .. }) = &backing_extras { + if let Some(claim_queue) = &pre.claim_queue { if let Err(err) = committed_candidate_receipt .parse_ump_signals(&transpose_claim_queue(claim_queue.0.clone())) { diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index d7601e72ea76b..fbda18b2317d5 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -354,8 +354,10 @@ pub enum InvalidCandidate { CodeHashMismatch, /// Validation has generated different candidate commitments. 
CommitmentsHashMismatch, - /// The candidate receipt contains an invalid session index. - InvalidSessionIndex, + /// The descriptor's scheduling session does not match the runtime. + InvalidSchedulingSession, + /// The relay parent is not recognized in the descriptor's claimed session. + InvalidRelayParentSession, /// The candidate receipt invalid UMP signals. InvalidUMPSignals(CommittedCandidateReceiptError), } From ef6247c63f5866d9770b1225334f929c595d451b Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 19 Mar 2026 10:33:52 +0100 Subject: [PATCH 33/52] Use for_candidate_validation() methods also on the backing path for consistency. Not strictly necessary, but makes tests less confusing. --- .../node/core/candidate-validation/src/lib.rs | 19 ++++++++++++++----- polkadot/primitives/src/v9/mod.rs | 16 +++++++++++++++- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index f41b3b2ee8300..f9b7697bd3826 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -276,7 +276,9 @@ where let claim_queue = match exec_kind { PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_) => { - let scheduling_parent = candidate_receipt.descriptor.scheduling_parent(); + let scheduling_parent = candidate_receipt + .descriptor + .scheduling_parent_for_candidate_validation(v3_ever_seen); // Verify scheduling session. 
let expected_scheduling_session = @@ -286,7 +288,10 @@ where ) })?; - if let Some(scheduling_session) = candidate_receipt.descriptor.scheduling_session() { + if let Some(scheduling_session) = candidate_receipt + .descriptor + .scheduling_session_for_candidate_validation(v3_ever_seen) + { if scheduling_session != expected_scheduling_session { return Err(PreValidationError::Invalid( InvalidCandidate::InvalidSchedulingSession, @@ -295,7 +300,10 @@ where } // Verify relay parent is valid in the claimed session (v16+ API). - if let Some(session_index) = candidate_receipt.descriptor.session_index() { + if let Some(session_index) = candidate_receipt + .descriptor + .session_index_for_candidate_validation(v3_ever_seen) + { match check_relay_parent_in_session( sender, scheduling_parent, @@ -330,15 +338,16 @@ where Ok(PreValidationOutput { validation_code_bomb_limit, claim_queue }) } -fn handle_validation_message( +fn handle_validation_message( mut sender: S, - validation_host: ValidationHost, + validation_host: V, metrics: Metrics, v3_ever_seen: bool, msg: CandidateValidationMessage, ) -> Pin + Send>> where S: SubsystemSender, + V: ValidationBackend + Clone + Send + 'static, { match msg { CandidateValidationMessage::ValidateFromExhaustive { diff --git a/polkadot/primitives/src/v9/mod.rs b/polkadot/primitives/src/v9/mod.rs index 1168e40b84dd5..2b25fbdeac3ec 100644 --- a/polkadot/primitives/src/v9/mod.rs +++ b/polkadot/primitives/src/v9/mod.rs @@ -2242,7 +2242,7 @@ impl> CandidateDescriptorV2 { } } - /// Scheduling session for use candidate validation. + /// Scheduling session for candidate validation. /// /// See [`Self::version_for_candidate_validation`] for the safety argument. pub fn scheduling_session_for_candidate_validation( @@ -2258,6 +2258,20 @@ impl> CandidateDescriptorV2 { CandidateDescriptorVersion::Unknown => None, } } + + /// Session index (relay parent session) for candidate validation. 
+ /// + /// See [`Self::version_for_candidate_validation`] for the safety argument. + pub fn session_index_for_candidate_validation( + &self, + v3_ever_seen: bool, + ) -> Option { + match self.version_for_candidate_validation(v3_ever_seen) { + CandidateDescriptorVersion::V1 | CandidateDescriptorVersion::Unknown => None, + CandidateDescriptorVersion::V2 | CandidateDescriptorVersion::V3 => + Some(self.session_index), + } + } } impl core::fmt::Debug for CandidateDescriptorV2 From 2613f75dbe013f098e24f3b2b155436c81cc7bc8 Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 19 Mar 2026 10:46:45 +0100 Subject: [PATCH 34/52] Basic tests. --- .../core/candidate-validation/src/tests.rs | 744 +++++++++++++----- 1 file changed, 526 insertions(+), 218 deletions(-) diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 49a17388f9ba3..7c7b31baa5267 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -16,7 +16,10 @@ use std::{ collections::BTreeMap, - sync::atomic::{AtomicUsize, Ordering}, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, Mutex, + }, }; use super::*; @@ -416,20 +419,35 @@ fn check_does_not_match() { executor::block_on(test_fut); } +#[derive(Clone)] struct MockValidateCandidateBackend { + inner: Arc>, +} + +struct MockValidateCandidateBackendInner { result_list: Vec>, num_times_called: usize, } impl MockValidateCandidateBackend { fn with_hardcoded_result(result: Result) -> Self { - Self { result_list: vec![result], num_times_called: 0 } + Self { + inner: Arc::new(Mutex::new(MockValidateCandidateBackendInner { + result_list: vec![result], + num_times_called: 0, + })), + } } fn with_hardcoded_result_list( result_list: Vec>, ) -> Self { - Self { result_list, num_times_called: 0 } + Self { + inner: Arc::new(Mutex::new(MockValidateCandidateBackendInner { + result_list, + num_times_called: 0, + })), + } } } @@ -443,8 +461,9 
@@ impl ValidationBackend for MockValidateCandidateBackend { ) -> Result<WasmValidationResult, ValidationError> { // This is expected to panic if called more times than expected, indicating an error in the // test. - let result = self.result_list[self.num_times_called].clone(); - self.num_times_called += 1; + let mut inner = self.inner.lock().unwrap(); + let result = inner.result_list[inner.num_times_called].clone(); + inner.num_times_called += 1; result } @@ -543,7 +562,7 @@ fn candidate_validation_ok_is_ok(#[case] v2_descriptor: bool) { let _ = cq.insert(CoreIndex(0), vec![1.into(), 2.into()].into()); let _ = cq.insert(CoreIndex(1), vec![1.into(), 1.into()].into()); - let v = executor::block_on(validate_candidate_exhaustive( + let v = executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), validation_data.clone(), validation_code, @@ -552,9 +571,11 @@ fn candidate_validation_ok_is_ok(#[case] v2_descriptor: bool) { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - Some(BackingExtras { claim_queue: ClaimQueueSnapshot(cq), expected_scheduling_session: 1 }), + PreValidationOutput { + validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: Some(ClaimQueueSnapshot(cq)), + }, )) .unwrap(); @@ -628,35 +649,13 @@ fn invalid_session_or_ump_signals() { let mut candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; - // Session index specified in CandidateDescriptor does not match expected session.
- for exec_kind in - [PvfExecKind::Backing(dummy_hash()), PvfExecKind::BackingSystemParas(dummy_hash())] - { - let err = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), - validation_data.clone(), - validation_code.clone(), - candidate_receipt.clone(), - Arc::new(pov.clone()), - ExecutorParams::default(), - exec_kind, - &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, - false, - Some(BackingExtras { claim_queue: Default::default(), expected_scheduling_session: 1 }), - )) - .unwrap(); - - assert_matches!(err, ValidationResult::Invalid(InvalidCandidate::InvalidSessionIndex)); - } - candidate_receipt.descriptor.set_session_index(1); // Candidate has no assignments but a core selector. for exec_kind in [PvfExecKind::Backing(dummy_hash()), PvfExecKind::BackingSystemParas(dummy_hash())] { - let result = executor::block_on(validate_candidate_exhaustive( + let result = executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -665,9 +664,11 @@ fn invalid_session_or_ump_signals() { ExecutorParams::default(), exec_kind, &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - Some(BackingExtras { claim_queue: Default::default(), expected_scheduling_session: 1 }), + PreValidationOutput { + validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: Some(ClaimQueueSnapshot::default()), + }, )) .unwrap(); assert_matches!( @@ -680,7 +681,7 @@ fn invalid_session_or_ump_signals() { // Validation doesn't fail for approvals and disputes, core/session index is not checked. 
for exec_kind in [PvfExecKind::Approval, PvfExecKind::Dispute] { - let v = executor::block_on(validate_candidate_exhaustive( + let v = executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -689,9 +690,11 @@ fn invalid_session_or_ump_signals() { ExecutorParams::default(), exec_kind, &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - None, + PreValidationOutput { + validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: None, + }, )) .unwrap(); @@ -713,7 +716,7 @@ fn invalid_session_or_ump_signals() { for exec_kind in [PvfExecKind::Backing(dummy_hash()), PvfExecKind::BackingSystemParas(dummy_hash())] { - let v = executor::block_on(validate_candidate_exhaustive( + let v = executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -722,12 +725,11 @@ fn invalid_session_or_ump_signals() { ExecutorParams::default(), exec_kind, &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - Some(BackingExtras { - claim_queue: ClaimQueueSnapshot(cq.clone()), - expected_scheduling_session: 1, - }), + PreValidationOutput { + validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: Some(ClaimQueueSnapshot(cq.clone())), + }, )) .unwrap(); @@ -762,7 +764,7 @@ fn invalid_session_or_ump_signals() { for exec_kind in [PvfExecKind::Backing(dummy_hash()), PvfExecKind::BackingSystemParas(dummy_hash())] { - let result = executor::block_on(validate_candidate_exhaustive( + let result = executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -771,9 +773,11 @@ fn invalid_session_or_ump_signals() { ExecutorParams::default(), exec_kind, &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - 
Some(BackingExtras { claim_queue: Default::default(), expected_scheduling_session: 1 }), + PreValidationOutput { + validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: Some(ClaimQueueSnapshot::default()), + }, )) .unwrap(); assert_matches!( @@ -786,7 +790,7 @@ fn invalid_session_or_ump_signals() { // Validation doesn't fail for approvals and disputes, ump signals are not checked. for exec_kind in [PvfExecKind::Approval, PvfExecKind::Dispute] { - let v = executor::block_on(validate_candidate_exhaustive( + let v = executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -795,9 +799,11 @@ fn invalid_session_or_ump_signals() { ExecutorParams::default(), exec_kind, &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - None, + PreValidationOutput { + validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: None, + }, )) .unwrap(); @@ -836,7 +842,7 @@ fn invalid_session_or_ump_signals() { for exec_kind in [PvfExecKind::Backing(dummy_hash()), PvfExecKind::BackingSystemParas(dummy_hash())] { - let v = executor::block_on(validate_candidate_exhaustive( + let v = executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -845,12 +851,11 @@ fn invalid_session_or_ump_signals() { ExecutorParams::default(), exec_kind, &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - Some(BackingExtras { - claim_queue: ClaimQueueSnapshot(cq.clone()), - expected_scheduling_session: 1, - }), + PreValidationOutput { + validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: Some(ClaimQueueSnapshot(cq.clone())), + }, )) .unwrap(); @@ -866,7 +871,7 @@ fn invalid_session_or_ump_signals() { // Validation also doesn't fail for approvals and disputes. 
for exec_kind in [PvfExecKind::Approval, PvfExecKind::Dispute] { - let v = executor::block_on(validate_candidate_exhaustive( + let v = executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -875,9 +880,11 @@ fn invalid_session_or_ump_signals() { ExecutorParams::default(), exec_kind, &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - None, + PreValidationOutput { + validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: None, + }, )) .unwrap(); @@ -977,7 +984,7 @@ fn v3_descriptor_validation() { commitments_hash: commitments_with_signals.hash(), }; - let result = executor::block_on(validate_candidate_exhaustive( + let result = executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result(Ok( validation_result_with_signals.clone() )), @@ -988,12 +995,11 @@ fn v3_descriptor_validation() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - Some(BackingExtras { - claim_queue: ClaimQueueSnapshot(cq.clone()), - expected_scheduling_session: 1, - }), + PreValidationOutput { + validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: Some(ClaimQueueSnapshot(cq.clone())), + }, )) .unwrap(); @@ -1008,7 +1014,7 @@ fn v3_descriptor_validation() { commitments_hash: commitments_no_signals.hash(), }; - let result = executor::block_on(validate_candidate_exhaustive( + let result = executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result(Ok( validation_result_no_signals.clone() )), @@ -1019,12 +1025,11 @@ fn v3_descriptor_validation() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - Some(BackingExtras { - claim_queue: ClaimQueueSnapshot(cq.clone()), - expected_scheduling_session: 1, - }), + PreValidationOutput { + 
validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: Some(ClaimQueueSnapshot(cq.clone())), + }, )) .unwrap(); @@ -1036,79 +1041,6 @@ fn v3_descriptor_validation() { ); } - // Test 3: V3 descriptor with scheduling_session_offset > 0, mismatched expected - // scheduling session => InvalidSessionIndex - { - let mut desc = descriptor.clone(); - // session_index=1, offset=1 => scheduling_session=2 - desc.set_scheduling_session_offset(1); - - let candidate_receipt = CandidateReceipt { - descriptor: desc, - commitments_hash: commitments_with_signals.hash(), - }; - - // Pass expected_scheduling_session=1, but descriptor claims 2 - let result = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result(Ok( - validation_result_with_signals.clone() - )), - validation_data.clone(), - validation_code.clone(), - candidate_receipt, - Arc::new(pov.clone()), - ExecutorParams::default(), - PvfExecKind::Backing(dummy_hash()), - &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, - false, - Some(BackingExtras { - claim_queue: ClaimQueueSnapshot(cq.clone()), - expected_scheduling_session: 1, - }), - )) - .unwrap(); - - assert_matches!(result, ValidationResult::Invalid(InvalidCandidate::InvalidSessionIndex)); - } - - // Test 5: V3 descriptor with scheduling_session_offset > 0, correct expected - // scheduling session => Valid - { - let mut desc = descriptor.clone(); - // session_index=1, offset=1 => scheduling_session=2 - desc.set_scheduling_session_offset(1); - - let candidate_receipt = CandidateReceipt { - descriptor: desc, - commitments_hash: commitments_with_signals.hash(), - }; - - // Pass expected_scheduling_session=2 matching descriptor's claim - let result = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result(Ok( - validation_result_with_signals.clone() - )), - validation_data.clone(), - validation_code.clone(), - candidate_receipt, - Arc::new(pov.clone()), - 
ExecutorParams::default(), - PvfExecKind::Backing(dummy_hash()), - &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, - false, - Some(BackingExtras { - claim_queue: ClaimQueueSnapshot(cq.clone()), - expected_scheduling_session: 2, - }), - )) - .unwrap(); - - assert_matches!(result, ValidationResult::Valid(_, _)); - } - - // Test 6: Scheduling session check is skipped for approvals/disputes { let mut desc = descriptor.clone(); // session_index=1, offset=1 => scheduling_session=2, but expected=1 @@ -1120,7 +1052,7 @@ fn v3_descriptor_validation() { }; for exec_kind in [PvfExecKind::Approval, PvfExecKind::Dispute] { - let result = executor::block_on(validate_candidate_exhaustive( + let result = executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result(Ok( validation_result_with_signals.clone(), )), @@ -1131,9 +1063,11 @@ fn v3_descriptor_validation() { ExecutorParams::default(), exec_kind, &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - None, // No backing extras: session/UMP checks are skipped + PreValidationOutput { + validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: None, + }, )) .unwrap(); @@ -1171,7 +1105,7 @@ fn candidate_validation_bad_return_is_invalid() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; - let v = executor::block_on(validate_candidate_exhaustive( + let v = executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( WasmInvalidCandidate::HardTimeout, ))), @@ -1182,9 +1116,11 @@ fn candidate_validation_bad_return_is_invalid() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - None, // Backing extras not needed: test exercises error path + PreValidationOutput { + validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: None, + }, )) .unwrap(); @@ -1255,7 +1191,7 @@ fn 
candidate_validation_one_ambiguous_error_is_valid() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; - let v = executor::block_on(validate_candidate_exhaustive( + let v = executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result_list(vec![ Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), Ok(validation_result), @@ -1267,9 +1203,11 @@ fn candidate_validation_one_ambiguous_error_is_valid() { ExecutorParams::default(), PvfExecKind::Approval, &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - None, + PreValidationOutput { + validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: None, + }, )) .unwrap(); @@ -1299,7 +1237,7 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; - let v = executor::block_on(validate_candidate_exhaustive( + let v = executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result_list(vec![ Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), @@ -1311,9 +1249,11 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() { ExecutorParams::default(), PvfExecKind::Approval, &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - None, + PreValidationOutput { + validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: None, + }, )) .unwrap(); @@ -1420,7 +1360,7 @@ fn candidate_validation_retry_on_error_helper( let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; - return executor::block_on(validate_candidate_exhaustive( + return executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result_list(mock_errors), validation_data, validation_code, @@ -1429,9 +1369,11 @@ fn 
candidate_validation_retry_on_error_helper( ExecutorParams::default(), exec_kind, &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - None, // Tests error/retry paths, backing extras not needed + PreValidationOutput { + validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: None, + }, )); } @@ -1464,7 +1406,7 @@ fn candidate_validation_timeout_is_internal_error() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; - let v = executor::block_on(validate_candidate_exhaustive( + let v = executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( WasmInvalidCandidate::HardTimeout, ))), @@ -1475,9 +1417,11 @@ fn candidate_validation_timeout_is_internal_error() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - None, // Backing extras not needed: test exercises error path + PreValidationOutput { + validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: None, + }, )); assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))); @@ -1516,7 +1460,7 @@ fn candidate_validation_commitment_hash_mismatch_is_invalid() { hrmp_watermark: 12345, }; - let result = executor::block_on(validate_candidate_exhaustive( + let result = executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), validation_data, validation_code, @@ -1525,9 +1469,11 @@ fn candidate_validation_commitment_hash_mismatch_is_invalid() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - None, // Backing extras not needed: test exercises commitments mismatch path + PreValidationOutput { + validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: None, + }, )) .unwrap(); @@ -1535,58 +1481,6 @@ fn candidate_validation_commitment_hash_mismatch_is_invalid() 
{ assert_matches!(result, ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch)); } -#[test] -fn candidate_validation_code_mismatch_is_invalid() { - let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; - - let pov = PoV { block_data: BlockData(vec![1; 32]) }; - let validation_code = ValidationCode(vec![2; 16]); - - let descriptor = make_valid_candidate_descriptor( - ParaId::from(1_u32), - dummy_hash(), - validation_data.hash(), - pov.hash(), - ValidationCode(vec![1; 16]).hash(), - dummy_hash(), - dummy_hash(), - Sr25519Keyring::Alice, - ) - .into(); - - let check = perform_basic_checks( - &descriptor, - validation_data.max_pov_size, - &pov, - &validation_code.hash(), - ); - assert_matches!(check, Err(InvalidCandidate::CodeHashMismatch)); - - let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; - - let pool = TaskExecutor::new(); - let (_ctx, _ctx_handle) = make_subsystem_context::(pool.clone()); - - let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( - WasmInvalidCandidate::HardTimeout, - ))), - validation_data, - validation_code, - candidate_receipt, - Arc::new(pov), - ExecutorParams::default(), - PvfExecKind::Backing(dummy_hash()), - &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, - false, - None, // Backing extras not needed: test exercises code mismatch path - )) - .unwrap(); - - assert_matches!(v, ValidationResult::Invalid(InvalidCandidate::CodeHashMismatch)); -} - #[test] fn compressed_code_works() { let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; @@ -1631,7 +1525,7 @@ fn compressed_code_works() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; - let v = executor::block_on(validate_candidate_exhaustive( + let v = executor::block_on(validate_candidate( 
MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), validation_data, validation_code, @@ -1640,9 +1534,11 @@ fn compressed_code_works() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - VALIDATION_CODE_BOMB_LIMIT, false, - Some(BackingExtras { claim_queue: Default::default(), expected_scheduling_session: 1 }), + PreValidationOutput { + validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, + claim_queue: None, + }, )); assert_matches!(v, Ok(ValidationResult::Valid(_, _))); @@ -2676,3 +2572,415 @@ fn v3_feature_detected_on_session_change() { assert_eq!(state.session_index, Some(2)); assert!(state.v3_ever_seen, "V3 flag is monotonic — stays true"); } + +// ============================================================================ +// Subsystem-level tests: exercise handle_validation_message end-to-end. +// +// These test the real message handling path with mocked runtime API responses, +// ensuring pre-validation, PVF execution, and post-validation are wired +// correctly. +// ============================================================================ + +/// Helper: respond to the runtime API calls made by `fetch_bomb_limit` for a +/// V2 descriptor (which has an embedded session index, so no SessionIndexForChild +/// call is needed — only ValidationCodeBombLimit). 
+async fn mock_fetch_bomb_limit_v2( + ctx_handle: &mut TestSubsystemContextHandle<AllMessages>, + expected_scheduling_parent: Hash, + session_index: SessionIndex, +) { + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::ValidationCodeBombLimit(session, tx), + )) => { + assert_eq!(parent, expected_scheduling_parent); + assert_eq!(session, session_index); + let _ = tx.send(Ok(VALIDATION_CODE_BOMB_LIMIT)); + } + ); +} + +/// Helper: respond to the runtime API calls made during backing pre-validation +/// (after bomb limit): SessionIndexForChild, AllowedRelayParentInfo, ClaimQueue. +async fn mock_backing_pre_validation( + ctx_handle: &mut TestSubsystemContextHandle<AllMessages>, + expected_scheduling_parent: Hash, + scheduling_session_response: SessionIndex, + relay_parent_info_response: Option<bool>, + claim_queue: BTreeMap<CoreIndex, Vec<ParaId>>, +) { + // get_session_index → SessionIndexForChild + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::SessionIndexForChild(tx), + )) => { + assert_eq!(parent, expected_scheduling_parent); + let _ = tx.send(Ok(scheduling_session_response)); + } + ); + + // check_relay_parent_in_session → AllowedRelayParentInfo + if let Some(valid) = relay_parent_info_response { + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::AllowedRelayParentInfo(_session, _relay_parent, tx), + )) => { + assert_eq!(parent, expected_scheduling_parent); + if valid { + let _ = tx.send(Ok(Some(Default::default()))); + } else { + let _ = tx.send(Ok(None)); + } + } + ); + } + + // claim_queue → ClaimQueue + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::ClaimQueue(tx), + )) => { + assert_eq!(parent, expected_scheduling_parent); + let cq = claim_queue.into_iter().map(|(k, v)| (k,
v.into())).collect(); + let _ = tx.send(Ok(cq)); + } + ); +} + +/// Scheduling session check: backing rejects when the descriptor's session +/// doesn't match the runtime; approval/dispute skips the check entirely. +/// +/// Uses a V2 descriptor with a deliberately wrong session_index=100 while the +/// runtime reports session=1. Loops through all exec kinds to verify backing +/// rejects and approval/dispute accepts. +#[test] +fn pre_validation_scheduling_session_check() { + let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; + let pov = PoV { block_data: BlockData(vec![1; 32]) }; + let head_data = HeadData(vec![1, 1, 1]); + let validation_code = ValidationCode(vec![2; 16]); + let scheduling_parent = dummy_hash(); + + // V2 descriptor with wrong session_index=100 (runtime will report 1). + let descriptor = make_valid_candidate_descriptor_v2( + ParaId::from(1_u32), + scheduling_parent, + CoreIndex(1), + 100, + dummy_hash(), + pov.hash(), + validation_code.hash(), + head_data.hash(), + dummy_hash(), + ); + + let validation_result = WasmValidationResult { + head_data: head_data.clone(), + new_validation_code: None, + upward_messages: Default::default(), + horizontal_messages: Default::default(), + processed_downward_messages: 0, + hrmp_watermark: 0, + }; + let commitments = CandidateCommitments { + head_data: validation_result.head_data.clone(), + upward_messages: validation_result.upward_messages.clone(), + horizontal_messages: validation_result.horizontal_messages.clone(), + new_validation_code: validation_result.new_validation_code.clone(), + processed_downward_messages: validation_result.processed_downward_messages, + hrmp_watermark: validation_result.hrmp_watermark, + }; + let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; + + let all_exec_kinds = [ + PvfExecKind::Backing(dummy_hash()), + PvfExecKind::BackingSystemParas(dummy_hash()), + PvfExecKind::Approval, + 
PvfExecKind::Dispute, + ]; + + for exec_kind in all_exec_kinds { + let is_backing = + matches!(exec_kind, PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_)); + + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); + let mock_backend = + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())); + + let (response_tx, response_rx) = oneshot::channel(); + + let task = handle_validation_message( + ctx.sender().clone(), + mock_backend, + Metrics::default(), + false, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data: validation_data.clone(), + validation_code: validation_code.clone(), + candidate_receipt: candidate_receipt.clone(), + pov: Arc::new(pov.clone()), + executor_params: ExecutorParams::default(), + exec_kind, + response_sender: response_tx, + }, + ); + + let test_fut = async move { + // fetch_bomb_limit: V2 descriptor has embedded session_index=100. + mock_fetch_bomb_limit_v2(&mut ctx_handle, scheduling_parent, 100).await; + + if is_backing { + // Backing: SessionIndexForChild returns 1 (mismatch with 100). + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::SessionIndexForChild(tx), + )) => { + let _ = tx.send(Ok(1)); + } + ); + // Rejects before any further calls. + } + // Approval/dispute: no session check, PVF runs directly. + }; + + executor::block_on(future::join(test_fut, task)); + + let result = executor::block_on(response_rx).unwrap().unwrap(); + if is_backing { + assert_matches!( + result, + ValidationResult::Invalid(InvalidCandidate::InvalidSchedulingSession) + ); + } else { + assert_matches!(result, ValidationResult::Valid(_, _)); + } + } +} + +/// V3 scheduling session offset mismatch: backing rejects when the computed scheduling session +/// (session_index + offset) doesn't match the runtime. 
Uses `v3_ever_seen=true` — backing only +/// sends V3 candidates after V3 is confirmed enabled. +#[test] +fn pre_validation_v3_scheduling_offset_mismatch() { + let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; + let pov = PoV { block_data: BlockData(vec![1; 32]) }; + let validation_code = ValidationCode(vec![2; 16]); + let scheduling_parent = Hash::repeat_byte(0xAA); + + // V3 descriptor: session_index=1, offset=1 → scheduling_session=2 + let mut descriptor = make_valid_candidate_descriptor_v3( + ParaId::from(1_u32), + dummy_hash(), // relay_parent + CoreIndex(0), + 1, // session_index + dummy_hash(), + pov.hash(), + validation_code.hash(), + dummy_hash(), + dummy_hash(), // erasure_root + scheduling_parent, + ); + descriptor.set_scheduling_session_offset(1); + + let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; + + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); + let mock_backend = + MockValidateCandidateBackend::with_hardcoded_result(Ok(WasmValidationResult { + head_data: HeadData(vec![1]), + new_validation_code: None, + upward_messages: Default::default(), + horizontal_messages: Default::default(), + processed_downward_messages: 0, + hrmp_watermark: 0, + })); + + let (response_tx, response_rx) = oneshot::channel(); + + let task = handle_validation_message( + ctx.sender().clone(), + mock_backend, + Metrics::default(), + true, // v3_ever_seen=true → V3 descriptor fields are trusted + CandidateValidationMessage::ValidateFromExhaustive { + validation_data: validation_data.clone(), + validation_code: validation_code.clone(), + candidate_receipt: candidate_receipt.clone(), + pov: Arc::new(pov.clone()), + executor_params: ExecutorParams::default(), + exec_kind: PvfExecKind::Backing(dummy_hash()), + response_sender: response_tx, + }, + ); + + let test_fut = async move { + // With v3_ever_seen=true, fetch_bomb_limit uses the real 
scheduling_parent + // and scheduling_session=2 (session_index=1 + offset=1). + mock_fetch_bomb_limit_v2(&mut ctx_handle, scheduling_parent, 2).await; + // Backing: get_session_index at scheduling_parent returns 1, + // but descriptor claims scheduling_session=2. + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::SessionIndexForChild(tx), + )) => { + assert_eq!(parent, scheduling_parent); + let _ = tx.send(Ok(1)); + } + ); + }; + + executor::block_on(future::join(test_fut, task)); + + assert_matches!( + executor::block_on(response_rx).unwrap(), + Ok(ValidationResult::Invalid(InvalidCandidate::InvalidSchedulingSession)) + ); +} + +/// A PoV hash mismatch is caught during pre-validation before PVF execution, +/// for any exec kind. +#[test] +fn pre_validation_basic_checks_pov_hash_mismatch() { + let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; + let pov = PoV { block_data: BlockData(vec![1; 32]) }; + let validation_code = ValidationCode(vec![2; 16]); + + // Create descriptor with WRONG pov hash + let descriptor = make_valid_candidate_descriptor_v2( + ParaId::from(1_u32), + dummy_hash(), + CoreIndex(1), + 1, + dummy_hash(), + Hash::repeat_byte(0xFF), // wrong pov hash + validation_code.hash(), + dummy_hash(), + dummy_hash(), + ); + + let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; + + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); + let mock_backend = + MockValidateCandidateBackend::with_hardcoded_result(Ok(WasmValidationResult { + head_data: HeadData(vec![1]), + new_validation_code: None, + upward_messages: Default::default(), + horizontal_messages: Default::default(), + processed_downward_messages: 0, + hrmp_watermark: 0, + })); + + let (response_tx, response_rx) = oneshot::channel(); + + let task = handle_validation_message( + 
ctx.sender().clone(), + mock_backend, + Metrics::default(), + false, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data: validation_data.clone(), + validation_code: validation_code.clone(), + candidate_receipt: candidate_receipt.clone(), + pov: Arc::new(pov.clone()), + executor_params: ExecutorParams::default(), + exec_kind: PvfExecKind::Backing(dummy_hash()), + response_sender: response_tx, + }, + ); + + let test_fut = async move { + // fetch_bomb_limit + mock_fetch_bomb_limit_v2(&mut ctx_handle, dummy_hash(), 1).await; + // perform_basic_checks fails with PoVHashMismatch — no further calls + }; + + executor::block_on(future::join(test_fut, task)); + + assert_matches!( + executor::block_on(response_rx).unwrap(), + Ok(ValidationResult::Invalid(InvalidCandidate::PoVHashMismatch)) + ); +} + +/// A validation code hash mismatch is caught during pre-validation before PVF +/// execution, for any exec kind. +#[test] +fn pre_validation_basic_checks_code_hash_mismatch() { + let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; + let pov = PoV { block_data: BlockData(vec![1; 32]) }; + let validation_code = ValidationCode(vec![2; 16]); + + // Create descriptor with WRONG validation code hash + let descriptor = make_valid_candidate_descriptor_v2( + ParaId::from(1_u32), + dummy_hash(), + CoreIndex(1), + 1, + dummy_hash(), + pov.hash(), + ValidationCode(vec![0xFF; 16]).hash(), // wrong + dummy_hash(), + dummy_hash(), + ); + + let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; + + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); + let mock_backend = + MockValidateCandidateBackend::with_hardcoded_result(Ok(WasmValidationResult { + head_data: HeadData(vec![1]), + new_validation_code: None, + upward_messages: Default::default(), + horizontal_messages: Default::default(), + processed_downward_messages: 0, + hrmp_watermark: 0, + 
})); + + let (response_tx, response_rx) = oneshot::channel(); + + let task = handle_validation_message( + ctx.sender().clone(), + mock_backend, + Metrics::default(), + false, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data: validation_data.clone(), + validation_code: validation_code.clone(), + candidate_receipt: candidate_receipt.clone(), + pov: Arc::new(pov.clone()), + executor_params: ExecutorParams::default(), + exec_kind: PvfExecKind::Backing(dummy_hash()), + response_sender: response_tx, + }, + ); + + let test_fut = async move { + // fetch_bomb_limit + mock_fetch_bomb_limit_v2(&mut ctx_handle, dummy_hash(), 1).await; + // perform_basic_checks fails with CodeHashMismatch — no further calls + }; + + executor::block_on(future::join(test_fut, task)); + + assert_matches!( + executor::block_on(response_rx).unwrap(), + Ok(ValidationResult::Invalid(InvalidCandidate::CodeHashMismatch)) + ); +} From 1b343f77324a4331826ece1fb45e8072a51ce119 Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 19 Mar 2026 10:56:07 +0100 Subject: [PATCH 35/52] More test coverage --- .../core/candidate-validation/src/tests.rs | 420 +++++++++++++----- 1 file changed, 320 insertions(+), 100 deletions(-) diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 7c7b31baa5267..4f1400cb83a0e 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -2851,136 +2851,356 @@ fn pre_validation_v3_scheduling_offset_mismatch() { ); } -/// A PoV hash mismatch is caught during pre-validation before PVF execution, -/// for any exec kind. +/// Basic checks (PoV hash, code hash, PoV size) are caught during pre-validation +/// before PVF execution, regardless of exec kind. 
#[test] -fn pre_validation_basic_checks_pov_hash_mismatch() { +fn pre_validation_basic_checks() { let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; let pov = PoV { block_data: BlockData(vec![1; 32]) }; let validation_code = ValidationCode(vec![2; 16]); - // Create descriptor with WRONG pov hash - let descriptor = make_valid_candidate_descriptor_v2( - ParaId::from(1_u32), - dummy_hash(), - CoreIndex(1), - 1, - dummy_hash(), - Hash::repeat_byte(0xFF), // wrong pov hash - validation_code.hash(), - dummy_hash(), - dummy_hash(), - ); - - let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; - - let pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); - let mock_backend = - MockValidateCandidateBackend::with_hardcoded_result(Ok(WasmValidationResult { - head_data: HeadData(vec![1]), - new_validation_code: None, - upward_messages: Default::default(), - horizontal_messages: Default::default(), - processed_downward_messages: 0, - hrmp_watermark: 0, - })); + // Each case: (descriptor, pov_override, expected_error) + let cases: Vec<(_, Option, _)> = vec![ + // Wrong PoV hash + ( + make_valid_candidate_descriptor_v2( + ParaId::from(1_u32), + dummy_hash(), + CoreIndex(1), + 1, + dummy_hash(), + Hash::repeat_byte(0xFF), // wrong + validation_code.hash(), + dummy_hash(), + dummy_hash(), + ), + None, + InvalidCandidate::PoVHashMismatch, + ), + // Wrong code hash + ( + make_valid_candidate_descriptor_v2( + ParaId::from(1_u32), + dummy_hash(), + CoreIndex(1), + 1, + dummy_hash(), + pov.hash(), + ValidationCode(vec![0xFF; 16]).hash(), // wrong + dummy_hash(), + dummy_hash(), + ), + None, + InvalidCandidate::CodeHashMismatch, + ), + // PoV too large (max_pov_size=1024 but PoV is 2048 bytes) + ( + make_valid_candidate_descriptor_v2( + ParaId::from(1_u32), + dummy_hash(), + CoreIndex(1), + 1, + dummy_hash(), + PoV { block_data: BlockData(vec![0; 2048]) }.hash(), + 
validation_code.hash(), + dummy_hash(), + dummy_hash(), + ), + Some(PoV { block_data: BlockData(vec![0; 2048]) }), + InvalidCandidate::ParamsTooLarge(2048), + ), + ]; - let (response_tx, response_rx) = oneshot::channel(); + let all_exec_kinds = [ + PvfExecKind::Backing(dummy_hash()), + PvfExecKind::BackingSystemParas(dummy_hash()), + PvfExecKind::Approval, + PvfExecKind::Dispute, + ]; - let task = handle_validation_message( - ctx.sender().clone(), - mock_backend, - Metrics::default(), - false, - CandidateValidationMessage::ValidateFromExhaustive { - validation_data: validation_data.clone(), - validation_code: validation_code.clone(), - candidate_receipt: candidate_receipt.clone(), - pov: Arc::new(pov.clone()), - executor_params: ExecutorParams::default(), - exec_kind: PvfExecKind::Backing(dummy_hash()), - response_sender: response_tx, - }, - ); + for (descriptor, pov_override, expected_error) in &cases { + let test_pov = pov_override.as_ref().unwrap_or(&pov); + + for exec_kind in &all_exec_kinds { + let candidate_receipt = + CandidateReceipt { descriptor: descriptor.clone(), commitments_hash: Hash::zero() }; + + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = + make_subsystem_context::(pool.clone()); + let mock_backend = + MockValidateCandidateBackend::with_hardcoded_result(Ok(WasmValidationResult { + head_data: HeadData(vec![1]), + new_validation_code: None, + upward_messages: Default::default(), + horizontal_messages: Default::default(), + processed_downward_messages: 0, + hrmp_watermark: 0, + })); + + let (response_tx, response_rx) = oneshot::channel(); + + let task = handle_validation_message( + ctx.sender().clone(), + mock_backend, + Metrics::default(), + false, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data: validation_data.clone(), + validation_code: validation_code.clone(), + candidate_receipt, + pov: Arc::new(test_pov.clone()), + executor_params: ExecutorParams::default(), + exec_kind: *exec_kind, + 
response_sender: response_tx, + }, + ); - let test_fut = async move { - // fetch_bomb_limit - mock_fetch_bomb_limit_v2(&mut ctx_handle, dummy_hash(), 1).await; - // perform_basic_checks fails with PoVHashMismatch — no further calls - }; + let test_fut = async move { + mock_fetch_bomb_limit_v2(&mut ctx_handle, dummy_hash(), 1).await; + // perform_basic_checks fails — no further calls + }; - executor::block_on(future::join(test_fut, task)); + executor::block_on(future::join(test_fut, task)); - assert_matches!( - executor::block_on(response_rx).unwrap(), - Ok(ValidationResult::Invalid(InvalidCandidate::PoVHashMismatch)) - ); + assert_matches!( + executor::block_on(response_rx).unwrap(), + Ok(ValidationResult::Invalid(ref e)) => { + assert_eq!( + std::mem::discriminant(e), + std::mem::discriminant(expected_error), + "Expected {expected_error:?} for exec_kind {exec_kind:?}, got {e:?}" + ); + } + ); + } + } } -/// A validation code hash mismatch is caught during pre-validation before PVF -/// execution, for any exec kind. +/// Relay parent session check via AllowedRelayParentInfo (v16+ API): backing +/// rejects when the relay parent is not found in the claimed session. +/// +/// Also verifies that `NotSupported` (old runtime) is safely skipped — the +/// scheduling session check covers it on old runtimes where relay_parent == +/// scheduling_parent. #[test] -fn pre_validation_basic_checks_code_hash_mismatch() { +fn pre_validation_relay_parent_session_check() { let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; let pov = PoV { block_data: BlockData(vec![1; 32]) }; + let head_data = HeadData(vec![1, 1, 1]); let validation_code = ValidationCode(vec![2; 16]); + let scheduling_parent = dummy_hash(); - // Create descriptor with WRONG validation code hash + // V2 descriptor with correct session_index=1. 
let descriptor = make_valid_candidate_descriptor_v2( ParaId::from(1_u32), - dummy_hash(), + scheduling_parent, CoreIndex(1), 1, dummy_hash(), pov.hash(), - ValidationCode(vec![0xFF; 16]).hash(), // wrong - dummy_hash(), + validation_code.hash(), + head_data.hash(), dummy_hash(), ); - let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; + let validation_result = WasmValidationResult { + head_data: head_data.clone(), + new_validation_code: None, + upward_messages: Default::default(), + horizontal_messages: Default::default(), + processed_downward_messages: 0, + hrmp_watermark: 0, + }; + let commitments = CandidateCommitments { + head_data: validation_result.head_data.clone(), + upward_messages: validation_result.upward_messages.clone(), + horizontal_messages: validation_result.horizontal_messages.clone(), + new_validation_code: validation_result.new_validation_code.clone(), + processed_downward_messages: validation_result.processed_downward_messages, + hrmp_watermark: validation_result.hrmp_watermark, + }; + let candidate_receipt = + CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; - let pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); - let mock_backend = - MockValidateCandidateBackend::with_hardcoded_result(Ok(WasmValidationResult { - head_data: HeadData(vec![1]), - new_validation_code: None, - upward_messages: Default::default(), - horizontal_messages: Default::default(), - processed_downward_messages: 0, - hrmp_watermark: 0, - })); + // Case 1: AllowedRelayParentInfo returns None → InvalidRelayParentSession + { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); + let mock_backend = MockValidateCandidateBackend::with_hardcoded_result(Ok( + validation_result.clone(), + )); - let (response_tx, response_rx) = oneshot::channel(); + let (response_tx, response_rx) = oneshot::channel(); - let task = 
handle_validation_message( - ctx.sender().clone(), - mock_backend, - Metrics::default(), - false, - CandidateValidationMessage::ValidateFromExhaustive { - validation_data: validation_data.clone(), - validation_code: validation_code.clone(), - candidate_receipt: candidate_receipt.clone(), - pov: Arc::new(pov.clone()), - executor_params: ExecutorParams::default(), - exec_kind: PvfExecKind::Backing(dummy_hash()), - response_sender: response_tx, - }, - ); + let task = handle_validation_message( + ctx.sender().clone(), + mock_backend, + Metrics::default(), + false, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data: validation_data.clone(), + validation_code: validation_code.clone(), + candidate_receipt: candidate_receipt.clone(), + pov: Arc::new(pov.clone()), + executor_params: ExecutorParams::default(), + exec_kind: PvfExecKind::Backing(dummy_hash()), + response_sender: response_tx, + }, + ); - let test_fut = async move { - // fetch_bomb_limit - mock_fetch_bomb_limit_v2(&mut ctx_handle, dummy_hash(), 1).await; - // perform_basic_checks fails with CodeHashMismatch — no further calls - }; + let test_fut = async move { + mock_fetch_bomb_limit_v2(&mut ctx_handle, scheduling_parent, 1).await; + // SessionIndexForChild: scheduling session matches. + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, RuntimeApiRequest::SessionIndexForChild(tx), + )) => { let _ = tx.send(Ok(1)); } + ); + // AllowedRelayParentInfo: relay parent NOT found. 
+ assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, RuntimeApiRequest::AllowedRelayParentInfo(_, _, tx), + )) => { let _ = tx.send(Ok(None)); } + ); + }; - executor::block_on(future::join(test_fut, task)); + executor::block_on(future::join(test_fut, task)); - assert_matches!( - executor::block_on(response_rx).unwrap(), - Ok(ValidationResult::Invalid(InvalidCandidate::CodeHashMismatch)) - ); + assert_matches!( + executor::block_on(response_rx).unwrap(), + Ok(ValidationResult::Invalid(InvalidCandidate::InvalidRelayParentSession)) + ); + } + + // Case 2: AllowedRelayParentInfo not supported → skipped, proceeds to valid. + { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); + let mock_backend = MockValidateCandidateBackend::with_hardcoded_result(Ok( + validation_result.clone(), + )); + + let (response_tx, response_rx) = oneshot::channel(); + + let task = handle_validation_message( + ctx.sender().clone(), + mock_backend, + Metrics::default(), + false, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data: validation_data.clone(), + validation_code: validation_code.clone(), + candidate_receipt: candidate_receipt.clone(), + pov: Arc::new(pov.clone()), + executor_params: ExecutorParams::default(), + exec_kind: PvfExecKind::Backing(dummy_hash()), + response_sender: response_tx, + }, + ); + + let test_fut = async move { + mock_fetch_bomb_limit_v2(&mut ctx_handle, scheduling_parent, 1).await; + // SessionIndexForChild: matches. + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, RuntimeApiRequest::SessionIndexForChild(tx), + )) => { let _ = tx.send(Ok(1)); } + ); + // AllowedRelayParentInfo: not supported → skipped. 
+ assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, RuntimeApiRequest::AllowedRelayParentInfo(_, _, tx), + )) => { + let _ = tx.send(Err(RuntimeApiError::NotSupported { + runtime_api_name: "AllowedRelayParentInfo", + })); + } + ); + // ClaimQueue: proceeds normally. + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, RuntimeApiRequest::ClaimQueue(tx), + )) => { + let mut cq = BTreeMap::new(); + let _ = cq.insert(CoreIndex(1), vec![ParaId::from(1_u32)].into()); + let _ = tx.send(Ok(cq)); + } + ); + }; + + executor::block_on(future::join(test_fut, task)); + + assert_matches!( + executor::block_on(response_rx).unwrap(), + Ok(ValidationResult::Valid(_, _)) + ); + } + + // Case 3: AllowedRelayParentInfo returns Some → valid, proceeds. + { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); + let mock_backend = MockValidateCandidateBackend::with_hardcoded_result(Ok( + validation_result.clone(), + )); + + let (response_tx, response_rx) = oneshot::channel(); + + let task = handle_validation_message( + ctx.sender().clone(), + mock_backend, + Metrics::default(), + false, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data: validation_data.clone(), + validation_code: validation_code.clone(), + candidate_receipt: candidate_receipt.clone(), + pov: Arc::new(pov.clone()), + executor_params: ExecutorParams::default(), + exec_kind: PvfExecKind::Backing(dummy_hash()), + response_sender: response_tx, + }, + ); + + let test_fut = async move { + mock_fetch_bomb_limit_v2(&mut ctx_handle, scheduling_parent, 1).await; + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, RuntimeApiRequest::SessionIndexForChild(tx), + )) => { let _ = tx.send(Ok(1)); } + ); + // AllowedRelayParentInfo: found → valid. 
+ assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, RuntimeApiRequest::AllowedRelayParentInfo(_, _, tx), + )) => { let _ = tx.send(Ok(Some(Default::default()))); } + ); + // ClaimQueue. + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, RuntimeApiRequest::ClaimQueue(tx), + )) => { + let mut cq = BTreeMap::new(); + let _ = cq.insert(CoreIndex(1), vec![ParaId::from(1_u32)].into()); + let _ = tx.send(Ok(cq)); + } + ); + }; + + executor::block_on(future::join(test_fut, task)); + + assert_matches!( + executor::block_on(response_rx).unwrap(), + Ok(ValidationResult::Valid(_, _)) + ); + } } From 91a2d27be2197757dc6fb740b43019cd35b9998d Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 19 Mar 2026 11:10:07 +0100 Subject: [PATCH 36/52] Test UMP signal enforcement better --- .../core/candidate-validation/src/tests.rs | 171 +++++++----------- 1 file changed, 64 insertions(+), 107 deletions(-) diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 4f1400cb83a0e..abbc4747ed749 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -899,25 +899,28 @@ fn invalid_session_or_ump_signals() { } } +/// V3 UMP signal enforcement: backing requires UMP signals for V3 candidates, +/// approval/dispute skips the check entirely. 
+/// +/// Loops through all exec kinds × (with signals, without signals) and verifies: +/// - Backing + signals → Valid +/// - Backing + no signals → Invalid(NoUMPSignalWithV3Descriptor) +/// - Approval/Dispute + signals → Valid (check skipped) +/// - Approval/Dispute + no signals → Valid (check skipped) #[test] -/// Tests V3 candidate descriptor validation: -/// - V3 descriptor with UMP signals is valid -/// - V3 descriptor without UMP signals is invalid (NoUMPSignalWithV3Descriptor) -fn v3_descriptor_validation() { +fn v3_ump_signal_enforcement() { let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; - let pov = PoV { block_data: BlockData(vec![1; 32]) }; let head_data = HeadData(vec![1, 1, 1]); let validation_code = ValidationCode(vec![2; 16]); - // Create a V3 descriptor with scheduling_parent different from relay_parent let relay_parent = dummy_hash(); let scheduling_parent = Hash::repeat_byte(0x42); let descriptor = make_valid_candidate_descriptor_v3( ParaId::from(1_u32), relay_parent, CoreIndex(0), - 1, // session_index matching expected + 1, validation_data.hash(), pov.hash(), validation_code.hash(), @@ -926,13 +929,10 @@ fn v3_descriptor_validation() { scheduling_parent, ); - // Verify it's detected as V3 assert_eq!(descriptor.version(), CandidateDescriptorVersion::V3); - // Under old rules, V3 (non-zero scheduling_parent) is detected as V1 - assert_eq!(descriptor.version_old_rules(), CandidateDescriptorVersion::V1); - // Validation result WITH UMP signals (required for V3) - let mut validation_result_with_signals = WasmValidationResult { + // Validation result WITH UMP signals (required for V3 in backing) + let mut result_with_signals = WasmValidationResult { head_data: head_data.clone(), new_validation_code: None, upward_messages: Default::default(), @@ -940,22 +940,13 @@ fn v3_descriptor_validation() { processed_downward_messages: 0, hrmp_watermark: 0, }; - 
validation_result_with_signals.upward_messages.force_push(UMP_SEPARATOR); - validation_result_with_signals + result_with_signals.upward_messages.force_push(UMP_SEPARATOR); + result_with_signals .upward_messages .force_push(UMPSignal::SelectCore(CoreSelector(0), ClaimQueueOffset(0)).encode()); - let commitments_with_signals = CandidateCommitments { - head_data: validation_result_with_signals.head_data.clone(), - upward_messages: validation_result_with_signals.upward_messages.clone(), - horizontal_messages: validation_result_with_signals.horizontal_messages.clone(), - new_validation_code: validation_result_with_signals.new_validation_code.clone(), - processed_downward_messages: validation_result_with_signals.processed_downward_messages, - hrmp_watermark: validation_result_with_signals.hrmp_watermark, - }; - // Validation result WITHOUT UMP signals - let validation_result_no_signals = WasmValidationResult { + let result_no_signals = WasmValidationResult { head_data: head_data.clone(), new_validation_code: None, upward_messages: Default::default(), @@ -964,114 +955,80 @@ fn v3_descriptor_validation() { hrmp_watermark: 0, }; - let commitments_no_signals = CandidateCommitments { - head_data: validation_result_no_signals.head_data.clone(), - upward_messages: validation_result_no_signals.upward_messages.clone(), - horizontal_messages: validation_result_no_signals.horizontal_messages.clone(), - new_validation_code: validation_result_no_signals.new_validation_code.clone(), - processed_downward_messages: validation_result_no_signals.processed_downward_messages, - hrmp_watermark: validation_result_no_signals.hrmp_watermark, - }; - - // Setup claim queue with para assigned to core 0 let mut cq = BTreeMap::new(); let _ = cq.insert(CoreIndex(0), vec![ParaId::from(1_u32)].into()); - // Test 1: V3 descriptor + UMP signals => Valid - { - let candidate_receipt = CandidateReceipt { - descriptor: descriptor.clone(), - commitments_hash: commitments_with_signals.hash(), - }; - - let 
result = executor::block_on(validate_candidate( - MockValidateCandidateBackend::with_hardcoded_result(Ok( - validation_result_with_signals.clone() - )), - validation_data.clone(), - validation_code.clone(), - candidate_receipt, - Arc::new(pov.clone()), - ExecutorParams::default(), - PvfExecKind::Backing(dummy_hash()), - &Default::default(), - false, - PreValidationOutput { - validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, - claim_queue: Some(ClaimQueueSnapshot(cq.clone())), - }, - )) - .unwrap(); - - assert_matches!(result, ValidationResult::Valid(_, _)); - } + let all_exec_kinds = [ + PvfExecKind::Backing(dummy_hash()), + PvfExecKind::BackingSystemParas(dummy_hash()), + PvfExecKind::Approval, + PvfExecKind::Dispute, + ]; - // Test 2: V3 descriptor + NO UMP signals => Invalid - // (NoUMPSignalWithV3Descriptor) - { + for has_signals in [true, false] { + let validation_result = if has_signals { + result_with_signals.clone() + } else { + result_no_signals.clone() + }; + let commitments = CandidateCommitments { + head_data: validation_result.head_data.clone(), + upward_messages: validation_result.upward_messages.clone(), + horizontal_messages: validation_result.horizontal_messages.clone(), + new_validation_code: validation_result.new_validation_code.clone(), + processed_downward_messages: validation_result.processed_downward_messages, + hrmp_watermark: validation_result.hrmp_watermark, + }; let candidate_receipt = CandidateReceipt { descriptor: descriptor.clone(), - commitments_hash: commitments_no_signals.hash(), + commitments_hash: commitments.hash(), }; - let result = executor::block_on(validate_candidate( - MockValidateCandidateBackend::with_hardcoded_result(Ok( - validation_result_no_signals.clone() - )), - validation_data.clone(), - validation_code.clone(), - candidate_receipt, - Arc::new(pov.clone()), - ExecutorParams::default(), - PvfExecKind::Backing(dummy_hash()), - &Default::default(), - false, - PreValidationOutput { - validation_code_bomb_limit: 
VALIDATION_CODE_BOMB_LIMIT, - claim_queue: Some(ClaimQueueSnapshot(cq.clone())), - }, - )) - .unwrap(); - - assert_matches!( - result, - ValidationResult::Invalid(InvalidCandidate::InvalidUMPSignals( - CommittedCandidateReceiptError::NoUMPSignalWithV3Descriptor - )) - ); - } - - { - let mut desc = descriptor.clone(); - // session_index=1, offset=1 => scheduling_session=2, but expected=1 - desc.set_scheduling_session_offset(1); - - let candidate_receipt = CandidateReceipt { - descriptor: desc, - commitments_hash: commitments_with_signals.hash(), - }; + for exec_kind in &all_exec_kinds { + let is_backing = matches!( + exec_kind, + PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_) + ); - for exec_kind in [PvfExecKind::Approval, PvfExecKind::Dispute] { let result = executor::block_on(validate_candidate( MockValidateCandidateBackend::with_hardcoded_result(Ok( - validation_result_with_signals.clone(), + validation_result.clone(), )), validation_data.clone(), validation_code.clone(), candidate_receipt.clone(), Arc::new(pov.clone()), ExecutorParams::default(), - exec_kind, + *exec_kind, &Default::default(), false, PreValidationOutput { validation_code_bomb_limit: VALIDATION_CODE_BOMB_LIMIT, - claim_queue: None, + claim_queue: if is_backing { + Some(ClaimQueueSnapshot(cq.clone())) + } else { + None + }, }, )) .unwrap(); - assert_matches!(result, ValidationResult::Valid(_, _)); + match (is_backing, has_signals) { + // Backing without signals → V3 requires them. + (true, false) => assert_matches!( + result, + ValidationResult::Invalid(InvalidCandidate::InvalidUMPSignals( + CommittedCandidateReceiptError::NoUMPSignalWithV3Descriptor + )), + "Backing must reject V3 without UMP signals ({exec_kind:?})" + ), + // All other combinations → valid. 
+ _ => assert_matches!( + result, + ValidationResult::Valid(_, _), + "Expected Valid for exec_kind={exec_kind:?} has_signals={has_signals}" + ), + } } } } From 80a61ec002e8809e32210233f49fc6c881e1f1a3 Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 19 Mar 2026 11:53:13 +0100 Subject: [PATCH 37/52] Review remarks - clarifications. --- .../node/core/candidate-validation/src/lib.rs | 12 ++-- .../core/candidate-validation/src/tests.rs | 38 +++++-------- .../src/validator_side/mod.rs | 6 +- polkadot/primitives/src/lib.rs | 48 ++++++++-------- polkadot/primitives/src/v9/mod.rs | 56 ++++++++++++++----- 5 files changed, 88 insertions(+), 72 deletions(-) diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index f9b7697bd3826..dd66c8c0b2512 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -246,7 +246,7 @@ enum PreValidationError { /// - Claim queue fetch /// /// Backing-only checks are skipped for approval/dispute because the runtime -/// validates them at inclusion time and the chain state they depend on may not +/// validates them at backing time and the chain state they depend on may not /// be available in disputes. async fn pre_validate_candidate( sender: &mut Sender, @@ -317,12 +317,14 @@ where // exist, and the scheduling session check above already covers the // relay parent session (scheduling_parent == relay_parent). 
Err(CheckRelayParentSessionError::NotSupported) => {}, - Err(CheckRelayParentSessionError::NotFound) => + Err(CheckRelayParentSessionError::NotFound) => { return Err(PreValidationError::Invalid( InvalidCandidate::InvalidRelayParentSession, - )), - Err(CheckRelayParentSessionError::RuntimeError(err)) => - return Err(PreValidationError::RuntimeError(err)), + )) + }, + Err(CheckRelayParentSessionError::RuntimeError(err)) => { + return Err(PreValidationError::RuntimeError(err)) + }, } } diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index abbc4747ed749..61ce533d6607b 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -966,11 +966,8 @@ fn v3_ump_signal_enforcement() { ]; for has_signals in [true, false] { - let validation_result = if has_signals { - result_with_signals.clone() - } else { - result_no_signals.clone() - }; + let validation_result = + if has_signals { result_with_signals.clone() } else { result_no_signals.clone() }; let commitments = CandidateCommitments { head_data: validation_result.head_data.clone(), upward_messages: validation_result.upward_messages.clone(), @@ -985,15 +982,11 @@ fn v3_ump_signal_enforcement() { }; for exec_kind in &all_exec_kinds { - let is_backing = matches!( - exec_kind, - PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_) - ); + let is_backing = + matches!(exec_kind, PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_)); let result = executor::block_on(validate_candidate( - MockValidateCandidateBackend::with_hardcoded_result(Ok( - validation_result.clone(), - )), + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), candidate_receipt.clone(), @@ -2883,8 +2876,7 @@ fn pre_validation_basic_checks() { CandidateReceipt { descriptor: descriptor.clone(), commitments_hash: Hash::zero() }; let 
pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = - make_subsystem_context::(pool.clone()); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); let mock_backend = MockValidateCandidateBackend::with_hardcoded_result(Ok(WasmValidationResult { head_data: HeadData(vec![1]), @@ -2977,16 +2969,14 @@ fn pre_validation_relay_parent_session_check() { processed_downward_messages: validation_result.processed_downward_messages, hrmp_watermark: validation_result.hrmp_watermark, }; - let candidate_receipt = - CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; + let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; // Case 1: AllowedRelayParentInfo returns None → InvalidRelayParentSession { let pool = TaskExecutor::new(); let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); - let mock_backend = MockValidateCandidateBackend::with_hardcoded_result(Ok( - validation_result.clone(), - )); + let mock_backend = + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())); let (response_tx, response_rx) = oneshot::channel(); @@ -3036,9 +3026,8 @@ fn pre_validation_relay_parent_session_check() { { let pool = TaskExecutor::new(); let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); - let mock_backend = MockValidateCandidateBackend::with_hardcoded_result(Ok( - validation_result.clone(), - )); + let mock_backend = + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())); let (response_tx, response_rx) = oneshot::channel(); @@ -3103,9 +3092,8 @@ fn pre_validation_relay_parent_session_check() { { let pool = TaskExecutor::new(); let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); - let mock_backend = MockValidateCandidateBackend::with_hardcoded_result(Ok( - validation_result.clone(), - )); + let mock_backend = + 
MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())); let (response_tx, response_rx) = oneshot::channel(); diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index a163e5813d1df..2ee718dadb664 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -2660,11 +2660,11 @@ async fn kick_off_seconding( let per_scheduling_parent = match state.per_scheduling_parent.get_mut(&scheduling_parent) { Some(state) => state, None => { - // Relay parent went out of view, not an error. + // Scheduling parent went out of view, not an error. gum::trace!( target: LOG_TARGET, - relay_parent = ?scheduling_parent, - "Fetched collation for a parent out of view", + ?scheduling_parent, + "Fetched collation for a scheduling parent out of view", ); return Ok(false); }, diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 1167cd9e22c6e..1f5cea1691054 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -45,30 +45,30 @@ pub use v9::{ ApprovalVoteMultipleCandidates, ApprovalVotingParams, ApprovedPeerId, AssignmentId, AsyncBackingParams, AuthorityDiscoveryId, AvailabilityBitfield, BackedCandidate, Balance, BlakeTwo256, Block, BlockId, BlockNumber, CandidateCommitments, CandidateDescriptorV2, - CandidateDescriptorVersion, CandidateEvent, CandidateHash, CandidateIndex, CandidateReceiptV2, - CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, ChunkIndex, ClaimQueueOffset, - CollatorId, CollatorSignature, CommittedCandidateReceiptError, CommittedCandidateReceiptV2, - CompactStatement, ConsensusLog, CoreIndex, CoreSelector, CoreState, DisputeOffenceKind, - DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, ExecutorParam, - ExecutorParamError, ExecutorParams, ExecutorParamsHash, 
ExecutorParamsPrepHash, - ExplicitDisputeStatement, GroupIndex, GroupRotationInfo, Hash, HashT, HeadData, Header, - HorizontalMessages, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, IndexedVec, - InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, NodeFeatures, - Nonce, OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, - ParathreadEntry, PersistedValidationData, PvfCheckStatement, PvfExecKind, PvfPrepKind, - RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, - RuntimeMetricOp, RuntimeMetricUpdate, ScheduledCore, SchedulerParams, ScrapedOnChainVotes, - SessionIndex, SessionInfo, Signature, Signed, SignedAvailabilityBitfield, - SignedAvailabilityBitfields, SignedStatement, SigningContext, Slot, TransposedClaimQueue, - UMPSignal, UncheckedSigned, UncheckedSignedAvailabilityBitfield, - UncheckedSignedAvailabilityBitfields, UncheckedSignedStatement, UpgradeGoAhead, - UpgradeRestriction, UpwardMessage, ValidDisputeStatementKind, ValidationCode, - ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, - ValidityError, CandidateDescriptorVersionCheckError, ASSIGNMENT_KEY_TYPE_ID, DEFAULT_CLAIM_QUEUE_OFFSET, - DEFAULT_SCHEDULING_LOOKAHEAD, LEGACY_MIN_BACKING_VOTES, LOWEST_PUBLIC_ID, MAX_CODE_SIZE, - MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, MIN_CODE_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, - ON_DEMAND_MAX_QUEUE_MAX_SIZE, PARACHAINS_INHERENT_IDENTIFIER, PARACHAIN_KEY_TYPE_ID, - RELAY_CHAIN_SLOT_DURATION_MILLIS, UMP_SEPARATOR, + CandidateDescriptorVersion, CandidateDescriptorVersionCheckError, CandidateEvent, + CandidateHash, CandidateIndex, CandidateReceiptV2, CheckedDisputeStatementSet, + CheckedMultiDisputeStatementSet, ChunkIndex, ClaimQueueOffset, CollatorId, CollatorSignature, + CommittedCandidateReceiptError, CommittedCandidateReceiptV2, CompactStatement, ConsensusLog, + CoreIndex, CoreSelector, CoreState, DisputeOffenceKind, 
DisputeState, DisputeStatement, + DisputeStatementSet, DownwardMessage, EncodeAs, ExecutorParam, ExecutorParamError, + ExecutorParams, ExecutorParamsHash, ExecutorParamsPrepHash, ExplicitDisputeStatement, + GroupIndex, GroupRotationInfo, Hash, HashT, HeadData, Header, HorizontalMessages, + HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, IndexedVec, InherentData, + InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, NodeFeatures, Nonce, + OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, ParathreadEntry, + PersistedValidationData, PvfCheckStatement, PvfExecKind, PvfPrepKind, RuntimeMetricLabel, + RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, RuntimeMetricOp, + RuntimeMetricUpdate, ScheduledCore, SchedulerParams, ScrapedOnChainVotes, SessionIndex, + SessionInfo, Signature, Signed, SignedAvailabilityBitfield, SignedAvailabilityBitfields, + SignedStatement, SigningContext, Slot, TransposedClaimQueue, UMPSignal, UncheckedSigned, + UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, + UncheckedSignedStatement, UpgradeGoAhead, UpgradeRestriction, UpwardMessage, + ValidDisputeStatementKind, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + ValidatorSignature, ValidityAttestation, ValidityError, ASSIGNMENT_KEY_TYPE_ID, + DEFAULT_CLAIM_QUEUE_OFFSET, DEFAULT_SCHEDULING_LOOKAHEAD, LEGACY_MIN_BACKING_VOTES, + LOWEST_PUBLIC_ID, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, MIN_CODE_SIZE, + ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, PARACHAINS_INHERENT_IDENTIFIER, + PARACHAIN_KEY_TYPE_ID, RELAY_CHAIN_SLOT_DURATION_MILLIS, UMP_SEPARATOR, }; #[cfg(feature = "test")] diff --git a/polkadot/primitives/src/v9/mod.rs b/polkadot/primitives/src/v9/mod.rs index 2b25fbdeac3ec..d56cf04b2a1cb 100644 --- a/polkadot/primitives/src/v9/mod.rs +++ b/polkadot/primitives/src/v9/mod.rs @@ -2007,14 +2007,24 @@ impl> CandidateDescriptorV2 { /// Validates that the 
descriptor version is acceptable given whether V3 is enabled. /// - /// This is the single source of truth for version gating logic, used by both - /// the runtime (`check_descriptor_version_and_signals`) and the backing subsystem. - /// - /// Checks two things: - /// 1. Old-style and new-style version detection must agree, unless the candidate is V3 and V3 - /// is enabled (the expected disagreement: old rules see V1, new rules see V3). - /// 2. V3 candidates are rejected when V3 is not enabled. - pub fn check_version_acceptance(&self, v3_enabled: bool) -> Result<(), CandidateDescriptorVersionCheckError> { + /// Used by both the runtime (`check_descriptor_version_and_signals`) and the + /// backing subsystem. Serves two distinct purposes: + /// + /// 1. **V2 ambiguity protection (long-lived):** Old-style and new-style version detection must + /// agree, unless the candidate is V3 and V3 is enabled (the expected disagreement: old rules + /// see V1, new rules see V3). This prevents a crafted candidate from being treated as V2 (no + /// mandatory UMP signals) by new nodes but as V1 by old nodes. Needed as long as V1 exists + /// (maximum safety) or until we could have valiators not yet using the new rules. + /// + /// 2. **V3 gating (transitional):** V3 candidates are rejected when V3 is not enabled. + /// + /// Note: Consistent `Unknown` versions are not our concern here — they are caught upstream + /// by the runtime (`check_descriptor_version_and_signals`) and the collator + /// protocol (`descriptor_version_sanity_check`). 
+ pub fn check_version_acceptance( + &self, + v3_enabled: bool, + ) -> Result<(), CandidateDescriptorVersionCheckError> { let version = self.version(); // Version consistency: old and new detection must agree, unless this is the @@ -2268,8 +2278,9 @@ impl> CandidateDescriptorV2 { ) -> Option { match self.version_for_candidate_validation(v3_ever_seen) { CandidateDescriptorVersion::V1 | CandidateDescriptorVersion::Unknown => None, - CandidateDescriptorVersion::V2 | CandidateDescriptorVersion::V3 => - Some(self.session_index), + CandidateDescriptorVersion::V2 | CandidateDescriptorVersion::V3 => { + Some(self.session_index) + }, } } } @@ -3339,7 +3350,10 @@ pub mod tests { let desc = make_v3_descriptor(); assert_eq!(desc.version(), CandidateDescriptorVersion::V3); - assert_eq!(desc.check_version_acceptance(false), Err(CandidateDescriptorVersionCheckError::Inconsistency)); + assert_eq!( + desc.check_version_acceptance(false), + Err(CandidateDescriptorVersionCheckError::Inconsistency) + ); } #[test] @@ -3354,8 +3368,14 @@ pub mod tests { assert!(!desc.check_version_consistency()); // Rejected regardless of v3_enabled. 
- assert_eq!(desc.check_version_acceptance(false), Err(CandidateDescriptorVersionCheckError::Inconsistency)); - assert_eq!(desc.check_version_acceptance(true), Err(CandidateDescriptorVersionCheckError::Inconsistency)); + assert_eq!( + desc.check_version_acceptance(false), + Err(CandidateDescriptorVersionCheckError::Inconsistency) + ); + assert_eq!( + desc.check_version_acceptance(true), + Err(CandidateDescriptorVersionCheckError::Inconsistency) + ); } #[test] @@ -3415,7 +3435,13 @@ pub mod tests { assert_eq!(desc.version_old_rules(), CandidateDescriptorVersion::V1); assert!(!desc.check_version_consistency()); - assert_eq!(desc.check_version_acceptance(false), Err(CandidateDescriptorVersionCheckError::Inconsistency)); - assert_eq!(desc.check_version_acceptance(true), Err(CandidateDescriptorVersionCheckError::Inconsistency)); + assert_eq!( + desc.check_version_acceptance(false), + Err(CandidateDescriptorVersionCheckError::Inconsistency) + ); + assert_eq!( + desc.check_version_acceptance(true), + Err(CandidateDescriptorVersionCheckError::Inconsistency) + ); } } From e675d847d078f6a4a6ad0811fb75e02d44bf84dd Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 19 Mar 2026 13:21:36 +0100 Subject: [PATCH 38/52] Bring back manual Display instance --- polkadot/primitives/src/v9/mod.rs | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/polkadot/primitives/src/v9/mod.rs b/polkadot/primitives/src/v9/mod.rs index d56cf04b2a1cb..c61b98b5e7254 100644 --- a/polkadot/primitives/src/v9/mod.rs +++ b/polkadot/primitives/src/v9/mod.rs @@ -1858,20 +1858,27 @@ pub enum CandidateDescriptorVersion { /// Error returned by [`CandidateDescriptorV2::check_version_acceptance`]. 
#[derive(Debug, Copy, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "std", derive(thiserror::Error))] pub enum CandidateDescriptorVersionCheckError { /// Old-style and new-style version detection disagree, and this is not the /// expected V3 disagreement (old rules → V1, new rules → V3) with V3 enabled. - #[cfg_attr( - feature = "std", - error("Descriptor version detection inconsistency (old vs new rules disagree)") - )] Inconsistency, /// The descriptor is V3 but the V3 feature is not enabled. - #[cfg_attr(feature = "std", error("V3 candidate descriptor but V3 feature not enabled"))] V3NotEnabled, } +// Manual Display impl required because this type is used in `no_std` runtime +// code (paras_inherent) where thiserror::Error is not available. +impl core::fmt::Display for CandidateDescriptorVersionCheckError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + Self::Inconsistency => + write!(f, "Descriptor version detection inconsistency (old vs new rules disagree)"), + Self::V3NotEnabled => + write!(f, "V3 candidate descriptor but V3 feature not enabled"), + } + } +} + /// A unique descriptor of the candidate receipt. #[derive(PartialEq, Eq, Clone, Encode, Decode, DecodeWithMemTracking, TypeInfo)] pub struct CandidateDescriptorV2 { From 3f4242edbdc80b7211a2043d5b709230760f8ec6 Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 19 Mar 2026 19:31:52 +0100 Subject: [PATCH 39/52] Typo + relay parent for pvd. 
--- .../collator-protocol/src/validator_side/mod.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index 2ee718dadb664..340a03730a748 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -2005,7 +2005,7 @@ where if let Entry::Vacant(entry) = state.per_scheduling_parent.entry(*block_hash) { // Safe to use the same session index for the allowed scheduling parents as well // since they must be in the same session. - if let Some(pers_scheduling_parent) = construct_per_scheduling_parent( + if let Some(per_scheduling_parent) = construct_per_scheduling_parent( sender, &mut state.assigned_cores, keystore, @@ -2014,7 +2014,7 @@ where ) .await? { - entry.insert(pers_scheduling_parent); + entry.insert(per_scheduling_parent); } } } @@ -2690,9 +2690,13 @@ async fn kick_off_seconding( ) { (CollationVersion::V2, Some(ProspectiveCandidate { parent_head_data_hash, .. })) | (CollationVersion::V3, Some(ProspectiveCandidate { parent_head_data_hash, .. })) => { + // PVD contains relay_parent_number and relay_parent_storage_root, so + // we must pass the actual relay_parent (execution context), not the + // scheduling_parent. For V1/V2 these are identical; for V3 the + // relay_parent may be older. 
let pvd = request_prospective_validation_data( ctx.sender(), - scheduling_parent, + candidate_receipt.descriptor().relay_parent(), parent_head_data_hash, para_id, maybe_parent_head_data.clone(), From 0d8d9b1e62a292b4a0d8eec8523abb294b8845cd Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 19 Mar 2026 19:39:17 +0100 Subject: [PATCH 40/52] Fix tests accordingly --- .../collator-protocol/src/validator_side/tests/mod.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs index 5753e3d08446e..db2804a4b22ce 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -284,9 +284,13 @@ async fn overseer_signal(overseer: &mut VirtualOverseer, signal: OverseerSignal) } /// Assert that the next message is a `CandidateBacking(Second())`. +/// +/// `expected_relay_parent` is the relay parent used for the PVD request (execution +/// context). For V1/V2 this equals the scheduling parent; for V3 it may differ. 
async fn assert_candidate_backing_second( virtual_overseer: &mut VirtualOverseer, expected_scheduling_parent: Hash, + expected_relay_parent: Hash, expected_para_id: ParaId, expected_pov: &PoV, version: CollationVersion, @@ -303,7 +307,7 @@ async fn assert_candidate_backing_second( hash, RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx), )) => { - assert_eq!(expected_scheduling_parent, hash); + assert_eq!(expected_relay_parent, hash); assert_eq!(expected_para_id, para_id); assert_eq!(OccupiedCoreAssumption::Free, assumption); tx.send(Ok(Some(pvd.clone()))).unwrap(); @@ -314,7 +318,7 @@ async fn assert_candidate_backing_second( AllMessages::ProspectiveParachains( ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx), ) => { - assert_eq!(expected_scheduling_parent, request.candidate_relay_parent); + assert_eq!(expected_relay_parent, request.candidate_relay_parent); assert_eq!(expected_para_id, request.para_id); tx.send(Some(pvd.clone())).unwrap(); } From 831a06059165d963968ac9f4c8767d6ddda682fd Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 19 Mar 2026 19:52:48 +0100 Subject: [PATCH 41/52] Fix tests for using relay parent in pvd. 
--- .../collator-protocol/src/validator_side/tests/mod.rs | 4 ++++ .../src/validator_side/tests/prospective_parachains.rs | 5 +++++ 2 files changed, 9 insertions(+) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs index db2804a4b22ce..11a22cedbc6c4 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -615,6 +615,7 @@ fn fetch_one_collation_at_a_time_for_v1_advertisement() { assert_candidate_backing_second( &mut virtual_overseer, test_state.relay_parent, + test_state.relay_parent, test_state.chain_ids[0], &pov, CollationVersion::V1, @@ -740,6 +741,7 @@ fn fetches_next_collation() { assert_candidate_backing_second( &mut virtual_overseer, second, + second, test_state.chain_ids[0], &pov, CollationVersion::V1, @@ -851,6 +853,7 @@ fn fetch_next_collation_on_invalid_collation() { let receipt = assert_candidate_backing_second( &mut virtual_overseer, relay_parent, + relay_parent, test_state.chain_ids[0], &pov, CollationVersion::V1, @@ -1299,6 +1302,7 @@ fn peer_disconnect_clears_pending_collations_from_waiting_queue() { assert_candidate_backing_second( &mut virtual_overseer, relay_parent, + relay_parent, test_state.chain_ids[0], &pov, CollationVersion::V1, diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index e1976cd9489ed..27e4f27895654 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -544,6 +544,7 @@ async fn send_collation_and_assert_processing( assert_candidate_backing_second( virtual_overseer, relay_parent, + relay_parent, expected_para_id, &pov, 
CollationVersion::V2, @@ -625,6 +626,7 @@ fn v1_advertisement_accepted_and_seconded() { assert_candidate_backing_second( &mut virtual_overseer, head_b, + head_b, test_state.chain_ids[0], &pov, CollationVersion::V1, @@ -1974,6 +1976,7 @@ fn v3_descriptor(#[case] crafted_unknown: bool, #[case] collation_version: Colla assert_candidate_backing_second( &mut virtual_overseer, head_b, + head_b, test_state.chain_ids[0], &pov, CollationVersion::V3, @@ -2176,6 +2179,7 @@ fn v3_scheduling_parent_in_progress_slot_accepts_leaf_parent() { assert_candidate_backing_second( &mut virtual_overseer, head_b_parent, + head_b_grandparent, test_state.chain_ids[0], &pov, CollationVersion::V3, @@ -2301,6 +2305,7 @@ fn v3_scheduling_parent_finished_slot_accepts_leaf() { assert_candidate_backing_second( &mut virtual_overseer, head_b, + head_b_parent, test_state.chain_ids[0], &pov, CollationVersion::V3, From eb254fa699662eb746a3b2747c498eeef6dbea87 Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 19 Mar 2026 19:53:06 +0100 Subject: [PATCH 42/52] Scheduling session needs to be the current session not the relay parent sesssion! --- .../collator-protocol/src/validator_side/mod.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index 340a03730a748..6bf05a06b3f9a 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -3108,10 +3108,14 @@ pub fn descriptor_version_sanity_check_with_params( } } - if let Some(session_index) = descriptor.session_index() { - if session_index != expected_session { + // For V2: session_index() == scheduling_session() (relay_parent == scheduling_parent). + // For V3: session_index() is the relay_parent's session which may differ from the + // scheduling session when the relay_parent is from a previous session. 
Check the + // scheduling_session instead, which must always match the current session. + if let Some(scheduling_session) = descriptor.scheduling_session() { + if scheduling_session != expected_session { return Err(SecondingError::InvalidSessionIndex( - session_index, + scheduling_session, expected_session, )); } From 71bb2e8648c4faf1927db8c3d97e1f1145a8a551 Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 19 Mar 2026 19:56:02 +0100 Subject: [PATCH 43/52] Moaaar tests! --- .../tests/prospective_parachains.rs | 95 +++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index 27e4f27895654..74a570afdb046 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -26,6 +26,7 @@ use polkadot_primitives::{ }; use polkadot_primitives_test_helpers::{ dummy_committed_candidate_receipt_v2, dummy_committed_candidate_receipt_v3, + make_valid_candidate_descriptor_v3, }; use rstest::rstest; use sp_consensus_babe::digests::{CompatibleDigestItem, PreDigest, SecondaryPlainPreDigest}; @@ -4162,3 +4163,97 @@ mod ah_stop_gap { ); } } + +/// Verify that `descriptor_version_sanity_check_with_params` checks the +/// scheduling session (not the relay-parent session) for V3 descriptors +/// where the two sessions differ (cross-session relay parent). 
+#[test] +fn v3_sanity_check_uses_scheduling_session_not_relay_parent_session() { + let relay_parent = Hash::repeat_byte(1); + let scheduling_parent = Hash::repeat_byte(2); + + let relay_parent_session: SessionIndex = 4; + let scheduling_session_offset: u8 = 1; + // scheduling_session = relay_parent_session + offset = 5 + let scheduling_session = relay_parent_session + scheduling_session_offset as SessionIndex; + + let core = CoreIndex(0); + + let mut descriptor = make_valid_candidate_descriptor_v3( + 1.into(), + relay_parent, + core, + relay_parent_session, + Hash::zero(), + Hash::zero(), + Hash::zero(), + Hash::zero(), + Hash::zero(), + scheduling_parent, + ); + descriptor.set_scheduling_session_offset(scheduling_session_offset); + + // Sanity: verify the descriptor is V3 and sessions are as expected. + assert_eq!(descriptor.version(), CandidateDescriptorVersion::V3); + assert_eq!(descriptor.session_index(), Some(relay_parent_session)); + assert_eq!(descriptor.scheduling_session(), Some(scheduling_session)); + + // The check must pass when expected_session matches the scheduling session. + assert!(descriptor_version_sanity_check_with_params( + &descriptor, + core, + scheduling_session, + CollationVersion::V3, + ) + .is_ok()); + + // The check must fail when expected_session is the relay-parent session + // (which differs from the scheduling session for cross-session V3 candidates). + assert_matches!( + descriptor_version_sanity_check_with_params( + &descriptor, + core, + relay_parent_session, + CollationVersion::V3, + ), + Err(SecondingError::InvalidSessionIndex(got, expected)) => { + assert_eq!(got, scheduling_session); + assert_eq!(expected, relay_parent_session); + } + ); +} + +/// Verify that V2 descriptors still check session_index correctly (V2 has no +/// scheduling_session_offset, so session_index == scheduling_session). 
+#[test] +fn v2_sanity_check_session_index_unchanged() { + let relay_parent = Hash::repeat_byte(1); + let core = CoreIndex(0); + let session: SessionIndex = 5; + + let mut descriptor = dummy_committed_candidate_receipt_v2(relay_parent); + descriptor.descriptor.set_core_index(core); + descriptor.descriptor.set_session_index(session); + + assert_eq!(descriptor.descriptor.version(), CandidateDescriptorVersion::V2); + + // Passes with matching session. + assert!(descriptor_version_sanity_check_with_params( + &descriptor.descriptor, + core, + session, + CollationVersion::V2, + ) + .is_ok()); + + // Fails with wrong session. + assert_matches!( + descriptor_version_sanity_check_with_params( + &descriptor.descriptor, + core, + session + 1, + CollationVersion::V2, + ), + Err(SecondingError::InvalidSessionIndex(..)) + ); +} From 841b8f552f41b599f720078a9b156f505d4fa389 Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 19 Mar 2026 20:34:02 +0100 Subject: [PATCH 44/52] Remove dead code + fmt --- .../core/candidate-validation/src/tests.rs | 52 ------------------- polkadot/primitives/src/v9/mod.rs | 8 +-- 2 files changed, 4 insertions(+), 56 deletions(-) diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 61ce533d6607b..9c499af7319e9 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -2552,58 +2552,6 @@ async fn mock_fetch_bomb_limit_v2( ); } -/// Helper: respond to the runtime API calls made during backing pre-validation -/// (after bomb limit): SessionIndexForChild, AllowedRelayParentInfo, ClaimQueue. 
-async fn mock_backing_pre_validation( - ctx_handle: &mut TestSubsystemContextHandle, - expected_scheduling_parent: Hash, - scheduling_session_response: SessionIndex, - relay_parent_info_response: Option, - claim_queue: BTreeMap>, -) { - // get_session_index → SessionIndexForChild - assert_matches!( - ctx_handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - parent, - RuntimeApiRequest::SessionIndexForChild(tx), - )) => { - assert_eq!(parent, expected_scheduling_parent); - let _ = tx.send(Ok(scheduling_session_response)); - } - ); - - // check_relay_parent_in_session → AllowedRelayParentInfo - if let Some(valid) = relay_parent_info_response { - assert_matches!( - ctx_handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - parent, - RuntimeApiRequest::AllowedRelayParentInfo(_session, _relay_parent, tx), - )) => { - assert_eq!(parent, expected_scheduling_parent); - if valid { - let _ = tx.send(Ok(Some(Default::default()))); - } else { - let _ = tx.send(Ok(None)); - } - } - ); - } - - // claim_queue → ClaimQueue - assert_matches!( - ctx_handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - parent, - RuntimeApiRequest::ClaimQueue(tx), - )) => { - assert_eq!(parent, expected_scheduling_parent); - let cq = claim_queue.into_iter().map(|(k, v)| (k, v.into())).collect(); - let _ = tx.send(Ok(cq)); - } - ); -} /// Scheduling session check: backing rejects when the descriptor's session /// doesn't match the runtime; approval/dispute skips the check entirely. 
diff --git a/polkadot/primitives/src/v9/mod.rs b/polkadot/primitives/src/v9/mod.rs index c61b98b5e7254..10be302579675 100644 --- a/polkadot/primitives/src/v9/mod.rs +++ b/polkadot/primitives/src/v9/mod.rs @@ -1871,10 +1871,10 @@ pub enum CandidateDescriptorVersionCheckError { impl core::fmt::Display for CandidateDescriptorVersionCheckError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { - Self::Inconsistency => - write!(f, "Descriptor version detection inconsistency (old vs new rules disagree)"), - Self::V3NotEnabled => - write!(f, "V3 candidate descriptor but V3 feature not enabled"), + Self::Inconsistency => { + write!(f, "Descriptor version detection inconsistency (old vs new rules disagree)") + }, + Self::V3NotEnabled => write!(f, "V3 candidate descriptor but V3 feature not enabled"), } } } From d1714bc654a13270f53c55a58f2069da93cdd973 Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 19 Mar 2026 20:45:03 +0100 Subject: [PATCH 45/52] more fmt --- polkadot/node/core/candidate-validation/src/tests.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 9c499af7319e9..56c6a2d5bb90d 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -2552,7 +2552,6 @@ async fn mock_fetch_bomb_limit_v2( ); } - /// Scheduling session check: backing rejects when the descriptor's session /// doesn't match the runtime; approval/dispute skips the check entirely. 
/// From 0dc5e50edd2498e12bcd40f8e3f178d165f5a0c2 Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 19 Mar 2026 21:15:36 +0100 Subject: [PATCH 46/52] Fix prdoc --- prdoc/pr_11290.prdoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/prdoc/pr_11290.prdoc b/prdoc/pr_11290.prdoc index 32b1aa99460be..654f848f17e2f 100644 --- a/prdoc/pr_11290.prdoc +++ b/prdoc/pr_11290.prdoc @@ -69,6 +69,8 @@ crates: bump: major - name: polkadot-dispute-distribution bump: major + - name: polkadot-node-primitives + bump: major - name: polkadot-node-collation-generation bump: patch - name: polkadot-runtime-parachains From 62a031c089e227f4385677f2ac90cb9176b842cb Mon Sep 17 00:00:00 2001 From: eskimor Date: Fri, 20 Mar 2026 09:31:13 +0100 Subject: [PATCH 47/52] Fix relay parent session check. --- .../node/core/candidate-validation/src/lib.rs | 84 +++------------- .../core/candidate-validation/src/tests.rs | 98 ++++--------------- polkadot/node/core/runtime-api/src/cache.rs | 14 +-- polkadot/node/core/runtime-api/src/lib.rs | 18 ++-- polkadot/node/core/runtime-api/src/tests.rs | 2 +- polkadot/node/subsystem-types/src/messages.rs | 11 ++- .../subsystem-types/src/runtime_client.rs | 9 +- polkadot/node/subsystem-util/src/lib.rs | 63 ++++++++++++ polkadot/primitives/src/runtime_api.rs | 14 ++- .../src/runtime_api_impl/vstaging.rs | 8 +- polkadot/runtime/rococo/src/lib.rs | 4 +- polkadot/runtime/test-runtime/src/lib.rs | 4 +- polkadot/runtime/westend/src/lib.rs | 4 +- 13 files changed, 142 insertions(+), 191 deletions(-) diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index dd66c8c0b2512..05d463a13f844 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -242,7 +242,7 @@ enum PreValidationError { /// - Basic checks: PoV size, PoV hash, validation code hash /// - Backing-only (skipped for approval/dispute): /// - Scheduling session matches runtime -/// - 
Relay parent valid in claimed session (v16+ `AllowedRelayParentInfo` API) +/// - Relay parent valid in claimed session (via `check_relay_parent_info` utility) /// - Claim queue fetch /// /// Backing-only checks are skipped for approval/dispute because the runtime @@ -299,30 +299,33 @@ where } } - // Verify relay parent is valid in the claimed session (v16+ API). + // Verify relay parent is valid in the claimed session. + // Uses the node-side utility which handles both the self-query case + // (scheduling_parent == relay_parent, V2) and ancestor queries (V3). if let Some(session_index) = candidate_receipt .descriptor .session_index_for_candidate_validation(v3_ever_seen) { - match check_relay_parent_in_session( + let relay_parent = candidate_receipt.descriptor.relay_parent(); + match util::check_relay_parent_info( sender, scheduling_parent, session_index, - candidate_receipt.descriptor.relay_parent(), + relay_parent, ) .await { - Ok(()) => {}, + util::CheckRelayParentInfoResult::Valid => {}, // Safe to skip: on old runtimes cross-session relay parents don't // exist, and the scheduling session check above already covers the // relay parent session (scheduling_parent == relay_parent). - Err(CheckRelayParentSessionError::NotSupported) => {}, - Err(CheckRelayParentSessionError::NotFound) => { + util::CheckRelayParentInfoResult::NotSupported => {}, + util::CheckRelayParentInfoResult::NotFound => { return Err(PreValidationError::Invalid( InvalidCandidate::InvalidRelayParentSession, )) }, - Err(CheckRelayParentSessionError::RuntimeError(err)) => { + util::CheckRelayParentInfoResult::RuntimeError(err) => { return Err(PreValidationError::RuntimeError(err)) }, } @@ -705,71 +708,6 @@ where Some(session_index) } -enum CheckRelayParentSessionError { - /// The `AllowedRelayParentInfo` runtime API (v16+) is not supported. - NotSupported, - /// The relay parent was not found in the claimed session. - NotFound, - /// An unexpected runtime API error occurred. 
- RuntimeError(String), -} - -/// Check that the relay parent is known to the runtime in the claimed session. -/// -/// Uses the `AllowedRelayParentInfo` runtime API (v16+) called at some -/// `recent_block` (recent enough to have state available). We cannot query -/// state at the relay parent directly because it may be old and pruned. -async fn check_relay_parent_in_session( - sender: &mut Sender, - recent_block: Hash, - claimed_session: SessionIndex, - relay_parent: Hash, -) -> Result<(), CheckRelayParentSessionError> -where - Sender: SubsystemSender, -{ - let rx = util::request_from_runtime(recent_block, sender, |tx| { - RuntimeApiRequest::AllowedRelayParentInfo(claimed_session, relay_parent, tx) - }) - .await; - - match rx.await { - Ok(Ok(Some(_))) => Ok(()), - Ok(Ok(None)) => Err(CheckRelayParentSessionError::NotFound), - Ok(Err(RuntimeApiError::NotSupported { .. })) => { - gum::debug!( - target: LOG_TARGET, - ?recent_block, - "AllowedRelayParentInfo API not supported", - ); - Err(CheckRelayParentSessionError::NotSupported) - }, - Ok(Err(err)) => { - gum::warn!( - target: LOG_TARGET, - ?recent_block, - ?relay_parent, - ?err, - "Error calling AllowedRelayParentInfo runtime API", - ); - Err(CheckRelayParentSessionError::RuntimeError(format!( - "AllowedRelayParentInfo runtime API error: {err}" - ))) - }, - Err(_) => { - gum::warn!( - target: LOG_TARGET, - ?recent_block, - ?relay_parent, - "AllowedRelayParentInfo request cancelled", - ); - Err(CheckRelayParentSessionError::RuntimeError( - "AllowedRelayParentInfo request cancelled".into(), - )) - }, - } -} - // Returns true if the node is an authority in the next session. 
async fn check_next_session_authority( sender: &mut Sender, diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 56c6a2d5bb90d..7786ce6f28045 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -2873,12 +2873,12 @@ fn pre_validation_basic_checks() { } } -/// Relay parent session check via AllowedRelayParentInfo (v16+ API): backing -/// rejects when the relay parent is not found in the claimed session. +/// Relay parent session check: for V2 candidates (scheduling_parent == relay_parent), +/// the `check_relay_parent_info` utility takes the self-query path, verifying the +/// session via `session_index_for_child` directly. /// -/// Also verifies that `NotSupported` (old runtime) is safely skipped — the -/// scheduling session check covers it on old runtimes where relay_parent == -/// scheduling_parent. +/// Case 1: Session mismatch → InvalidRelayParentSession. +/// Case 2: Session matches → valid, proceeds to PVF execution. #[test] fn pre_validation_relay_parent_session_check() { let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; @@ -2887,7 +2887,7 @@ fn pre_validation_relay_parent_session_check() { let validation_code = ValidationCode(vec![2; 16]); let scheduling_parent = dummy_hash(); - // V2 descriptor with correct session_index=1. + // V2 descriptor with session_index=1. let descriptor = make_valid_candidate_descriptor_v2( ParaId::from(1_u32), scheduling_parent, @@ -2918,7 +2918,9 @@ fn pre_validation_relay_parent_session_check() { }; let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; - // Case 1: AllowedRelayParentInfo returns None → InvalidRelayParentSession + // Case 1: Self-query session mismatch → InvalidRelayParentSession. 
+ // The utility calls session_index_for_child which returns 99 (doesn't match + // descriptor's session_index=1). { let pool = TaskExecutor::new(); let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); @@ -2945,19 +2947,20 @@ fn pre_validation_relay_parent_session_check() { let test_fut = async move { mock_fetch_bomb_limit_v2(&mut ctx_handle, scheduling_parent, 1).await; - // SessionIndexForChild: scheduling session matches. + // Scheduling session check: matches (session=1). assert_matches!( ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, RuntimeApiRequest::SessionIndexForChild(tx), )) => { let _ = tx.send(Ok(1)); } ); - // AllowedRelayParentInfo: relay parent NOT found. + // check_relay_parent_info self-query: session_index_for_child returns 99 + // (mismatch with descriptor's session_index=1). assert_matches!( ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, RuntimeApiRequest::AllowedRelayParentInfo(_, _, tx), - )) => { let _ = tx.send(Ok(None)); } + _, RuntimeApiRequest::SessionIndexForChild(tx), + )) => { let _ = tx.send(Ok(99)); } ); }; @@ -2969,7 +2972,7 @@ fn pre_validation_relay_parent_session_check() { ); } - // Case 2: AllowedRelayParentInfo not supported → skipped, proceeds to valid. + // Case 2: Self-query session matches → valid, proceeds to PVF execution. { let pool = TaskExecutor::new(); let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); @@ -2996,86 +2999,21 @@ fn pre_validation_relay_parent_session_check() { let test_fut = async move { mock_fetch_bomb_limit_v2(&mut ctx_handle, scheduling_parent, 1).await; - // SessionIndexForChild: matches. + // Scheduling session check: matches. assert_matches!( ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, RuntimeApiRequest::SessionIndexForChild(tx), )) => { let _ = tx.send(Ok(1)); } ); - // AllowedRelayParentInfo: not supported → skipped. 
- assert_matches!( - ctx_handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, RuntimeApiRequest::AllowedRelayParentInfo(_, _, tx), - )) => { - let _ = tx.send(Err(RuntimeApiError::NotSupported { - runtime_api_name: "AllowedRelayParentInfo", - })); - } - ); - // ClaimQueue: proceeds normally. - assert_matches!( - ctx_handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, RuntimeApiRequest::ClaimQueue(tx), - )) => { - let mut cq = BTreeMap::new(); - let _ = cq.insert(CoreIndex(1), vec![ParaId::from(1_u32)].into()); - let _ = tx.send(Ok(cq)); - } - ); - }; - - executor::block_on(future::join(test_fut, task)); - - assert_matches!( - executor::block_on(response_rx).unwrap(), - Ok(ValidationResult::Valid(_, _)) - ); - } - - // Case 3: AllowedRelayParentInfo returns Some → valid, proceeds. - { - let pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); - let mock_backend = - MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())); - - let (response_tx, response_rx) = oneshot::channel(); - - let task = handle_validation_message( - ctx.sender().clone(), - mock_backend, - Metrics::default(), - false, - CandidateValidationMessage::ValidateFromExhaustive { - validation_data: validation_data.clone(), - validation_code: validation_code.clone(), - candidate_receipt: candidate_receipt.clone(), - pov: Arc::new(pov.clone()), - executor_params: ExecutorParams::default(), - exec_kind: PvfExecKind::Backing(dummy_hash()), - response_sender: response_tx, - }, - ); - - let test_fut = async move { - mock_fetch_bomb_limit_v2(&mut ctx_handle, scheduling_parent, 1).await; + // check_relay_parent_info self-query: session matches (session=1). assert_matches!( ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, RuntimeApiRequest::SessionIndexForChild(tx), )) => { let _ = tx.send(Ok(1)); } ); - // AllowedRelayParentInfo: found → valid. 
- assert_matches!( - ctx_handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, RuntimeApiRequest::AllowedRelayParentInfo(_, _, tx), - )) => { let _ = tx.send(Ok(Some(Default::default()))); } - ); - // ClaimQueue. + // ClaimQueue: proceeds normally. assert_matches!( ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index bc77a4bc7f890..6be030137d07e 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -82,7 +82,7 @@ pub(crate) struct RequestResultCache { validation_code_bomb_limits: LruMap, para_ids: LruMap>, max_relay_parent_session_age: LruMap, - allowed_relay_parent_info: + ancestor_relay_parent_info: LruMap<(Hash, SessionIndex, Hash), Option>>, } @@ -127,7 +127,7 @@ impl Default for RequestResultCache { validation_code_bomb_limits: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), para_ids: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), max_relay_parent_session_age: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), - allowed_relay_parent_info: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), + ancestor_relay_parent_info: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), } } } @@ -656,25 +656,25 @@ impl RequestResultCache { .insert(session_index, max_relay_parent_session_age); } - pub(crate) fn allowed_relay_parent_info( + pub(crate) fn ancestor_relay_parent_info( &mut self, relay_parent: Hash, session_index: SessionIndex, queried_relay_parent: Hash, ) -> Option<&Option>> { - self.allowed_relay_parent_info + self.ancestor_relay_parent_info .get(&(relay_parent, session_index, queried_relay_parent)) .map(|v| &*v) } - pub(crate) fn cache_allowed_relay_parent_info( + pub(crate) fn cache_ancestor_relay_parent_info( &mut self, relay_parent: Hash, session_index: SessionIndex, queried_relay_parent: Hash, value: Option>, ) { - self.allowed_relay_parent_info + 
self.ancestor_relay_parent_info .insert((relay_parent, session_index, queried_relay_parent), value); } } @@ -732,7 +732,7 @@ pub(crate) enum RequestResult { ValidationCodeBombLimit(SessionIndex, u32), ParaIds(SessionIndex, Vec), MaxRelayParentSessionAge(SessionIndex, u32), - AllowedRelayParentInfo(Hash, SessionIndex, Hash, Option>), + AncestorRelayParentInfo(Hash, SessionIndex, Hash, Option>), UnappliedSlashesV2(Hash, Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>), } diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs index 0ae142b54b1ae..7952f40bd2e19 100644 --- a/polkadot/node/core/runtime-api/src/lib.rs +++ b/polkadot/node/core/runtime-api/src/lib.rs @@ -221,8 +221,8 @@ where MaxRelayParentSessionAge(session_index, max_relay_parent_session_age) => self .requests_cache .cache_max_relay_parent_session_age(session_index, max_relay_parent_session_age), - AllowedRelayParentInfo(relay_parent, session_index, queried_relay_parent, info) => { - self.requests_cache.cache_allowed_relay_parent_info( + AncestorRelayParentInfo(relay_parent, session_index, queried_relay_parent, info) => { + self.requests_cache.cache_ancestor_relay_parent_info( relay_parent, session_index, queried_relay_parent, @@ -455,8 +455,8 @@ where Some(Request::MaxRelayParentSessionAge(index, sender)) } }, - Request::AllowedRelayParentInfo(session_index, queried_relay_parent, sender) => { - if let Some(value) = self.requests_cache.allowed_relay_parent_info( + Request::AncestorRelayParentInfo(session_index, queried_relay_parent, sender) => { + if let Some(value) = self.requests_cache.ancestor_relay_parent_info( relay_parent, session_index, queried_relay_parent, @@ -465,7 +465,7 @@ where let _ = sender.send(Ok(value.clone())); None } else { - Some(Request::AllowedRelayParentInfo( + Some(Request::AncestorRelayParentInfo( session_index, queried_relay_parent, sender, @@ -820,10 +820,10 @@ where sender, result = (index) ), - 
Request::AllowedRelayParentInfo(session_index, queried_relay_parent, sender) => query!( - AllowedRelayParentInfo, - allowed_relay_parent_info(session_index, queried_relay_parent), - ver = Request::ALLOWED_RELAY_PARENT_INFO_RUNTIME_REQUIREMENT, + Request::AncestorRelayParentInfo(session_index, queried_relay_parent, sender) => query!( + AncestorRelayParentInfo, + ancestor_relay_parent_info(session_index, queried_relay_parent), + ver = Request::ANCESTOR_RELAY_PARENT_INFO_RUNTIME_REQUIREMENT, sender, result = (relay_parent, session_index, queried_relay_parent) ), diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs index 587900981090b..98453a087fdce 100644 --- a/polkadot/node/core/runtime-api/src/tests.rs +++ b/polkadot/node/core/runtime-api/src/tests.rs @@ -336,7 +336,7 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { todo!("Not required for tests") } - async fn allowed_relay_parent_info( + async fn ancestor_relay_parent_info( &self, _: Hash, _: SessionIndex, diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index af75aaf9e0ebf..6664bf0334654 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -838,9 +838,10 @@ pub enum RuntimeApiRequest { /// Get the maximum relay parent session age allowed for parachain blocks. /// `V16` MaxRelayParentSessionAge(SessionIndex, RuntimeApiSender), - /// Get the relay parent info (block number and state root) for a given session and relay - /// parent hash. `V16` - AllowedRelayParentInfo( + /// Look up relay parent info for an **ancestor** block. A block is not in its + /// own `AllowedRelayParents`, so querying a block about itself returns `None`. + /// Use the node-side `check_relay_parent_info` utility for the general case. 
`V16` + AncestorRelayParentInfo( SessionIndex, Hash, RuntimeApiSender>>, @@ -904,8 +905,8 @@ impl RuntimeApiRequest { /// `MaxRelayParentSessionAge` pub const MAX_RELAY_PARENT_SESSION_AGE_RUNTIME_REQUIREMENT: u32 = 16; - /// `AllowedRelayParentInfo` - pub const ALLOWED_RELAY_PARENT_INFO_RUNTIME_REQUIREMENT: u32 = 16; + /// `AncestorRelayParentInfo` + pub const ANCESTOR_RELAY_PARENT_INFO_RUNTIME_REQUIREMENT: u32 = 16; } /// A message to the Runtime API subsystem. diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index f10606cb8781b..631124b0de925 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -371,8 +371,9 @@ pub trait RuntimeApiSubsystemClient { /// Fetch the maximum relay parent session age allowed for parachain blocks. async fn max_relay_parent_session_age(&self, at: Hash) -> Result; - /// Fetch relay parent info for a given session index and relay parent hash. - async fn allowed_relay_parent_info( + /// Look up relay parent info for an **ancestor** block. A block is not in its + /// own `AllowedRelayParents`, so querying a block about itself returns `None`. + async fn ancestor_relay_parent_info( &self, at: Hash, session_index: SessionIndex, @@ -687,7 +688,7 @@ where self.client.runtime_api().max_relay_parent_session_age(at) } - async fn allowed_relay_parent_info( + async fn ancestor_relay_parent_info( &self, at: Hash, session_index: SessionIndex, @@ -695,7 +696,7 @@ where ) -> Result>, ApiError> { self.client .runtime_api() - .allowed_relay_parent_info(at, session_index, relay_parent) + .ancestor_relay_parent_info(at, session_index, relay_parent) } } diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs index d02df378279b7..bb9536fc0ea2c 100644 --- a/polkadot/node/subsystem-util/src/lib.rs +++ b/polkadot/node/subsystem-util/src/lib.rs @@ -321,6 +321,69 @@ specialize_requests! 
{ } +/// Result of [`check_relay_parent_info`]. +pub enum CheckRelayParentInfoResult { + /// The relay parent is valid in the given session. + Valid, + /// The relay parent was not found in the given session (or session mismatch + /// in the self-query case). + NotFound, + /// The `ancestor_relay_parent_info` runtime API is not supported. Safe to + /// skip on old runtimes where cross-session relay parents don't exist. + NotSupported, + /// A runtime API or communication error occurred. + RuntimeError(String), +} + +/// Check whether a relay parent is valid in a given session. +/// +/// Works for all blocks within the `max_relay_parent_session_age` window, +/// including the block being queried at (the "self" case where +/// `query_at == relay_parent`). The `ancestor_relay_parent_info` runtime API +/// only works for ancestors (a block is not in its own `AllowedRelayParents`). +/// This utility handles the self case by verifying the session directly via +/// `session_index_for_child`. +pub async fn check_relay_parent_info( + sender: &mut impl overseer::SubsystemSender, + query_at: Hash, + session_index: SessionIndex, + relay_parent: Hash, +) -> CheckRelayParentInfoResult { + if query_at == relay_parent { + // Self-query: the runtime API can't answer (block not in its own + // AllowedRelayParents). Verify the session directly. + return match request_session_index_for_child(relay_parent, sender).await.await { + Ok(Ok(session)) if session == session_index => CheckRelayParentInfoResult::Valid, + Ok(Ok(_)) => CheckRelayParentInfoResult::NotFound, + Ok(Err(err)) => CheckRelayParentInfoResult::RuntimeError(format!( + "SessionIndexForChild error: {err}" + )), + Err(_) => CheckRelayParentInfoResult::RuntimeError( + "SessionIndexForChild request cancelled".into(), + ), + }; + } + + // Ancestor query: use the runtime API. 
+ match request_from_runtime(query_at, sender, |tx| { + RuntimeApiRequest::AncestorRelayParentInfo(session_index, relay_parent, tx) + }) + .await + .await + { + Ok(Ok(Some(_))) => CheckRelayParentInfoResult::Valid, + Ok(Ok(None)) => CheckRelayParentInfoResult::NotFound, + Ok(Err(RuntimeApiError::NotSupported { .. })) => + CheckRelayParentInfoResult::NotSupported, + Ok(Err(err)) => CheckRelayParentInfoResult::RuntimeError(format!( + "AncestorRelayParentInfo error: {err}" + )), + Err(_) => CheckRelayParentInfoResult::RuntimeError( + "AncestorRelayParentInfo request cancelled".into(), + ), + } +} + /// Requests executor parameters from the runtime effective at given relay-parent. First obtains /// session index at the relay-parent, relying on the fact that it should be cached by the runtime /// API caching layer even if the block itself has already been pruned. Then requests executor diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index a613560b49f6b..11f0028502bbd 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -329,11 +329,17 @@ sp_api::decl_runtime_apis! { #[api_version(16)] fn max_relay_parent_session_age() -> u32; - /// Retrieve the relay parent info (block number and state root) for a given - /// session index and relay parent hash. Returns `None` if the relay parent - /// is not found in the allowed relay parents for that session. + /// Look up relay parent info for a block that is an **ancestor** of the block + /// this API is called at. Returns `None` if the relay parent is not found + /// in the allowed relay parents for the given session. + /// + /// NOTE: A block is not in its own `AllowedRelayParents` storage (it gets + /// added during the next block's inherent). Querying a block about itself + /// will always return `None`. 
Use the node-side `check_relay_parent_info` + /// utility for a general-purpose check that handles both the self and + /// ancestor cases. #[api_version(16)] - fn allowed_relay_parent_info( + fn ancestor_relay_parent_info( session_index: SessionIndex, relay_parent: Hash, ) -> Option>; diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index f4cdfb9c085c7..1dd223e0818b7 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -39,8 +39,12 @@ pub fn max_relay_parent_session_age() -> u32 { configuration::ActiveConfig::::get().max_relay_parent_session_age } -/// Implementation of `allowed_relay_parent_info` runtime API. -pub fn allowed_relay_parent_info( +/// Implementation of `ancestor_relay_parent_info` runtime API. +/// +/// Looks up relay parent info for an **ancestor** block. A block is not in its +/// own `AllowedRelayParents` (it gets added during the next block's inherent), +/// so querying a block about itself always returns `None`. +pub fn ancestor_relay_parent_info( session_index: SessionIndex, relay_parent: T::Hash, ) -> Option>> { diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 5dde33c79ebea..faeaf5992d7c0 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -2171,11 +2171,11 @@ sp_api::impl_runtime_apis! 
{ parachains_staging_runtime_api_impl::max_relay_parent_session_age::() } - fn allowed_relay_parent_info( + fn ancestor_relay_parent_info( session_index: SessionIndex, relay_parent: Hash, ) -> Option> { - parachains_staging_runtime_api_impl::allowed_relay_parent_info::(session_index, relay_parent) + parachains_staging_runtime_api_impl::ancestor_relay_parent_info::(session_index, relay_parent) } } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index b8f005e46f84e..0d9659d62ec28 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -1146,11 +1146,11 @@ sp_api::impl_runtime_apis! { staging_runtime_impl::max_relay_parent_session_age::() } - fn allowed_relay_parent_info( + fn ancestor_relay_parent_info( session_index: SessionIndex, relay_parent: Hash, ) -> Option> { - staging_runtime_impl::allowed_relay_parent_info::(session_index, relay_parent) + staging_runtime_impl::ancestor_relay_parent_info::(session_index, relay_parent) } } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index a69bde073b7e2..c6a209630368a 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2445,11 +2445,11 @@ sp_api::impl_runtime_apis! 
{ parachains_staging_runtime_api_impl::max_relay_parent_session_age::() } - fn allowed_relay_parent_info( + fn ancestor_relay_parent_info( session_index: SessionIndex, relay_parent: Hash, ) -> Option> { - parachains_staging_runtime_api_impl::allowed_relay_parent_info::(session_index, relay_parent) + parachains_staging_runtime_api_impl::ancestor_relay_parent_info::(session_index, relay_parent) } } From 1f7bcb29a786ab1de52810a474fd04d10d99b1b0 Mon Sep 17 00:00:00 2001 From: eskimor Date: Fri, 20 Mar 2026 09:39:41 +0100 Subject: [PATCH 48/52] Better name + more tests --- .../node/core/candidate-validation/src/lib.rs | 10 +- .../core/candidate-validation/src/tests.rs | 258 ++++++++++++++++++ polkadot/node/subsystem-util/src/lib.rs | 27 +- 3 files changed, 277 insertions(+), 18 deletions(-) diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 05d463a13f844..21938a28faf60 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -307,7 +307,7 @@ where .session_index_for_candidate_validation(v3_ever_seen) { let relay_parent = candidate_receipt.descriptor.relay_parent(); - match util::check_relay_parent_info( + match util::check_relay_parent_session( sender, scheduling_parent, session_index, @@ -315,17 +315,17 @@ where ) .await { - util::CheckRelayParentInfoResult::Valid => {}, + util::CheckRelayParentSessionResult::Valid => {}, // Safe to skip: on old runtimes cross-session relay parents don't // exist, and the scheduling session check above already covers the // relay parent session (scheduling_parent == relay_parent). 
- util::CheckRelayParentInfoResult::NotSupported => {}, - util::CheckRelayParentInfoResult::NotFound => { + util::CheckRelayParentSessionResult::NotSupported => {}, + util::CheckRelayParentSessionResult::NotFound => { return Err(PreValidationError::Invalid( InvalidCandidate::InvalidRelayParentSession, )) }, - util::CheckRelayParentInfoResult::RuntimeError(err) => { + util::CheckRelayParentSessionResult::RuntimeError(err) => { return Err(PreValidationError::RuntimeError(err)) }, } diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 7786ce6f28045..473ec62499b84 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -3034,3 +3034,261 @@ fn pre_validation_relay_parent_session_check() { ); } } + +/// Relay parent session check for V3 candidates (scheduling_parent != relay_parent): +/// the `check_relay_parent_info` utility takes the ancestor-query path, calling +/// the `AncestorRelayParentInfo` runtime API. +/// +/// Case 1: AncestorRelayParentInfo returns None → InvalidRelayParentSession. +/// Case 2: AncestorRelayParentInfo not supported → skipped, proceeds to valid. +/// Case 3: AncestorRelayParentInfo returns Some → valid, proceeds. +#[test] +fn pre_validation_relay_parent_session_check_v3_ancestor_query() { + let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; + let pov = PoV { block_data: BlockData(vec![1; 32]) }; + let head_data = HeadData(vec![1, 1, 1]); + let validation_code = ValidationCode(vec![2; 16]); + let relay_parent = dummy_hash(); + let scheduling_parent = Hash::repeat_byte(0x42); + + // V3 descriptor: scheduling_parent != relay_parent, session_index=1. 
+ let descriptor = make_valid_candidate_descriptor_v3( + ParaId::from(1_u32), + relay_parent, + CoreIndex(1), + 1, + dummy_hash(), + pov.hash(), + validation_code.hash(), + head_data.hash(), + dummy_hash(), + scheduling_parent, + ); + + let validation_result = WasmValidationResult { + head_data: head_data.clone(), + new_validation_code: None, + upward_messages: Default::default(), + horizontal_messages: Default::default(), + processed_downward_messages: 0, + hrmp_watermark: 0, + }; + let commitments = CandidateCommitments { + head_data: validation_result.head_data.clone(), + upward_messages: validation_result.upward_messages.clone(), + horizontal_messages: validation_result.horizontal_messages.clone(), + new_validation_code: validation_result.new_validation_code.clone(), + processed_downward_messages: validation_result.processed_downward_messages, + hrmp_watermark: validation_result.hrmp_watermark, + }; + let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; + + // Helper: mock the V3 bomb limit fetch flow (no SessionIndexForChild, goes + // straight to ValidationCodeBombLimit since V3 has session in descriptor). + async fn mock_fetch_bomb_limit_v3( + ctx_handle: &mut TestSubsystemContextHandle, + expected_scheduling_parent: Hash, + session_index: SessionIndex, + ) { + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::ValidationCodeBombLimit(session, tx), + )) => { + assert_eq!(parent, expected_scheduling_parent); + assert_eq!(session, session_index); + let _ = tx.send(Ok(VALIDATION_CODE_BOMB_LIMIT)); + } + ); + } + + // Helper: mock the V3 backing pre-validation flow up to (but not including) + // the relay parent session check. 
+ async fn mock_v3_pre_checks( + ctx_handle: &mut TestSubsystemContextHandle, + scheduling_parent: Hash, + session: SessionIndex, + ) { + mock_fetch_bomb_limit_v3(ctx_handle, scheduling_parent, session).await; + // Scheduling session check: SessionIndexForChild at scheduling_parent. + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, RuntimeApiRequest::SessionIndexForChild(tx), + )) => { let _ = tx.send(Ok(session)); } + ); + // AllowedRelayParentInfo check for relay parent in session (v16+ API). + // This is only reached for V3 with v3_ever_seen=true, where + // session_index_for_candidate_validation returns Some. + } + + // Case 1: AncestorRelayParentInfo returns None → InvalidRelayParentSession. + { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); + let mock_backend = + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())); + + let (response_tx, response_rx) = oneshot::channel(); + + let task = handle_validation_message( + ctx.sender().clone(), + mock_backend, + Metrics::default(), + true, // v3_ever_seen + CandidateValidationMessage::ValidateFromExhaustive { + validation_data: validation_data.clone(), + validation_code: validation_code.clone(), + candidate_receipt: candidate_receipt.clone(), + pov: Arc::new(pov.clone()), + executor_params: ExecutorParams::default(), + exec_kind: PvfExecKind::Backing(scheduling_parent), + response_sender: response_tx, + }, + ); + + let test_fut = async move { + mock_v3_pre_checks(&mut ctx_handle, scheduling_parent, 1).await; + // AncestorRelayParentInfo: relay parent NOT found. 
+ assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::AncestorRelayParentInfo(session, rp, tx), + )) => { + assert_eq!(parent, scheduling_parent); + assert_eq!(session, 1); + assert_eq!(rp, relay_parent); + let _ = tx.send(Ok(None)); + } + ); + }; + + executor::block_on(future::join(test_fut, task)); + + assert_matches!( + executor::block_on(response_rx).unwrap(), + Ok(ValidationResult::Invalid(InvalidCandidate::InvalidRelayParentSession)) + ); + } + + // Case 2: AncestorRelayParentInfo not supported → skipped, proceeds past session check. + // (Candidate then fails UMP signal check since V3 requires signals — this proves + // the session check was skipped successfully.) + { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); + let mock_backend = + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())); + + let (response_tx, response_rx) = oneshot::channel(); + + let task = handle_validation_message( + ctx.sender().clone(), + mock_backend, + Metrics::default(), + true, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data: validation_data.clone(), + validation_code: validation_code.clone(), + candidate_receipt: candidate_receipt.clone(), + pov: Arc::new(pov.clone()), + executor_params: ExecutorParams::default(), + exec_kind: PvfExecKind::Backing(scheduling_parent), + response_sender: response_tx, + }, + ); + + let test_fut = async move { + mock_v3_pre_checks(&mut ctx_handle, scheduling_parent, 1).await; + // AncestorRelayParentInfo: not supported → skipped. + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, RuntimeApiRequest::AncestorRelayParentInfo(_, _, tx), + )) => { + let _ = tx.send(Err(RuntimeApiError::NotSupported { + runtime_api_name: "AncestorRelayParentInfo", + })); + } + ); + // ClaimQueue: proceeds past session check. 
+ assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, RuntimeApiRequest::ClaimQueue(tx), + )) => { + let mut cq = BTreeMap::new(); + let _ = cq.insert(CoreIndex(1), vec![ParaId::from(1_u32)].into()); + let _ = tx.send(Ok(cq)); + } + ); + }; + + executor::block_on(future::join(test_fut, task)); + + // V3 requires UMP signals which this candidate doesn't have — but the + // point is we got past the session check. + assert_matches!( + executor::block_on(response_rx).unwrap(), + Ok(ValidationResult::Invalid(InvalidCandidate::InvalidUMPSignals(_))) + ); + } + + // Case 3: AncestorRelayParentInfo returns Some → proceeds past session check. + { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool.clone()); + let mock_backend = + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())); + + let (response_tx, response_rx) = oneshot::channel(); + + let task = handle_validation_message( + ctx.sender().clone(), + mock_backend, + Metrics::default(), + true, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data: validation_data.clone(), + validation_code: validation_code.clone(), + candidate_receipt: candidate_receipt.clone(), + pov: Arc::new(pov.clone()), + executor_params: ExecutorParams::default(), + exec_kind: PvfExecKind::Backing(scheduling_parent), + response_sender: response_tx, + }, + ); + + let test_fut = async move { + mock_v3_pre_checks(&mut ctx_handle, scheduling_parent, 1).await; + // AncestorRelayParentInfo: found. + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, RuntimeApiRequest::AncestorRelayParentInfo(_, _, tx), + )) => { let _ = tx.send(Ok(Some(Default::default()))); } + ); + // ClaimQueue. 
+ assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, RuntimeApiRequest::ClaimQueue(tx), + )) => { + let mut cq = BTreeMap::new(); + let _ = cq.insert(CoreIndex(1), vec![ParaId::from(1_u32)].into()); + let _ = tx.send(Ok(cq)); + } + ); + }; + + executor::block_on(future::join(test_fut, task)); + + // Same as case 2 — V3 UMP signals missing, but we got past session check. + assert_matches!( + executor::block_on(response_rx).unwrap(), + Ok(ValidationResult::Invalid(InvalidCandidate::InvalidUMPSignals(_))) + ); + } +} diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs index bb9536fc0ea2c..3fe2cf4b8375b 100644 --- a/polkadot/node/subsystem-util/src/lib.rs +++ b/polkadot/node/subsystem-util/src/lib.rs @@ -322,7 +322,7 @@ specialize_requests! { } /// Result of [`check_relay_parent_info`]. -pub enum CheckRelayParentInfoResult { +pub enum CheckRelayParentSessionResult { /// The relay parent is valid in the given session. Valid, /// The relay parent was not found in the given session (or session mismatch @@ -343,22 +343,22 @@ pub enum CheckRelayParentInfoResult { /// only works for ancestors (a block is not in its own `AllowedRelayParents`). /// This utility handles the self case by verifying the session directly via /// `session_index_for_child`. -pub async fn check_relay_parent_info( +pub async fn check_relay_parent_session( sender: &mut impl overseer::SubsystemSender, query_at: Hash, session_index: SessionIndex, relay_parent: Hash, -) -> CheckRelayParentInfoResult { +) -> CheckRelayParentSessionResult { if query_at == relay_parent { // Self-query: the runtime API can't answer (block not in its own // AllowedRelayParents). Verify the session directly. 
return match request_session_index_for_child(relay_parent, sender).await.await { - Ok(Ok(session)) if session == session_index => CheckRelayParentInfoResult::Valid, - Ok(Ok(_)) => CheckRelayParentInfoResult::NotFound, - Ok(Err(err)) => CheckRelayParentInfoResult::RuntimeError(format!( + Ok(Ok(session)) if session == session_index => CheckRelayParentSessionResult::Valid, + Ok(Ok(_)) => CheckRelayParentSessionResult::NotFound, + Ok(Err(err)) => CheckRelayParentSessionResult::RuntimeError(format!( "SessionIndexForChild error: {err}" )), - Err(_) => CheckRelayParentInfoResult::RuntimeError( + Err(_) => CheckRelayParentSessionResult::RuntimeError( "SessionIndexForChild request cancelled".into(), ), }; @@ -371,14 +371,15 @@ pub async fn check_relay_parent_info( .await .await { - Ok(Ok(Some(_))) => CheckRelayParentInfoResult::Valid, - Ok(Ok(None)) => CheckRelayParentInfoResult::NotFound, - Ok(Err(RuntimeApiError::NotSupported { .. })) => - CheckRelayParentInfoResult::NotSupported, - Ok(Err(err)) => CheckRelayParentInfoResult::RuntimeError(format!( + Ok(Ok(Some(_))) => CheckRelayParentSessionResult::Valid, + Ok(Ok(None)) => CheckRelayParentSessionResult::NotFound, + Ok(Err(RuntimeApiError::NotSupported { .. })) => { + CheckRelayParentSessionResult::NotSupported + }, + Ok(Err(err)) => CheckRelayParentSessionResult::RuntimeError(format!( "AncestorRelayParentInfo error: {err}" )), - Err(_) => CheckRelayParentInfoResult::RuntimeError( + Err(_) => CheckRelayParentSessionResult::RuntimeError( "AncestorRelayParentInfo request cancelled".into(), ), } From 74462d1532e2ce45e47d9110913f3374c0cc17e2 Mon Sep 17 00:00:00 2001 From: eskimor Date: Fri, 20 Mar 2026 09:42:35 +0100 Subject: [PATCH 49/52] Doc fix + prdoc fix. 
--- polkadot/node/core/candidate-validation/src/lib.rs | 2 +- prdoc/pr_11290.prdoc | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 21938a28faf60..c149ebd131b6f 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -242,7 +242,7 @@ enum PreValidationError { /// - Basic checks: PoV size, PoV hash, validation code hash /// - Backing-only (skipped for approval/dispute): /// - Scheduling session matches runtime -/// - Relay parent valid in claimed session (via `check_relay_parent_info` utility) +/// - Relay parent valid in claimed session (via `check_relay_parent_session` utility) /// - Claim queue fetch /// /// Backing-only checks are skipped for approval/dispute because the runtime diff --git a/prdoc/pr_11290.prdoc b/prdoc/pr_11290.prdoc index 654f848f17e2f..868da7af65e39 100644 --- a/prdoc/pr_11290.prdoc +++ b/prdoc/pr_11290.prdoc @@ -81,6 +81,14 @@ crates: bump: major - name: polkadot-node-core-approval-voting bump: major + - name: polkadot-node-core-runtime-api + bump: major + - name: rococo-runtime + bump: major + - name: westend-runtime + bump: major + - name: polkadot-test-runtime + bump: major - name: polkadot-node-network-protocol bump: major - name: polkadot-subsystem-bench From 4378ad447a1d8d9e47f9438752f03e386af1108f Mon Sep 17 00:00:00 2001 From: eskimor Date: Fri, 20 Mar 2026 09:56:32 +0100 Subject: [PATCH 50/52] Finish renaming --- .../relay-chain-minimal-node/src/blockchain_rpc_client.rs | 4 ++-- cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs | 4 ++-- polkadot/node/core/candidate-validation/src/tests.rs | 2 +- prdoc/pr_11290.prdoc | 6 ++++++ substrate/frame/staking-async/runtimes/rc/src/lib.rs | 4 ++-- 5 files changed, 13 insertions(+), 7 deletions(-) diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs 
b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index 2a353e49ef511..6374c6173a7d7 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -487,7 +487,7 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { Ok(self.rpc_client.parachain_host_max_relay_parent_session_age(at).await?) } - async fn allowed_relay_parent_info( + async fn ancestor_relay_parent_info( &self, at: Hash, session_index: polkadot_primitives::SessionIndex, @@ -498,7 +498,7 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { > { Ok(self .rpc_client - .parachain_host_allowed_relay_parent_info(at, session_index, relay_parent) + .parachain_host_ancestor_relay_parent_info(at, session_index, relay_parent) .await?) } } diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index db4fdf80a8b9b..cc3f5a2db7540 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -799,14 +799,14 @@ impl RelayChainRpcClient { .await } - pub async fn parachain_host_allowed_relay_parent_info( + pub async fn parachain_host_ancestor_relay_parent_info( &self, at: RelayHash, session_index: SessionIndex, relay_parent: RelayHash, ) -> Result>, RelayChainError> { self.call_remote_runtime_function( - "ParachainHost_allowed_relay_parent_info", + "ParachainHost_ancestor_relay_parent_info", at, Some((session_index, relay_parent)), ) diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 473ec62499b84..f5fdef7eab2f3 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -3118,7 +3118,7 @@ fn pre_validation_relay_parent_session_check_v3_ancestor_query() { _, 
RuntimeApiRequest::SessionIndexForChild(tx), )) => { let _ = tx.send(Ok(session)); } ); - // AllowedRelayParentInfo check for relay parent in session (v16+ API). + // AncestorRelayParentInfo check for relay parent in session (v16+ API). // This is only reached for V3 with v3_ever_seen=true, where // session_index_for_candidate_validation returns Some. } diff --git a/prdoc/pr_11290.prdoc b/prdoc/pr_11290.prdoc index 868da7af65e39..70bd412f75bc9 100644 --- a/prdoc/pr_11290.prdoc +++ b/prdoc/pr_11290.prdoc @@ -89,6 +89,12 @@ crates: bump: major - name: polkadot-test-runtime bump: major + - name: pallet-staking-async-rc-runtime + bump: major + - name: cumulus-relay-chain-minimal-node + bump: major + - name: cumulus-relay-chain-rpc-interface + bump: major - name: polkadot-node-network-protocol bump: major - name: polkadot-subsystem-bench diff --git a/substrate/frame/staking-async/runtimes/rc/src/lib.rs b/substrate/frame/staking-async/runtimes/rc/src/lib.rs index d7d7c32447e45..9254c9c893aff 100644 --- a/substrate/frame/staking-async/runtimes/rc/src/lib.rs +++ b/substrate/frame/staking-async/runtimes/rc/src/lib.rs @@ -2356,11 +2356,11 @@ sp_api::impl_runtime_apis! 
{ parachains_staging_runtime_api_impl::max_relay_parent_session_age::() } - fn allowed_relay_parent_info( + fn ancestor_relay_parent_info( session_index: SessionIndex, relay_parent: Hash, ) -> Option> { - parachains_staging_runtime_api_impl::allowed_relay_parent_info::(session_index, relay_parent) + parachains_staging_runtime_api_impl::ancestor_relay_parent_info::(session_index, relay_parent) } } From d20747e5626b224183aee579581f8251bc8d89f8 Mon Sep 17 00:00:00 2001 From: eskimor Date: Fri, 20 Mar 2026 10:54:03 +0100 Subject: [PATCH 51/52] More doc fixes --- polkadot/node/core/candidate-validation/src/tests.rs | 8 ++++---- polkadot/node/subsystem-types/src/messages.rs | 2 +- polkadot/node/subsystem-util/src/lib.rs | 2 +- polkadot/primitives/src/runtime_api.rs | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index f5fdef7eab2f3..8ce2eb57bdb0e 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -2874,7 +2874,7 @@ fn pre_validation_basic_checks() { } /// Relay parent session check: for V2 candidates (scheduling_parent == relay_parent), -/// the `check_relay_parent_info` utility takes the self-query path, verifying the +/// the `check_relay_parent_session` utility takes the self-query path, verifying the /// session via `session_index_for_child` directly. /// /// Case 1: Session mismatch → InvalidRelayParentSession. @@ -2954,7 +2954,7 @@ fn pre_validation_relay_parent_session_check() { _, RuntimeApiRequest::SessionIndexForChild(tx), )) => { let _ = tx.send(Ok(1)); } ); - // check_relay_parent_info self-query: session_index_for_child returns 99 + // check_relay_parent_session self-query: session_index_for_child returns 99 // (mismatch with descriptor's session_index=1). 
assert_matches!( ctx_handle.recv().await, @@ -3006,7 +3006,7 @@ fn pre_validation_relay_parent_session_check() { _, RuntimeApiRequest::SessionIndexForChild(tx), )) => { let _ = tx.send(Ok(1)); } ); - // check_relay_parent_info self-query: session matches (session=1). + // check_relay_parent_session self-query: session matches (session=1). assert_matches!( ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( @@ -3036,7 +3036,7 @@ fn pre_validation_relay_parent_session_check() { } /// Relay parent session check for V3 candidates (scheduling_parent != relay_parent): -/// the `check_relay_parent_info` utility takes the ancestor-query path, calling +/// the `check_relay_parent_session` utility takes the ancestor-query path, calling /// the `AncestorRelayParentInfo` runtime API. /// /// Case 1: AncestorRelayParentInfo returns None → InvalidRelayParentSession. diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 6664bf0334654..8fffc8a32e2a8 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -840,7 +840,7 @@ pub enum RuntimeApiRequest { MaxRelayParentSessionAge(SessionIndex, RuntimeApiSender), /// Look up relay parent info for an **ancestor** block. A block is not in its /// own `AllowedRelayParents`, so querying a block about itself returns `None`. - /// Use the node-side `check_relay_parent_info` utility for the general case. `V16` + /// Use the node-side `check_relay_parent_session` utility for the general case. `V16` AncestorRelayParentInfo( SessionIndex, Hash, diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs index 3fe2cf4b8375b..78748605f71f8 100644 --- a/polkadot/node/subsystem-util/src/lib.rs +++ b/polkadot/node/subsystem-util/src/lib.rs @@ -321,7 +321,7 @@ specialize_requests! { } -/// Result of [`check_relay_parent_info`]. +/// Result of [`check_relay_parent_session`]. 
pub enum CheckRelayParentSessionResult { /// The relay parent is valid in the given session. Valid, diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index 11f0028502bbd..9761edf44a85c 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -335,7 +335,7 @@ sp_api::decl_runtime_apis! { /// /// NOTE: A block is not in its own `AllowedRelayParents` storage (it gets /// added during the next block's inherent). Querying a block about itself - /// will always return `None`. Use the node-side `check_relay_parent_info` + /// will always return `None`. Use the node-side `check_relay_parent_session` /// utility for a general-purpose check that handles both the self and /// ancestor cases. #[api_version(16)] From 0f39265da930f10429f0bb56b3ab95c16345cda2 Mon Sep 17 00:00:00 2001 From: eskimor Date: Fri, 20 Mar 2026 12:36:24 +0100 Subject: [PATCH 52/52] Add missing mock API --- polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs index e46dc20b489af..814e718b4b996 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs @@ -350,6 +350,12 @@ impl MockRuntimeApi { ) => { tx.send(Ok(vec![])).unwrap(); }, + RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::SchedulingLookahead(_session, tx), + ) => { + tx.send(Ok(2)).unwrap(); + }, // Long term TODO: implement more as needed. message => { unimplemented!("Unexpected runtime-api message: {:?}", message)