30 changes: 7 additions & 23 deletions node/core/provisioner/src/lib.rs
@@ -150,12 +150,7 @@ pub enum Error {

/// Provisioner run arguments.
#[derive(Debug, Clone, Copy)]
pub struct ProvisionerConfig {
/// If enabled, dispute votes will be provided to `fn create_inherent`, otherwise not.
/// Long term we will obviously always want disputes to be enabled, this option exists for testing purposes
/// and will be removed in the near future.
pub disputes_enabled: bool,
}
pub struct ProvisionerConfig;

impl JobTrait for ProvisionerJob {
type ToJob = ProvisionerMessage;
@@ -170,7 +165,7 @@ impl JobTrait for ProvisionerJob {
// this function is in charge of creating and executing the job's main loop
fn run<S: SubsystemSender>(
leaf: ActivatedLeaf,
run_args: Self::RunArgs,
_: Self::RunArgs,
metrics: Self::Metrics,
receiver: mpsc::Receiver<ProvisionerMessage>,
mut sender: JobSender<S>,
@@ -179,12 +174,8 @@
async move {
let job = ProvisionerJob::new(leaf, metrics, receiver);

job.run_loop(
sender.subsystem_sender(),
run_args.disputes_enabled,
PerLeafSpan::new(span, "provisioner"),
)
.await
job.run_loop(sender.subsystem_sender(), PerLeafSpan::new(span, "provisioner"))
.await
}
.boxed()
}
@@ -210,7 +201,6 @@ impl ProvisionerJob {
async fn run_loop(
mut self,
sender: &mut impl SubsystemSender,
disputes_enabled: bool,
span: PerLeafSpan,
) -> Result<(), Error> {
loop {
@@ -221,7 +211,7 @@
let _timer = self.metrics.time_request_inherent_data();

if self.inherent_after.is_ready() {
self.send_inherent_data(sender, vec![return_sender], disputes_enabled).await;
self.send_inherent_data(sender, vec![return_sender]).await;
} else {
self.awaiting_inherent.push(return_sender);
}
@@ -238,7 +228,7 @@ impl ProvisionerJob {
let _span = span.child("send-inherent-data");
let return_senders = std::mem::take(&mut self.awaiting_inherent);
if !return_senders.is_empty() {
self.send_inherent_data(sender, return_senders, disputes_enabled).await;
self.send_inherent_data(sender, return_senders).await;
}
}
}
@@ -251,15 +241,13 @@ impl ProvisionerJob {
&mut self,
sender: &mut impl SubsystemSender,
return_senders: Vec<oneshot::Sender<ProvisionerInherentData>>,
disputes_enabled: bool,
) {
if let Err(err) = send_inherent_data(
&self.leaf,
&self.signed_bitfields,
&self.backed_candidates,
return_senders,
sender,
disputes_enabled,
&self.metrics,
)
.await
@@ -273,7 +261,6 @@ impl ProvisionerJob {
signed_bitfield_count = self.signed_bitfields.len(),
backed_candidates_count = self.backed_candidates.len(),
leaf_hash = ?self.leaf.hash,
disputes_enabled,
"inherent data sent successfully"
);
}
@@ -331,16 +318,14 @@ async fn send_inherent_data(
candidates: &[CandidateReceipt],
return_senders: Vec<oneshot::Sender<ProvisionerInherentData>>,
from_job: &mut impl SubsystemSender,
disputes_enabled: bool,
metrics: &Metrics,
) -> Result<(), Error> {
let availability_cores = request_availability_cores(leaf.hash, from_job)
.await
.await
.map_err(|err| Error::CanceledAvailabilityCores(err))??;

let disputes =
if disputes_enabled { select_disputes(from_job, metrics).await? } else { vec![] };
let disputes = select_disputes(from_job, metrics).await?;

// Only include bitfields on fresh leaves. On chain reversions, we want to make sure that
// there will be at least one block, which cannot get disputed, so the chain can make progress.
@@ -354,7 +339,6 @@

gum::debug!(
target: LOG_TARGET,
disputes_enabled = disputes_enabled,
availability_cores_len = availability_cores.len(),
disputes_count = disputes.len(),
bitfields_count = bitfields.len(),
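The net effect of the provisioner changes above is that dispute selection is now unconditional: `ProvisionerConfig` collapses to a unit struct and the `disputes_enabled` branch in `send_inherent_data` disappears. A minimal standalone sketch of that refactor pattern, using hypothetical stand-in types rather than the real items from `node/core/provisioner`:

```rust
// Illustrative sketch only; `ProvisionerConfig`, `select_disputes`, and
// `send_inherent_data` here are simplified stand-ins, not the actual Polkadot APIs.

// Before: the config carried a flag and dispute selection was gated on it.
// struct ProvisionerConfig { disputes_enabled: bool }
// let disputes = if cfg.disputes_enabled { select_disputes() } else { vec![] };

// After: the config is a unit struct and disputes are always selected.
struct ProvisionerConfig;

fn select_disputes() -> Vec<u32> {
    // Stand-in for the real dispute-statement selection.
    vec![1, 2, 3]
}

fn send_inherent_data(_cfg: &ProvisionerConfig) -> Vec<u32> {
    // Dispute selection is unconditional now.
    select_disputes()
}

fn main() {
    let cfg = ProvisionerConfig;
    assert_eq!(send_inherent_data(&cfg), vec![1, 2, 3]);
}
```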
10 changes: 1 addition & 9 deletions node/service/src/lib.rs
@@ -731,23 +731,16 @@ where
let auth_or_collator = role.is_authority() || is_collator.is_collator();
let requires_overseer_for_chain_sel = local_keystore.is_some() && auth_or_collator;

let disputes_enabled = chain_spec.is_rococo() ||
chain_spec.is_kusama() ||
chain_spec.is_westend() ||
chain_spec.is_versi() ||
chain_spec.is_wococo();

let pvf_checker_enabled = !is_collator.is_collator() && chain_spec.is_versi();

let select_chain = if requires_overseer_for_chain_sel {
let metrics =
polkadot_node_subsystem_util::metrics::Metrics::register(prometheus_registry.as_ref())?;

SelectRelayChain::new_disputes_aware(
SelectRelayChain::new_with_overseer(
basics.backend.clone(),
overseer_handle.clone(),
metrics,
disputes_enabled,
)
} else {
SelectRelayChain::new_longest_chain(basics.backend.clone())
@@ -1006,7 +999,6 @@ where
candidate_validation_config,
chain_selection_config,
dispute_coordinator_config,
disputes_enabled,
pvf_checker_enabled,
},
)
21 changes: 7 additions & 14 deletions node/service/src/overseer.rs
@@ -108,8 +108,6 @@ where
pub chain_selection_config: ChainSelectionConfig,
/// Configuration for the dispute coordinator subsystem.
pub dispute_coordinator_config: DisputeCoordinatorConfig,
/// Enable to disputes.
pub disputes_enabled: bool,
/// Enable PVF pre-checking
pub pvf_checker_enabled: bool,
}
@@ -138,7 +136,6 @@ pub fn prepared_overseer_builder<'a, Spawner, RuntimeClient>(
candidate_validation_config,
chain_selection_config,
dispute_coordinator_config,
disputes_enabled,
pvf_checker_enabled,
}: OverseerGenArgs<'a, Spawner, RuntimeClient>,
) -> Result<
@@ -243,7 +240,7 @@
))
.provisioner(ProvisionerSubsystem::new(
spawner.clone(),
ProvisionerConfig { disputes_enabled },
ProvisionerConfig,
Metrics::register(registry)?,
))
.runtime_api(RuntimeApiSubsystem::new(
@@ -269,16 +266,12 @@
authority_discovery_service.clone(),
Metrics::register(registry)?,
))
.dispute_coordinator(if disputes_enabled {
DisputeCoordinatorSubsystem::new(
parachains_db.clone(),
dispute_coordinator_config,
keystore.clone(),
Metrics::register(registry)?,
)
} else {
DisputeCoordinatorSubsystem::dummy()
})
.dispute_coordinator(DisputeCoordinatorSubsystem::new(
parachains_db.clone(),
dispute_coordinator_config,
keystore.clone(),
Metrics::register(registry)?,
))
.dispute_distribution(DisputeDistributionSubsystem::new(
keystore.clone(),
dispute_req_receiver,
44 changes: 8 additions & 36 deletions node/service/src/relay_chain_selection.rs
@@ -163,30 +163,13 @@ where

/// Create a new [`SelectRelayChain`] wrapping the given chain backend
/// and a handle to the overseer.
pub fn new_disputes_aware(
backend: Arc<B>,
overseer: Handle,
metrics: Metrics,
disputes_enabled: bool,
) -> Self {
gum::debug!(
target: LOG_TARGET,
"Using {} chain selection algorithm",
if disputes_enabled {
"dispute aware relay"
} else {
// no disputes are queried, that logic is disabled
// in `fn finality_target_with_longest_chain`.
"short-circuited relay"
}
);
pub fn new_with_overseer(backend: Arc<B>, overseer: Handle, metrics: Metrics) -> Self {
gum::debug!(target: LOG_TARGET, "Using dispute aware relay-chain selection algorithm",);

SelectRelayChain {
longest_chain: sc_consensus::LongestChain::new(backend.clone()),
selection: IsDisputesAwareWithOverseer::Yes(SelectRelayChainInner::new(
backend,
overseer,
metrics,
disputes_enabled,
backend, overseer, metrics,
)),
}
}
@@ -243,7 +226,6 @@
pub struct SelectRelayChainInner<B, OH> {
backend: Arc<B>,
overseer: OH,
disputes_enabled: bool,
metrics: Metrics,
}

@@ -254,8 +236,8 @@
{
/// Create a new [`SelectRelayChainInner`] wrapping the given chain backend
/// and a handle to the overseer.
pub fn new(backend: Arc<B>, overseer: OH, metrics: Metrics, disputes_enabled: bool) -> Self {
SelectRelayChainInner { backend, overseer, metrics, disputes_enabled }
pub fn new(backend: Arc<B>, overseer: OH, metrics: Metrics) -> Self {
SelectRelayChainInner { backend, overseer, metrics }
}

fn block_header(&self, hash: Hash) -> Result<PolkadotHeader, ConsensusError> {
@@ -293,7 +275,6 @@
backend: self.backend.clone(),
overseer: self.overseer.clone(),
metrics: self.metrics.clone(),
disputes_enabled: self.disputes_enabled,
}
}
}
@@ -391,7 +372,7 @@ where
let mut overseer = self.overseer.clone();
gum::trace!(target: LOG_TARGET, ?best_leaf, "Longest chain");

let subchain_head = if self.disputes_enabled {
let subchain_head = {
let (tx, rx) = oneshot::channel();
overseer
.send_msg(
@@ -412,13 +393,6 @@
None => return Ok(target_hash),
Some(best) => best,
}
} else {
gum::trace!(target: LOG_TARGET, ?best_leaf, "Dummy disputes active");
if best_leaf == target_hash {
return Ok(target_hash)
} else {
best_leaf
}
};

let target_number = self.block_number(target_hash)?;
@@ -492,7 +466,7 @@ where
let lag = initial_leaf_number.saturating_sub(subchain_number);
self.metrics.note_approval_checking_finality_lag(lag);

let (lag, subchain_head) = if self.disputes_enabled {
let (lag, subchain_head) = {
// Prevent sending flawed data to the dispute-coordinator.
if Some(subchain_block_descriptions.len() as _) !=
subchain_number.checked_sub(target_number)
@@ -544,8 +518,6 @@ where
},
};
(lag, subchain_head)
} else {
(lag, subchain_head)
};

gum::trace!(
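On the chain-selection side, `new_disputes_aware` becomes `new_with_overseer` and the extra boolean argument disappears: having an overseer handle now implies dispute-aware selection. A small runnable sketch of the updated call-site shape (the type name and constructor names mirror the diff; the enum body, fields, and values are invented purely for illustration):

```rust
// Mock sketch of the call-site pattern; not the real SelectRelayChain type.
#[derive(Debug)]
enum SelectRelayChain {
    WithOverseer { metrics_registered: bool },
    LongestChain,
}

impl SelectRelayChain {
    // The old `disputes_enabled: bool` argument is gone: an overseer handle
    // implies dispute-aware relay-chain selection.
    fn new_with_overseer(metrics_registered: bool) -> Self {
        SelectRelayChain::WithOverseer { metrics_registered }
    }

    fn new_longest_chain() -> Self {
        SelectRelayChain::LongestChain
    }
}

fn main() {
    let requires_overseer_for_chain_sel = true; // hypothetical value
    let select_chain = if requires_overseer_for_chain_sel {
        SelectRelayChain::new_with_overseer(true)
    } else {
        SelectRelayChain::new_longest_chain()
    };
    println!("{select_chain:?}");
}
```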
1 change: 0 additions & 1 deletion node/service/src/tests.rs
@@ -83,7 +83,6 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(
Arc::new(case_vars.chain.clone()),
context.sender().clone(),
Default::default(),
true,
);

let target_hash = case_vars.target_block.clone();