Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions cumulus/polkadot-omni-node/lib/src/common/spec.rs
Original file line number Diff line number Diff line change
Expand Up @@ -248,6 +248,7 @@ pub(crate) trait BaseNodeSpec {
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
executor,
true,
Default::default(),
)?;
let client = Arc::new(client);

Expand Down
1 change: 1 addition & 0 deletions cumulus/test/service/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -196,6 +196,7 @@ pub fn new_partial(
None,
executor,
enable_import_proof_record,
Default::default(),
)?;
let client = Arc::new(client);

Expand Down
22 changes: 13 additions & 9 deletions polkadot/node/service/src/builder/partial.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,13 @@
#![cfg(feature = "full-node")]

use crate::{
fake_runtime_api::RuntimeApi, grandpa_support, relay_chain_selection, Error, FullBackend,
FullClient, IdentifyVariant, GRANDPA_JUSTIFICATION_PERIOD,
grandpa_support, relay_chain_selection, Error, FullBackend, FullClient, IdentifyVariant,
GRANDPA_JUSTIFICATION_PERIOD,
};
use polkadot_primitives::Block;
use sc_consensus_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider;
use sc_consensus_grandpa::{
FinalityProofProvider as GrandpaFinalityProofProvider, GrandpaPruningFilter,
};
use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
use sc_service::{Configuration, Error as SubstrateServiceError, KeystoreContainer, TaskManager};
use sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle};
Expand Down Expand Up @@ -120,12 +122,14 @@ pub(crate) fn new_partial_basics(
.with_runtime_cache_size(config.executor.runtime_cache_size)
.build();

let (client, backend, keystore_container, task_manager) =
sc_service::new_full_parts::<Block, RuntimeApi, _>(
&config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
executor,
)?;
// Use GrandpaPruningFilter to preserve blocks with GRANDPA justifications during
// pruning. This is required for warp sync to work on pruned nodes.
let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts(
&config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
executor,
vec![Arc::new(GrandpaPruningFilter)],
)?;
let client = Arc::new(client);

let telemetry = telemetry.map(|(worker, telemetry)| {
Expand Down
27 changes: 27 additions & 0 deletions prdoc/pr_10893.prdoc
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
title: Do not prune blocks with GRANDPA justifications
doc:
- audience:
- Node Dev
- Node Operator
description: |-
Warp sync requires GRANDPA justifications at authority set change boundaries to construct proofs. When block pruning is enabled, all block bodies are removed regardless of whether they contain important justifications. Pruned nodes can therefore no longer serve warp sync proofs.
We now have the capability to filter which blocks can be safely pruned. For parachain nodes, everything can be pruned; solochain nodes using GRANDPA keep blocks with justifications. This ensures that warp sync remains possible within the network.
crates:
- name: polkadot-service
bump: major
- name: sc-cli
bump: major
- name: sc-consensus-beefy
bump: major
- name: sc-consensus-grandpa
bump: major
- name: sc-client-db
bump: major
- name: sc-service
bump: major
- name: frame-benchmarking-cli
bump: major
- name: polkadot-omni-node-lib
bump: major
- name: staging-node-inspect
bump: major
1 change: 1 addition & 0 deletions substrate/bin/node/cli/src/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -227,6 +227,7 @@ pub fn new_partial(
config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
executor,
vec![Arc::new(grandpa::GrandpaPruningFilter)],
)?;
let client = Arc::new(client);

Expand Down
3 changes: 2 additions & 1 deletion substrate/bin/node/inspect/src/command.rs
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,8 @@ impl InspectCmd {
RA: Send + Sync + 'static,
{
let executor = sc_service::new_wasm_executor::<HostFunctions>(&config.executor);
let client = sc_service::new_full_client::<B, RA, _>(&config, None, executor)?;
let client =
sc_service::new_full_client::<B, RA, _>(&config, None, executor, Default::default())?;
let inspect = Inspector::<B>::new(client);

match &self.command {
Expand Down
1 change: 1 addition & 0 deletions substrate/bin/node/testing/src/bench.rs
Original file line number Diff line number Diff line change
Expand Up @@ -391,6 +391,7 @@ impl BenchDb {
state_pruning: Some(PruningMode::ArchiveAll),
source: database_type.into_settings(dir.into()),
blocks_pruning: sc_client_db::BlocksPruning::KeepAll,
pruning_filters: Default::default(),
metrics_registry: None,
};
let task_executor = TaskExecutor::new();
Expand Down
1 change: 1 addition & 0 deletions substrate/client/cli/src/commands/chain_info_cmd.rs
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,7 @@ impl ChainInfoCmd {
state_pruning: config.state_pruning.clone(),
source: config.database.clone(),
blocks_pruning: config.blocks_pruning,
pruning_filters: Default::default(),
metrics_registry: None,
};
let backend = sc_service::new_db_backend::<B>(db_config)?;
Expand Down
1 change: 1 addition & 0 deletions substrate/client/consensus/grandpa/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ rand = { workspace = true, default-features = true }
sc-block-builder = { workspace = true, default-features = true }
sc-chain-spec = { workspace = true, default-features = true }
sc-client-api = { workspace = true, default-features = true }
sc-client-db = { workspace = true, default-features = true }
sc-consensus = { workspace = true, default-features = true }
sc-network = { workspace = true, default-features = true }
sc-network-common = { workspace = true, default-features = true }
Expand Down
16 changes: 15 additions & 1 deletion substrate/client/consensus/grandpa/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -148,10 +148,24 @@ use until_imported::UntilGlobalMessageBlocksImported;
// Re-export these two because it's just so damn convenient.
pub use sp_consensus_grandpa::{
AuthorityId, AuthorityPair, CatchUp, Commit, CompactCommit, GrandpaApi, Message, Precommit,
Prevote, PrimaryPropose, ScheduledChange, SignedMessage,
Prevote, PrimaryPropose, ScheduledChange, SignedMessage, GRANDPA_ENGINE_ID,
};
use std::marker::PhantomData;

/// Filter that preserves blocks with GRANDPA justifications during pruning.
///
/// Use this filter with `DatabaseSettings::pruning_filters` to ensure that blocks
/// required for warp sync are not pruned. GRANDPA justifications at authority set change
/// boundaries are needed to construct warp sync proofs.
#[derive(Debug, Clone)]
pub struct GrandpaPruningFilter;

impl sc_client_db::PruningFilter for GrandpaPruningFilter {
/// Retain a block if (and only if) its justifications contain an entry under
/// `GRANDPA_ENGINE_ID`; blocks without a GRANDPA justification may be pruned.
fn should_retain(&self, justifications: &sp_runtime::Justifications) -> bool {
justifications.get(GRANDPA_ENGINE_ID).is_some()
}
}

#[cfg(test)]
mod tests;

Expand Down
1 change: 1 addition & 0 deletions substrate/client/db/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ criterion = { workspace = true, default-features = true }
kitchensink-runtime = { workspace = true }
kvdb-rocksdb = { workspace = true }
rand = { workspace = true, default-features = true }
sp-database = { workspace = true, default-features = true, features = ["rocksdb"] }
sp-tracing = { workspace = true, default-features = true }
substrate-test-runtime-client = { workspace = true }
tempfile = { workspace = true }
Expand Down
1 change: 1 addition & 0 deletions substrate/client/db/benches/state_access.rs
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,7 @@ fn create_backend(config: BenchmarkConfig, temp_dir: &TempDir) -> Backend<Block>
state_pruning: Some(PruningMode::ArchiveAll),
source: DatabaseSource::ParityDb { path },
blocks_pruning: BlocksPruning::KeepAll,
pruning_filters: Default::default(),
metrics_registry: None,
};

Expand Down
Loading
Loading