diff --git a/Cargo.lock b/Cargo.lock index 7cec4939eb92d..63c006ab12155 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -20140,6 +20140,7 @@ dependencies = [ "sc-block-builder", "sc-chain-spec 28.0.0", "sc-client-api 28.0.0", + "sc-client-db", "sc-consensus", "sc-network 0.34.0", "sc-network-common 0.33.0", diff --git a/cumulus/polkadot-omni-node/lib/src/common/spec.rs b/cumulus/polkadot-omni-node/lib/src/common/spec.rs index fd2ae17a3dbd3..f6d9dc43db95b 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/spec.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/spec.rs @@ -248,6 +248,7 @@ pub(crate) trait BaseNodeSpec { telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, true, + Default::default(), )?; let client = Arc::new(client); diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 2ac5b21418125..18f3896772ba5 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -196,6 +196,7 @@ pub fn new_partial( None, executor, enable_import_proof_record, + Default::default(), )?; let client = Arc::new(client); diff --git a/polkadot/node/service/src/builder/partial.rs b/polkadot/node/service/src/builder/partial.rs index 1bca72e210716..5008257c0d0f2 100644 --- a/polkadot/node/service/src/builder/partial.rs +++ b/polkadot/node/service/src/builder/partial.rs @@ -19,11 +19,13 @@ #![cfg(feature = "full-node")] use crate::{ - fake_runtime_api::RuntimeApi, grandpa_support, relay_chain_selection, Error, FullBackend, - FullClient, IdentifyVariant, GRANDPA_JUSTIFICATION_PERIOD, + grandpa_support, relay_chain_selection, Error, FullBackend, FullClient, IdentifyVariant, + GRANDPA_JUSTIFICATION_PERIOD, }; use polkadot_primitives::Block; -use sc_consensus_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider; +use sc_consensus_grandpa::{ + FinalityProofProvider as GrandpaFinalityProofProvider, GrandpaPruningFilter, +}; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; 
use sc_service::{Configuration, Error as SubstrateServiceError, KeystoreContainer, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle}; @@ -120,12 +122,14 @@ pub(crate) fn new_partial_basics( .with_runtime_cache_size(config.executor.runtime_cache_size) .build(); - let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::( - &config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - executor, - )?; + // Use GrandpaPruningFilter to preserve blocks with GRANDPA justifications during + // pruning. This is required for warp sync to work on pruned nodes. + let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts( + &config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + vec![Arc::new(GrandpaPruningFilter)], + )?; let client = Arc::new(client); let telemetry = telemetry.map(|(worker, telemetry)| { diff --git a/prdoc/pr_10893.prdoc b/prdoc/pr_10893.prdoc new file mode 100644 index 0000000000000..987974f223e0b --- /dev/null +++ b/prdoc/pr_10893.prdoc @@ -0,0 +1,27 @@ +title: Do not prune blocks with GRANDPA justifications +doc: +- audience: + - Node Dev + - Node Operator + description: |- + Warp sync requires GRANDPA justifications at authority set change boundaries to construct proofs. When block pruning is enabled, all block bodies are removed regardless of whether they contain important justifications. Pruned nodes can then no longer serve warp sync proofs. + We now have the capability to filter which blocks can be safely pruned. For parachain nodes, everything can be pruned; solochain nodes using GRANDPA keep blocks with justifications. This ensures warp sync remains possible within the network. 
+crates: +- name: polkadot-service + bump: major +- name: sc-cli + bump: major +- name: sc-consensus-beefy + bump: major +- name: sc-consensus-grandpa + bump: major +- name: sc-client-db + bump: major +- name: sc-service + bump: major +- name: frame-benchmarking-cli + bump: major +- name: polkadot-omni-node-lib + bump: major +- name: staging-node-inspect + bump: major diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index f788fda4e282a..dbe38a1391680 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -227,6 +227,7 @@ pub fn new_partial( config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, + vec![Arc::new(grandpa::GrandpaPruningFilter)], )?; let client = Arc::new(client); diff --git a/substrate/bin/node/inspect/src/command.rs b/substrate/bin/node/inspect/src/command.rs index b9e5e55be8ef3..25c1411efa884 100644 --- a/substrate/bin/node/inspect/src/command.rs +++ b/substrate/bin/node/inspect/src/command.rs @@ -37,7 +37,8 @@ impl InspectCmd { RA: Send + Sync + 'static, { let executor = sc_service::new_wasm_executor::(&config.executor); - let client = sc_service::new_full_client::(&config, None, executor)?; + let client = + sc_service::new_full_client::(&config, None, executor, Default::default())?; let inspect = Inspector::::new(client); match &self.command { diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index f348fcc1c181f..476a7000710c1 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -391,6 +391,7 @@ impl BenchDb { state_pruning: Some(PruningMode::ArchiveAll), source: database_type.into_settings(dir.into()), blocks_pruning: sc_client_db::BlocksPruning::KeepAll, + pruning_filters: Default::default(), metrics_registry: None, }; let task_executor = TaskExecutor::new(); diff --git a/substrate/client/cli/src/commands/chain_info_cmd.rs 
b/substrate/client/cli/src/commands/chain_info_cmd.rs index ab288f4af15f4..36ed24f43dc7f 100644 --- a/substrate/client/cli/src/commands/chain_info_cmd.rs +++ b/substrate/client/cli/src/commands/chain_info_cmd.rs @@ -77,6 +77,7 @@ impl ChainInfoCmd { state_pruning: config.state_pruning.clone(), source: config.database.clone(), blocks_pruning: config.blocks_pruning, + pruning_filters: Default::default(), metrics_registry: None, }; let backend = sc_service::new_db_backend::(db_config)?; diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml index 5ff5b609873c5..d340b49c149ad 100644 --- a/substrate/client/consensus/grandpa/Cargo.toml +++ b/substrate/client/consensus/grandpa/Cargo.toml @@ -33,6 +33,7 @@ rand = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } sc-chain-spec = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } +sc-client-db = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/grandpa/src/lib.rs b/substrate/client/consensus/grandpa/src/lib.rs index 48f9248f3c562..25298a05e5891 100644 --- a/substrate/client/consensus/grandpa/src/lib.rs +++ b/substrate/client/consensus/grandpa/src/lib.rs @@ -148,10 +148,24 @@ use until_imported::UntilGlobalMessageBlocksImported; // Re-export these two because it's just so damn convenient. 
pub use sp_consensus_grandpa::{ AuthorityId, AuthorityPair, CatchUp, Commit, CompactCommit, GrandpaApi, Message, Precommit, - Prevote, PrimaryPropose, ScheduledChange, SignedMessage, + Prevote, PrimaryPropose, ScheduledChange, SignedMessage, GRANDPA_ENGINE_ID, }; use std::marker::PhantomData; +/// Filter that preserves blocks with GRANDPA justifications during pruning. +/// +/// Use this filter with `DatabaseSettings::pruning_filters` to ensure that blocks +/// required for warp sync are not pruned. GRANDPA justifications at authority set change +/// boundaries are needed to construct warp sync proofs. +#[derive(Debug, Clone)] +pub struct GrandpaPruningFilter; + +impl sc_client_db::PruningFilter for GrandpaPruningFilter { + fn should_retain(&self, justifications: &sp_runtime::Justifications) -> bool { + justifications.get(GRANDPA_ENGINE_ID).is_some() + } +} + #[cfg(test)] mod tests; diff --git a/substrate/client/db/Cargo.toml b/substrate/client/db/Cargo.toml index 8d898ea8bb4ff..edb61c38b36fc 100644 --- a/substrate/client/db/Cargo.toml +++ b/substrate/client/db/Cargo.toml @@ -51,6 +51,7 @@ criterion = { workspace = true, default-features = true } kitchensink-runtime = { workspace = true } kvdb-rocksdb = { workspace = true } rand = { workspace = true, default-features = true } +sp-database = { workspace = true, default-features = true, features = ["rocksdb"] } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } tempfile = { workspace = true } diff --git a/substrate/client/db/benches/state_access.rs b/substrate/client/db/benches/state_access.rs index fd0bce581854b..7ea5b17a321ff 100644 --- a/substrate/client/db/benches/state_access.rs +++ b/substrate/client/db/benches/state_access.rs @@ -125,6 +125,7 @@ fn create_backend(config: BenchmarkConfig, temp_dir: &TempDir) -> Backend state_pruning: Some(PruningMode::ArchiveAll), source: DatabaseSource::ParityDb { path }, blocks_pruning: BlocksPruning::KeepAll, + 
pruning_filters: Default::default(), metrics_registry: None, }; diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 15ba31d3fb1f9..fbf4c482a114e 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -101,6 +101,26 @@ pub use sp_database::Database; pub use bench::BenchmarkingState; +/// Filter to determine if a block should be excluded from pruning. +/// +/// Note: This filter only affects **block body** (and future header) pruning. +/// It does **not** affect state pruning, which is configured separately. +pub trait PruningFilter: Send + Sync { + /// Check if a block with the given justifications should be preserved. + /// + /// Returns `true` to preserve the block, `false` to allow pruning. + fn should_retain(&self, justifications: &Justifications) -> bool; +} + +impl PruningFilter for F +where + F: Fn(&Justifications) -> bool + Send + Sync, +{ + fn should_retain(&self, justifications: &Justifications) -> bool { + (self)(justifications) + } +} + const CACHE_HEADERS: usize = 8; /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. @@ -313,7 +333,12 @@ pub struct DatabaseSettings { /// /// NOTE: only finalized blocks are subject for removal! pub blocks_pruning: BlocksPruning, - + /// Filters to exclude blocks from pruning. + /// + /// If any filter returns `true` for a block's justifications, the block body + /// (and in the future, the header) will be preserved even when it falls + /// outside the pruning window. Does not affect state pruning. + pub pruning_filters: Vec>, /// Prometheus metrics registry. 
pub metrics_registry: Option, } @@ -1130,6 +1155,7 @@ pub struct Backend { state_usage: Arc, genesis_state: RwLock>>>, shared_trie_cache: Option>>, + pruning_filters: Vec>, } impl Backend { @@ -1168,11 +1194,39 @@ impl Backend { Self::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), canonicalization_delay) } + /// Create new memory-backed client backend for tests with custom pruning filters. + #[cfg(any(test, feature = "test-helpers"))] + pub fn new_test_with_pruning_filters( + blocks_pruning: u32, + canonicalization_delay: u64, + pruning_filters: Vec>, + ) -> Self { + Self::new_test_with_tx_storage_and_filters( + BlocksPruning::Some(blocks_pruning), + canonicalization_delay, + pruning_filters, + ) + } + /// Create new memory-backed client backend for tests. #[cfg(any(test, feature = "test-helpers"))] pub fn new_test_with_tx_storage( blocks_pruning: BlocksPruning, canonicalization_delay: u64, + ) -> Self { + Self::new_test_with_tx_storage_and_filters( + blocks_pruning, + canonicalization_delay, + Default::default(), + ) + } + + /// Create new memory-backed client backend for tests with custom pruning filters. + #[cfg(any(test, feature = "test-helpers"))] + pub fn new_test_with_tx_storage_and_filters( + blocks_pruning: BlocksPruning, + canonicalization_delay: u64, + pruning_filters: Vec>, ) -> Self { let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); let db = sp_database::as_database(db); @@ -1186,6 +1240,7 @@ impl Backend { state_pruning: Some(state_pruning), source: DatabaseSource::Custom { db, require_create_flag: true }, blocks_pruning, + pruning_filters, metrics_registry: None, }; @@ -1278,6 +1333,7 @@ impl Backend { blocks_pruning: config.blocks_pruning, genesis_state: RwLock::new(None), shared_trie_cache, + pruning_filters: config.pruning_filters.clone(), }; // Older DB versions have no last state key. Check if the state is available and set it. 
@@ -1965,6 +2021,30 @@ impl Backend { // Before we prune a block, check if it is pinned if let Some(hash) = self.blockchain.hash(number)? { + // Check if any pruning filter wants to preserve this block. + // We need to check both the current transaction justifications (not yet in DB) + // and the DB itself (for justifications from previous transactions). + if !self.pruning_filters.is_empty() { + let justifications = match current_transaction_justifications.get(&hash) { + Some(j) => Some(Justifications::from(j.clone())), + None => self.blockchain.justifications(hash)?, + }; + + let should_retain = justifications + .map(|j| self.pruning_filters.iter().any(|f| f.should_retain(&j))) + .unwrap_or(false); + + // We can just return here, pinning can be ignored since the block will + // remain in the DB. + if should_retain { + debug!( + target: "db", + "Preserving block #{number} ({hash}) due to keep predicate match" + ); + return Ok(()); + } + } + self.blockchain.insert_persisted_body_if_pinned(hash)?; // If the block was finalized in this transaction, it will not be in the db @@ -2880,6 +2960,7 @@ pub(crate) mod tests { state_pruning: Some(PruningMode::blocks_pruning(1)), source: DatabaseSource::Custom { db: backing, require_create_flag: false }, blocks_pruning: BlocksPruning::KeepFinalized, + pruning_filters: Default::default(), metrics_registry: None, }, 0, @@ -5040,4 +5121,189 @@ pub(crate) mod tests { backend.unpin_block(fork_hash_3); assert!(bc.body(fork_hash_3).unwrap().is_none()); } + + #[test] + fn prune_blocks_with_empty_predicates_prunes_all() { + // Test backward compatibility: empty predicates means all blocks are pruned + let backend = Backend::::new_test_with_tx_storage_and_filters( + BlocksPruning::Some(2), + 0, + vec![], // Empty predicates + ); + + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + + // Create 5 blocks + for i in 0..5 { + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + 
vec![UncheckedXt::new_transaction(i.into(), ())], + None, + ) + .unwrap(); + blocks.push(hash); + prev_hash = hash; + } + + // Justification - but no predicate to preserve it + let justification = (CONS0_ENGINE_ID, vec![1, 2, 3]); + + // Finalize blocks, adding justification to block 1 + { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); + op.mark_finalized(blocks[1], Some(justification.clone())).unwrap(); + op.mark_finalized(blocks[2], None).unwrap(); + op.mark_finalized(blocks[3], None).unwrap(); + op.mark_finalized(blocks[4], None).unwrap(); + backend.commit_operation(op).unwrap(); + } + + let bc = backend.blockchain(); + + // All blocks outside pruning window should be pruned, even with justification + assert_eq!(None, bc.body(blocks[0]).unwrap()); + assert_eq!(None, bc.body(blocks[1]).unwrap()); // Has justification but no predicate + assert_eq!(None, bc.body(blocks[2]).unwrap()); + + // Blocks 3 and 4 are within the pruning window + assert!(bc.body(blocks[3]).unwrap().is_some()); + assert!(bc.body(blocks[4]).unwrap().is_some()); + } + + #[test] + fn prune_blocks_multiple_filters_or_logic() { + // Test that multiple filters use OR logic: if ANY filter matches, block is kept + let backend = Backend::::new_test_with_tx_storage_and_filters( + BlocksPruning::Some(2), + 0, + vec![ + Arc::new(|j: &Justifications| j.get(CONS0_ENGINE_ID).is_some()), + Arc::new(|j: &Justifications| j.get(CONS1_ENGINE_ID).is_some()), + ], + ); + + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + + // Create 7 blocks + for i in 0..7 { + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![UncheckedXt::new_transaction(i.into(), ())], + None, + ) + .unwrap(); + blocks.push(hash); + prev_hash = hash; + } + + let cons0_justification = (CONS0_ENGINE_ID, vec![1, 2, 3]); + let cons1_justification = (CONS1_ENGINE_ID, vec![4, 5, 6]); + + // Finalize blocks with different 
justification patterns + { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, blocks[6]).unwrap(); + // Block 1: CONS0 only - should be preserved + op.mark_finalized(blocks[1], Some(cons0_justification.clone())).unwrap(); + // Block 2: CONS1 only - should be preserved + op.mark_finalized(blocks[2], Some(cons1_justification.clone())).unwrap(); + // Block 3: No justification - should be pruned + op.mark_finalized(blocks[3], None).unwrap(); + // Block 4: Random/unknown engine ID - should be pruned + op.mark_finalized(blocks[4], Some(([9, 9, 9, 9], vec![7, 8, 9]))).unwrap(); + op.mark_finalized(blocks[5], None).unwrap(); + op.mark_finalized(blocks[6], None).unwrap(); + backend.commit_operation(op).unwrap(); + } + + let bc = backend.blockchain(); + + // Block 0 should be pruned (outside window, no justification) + assert_eq!(None, bc.body(blocks[0]).unwrap()); + + // Block 1 should be preserved (has CONS0 justification) + assert!(bc.body(blocks[1]).unwrap().is_some()); + + // Block 2 should be preserved (has CONS1 justification) + assert!(bc.body(blocks[2]).unwrap().is_some()); + + // Block 3 should be pruned (no justification) + assert_eq!(None, bc.body(blocks[3]).unwrap()); + + // Block 4 should be pruned (unknown engine ID) + assert_eq!(None, bc.body(blocks[4]).unwrap()); + + // Blocks 5 and 6 are within the pruning window + assert!(bc.body(blocks[5]).unwrap().is_some()); + assert!(bc.body(blocks[6]).unwrap().is_some()); + } + + #[test] + fn prune_blocks_filter_only_matches_specific_engine() { + // Test that a filter for one engine ID does NOT preserve blocks with a different engine ID + let backend = Backend::::new_test_with_tx_storage_and_filters( + BlocksPruning::Some(2), + 0, + vec![Arc::new(|j: &Justifications| j.get(CONS0_ENGINE_ID).is_some())], + ); + + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + + // Create 5 blocks + for i in 0..5 { + let hash = insert_block( + &backend, + i, + prev_hash, 
+ None, + Default::default(), + vec![UncheckedXt::new_transaction(i.into(), ())], + None, + ) + .unwrap(); + blocks.push(hash); + prev_hash = hash; + } + + let cons1_justification = (CONS1_ENGINE_ID, vec![4, 5, 6]); + + // Finalize blocks, adding CONS1 justification to block 1 + { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); + // Block 1 gets CONS1 justification - should NOT be preserved by CONS0 filter + op.mark_finalized(blocks[1], Some(cons1_justification.clone())).unwrap(); + op.mark_finalized(blocks[2], None).unwrap(); + op.mark_finalized(blocks[3], None).unwrap(); + op.mark_finalized(blocks[4], None).unwrap(); + backend.commit_operation(op).unwrap(); + } + + let bc = backend.blockchain(); + + // Block 0 should be pruned + assert_eq!(None, bc.body(blocks[0]).unwrap()); + + // Block 1 should also be pruned (CONS1 justification, but only CONS0 filter) + assert_eq!(None, bc.body(blocks[1]).unwrap()); + + // Block 2 should be pruned + assert_eq!(None, bc.body(blocks[2]).unwrap()); + + // Blocks 3 and 4 are within the pruning window + assert!(bc.body(blocks[3]).unwrap().is_some()); + assert!(bc.body(blocks[4]).unwrap().is_some()); + } } diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 7d04388b0873b..9fc5316deed7a 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -144,26 +144,33 @@ pub fn new_full_client( config: &Configuration, telemetry: Option, executor: TExec, + pruning_filters: Vec>, ) -> Result, Error> where TBl: BlockT, TExec: CodeExecutor + RuntimeVersionOf + Clone, { - new_full_parts(config, telemetry, executor).map(|parts| parts.0) + new_full_parts(config, telemetry, executor, pruning_filters).map(|parts| parts.0) } /// Create the initial parts of a full node with the default genesis block builder. 
+/// +/// The `pruning_filters` parameter allows configuring which blocks should be preserved +/// during pruning. pub fn new_full_parts_record_import( config: &Configuration, telemetry: Option, executor: TExec, enable_import_proof_recording: bool, + pruning_filters: Vec>, ) -> Result, Error> where TBl: BlockT, TExec: CodeExecutor + RuntimeVersionOf + Clone, { - let backend = new_db_backend(config.db_config())?; + let mut db_config = config.db_config(); + db_config.pruning_filters = pruning_filters; + let backend = new_db_backend(db_config)?; let genesis_block_builder = GenesisBlockBuilder::new( config.chain_spec.as_storage_builder(), @@ -181,17 +188,22 @@ where enable_import_proof_recording, ) } + /// Create the initial parts of a full node with the default genesis block builder. +/// +/// The `pruning_filters` parameter allows configuring which blocks should be preserved +/// during pruning. pub fn new_full_parts( config: &Configuration, telemetry: Option, executor: TExec, + pruning_filters: Vec>, ) -> Result, Error> where TBl: BlockT, TExec: CodeExecutor + RuntimeVersionOf + Clone, { - new_full_parts_record_import(config, telemetry, executor, false) + new_full_parts_record_import(config, telemetry, executor, false, pruning_filters) } /// Create the initial parts of a full node. @@ -375,7 +387,10 @@ pub fn new_wasm_executor(config: &ExecutorConfiguration) -> Wa .build() } -/// Create an instance of default DB-backend backend. +/// Create an instance of the default DB-backend. +/// +/// Pruning filters can be configured via `settings.pruning_filters`. +/// If any filter returns `true` for a block's justifications, the block will not be pruned. 
pub fn new_db_backend( settings: DatabaseSettings, ) -> Result>, sp_blockchain::Error> diff --git a/substrate/client/service/src/config.rs b/substrate/client/service/src/config.rs index 2effa4782e122..85345756312ad 100644 --- a/substrate/client/service/src/config.rs +++ b/substrate/client/service/src/config.rs @@ -235,6 +235,7 @@ impl Configuration { state_pruning: self.state_pruning.clone(), source: self.database.clone(), blocks_pruning: self.blocks_pruning, + pruning_filters: Default::default(), metrics_registry: self.prometheus_registry().cloned(), } } diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index 512b6214561d1..f24dd2dea8f2b 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -83,6 +83,7 @@ pub use sc_chain_spec::{ ChainSpec, ChainType, Extension as ChainSpecExtension, GenericChainSpec, NoExtension, Properties, }; +pub use sc_client_db::PruningFilter; use crate::config::RpcConfiguration; use prometheus_endpoint::Registry; diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index 1fe2fc4e3f97e..f234df13920ec 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -1489,6 +1489,7 @@ fn doesnt_import_blocks_that_revert_finality() { trie_cache_maximum_size: Some(1 << 20), state_pruning: Some(PruningMode::ArchiveAll), blocks_pruning: BlocksPruning::KeepAll, + pruning_filters: Default::default(), source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, metrics_registry: None, }, @@ -1770,6 +1771,7 @@ fn returns_status_for_pruned_blocks() { trie_cache_maximum_size: Some(1 << 20), state_pruning: Some(PruningMode::blocks_pruning(1)), blocks_pruning: BlocksPruning::KeepFinalized, + pruning_filters: Default::default(), source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, metrics_registry: None, }, diff --git 
a/substrate/frame/revive/dev-node/node/src/service.rs b/substrate/frame/revive/dev-node/node/src/service.rs index 2aee09bb432a7..1eeb511515128 100644 --- a/substrate/frame/revive/dev-node/node/src/service.rs +++ b/substrate/frame/revive/dev-node/node/src/service.rs @@ -65,6 +65,7 @@ pub fn new_partial(config: &Configuration) -> Result { config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, + Default::default(), )?; let client = Arc::new(client); diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs index e1e2ceb419699..edfc36423d87a 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs @@ -539,6 +539,7 @@ impl OverheadCmd { trie_cache_maximum_size: self.trie_cache_maximum_size()?, state_pruning: None, blocks_pruning: BlocksPruning::KeepAll, + pruning_filters: Default::default(), source: database_source, metrics_registry: None, })?; diff --git a/templates/minimal/node/src/service.rs b/templates/minimal/node/src/service.rs index 5c35c9294d7bd..da8f225054b28 100644 --- a/templates/minimal/node/src/service.rs +++ b/templates/minimal/node/src/service.rs @@ -67,6 +67,7 @@ pub fn new_partial(config: &Configuration) -> Result { config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, + Default::default(), )?; let client = Arc::new(client); diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index 01c3cb3c5fff2..a935bac8aaebb 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -100,6 +100,7 @@ pub fn new_partial(config: &Configuration) -> Result telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, true, + Default::default(), )?; let client = Arc::new(client); diff --git a/templates/solochain/node/src/service.rs 
b/templates/solochain/node/src/service.rs index 0f223e3c66ead..331a1409c5fa2 100644 --- a/templates/solochain/node/src/service.rs +++ b/templates/solochain/node/src/service.rs @@ -3,7 +3,7 @@ use futures::FutureExt; use sc_client_api::{Backend, BlockBackend}; use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; -use sc_consensus_grandpa::SharedVoterState; +use sc_consensus_grandpa::{GrandpaPruningFilter, SharedVoterState}; use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncConfig}; use sc_telemetry::{Telemetry, TelemetryWorker}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; @@ -49,11 +49,13 @@ pub fn new_partial(config: &Configuration) -> Result { .transpose()?; let executor = sc_service::new_wasm_executor::(&config.executor); + let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, + vec![Arc::new(GrandpaPruningFilter)], )?; let client = Arc::new(client);