Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions crates/prune/prune/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -51,3 +51,7 @@ reth-testing-utils.workspace = true
reth-tracing.workspace = true

assert_matches.workspace = true

[features]
default = []
# Forwards to the matching `rocksdb` feature of `reth-provider`, enabling the
# RocksDB-gated code paths in this crate (see the `cfg(all(unix, feature = "rocksdb"))` gates).
rocksdb = ["reth-provider/rocksdb"]
69 changes: 69 additions & 0 deletions crates/prune/prune/src/builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@ use reth_config::PruneConfig;
use reth_db_api::{table::Value, transaction::DbTxMut};
use reth_exex_types::FinishedExExHeight;
use reth_primitives_traits::NodePrimitives;
#[cfg(all(unix, feature = "rocksdb"))]
use reth_provider::RocksDBProviderFactory;
use reth_provider::{
providers::StaticFileProvider, BlockReader, ChainStateBlockReader, DBProvider,
DatabaseProviderFactory, NodePrimitivesProvider, PruneCheckpointReader, PruneCheckpointWriter,
Expand Down Expand Up @@ -74,6 +76,7 @@ impl PrunerBuilder {
}

/// Builds a [Pruner] from the current configuration with the given provider factory.
#[cfg(all(unix, feature = "rocksdb"))]
pub fn build_with_provider_factory<PF>(self, provider_factory: PF) -> Pruner<PF::ProviderRW, PF>
where
PF: DatabaseProviderFactory<
Expand All @@ -85,6 +88,7 @@ impl PrunerBuilder {
+ StageCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ RocksDBProviderFactory
+ StaticFileProviderFactory<
Primitives: NodePrimitives<SignedTx: Value, Receipt: Value, BlockHeader: Value>,
>,
Expand All @@ -105,7 +109,72 @@ impl PrunerBuilder {
)
}

/// Builds a [Pruner] from the current configuration with the given provider factory.
#[cfg(not(all(unix, feature = "rocksdb")))]
pub fn build_with_provider_factory<PF>(self, provider_factory: PF) -> Pruner<PF::ProviderRW, PF>
where
    PF: DatabaseProviderFactory<
            ProviderRW: PruneCheckpointWriter
                + PruneCheckpointReader
                + BlockReader<Transaction: Encodable2718>
                + ChainStateBlockReader
                + StorageSettingsCache
                + StageCheckpointReader
                + ChangeSetReader
                + StorageChangeSetReader
                + StaticFileProviderFactory<
                    Primitives: NodePrimitives<SignedTx: Value, Receipt: Value, BlockHeader: Value>,
                >,
        > + StaticFileProviderFactory<
            Primitives = <PF::ProviderRW as NodePrimitivesProvider>::Primitives,
        >,
{
    // Resolve the configured prune modes into concrete segments first, then hand
    // the factory and the segment list over to the pruner.
    let static_file_provider = provider_factory.static_file_provider();
    let segment_set = SegmentSet::from_components(static_file_provider, self.segments);

    Pruner::new_with_factory(
        provider_factory,
        segment_set.into_vec(),
        self.block_interval,
        self.delete_limit,
        self.timeout,
        self.finished_exex_height,
    )
}

/// Builds a [Pruner] from the current configuration with the given static file provider.
#[cfg(all(unix, feature = "rocksdb"))]
pub fn build<Provider>(
    self,
    static_file_provider: StaticFileProvider<Provider::Primitives>,
) -> Pruner<Provider, ()>
where
    Provider: StaticFileProviderFactory<
            Primitives: NodePrimitives<SignedTx: Value, Receipt: Value, BlockHeader: Value>,
        > + DBProvider<Tx: DbTxMut>
        + BlockReader<Transaction: Encodable2718>
        + ChainStateBlockReader
        + PruneCheckpointWriter
        + PruneCheckpointReader
        + StorageSettingsCache
        + StageCheckpointReader
        + ChangeSetReader
        + StorageChangeSetReader
        + RocksDBProviderFactory,
{
    // Turn the configured prune modes into the ordered list of segments the
    // pruner will run over.
    let segments =
        SegmentSet::<Provider>::from_components(static_file_provider, self.segments).into_vec();

    Pruner::new(
        segments,
        self.block_interval,
        self.delete_limit,
        self.timeout,
        self.finished_exex_height,
    )
}

/// Builds a [Pruner] from the current configuration with the given static file provider.
#[cfg(not(all(unix, feature = "rocksdb")))]
pub fn build<Provider>(
self,
static_file_provider: StaticFileProvider<Provider::Primitives>,
Expand Down
55 changes: 55 additions & 0 deletions crates/prune/prune/src/segments/set.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@ use crate::segments::{
use alloy_eips::eip2718::Encodable2718;
use reth_db_api::{table::Value, transaction::DbTxMut};
use reth_primitives_traits::NodePrimitives;
#[cfg(all(unix, feature = "rocksdb"))]
use reth_provider::RocksDBProviderFactory;
use reth_provider::{
providers::StaticFileProvider, BlockReader, ChainStateBlockReader, DBProvider,
PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, StorageSettingsCache,
Expand Down Expand Up @@ -44,6 +46,59 @@ impl<Provider> SegmentSet<Provider> {
}
}

#[cfg(all(unix, feature = "rocksdb"))]
impl<Provider> SegmentSet<Provider>
where
    Provider: StaticFileProviderFactory<
            Primitives: NodePrimitives<SignedTx: Value, Receipt: Value, BlockHeader: Value>,
        > + DBProvider<Tx: DbTxMut>
        + PruneCheckpointWriter
        + PruneCheckpointReader
        + BlockReader<Transaction: Encodable2718>
        + ChainStateBlockReader
        + StorageSettingsCache
        + ChangeSetReader
        + StorageChangeSetReader
        + RocksDBProviderFactory,
{
    /// Creates a [`SegmentSet`] from existing components, such as [`StaticFileProvider`] and
    /// [`PruneModes`].
    ///
    /// NOTE(review): the static file provider is unused on this (RocksDB) path; it is kept in
    /// the signature for parity with the non-`rocksdb` implementation.
    pub fn from_components(
        _static_file_provider: StaticFileProvider<Provider::Primitives>,
        prune_modes: PruneModes,
    ) -> Self {
        let PruneModes {
            sender_recovery,
            transaction_lookup,
            receipts,
            account_history,
            storage_history,
            bodies_history,
            receipts_log_filter,
        } = prune_modes;

        Self::default()
            // Bodies - run first since file deletion is fast
            .segment_opt(bodies_history.map(Bodies::new))
            // Account history
            .segment_opt(account_history.map(AccountHistory::new))
            // Storage history
            .segment_opt(storage_history.map(StorageHistory::new))
            // User receipts
            .segment_opt(receipts.map(UserReceipts::new))
            // Receipts by logs — move the filter into the closure; this is its last use,
            // so the previous `.clone()` was redundant
            .segment_opt(
                (!receipts_log_filter.is_empty()).then(|| ReceiptsByLogs::new(receipts_log_filter)),
            )
            // Transaction lookup
            .segment_opt(transaction_lookup.map(TransactionLookup::new))
            // Sender recovery
            .segment_opt(sender_recovery.map(SenderRecovery::new))
    }
}

#[cfg(not(all(unix, feature = "rocksdb")))]
impl<Provider> SegmentSet<Provider>
where
Provider: StaticFileProviderFactory<
Expand Down
Loading
Loading