Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

64 changes: 64 additions & 0 deletions crates/chain-state/src/execution_stats.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
//! Execution timing statistics for slow block logging.
//!
//! This module provides types for collecting and passing execution timing statistics
//! through the block processing pipeline, enabling unified slow block logging after
//! database commit.

/// Statistics collected during block execution for cross-client performance analysis.
///
/// These statistics are populated during block validation and carried through to
/// persistence, where they are used to emit a single unified log entry that includes
/// complete timing information (including commit time).
///
/// All `*_ms` fields are wall-clock durations in milliseconds; counter fields are
/// raw event counts accumulated over the whole block. `Default` yields an all-zero
/// (and empty-hash) instance.
#[derive(Debug, Clone, Default)]
pub struct ExecutionTimingStats {
    /// Block number
    pub block_number: u64,
    /// Block hash as formatted string (0x-prefixed hex)
    pub block_hash: String,
    /// Total gas used by the block
    pub gas_used: u64,
    /// Number of transactions in the block
    pub tx_count: usize,
    /// Time spent executing transactions (milliseconds)
    pub execution_ms: f64,
    /// Time spent reading state during execution (milliseconds)
    pub state_read_ms: f64,
    /// Time spent computing state root hash (milliseconds)
    pub state_hash_ms: f64,
    /// Number of accounts read during execution
    pub accounts_read: usize,
    /// Number of storage slots read (SLOAD operations)
    pub storage_read: usize,
    /// Number of code reads (EXTCODE* operations)
    pub code_read: usize,
    /// Total bytes of code read
    pub code_bytes_read: usize,
    /// Number of accounts changed (balance/nonce updates)
    pub accounts_changed: usize,
    /// Number of accounts deleted (SELFDESTRUCT)
    pub accounts_deleted: usize,
    /// Number of storage slots changed (SSTORE operations)
    pub storage_slots_changed: usize,
    /// Number of storage slots deleted (set to zero)
    pub storage_slots_deleted: usize,
    /// Number of bytecodes created/changed (contract deployments)
    pub bytecodes_changed: usize,
    /// Total bytes of code written
    pub code_bytes_written: usize,
    /// Number of EIP-7702 delegations set
    pub eip7702_delegations_set: usize,
    /// Number of EIP-7702 delegations cleared
    pub eip7702_delegations_cleared: usize,
    /// Account cache hits
    pub account_cache_hits: u64,
    /// Account cache misses
    pub account_cache_misses: u64,
    /// Storage cache hits
    pub storage_cache_hits: u64,
    /// Storage cache misses
    pub storage_cache_misses: u64,
    /// Code cache hits
    pub code_cache_hits: u64,
    /// Code cache misses
    pub code_cache_misses: u64,
}
37 changes: 32 additions & 5 deletions crates/chain-state/src/in_memory.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,8 @@

use crate::{
CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications,
ChainInfoTracker, ComputedTrieData, DeferredTrieData, MemoryOverlayStateProvider,
ChainInfoTracker, ComputedTrieData, DeferredTrieData, ExecutionTimingStats,
MemoryOverlayStateProvider,
};
use alloy_consensus::{transaction::TransactionMeta, BlockHeader};
use alloy_eips::{BlockHashOrNumber, BlockNumHash};
Expand Down Expand Up @@ -328,7 +329,7 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> {
{
if self.inner.in_memory_state.blocks.read().get(&persisted_num_hash.hash).is_none() {
// do nothing
return
return;
}
}

Expand Down Expand Up @@ -567,7 +568,7 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> {
if let Some(tx) =
block_state.block_ref().recovered_block().body().transaction_by_hash(&hash)
{
return Some(tx.clone())
return Some(tx.clone());
}
}
None
Expand Down Expand Up @@ -760,6 +761,12 @@ pub struct ExecutedBlock<N: NodePrimitives = EthPrimitives> {
/// This allows deferring the computation of the trie data which can be expensive.
/// The data can be populated asynchronously after the block was validated.
pub trie_data: DeferredTrieData,
/// Optional timing statistics for slow block logging.
///
/// When slow block logging is enabled, these statistics are populated during
/// block validation and used by the persistence service to emit a unified log
/// after database commit.
pub timing_stats: Option<ExecutionTimingStats>,
}

impl<N: NodePrimitives> Default for ExecutedBlock<N> {
Expand All @@ -776,6 +783,7 @@ impl<N: NodePrimitives> Default for ExecutedBlock<N> {
state: Default::default(),
}),
trie_data: DeferredTrieData::ready(ComputedTrieData::default()),
timing_stats: None,
}
}
}
Expand All @@ -798,7 +806,12 @@ impl<N: NodePrimitives> ExecutedBlock<N> {
execution_output: Arc<BlockExecutionOutput<N::Receipt>>,
trie_data: ComputedTrieData,
) -> Self {
Self { recovered_block, execution_output, trie_data: DeferredTrieData::ready(trie_data) }
Self {
recovered_block,
execution_output,
trie_data: DeferredTrieData::ready(trie_data),
timing_stats: None,
}
}

/// Create a new [`ExecutedBlock`] with deferred trie data.
Expand All @@ -820,7 +833,7 @@ impl<N: NodePrimitives> ExecutedBlock<N> {
execution_output: Arc<BlockExecutionOutput<N::Receipt>>,
trie_data: DeferredTrieData,
) -> Self {
Self { recovered_block, execution_output, trie_data }
Self { recovered_block, execution_output, trie_data, timing_stats: None }
}

/// Returns a reference to an inner [`SealedBlock`]
Expand Down Expand Up @@ -878,6 +891,20 @@ impl<N: NodePrimitives> ExecutedBlock<N> {
self.trie_data().trie_updates
}

/// Sets the timing statistics for slow block logging.
///
/// This should be called after block validation to attach timing statistics
/// that will be used by persistence to emit a unified slow block log.
pub fn with_timing_stats(mut self, timing_stats: ExecutionTimingStats) -> Self {
self.timing_stats = Some(timing_stats);
self
}

/// Returns a reference to the timing statistics, if any have been attached.
pub const fn timing_stats(&self) -> Option<&ExecutionTimingStats> {
    // Explicit match rather than `Option::as_ref` keeps this usable in const context.
    match &self.timing_stats {
        Some(stats) => Some(stats),
        None => None,
    }
}

/// Returns the trie input anchored to the persisted ancestor.
///
/// May compute trie data synchronously if the deferred task hasn't completed.
Expand Down
3 changes: 3 additions & 0 deletions crates/chain-state/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,9 @@
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg))]

mod execution_stats;
pub use execution_stats::ExecutionTimingStats;

mod in_memory;
pub use in_memory::*;

Expand Down
84 changes: 81 additions & 3 deletions crates/engine/tree/src/persistence.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
use crate::metrics::PersistenceMetrics;
use alloy_eips::BlockNumHash;
use crossbeam_channel::Sender as CrossbeamSender;
use reth_chain_state::ExecutedBlock;
use reth_chain_state::{ExecutedBlock, ExecutionTimingStats};
use reth_errors::ProviderError;
use reth_ethereum_primitives::EthPrimitives;
use reth_evm::metrics::{is_slow_block, is_slow_block_logging_enabled};
use reth_primitives_traits::NodePrimitives;
use reth_provider::{
providers::ProviderNodeTypes, BlockExecutionWriter, BlockHashReader, ChainStateBlockWriter,
Expand All @@ -16,7 +17,7 @@ use std::{
time::Instant,
};
use thiserror::Error;
use tracing::{debug, error};
use tracing::{debug, error, warn};

/// Writes parts of reth's in memory tree state to the database and static files.
///
Expand Down Expand Up @@ -146,6 +147,13 @@ where
let block_count = blocks.len();
debug!(target: "engine::persistence", ?block_count, first=?first_block, last=?last_block, "Saving range of blocks");

// Extract timing stats before consuming blocks (for slow block logging after commit)
let timing_stats_list: Vec<ExecutionTimingStats> = if is_slow_block_logging_enabled() {
blocks.iter().filter_map(|b| b.timing_stats().cloned()).collect()
} else {
Vec::new()
};

let start_time = Instant::now();

if last_block.is_some() {
Expand All @@ -155,13 +163,83 @@ where
provider_rw.commit()?;
}

let commit_duration = start_time.elapsed();
let commit_ms = commit_duration.as_secs_f64() * 1000.0;
debug!(target: "engine::persistence", first=?first_block, last=?last_block, "Saved range of blocks");

self.metrics.save_blocks_block_count.record(block_count as f64);
self.metrics.save_blocks_duration_seconds.record(start_time.elapsed());
self.metrics.save_blocks_duration_seconds.record(commit_duration);

// Emit unified slow block logs after commit completes
for stats in timing_stats_list {
let total_ms =
stats.execution_ms + stats.state_read_ms + stats.state_hash_ms + commit_ms;

// Check if total time exceeds threshold
if is_slow_block(total_ms) {
self.log_slow_block(&stats, commit_ms, total_ms);
}
}

Ok(last_block)
}

/// Logs slow block execution in JSON format for cross-client performance analysis.
///
/// This method outputs a standardized log entry when block execution
/// exceeds the slow block threshold, including all timing phases.
///
/// # Arguments
/// * `stats` - timing/counter statistics collected during block validation
/// * `commit_ms` - database commit time in milliseconds, measured by the caller
/// * `total_ms` - precomputed sum of execution + state read + state hash + commit
///
/// Emitted under the dedicated `reth::slow_block` target so operators can route
/// or filter these entries independently of other persistence logs.
fn log_slow_block(&self, stats: &ExecutionTimingStats, commit_ms: f64, total_ms: f64) {
    use reth_evm::metrics::calculate_hit_rate;

    // Throughput in millions of gas per second; guarded so a zero execution
    // time (e.g. empty block) doesn't divide by zero.
    let mgas_per_sec = if stats.execution_ms > 0.0 {
        (stats.gas_used as f64 / 1_000_000.0) / (stats.execution_ms / 1000.0)
    } else {
        0.0
    };

    // Calculate cache hit rates
    // NOTE(review): hit-rate semantics (e.g. percentage vs ratio, behavior when
    // hits + misses == 0) are defined by `calculate_hit_rate` in reth_evm — not
    // visible here.
    let account_hit_rate =
        calculate_hit_rate(stats.account_cache_hits, stats.account_cache_misses);
    let storage_hit_rate =
        calculate_hit_rate(stats.storage_cache_hits, stats.storage_cache_misses);
    let code_hit_rate = calculate_hit_rate(stats.code_cache_hits, stats.code_cache_misses);

    // Floats are pre-formatted with fixed precision via `format!` so the log
    // output has stable decimal places across entries; this allocates, but the
    // method only runs for blocks already classified as slow.
    warn!(
        target: "reth::slow_block",
        message = "Slow block",
        block.number = stats.block_number,
        block.hash = stats.block_hash,
        block.gas_used = stats.gas_used,
        block.tx_count = stats.tx_count,
        timing.execution_ms = format!("{:.3}", stats.execution_ms),
        timing.state_read_ms = format!("{:.3}", stats.state_read_ms),
        timing.state_hash_ms = format!("{:.3}", stats.state_hash_ms),
        timing.commit_ms = format!("{:.3}", commit_ms),
        timing.total_ms = format!("{:.3}", total_ms),
        throughput.mgas_per_sec = format!("{:.2}", mgas_per_sec),
        state_reads.accounts = stats.accounts_read,
        state_reads.storage_slots = stats.storage_read,
        state_reads.code = stats.code_read,
        state_reads.code_bytes = stats.code_bytes_read,
        state_writes.accounts = stats.accounts_changed,
        state_writes.accounts_deleted = stats.accounts_deleted,
        state_writes.storage_slots = stats.storage_slots_changed,
        state_writes.storage_slots_deleted = stats.storage_slots_deleted,
        state_writes.code = stats.bytecodes_changed,
        state_writes.code_bytes = stats.code_bytes_written,
        state_writes.eip7702_delegations_set = stats.eip7702_delegations_set,
        state_writes.eip7702_delegations_cleared = stats.eip7702_delegations_cleared,
        cache.account.hits = stats.account_cache_hits,
        cache.account.misses = stats.account_cache_misses,
        cache.account.hit_rate = format!("{:.2}", account_hit_rate),
        cache.storage.hits = stats.storage_cache_hits,
        cache.storage.misses = stats.storage_cache_misses,
        cache.storage.hit_rate = format!("{:.2}", storage_hit_rate),
        cache.code.hits = stats.code_cache_hits,
        cache.code.misses = stats.code_cache_misses,
        cache.code.hit_rate = format!("{:.2}", code_hit_rate),
    );
}
}

/// One of the errors that can happen when using the persistence service.
Expand Down
Loading
Loading