From 065e6455c2fff2b60479cd64c2edcc6ca71e1c0a Mon Sep 17 00:00:00 2001 From: "ruiwei.guo" Date: Mon, 1 Dec 2025 15:41:47 +0800 Subject: [PATCH 1/6] feat(metrics): implement RAII-based block timing with Prometheus support --- Cargo.lock | 9 +- crates/engine/tree/src/tree/mod.rs | 72 ++-- crates/node/metrics/src/block_timing.rs | 344 ++++++++++++++++-- .../metrics/src/block_timing_prometheus.rs | 41 +++ crates/node/metrics/src/lib.rs | 8 +- crates/optimism/payload/src/builder.rs | 73 ++-- 6 files changed, 426 insertions(+), 121 deletions(-) create mode 100644 crates/node/metrics/src/block_timing_prometheus.rs diff --git a/Cargo.lock b/Cargo.lock index 4a97d9142a1..f4d7d04783c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7640,7 +7640,7 @@ dependencies = [ [[package]] name = "reth-apollo" -version = "1.9.2" +version = "1.9.3" dependencies = [ "apollo-sdk", "async-once-cell", @@ -9788,6 +9788,7 @@ dependencies = [ "alloy-primitives", "once_cell", "reth-ethereum-forks", + "tracing", ] [[package]] @@ -14588,7 +14589,7 @@ dependencies = [ [[package]] name = "xlayer-db" -version = "1.9.2" +version = "1.9.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -14603,7 +14604,7 @@ dependencies = [ [[package]] name = "xlayer-exex" -version = "1.9.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-evm", @@ -14621,7 +14622,7 @@ dependencies = [ [[package]] name = "xlayer-rpc" -version = "1.9.2" +version = "1.9.3" dependencies = [ "alloy-eips", "alloy-network-primitives", diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 5db698107c5..bf3657cbd66 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -236,29 +236,7 @@ impl OnStateHook for MeteredStateHook { /// 3. Setting insert timing values /// 4. Calculating total as the sum of validate_and_execute + insert_to_tree /// 5. 
Storing the updated metrics -fn update_insert_timing_metrics( - block_hash: B256, - validate_and_execute: std::time::Duration, - insert_to_tree: std::time::Duration, -) { - use reth_node_metrics::block_timing::{get_block_timing, store_block_timing, BlockTimingMetrics}; - - let mut timing_metrics = if let Some(existing) = get_block_timing(&block_hash) { - // Block was built locally, update insert timing - existing - } else { - // Block was received from network, create timing metrics with insert timing only - BlockTimingMetrics::default() - }; - - timing_metrics.insert.validate_and_execute = validate_and_execute; - timing_metrics.insert.insert_to_tree = insert_to_tree; - // Total should be the sum of validate_and_execute + insert_to_tree - timing_metrics.insert.total = timing_metrics.insert.validate_and_execute + timing_metrics.insert.insert_to_tree; - - store_block_timing(block_hash, timing_metrics); -} - +/// Update insert timing metrics (legacy function, kept for compatibility) /// The engine API tree handler implementation. /// /// This type is responsible for processing engine API requests, maintaining the canonical state and @@ -1424,12 +1402,6 @@ where self.metrics.engine.inserted_already_executed_blocks.increment(1); let elapsed = now.elapsed(); - // X Layer: Update timing metrics for InsertExecutedBlock path - // Note: validate_exec time is 0 because block was already executed during build - use std::time::Duration; - let block_hash = block.recovered_block().hash(); - update_insert_timing_metrics(block_hash, Duration::from_nanos(0), insert_tree_elapsed); - self.emit_event(EngineApiEvent::BeaconConsensus( ConsensusEngineEvent::CanonicalBlockAdded(block, elapsed), )); @@ -2492,16 +2464,6 @@ where // run the conversion to ensure that the block hash is valid. 
convert_to_block(self, input)?; - // X Layer: Even if block is already seen, update timing metrics if it was built locally - // Block was built locally but already exists in tree - // Set insert timing to 0 for now, will be updated in event handler if elapsed > 0 - use reth_node_metrics::block_timing::get_block_timing; - use std::time::Duration; - let block_hash = block_num_hash.hash; - if get_block_timing(&block_hash).is_some() { - update_insert_timing_metrics(block_hash, Duration::from_nanos(0), Duration::from_nanos(0)); - } - return Ok(InsertPayloadOk::AlreadySeen(BlockStatus::Valid)) } _ => {} @@ -2546,12 +2508,21 @@ where let ctx = TreeCtx::new(&mut self.state, &self.canonical_in_memory_state); - // X Layer: Track insert timing - let validate_exec_start = Instant::now(); + // X Layer: Track insert timing with RAII let start = Instant::now(); - - let executed = execute(&mut self.payload_validator, input, ctx)?; - let validate_exec_elapsed = validate_exec_start.elapsed(); + let block_hash = block_num_hash.hash; + + // Create timing context with Prometheus support + // We'll create Prometheus metrics from the handler's metrics if needed + use reth_node_metrics::block_timing::{BlockTimingContext, BlockTimingPrometheusMetrics}; + let prom_metrics = BlockTimingPrometheusMetrics::default(); + let mut timing_ctx = BlockTimingContext::new_with_prometheus(block_hash, prom_metrics); + + // validate and execute + let executed = { + let _guard = timing_ctx.time_validate_and_execute(); + execute(&mut self.payload_validator, input, ctx)? 
+ }; // if the parent is the canonical head, we can insert the block as the pending block if self.state.tree_state.canonical_block_hash() == executed.recovered_block().parent_hash() @@ -2560,17 +2531,18 @@ where self.canonical_in_memory_state.set_pending_block(executed.clone()); } - let insert_tree_start = Instant::now(); - self.state.tree_state.insert_executed(executed.clone()); - let insert_tree_elapsed = insert_tree_start.elapsed(); + // insert to tree + { + let _guard = timing_ctx.time_insert_to_tree(); + self.state.tree_state.insert_executed(executed.clone()); + } self.metrics.engine.executed_blocks.set(self.state.tree_state.block_count() as f64); // emit insert event let elapsed = start.elapsed(); - // X Layer: Update timing metrics with insert timing - let block_hash = executed.recovered_block().hash(); - update_insert_timing_metrics(block_hash, validate_exec_elapsed, insert_tree_elapsed); + // X Layer: Update timing metrics (auto-updates and stores on drop) + timing_ctx.update_totals(); let engine_event = if is_fork { ConsensusEngineEvent::ForkBlockAdded(executed.clone(), elapsed) } else { diff --git a/crates/node/metrics/src/block_timing.rs b/crates/node/metrics/src/block_timing.rs index cfeed8215b3..22cc1ff348c 100644 --- a/crates/node/metrics/src/block_timing.rs +++ b/crates/node/metrics/src/block_timing.rs @@ -4,20 +4,24 @@ use alloy_primitives::B256; use indexmap::IndexMap; use std::{ sync::{Arc, Mutex}, - time::Duration, + time::{Duration, Instant}, }; +pub use crate::block_timing_prometheus::BlockTimingPrometheusMetrics; + /// Timing metrics for block building phase #[derive(Debug, Clone, Default)] pub struct BuildTiming { /// Time spent applying pre-execution changes pub apply_pre_execution_changes: Duration, /// Time spent executing sequencer transactions - pub execute_sequencer_transactions: Duration, + pub exec_sequencer_transactions: Duration, + /// Time spent selecting/packing mempool transactions + pub select_mempool_transactions: Duration, 
/// Time spent executing mempool transactions - pub execute_mempool_transactions: Duration, - /// Time spent finishing block build (state root calculation, etc.) - pub finish: Duration, + pub exec_mempool_transactions: Duration, + /// Time spent calculating state root + pub calc_state_root: Duration, /// Total build time pub total: Duration, } @@ -33,17 +37,6 @@ pub struct InsertTiming { pub total: Duration, } -/// Timing metrics for transaction execution -/// -/// Note: Individual transaction execution times (sequencer_txs, mempool_txs) are stored -/// in `BuildTiming` to avoid duplication. This struct only stores the total time. -#[derive(Debug, Clone, Default)] -pub struct DeliverTxsTiming { - /// Total transaction execution time - /// Note: Individual times are stored in BuildTiming (execute_sequencer_transactions, execute_mempool_transactions) - pub total: Duration, -} - /// Complete timing metrics for a block #[derive(Debug, Clone, Default)] pub struct BlockTimingMetrics { @@ -51,22 +44,32 @@ pub struct BlockTimingMetrics { pub build: BuildTiming, /// Block insertion phase timing pub insert: InsertTiming, - /// Transaction execution timing - pub deliver_txs: DeliverTxsTiming, } impl BlockTimingMetrics { /// Format timing metrics for logging pub fn format_for_log(&self) -> String { let format_duration = |d: Duration| { - let ms = d.as_millis(); - let us = d.as_micros(); - if ms > 0 { - format!("{}ms", ms) - } else if us > 0 { - format!("{}µs", us) - } else { - format!("{}ns", d.as_nanos()) + let secs = d.as_secs_f64(); + match () { + _ if secs >= 1.0 => format!("{:.3}s", secs), + _ if secs >= 0.001 => { + let ms = secs * 1000.0; + if ms.fract() == 0.0 { + format!("{}ms", ms as u64) + } else { + format!("{:.3}ms", ms) + } + } + _ if secs >= 0.000001 => { + let us = secs * 1_000_000.0; + if us.fract() == 0.0 { + format!("{}µs", us as u64) + } else { + format!("{:.3}µs", us) + } + } + _ => format!("{}ns", d.as_nanos()), } }; @@ -74,21 +77,19 @@ impl 
BlockTimingMetrics { let is_locally_built = self.build.total.as_nanos() > 0; if is_locally_built { - // Block was built locally, show full timing including Build and DeliverTxs - // Note: DeliverTxs only shows total to avoid duplication with Build's seqTxs/mempoolTxs - let deliver_txs_total_time = self.build.execute_sequencer_transactions + self.build.execute_mempool_transactions; - + // Block was built locally, show full timing including Build + // Note: Transaction execution times (execSeqTxs, selectMempoolTxs, execMempoolTxs) are already shown in Build phase format!( - "Produce[Build[applyPreExec<{}>, seqTxs<{}>, mempoolTxs<{}>, finish<{}>, total<{}>], Insert[validateExec<{}>, insertTree<{}>, total<{}>]], DeliverTxs[total<{}>]", + "Produce[Build[applyPreExec<{}>, execSeqTxs<{}>, selectMempoolTxs<{}>, execMempoolTxs<{}>, calcStateRoot<{}>, total<{}>], Insert[validateExec<{}>, insertTree<{}>, total<{}>]]", format_duration(self.build.apply_pre_execution_changes), - format_duration(self.build.execute_sequencer_transactions), - format_duration(self.build.execute_mempool_transactions), - format_duration(self.build.finish), + format_duration(self.build.exec_sequencer_transactions), + format_duration(self.build.select_mempool_transactions), + format_duration(self.build.exec_mempool_transactions), + format_duration(self.build.calc_state_root), format_duration(self.build.total), format_duration(self.insert.validate_and_execute), format_duration(self.insert.insert_to_tree), format_duration(self.insert.total), - format_duration(deliver_txs_total_time), ) } else { // Block was received from network, only show Insert timing @@ -153,6 +154,277 @@ pub fn get_block_timing(block_hash: &B256) -> Option { pub fn remove_block_timing(block_hash: &B256) { let store = get_timing_store(); let mut map = store.lock().unwrap(); - map.remove(block_hash); + map.shift_remove(block_hash); +} + +// ============================================================================ +// RAII-based 
timing helpers +// ============================================================================ + +/// RAII guard that automatically records timing to a mutable reference and optionally to Prometheus when dropped. +/// +/// Usage: +/// ```rust +/// let mut timing = Duration::default(); +/// { +/// let _guard = TimingGuard::new(&mut timing); +/// // ... do work ... +/// } // Guard is dropped here and timing is automatically recorded +/// ``` +pub struct TimingGuard<'a> { + start: Instant, + target: &'a mut Duration, + prometheus_histogram: Option<&'a metrics::Histogram>, +} + +impl<'a> TimingGuard<'a> { + /// Create a new timing guard that will record the elapsed time to `target` when dropped. + pub fn new(target: &'a mut Duration) -> Self { + Self { + start: Instant::now(), + target, + prometheus_histogram: None, + } + } + + /// Create a new timing guard that records to both `target` and Prometheus histogram. + pub fn new_with_prometheus( + target: &'a mut Duration, + prometheus_histogram: &'a metrics::Histogram, + ) -> Self { + Self { + start: Instant::now(), + target, + prometheus_histogram: Some(prometheus_histogram), + } + } + + /// Record the elapsed duration to target and Prometheus (if enabled). + fn record(&mut self) { + let duration = self.start.elapsed(); + *self.target = duration; + if let Some(histogram) = self.prometheus_histogram { + histogram.record(duration.as_secs_f64()); + } + } + + /// Manually finish timing and return the duration. + /// This updates the target immediately and prevents the drop handler from running. + pub fn finish(mut self) -> Duration { + let duration = self.start.elapsed(); + self.record(); + duration + } +} + +impl<'a> Drop for TimingGuard<'a> { + fn drop(&mut self) { + self.record(); + } +} + +/// Context for managing block timing metrics using RAII. +/// +/// This provides a cleaner API for recording timing metrics throughout the block +/// building and insertion process. 
The context automatically stores metrics when dropped. +/// +/// Supports both logging (via IndexMap) and Prometheus metrics recording. +pub struct BlockTimingContext { + block_hash: B256, + metrics: BlockTimingMetrics, + auto_store: bool, + prometheus_metrics: Option, +} + +impl BlockTimingContext { + /// Create a new timing context for a block. + /// + /// If timing metrics already exist for this block (e.g., from build phase), + /// they will be loaded. Otherwise, a new empty metrics structure is created. + /// + /// Metrics will be automatically stored when the context is dropped. + pub fn new(block_hash: B256) -> Self { + Self::with_metrics(block_hash, get_block_timing(&block_hash), None) + } + + /// Create a new timing context for a block, initializing with empty metrics. + /// + /// Metrics will be automatically stored when the context is dropped. + pub fn new_empty(block_hash: B256) -> Self { + Self::with_metrics(block_hash, None, None) + } + + /// Create a new timing context with Prometheus metrics support. + /// + /// This enables automatic recording to Prometheus in addition to logging. + pub fn new_with_prometheus( + block_hash: B256, + prometheus_metrics: BlockTimingPrometheusMetrics, + ) -> Self { + Self::with_metrics( + block_hash, + get_block_timing(&block_hash), + Some(prometheus_metrics), + ) + } + + /// Create a new empty timing context with Prometheus metrics support. + pub fn new_empty_with_prometheus( + block_hash: B256, + prometheus_metrics: BlockTimingPrometheusMetrics, + ) -> Self { + Self::with_metrics(block_hash, None, Some(prometheus_metrics)) + } + + /// Internal helper to create a context with optional metrics and Prometheus support. + fn with_metrics( + block_hash: B256, + metrics: Option, + prometheus_metrics: Option, + ) -> Self { + Self { + block_hash, + metrics: metrics.unwrap_or_default(), + auto_store: true, + prometheus_metrics, + } + } + + /// Get a mutable reference to the metrics. 
+ pub fn metrics_mut(&mut self) -> &mut BlockTimingMetrics { + &mut self.metrics + } + + /// Get a reference to the metrics. + pub fn metrics(&self) -> &BlockTimingMetrics { + &self.metrics + } + + /// Update the block hash for this context. + pub fn set_block_hash(&mut self, block_hash: B256) { + self.block_hash = block_hash; + } + + /// Create a timing guard for recording build phase: apply pre-execution changes. + pub fn time_apply_pre_execution_changes(&mut self) -> TimingGuard<'_> { + if let Some(ref prom_metrics) = self.prometheus_metrics { + TimingGuard::new_with_prometheus( + &mut self.metrics.build.apply_pre_execution_changes, + &prom_metrics.build_apply_pre_execution_changes, + ) + } else { + TimingGuard::new(&mut self.metrics.build.apply_pre_execution_changes) + } + } + + /// Create a timing guard for recording build phase: execute sequencer transactions. + pub fn time_exec_sequencer_transactions(&mut self) -> TimingGuard<'_> { + if let Some(ref prom_metrics) = self.prometheus_metrics { + TimingGuard::new_with_prometheus( + &mut self.metrics.build.exec_sequencer_transactions, + &prom_metrics.build_exec_sequencer_transactions, + ) + } else { + TimingGuard::new(&mut self.metrics.build.exec_sequencer_transactions) + } + } + + /// Create a timing guard for recording build phase: select/pack mempool transactions. + pub fn time_select_mempool_transactions(&mut self) -> TimingGuard<'_> { + if let Some(ref prom_metrics) = self.prometheus_metrics { + TimingGuard::new_with_prometheus( + &mut self.metrics.build.select_mempool_transactions, + &prom_metrics.build_select_mempool_transactions, + ) + } else { + TimingGuard::new(&mut self.metrics.build.select_mempool_transactions) + } + } + + /// Create a timing guard for recording build phase: execute mempool transactions. 
+ pub fn time_exec_mempool_transactions(&mut self) -> TimingGuard<'_> { + if let Some(ref prom_metrics) = self.prometheus_metrics { + TimingGuard::new_with_prometheus( + &mut self.metrics.build.exec_mempool_transactions, + &prom_metrics.build_exec_mempool_transactions, + ) + } else { + TimingGuard::new(&mut self.metrics.build.exec_mempool_transactions) + } + } + + /// Create a timing guard for recording build phase: calculate state root. + pub fn time_calc_state_root(&mut self) -> TimingGuard<'_> { + if let Some(ref prom_metrics) = self.prometheus_metrics { + TimingGuard::new_with_prometheus( + &mut self.metrics.build.calc_state_root, + &prom_metrics.build_calc_state_root, + ) + } else { + TimingGuard::new(&mut self.metrics.build.calc_state_root) + } + } + + /// Create a timing guard for recording insert phase: validate and execute. + pub fn time_validate_and_execute(&mut self) -> TimingGuard<'_> { + if let Some(ref prom_metrics) = self.prometheus_metrics { + TimingGuard::new_with_prometheus( + &mut self.metrics.insert.validate_and_execute, + &prom_metrics.insert_validate_and_execute, + ) + } else { + TimingGuard::new(&mut self.metrics.insert.validate_and_execute) + } + } + + /// Create a timing guard for recording insert phase: insert to tree. + pub fn time_insert_to_tree(&mut self) -> TimingGuard<'_> { + if let Some(ref prom_metrics) = self.prometheus_metrics { + TimingGuard::new_with_prometheus( + &mut self.metrics.insert.insert_to_tree, + &prom_metrics.insert_insert_to_tree, + ) + } else { + TimingGuard::new(&mut self.metrics.insert.insert_to_tree) + } + } + + /// Manually store the current metrics to the global store. + /// This is called automatically on drop if `auto_store` is true. + pub fn store(&self) { + store_block_timing(self.block_hash, self.metrics.clone()); + } + + /// Set whether metrics should be automatically stored on drop. 
+ pub fn set_auto_store(&mut self, auto_store: bool) { + self.auto_store = auto_store; + } + + /// Calculate and update total times. + pub fn update_totals(&mut self) { + self.metrics.build.total = self.metrics.build.apply_pre_execution_changes + + self.metrics.build.exec_sequencer_transactions + + self.metrics.build.select_mempool_transactions + + self.metrics.build.exec_mempool_transactions + + self.metrics.build.calc_state_root; + + self.metrics.insert.total = self.metrics.insert.validate_and_execute + + self.metrics.insert.insert_to_tree; + + // Also record totals to Prometheus if enabled + if let Some(ref prom_metrics) = self.prometheus_metrics { + prom_metrics.build_total.record(self.metrics.build.total.as_secs_f64()); + prom_metrics.insert_total.record(self.metrics.insert.total.as_secs_f64()); + } + } +} + +impl Drop for BlockTimingContext { + fn drop(&mut self) { + if self.auto_store { + self.update_totals(); + self.store(); + } + } } diff --git a/crates/node/metrics/src/block_timing_prometheus.rs b/crates/node/metrics/src/block_timing_prometheus.rs new file mode 100644 index 00000000000..7851dcc91b6 --- /dev/null +++ b/crates/node/metrics/src/block_timing_prometheus.rs @@ -0,0 +1,41 @@ +//! Prometheus metrics for block timing + +use reth_metrics::{ + metrics::Histogram, + Metrics, +}; + +/// Prometheus metrics for block timing. +/// +/// These metrics track the duration of various phases in block production and insertion. 
+#[derive(Metrics, Clone)] +#[metrics(scope = "block.timing")] +pub struct BlockTimingPrometheusMetrics { + /// Time spent applying pre-execution changes during block building + pub build_apply_pre_execution_changes: Histogram, + + /// Time spent executing sequencer transactions during block building + pub build_exec_sequencer_transactions: Histogram, + + /// Time spent selecting/packing mempool transactions during block building + pub build_select_mempool_transactions: Histogram, + + /// Time spent executing mempool transactions during block building + pub build_exec_mempool_transactions: Histogram, + + /// Time spent calculating state root during block building + pub build_calc_state_root: Histogram, + + /// Total time spent in block building phase + pub build_total: Histogram, + + /// Time spent validating and executing the block during insertion + pub insert_validate_and_execute: Histogram, + + /// Time spent inserting to tree state + pub insert_insert_to_tree: Histogram, + + /// Total time spent in block insertion phase + pub insert_total: Histogram, +} + diff --git a/crates/node/metrics/src/lib.rs b/crates/node/metrics/src/lib.rs index b375dff8df8..276c199ac17 100644 --- a/crates/node/metrics/src/lib.rs +++ b/crates/node/metrics/src/lib.rs @@ -18,6 +18,8 @@ pub mod transaction_trace_xlayer; pub mod version; /// Block timing metrics for tracking block production and execution times (X Layer) pub mod block_timing; +#[path = "block_timing_prometheus.rs"] +mod block_timing_prometheus; pub use metrics_exporter_prometheus::*; pub use metrics_process::*; @@ -27,4 +29,8 @@ pub use transaction_trace_xlayer::{ flush_global_tracer, get_global_tracer, init_global_tracer, TransactionProcessId, }; // Re-export block timing module items for convenience -pub use block_timing::{BlockTimingMetrics, BuildTiming, InsertTiming, DeliverTxsTiming, store_block_timing, get_block_timing, remove_block_timing}; +pub use block_timing::{ + BlockTimingContext, BlockTimingMetrics, 
BlockTimingPrometheusMetrics, BuildTiming, + InsertTiming, TimingGuard, get_block_timing, remove_block_timing, + store_block_timing, +}; diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 0b695463006..2ab9ba73eff 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -43,7 +43,7 @@ use reth_transaction_pool::{BestTransactionsAttributes, PoolTransaction, Transac use revm::context::{Block, BlockEnv}; use std::{marker::PhantomData, sync::Arc}; use tracing::{debug, trace, warn}; -use reth_node_metrics::block_timing::{BlockTimingMetrics, BuildTiming, DeliverTxsTiming, store_block_timing}; +use reth_node_metrics::block_timing::{BlockTimingContext, store_block_timing}; use std::time::Instant; /// Optimism's payload builder @@ -363,10 +363,6 @@ impl OpBuilder<'_, Txs> { .unwrap_or_default() .as_millis(); - // X Layer: Initialize timing metrics - let build_start = Instant::now(); - let mut timing_metrics = BlockTimingMetrics::default(); - let mut db = State::builder().with_database(db).with_bundle_update().build(); // Load the L1 block contract into the database cache. If the L1 block contract is not @@ -376,29 +372,42 @@ impl OpBuilder<'_, Txs> { let mut builder = ctx.block_builder(&mut db)?; + // X Layer: Initialize timing context with RAII + // Note: We'll get the block hash after building, so we create an empty context first + // and update it later. For now, we use a placeholder hash. + let build_start = Instant::now(); + let mut timing_ctx = BlockTimingContext::new_empty(B256::ZERO); + // 1. 
apply pre-execution changes - let pre_exec_start = Instant::now(); - builder.apply_pre_execution_changes().map_err(|err| { - warn!(target: "payload_builder", %err, "failed to apply pre-execution changes"); - PayloadBuilderError::Internal(err.into()) - })?; - timing_metrics.build.apply_pre_execution_changes = pre_exec_start.elapsed(); + { + let _guard = timing_ctx.time_apply_pre_execution_changes(); + builder.apply_pre_execution_changes().map_err(|err| { + warn!(target: "payload_builder", %err, "failed to apply pre-execution changes"); + PayloadBuilderError::Internal(err.into()) + })?; + } // 2. execute sequencer transactions - let seq_txs_start = Instant::now(); - let mut info = ctx.execute_sequencer_transactions(&mut builder)?; - let seq_txs_elapsed = seq_txs_start.elapsed(); - timing_metrics.build.execute_sequencer_transactions = seq_txs_elapsed; + let mut info = { + let _guard = timing_ctx.time_exec_sequencer_transactions(); + ctx.execute_sequencer_transactions(&mut builder)? + }; // 3. if mem pool transactions are requested we execute them if !ctx.attributes().no_tx_pool() { - let mempool_txs_start = Instant::now(); - let best_txs = best(ctx.best_transaction_attributes(builder.evm_mut().block())); - if ctx.execute_best_transactions_xlayer(&mut info, &mut builder, best_txs)?.is_some() { - return Ok(BuildOutcomeKind::Cancelled) + // 3.1. select/pack mempool transactions + let best_txs = { + let _guard = timing_ctx.time_select_mempool_transactions(); + best(ctx.best_transaction_attributes(builder.evm_mut().block())) + }; + + // 3.2. 
execute mempool transactions + { + let _guard = timing_ctx.time_exec_mempool_transactions(); + if ctx.execute_best_transactions_xlayer(&mut info, &mut builder, best_txs)?.is_some() { + return Ok(BuildOutcomeKind::Cancelled) + } } - let mempool_txs_elapsed = mempool_txs_start.elapsed(); - timing_metrics.build.execute_mempool_transactions = mempool_txs_elapsed; // check if the new payload is even more valuable if !ctx.is_better_payload(info.total_fees) { @@ -407,13 +416,11 @@ impl OpBuilder<'_, Txs> { } } - let finish_start = Instant::now(); - let BlockBuilderOutcome { execution_result, hashed_state, trie_updates, block } = - builder.finish(state_provider)?; - timing_metrics.build.finish = finish_start.elapsed(); - timing_metrics.build.total = build_start.elapsed(); - // Calculate DeliverTxs total from BuildTiming to avoid duplication - timing_metrics.deliver_txs.total = timing_metrics.build.execute_sequencer_transactions + timing_metrics.build.execute_mempool_transactions; + // 4. finish (calculate state root) + let BlockBuilderOutcome { execution_result, hashed_state, trie_updates, block } = { + let _guard = timing_ctx.time_calc_state_root(); + builder.finish(state_provider)? 
+ }; let sealed_block = Arc::new(block.sealed_block().clone()); debug!(target: "payload_builder", id=%ctx.attributes().payload_id(), sealed_block_header = ?sealed_block.header(), "sealed built block"); @@ -439,8 +446,14 @@ impl OpBuilder<'_, Txs> { let block_hash = sealed_block.hash(); let block_number = sealed_block.number(); - // X Layer: Store timing metrics for this block (will be updated with insert timing later) - store_block_timing(block_hash, timing_metrics.clone()); + // X Layer: Update timing context with actual block hash and total times + timing_ctx.set_block_hash(block_hash); + timing_ctx.metrics_mut().build.total = build_start.elapsed(); + timing_ctx.update_totals(); + + // Store timing metrics for this block (will be updated with insert timing later) + // Context will auto-store on drop, but we can also manually store here + timing_ctx.store(); let payload = OpBuiltPayload::new(ctx.payload_id(), sealed_block, info.total_fees, Some(executed)); From a49cbd1665c5af16b7f42e8289c55c90fb9af631 Mon Sep 17 00:00:00 2001 From: "ruiwei.guo" Date: Mon, 1 Dec 2025 15:56:15 +0800 Subject: [PATCH 2/6] remove comment --- crates/engine/tree/src/tree/mod.rs | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index bf3657cbd66..c5069e14ebc 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -228,15 +228,6 @@ impl OnStateHook for MeteredStateHook { } } -/// Helper function to update block timing metrics for insert phase. -/// -/// This function handles the common pattern of: -/// 1. Getting existing timing metrics (if block was built locally) -/// 2. Creating new metrics (if block was received from network) -/// 3. Setting insert timing values -/// 4. Calculating total as the sum of validate_and_execute + insert_to_tree -/// 5. 
Storing the updated metrics -/// Update insert timing metrics (legacy function, kept for compatibility) /// The engine API tree handler implementation. /// /// This type is responsible for processing engine API requests, maintaining the canonical state and @@ -1401,7 +1392,7 @@ where let insert_tree_elapsed = insert_tree_start.elapsed(); self.metrics.engine.inserted_already_executed_blocks.increment(1); let elapsed = now.elapsed(); - + self.emit_event(EngineApiEvent::BeaconConsensus( ConsensusEngineEvent::CanonicalBlockAdded(block, elapsed), )); @@ -2463,7 +2454,7 @@ where // We now assume that we already have this block in the tree. However, we need to // run the conversion to ensure that the block hash is valid. convert_to_block(self, input)?; - + return Ok(InsertPayloadOk::AlreadySeen(BlockStatus::Valid)) } _ => {} @@ -2511,7 +2502,7 @@ where // X Layer: Track insert timing with RAII let start = Instant::now(); let block_hash = block_num_hash.hash; - + // Create timing context with Prometheus support // We'll create Prometheus metrics from the handler's metrics if needed use reth_node_metrics::block_timing::{BlockTimingContext, BlockTimingPrometheusMetrics}; @@ -2540,7 +2531,7 @@ where // emit insert event let elapsed = start.elapsed(); - + // X Layer: Update timing metrics (auto-updates and stores on drop) timing_ctx.update_totals(); let engine_event = if is_fork { From 417b61bffe30d8558902b3d48b3ddf68711ed9cf Mon Sep 17 00:00:00 2001 From: "ruiwei.guo" Date: Tue, 2 Dec 2025 10:15:44 +0800 Subject: [PATCH 3/6] format --- crates/node/events/src/node.rs | 8 +-- crates/node/metrics/src/block_timing.rs | 71 ++++++++----------- .../metrics/src/block_timing_prometheus.rs | 24 +++---- crates/node/metrics/src/lib.rs | 13 ++-- crates/optimism/payload/src/builder.rs | 18 +++-- 5 files changed, 60 insertions(+), 74 deletions(-) diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index e3af753559f..008912e6852 100644 --- 
a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -253,15 +253,15 @@ impl NodeState { if full.is_nan() { full = 0.0; } - + // X Layer: Get timing metrics for this block use reth_node_metrics::block_timing::{get_block_timing, remove_block_timing}; let block_hash = block.hash(); - + let timing_str = get_block_timing(&block_hash) .map(|metrics| metrics.format_for_log()) .unwrap_or_default(); - + info!( number=block.number(), hash=?block.hash(), @@ -278,7 +278,7 @@ impl NodeState { timing=%timing_str, "Block added to canonical chain" ); - + // Clean up timing metrics after logging remove_block_timing(&block.hash()); } diff --git a/crates/node/metrics/src/block_timing.rs b/crates/node/metrics/src/block_timing.rs index 22cc1ff348c..c7b6771d5e0 100644 --- a/crates/node/metrics/src/block_timing.rs +++ b/crates/node/metrics/src/block_timing.rs @@ -75,10 +75,11 @@ impl BlockTimingMetrics { // Check if block was built locally (has build timing) or received from network let is_locally_built = self.build.total.as_nanos() > 0; - + if is_locally_built { // Block was built locally, show full timing including Build - // Note: Transaction execution times (execSeqTxs, selectMempoolTxs, execMempoolTxs) are already shown in Build phase + // Note: Transaction execution times (execSeqTxs, selectMempoolTxs, execMempoolTxs) are + // already shown in Build phase format!( "Produce[Build[applyPreExec<{}>, execSeqTxs<{}>, selectMempoolTxs<{}>, execMempoolTxs<{}>, calcStateRoot<{}>, total<{}>], Insert[validateExec<{}>, insertTree<{}>, total<{}>]]", format_duration(self.build.apply_pre_execution_changes), @@ -104,7 +105,7 @@ impl BlockTimingMetrics { } /// Global storage for block timing metrics -/// +/// /// Uses IndexMap to maintain insertion order, allowing us to remove the oldest entries /// when the cache exceeds the limit. 
static BLOCK_TIMING_STORE: std::sync::OnceLock>>> = @@ -112,28 +113,26 @@ static BLOCK_TIMING_STORE: std::sync::OnceLock Arc>> { - BLOCK_TIMING_STORE - .get_or_init(|| Arc::new(Mutex::new(IndexMap::new()))) - .clone() + BLOCK_TIMING_STORE.get_or_init(|| Arc::new(Mutex::new(IndexMap::new()))).clone() } /// Store timing metrics for a block -/// +/// /// If the block already exists, it will be updated and moved to the end (most recent). /// When the cache exceeds 1000 entries, the oldest entries are removed. pub fn store_block_timing(block_hash: B256, metrics: BlockTimingMetrics) { let store = get_timing_store(); let mut map = store.lock().unwrap(); - + // If the block already exists, remove it first so it can be re-inserted at the end // This ensures that updated blocks are treated as the most recent if map.contains_key(&block_hash) { map.shift_remove(&block_hash); } - + // Insert at the end (most recent position) map.insert(block_hash, metrics); - + // Clean up old entries to prevent memory leak (keep last 1000 blocks) // IndexMap maintains insertion order, so we can safely remove from the front const MAX_ENTRIES: usize = 1000; @@ -161,8 +160,9 @@ pub fn remove_block_timing(block_hash: &B256) { // RAII-based timing helpers // ============================================================================ -/// RAII guard that automatically records timing to a mutable reference and optionally to Prometheus when dropped. -/// +/// RAII guard that automatically records timing to a mutable reference and optionally to Prometheus +/// when dropped. +/// /// Usage: /// ```rust /// let mut timing = Duration::default(); @@ -180,11 +180,7 @@ pub struct TimingGuard<'a> { impl<'a> TimingGuard<'a> { /// Create a new timing guard that will record the elapsed time to `target` when dropped. 
pub fn new(target: &'a mut Duration) -> Self { - Self { - start: Instant::now(), - target, - prometheus_histogram: None, - } + Self { start: Instant::now(), target, prometheus_histogram: None } } /// Create a new timing guard that records to both `target` and Prometheus histogram. @@ -192,11 +188,7 @@ impl<'a> TimingGuard<'a> { target: &'a mut Duration, prometheus_histogram: &'a metrics::Histogram, ) -> Self { - Self { - start: Instant::now(), - target, - prometheus_histogram: Some(prometheus_histogram), - } + Self { start: Instant::now(), target, prometheus_histogram: Some(prometheus_histogram) } } /// Record the elapsed duration to target and Prometheus (if enabled). @@ -224,10 +216,10 @@ impl<'a> Drop for TimingGuard<'a> { } /// Context for managing block timing metrics using RAII. -/// +/// /// This provides a cleaner API for recording timing metrics throughout the block /// building and insertion process. The context automatically stores metrics when dropped. -/// +/// /// Supports both logging (via IndexMap) and Prometheus metrics recording. pub struct BlockTimingContext { block_hash: B256, @@ -238,34 +230,30 @@ pub struct BlockTimingContext { impl BlockTimingContext { /// Create a new timing context for a block. - /// + /// /// If timing metrics already exist for this block (e.g., from build phase), /// they will be loaded. Otherwise, a new empty metrics structure is created. - /// + /// /// Metrics will be automatically stored when the context is dropped. pub fn new(block_hash: B256) -> Self { Self::with_metrics(block_hash, get_block_timing(&block_hash), None) } /// Create a new timing context for a block, initializing with empty metrics. - /// + /// /// Metrics will be automatically stored when the context is dropped. pub fn new_empty(block_hash: B256) -> Self { Self::with_metrics(block_hash, None, None) } /// Create a new timing context with Prometheus metrics support. - /// + /// /// This enables automatic recording to Prometheus in addition to logging. 
pub fn new_with_prometheus( block_hash: B256, prometheus_metrics: BlockTimingPrometheusMetrics, ) -> Self { - Self::with_metrics( - block_hash, - get_block_timing(&block_hash), - Some(prometheus_metrics), - ) + Self::with_metrics(block_hash, get_block_timing(&block_hash), Some(prometheus_metrics)) } /// Create a new empty timing context with Prometheus metrics support. @@ -402,14 +390,14 @@ impl BlockTimingContext { /// Calculate and update total times. pub fn update_totals(&mut self) { - self.metrics.build.total = self.metrics.build.apply_pre_execution_changes - + self.metrics.build.exec_sequencer_transactions - + self.metrics.build.select_mempool_transactions - + self.metrics.build.exec_mempool_transactions - + self.metrics.build.calc_state_root; - - self.metrics.insert.total = self.metrics.insert.validate_and_execute - + self.metrics.insert.insert_to_tree; + self.metrics.build.total = self.metrics.build.apply_pre_execution_changes + + self.metrics.build.exec_sequencer_transactions + + self.metrics.build.select_mempool_transactions + + self.metrics.build.exec_mempool_transactions + + self.metrics.build.calc_state_root; + + self.metrics.insert.total = + self.metrics.insert.validate_and_execute + self.metrics.insert.insert_to_tree; // Also record totals to Prometheus if enabled if let Some(ref prom_metrics) = self.prometheus_metrics { @@ -427,4 +415,3 @@ impl Drop for BlockTimingContext { } } } - diff --git a/crates/node/metrics/src/block_timing_prometheus.rs b/crates/node/metrics/src/block_timing_prometheus.rs index 7851dcc91b6..3e2788cef76 100644 --- a/crates/node/metrics/src/block_timing_prometheus.rs +++ b/crates/node/metrics/src/block_timing_prometheus.rs @@ -1,41 +1,37 @@ //! Prometheus metrics for block timing -use reth_metrics::{ - metrics::Histogram, - Metrics, -}; +use reth_metrics::{metrics::Histogram, Metrics}; /// Prometheus metrics for block timing. -/// +/// /// These metrics track the duration of various phases in block production and insertion. 
#[derive(Metrics, Clone)] #[metrics(scope = "block.timing")] pub struct BlockTimingPrometheusMetrics { /// Time spent applying pre-execution changes during block building pub build_apply_pre_execution_changes: Histogram, - + /// Time spent executing sequencer transactions during block building pub build_exec_sequencer_transactions: Histogram, - + /// Time spent selecting/packing mempool transactions during block building pub build_select_mempool_transactions: Histogram, - + /// Time spent executing mempool transactions during block building pub build_exec_mempool_transactions: Histogram, - + /// Time spent calculating state root during block building pub build_calc_state_root: Histogram, - + /// Total time spent in block building phase pub build_total: Histogram, - + /// Time spent validating and executing the block during insertion pub insert_validate_and_execute: Histogram, - + /// Time spent inserting to tree state pub insert_insert_to_tree: Histogram, - + /// Total time spent in block insertion phase pub insert_total: Histogram, } - diff --git a/crates/node/metrics/src/lib.rs b/crates/node/metrics/src/lib.rs index 276c199ac17..d76b6567ec7 100644 --- a/crates/node/metrics/src/lib.rs +++ b/crates/node/metrics/src/lib.rs @@ -7,6 +7,10 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg))] +/// Block timing metrics for tracking block production and execution times (X Layer) +pub mod block_timing; +#[path = "block_timing_prometheus.rs"] +mod block_timing_prometheus; pub mod chain; /// The metrics hooks for prometheus. 
pub mod hooks; @@ -16,10 +20,6 @@ pub mod server; /// Transaction tracing for monitoring transaction lifecycle (X Layer) pub mod transaction_trace_xlayer; pub mod version; -/// Block timing metrics for tracking block production and execution times (X Layer) -pub mod block_timing; -#[path = "block_timing_prometheus.rs"] -mod block_timing_prometheus; pub use metrics_exporter_prometheus::*; pub use metrics_process::*; @@ -30,7 +30,6 @@ pub use transaction_trace_xlayer::{ }; // Re-export block timing module items for convenience pub use block_timing::{ - BlockTimingContext, BlockTimingMetrics, BlockTimingPrometheusMetrics, BuildTiming, - InsertTiming, TimingGuard, get_block_timing, remove_block_timing, - store_block_timing, + get_block_timing, remove_block_timing, store_block_timing, BlockTimingContext, + BlockTimingMetrics, BlockTimingPrometheusMetrics, BuildTiming, InsertTiming, TimingGuard, }; diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 2ab9ba73eff..f7eff75b01d 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -20,7 +20,10 @@ use reth_evm::{ ConfigureEvm, Database, }; use reth_execution_types::ExecutionOutcome; -use reth_node_metrics::transaction_trace_xlayer::{get_global_tracer, TransactionProcessId}; +use reth_node_metrics::{ + block_timing::{store_block_timing, BlockTimingContext}, + transaction_trace_xlayer::{get_global_tracer, TransactionProcessId}, +}; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::{transaction::OpTransaction, ADDRESS_L2_TO_L1_MESSAGE_PASSER}; use reth_optimism_txpool::{ @@ -41,10 +44,8 @@ use reth_revm::{ use reth_storage_api::{errors::ProviderError, StateProvider, StateProviderFactory}; use reth_transaction_pool::{BestTransactionsAttributes, PoolTransaction, TransactionPool}; use revm::context::{Block, BlockEnv}; -use std::{marker::PhantomData, sync::Arc}; +use std::{marker::PhantomData, sync::Arc, time::Instant}; use 
tracing::{debug, trace, warn}; -use reth_node_metrics::block_timing::{BlockTimingContext, store_block_timing}; -use std::time::Instant; /// Optimism's payload builder #[derive(Debug)] @@ -400,11 +401,14 @@ impl OpBuilder<'_, Txs> { let _guard = timing_ctx.time_select_mempool_transactions(); best(ctx.best_transaction_attributes(builder.evm_mut().block())) }; - + // 3.2. execute mempool transactions { let _guard = timing_ctx.time_exec_mempool_transactions(); - if ctx.execute_best_transactions_xlayer(&mut info, &mut builder, best_txs)?.is_some() { + if ctx + .execute_best_transactions_xlayer(&mut info, &mut builder, best_txs)? + .is_some() + { return Ok(BuildOutcomeKind::Cancelled) } } @@ -450,7 +454,7 @@ impl OpBuilder<'_, Txs> { timing_ctx.set_block_hash(block_hash); timing_ctx.metrics_mut().build.total = build_start.elapsed(); timing_ctx.update_totals(); - + // Store timing metrics for this block (will be updated with insert timing later) // Context will auto-store on drop, but we can also manually store here timing_ctx.store(); From f1ea7185543d150a6dfe2b071f12ed32550d889f Mon Sep 17 00:00:00 2001 From: "ruiwei.guo" Date: Wed, 3 Dec 2025 13:55:41 +0800 Subject: [PATCH 4/6] fix logic of prometheus --- crates/optimism/payload/src/builder.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index f7eff75b01d..bcb265d6959 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -21,7 +21,7 @@ use reth_evm::{ }; use reth_execution_types::ExecutionOutcome; use reth_node_metrics::{ - block_timing::{store_block_timing, BlockTimingContext}, + block_timing::{store_block_timing, BlockTimingContext, BlockTimingPrometheusMetrics}, transaction_trace_xlayer::{get_global_tracer, TransactionProcessId}, }; use reth_optimism_forks::OpHardforks; @@ -373,11 +373,13 @@ impl OpBuilder<'_, Txs> { let mut builder = ctx.block_builder(&mut db)?;
- // X Layer: Initialize timing context with RAII + // X Layer: Initialize timing context with RAII and Prometheus support // Note: We'll get the block hash after building, so we create an empty context first // and update it later. For now, we use a placeholder hash. let build_start = Instant::now(); - let mut timing_ctx = BlockTimingContext::new_empty(B256::ZERO); + let prom_metrics = BlockTimingPrometheusMetrics::default(); + let mut timing_ctx = + BlockTimingContext::new_empty_with_prometheus(B256::ZERO, prom_metrics); // 1. apply pre-execution changes { From 5d20a389daa5a8aa4a32fa059780b642ee58a180 Mon Sep 17 00:00:00 2001 From: "ruiwei.guo" Date: Wed, 3 Dec 2025 17:11:51 +0800 Subject: [PATCH 5/6] fix --- crates/node/metrics/src/block_timing.rs | 128 ++++++++++++++---------- crates/optimism/payload/src/builder.rs | 3 +- 2 files changed, 78 insertions(+), 53 deletions(-) diff --git a/crates/node/metrics/src/block_timing.rs b/crates/node/metrics/src/block_timing.rs index c7b6771d5e0..4c7b1338c71 100644 --- a/crates/node/metrics/src/block_timing.rs +++ b/crates/node/metrics/src/block_timing.rs @@ -172,7 +172,7 @@ pub fn remove_block_timing(block_hash: &B256) { /// } // Guard is dropped here and timing is automatically recorded /// ``` pub struct TimingGuard<'a> { - start: Instant, + start: Option, target: &'a mut Duration, prometheus_histogram: Option<&'a metrics::Histogram>, } @@ -180,7 +180,7 @@ pub struct TimingGuard<'a> { impl<'a> TimingGuard<'a> { /// Create a new timing guard that will record the elapsed time to `target` when dropped. pub fn new(target: &'a mut Duration) -> Self { - Self { start: Instant::now(), target, prometheus_histogram: None } + Self { start: Some(Instant::now()), target, prometheus_histogram: None } } /// Create a new timing guard that records to both `target` and Prometheus histogram. 
@@ -188,30 +188,46 @@ impl<'a> TimingGuard<'a> { target: &'a mut Duration, prometheus_histogram: &'a metrics::Histogram, ) -> Self { - Self { start: Instant::now(), target, prometheus_histogram: Some(prometheus_histogram) } + Self { + start: Some(Instant::now()), + target, + prometheus_histogram: Some(prometheus_histogram), + } } /// Record the elapsed duration to target and Prometheus (if enabled). - fn record(&mut self) { - let duration = self.start.elapsed(); + /// Returns `None` if already recorded. + fn record(&mut self) -> Option { + let start = self.start.take()?; // If None, already recorded + let duration = start.elapsed(); *self.target = duration; if let Some(histogram) = self.prometheus_histogram { histogram.record(duration.as_secs_f64()); } + Some(duration) } /// Manually finish timing and return the duration. /// This updates the target immediately and prevents the drop handler from running. pub fn finish(mut self) -> Duration { - let duration = self.start.elapsed(); - self.record(); - duration + // Calculate duration once and use it for both recording and return + if let Some(start) = self.start.take() { + let duration = start.elapsed(); + *self.target = duration; + if let Some(histogram) = self.prometheus_histogram { + histogram.record(duration.as_secs_f64()); + } + duration + } else { + // Already recorded, return the current target value + *self.target + } } } impl<'a> Drop for TimingGuard<'a> { fn drop(&mut self) { - self.record(); + let _ = self.record(); // Ignore return value, just ensure recording happens } } @@ -295,85 +311,78 @@ impl BlockTimingContext { /// Create a timing guard for recording build phase: apply pre-execution changes. 
pub fn time_apply_pre_execution_changes(&mut self) -> TimingGuard<'_> { - if let Some(ref prom_metrics) = self.prometheus_metrics { - TimingGuard::new_with_prometheus( + match &self.prometheus_metrics { + Some(prom_metrics) => TimingGuard::new_with_prometheus( &mut self.metrics.build.apply_pre_execution_changes, &prom_metrics.build_apply_pre_execution_changes, - ) - } else { - TimingGuard::new(&mut self.metrics.build.apply_pre_execution_changes) + ), + None => TimingGuard::new(&mut self.metrics.build.apply_pre_execution_changes), } } /// Create a timing guard for recording build phase: execute sequencer transactions. pub fn time_exec_sequencer_transactions(&mut self) -> TimingGuard<'_> { - if let Some(ref prom_metrics) = self.prometheus_metrics { - TimingGuard::new_with_prometheus( + match &self.prometheus_metrics { + Some(prom_metrics) => TimingGuard::new_with_prometheus( &mut self.metrics.build.exec_sequencer_transactions, &prom_metrics.build_exec_sequencer_transactions, - ) - } else { - TimingGuard::new(&mut self.metrics.build.exec_sequencer_transactions) + ), + None => TimingGuard::new(&mut self.metrics.build.exec_sequencer_transactions), } } /// Create a timing guard for recording build phase: select/pack mempool transactions. pub fn time_select_mempool_transactions(&mut self) -> TimingGuard<'_> { - if let Some(ref prom_metrics) = self.prometheus_metrics { - TimingGuard::new_with_prometheus( + match &self.prometheus_metrics { + Some(prom_metrics) => TimingGuard::new_with_prometheus( &mut self.metrics.build.select_mempool_transactions, &prom_metrics.build_select_mempool_transactions, - ) - } else { - TimingGuard::new(&mut self.metrics.build.select_mempool_transactions) + ), + None => TimingGuard::new(&mut self.metrics.build.select_mempool_transactions), } } /// Create a timing guard for recording build phase: execute mempool transactions. 
pub fn time_exec_mempool_transactions(&mut self) -> TimingGuard<'_> { - if let Some(ref prom_metrics) = self.prometheus_metrics { - TimingGuard::new_with_prometheus( + match &self.prometheus_metrics { + Some(prom_metrics) => TimingGuard::new_with_prometheus( &mut self.metrics.build.exec_mempool_transactions, &prom_metrics.build_exec_mempool_transactions, - ) - } else { - TimingGuard::new(&mut self.metrics.build.exec_mempool_transactions) + ), + None => TimingGuard::new(&mut self.metrics.build.exec_mempool_transactions), } } /// Create a timing guard for recording build phase: calculate state root. pub fn time_calc_state_root(&mut self) -> TimingGuard<'_> { - if let Some(ref prom_metrics) = self.prometheus_metrics { - TimingGuard::new_with_prometheus( + match &self.prometheus_metrics { + Some(prom_metrics) => TimingGuard::new_with_prometheus( &mut self.metrics.build.calc_state_root, &prom_metrics.build_calc_state_root, - ) - } else { - TimingGuard::new(&mut self.metrics.build.calc_state_root) + ), + None => TimingGuard::new(&mut self.metrics.build.calc_state_root), } } /// Create a timing guard for recording insert phase: validate and execute. pub fn time_validate_and_execute(&mut self) -> TimingGuard<'_> { - if let Some(ref prom_metrics) = self.prometheus_metrics { - TimingGuard::new_with_prometheus( + match &self.prometheus_metrics { + Some(prom_metrics) => TimingGuard::new_with_prometheus( &mut self.metrics.insert.validate_and_execute, &prom_metrics.insert_validate_and_execute, - ) - } else { - TimingGuard::new(&mut self.metrics.insert.validate_and_execute) + ), + None => TimingGuard::new(&mut self.metrics.insert.validate_and_execute), } } /// Create a timing guard for recording insert phase: insert to tree. 
pub fn time_insert_to_tree(&mut self) -> TimingGuard<'_> { - if let Some(ref prom_metrics) = self.prometheus_metrics { - TimingGuard::new_with_prometheus( + match &self.prometheus_metrics { + Some(prom_metrics) => TimingGuard::new_with_prometheus( &mut self.metrics.insert.insert_to_tree, &prom_metrics.insert_insert_to_tree, - ) - } else { - TimingGuard::new(&mut self.metrics.insert.insert_to_tree) + ), + None => TimingGuard::new(&mut self.metrics.insert.insert_to_tree), } } @@ -388,21 +397,36 @@ impl BlockTimingContext { self.auto_store = auto_store; } - /// Calculate and update total times. - pub fn update_totals(&mut self) { - self.metrics.build.total = self.metrics.build.apply_pre_execution_changes + + /// Calculate total build time from individual components. + fn calculate_build_total(&self) -> Duration { + self.metrics.build.apply_pre_execution_changes + self.metrics.build.exec_sequencer_transactions + self.metrics.build.select_mempool_transactions + self.metrics.build.exec_mempool_transactions + - self.metrics.build.calc_state_root; + self.metrics.build.calc_state_root + } + + /// Calculate total insert time from individual components. + fn calculate_insert_total(&self) -> Duration { + self.metrics.insert.validate_and_execute + self.metrics.insert.insert_to_tree + } - self.metrics.insert.total = - self.metrics.insert.validate_and_execute + self.metrics.insert.insert_to_tree; + /// Calculate and update total times. + /// + /// This method can be safely called multiple times - it will only record to Prometheus once. + /// The totals are recalculated each time, but Prometheus recording happens only on the first + /// call. 
+ pub fn update_totals(&mut self) { + self.metrics.build.total = self.calculate_build_total(); + self.metrics.insert.total = self.calculate_insert_total(); - // Also record totals to Prometheus if enabled - if let Some(ref prom_metrics) = self.prometheus_metrics { + // Record totals to Prometheus if enabled, then consume the metrics to prevent double + // recording + if let Some(prom_metrics) = self.prometheus_metrics.take() { prom_metrics.build_total.record(self.metrics.build.total.as_secs_f64()); prom_metrics.insert_total.record(self.metrics.insert.total.as_secs_f64()); + // Note: We don't restore prometheus_metrics here because we only want to record once + // If metrics are needed later, they should be passed again } } } diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index bcb265d6959..4427710b7e0 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -454,7 +454,8 @@ impl OpBuilder<'_, Txs> { // X Layer: Update timing context with actual block hash and total times timing_ctx.set_block_hash(block_hash); - timing_ctx.metrics_mut().build.total = build_start.elapsed(); + // Note: update_totals() will calculate build.total from individual components, + // so we don't need to manually set it here timing_ctx.update_totals(); // Store timing metrics for this block (will be updated with insert timing later) From 10cc98bc25980c6c13dc869914942c6e9d6f09a8 Mon Sep 17 00:00:00 2001 From: "ruiwei.guo" Date: Wed, 3 Dec 2025 17:16:42 +0800 Subject: [PATCH 6/6] optimize comment --- crates/node/metrics/src/block_timing.rs | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/crates/node/metrics/src/block_timing.rs b/crates/node/metrics/src/block_timing.rs index 4c7b1338c71..66f1b968ca6 100644 --- a/crates/node/metrics/src/block_timing.rs +++ b/crates/node/metrics/src/block_timing.rs @@ -195,10 +195,8 @@ impl<'a> TimingGuard<'a> { } } - /// Record the elapsed 
duration to target and Prometheus (if enabled). - /// Returns `None` if already recorded. fn record(&mut self) -> Option { - let start = self.start.take()?; // If None, already recorded + let start = self.start.take()?; let duration = start.elapsed(); *self.target = duration; if let Some(histogram) = self.prometheus_histogram { @@ -208,9 +206,7 @@ impl<'a> TimingGuard<'a> { } /// Manually finish timing and return the duration. - /// This updates the target immediately and prevents the drop handler from running. pub fn finish(mut self) -> Duration { - // Calculate duration once and use it for both recording and return if let Some(start) = self.start.take() { let duration = start.elapsed(); *self.target = duration; @@ -219,7 +215,6 @@ impl<'a> TimingGuard<'a> { } duration } else { - // Already recorded, return the current target value *self.target } } @@ -227,7 +222,7 @@ impl<'a> TimingGuard<'a> { impl<'a> Drop for TimingGuard<'a> { fn drop(&mut self) { - let _ = self.record(); // Ignore return value, just ensure recording happens + let _ = self.record(); } } @@ -412,21 +407,13 @@ impl BlockTimingContext { } /// Calculate and update total times. - /// - /// This method can be safely called multiple times - it will only record to Prometheus once. - /// The totals are recalculated each time, but Prometheus recording happens only on the first - /// call. pub fn update_totals(&mut self) { self.metrics.build.total = self.calculate_build_total(); self.metrics.insert.total = self.calculate_insert_total(); - // Record totals to Prometheus if enabled, then consume the metrics to prevent double - // recording if let Some(prom_metrics) = self.prometheus_metrics.take() { prom_metrics.build_total.record(self.metrics.build.total.as_secs_f64()); prom_metrics.insert_total.record(self.metrics.insert.total.as_secs_f64()); - // Note: We don't restore prometheus_metrics here because we only want to record once - // If metrics are needed later, they should be passed again } } }