diff --git a/.github/workflows/checks.yaml b/.github/workflows/checks.yaml index 9df920ad6..00d22b6a1 100644 --- a/.github/workflows/checks.yaml +++ b/.github/workflows/checks.yaml @@ -135,12 +135,6 @@ jobs: - name: Run integration tests run: cargo test --package op-rbuilder --lib - - name: Build flashblocks rbuilder - run: cargo build -p op-rbuilder --bin op-rbuilder --features flashblocks - - - name: Run flashblocks builder integration tests - run: cargo test --package op-rbuilder --lib --features flashblocks - - name: Aggregate playground logs # This steps fails if the test fails early and the playground logs dir has not been created if: ${{ failure() }} diff --git a/Makefile b/Makefile index 8fd76b92f..00c53c927 100644 --- a/Makefile +++ b/Makefile @@ -51,7 +51,6 @@ lint: ## Run the linters test: ## Run the tests for rbuilder and op-rbuilder cargo test --verbose --features "$(FEATURES)" cargo test -p op-rbuilder --verbose --features "$(FEATURES)" - cargo test -p op-rbuilder --verbose --features "$(FEATURES),flashblocks" .PHONY: lt lt: lint test ## Run "lint" and "test" diff --git a/crates/op-rbuilder/Cargo.toml b/crates/op-rbuilder/Cargo.toml index 595beaf44..40c9d6cdb 100644 --- a/crates/op-rbuilder/Cargo.toml +++ b/crates/op-rbuilder/Cargo.toml @@ -132,7 +132,6 @@ min-debug-logs = ["tracing/release_max_level_debug"] min-trace-logs = ["tracing/release_max_level_trace"] testing = [] -flashblocks = [] [[bin]] name = "op-rbuilder" diff --git a/crates/op-rbuilder/src/args/mod.rs b/crates/op-rbuilder/src/args/mod.rs index 254752062..5c87428f3 100644 --- a/crates/op-rbuilder/src/args/mod.rs +++ b/crates/op-rbuilder/src/args/mod.rs @@ -1,5 +1,81 @@ +use crate::builders::BuilderMode; +use clap::Parser; +pub use op::OpRbuilderArgs; +use playground::PlaygroundOptions; +use reth_optimism_cli::{chainspec::OpChainSpecParser, commands::Commands}; + mod op; mod playground; -pub use op::OpRbuilderArgs; -pub use playground::CliExt; +/// This trait is used to extend Reth's CLI with additional functionality that +/// are specific to the OP builder, such as populating default values for CLI arguments +/// when running in the playground mode or checking the builder mode. +/// +pub trait CliExt { + /// Populates the default values for the CLI arguments when the user specifies + /// the `--builder.playground` flag. + fn populate_defaults(self) -> Self; + + /// Returns the builder mode that the node is started with. + fn builder_mode(&self) -> BuilderMode; + + /// Returns the Cli instance with the parsed command line arguments + /// and defaults populated if applicable. + fn parsed() -> Self; +} + +pub type Cli = reth_optimism_cli::Cli; + +impl CliExt for Cli { + /// Checks if the node is started with the `--builder.playground` flag, + /// and if so, populates the default values for the CLI arguments from the + /// playground configuration. + /// + /// The `--builder.playground` flag is used to populate the CLI arguments with + /// default values for running the builder against the playground environment. + /// + /// The values are populated from the default directory of the playground + /// configuration, which is `$HOME/.playground/devnet/` by default. + /// + /// Any manually specified CLI arguments by the user will override the defaults. + fn populate_defaults(self) -> Self { + let Commands::Node(ref node_command) = self.command else { + // playground defaults are only relevant if running the node commands. 
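+ // any other subcommand is returned unchanged.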
+ return self; + }; + + let Some(ref playground_dir) = node_command.ext.playground else { + // not running in playground mode. + return self; + }; + + let options = match PlaygroundOptions::new(playground_dir) { + Ok(options) => options, + Err(e) => exit(e), + }; + + options.apply(self) + } + + fn parsed() -> Self { + Cli::parse().populate_defaults() + } + + /// Returns the type of builder implementation that the node is started with. + /// Currently supports `Standard` and `Flashblocks` modes. + fn builder_mode(&self) -> BuilderMode { + if let Commands::Node(ref node_command) = self.command { + if node_command.ext.enable_flashblocks { + return BuilderMode::Flashblocks; + } + } + BuilderMode::Standard + } +} + +/// Following clap's convention, a failure to parse the command line arguments +/// will result in terminating the program with a non-zero exit code. +fn exit(error: eyre::Report) -> ! { + eprintln!("{error}"); + std::process::exit(-1); +} diff --git a/crates/op-rbuilder/src/args/op.rs b/crates/op-rbuilder/src/args/op.rs index 5818a5c1d..44dcae78c 100644 --- a/crates/op-rbuilder/src/args/op.rs +++ b/crates/op-rbuilder/src/args/op.rs @@ -19,6 +19,19 @@ pub struct OpRbuilderArgs { /// Builder secret key for signing last transaction in block #[arg(long = "rollup.builder-secret-key", env = "BUILDER_SECRET_KEY")] pub builder_signer: Option, + + /// When set to true, the builder will build flashblocks + /// and will build standard blocks at the chain block time. + /// + /// The default value will change in the future once the flashblocks + /// feature is stable. + #[arg( + long = "rollup.enable-flashblocks", + default_value = "false", + env = "ENABLE_FLASHBLOCKS" + )] + pub enable_flashblocks: bool, + /// Websocket port for flashblock payload builder #[arg( long = "rollup.flashblocks-ws-url", diff --git a/crates/op-rbuilder/src/args/playground.rs b/crates/op-rbuilder/src/args/playground.rs index 6898eb9e6..b02a0f4cb 100644 --- a/crates/op-rbuilder/src/args/playground.rs +++ b/crates/op-rbuilder/src/args/playground.rs @@ -27,7 +27,6 @@ //! directory to use. This is useful for testing against different playground //! configurations. -use super::OpRbuilderArgs; use alloy_primitives::hex; use clap::{parser::ValueSource, CommandFactory}; use core::{ @@ -49,47 +48,9 @@ use std::{ }; use url::{Host, Url}; -/// This trait is used to extend Reth's CLI with additional functionality that -/// populates the default values for the command line arguments when the user -/// specifies that they want to use the playground. -/// -/// The `--builder.playground` flag is used to populate the CLI arguments with -/// default values for running the builder against the playground environment. -/// -/// The values are populated from the default directory of the playground -/// configuration, which is `$HOME/.playground/devnet/` by default. -/// -/// Any manually specified CLI arguments by the user will override the defaults. -pub trait CliExt { - /// Populates the default values for the CLI arguments when the user specifies - /// the `--builder.playground` flag. - fn populate_defaults(self) -> Self; -} - -type Cli = reth_optimism_cli::Cli; - -impl CliExt for Cli { - fn populate_defaults(self) -> Self { - let Commands::Node(ref node_command) = self.command else { - // playground defaults are only relevant if running the node commands. - return self; - }; - - let Some(ref playground_dir) = node_command.ext.playground else { - // not running in playground mode. 
- return self; - }; +use super::Cli; - let options = match PlaygroundOptions::new(playground_dir) { - Ok(options) => options, - Err(e) => exit(e), - }; - - options.apply(self) - } -} - -struct PlaygroundOptions { +pub struct PlaygroundOptions { /// Sets node.chain in NodeCommand pub chain: Arc, @@ -210,13 +171,6 @@ impl PlaygroundOptions { } } -/// Following clap's convention, a failure to parse the command line arguments -/// will result in terminating the program with a non-zero exit code. -fn exit(error: eyre::Report) -> ! { - eprintln!("{error}"); - std::process::exit(-1); -} - fn existing_path(base: &Path, relative: &str) -> Result { let path = base.join(relative); if path.exists() { diff --git a/crates/op-rbuilder/src/builders/context.rs b/crates/op-rbuilder/src/builders/context.rs new file mode 100644 index 000000000..04e631587 --- /dev/null +++ b/crates/op-rbuilder/src/builders/context.rs @@ -0,0 +1,558 @@ +use alloy_consensus::{Eip658Value, Transaction, TxEip1559}; +use alloy_eips::Typed2718; +use alloy_op_evm::block::receipt_builder::OpReceiptBuilder; +use alloy_primitives::{private::alloy_rlp::Encodable, Address, Bytes, TxKind, U256}; +use alloy_rpc_types_eth::Withdrawals; +use core::fmt::Debug; +use op_alloy_consensus::{OpDepositReceipt, OpTypedTransaction}; +use op_revm::OpSpecId; +use reth::payload::PayloadBuilderAttributes; +use reth_basic_payload_builder::PayloadConfig; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_evm::{ + eth::receipt_builder::ReceiptBuilderCtx, ConfigureEvm, Evm, EvmEnv, EvmError, InvalidTxError, +}; +use reth_node_api::PayloadBuilderError; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_evm::{OpEvmConfig, OpNextBlockEnvAttributes}; +use reth_optimism_forks::OpHardforks; +use reth_optimism_node::OpPayloadBuilderAttributes; +use reth_optimism_payload_builder::{config::OpDAConfig, error::OpPayloadBuilderError}; +use reth_optimism_primitives::{OpReceipt, OpTransactionSigned}; +use reth_payload_builder::PayloadId; +use reth_primitives::{Recovered, SealedHeader}; +use reth_primitives_traits::{InMemorySize, SignedTransaction}; +use reth_provider::ProviderError; +use reth_revm::State; +use reth_transaction_pool::{BestTransactionsAttributes, PoolTransaction}; +use revm::{ + context::{result::ResultAndState, Block}, + Database, DatabaseCommit, +}; +use std::{sync::Arc, time::Instant}; +use tokio_util::sync::CancellationToken; +use tracing::{info, trace, warn}; + +use crate::{ + metrics::OpRBuilderMetrics, primitives::reth::ExecutionInfo, traits::PayloadTxsBounds, + tx::MaybeRevertingTransaction, tx_signer::Signer, +}; + +/// Container type that holds all necessities to build a new payload. +#[derive(Debug)] +pub struct OpPayloadBuilderCtx { + /// The type that knows how to perform system calls and configure the evm. + pub evm_config: OpEvmConfig, + /// The DA config for the payload builder + pub da_config: OpDAConfig, + /// The chainspec + pub chain_spec: Arc, + /// How to build the payload. + pub config: PayloadConfig>, + /// Evm Settings + pub evm_env: EvmEnv, + /// Block env attributes for the current block. + pub block_env_attributes: OpNextBlockEnvAttributes, + /// Marker to check whether the job has been cancelled. + pub cancel: CancellationToken, + /// The builder signer + pub builder_signer: Option, + /// The metrics for the builder + pub metrics: Arc, +} + +impl OpPayloadBuilderCtx { + /// Returns the parent block the payload will be build on. 
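+ /// (taken from `PayloadConfig::parent_header`).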
+ pub fn parent(&self) -> &SealedHeader { + &self.config.parent_header + } + + /// Returns the builder attributes. + pub const fn attributes(&self) -> &OpPayloadBuilderAttributes { + &self.config.attributes + } + + /// Returns the withdrawals if shanghai is active. + pub fn withdrawals(&self) -> Option<&Withdrawals> { + self.chain_spec + .is_shanghai_active_at_timestamp(self.attributes().timestamp()) + .then(|| &self.attributes().payload_attributes.withdrawals) + } + + /// Returns the block gas limit to target. + pub fn block_gas_limit(&self) -> u64 { + self.attributes() + .gas_limit + .unwrap_or(self.evm_env.block_env.gas_limit) + } + + /// Returns the block number for the block. + pub fn block_number(&self) -> u64 { + self.evm_env.block_env.number + } + + /// Returns the current base fee + pub fn base_fee(&self) -> u64 { + self.evm_env.block_env.basefee + } + + /// Returns the current blob gas price. + pub fn get_blob_gasprice(&self) -> Option { + self.evm_env + .block_env + .blob_gasprice() + .map(|gasprice| gasprice as u64) + } + + /// Returns the blob fields for the header. + /// + /// This will always return `Some(0)` after ecotone. + pub fn blob_fields(&self) -> (Option, Option) { + // OP doesn't support blobs/EIP-4844. + // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions + // Need [Some] or [None] based on hardfork to match block hash. + if self.is_ecotone_active() { + (Some(0), Some(0)) + } else { + (None, None) + } + } + + /// Returns the extra data for the block. + /// + /// After holocene this extracts the extradata from the paylpad + pub fn extra_data(&self) -> Result { + if self.is_holocene_active() { + self.attributes() + .get_holocene_extra_data( + self.chain_spec.base_fee_params_at_timestamp( + self.attributes().payload_attributes.timestamp, + ), + ) + .map_err(PayloadBuilderError::other) + } else { + Ok(Default::default()) + } + } + + /// Returns the current fee settings for transactions from the mempool + pub fn best_transaction_attributes(&self) -> BestTransactionsAttributes { + BestTransactionsAttributes::new(self.base_fee(), self.get_blob_gasprice()) + } + + /// Returns the unique id for this payload job. + pub fn payload_id(&self) -> PayloadId { + self.attributes().payload_id() + } + + /// Returns true if regolith is active for the payload. + pub fn is_regolith_active(&self) -> bool { + self.chain_spec + .is_regolith_active_at_timestamp(self.attributes().timestamp()) + } + + /// Returns true if ecotone is active for the payload. + pub fn is_ecotone_active(&self) -> bool { + self.chain_spec + .is_ecotone_active_at_timestamp(self.attributes().timestamp()) + } + + /// Returns true if canyon is active for the payload. + pub fn is_canyon_active(&self) -> bool { + self.chain_spec + .is_canyon_active_at_timestamp(self.attributes().timestamp()) + } + + /// Returns true if holocene is active for the payload. + pub fn is_holocene_active(&self) -> bool { + self.chain_spec + .is_holocene_active_at_timestamp(self.attributes().timestamp()) + } + + /// Returns true if isthmus is active for the payload. + pub fn is_isthmus_active(&self) -> bool { + self.chain_spec + .is_isthmus_active_at_timestamp(self.attributes().timestamp()) + } + + /// Returns the chain id + pub fn chain_id(&self) -> u64 { + self.chain_spec.chain_id() + } + + /// Returns the builder signer + pub fn builder_signer(&self) -> Option { + self.builder_signer + } +} + +impl OpPayloadBuilderCtx { + /// Constructs a receipt for the given transaction. 
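+ /// Falls back to building a deposit receipt (with the cached depositor nonce) when the
+ /// standard receipt builder hands the context back, which is the case for deposit transactions.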
+ fn build_receipt( + &self, + ctx: ReceiptBuilderCtx<'_, OpTransactionSigned, E>, + deposit_nonce: Option, + ) -> OpReceipt { + let receipt_builder = self.evm_config.block_executor_factory().receipt_builder(); + match receipt_builder.build_receipt(ctx) { + Ok(receipt) => receipt, + Err(ctx) => { + let receipt = alloy_consensus::Receipt { + // Success flag was added in `EIP-658: Embedding transaction status code + // in receipts`. + status: Eip658Value::Eip658(ctx.result.is_success()), + cumulative_gas_used: ctx.cumulative_gas_used, + logs: ctx.result.into_logs(), + }; + + receipt_builder.build_deposit_receipt(OpDepositReceipt { + inner: receipt, + deposit_nonce, + // The deposit receipt version was introduced in Canyon to indicate an + // update to how receipt hashes should be computed + // when set. The state transition process ensures + // this is only set for post-Canyon deposit + // transactions. + deposit_receipt_version: self.is_canyon_active().then_some(1), + }) + } + } + } + + /// Executes all sequencer transactions that are included in the payload attributes. + pub fn execute_sequencer_transactions( + &self, + db: &mut State, + ) -> Result, PayloadBuilderError> + where + DB: Database, + { + let mut info = ExecutionInfo::with_capacity(self.attributes().transactions.len()); + + let mut evm = self.evm_config.evm_with_env(&mut *db, self.evm_env.clone()); + + for sequencer_tx in &self.attributes().transactions { + // A sequencer's block should never contain blob transactions. + if sequencer_tx.value().is_eip4844() { + return Err(PayloadBuilderError::other( + OpPayloadBuilderError::BlobTransactionRejected, + )); + } + + // Convert the transaction to a [Recovered]. This is + // purely for the purposes of utilizing the `evm_config.tx_env`` function. + // Deposit transactions do not have signatures, so if the tx is a deposit, this + // will just pull in its `from` address. + let sequencer_tx = sequencer_tx + .value() + .try_clone_into_recovered() + .map_err(|_| { + PayloadBuilderError::other(OpPayloadBuilderError::TransactionEcRecoverFailed) + })?; + + // Cache the depositor account prior to the state transition for the deposit nonce. + // + // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces + // were not introduced in Bedrock. In addition, regular transactions don't have deposit + // nonces, so we don't need to touch the DB for those. 
+ let depositor_nonce = (self.is_regolith_active() && sequencer_tx.is_deposit()) + .then(|| { + evm.db_mut() + .load_cache_account(sequencer_tx.signer()) + .map(|acc| acc.account_info().unwrap_or_default().nonce) + }) + .transpose() + .map_err(|_| { + PayloadBuilderError::other(OpPayloadBuilderError::AccountLoadFailed( + sequencer_tx.signer(), + )) + })?; + + let ResultAndState { result, state } = match evm.transact(&sequencer_tx) { + Ok(res) => res, + Err(err) => { + if err.is_invalid_tx_err() { + trace!(target: "payload_builder", %err, ?sequencer_tx, "Error in sequencer transaction, skipping."); + continue; + } + // this is an error that we should treat as fatal for this attempt + return Err(PayloadBuilderError::EvmExecutionError(Box::new(err))); + } + }; + + // add gas used by the transaction to cumulative gas used, before creating the receipt + let gas_used = result.gas_used(); + info.cumulative_gas_used += gas_used; + + let ctx = ReceiptBuilderCtx { + tx: sequencer_tx.inner(), + evm: &evm, + result, + state: &state, + cumulative_gas_used: info.cumulative_gas_used, + }; + info.receipts.push(self.build_receipt(ctx, depositor_nonce)); + + // commit changes + evm.db_mut().commit(state); + + // append sender and transaction to the respective lists + info.executed_senders.push(sequencer_tx.signer()); + info.executed_transactions.push(sequencer_tx.into_inner()); + } + + Ok(info) + } + + /// Executes the given best transactions and updates the execution info. + /// + /// Returns `Ok(Some(())` if the job was cancelled. + pub fn execute_best_transactions( + &self, + info: &mut ExecutionInfo, + db: &mut State, + mut best_txs: impl PayloadTxsBounds, + block_gas_limit: u64, + block_da_limit: Option, + ) -> Result, PayloadBuilderError> + where + DB: Database, + { + let execute_txs_start_time = Instant::now(); + let mut num_txs_considered = 0; + let mut num_txs_simulated = 0; + let mut num_txs_simulated_success = 0; + let mut num_txs_simulated_fail = 0; + let base_fee = self.base_fee(); + let tx_da_limit = self.da_config.max_da_tx_size(); + let mut evm = self.evm_config.evm_with_env(&mut *db, self.evm_env.clone()); + + while let Some(tx) = best_txs.next(()) { + let exclude_reverting_txs = tx.exclude_reverting_txs(); + + let tx = tx.into_consensus(); + num_txs_considered += 1; + // ensure we still have capacity for this transaction + if info.is_tx_over_limits(tx.inner(), block_gas_limit, tx_da_limit, block_da_limit) { + // we can't fit this transaction into the block, so we need to mark it as + // invalid which also removes all dependent transaction from + // the iterator before we can continue + best_txs.mark_invalid(tx.signer(), tx.nonce()); + continue; + } + + // A sequencer's block should never contain blob or deposit transactions from the pool. 
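+ // (deposits can only enter the block through the sequencer's payload attributes,
+ // which are executed in `execute_sequencer_transactions`)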
+ if tx.is_eip4844() || tx.is_deposit() { + best_txs.mark_invalid(tx.signer(), tx.nonce()); + continue; + } + + // check if the job was cancelled, if so we can exit early + if self.cancel.is_cancelled() { + return Ok(Some(())); + } + + let tx_simulation_start_time = Instant::now(); + let ResultAndState { result, state } = match evm.transact(&tx) { + Ok(res) => res, + Err(err) => { + if let Some(err) = err.as_invalid_tx_err() { + if err.is_nonce_too_low() { + // if the nonce is too low, we can skip this transaction + trace!(target: "payload_builder", %err, ?tx, "skipping nonce too low transaction"); + } else { + // if the transaction is invalid, we can skip it and all of its + // descendants + trace!(target: "payload_builder", %err, ?tx, "skipping invalid transaction and its descendants"); + best_txs.mark_invalid(tx.signer(), tx.nonce()); + } + + continue; + } + // this is an error that we should treat as fatal for this attempt + return Err(PayloadBuilderError::EvmExecutionError(Box::new(err))); + } + }; + + self.metrics + .tx_simulation_duration + .record(tx_simulation_start_time.elapsed()); + self.metrics.tx_byte_size.record(tx.inner().size() as f64); + num_txs_simulated += 1; + if result.is_success() { + num_txs_simulated_success += 1; + } else { + num_txs_simulated_fail += 1; + if exclude_reverting_txs { + info!(target: "payload_builder", tx_hash = ?tx.tx_hash(), "skipping reverted transaction"); + best_txs.mark_invalid(tx.signer(), tx.nonce()); + continue; + } + } + + // add gas used by the transaction to cumulative gas used, before creating the + // receipt + let gas_used = result.gas_used(); + info.cumulative_gas_used += gas_used; + + // Push transaction changeset and calculate header bloom filter for receipt. + let ctx = ReceiptBuilderCtx { + tx: tx.inner(), + evm: &evm, + result, + state: &state, + cumulative_gas_used: info.cumulative_gas_used, + }; + info.receipts.push(self.build_receipt(ctx, None)); + + // commit changes + evm.db_mut().commit(state); + + // update add to total fees + let miner_fee = tx + .effective_tip_per_gas(base_fee) + .expect("fee is always valid; execution succeeded"); + info.total_fees += U256::from(miner_fee) * U256::from(gas_used); + + // append sender and transaction to the respective lists + info.executed_senders.push(tx.signer()); + info.executed_transactions.push(tx.into_inner()); + } + + self.metrics + .payload_tx_simulation_duration + .record(execute_txs_start_time.elapsed()); + self.metrics + .payload_num_tx_considered + .record(num_txs_considered as f64); + self.metrics + .payload_num_tx_simulated + .record(num_txs_simulated as f64); + self.metrics + .payload_num_tx_simulated_success + .record(num_txs_simulated_success as f64); + self.metrics + .payload_num_tx_simulated_fail + .record(num_txs_simulated_fail as f64); + + Ok(None) + } + + pub fn add_builder_tx( + &self, + info: &mut ExecutionInfo, + db: &mut State, + builder_tx_gas: u64, + message: Vec, + ) -> Option<()> + where + DB: Database, + { + self.builder_signer() + .map(|signer| { + let base_fee = self.base_fee(); + let chain_id = self.chain_id(); + // Create and sign the transaction + let builder_tx = + signed_builder_tx(db, builder_tx_gas, message, signer, base_fee, chain_id)?; + + let mut evm = self.evm_config.evm_with_env(&mut *db, self.evm_env.clone()); + + let ResultAndState { result, state } = evm + .transact(&builder_tx) + .map_err(|err| PayloadBuilderError::EvmExecutionError(Box::new(err)))?; + + // Add gas used by the transaction to cumulative gas used, before creating the receipt 
+ let gas_used = result.gas_used(); + info.cumulative_gas_used += gas_used; + + let ctx = ReceiptBuilderCtx { + tx: builder_tx.inner(), + evm: &evm, + result, + state: &state, + cumulative_gas_used: info.cumulative_gas_used, + }; + info.receipts.push(self.build_receipt(ctx, None)); + + // Release the db reference by dropping evm + drop(evm); + // Commit changes + db.commit(state); + + // Append sender and transaction to the respective lists + info.executed_senders.push(builder_tx.signer()); + info.executed_transactions.push(builder_tx.into_inner()); + Ok(()) + }) + .transpose() + .unwrap_or_else(|err: PayloadBuilderError| { + warn!(target: "payload_builder", %err, "Failed to add builder transaction"); + None + }) + } + + /// Calculates EIP 2718 builder transaction size + pub fn estimate_builder_tx_da_size( + &self, + db: &mut State, + builder_tx_gas: u64, + message: Vec, + ) -> Option + where + DB: Database, + { + self.builder_signer() + .map(|signer| { + let base_fee = self.base_fee(); + let chain_id = self.chain_id(); + // Create and sign the transaction + let builder_tx = + signed_builder_tx(db, builder_tx_gas, message, signer, base_fee, chain_id)?; + Ok(builder_tx.length()) + }) + .transpose() + .unwrap_or_else(|err: PayloadBuilderError| { + warn!(target: "payload_builder", %err, "Failed to add builder transaction"); + None + }) + } +} + +/// Creates signed builder tx to Address::ZERO and specified message as input +pub fn signed_builder_tx( + db: &mut State, + builder_tx_gas: u64, + message: Vec, + signer: Signer, + base_fee: u64, + chain_id: u64, +) -> Result, PayloadBuilderError> +where + DB: Database, +{ + // Create message with block number for the builder to sign + let nonce = db + .load_cache_account(signer.address) + .map(|acc| acc.account_info().unwrap_or_default().nonce) + .map_err(|_| { + PayloadBuilderError::other(OpPayloadBuilderError::AccountLoadFailed(signer.address)) + })?; + + // Create the EIP-1559 transaction + let tx = OpTypedTransaction::Eip1559(TxEip1559 { + chain_id, + nonce, + gas_limit: builder_tx_gas, + max_fee_per_gas: base_fee.into(), + max_priority_fee_per_gas: 0, + to: TxKind::Call(Address::ZERO), + // Include the message as part of the transaction data + input: message.into(), + ..Default::default() + }); + // Sign the transaction + let builder_tx = signer.sign_tx(tx).map_err(PayloadBuilderError::other)?; + + Ok(builder_tx) +} diff --git a/crates/op-rbuilder/src/builders/flashblocks/config.rs b/crates/op-rbuilder/src/builders/flashblocks/config.rs new file mode 100644 index 000000000..18c9ea6d2 --- /dev/null +++ b/crates/op-rbuilder/src/builders/flashblocks/config.rs @@ -0,0 +1,55 @@ +use crate::{args::OpRbuilderArgs, builders::BuilderConfig}; +use core::{ + net::{Ipv4Addr, SocketAddr}, + time::Duration, +}; + +/// Configuration values that are specific to the flashblocks builder. +#[derive(Debug, Clone)] +pub struct FlashblocksConfig { + /// The address of the websockets endpoint that listens for subscriptions to + /// new flashblocks updates. + pub ws_addr: SocketAddr, + + /// How often a flashblock is produced. This is independent of the block time of the chain. + /// Each block will contain one or more flashblocks. On average, the number of flashblocks + /// per block is equal to the block time divided by the flashblock interval. 
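+ /// For example, with the default 2s chain block time and the default 250ms interval,
+ /// `flashblocks_per_block` evaluates to 8.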
+ pub interval: Duration, +} + +impl Default for FlashblocksConfig { + fn default() -> Self { + Self { + ws_addr: SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 1111), + interval: Duration::from_millis(250), + } + } +} + +impl TryFrom for FlashblocksConfig { + type Error = eyre::Report; + + fn try_from(args: OpRbuilderArgs) -> Result { + let ws_addr = args + .flashblocks_ws_url + .parse() + .map_err(|_| eyre::eyre!("Invalid flashblocks websocket address"))?; + + let interval = Duration::from_millis(args.flashblock_block_time); + + Ok(Self { ws_addr, interval }) + } +} + +pub trait FlashBlocksConfigExt { + fn flashblocks_per_block(&self) -> u64; +} + +impl FlashBlocksConfigExt for BuilderConfig { + fn flashblocks_per_block(&self) -> u64 { + if self.block_time.as_millis() == 0 { + return 0; + } + (self.block_time.as_millis() / self.specific.interval.as_millis()) as u64 + } +} diff --git a/crates/op-rbuilder/src/builders/flashblocks/mod.rs b/crates/op-rbuilder/src/builders/flashblocks/mod.rs new file mode 100644 index 000000000..d85259ab0 --- /dev/null +++ b/crates/op-rbuilder/src/builders/flashblocks/mod.rs @@ -0,0 +1,34 @@ +use super::BuilderConfig; +use crate::traits::{NodeBounds, PoolBounds}; +use config::FlashblocksConfig; +use service::FlashblocksServiceBuilder; + +mod config; +//mod context; +mod payload; +mod service; +mod wspub; + +/// Block building strategy that progressively builds chunks of a block and makes them available +/// through a websocket update, then merges them into a full block every chain block time. +pub struct FlashblocksBuilder; + +impl super::PayloadBuilder for FlashblocksBuilder { + type Config = FlashblocksConfig; + + type ServiceBuilder + = FlashblocksServiceBuilder + where + Node: NodeBounds, + Pool: PoolBounds; + + fn new_service( + config: BuilderConfig, + ) -> eyre::Result> + where + Node: NodeBounds, + Pool: PoolBounds, + { + Ok(FlashblocksServiceBuilder(config)) + } +} diff --git a/crates/op-rbuilder/src/builders/flashblocks/payload.rs b/crates/op-rbuilder/src/builders/flashblocks/payload.rs new file mode 100644 index 000000000..fe1e638f8 --- /dev/null +++ b/crates/op-rbuilder/src/builders/flashblocks/payload.rs @@ -0,0 +1,650 @@ +use core::time::Duration; +use std::{sync::Arc, time::Instant}; + +use super::{config::FlashblocksConfig, wspub::WebSocketPublisher}; +use crate::{ + builders::{ + context::OpPayloadBuilderCtx, + flashblocks::config::FlashBlocksConfigExt, + generator::{BlockCell, BuildArguments}, + BuilderConfig, + }, + metrics::OpRBuilderMetrics, + primitives::reth::ExecutionInfo, + traits::{ClientBounds, PoolBounds}, +}; +use alloy_consensus::{ + constants::EMPTY_WITHDRAWALS, proofs, BlockBody, Header, EMPTY_OMMER_ROOT_HASH, +}; +use alloy_eips::{eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE, Encodable2718}; +use alloy_primitives::{map::foldhash::HashMap, Address, B256, U256}; +use reth::payload::PayloadBuilderAttributes; +use reth_basic_payload_builder::BuildOutcome; +use reth_evm::{execute::BlockBuilder, ConfigureEvm}; +use reth_node_api::{Block, NodePrimitives, PayloadBuilderError}; +use reth_optimism_consensus::{calculate_receipt_root_no_memo_optimism, isthmus}; +use reth_optimism_evm::{OpEvmConfig, OpNextBlockEnvAttributes}; +use reth_optimism_forks::OpHardforks; +use reth_optimism_node::{OpBuiltPayload, OpPayloadBuilderAttributes}; +use reth_optimism_primitives::{OpPrimitives, OpReceipt, OpTransactionSigned}; +use reth_payload_util::BestPayloadTransactions; +use reth_provider::{ + ExecutionOutcome, HashedPostStateProvider, 
ProviderError, StateRootProvider, + StorageRootProvider, +}; +use reth_revm::{ + database::StateProviderDatabase, + db::{states::bundle_state::BundleRetention, BundleState}, + State, +}; +use revm::Database; +use rollup_boost::primitives::{ + ExecutionPayloadBaseV1, ExecutionPayloadFlashblockDeltaV1, FlashblocksPayloadV1, +}; +use serde::{Deserialize, Serialize}; +use tokio::sync::mpsc; +use tracing::{debug, error, warn}; + +#[derive(Debug, Default)] +struct ExtraExecutionInfo { + /// Index of the last consumed flashblock + pub last_flashblock_index: usize, +} + +/// Optimism's payload builder +#[derive(Debug, Clone)] +pub struct OpPayloadBuilder { + /// The type responsible for creating the evm. + pub evm_config: OpEvmConfig, + /// The transaction pool + pub pool: Pool, + /// Node client + pub client: Client, + /// WebSocket publisher for broadcasting flashblocks + /// to all connected subscribers. + pub ws_pub: Arc, + /// System configuration for the builder + pub config: BuilderConfig, + /// The metrics for the builder + pub metrics: Arc, +} + +impl OpPayloadBuilder { + /// `OpPayloadBuilder` constructor. + pub fn new( + evm_config: OpEvmConfig, + pool: Pool, + client: Client, + config: BuilderConfig, + ) -> eyre::Result { + let metrics = Arc::new(OpRBuilderMetrics::default()); + let ws_pub = WebSocketPublisher::new(config.specific.ws_addr, Arc::clone(&metrics))?.into(); + + Ok(Self { + evm_config, + pool, + client, + ws_pub, + config, + metrics, + }) + } +} + +impl reth_basic_payload_builder::PayloadBuilder for OpPayloadBuilder +where + Pool: Clone + Send + Sync, + Client: Clone + Send + Sync, +{ + type Attributes = OpPayloadBuilderAttributes; + type BuiltPayload = OpBuiltPayload; + + fn try_build( + &self, + _args: reth_basic_payload_builder::BuildArguments, + ) -> Result, PayloadBuilderError> { + unimplemented!() + } + + fn build_empty_payload( + &self, + _config: reth_basic_payload_builder::PayloadConfig< + Self::Attributes, + reth_basic_payload_builder::HeaderForPayload, + >, + ) -> Result { + unimplemented!() + } +} + +impl OpPayloadBuilder +where + Pool: PoolBounds, + Client: ClientBounds, +{ + /// Constructs an Optimism payload from the transactions sent via the + /// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in + /// the payload attributes, the transaction pool will be ignored and the only transactions + /// included in the payload will be those sent through the attributes. + /// + /// Given build arguments including an Optimism client, transaction pool, + /// and configuration, this function creates a transaction payload. Returns + /// a result indicating success with the payload or an error in case of failure. + fn build_payload( + &self, + args: BuildArguments, OpBuiltPayload>, + best_payload: BlockCell, + ) -> Result<(), PayloadBuilderError> { + let block_build_start_time = Instant::now(); + let BuildArguments { config, cancel, .. 
} = args; + + let chain_spec = self.client.chain_spec(); + let timestamp = config.attributes.timestamp(); + let block_env_attributes = OpNextBlockEnvAttributes { + timestamp, + suggested_fee_recipient: config.attributes.suggested_fee_recipient(), + prev_randao: config.attributes.prev_randao(), + gas_limit: config + .attributes + .gas_limit + .unwrap_or(config.parent_header.gas_limit), + parent_beacon_block_root: config + .attributes + .payload_attributes + .parent_beacon_block_root, + extra_data: if chain_spec.is_holocene_active_at_timestamp(timestamp) { + config + .attributes + .get_holocene_extra_data(chain_spec.base_fee_params_at_timestamp(timestamp)) + .map_err(PayloadBuilderError::other)? + } else { + Default::default() + }, + }; + + let evm_env = self + .evm_config + .next_evm_env(&config.parent_header, &block_env_attributes) + .map_err(PayloadBuilderError::other)?; + + let ctx = OpPayloadBuilderCtx { + evm_config: self.evm_config.clone(), + chain_spec: self.client.chain_spec(), + config, + evm_env, + block_env_attributes, + cancel, + da_config: self.config.da_config.clone(), + builder_signer: self.config.builder_signer, + metrics: Default::default(), + }; + + let state_provider = self.client.state_by_block_hash(ctx.parent().hash())?; + let state = StateProviderDatabase::new(&state_provider); + + // 1. execute the pre steps and seal an early block with that + let sequencer_tx_start_time = Instant::now(); + let mut db = State::builder() + .with_database(state) + .with_bundle_update() + .build(); + + let mut info = execute_pre_steps(&mut db, &ctx)?; + ctx.metrics + .sequencer_tx_duration + .record(sequencer_tx_start_time.elapsed()); + + let (payload, fb_payload, mut bundle_state) = build_block(db, &ctx, &mut info)?; + + best_payload.set(payload.clone()); + self.ws_pub + .publish(&fb_payload) + .map_err(PayloadBuilderError::other)?; + + tracing::info!(target: "payload_builder", "Fallback block built"); + ctx.metrics + .payload_num_tx + .record(info.executed_transactions.len() as f64); + + if ctx.attributes().no_tx_pool { + tracing::info!( + target: "payload_builder", + "No transaction pool, skipping transaction pool processing", + ); + + self.metrics + .total_block_built_duration + .record(block_build_start_time.elapsed()); + + // return early since we don't need to build a block with transactions from the pool + return Ok(()); + } + + let gas_per_batch = ctx.block_gas_limit() / self.config.flashblocks_per_block(); + let mut total_gas_per_batch = gas_per_batch; + let total_da_bytes_per_batch = ctx + .da_config + .max_da_block_size() + .map(|limit| limit / self.config.flashblocks_per_block()); + + let mut flashblock_count = 0; + // Create a channel to coordinate flashblock building + let (build_tx, mut build_rx) = mpsc::channel(1); + + // Spawn the timer task that signals when to build a new flashblock + let cancel_clone = ctx.cancel.clone(); + let interval = self.config.specific.interval; + tokio::spawn(async move { + let mut interval = tokio::time::interval(interval); + loop { + tokio::select! 
{ + // Add a cancellation check that only runs every 10ms to avoid tight polling + _ = tokio::time::sleep(Duration::from_millis(10)) => { + if cancel_clone.is_cancelled() { + tracing::info!(target: "payload_builder", "Job cancelled during sleep, stopping payload building"); + drop(build_tx); + break; + } + } + _ = interval.tick() => { + if let Err(err) = build_tx.send(()).await { + error!(target: "payload_builder", "Error sending build signal: {}", err); + break; + } + } + } + } + }); + + // Process flashblocks in a blocking loop + loop { + // Block on receiving a message, break on cancellation or closed channel + let received = tokio::task::block_in_place(|| { + // Get runtime handle + let rt = tokio::runtime::Handle::current(); + + // Run the async operation to completion, blocking the current thread + rt.block_on(async { + // Check for cancellation first + if ctx.cancel.is_cancelled() { + tracing::info!( + target: "payload_builder", + "Job cancelled, stopping payload building", + ); + return None; + } + + // Wait for next message + build_rx.recv().await + }) + }); + + // Exit loop if channel closed or cancelled + match received { + Some(()) => { + if flashblock_count >= self.config.flashblocks_per_block() { + tracing::info!( + target: "payload_builder", + "Skipping flashblock reached target={} idx={}", + self.config.flashblocks_per_block(), + flashblock_count + ); + continue; + } + + // Continue with flashblock building + tracing::info!( + target: "payload_builder", + "Building flashblock {} {}", + flashblock_count, + total_gas_per_batch, + ); + + let flashblock_build_start_time = Instant::now(); + let state = StateProviderDatabase::new(&state_provider); + + let mut db = State::builder() + .with_database(state) + .with_bundle_update() + .with_bundle_prestate(bundle_state) + .build(); + + let best_txs_start_time = Instant::now(); + let best_txs = BestPayloadTransactions::new( + self.pool + .best_transactions_with_attributes(ctx.best_transaction_attributes()), + ); + ctx.metrics + .transaction_pool_fetch_duration + .record(best_txs_start_time.elapsed()); + + let tx_execution_start_time = Instant::now(); + ctx.execute_best_transactions( + &mut info, + &mut db, + best_txs, + total_gas_per_batch.min(ctx.block_gas_limit()), + total_da_bytes_per_batch, + )?; + ctx.metrics + .payload_tx_simulation_duration + .record(tx_execution_start_time.elapsed()); + + if ctx.cancel.is_cancelled() { + tracing::info!( + target: "payload_builder", + "Job cancelled, stopping payload building", + ); + // if the job was cancelled, stop + return Ok(()); + } + + let total_block_built_duration = Instant::now(); + let build_result = build_block(db, &ctx, &mut info); + ctx.metrics + .total_block_built_duration + .record(total_block_built_duration.elapsed()); + + // Handle build errors with match pattern + match build_result { + Err(err) => { + // Track invalid/bad block + self.metrics.invalid_blocks_count.increment(1); + error!(target: "payload_builder", "Failed to build block {}, flashblock {}: {}", ctx.block_number(), flashblock_count, err); + // Return the error + return Err(err); + } + Ok((new_payload, mut fb_payload, new_bundle_state)) => { + fb_payload.index = flashblock_count + 1; // we do this because the fallback block is index 0 + fb_payload.base = None; + + self.ws_pub + .publish(&fb_payload) + .map_err(PayloadBuilderError::other)?; + + // Record flashblock build duration + self.metrics + .flashblock_build_duration + .record(flashblock_build_start_time.elapsed()); + ctx.metrics + .payload_byte_size + 
.record(new_payload.block().size() as f64); + ctx.metrics + .payload_num_tx + .record(info.executed_transactions.len() as f64); + + best_payload.set(new_payload.clone()); + // Update bundle_state for next iteration + bundle_state = new_bundle_state; + total_gas_per_batch += gas_per_batch; + flashblock_count += 1; + tracing::info!(target: "payload_builder", "Flashblock {} built", flashblock_count); + } + } + } + None => { + // Exit loop if channel closed or cancelled + self.metrics.block_built_success.increment(1); + self.metrics + .flashblock_count + .record(flashblock_count as f64); + return Ok(()); + } + } + } + } +} + +impl crate::builders::generator::PayloadBuilder for OpPayloadBuilder +where + Pool: PoolBounds, + Client: ClientBounds, +{ + type Attributes = OpPayloadBuilderAttributes; + type BuiltPayload = OpBuiltPayload; + + fn try_build( + &self, + args: BuildArguments, + best_payload: BlockCell, + ) -> Result<(), PayloadBuilderError> { + self.build_payload(args, best_payload) + } +} + +#[derive(Debug, Serialize, Deserialize)] +struct FlashblocksMetadata { + receipts: HashMap::Receipt>, + new_account_balances: HashMap, + block_number: u64, +} + +fn execute_pre_steps( + state: &mut State, + ctx: &OpPayloadBuilderCtx, +) -> Result, PayloadBuilderError> +where + DB: Database, +{ + // 1. apply pre-execution changes + ctx.evm_config + .builder_for_next_block(state, ctx.parent(), ctx.block_env_attributes.clone()) + .map_err(PayloadBuilderError::other)? + .apply_pre_execution_changes()?; + + // 3. execute sequencer transactions + let info = ctx.execute_sequencer_transactions(state)?; + + Ok(info) +} + +fn build_block( + mut state: State, + ctx: &OpPayloadBuilderCtx, + info: &mut ExecutionInfo, +) -> Result<(OpBuiltPayload, FlashblocksPayloadV1, BundleState), PayloadBuilderError> +where + DB: Database + AsRef
<P>
, + P: StateRootProvider + HashedPostStateProvider + StorageRootProvider, +{ + // TODO: We must run this only once per block, but we are running it on every flashblock + // merge all transitions into bundle state, this would apply the withdrawal balance changes + // and 4788 contract call + let state_merge_start_time = Instant::now(); + state.merge_transitions(BundleRetention::Reverts); + ctx.metrics + .state_transition_merge_duration + .record(state_merge_start_time.elapsed()); + + let new_bundle = state.take_bundle(); + + let block_number = ctx.block_number(); + assert_eq!(block_number, ctx.parent().number + 1); + + let execution_outcome = ExecutionOutcome::new( + new_bundle.clone(), + vec![info.receipts.clone()], + block_number, + vec![], + ); + + let receipts_root = execution_outcome + .generic_receipts_root_slow(block_number, |receipts| { + calculate_receipt_root_no_memo_optimism( + receipts, + &ctx.chain_spec, + ctx.attributes().timestamp(), + ) + }) + .expect("Number is in range"); + let logs_bloom = execution_outcome + .block_logs_bloom(block_number) + .expect("Number is in range"); + + // // calculate the state root + let state_root_start_time = Instant::now(); + let state_provider = state.database.as_ref(); + let hashed_state = state_provider.hashed_post_state(execution_outcome.state()); + let (state_root, _trie_output) = { + state + .database + .as_ref() + .state_root_with_updates(hashed_state.clone()) + .inspect_err(|err| { + warn!(target: "payload_builder", + parent_header=%ctx.parent().hash(), + %err, + "failed to calculate state root for payload" + ); + })? + }; + ctx.metrics + .state_root_calculation_duration + .record(state_root_start_time.elapsed()); + + let mut requests_hash = None; + let withdrawals_root = if ctx + .chain_spec + .is_isthmus_active_at_timestamp(ctx.attributes().timestamp()) + { + // always empty requests hash post isthmus + requests_hash = Some(EMPTY_REQUESTS_HASH); + + // withdrawals root field in block header is used for storage root of L2 predeploy + // `l2tol1-message-passer` + Some( + isthmus::withdrawals_root(execution_outcome.state(), state.database.as_ref()) + .map_err(PayloadBuilderError::other)?, + ) + } else if ctx + .chain_spec + .is_canyon_active_at_timestamp(ctx.attributes().timestamp()) + { + Some(EMPTY_WITHDRAWALS) + } else { + None + }; + + // create the block header + let transactions_root = proofs::calculate_transaction_root(&info.executed_transactions); + + // OP doesn't support blobs/EIP-4844. + // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions + // Need [Some] or [None] based on hardfork to match block hash. 
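+ // ([Some(0)] for both fields after Ecotone, [None] before; see `OpPayloadBuilderCtx::blob_fields`)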
+ let (excess_blob_gas, blob_gas_used) = ctx.blob_fields(); + let extra_data = ctx.extra_data()?; + + let header = Header { + parent_hash: ctx.parent().hash(), + ommers_hash: EMPTY_OMMER_ROOT_HASH, + beneficiary: ctx.evm_env.block_env.beneficiary, + state_root, + transactions_root, + receipts_root, + withdrawals_root, + logs_bloom, + timestamp: ctx.attributes().payload_attributes.timestamp, + mix_hash: ctx.attributes().payload_attributes.prev_randao, + nonce: BEACON_NONCE.into(), + base_fee_per_gas: Some(ctx.base_fee()), + number: ctx.parent().number + 1, + gas_limit: ctx.block_gas_limit(), + difficulty: U256::ZERO, + gas_used: info.cumulative_gas_used, + extra_data, + parent_beacon_block_root: ctx.attributes().payload_attributes.parent_beacon_block_root, + blob_gas_used, + excess_blob_gas, + requests_hash, + }; + + // seal the block + let block = alloy_consensus::Block::::new( + header, + BlockBody { + transactions: info.executed_transactions.clone(), + ommers: vec![], + withdrawals: ctx.withdrawals().cloned(), + }, + ); + + let sealed_block = Arc::new(block.seal_slow()); + debug!(target: "payload_builder", ?sealed_block, "sealed built block"); + + let block_hash = sealed_block.hash(); + + // pick the new transactions from the info field and update the last flashblock index + let new_transactions = info.executed_transactions[info.extra.last_flashblock_index..].to_vec(); + + let new_transactions_encoded = new_transactions + .clone() + .into_iter() + .map(|tx| tx.encoded_2718().into()) + .collect::>(); + + let new_receipts = info.receipts[info.extra.last_flashblock_index..].to_vec(); + info.extra.last_flashblock_index = info.executed_transactions.len(); + let receipts_with_hash = new_transactions + .iter() + .zip(new_receipts.iter()) + .map(|(tx, receipt)| (tx.tx_hash(), receipt.clone())) + .collect::>(); + let new_account_balances = new_bundle + .state + .iter() + .filter_map(|(address, account)| account.info.as_ref().map(|info| (*address, info.balance))) + .collect::>(); + + let metadata: FlashblocksMetadata = FlashblocksMetadata { + receipts: receipts_with_hash, + new_account_balances, + block_number: ctx.parent().number + 1, + }; + + // Prepare the flashblocks message + let fb_payload = FlashblocksPayloadV1 { + payload_id: ctx.payload_id(), + index: 0, + base: Some(ExecutionPayloadBaseV1 { + parent_beacon_block_root: ctx + .attributes() + .payload_attributes + .parent_beacon_block_root + .unwrap(), + parent_hash: ctx.parent().hash(), + fee_recipient: ctx.attributes().suggested_fee_recipient(), + prev_randao: ctx.attributes().payload_attributes.prev_randao, + block_number: ctx.parent().number + 1, + gas_limit: ctx.block_gas_limit(), + timestamp: ctx.attributes().payload_attributes.timestamp, + extra_data: ctx.extra_data()?, + base_fee_per_gas: ctx.base_fee().try_into().unwrap(), + }), + diff: ExecutionPayloadFlashblockDeltaV1 { + state_root, + receipts_root, + logs_bloom, + gas_used: info.cumulative_gas_used, + block_hash, + transactions: new_transactions_encoded, + withdrawals: ctx.withdrawals().cloned().unwrap_or_default().to_vec(), + withdrawals_root, + }, + metadata: serde_json::to_value(&metadata).unwrap_or_default(), + }; + + Ok(( + OpBuiltPayload::new( + ctx.payload_id(), + sealed_block, + info.total_fees, + // This must be set to NONE for now because we are doing merge transitions on every flashblock + // when it should only happen once per block, thus, it returns a confusing state back to op-reth. 
+ // We can live without this for now because Op syncs up the executed block using new_payload + // calls, but eventually we would want to return the executed block here. + None, + ), + fb_payload, + new_bundle, + )) +} diff --git a/crates/op-rbuilder/src/builders/flashblocks/service.rs b/crates/op-rbuilder/src/builders/flashblocks/service.rs new file mode 100644 index 000000000..210b7bb65 --- /dev/null +++ b/crates/op-rbuilder/src/builders/flashblocks/service.rs @@ -0,0 +1,56 @@ +use super::{payload::OpPayloadBuilder, FlashblocksConfig}; +use crate::{ + builders::{generator::BlockPayloadJobGenerator, BuilderConfig}, + traits::{NodeBounds, PoolBounds}, +}; +use reth_basic_payload_builder::BasicPayloadJobGeneratorConfig; +use reth_node_api::NodeTypes; +use reth_node_builder::{components::PayloadServiceBuilder, BuilderContext}; +use reth_optimism_evm::OpEvmConfig; +use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; +use reth_provider::CanonStateSubscriptions; + +pub struct FlashblocksServiceBuilder(pub BuilderConfig); + +impl PayloadServiceBuilder for FlashblocksServiceBuilder +where + Node: NodeBounds, + Pool: PoolBounds, +{ + async fn spawn_payload_builder_service( + self, + ctx: &BuilderContext, + pool: Pool, + _: OpEvmConfig, + ) -> eyre::Result::Payload>> { + tracing::debug!("Spawning flashblocks payload builder service"); + + let payload_builder = OpPayloadBuilder::new( + OpEvmConfig::optimism(ctx.chain_spec()), + pool, + ctx.provider().clone(), + self.0.clone(), + )?; + + let payload_job_config = BasicPayloadJobGeneratorConfig::default(); + + let payload_generator = BlockPayloadJobGenerator::with_builder( + ctx.provider().clone(), + ctx.task_executor().clone(), + payload_job_config, + payload_builder, + true, + self.0.block_time_leeway, + ); + + let (payload_service, payload_builder) = + PayloadBuilderService::new(payload_generator, ctx.provider().canonical_state_stream()); + + ctx.task_executor() + .spawn_critical("custom payload builder service", Box::pin(payload_service)); + + tracing::info!("Flashblocks payload builder service started"); + + Ok(payload_builder) + } +} diff --git a/crates/op-rbuilder/src/builders/flashblocks/wspub.rs b/crates/op-rbuilder/src/builders/flashblocks/wspub.rs new file mode 100644 index 000000000..a31ad3be7 --- /dev/null +++ b/crates/op-rbuilder/src/builders/flashblocks/wspub.rs @@ -0,0 +1,239 @@ +use core::{ + fmt::{Debug, Formatter}, + net::SocketAddr, + pin::Pin, + sync::atomic::{AtomicUsize, Ordering}, + task::{Context, Poll}, +}; +use futures::{Sink, SinkExt}; +use rollup_boost::primitives::FlashblocksPayloadV1; +use std::{io, net::TcpListener, sync::Arc}; +use tokio::{ + net::TcpStream, + sync::{ + broadcast::{self, error::RecvError, Receiver}, + watch, + }, +}; +use tokio_tungstenite::{ + accept_async, + tungstenite::{Message, Utf8Bytes}, + WebSocketStream, +}; +use tracing::warn; + +use crate::metrics::OpRBuilderMetrics; + +/// A WebSockets publisher that accepts connections from client websockets and broadcasts to them +/// updates about new flashblocks. It maintains a count of sent messages and active subscriptions. +/// +/// This is modelled as a `futures::Sink` that can be used to send `FlashblocksPayloadV1` messages. 
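+///
+/// `publish` serializes a payload to JSON once and fans the resulting text frame out over a
+/// `tokio::sync::broadcast` channel, so every subscriber receives the same bytes without
+/// re-serialization. Any WebSocket client can consume the stream, e.g. `websocat ws://127.0.0.1:1111`
+/// when the default `ws_addr` from `FlashblocksConfig` is used.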
+pub struct WebSocketPublisher { + sent: Arc, + subs: Arc, + term: watch::Sender, + pipe: broadcast::Sender, +} + +impl WebSocketPublisher { + pub fn new(addr: SocketAddr, metrics: Arc) -> io::Result { + let (pipe, _) = broadcast::channel(100); + let (term, _) = watch::channel(false); + + let sent = Arc::new(AtomicUsize::new(0)); + let subs = Arc::new(AtomicUsize::new(0)); + let listener = TcpListener::bind(addr)?; + + tokio::spawn(listener_loop( + listener, + metrics, + pipe.subscribe(), + term.subscribe(), + Arc::clone(&sent), + Arc::clone(&subs), + )); + + Ok(Self { + sent, + subs, + term, + pipe, + }) + } + + pub fn publish(&self, payload: &FlashblocksPayloadV1) -> io::Result<()> { + // Serialize the payload to a UTF-8 string + // serialize only once, then just copy around only a pointer + // to the serialized data for each subscription. + let serialized = serde_json::to_string(payload)?; + let utf8_bytes = Utf8Bytes::from(serialized); + + // Send the serialized payload to all subscribers + self.pipe + .send(utf8_bytes) + .map_err(|e| io::Error::new(io::ErrorKind::ConnectionAborted, e))?; + Ok(()) + } +} + +impl Drop for WebSocketPublisher { + fn drop(&mut self) { + // Notify the listener loop to terminate + let _ = self.term.send(true); + tracing::info!("WebSocketPublisher dropped, terminating listener loop"); + } +} + +async fn listener_loop( + listener: TcpListener, + metrics: Arc, + receiver: Receiver, + term: watch::Receiver, + sent: Arc, + subs: Arc, +) { + listener + .set_nonblocking(true) + .expect("Failed to set TcpListener socket to non-blocking"); + + let listener = tokio::net::TcpListener::from_std(listener) + .expect("Failed to convert TcpListener to tokio TcpListener"); + + let listen_addr = listener + .local_addr() + .expect("Failed to get local address of listener"); + tracing::info!("Flashblocks WebSocketPublisher listening on {listen_addr}"); + + let mut term = term; + + loop { + let subs = Arc::clone(&subs); + let metrics = Arc::clone(&metrics); + + tokio::select! { + // drop this connection if the `WebSocketPublisher` is dropped + _ = term.changed() => { + if *term.borrow() { + return; + } + } + + // Accept new connections on the websocket listener + // when a new connection is established, spawn a dedicated task to handle + // the connection and broadcast with that connection. + Ok((connection, peer_addr)) = listener.accept() => { + let sent = Arc::clone(&sent); + let term = term.clone(); + let receiver_clone = receiver.resubscribe(); + + match accept_async(connection).await { + Ok(stream) => { + tokio::spawn(async move { + subs.fetch_add(1, Ordering::Relaxed); + tracing::debug!("WebSocket connection established with {}", peer_addr); + + // Handle the WebSocket connection in a dedicated task + broadcast_loop(stream, metrics, term, receiver_clone, sent).await; + + subs.fetch_sub(1, Ordering::Relaxed); + tracing::debug!("WebSocket connection closed for {}", peer_addr); + }); + } + Err(e) => { + warn!("Failed to accept WebSocket connection from {peer_addr}: {e}"); + } + } + } + } + } +} + +/// An instance of this loop is spawned for each connected WebSocket client. +/// It listens for broadcast updates about new flashblocks and sends them to the client. +/// It also handles termination signals to gracefully close the connection. +/// Any connectivity errors will terminate the loop, which will in turn +/// decrement the subscription count in the `WebSocketPublisher`. 
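+/// A lagged subscriber is tolerated: dropped flashblocks are logged as a warning and the loop
+/// keeps receiving.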
+async fn broadcast_loop( + stream: WebSocketStream, + metrics: Arc, + term: watch::Receiver, + blocks: broadcast::Receiver, + sent: Arc, +) { + let mut term = term; + let mut blocks = blocks; + let mut stream = stream; + let Ok(peer_addr) = stream.get_ref().peer_addr() else { + return; + }; + + loop { + let metrics = Arc::clone(&metrics); + + tokio::select! { + // Check if the publisher is terminated + _ = term.changed() => { + if *term.borrow() { + tracing::info!("WebSocketPublisher is terminating, closing broadcast loop"); + return; + } + } + + // Receive payloads from the broadcast channel + payload = blocks.recv() => match payload { + Ok(payload) => { + // Here you would typically send the payload to the WebSocket clients. + // For this example, we just increment the sent counter. + sent.fetch_add(1, Ordering::Relaxed); + metrics.messages_sent_count.increment(1); + + tracing::info!("Broadcasted payload: {:?}", payload); + if let Err(e) = stream.send(Message::Text(payload)).await { + tracing::debug!("Closing flashblocks subscription for {peer_addr}: {e}"); + break; // Exit the loop if sending fails + } + } + Err(RecvError::Closed) => { + tracing::debug!("Broadcast channel closed, exiting broadcast loop"); + return; + } + Err(RecvError::Lagged(_)) => { + tracing::warn!("Broadcast channel lagged, some messages were dropped"); + } + }, + } + } +} + +impl Debug for WebSocketPublisher { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + let subs = self.subs.load(Ordering::Relaxed); + let sent = self.sent.load(Ordering::Relaxed); + + f.debug_struct("WebSocketPublisher") + .field("subs", &subs) + .field("payloads_sent", &sent) + .finish() + } +} + +impl Sink<&FlashblocksPayloadV1> for WebSocketPublisher { + type Error = eyre::Report; + + fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn start_send(self: Pin<&mut Self>, item: &FlashblocksPayloadV1) -> Result<(), Self::Error> { + self.publish(item)?; + Ok(()) + } + + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } +} diff --git a/crates/op-rbuilder/src/generator.rs b/crates/op-rbuilder/src/builders/generator.rs similarity index 99% rename from crates/op-rbuilder/src/generator.rs rename to crates/op-rbuilder/src/builders/generator.rs index eefe62008..2132d1055 100644 --- a/crates/op-rbuilder/src/generator.rs +++ b/crates/op-rbuilder/src/builders/generator.rs @@ -355,11 +355,6 @@ impl BlockCell { } } - pub fn is_some(&self) -> bool { - let inner = self.inner.lock().unwrap(); - inner.is_some() - } - pub fn set(&self, value: T) { let mut inner = self.inner.lock().unwrap(); *inner = Some(value); diff --git a/crates/op-rbuilder/src/builders/mod.rs b/crates/op-rbuilder/src/builders/mod.rs new file mode 100644 index 000000000..9cbc4a0f0 --- /dev/null +++ b/crates/op-rbuilder/src/builders/mod.rs @@ -0,0 +1,163 @@ +use core::{ + convert::{Infallible, TryFrom}, + fmt::Debug, + time::Duration, +}; +use reth_node_builder::components::PayloadServiceBuilder; +use reth_optimism_evm::OpEvmConfig; +use reth_optimism_payload_builder::config::OpDAConfig; + +use crate::{ + args::OpRbuilderArgs, + traits::{NodeBounds, PoolBounds}, + tx_signer::Signer, +}; + +mod context; +mod flashblocks; +mod generator; +mod standard; + +pub use flashblocks::FlashblocksBuilder; +pub use standard::StandardBuilder; + +/// Defines the payload building mode for the OP builder. 
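+/// The mode is selected at startup from the `--rollup.enable-flashblocks` flag
+/// (see `CliExt::builder_mode`).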
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub enum BuilderMode { + /// Uses the plain OP payload builder that produces blocks every chain blocktime. + #[default] + Standard, + /// Uses the flashblocks payload builder that progressively builds chunks of a + /// block every short interval and makes it available through a websocket update + /// then merges them into a full block every chain block time. + Flashblocks, +} + +/// Defines the interface for any block builder implementation API entry point. +/// +/// Instances of this trait are used during Reth node construction as an argument +/// to the `NodeBuilder::with_components` method to construct the payload builder +/// service that gets called whenver the current node is asked to build a block. +pub trait PayloadBuilder: Send + Sync + 'static { + /// The type that has an implementation specific variant of the Config struct. + /// This is used to configure the payload builder service during startup. + type Config: TryFrom + Clone + Debug + Send + Sync + 'static; + + /// The type that is used to instantiate the payload builder service + /// that will be used by reth to build blocks whenever the node is + /// asked to do so. + type ServiceBuilder: PayloadServiceBuilder + where + Node: NodeBounds, + Pool: PoolBounds; + + /// Called during node startup by reth. Returns a [`PayloadBuilderService`] instance + /// that is preloaded with a [`PayloadJobGenerator`] instance specific to the builder + /// type. + fn new_service( + config: BuilderConfig, + ) -> eyre::Result> + where + Node: NodeBounds, + Pool: PoolBounds; +} + +/// Configuration values that are applicable to any type of block builder. +#[derive(Clone)] +pub struct BuilderConfig { + /// Secret key of the builder that is used to sign the end of block transaction. + pub builder_signer: Option, + + /// When set to true, transactions are simulated by the builder and excluded from the block + /// if they revert. They may still be included in the block if individual transactions + /// opt-out of revert protection. + pub revert_protection: bool, + + /// The interval at which blocks are added to the chain. + /// This is also the frequency at which the builder will be receiving FCU rquests from the + /// sequencer. + pub block_time: Duration, + + /// Data Availability configuration for the OP builder + /// Defines constraints for the maximum size of data availability transactions. + pub da_config: OpDAConfig, + + // The deadline is critical for payload availability. If we reach the deadline, + // the payload job stops and cannot be queried again. With tight deadlines close + // to the block number, we risk reaching the deadline before the node queries the payload. + // + // Adding 0.5 seconds as wiggle room since block times are shorter here. + // TODO: A better long-term solution would be to implement cancellation logic + // that cancels existing jobs when receiving new block building requests. + // + // When batcher's max channel duration is big enough (e.g. 10m), the + // sequencer would send an avalanche of FCUs/getBlockByNumber on + // each batcher update (with 10m channel it's ~800 FCUs at once). + // At such moment it can happen that the time b/w FCU and ensuing + // getPayload would be on the scale of ~2.5s. Therefore we should + // "remember" the payloads long enough to accommodate this corner-case + // (without it we are losing blocks). Postponing the deadline for 5s + // (not just 0.5s) because of that. 
+ pub block_time_leeway: Duration, + + /// Configuration values that are specific to the block builder implementation used. + pub specific: Specific, +} + +impl core::fmt::Debug for BuilderConfig { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("Config") + .field( + "builder_signer", + &match self.builder_signer.as_ref() { + Some(signer) => signer.address.to_string(), + None => "None".into(), + }, + ) + .field("revert_protection", &self.revert_protection) + .field("block_time", &self.block_time) + .field("block_time_leeway", &self.block_time_leeway) + .field("da_config", &self.da_config) + .field("specific", &self.specific) + .finish() + } +} + +impl Default for BuilderConfig { + fn default() -> Self { + Self { + builder_signer: None, + revert_protection: false, + block_time: Duration::from_secs(2), + block_time_leeway: Duration::from_millis(500), + da_config: OpDAConfig::default(), + specific: S::default(), + } + } +} + +impl TryFrom for BuilderConfig +where + S: TryFrom + Clone, +{ + type Error = S::Error; + + fn try_from(args: OpRbuilderArgs) -> Result { + Ok(Self { + builder_signer: args.builder_signer, + revert_protection: args.enable_revert_protection, + block_time: Duration::from_millis(args.chain_block_time), + block_time_leeway: Duration::from_millis(500), + da_config: Default::default(), + specific: S::try_from(args)?, + }) + } +} + +impl TryFrom for () { + type Error = Infallible; + + fn try_from(_: OpRbuilderArgs) -> Result { + Ok(()) + } +} diff --git a/crates/op-rbuilder/src/builders/standard/mod.rs b/crates/op-rbuilder/src/builders/standard/mod.rs new file mode 100644 index 000000000..98e9b44f7 --- /dev/null +++ b/crates/op-rbuilder/src/builders/standard/mod.rs @@ -0,0 +1,34 @@ +use payload::StandardPayloadBuilderBuilder; +use reth_node_builder::components::BasicPayloadServiceBuilder; + +use crate::traits::{NodeBounds, PoolBounds}; + +use super::BuilderConfig; + +mod payload; + +/// Block building strategy that builds blocks using the standard approach by +/// producing blocks every chain block time. 
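// Editor's note: a minimal usage sketch, not part of the patch. The standard builder
// declared below uses `()` as its builder-specific config, so a default `BuilderConfig`
// carries only the shared settings from the hunk above (values taken from the `Default`
// impl there; the exact generic parameters in the crate may differ):
fn _default_builder_config_example() {
    use core::time::Duration;

    let cfg: BuilderConfig<()> = BuilderConfig::default();
    assert_eq!(cfg.block_time, Duration::from_secs(2));            // chain block time
    assert_eq!(cfg.block_time_leeway, Duration::from_millis(500)); // payload-job leeway
    assert!(cfg.builder_signer.is_none());
    assert!(!cfg.revert_protection);
}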
+pub struct StandardBuilder; + +impl super::PayloadBuilder for StandardBuilder { + type Config = (); + + type ServiceBuilder + = BasicPayloadServiceBuilder + where + Node: NodeBounds, + Pool: PoolBounds; + + fn new_service( + config: BuilderConfig, + ) -> eyre::Result> + where + Node: NodeBounds, + Pool: PoolBounds, + { + Ok(BasicPayloadServiceBuilder::new( + StandardPayloadBuilderBuilder(config), + )) + } +} diff --git a/crates/op-rbuilder/src/builders/standard/payload.rs b/crates/op-rbuilder/src/builders/standard/payload.rs new file mode 100644 index 000000000..4c70e4488 --- /dev/null +++ b/crates/op-rbuilder/src/builders/standard/payload.rs @@ -0,0 +1,588 @@ +use alloy_consensus::{ + constants::EMPTY_WITHDRAWALS, proofs, BlockBody, Header, EMPTY_OMMER_ROOT_HASH, +}; +use alloy_eips::{ + eip7623::TOTAL_COST_FLOOR_PER_TOKEN, eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE, +}; +use alloy_primitives::U256; +use reth::payload::PayloadBuilderAttributes; +use reth_basic_payload_builder::{BuildOutcome, BuildOutcomeKind, MissingPayloadBehaviour}; +use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates}; +use reth_evm::{execute::BlockBuilder, ConfigureEvm}; +use reth_node_api::{Block, PayloadBuilderError}; +use reth_node_builder::{components::PayloadBuilderBuilder, BuilderContext}; +use reth_optimism_consensus::{calculate_receipt_root_no_memo_optimism, isthmus}; +use reth_optimism_evm::{OpEvmConfig, OpNextBlockEnvAttributes}; +use reth_optimism_forks::OpHardforks; +use reth_optimism_node::{OpBuiltPayload, OpPayloadBuilderAttributes}; +use reth_optimism_primitives::{OpPrimitives, OpTransactionSigned}; +use reth_payload_util::{BestPayloadTransactions, NoopPayloadTransactions, PayloadTransactions}; +use reth_primitives::RecoveredBlock; +use reth_provider::{ + ExecutionOutcome, HashedPostStateProvider, ProviderError, StateRootProvider, + StorageRootProvider, +}; +use reth_revm::{ + database::StateProviderDatabase, db::states::bundle_state::BundleRetention, State, +}; +use reth_transaction_pool::{BestTransactionsAttributes, PoolTransaction, TransactionPool}; +use revm::Database; +use std::{sync::Arc, time::Instant}; +use tokio_util::sync::CancellationToken; +use tracing::{info, warn}; + +use crate::{ + builders::{generator::BuildArguments, BuilderConfig}, + metrics::OpRBuilderMetrics, + primitives::reth::ExecutionInfo, + traits::{ClientBounds, NodeBounds, PayloadTxsBounds, PoolBounds}, +}; + +use super::super::context::OpPayloadBuilderCtx; + +pub struct StandardPayloadBuilderBuilder(pub BuilderConfig<()>); + +impl PayloadBuilderBuilder for StandardPayloadBuilderBuilder +where + Node: NodeBounds, + Pool: PoolBounds, +{ + type PayloadBuilder = StandardOpPayloadBuilder; + + async fn build_payload_builder( + self, + ctx: &BuilderContext, + pool: Pool, + _evm_config: OpEvmConfig, + ) -> eyre::Result { + Ok(StandardOpPayloadBuilder::new( + OpEvmConfig::optimism(ctx.chain_spec()), + pool, + ctx.provider().clone(), + self.0.clone(), + )) + } +} + +/// Optimism's payload builder +#[derive(Debug, Clone)] +pub struct StandardOpPayloadBuilder { + /// The type responsible for creating the evm. + pub evm_config: OpEvmConfig, + /// The transaction pool + pub pool: Pool, + /// Node client + pub client: Client, + /// Settings for the builder, e.g. DA settings. + pub config: BuilderConfig<()>, + /// The type responsible for yielding the best transactions for the payload if mempool + /// transactions are allowed. 
+ pub best_transactions: Txs, + /// The metrics for the builder + pub metrics: Arc, +} + +impl StandardOpPayloadBuilder { + /// `OpPayloadBuilder` constructor. + pub fn new( + evm_config: OpEvmConfig, + pool: Pool, + client: Client, + config: BuilderConfig<()>, + ) -> Self { + Self { + pool, + client, + config, + evm_config, + best_transactions: (), + metrics: Default::default(), + } + } +} + +/// A type that returns a the [`PayloadTransactions`] that should be included in the pool. +pub trait OpPayloadTransactions: Clone + Send + Sync + Unpin + 'static { + /// Returns an iterator that yields the transaction in the order they should get included in the + /// new payload. + fn best_transactions>( + &self, + pool: Pool, + attr: BestTransactionsAttributes, + ) -> impl PayloadTransactions; +} + +impl OpPayloadTransactions for () { + fn best_transactions>( + &self, + pool: Pool, + attr: BestTransactionsAttributes, + ) -> impl PayloadTransactions { + BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr)) + } +} + +impl reth_basic_payload_builder::PayloadBuilder + for StandardOpPayloadBuilder +where + Pool: PoolBounds, + Client: ClientBounds, + Txs: OpPayloadTransactions, +{ + type Attributes = OpPayloadBuilderAttributes; + type BuiltPayload = OpBuiltPayload; + + fn try_build( + &self, + args: reth_basic_payload_builder::BuildArguments, + ) -> Result, PayloadBuilderError> { + let pool = self.pool.clone(); + + let reth_basic_payload_builder::BuildArguments { + cached_reads, + config, + cancel: _, // TODO + best_payload: _, + } = args; + + let args = BuildArguments { + cached_reads, + config, + cancel: CancellationToken::new(), + }; + + self.build_payload(args, |attrs| { + #[allow(clippy::unit_arg)] + self.best_transactions + .best_transactions(pool.clone(), attrs) + }) + } + + fn on_missing_payload( + &self, + _args: reth_basic_payload_builder::BuildArguments, + ) -> MissingPayloadBehaviour { + MissingPayloadBehaviour::AwaitInProgress + } + + fn build_empty_payload( + &self, + config: reth_basic_payload_builder::PayloadConfig< + Self::Attributes, + reth_basic_payload_builder::HeaderForPayload, + >, + ) -> Result { + let args = BuildArguments { + config, + cached_reads: Default::default(), + cancel: Default::default(), + }; + self.build_payload(args, |_| { + NoopPayloadTransactions::::default() + })? + .into_payload() + .ok_or_else(|| PayloadBuilderError::MissingPayload) + } +} + +impl StandardOpPayloadBuilder +where + Pool: PoolBounds, + Client: ClientBounds, +{ + /// Constructs an Optimism payload from the transactions sent via the + /// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in + /// the payload attributes, the transaction pool will be ignored and the only transactions + /// included in the payload will be those sent through the attributes. + /// + /// Given build arguments including an Optimism client, transaction pool, + /// and configuration, this function creates a transaction payload. Returns + /// a result indicating success with the payload or an error in case of failure. 
+ fn build_payload<'a, Txs: PayloadTxsBounds>( + &self, + args: BuildArguments, OpBuiltPayload>, + best: impl FnOnce(BestTransactionsAttributes) -> Txs + Send + Sync + 'a, + ) -> Result, PayloadBuilderError> { + let BuildArguments { + mut cached_reads, + config, + cancel, + } = args; + + let chain_spec = self.client.chain_spec(); + let timestamp = config.attributes.timestamp(); + let block_env_attributes = OpNextBlockEnvAttributes { + timestamp, + suggested_fee_recipient: config.attributes.suggested_fee_recipient(), + prev_randao: config.attributes.prev_randao(), + gas_limit: config + .attributes + .gas_limit + .unwrap_or(config.parent_header.gas_limit), + parent_beacon_block_root: config + .attributes + .payload_attributes + .parent_beacon_block_root, + extra_data: if chain_spec.is_holocene_active_at_timestamp(timestamp) { + config + .attributes + .get_holocene_extra_data(chain_spec.base_fee_params_at_timestamp(timestamp)) + .map_err(PayloadBuilderError::other)? + } else { + Default::default() + }, + }; + + let evm_env = self + .evm_config + .next_evm_env(&config.parent_header, &block_env_attributes) + .map_err(PayloadBuilderError::other)?; + + let ctx = OpPayloadBuilderCtx { + evm_config: self.evm_config.clone(), + da_config: self.config.da_config.clone(), + chain_spec, + config, + evm_env, + block_env_attributes, + cancel, + builder_signer: self.config.builder_signer, + metrics: self.metrics.clone(), + }; + + let builder = OpBuilder::new(best); + + let state_provider = self.client.state_by_block_hash(ctx.parent().hash())?; + let state = StateProviderDatabase::new(state_provider); + + if ctx.attributes().no_tx_pool { + let db = State::builder() + .with_database(state) + .with_bundle_update() + .build(); + builder.build(db, ctx) + } else { + // sequencer mode we can reuse cachedreads from previous runs + let db = State::builder() + .with_database(cached_reads.as_db_mut(state)) + .with_bundle_update() + .build(); + builder.build(db, ctx) + } + .map(|out| out.with_cached_reads(cached_reads)) + } +} + +/// The type that builds the payload. +/// +/// Payload building for optimism is composed of several steps. +/// The first steps are mandatory and defined by the protocol. +/// +/// 1. first all System calls are applied. +/// 2. After canyon the forced deployed `create2deployer` must be loaded +/// 3. all sequencer transactions are executed (part of the payload attributes) +/// +/// Depending on whether the node acts as a sequencer and is allowed to include additional +/// transactions (`no_tx_pool == false`): +/// 4. include additional transactions +/// +/// And finally +/// 5. build the block: compute all roots (txs, state) +#[derive(derive_more::Debug)] +pub struct OpBuilder<'a, Txs> { + /// Yields the best transaction to include if transactions from the mempool are allowed. + best: Box Txs + 'a>, +} + +impl<'a, Txs> OpBuilder<'a, Txs> { + fn new(best: impl FnOnce(BestTransactionsAttributes) -> Txs + Send + Sync + 'a) -> Self { + Self { + best: Box::new(best), + } + } +} + +/// Holds the state after execution +#[derive(Debug)] +pub struct ExecutedPayload { + /// Tracked execution info + pub info: ExecutionInfo, +} + +impl OpBuilder<'_, Txs> { + /// Executes the payload and returns the outcome. + pub fn execute( + self, + state: &mut State, + ctx: &OpPayloadBuilderCtx, + ) -> Result, PayloadBuilderError> + where + DB: Database + AsRef
<P>
, + P: StorageRootProvider, + { + let Self { best } = self; + info!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number, "building new payload"); + + // 1. apply pre-execution changes + ctx.evm_config + .builder_for_next_block(state, ctx.parent(), ctx.block_env_attributes.clone()) + .map_err(PayloadBuilderError::other)? + .apply_pre_execution_changes()?; + + let sequencer_tx_start_time = Instant::now(); + + // 3. execute sequencer transactions + let mut info = ctx.execute_sequencer_transactions(state)?; + + ctx.metrics + .sequencer_tx_duration + .record(sequencer_tx_start_time.elapsed()); + + // 4. if mem pool transactions are requested we execute them + + // gas reserved for builder tx + let message = format!("Block Number: {}", ctx.block_number()) + .as_bytes() + .to_vec(); + let builder_tx_gas = ctx + .builder_signer() + .map_or(0, |_| estimate_gas_for_builder_tx(message.clone())); + let block_gas_limit = ctx.block_gas_limit() - builder_tx_gas; + // Save some space in the block_da_limit for builder tx + let builder_tx_da_size = ctx + .estimate_builder_tx_da_size(state, builder_tx_gas, message.clone()) + .unwrap_or(0); + let block_da_limit = ctx + .da_config + .max_da_block_size() + .map(|da_size| da_size - builder_tx_da_size as u64); + // Check that it's possible to create builder tx, considering max_da_tx_size, otherwise panic + if let Some(tx_da_limit) = ctx.da_config.max_da_tx_size() { + // Panic indicate max_da_tx_size misconfiguration + assert!( + tx_da_limit >= builder_tx_da_size as u64, + "The configured da_config.max_da_tx_size is too small to accommodate builder tx." + ); + } + + if !ctx.attributes().no_tx_pool { + let best_txs_start_time = Instant::now(); + let best_txs = best(ctx.best_transaction_attributes()); + ctx.metrics + .transaction_pool_fetch_duration + .record(best_txs_start_time.elapsed()); + if ctx + .execute_best_transactions( + &mut info, + state, + best_txs, + block_gas_limit, + block_da_limit, + )? + .is_some() + { + return Ok(BuildOutcomeKind::Cancelled); + } + } + + // Add builder tx to the block + ctx.add_builder_tx(&mut info, state, builder_tx_gas, message); + + let state_merge_start_time = Instant::now(); + + // merge all transitions into bundle state, this would apply the withdrawal balance changes + // and 4788 contract call + state.merge_transitions(BundleRetention::Reverts); + + ctx.metrics + .state_transition_merge_duration + .record(state_merge_start_time.elapsed()); + ctx.metrics + .payload_num_tx + .record(info.executed_transactions.len() as f64); + + let payload = ExecutedPayload { info }; + + ctx.metrics.block_built_success.increment(1); + Ok(BuildOutcomeKind::Better { payload }) + } + + /// Builds the payload on top of the state. + pub fn build( + self, + mut state: State, + ctx: OpPayloadBuilderCtx, + ) -> Result, PayloadBuilderError> + where + DB: Database + AsRef
<P>
, + P: StateRootProvider + HashedPostStateProvider + StorageRootProvider, + { + let ExecutedPayload { info } = match self.execute(&mut state, &ctx)? { + BuildOutcomeKind::Better { payload } | BuildOutcomeKind::Freeze(payload) => payload, + BuildOutcomeKind::Cancelled => return Ok(BuildOutcomeKind::Cancelled), + BuildOutcomeKind::Aborted { fees } => return Ok(BuildOutcomeKind::Aborted { fees }), + }; + + let block_number = ctx.block_number(); + let execution_outcome = ExecutionOutcome::new( + state.take_bundle(), + vec![info.receipts], + block_number, + Vec::new(), + ); + let receipts_root = execution_outcome + .generic_receipts_root_slow(block_number, |receipts| { + calculate_receipt_root_no_memo_optimism( + receipts, + &ctx.chain_spec, + ctx.attributes().timestamp(), + ) + }) + .expect("Number is in range"); + let logs_bloom = execution_outcome + .block_logs_bloom(block_number) + .expect("Number is in range"); + + // calculate the state root + let state_root_start_time = Instant::now(); + + let state_provider = state.database.as_ref(); + let hashed_state = state_provider.hashed_post_state(execution_outcome.state()); + let (state_root, trie_output) = { + state + .database + .as_ref() + .state_root_with_updates(hashed_state.clone()) + .inspect_err(|err| { + warn!(target: "payload_builder", + parent_header=%ctx.parent().hash(), + %err, + "failed to calculate state root for payload" + ); + })? + }; + + ctx.metrics + .state_root_calculation_duration + .record(state_root_start_time.elapsed()); + + let (withdrawals_root, requests_hash) = if ctx.is_isthmus_active() { + // withdrawals root field in block header is used for storage root of L2 predeploy + // `l2tol1-message-passer` + ( + Some( + isthmus::withdrawals_root(execution_outcome.state(), state.database.as_ref()) + .map_err(PayloadBuilderError::other)?, + ), + Some(EMPTY_REQUESTS_HASH), + ) + } else if ctx.is_canyon_active() { + (Some(EMPTY_WITHDRAWALS), None) + } else { + (None, None) + }; + + // create the block header + let transactions_root = proofs::calculate_transaction_root(&info.executed_transactions); + + // OP doesn't support blobs/EIP-4844. + // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions + // Need [Some] or [None] based on hardfork to match block hash. 
+ let (excess_blob_gas, blob_gas_used) = ctx.blob_fields(); + let extra_data = ctx.extra_data()?; + + let header = Header { + parent_hash: ctx.parent().hash(), + ommers_hash: EMPTY_OMMER_ROOT_HASH, + beneficiary: ctx.evm_env.block_env.beneficiary, + state_root, + transactions_root, + receipts_root, + withdrawals_root, + logs_bloom, + timestamp: ctx.attributes().payload_attributes.timestamp, + mix_hash: ctx.attributes().payload_attributes.prev_randao, + nonce: BEACON_NONCE.into(), + base_fee_per_gas: Some(ctx.base_fee()), + number: ctx.parent().number + 1, + gas_limit: ctx.block_gas_limit(), + difficulty: U256::ZERO, + gas_used: info.cumulative_gas_used, + extra_data, + parent_beacon_block_root: ctx.attributes().payload_attributes.parent_beacon_block_root, + blob_gas_used, + excess_blob_gas, + requests_hash, + }; + + // seal the block + let block = alloy_consensus::Block::::new( + header, + BlockBody { + transactions: info.executed_transactions, + ommers: vec![], + withdrawals: ctx.withdrawals().cloned(), + }, + ); + + let sealed_block = Arc::new(block.seal_slow()); + info!(target: "payload_builder", id=%ctx.attributes().payload_id(), "sealed built block"); + + // create the executed block data + let executed: ExecutedBlockWithTrieUpdates = ExecutedBlockWithTrieUpdates { + block: ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::< + alloy_consensus::Block, + >::new_sealed( + sealed_block.as_ref().clone(), info.executed_senders + )), + execution_output: Arc::new(execution_outcome), + hashed_state: Arc::new(hashed_state), + }, + trie: Arc::new(trie_output), + }; + + let no_tx_pool = ctx.attributes().no_tx_pool; + + let payload = OpBuiltPayload::new( + ctx.payload_id(), + sealed_block, + info.total_fees, + Some(executed), + ); + + ctx.metrics + .payload_byte_size + .record(payload.block().size() as f64); + + if no_tx_pool { + // if `no_tx_pool` is set only transactions from the payload attributes will be included + // in the payload. In other words, the payload is deterministic and we can + // freeze it once we've successfully built it. + Ok(BuildOutcomeKind::Freeze(payload)) + } else { + Ok(BuildOutcomeKind::Better { payload }) + } + } +} + +fn estimate_gas_for_builder_tx(input: Vec) -> u64 { + // Count zero and non-zero bytes + let (zero_bytes, nonzero_bytes) = input.iter().fold((0, 0), |(zeros, nonzeros), &byte| { + if byte == 0 { + (zeros + 1, nonzeros) + } else { + (zeros, nonzeros + 1) + } + }); + + // Calculate gas cost (4 gas per zero byte, 16 gas per non-zero byte) + let zero_cost = zero_bytes * 4; + let nonzero_cost = nonzero_bytes * 16; + + // Tx gas should be not less than floor gas https://eips.ethereum.org/EIPS/eip-7623 + let tokens_in_calldata = zero_bytes + nonzero_bytes * 4; + let floor_gas = 21_000 + tokens_in_calldata * TOTAL_COST_FLOOR_PER_TOKEN; + + std::cmp::max(zero_cost + nonzero_cost + 21_000, floor_gas) +} diff --git a/crates/op-rbuilder/src/main.rs b/crates/op-rbuilder/src/main.rs index 5477f1d27..21f63a7b7 100644 --- a/crates/op-rbuilder/src/main.rs +++ b/crates/op-rbuilder/src/main.rs @@ -1,6 +1,6 @@ -use args::CliExt; -use clap::Parser; -use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli}; +use args::*; +use builders::{BuilderConfig, BuilderMode, FlashblocksBuilder, StandardBuilder}; +use core::fmt::Debug; use reth_optimism_node::{ node::{OpAddOnsBuilder, OpPoolBuilder}, OpNode, @@ -9,25 +9,15 @@ use reth_transaction_pool::TransactionPool; /// CLI argument parsing. 
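// Editor's note (illustrative, not part of the patch): a worked example for the
// `estimate_gas_for_builder_tx` helper in the standard payload builder hunk above.
// For a builder message such as b"Block Number: 12345678" (22 bytes, all non-zero):
//   intrinsic cost  = 21_000 + 4 * 0 + 16 * 22                      = 21_352
//   calldata tokens = zero_bytes + 4 * nonzero_bytes = 0 + 4 * 22   = 88
//   EIP-7623 floor  = 21_000 + 88 * TOTAL_COST_FLOOR_PER_TOKEN (10) = 21_880
// The helper returns max(21_352, 21_880) = 21_880, so the floor gas dominates for
// short, all-non-zero builder messages and is the amount reserved for the builder tx.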
pub mod args; -pub mod generator; +mod builders; mod metrics; mod monitor_tx_pool; -#[cfg(feature = "flashblocks")] -pub mod payload_builder; mod primitives; mod revert_protection; +mod traits; mod tx; mod tx_signer; -#[cfg(not(feature = "flashblocks"))] -mod payload_builder_vanilla; - -#[cfg(not(feature = "flashblocks"))] -use payload_builder_vanilla::CustomOpPayloadBuilder; - -#[cfg(feature = "flashblocks")] -use payload_builder::CustomOpPayloadBuilder; - use metrics::{ VersionInfo, BUILD_PROFILE_NAME, CARGO_PKG_VERSION, VERGEN_BUILD_TIMESTAMP, VERGEN_CARGO_FEATURES, VERGEN_CARGO_TARGET_TRIPLE, VERGEN_GIT_SHA, @@ -41,86 +31,104 @@ use tx::FBPooledTransaction; #[global_allocator] static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; +const VERSION: VersionInfo = VersionInfo { + version: CARGO_PKG_VERSION, + build_timestamp: VERGEN_BUILD_TIMESTAMP, + cargo_features: VERGEN_CARGO_FEATURES, + git_sha: VERGEN_GIT_SHA, + target_triple: VERGEN_CARGO_TARGET_TRIPLE, + build_profile: BUILD_PROFILE_NAME, +}; + fn main() { - let version = VersionInfo { - version: CARGO_PKG_VERSION, - build_timestamp: VERGEN_BUILD_TIMESTAMP, - cargo_features: VERGEN_CARGO_FEATURES, - git_sha: VERGEN_GIT_SHA, - target_triple: VERGEN_CARGO_TARGET_TRIPLE, - build_profile: BUILD_PROFILE_NAME, + let cli = Cli::parsed(); + cli.logs + .init_tracing() + .expect("Failed to initialize tracing"); + + match cli.builder_mode() { + BuilderMode::Standard => { + tracing::info!("Starting OP builder in standard mode"); + start_builder_node::(cli); + } + BuilderMode::Flashblocks => { + tracing::info!("Starting OP builder in flashblocks mode"); + start_builder_node::(cli); + } }; +} - Cli::::parse() - .populate_defaults() - .run(|builder, builder_args| async move { - let rollup_args = builder_args.rollup_args; +/// Starts the OP builder node with a given payload builder implementation. +fn start_builder_node(cli: Cli) +where + BuilderConfig<::Config>: TryFrom, + ::Config> as TryFrom>::Error: + Debug, +{ + cli.run(|builder, builder_args| async move { + let builder_config = BuilderConfig::::try_from(builder_args.clone()) + .expect("Failed to convert rollup args to builder config"); - let op_node = OpNode::new(rollup_args.clone()); - let handle = builder - .with_types::() - .with_components( - op_node - .components() - .pool( - OpPoolBuilder::::default() - .with_enable_tx_conditional( - // Revert protection uses the same internal pool logic as conditional transactions - // to garbage collect transactions out of the bundle range. 
- rollup_args.enable_tx_conditional - || builder_args.enable_revert_protection, - ) - .with_supervisor( - rollup_args.supervisor_http.clone(), - rollup_args.supervisor_safety_level, - ), - ) - .payload(CustomOpPayloadBuilder::new( - builder_args.builder_signer, - std::time::Duration::from_secs(builder_args.extra_block_deadline_secs), - builder_args.flashblocks_ws_url, - builder_args.chain_block_time, - builder_args.flashblock_block_time, - )), - ) - .with_add_ons( - OpAddOnsBuilder::default() - .with_sequencer(rollup_args.sequencer.clone()) - .with_enable_tx_conditional(rollup_args.enable_tx_conditional) - .build(), - ) - .extend_rpc_modules(move |ctx| { - if builder_args.enable_revert_protection { - tracing::info!("Revert protection enabled"); + let rollup_args = builder_args.rollup_args; + let op_node = OpNode::new(rollup_args.clone()); + let handle = builder + .with_types::() + .with_components( + op_node + .components() + .pool( + OpPoolBuilder::::default() + .with_enable_tx_conditional( + // Revert protection uses the same internal pool logic as conditional transactions + // to garbage collect transactions out of the bundle range. + rollup_args.enable_tx_conditional + || builder_args.enable_revert_protection, + ) + .with_supervisor( + rollup_args.supervisor_http.clone(), + rollup_args.supervisor_safety_level, + ), + ) + .payload(B::new_service(builder_config)?), + ) + .with_add_ons( + OpAddOnsBuilder::default() + .with_sequencer(rollup_args.sequencer.clone()) + .with_enable_tx_conditional(rollup_args.enable_tx_conditional) + .build(), + ) + .extend_rpc_modules(move |ctx| { + if builder_args.enable_revert_protection { + tracing::info!("Revert protection enabled"); - let pool = ctx.pool().clone(); - let provider = ctx.provider().clone(); - let revert_protection_ext = RevertProtectionExt::new(pool, provider); + let pool = ctx.pool().clone(); + let provider = ctx.provider().clone(); + let revert_protection_ext = RevertProtectionExt::new(pool, provider); - ctx.modules - .merge_configured(revert_protection_ext.into_rpc())?; - } + ctx.modules + .merge_configured(revert_protection_ext.into_rpc())?; + } - Ok(()) - }) - .on_node_started(move |ctx| { - version.register_version_metrics(); - if builder_args.log_pool_transactions { - tracing::info!("Logging pool transactions"); - ctx.task_executor.spawn_critical( - "txlogging", - Box::pin(async move { - monitor_tx_pool(ctx.pool.all_transactions_event_listener()).await; - }), - ); - } + Ok(()) + }) + .on_node_started(move |ctx| { + VERSION.register_version_metrics(); + if builder_args.log_pool_transactions { + tracing::info!("Logging pool transactions"); + ctx.task_executor.spawn_critical( + "txlogging", + Box::pin(async move { + monitor_tx_pool(ctx.pool.all_transactions_event_listener()).await; + }), + ); + } - Ok(()) - }) - .launch() - .await?; + Ok(()) + }) + .launch() + .await?; - handle.node_exit_future.await - }) - .unwrap(); + handle.node_exit_future.await + }) + .unwrap(); } diff --git a/crates/op-rbuilder/src/metrics.rs b/crates/op-rbuilder/src/metrics.rs index e3efa5e0c..4b44410bf 100644 --- a/crates/op-rbuilder/src/metrics.rs +++ b/crates/op-rbuilder/src/metrics.rs @@ -28,19 +28,14 @@ pub struct OpRBuilderMetrics { /// Block built success pub block_built_success: Counter, /// Number of flashblocks added to block (Total per block) - #[cfg(feature = "flashblocks")] pub flashblock_count: Histogram, /// Number of messages sent - #[cfg(feature = "flashblocks")] pub messages_sent_count: Counter, /// Total duration of building a block - 
#[cfg(feature = "flashblocks")] pub total_block_built_duration: Histogram, /// Flashblock build duration - #[cfg(feature = "flashblocks")] pub flashblock_build_duration: Histogram, /// Number of invalid blocks - #[cfg(feature = "flashblocks")] pub invalid_blocks_count: Counter, /// Duration of fetching transactions from the pool pub transaction_pool_fetch_duration: Histogram, diff --git a/crates/op-rbuilder/src/payload_builder.rs b/crates/op-rbuilder/src/payload_builder.rs deleted file mode 100644 index 053165c36..000000000 --- a/crates/op-rbuilder/src/payload_builder.rs +++ /dev/null @@ -1,1269 +0,0 @@ -use crate::{ - generator::{BlockCell, BlockPayloadJobGenerator, BuildArguments, PayloadBuilder}, - metrics::OpRBuilderMetrics, - primitives::reth::ExecutionInfo, - tx_signer::Signer, -}; -use alloy_consensus::{ - constants::EMPTY_WITHDRAWALS, Eip658Value, Header, Transaction, Typed2718, - EMPTY_OMMER_ROOT_HASH, -}; -use alloy_eips::{eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE, Encodable2718}; -use alloy_op_evm::block::receipt_builder::OpReceiptBuilder; -use alloy_primitives::{map::HashMap, Address, Bytes, B256, U256}; -use alloy_rpc_types_engine::PayloadId; -use alloy_rpc_types_eth::Withdrawals; -use futures_util::{FutureExt, SinkExt}; -use op_alloy_consensus::OpDepositReceipt; -use op_revm::OpSpecId; -use reth::{ - builder::{ - components::{PayloadBuilderBuilder, PayloadServiceBuilder}, - node::FullNodeTypes, - BuilderContext, - }, - payload::PayloadBuilderHandle, -}; -use reth_basic_payload_builder::{BasicPayloadJobGeneratorConfig, BuildOutcome, PayloadConfig}; -use reth_chainspec::{ChainSpecProvider, EthChainSpec}; -use reth_evm::{ - env::EvmEnv, eth::receipt_builder::ReceiptBuilderCtx, execute::BlockBuilder, ConfigureEvm, - Database, Evm, EvmError, InvalidTxError, -}; -use reth_execution_types::ExecutionOutcome; -use reth_node_api::{NodePrimitives, NodeTypes, PrimitivesTy, TxTy}; -use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_consensus::{calculate_receipt_root_no_memo_optimism, isthmus}; -use reth_optimism_evm::{OpEvmConfig, OpNextBlockEnvAttributes}; -use reth_optimism_forks::OpHardforks; -use reth_optimism_node::OpEngineTypes; -use reth_optimism_payload_builder::{ - error::OpPayloadBuilderError, - payload::{OpBuiltPayload, OpPayloadBuilderAttributes}, -}; -use reth_optimism_primitives::{OpPrimitives, OpReceipt, OpTransactionSigned}; -use reth_optimism_txpool::OpPooledTx; -use reth_payload_builder::PayloadBuilderService; -use reth_payload_builder_primitives::PayloadBuilderError; -use reth_payload_primitives::PayloadBuilderAttributes; -use reth_payload_util::{BestPayloadTransactions, PayloadTransactions}; -use reth_primitives::{BlockBody, SealedHeader}; -use reth_primitives_traits::{proofs, Block as _, InMemorySize, SignedTransaction}; -use reth_provider::{ - CanonStateSubscriptions, HashedPostStateProvider, ProviderError, StateProviderFactory, - StateRootProvider, StorageRootProvider, -}; -use reth_revm::database::StateProviderDatabase; -use reth_transaction_pool::{BestTransactionsAttributes, PoolTransaction, TransactionPool}; -use revm::{ - context::{result::ResultAndState, Block as _}, - database::{states::bundle_state::BundleRetention, BundleState, State}, - DatabaseCommit, -}; -use rollup_boost::primitives::{ - ExecutionPayloadBaseV1, ExecutionPayloadFlashblockDeltaV1, FlashblocksPayloadV1, -}; -use serde::{Deserialize, Serialize}; -use std::{ - sync::{Arc, Mutex}, - time::{Duration, Instant}, -}; -use tokio::{ - net::{TcpListener, TcpStream}, - sync::mpsc, 
-}; -use tokio_tungstenite::{accept_async, WebSocketStream}; -use tokio_util::sync::CancellationToken; -use tracing::{debug, error, trace, warn}; - -/// Flashblocks specific payload building errors. -#[derive(Debug, thiserror::Error)] -pub enum FlashblockPayloadBuilderError { - /// Thrown when the job was cancelled. - #[error("error sending build signal")] - SendBuildSignalError, -} - -#[derive(Debug, Serialize, Deserialize)] -struct FlashblocksMetadata { - receipts: HashMap, - new_account_balances: HashMap, - block_number: u64, -} - -#[derive(Debug, Clone, Default)] -#[non_exhaustive] -pub struct CustomOpPayloadBuilder { - #[expect(dead_code)] - builder_signer: Option, - flashblocks_ws_url: String, - chain_block_time: u64, - flashblock_block_time: u64, - extra_block_deadline: std::time::Duration, -} - -impl CustomOpPayloadBuilder { - pub fn new( - builder_signer: Option, - extra_block_deadline: std::time::Duration, - flashblocks_ws_url: String, - chain_block_time: u64, - flashblock_block_time: u64, - ) -> Self { - Self { - builder_signer, - flashblocks_ws_url, - chain_block_time, - flashblock_block_time, - extra_block_deadline, - } - } -} - -impl PayloadBuilderBuilder for CustomOpPayloadBuilder -where - Node: FullNodeTypes< - Types: NodeTypes< - Payload = OpEngineTypes, - ChainSpec = OpChainSpec, - Primitives = OpPrimitives, - >, - >, - Pool: TransactionPool>> - + Unpin - + 'static, - Evm: ConfigureEvm< - Primitives = PrimitivesTy, - NextBlockEnvCtx = OpNextBlockEnvAttributes, - > + 'static, -{ - type PayloadBuilder = OpPayloadBuilder; - - async fn build_payload_builder( - self, - ctx: &BuilderContext, - pool: Pool, - _evm_config: Evm, - ) -> eyre::Result { - Ok(OpPayloadBuilder::new( - OpEvmConfig::optimism(ctx.chain_spec()), - pool, - ctx.provider().clone(), - self.flashblocks_ws_url.clone(), - self.chain_block_time, - self.flashblock_block_time, - )) - } -} - -impl PayloadServiceBuilder for CustomOpPayloadBuilder -where - Node: FullNodeTypes< - Types: NodeTypes< - Payload = OpEngineTypes, - ChainSpec = OpChainSpec, - Primitives = OpPrimitives, - >, - >, - Pool: TransactionPool>> - + Unpin - + 'static, - ::Transaction: OpPooledTx, - Evm: ConfigureEvm< - Primitives = PrimitivesTy, - NextBlockEnvCtx = OpNextBlockEnvAttributes, - > + 'static, -{ - async fn spawn_payload_builder_service( - self, - ctx: &BuilderContext, - pool: Pool, - evm_config: Evm, - ) -> eyre::Result::Payload>> { - tracing::info!("Spawning a custom payload builder"); - let extra_block_deadline = self.extra_block_deadline; - let payload_builder = self.build_payload_builder(ctx, pool, evm_config).await?; - let payload_job_config = BasicPayloadJobGeneratorConfig::default(); - - let payload_generator = BlockPayloadJobGenerator::with_builder( - ctx.provider().clone(), - ctx.task_executor().clone(), - payload_job_config, - payload_builder, - true, - extra_block_deadline, - ); - - let (payload_service, payload_builder) = - PayloadBuilderService::new(payload_generator, ctx.provider().canonical_state_stream()); - - ctx.task_executor() - .spawn_critical("custom payload builder service", Box::pin(payload_service)); - - tracing::info!("Custom payload service started"); - - Ok(payload_builder) - } -} - -impl reth_basic_payload_builder::PayloadBuilder for OpPayloadBuilder -where - Pool: Clone + Send + Sync, - Client: Clone + Send + Sync, -{ - type Attributes = OpPayloadBuilderAttributes; - type BuiltPayload = OpBuiltPayload; - - fn try_build( - &self, - _args: reth_basic_payload_builder::BuildArguments, - ) -> Result, 
PayloadBuilderError> { - unimplemented!() - } - - fn build_empty_payload( - &self, - _config: reth_basic_payload_builder::PayloadConfig< - Self::Attributes, - reth_basic_payload_builder::HeaderForPayload, - >, - ) -> Result { - unimplemented!() - } -} - -/// Optimism's payload builder -#[derive(Debug, Clone)] -pub struct OpPayloadBuilder { - /// The type responsible for creating the evm. - pub evm_config: OpEvmConfig, - /// The transaction pool - pub pool: Pool, - /// Node client - pub client: Client, - /// Channel sender for publishing messages - pub tx: mpsc::UnboundedSender, - /// chain block time - pub chain_block_time: u64, - /// Flashblock block time - pub flashblock_block_time: u64, - /// Number of flashblocks per block - pub flashblocks_per_block: u64, - /// The metrics for the builder - pub metrics: OpRBuilderMetrics, -} - -impl OpPayloadBuilder { - /// `OpPayloadBuilder` constructor. - pub fn new( - evm_config: OpEvmConfig, - pool: Pool, - client: Client, - flashblocks_ws_url: String, - chain_block_time: u64, - flashblock_block_time: u64, - ) -> Self { - let (tx, rx) = mpsc::unbounded_channel(); - let subscribers = Arc::new(Mutex::new(Vec::new())); - - Self::publish_task(rx, subscribers.clone()); - - tokio::spawn(async move { - Self::start_ws(subscribers, &flashblocks_ws_url).await; - }); - - Self { - evm_config, - pool, - client, - tx, - chain_block_time, - flashblock_block_time, - flashblocks_per_block: chain_block_time / flashblock_block_time, - metrics: Default::default(), - } - } - - /// Start the WebSocket server - pub async fn start_ws(subscribers: Arc>>>, addr: &str) { - let listener = TcpListener::bind(addr).await.unwrap(); - let subscribers = subscribers.clone(); - - tracing::info!("Starting WebSocket server on {}", addr); - - while let Ok((stream, _)) = listener.accept().await { - tracing::info!("Accepted websocket connection"); - let subscribers = subscribers.clone(); - - tokio::spawn(async move { - match accept_async(stream).await { - Ok(ws_stream) => { - let mut subs = subscribers.lock().unwrap(); - subs.push(ws_stream); - } - Err(e) => eprintln!("Error accepting websocket connection: {}", e), - } - }); - } - } - - /// Background task that handles publishing messages to WebSocket subscribers - fn publish_task( - mut rx: mpsc::UnboundedReceiver, - subscribers: Arc>>>, - ) { - tokio::spawn(async move { - while let Some(message) = rx.recv().await { - let mut subscribers = subscribers.lock().unwrap(); - - // Remove disconnected subscribers and send message to connected ones - subscribers.retain_mut(|ws_stream| { - let message = message.clone(); - async move { - ws_stream - .send(tokio_tungstenite::tungstenite::Message::Text( - message.into(), - )) - .await - .is_ok() - } - .now_or_never() - .unwrap_or(false) - }); - } - }); - } -} - -impl OpPayloadBuilder -where - Pool: TransactionPool>, - Client: StateProviderFactory + ChainSpecProvider, -{ - /// Send a message to be published - pub fn send_message(&self, message: String) -> Result<(), Box> { - self.tx.send(message)?; - self.metrics.messages_sent_count.increment(1); - Ok(()) - } - - /// Constructs an Optimism payload from the transactions sent via the - /// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in - /// the payload attributes, the transaction pool will be ignored and the only transactions - /// included in the payload will be those sent through the attributes. 
- /// - /// Given build arguments including an Optimism client, transaction pool, - /// and configuration, this function creates a transaction payload. Returns - /// a result indicating success with the payload or an error in case of failure. - fn build_payload( - &self, - args: BuildArguments, OpBuiltPayload>, - best_payload: BlockCell, - ) -> Result<(), PayloadBuilderError> { - let block_build_start_time = Instant::now(); - let BuildArguments { config, cancel, .. } = args; - - let chain_spec = self.client.chain_spec(); - let timestamp = config.attributes.timestamp(); - let block_env_attributes = OpNextBlockEnvAttributes { - timestamp, - suggested_fee_recipient: config.attributes.suggested_fee_recipient(), - prev_randao: config.attributes.prev_randao(), - gas_limit: config - .attributes - .gas_limit - .unwrap_or(config.parent_header.gas_limit), - parent_beacon_block_root: config - .attributes - .payload_attributes - .parent_beacon_block_root, - extra_data: if chain_spec.is_holocene_active_at_timestamp(timestamp) { - config - .attributes - .get_holocene_extra_data(chain_spec.base_fee_params_at_timestamp(timestamp)) - .map_err(PayloadBuilderError::other)? - } else { - Default::default() - }, - }; - - let evm_env = self - .evm_config - .next_evm_env(&config.parent_header, &block_env_attributes) - .map_err(PayloadBuilderError::other)?; - - let ctx = OpPayloadBuilderCtx { - evm_config: self.evm_config.clone(), - chain_spec: self.client.chain_spec(), - config, - evm_env, - block_env_attributes, - cancel, - metrics: Default::default(), - }; - - let state_provider = self.client.state_by_block_hash(ctx.parent().hash())?; - let state = StateProviderDatabase::new(&state_provider); - - // 1. execute the pre steps and seal an early block with that - let sequencer_tx_start_time = Instant::now(); - let mut db = State::builder() - .with_database(state) - .with_bundle_update() - .build(); - - let mut info = execute_pre_steps(&mut db, &ctx)?; - ctx.metrics - .sequencer_tx_duration - .record(sequencer_tx_start_time.elapsed()); - - let (payload, fb_payload, mut bundle_state) = build_block(db, &ctx, &mut info)?; - - best_payload.set(payload.clone()); - let _ = self.send_message(serde_json::to_string(&fb_payload).unwrap_or_default()); - - tracing::info!(target: "payload_builder", "Fallback block built"); - ctx.metrics - .payload_num_tx - .record(info.executed_transactions.len() as f64); - - if ctx.attributes().no_tx_pool { - tracing::info!( - target: "payload_builder", - "No transaction pool, skipping transaction pool processing", - ); - - self.metrics - .total_block_built_duration - .record(block_build_start_time.elapsed()); - - // return early since we don't need to build a block with transactions from the pool - return Ok(()); - } - - let gas_per_batch = ctx.block_gas_limit() / self.flashblocks_per_block; - - let mut total_gas_per_batch = gas_per_batch; - - let mut flashblock_count = 0; - // Create a channel to coordinate flashblock building - let (build_tx, mut build_rx) = mpsc::channel(1); - - // Spawn the timer task that signals when to build a new flashblock - let cancel_clone = ctx.cancel.clone(); - let flashblock_block_time = self.flashblock_block_time; - tokio::spawn(async move { - let mut interval = tokio::time::interval(Duration::from_millis(flashblock_block_time)); - loop { - tokio::select! 
{ - // Add a cancellation check that only runs every 10ms to avoid tight polling - _ = tokio::time::sleep(Duration::from_millis(10)) => { - if cancel_clone.is_cancelled() { - tracing::info!(target: "payload_builder", "Job cancelled during sleep, stopping payload building"); - drop(build_tx); - break; - } - } - _ = interval.tick() => { - if let Err(err) = build_tx.send(()).await { - error!(target: "payload_builder", "Error sending build signal: {}", err); - break; - } - } - } - } - }); - - // Process flashblocks in a blocking loop - loop { - // Block on receiving a message, break on cancellation or closed channel - let received = tokio::task::block_in_place(|| { - // Get runtime handle - let rt = tokio::runtime::Handle::current(); - - // Run the async operation to completion, blocking the current thread - rt.block_on(async { - // Check for cancellation first - if ctx.cancel.is_cancelled() { - tracing::info!( - target: "payload_builder", - "Job cancelled, stopping payload building", - ); - return None; - } - - // Wait for next message - build_rx.recv().await - }) - }); - - // Exit loop if channel closed or cancelled - match received { - Some(()) => { - if flashblock_count >= self.flashblocks_per_block { - tracing::info!( - target: "payload_builder", - "Skipping flashblock reached target={} idx={}", - self.flashblocks_per_block, - flashblock_count - ); - continue; - } - - // Continue with flashblock building - tracing::info!( - target: "payload_builder", - "Building flashblock {} {}", - flashblock_count, - total_gas_per_batch, - ); - - let flashblock_build_start_time = Instant::now(); - let state = StateProviderDatabase::new(&state_provider); - - let mut db = State::builder() - .with_database(state) - .with_bundle_update() - .with_bundle_prestate(bundle_state) - .build(); - - let best_txs_start_time = Instant::now(); - let best_txs = BestPayloadTransactions::new( - self.pool - .best_transactions_with_attributes(ctx.best_transaction_attributes()), - ); - ctx.metrics - .transaction_pool_fetch_duration - .record(best_txs_start_time.elapsed()); - - let tx_execution_start_time = Instant::now(); - ctx.execute_best_transactions( - &mut info, - &mut db, - best_txs, - total_gas_per_batch.min(ctx.block_gas_limit()), - )?; - ctx.metrics - .payload_tx_simulation_duration - .record(tx_execution_start_time.elapsed()); - - if ctx.cancel.is_cancelled() { - tracing::info!( - target: "payload_builder", - "Job cancelled, stopping payload building", - ); - // if the job was cancelled, stop - return Ok(()); - } - - let total_block_built_duration = Instant::now(); - let build_result = build_block(db, &ctx, &mut info); - ctx.metrics - .total_block_built_duration - .record(total_block_built_duration.elapsed()); - - // Handle build errors with match pattern - match build_result { - Err(err) => { - // Track invalid/bad block - self.metrics.invalid_blocks_count.increment(1); - error!(target: "payload_builder", "Failed to build block {}, flashblock {}: {}", ctx.block_number(), flashblock_count, err); - // Return the error - return Err(err); - } - Ok((new_payload, mut fb_payload, new_bundle_state)) => { - fb_payload.index = flashblock_count + 1; // we do this because the fallback block is index 0 - fb_payload.base = None; - - if let Err(err) = self.send_message( - serde_json::to_string(&fb_payload).unwrap_or_default(), - ) { - error!(target: "payload_builder", "Failed to send flashblock message: {}", err); - } - - // Record flashblock build duration - self.metrics - .flashblock_build_duration - 
.record(flashblock_build_start_time.elapsed()); - ctx.metrics - .payload_byte_size - .record(new_payload.block().size() as f64); - ctx.metrics - .payload_num_tx - .record(info.executed_transactions.len() as f64); - - best_payload.set(new_payload.clone()); - // Update bundle_state for next iteration - bundle_state = new_bundle_state; - total_gas_per_batch += gas_per_batch; - flashblock_count += 1; - tracing::info!(target: "payload_builder", "Flashblock {} built", flashblock_count); - } - } - } - None => { - // Exit loop if channel closed or cancelled - self.metrics.block_built_success.increment(1); - self.metrics - .flashblock_count - .record(flashblock_count as f64); - return Ok(()); - } - } - } - } -} - -impl PayloadBuilder for OpPayloadBuilder -where - Client: StateProviderFactory + ChainSpecProvider + Clone, - Pool: TransactionPool>, -{ - type Attributes = OpPayloadBuilderAttributes; - type BuiltPayload = OpBuiltPayload; - - fn try_build( - &self, - args: BuildArguments, - best_payload: BlockCell, - ) -> Result<(), PayloadBuilderError> { - self.build_payload(args, best_payload) - } -} - -pub fn build_block( - mut state: State, - ctx: &OpPayloadBuilderCtx, - info: &mut ExecutionInfo, -) -> Result<(OpBuiltPayload, FlashblocksPayloadV1, BundleState), PayloadBuilderError> -where - ChainSpec: EthChainSpec + OpHardforks, - DB: Database + AsRef
<P>
, - P: StateRootProvider + HashedPostStateProvider + StorageRootProvider, -{ - // TODO: We must run this only once per block, but we are running it on every flashblock - // merge all transitions into bundle state, this would apply the withdrawal balance changes - // and 4788 contract call - let state_merge_start_time = Instant::now(); - state.merge_transitions(BundleRetention::Reverts); - ctx.metrics - .state_transition_merge_duration - .record(state_merge_start_time.elapsed()); - - let new_bundle = state.take_bundle(); - - let block_number = ctx.block_number(); - assert_eq!(block_number, ctx.parent().number + 1); - - let execution_outcome = ExecutionOutcome::new( - new_bundle.clone(), - vec![info.receipts.clone()], - block_number, - vec![], - ); - - let receipts_root = execution_outcome - .generic_receipts_root_slow(block_number, |receipts| { - calculate_receipt_root_no_memo_optimism( - receipts, - &ctx.chain_spec, - ctx.attributes().timestamp(), - ) - }) - .expect("Number is in range"); - let logs_bloom = execution_outcome - .block_logs_bloom(block_number) - .expect("Number is in range"); - - // // calculate the state root - let state_root_start_time = Instant::now(); - let state_provider = state.database.as_ref(); - let hashed_state = state_provider.hashed_post_state(execution_outcome.state()); - let (state_root, _trie_output) = { - state - .database - .as_ref() - .state_root_with_updates(hashed_state.clone()) - .inspect_err(|err| { - warn!(target: "payload_builder", - parent_header=%ctx.parent().hash(), - %err, - "failed to calculate state root for payload" - ); - })? - }; - ctx.metrics - .state_root_calculation_duration - .record(state_root_start_time.elapsed()); - - let mut requests_hash = None; - let withdrawals_root = if ctx - .chain_spec - .is_isthmus_active_at_timestamp(ctx.attributes().timestamp()) - { - // always empty requests hash post isthmus - requests_hash = Some(EMPTY_REQUESTS_HASH); - - // withdrawals root field in block header is used for storage root of L2 predeploy - // `l2tol1-message-passer` - Some( - isthmus::withdrawals_root(execution_outcome.state(), state.database.as_ref()) - .map_err(PayloadBuilderError::other)?, - ) - } else if ctx - .chain_spec - .is_canyon_active_at_timestamp(ctx.attributes().timestamp()) - { - Some(EMPTY_WITHDRAWALS) - } else { - None - }; - - // create the block header - let transactions_root = proofs::calculate_transaction_root(&info.executed_transactions); - - // OP doesn't support blobs/EIP-4844. - // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions - // Need [Some] or [None] based on hardfork to match block hash. 
- let (excess_blob_gas, blob_gas_used) = ctx.blob_fields(); - let extra_data = ctx.extra_data()?; - - let header = Header { - parent_hash: ctx.parent().hash(), - ommers_hash: EMPTY_OMMER_ROOT_HASH, - beneficiary: ctx.evm_env.block_env.beneficiary, - state_root, - transactions_root, - receipts_root, - withdrawals_root, - logs_bloom, - timestamp: ctx.attributes().payload_attributes.timestamp, - mix_hash: ctx.attributes().payload_attributes.prev_randao, - nonce: BEACON_NONCE.into(), - base_fee_per_gas: Some(ctx.base_fee()), - number: ctx.parent().number + 1, - gas_limit: ctx.block_gas_limit(), - difficulty: U256::ZERO, - gas_used: info.cumulative_gas_used, - extra_data, - parent_beacon_block_root: ctx.attributes().payload_attributes.parent_beacon_block_root, - blob_gas_used, - excess_blob_gas, - requests_hash, - }; - - // seal the block - let block = alloy_consensus::Block::::new( - header, - BlockBody { - transactions: info.executed_transactions.clone(), - ommers: vec![], - withdrawals: ctx.withdrawals().cloned(), - }, - ); - - let sealed_block = Arc::new(block.seal_slow()); - debug!(target: "payload_builder", ?sealed_block, "sealed built block"); - - let block_hash = sealed_block.hash(); - - // pick the new transactions from the info field and update the last flashblock index - let new_transactions = info.executed_transactions[info.last_flashblock_index..].to_vec(); - - let new_transactions_encoded = new_transactions - .clone() - .into_iter() - .map(|tx| tx.encoded_2718().into()) - .collect::>(); - - let new_receipts = info.receipts[info.last_flashblock_index..].to_vec(); - info.last_flashblock_index = info.executed_transactions.len(); - let receipts_with_hash = new_transactions - .iter() - .zip(new_receipts.iter()) - .map(|(tx, receipt)| (tx.tx_hash(), receipt.clone())) - .collect::>(); - let new_account_balances = new_bundle - .state - .iter() - .filter_map(|(address, account)| account.info.as_ref().map(|info| (*address, info.balance))) - .collect::>(); - - let metadata: FlashblocksMetadata = FlashblocksMetadata { - receipts: receipts_with_hash, - new_account_balances, - block_number: ctx.parent().number + 1, - }; - - // Prepare the flashblocks message - let fb_payload = FlashblocksPayloadV1 { - payload_id: ctx.payload_id(), - index: 0, - base: Some(ExecutionPayloadBaseV1 { - parent_beacon_block_root: ctx - .attributes() - .payload_attributes - .parent_beacon_block_root - .unwrap(), - parent_hash: ctx.parent().hash(), - fee_recipient: ctx.attributes().suggested_fee_recipient(), - prev_randao: ctx.attributes().payload_attributes.prev_randao, - block_number: ctx.parent().number + 1, - gas_limit: ctx.block_gas_limit(), - timestamp: ctx.attributes().payload_attributes.timestamp, - extra_data: ctx.extra_data()?, - base_fee_per_gas: ctx.base_fee().try_into().unwrap(), - }), - diff: ExecutionPayloadFlashblockDeltaV1 { - state_root, - receipts_root, - logs_bloom, - gas_used: info.cumulative_gas_used, - block_hash, - transactions: new_transactions_encoded, - withdrawals: ctx.withdrawals().cloned().unwrap_or_default().to_vec(), - withdrawals_root, - }, - metadata: serde_json::to_value(&metadata).unwrap_or_default(), - }; - - Ok(( - OpBuiltPayload::new( - ctx.payload_id(), - sealed_block, - info.total_fees, - // This must be set to NONE for now because we are doing merge transitions on every flashblock - // when it should only happen once per block, thus, it returns a confusing state back to op-reth. 
- // We can live without this for now because Op syncs up the executed block using new_payload - // calls, but eventually we would want to return the executed block here. - None, - ), - fb_payload, - new_bundle, - )) -} - -fn execute_pre_steps( - state: &mut State, - ctx: &OpPayloadBuilderCtx, -) -> Result, PayloadBuilderError> -where - ChainSpec: EthChainSpec + OpHardforks, - DB: Database, -{ - // 1. apply pre-execution changes - ctx.evm_config - .builder_for_next_block(state, ctx.parent(), ctx.block_env_attributes.clone()) - .map_err(PayloadBuilderError::other)? - .apply_pre_execution_changes()?; - - // 3. execute sequencer transactions - let info = ctx.execute_sequencer_transactions(state)?; - - Ok(info) -} - -/// A type that returns a the [`PayloadTransactions`] that should be included in the pool. -pub trait OpPayloadTransactions: Clone + Send + Sync + Unpin + 'static { - /// Returns an iterator that yields the transaction in the order they should get included in the - /// new payload. - fn best_transactions>( - &self, - pool: Pool, - attr: BestTransactionsAttributes, - ) -> impl PayloadTransactions; -} - -impl OpPayloadTransactions for () { - fn best_transactions>( - &self, - pool: Pool, - attr: BestTransactionsAttributes, - ) -> impl PayloadTransactions { - BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr)) - } -} - -/// Container type that holds all necessities to build a new payload. -#[derive(Debug)] -pub struct OpPayloadBuilderCtx { - /// The type that knows how to perform system calls and configure the evm. - pub evm_config: OpEvmConfig, - /// The chainspec - pub chain_spec: Arc, - /// How to build the payload. - pub config: PayloadConfig>, - /// Evm Settings - pub evm_env: EvmEnv, - /// Block env attributes for the current block. - pub block_env_attributes: OpNextBlockEnvAttributes, - /// Marker to check whether the job has been cancelled. - pub cancel: CancellationToken, - /// The metrics for the builder - pub metrics: OpRBuilderMetrics, -} - -impl OpPayloadBuilderCtx -where - ChainSpec: EthChainSpec + OpHardforks, -{ - /// Returns the parent block the payload will be build on. - pub fn parent(&self) -> &SealedHeader { - &self.config.parent_header - } - - /// Returns the builder attributes. - pub const fn attributes(&self) -> &OpPayloadBuilderAttributes { - &self.config.attributes - } - - /// Returns the withdrawals if shanghai is active. - pub fn withdrawals(&self) -> Option<&Withdrawals> { - self.chain_spec - .is_shanghai_active_at_timestamp(self.attributes().timestamp()) - .then(|| &self.attributes().payload_attributes.withdrawals) - } - - /// Returns the block gas limit to target. - pub fn block_gas_limit(&self) -> u64 { - self.attributes() - .gas_limit - .unwrap_or(self.evm_env.block_env.gas_limit) - } - - /// Returns the block number for the block. - pub fn block_number(&self) -> u64 { - self.evm_env.block_env.number - } - - /// Returns the current base fee - pub fn base_fee(&self) -> u64 { - self.evm_env.block_env.basefee - } - - /// Returns the current blob gas price. - pub fn get_blob_gasprice(&self) -> Option { - self.evm_env - .block_env - .blob_gasprice() - .map(|gasprice| gasprice as u64) - } - - /// Returns the blob fields for the header. - /// - /// This will always return `Some(0)` after ecotone. - pub fn blob_fields(&self) -> (Option, Option) { - // OP doesn't support blobs/EIP-4844. 
- // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions - // Need [Some] or [None] based on hardfork to match block hash. - if self.is_ecotone_active() { - (Some(0), Some(0)) - } else { - (None, None) - } - } - - /// Returns the extra data for the block. - /// - /// After holocene this extracts the extradata from the paylpad - pub fn extra_data(&self) -> Result { - if self.is_holocene_active() { - self.attributes() - .get_holocene_extra_data( - self.chain_spec.base_fee_params_at_timestamp( - self.attributes().payload_attributes.timestamp, - ), - ) - .map_err(PayloadBuilderError::other) - } else { - Ok(Default::default()) - } - } - - /// Returns the current fee settings for transactions from the mempool - pub fn best_transaction_attributes(&self) -> BestTransactionsAttributes { - BestTransactionsAttributes::new(self.base_fee(), self.get_blob_gasprice()) - } - - /// Returns the unique id for this payload job. - pub fn payload_id(&self) -> PayloadId { - self.attributes().payload_id() - } - - /// Returns true if regolith is active for the payload. - pub fn is_regolith_active(&self) -> bool { - self.chain_spec - .is_regolith_active_at_timestamp(self.attributes().timestamp()) - } - - /// Returns true if ecotone is active for the payload. - pub fn is_ecotone_active(&self) -> bool { - self.chain_spec - .is_ecotone_active_at_timestamp(self.attributes().timestamp()) - } - - /// Returns true if canyon is active for the payload. - pub fn is_canyon_active(&self) -> bool { - self.chain_spec - .is_canyon_active_at_timestamp(self.attributes().timestamp()) - } - - /// Returns true if holocene is active for the payload. - pub fn is_holocene_active(&self) -> bool { - self.chain_spec - .is_holocene_active_at_timestamp(self.attributes().timestamp()) - } -} - -impl OpPayloadBuilderCtx -where - ChainSpec: EthChainSpec + OpHardforks, -{ - /// Constructs a receipt for the given transaction. - fn build_receipt( - &self, - ctx: ReceiptBuilderCtx<'_, OpTransactionSigned, E>, - deposit_nonce: Option, - ) -> OpReceipt { - let receipt_builder = self.evm_config.block_executor_factory().receipt_builder(); - match receipt_builder.build_receipt(ctx) { - Ok(receipt) => receipt, - Err(ctx) => { - let receipt = alloy_consensus::Receipt { - // Success flag was added in `EIP-658: Embedding transaction status code - // in receipts`. - status: Eip658Value::Eip658(ctx.result.is_success()), - cumulative_gas_used: ctx.cumulative_gas_used, - logs: ctx.result.into_logs(), - }; - - receipt_builder.build_deposit_receipt(OpDepositReceipt { - inner: receipt, - deposit_nonce, - // The deposit receipt version was introduced in Canyon to indicate an - // update to how receipt hashes should be computed - // when set. The state transition process ensures - // this is only set for post-Canyon deposit - // transactions. - deposit_receipt_version: self.is_canyon_active().then_some(1), - }) - } - } - } - - /// Executes all sequencer transactions that are included in the payload attributes. - pub fn execute_sequencer_transactions( - &self, - db: &mut State, - ) -> Result, PayloadBuilderError> - where - DB: Database, - { - let mut info = ExecutionInfo::with_capacity(self.attributes().transactions.len()); - - let mut evm = self.evm_config.evm_with_env(&mut *db, self.evm_env.clone()); - - for sequencer_tx in &self.attributes().transactions { - // A sequencer's block should never contain blob transactions. 
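`blob_fields()` and `extra_data()` above gate header inputs on hardfork activation: post-Ecotone headers must carry `Some(0)` for the blob-gas fields (OP has no blob transactions), and only post-Holocene payloads carry the encoded EIP-1559 parameters in `extra_data`. A simplified sketch of that gating, with plain booleans standing in for the chain-spec timestamp checks and all names illustrative:

```rust
// Plain booleans stand in for the chain-spec timestamp checks; the encoded
// Holocene EIP-1559 parameters are passed in rather than derived.
fn blob_fields(is_ecotone: bool) -> (Option<u64>, Option<u64>) {
    if is_ecotone {
        (Some(0), Some(0)) // (excess_blob_gas, blob_gas_used) must be zero, not absent
    } else {
        (None, None)
    }
}

fn extra_data(is_holocene: bool, holocene_eip1559_params: Vec<u8>) -> Vec<u8> {
    if is_holocene {
        holocene_eip1559_params // taken from the payload attributes post-Holocene
    } else {
        Vec::new() // pre-Holocene blocks carry empty extra data here
    }
}

fn main() {
    assert_eq!(blob_fields(true), (Some(0), Some(0)));
    assert_eq!(blob_fields(false), (None, None));
    assert!(extra_data(false, vec![0, 0, 0, 8, 0, 0, 0, 2]).is_empty());
    assert_eq!(extra_data(true, vec![0]).len(), 1);
}
```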
- if sequencer_tx.value().is_eip4844() { - return Err(PayloadBuilderError::other( - OpPayloadBuilderError::BlobTransactionRejected, - )); - } - - // Convert the transaction to a [Recovered]. This is - // purely for the purposes of utilizing the `evm_config.tx_env`` function. - // Deposit transactions do not have signatures, so if the tx is a deposit, this - // will just pull in its `from` address. - let sequencer_tx = sequencer_tx - .value() - .clone() - .try_clone_into_recovered() - .map_err(|_| { - PayloadBuilderError::other(OpPayloadBuilderError::TransactionEcRecoverFailed) - })?; - - // Cache the depositor account prior to the state transition for the deposit nonce. - // - // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces - // were not introduced in Bedrock. In addition, regular transactions don't have deposit - // nonces, so we don't need to touch the DB for those. - let depositor_nonce = (self.is_regolith_active() && sequencer_tx.is_deposit()) - .then(|| { - evm.db_mut() - .load_cache_account(sequencer_tx.signer()) - .map(|acc| acc.account_info().unwrap_or_default().nonce) - }) - .transpose() - .map_err(|_| { - PayloadBuilderError::other(OpPayloadBuilderError::AccountLoadFailed( - sequencer_tx.signer(), - )) - })?; - - let ResultAndState { result, state } = match evm.transact(&sequencer_tx) { - Ok(res) => res, - Err(err) => { - if err.is_invalid_tx_err() { - trace!(target: "payload_builder", %err, ?sequencer_tx, "Error in sequencer transaction, skipping."); - continue; - } - // this is an error that we should treat as fatal for this attempt - return Err(PayloadBuilderError::EvmExecutionError(Box::new(err))); - } - }; - - // add gas used by the transaction to cumulative gas used, before creating the receipt - let gas_used = result.gas_used(); - info.cumulative_gas_used += gas_used; - - let ctx = ReceiptBuilderCtx { - tx: sequencer_tx.inner(), - evm: &evm, - result, - state: &state, - cumulative_gas_used: info.cumulative_gas_used, - }; - info.receipts.push(self.build_receipt(ctx, depositor_nonce)); - - // commit changes - evm.db_mut().commit(state); - - // append sender and transaction to the respective lists - info.executed_senders.push(sequencer_tx.signer()); - info.executed_transactions.push(sequencer_tx.into_inner()); - } - - Ok(info) - } - - /// Executes the given best transactions and updates the execution info. - /// - /// Returns `Ok(Some(())` if the job was cancelled. 
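The `Ok(Some(()))` return value documented above is the cancellation signal: the transaction loop polls the job's `CancellationToken` between transactions and bails out early, and the caller maps that into a cancelled build outcome. A small sketch of that contract using `tokio_util` (already a dependency of this crate); `drain_queue` is an illustrative stand-in for the real loop:

```rust
use tokio_util::sync::CancellationToken;

/// Illustrative stand-in for the transaction loop: polls the token between
/// items, returns Ok(Some(())) when the payload job was cancelled mid-way and
/// Ok(None) when it ran to completion.
fn drain_queue(token: &CancellationToken, queue: &[u64]) -> Result<Option<()>, String> {
    for tx in queue {
        if token.is_cancelled() {
            return Ok(Some(())); // the caller maps this to a cancelled build outcome
        }
        let _ = tx; // ... simulate the transaction here ...
    }
    Ok(None)
}

fn main() {
    let token = CancellationToken::new();
    assert_eq!(drain_queue(&token, &[1, 2, 3]).unwrap(), None);
    token.cancel();
    assert_eq!(drain_queue(&token, &[1, 2, 3]).unwrap(), Some(()));
}
```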
- pub fn execute_best_transactions( - &self, - info: &mut ExecutionInfo, - db: &mut State, - mut best_txs: impl PayloadTransactions< - Transaction: PoolTransaction, - >, - batch_gas_limit: u64, - ) -> Result, PayloadBuilderError> - where - DB: Database, - { - let base_fee = self.base_fee(); - let mut num_txs_considered = 0; - let mut num_txs_simulated = 0; - let mut num_txs_simulated_success = 0; - let mut num_txs_simulated_fail = 0; - - let mut evm = self.evm_config.evm_with_env(&mut *db, self.evm_env.clone()); - - while let Some(tx) = best_txs.next(()) { - let tx = tx.into_consensus(); - num_txs_considered += 1; - - // check in info if the txn has been executed already - if info.executed_transactions.contains(&tx) { - continue; - } - - // ensure we still have capacity for this transaction - if info.is_tx_over_limits(tx.inner(), batch_gas_limit, None, None) { - // we can't fit this transaction into the block, so we need to mark it as - // invalid which also removes all dependent transaction from - // the iterator before we can continue - best_txs.mark_invalid(tx.signer(), tx.nonce()); - continue; - } - - // A sequencer's block should never contain blob or deposit transactions from the pool. - if tx.is_eip4844() || tx.is_deposit() { - best_txs.mark_invalid(tx.signer(), tx.nonce()); - continue; - } - - // check if the job was cancelled, if so we can exit early - if self.cancel.is_cancelled() { - return Ok(Some(())); - } - - let tx_simulation_start_time = Instant::now(); - let ResultAndState { result, state } = match evm.transact(&tx) { - Ok(res) => res, - Err(err) => { - if let Some(err) = err.as_invalid_tx_err() { - if err.is_nonce_too_low() { - // if the nonce is too low, we can skip this transaction - trace!(target: "payload_builder", %err, ?tx, "skipping nonce too low transaction"); - } else { - // if the transaction is invalid, we can skip it and all of its - // descendants - trace!(target: "payload_builder", %err, ?tx, "skipping invalid transaction and its descendants"); - best_txs.mark_invalid(tx.signer(), tx.nonce()); - } - - continue; - } - // this is an error that we should treat as fatal for this attempt - return Err(PayloadBuilderError::EvmExecutionError(Box::new(err))); - } - }; - - self.metrics - .tx_simulation_duration - .record(tx_simulation_start_time.elapsed()); - self.metrics.tx_byte_size.record(tx.inner().size() as f64); - num_txs_simulated += 1; - - if result.is_success() { - num_txs_simulated_success += 1; - } else { - num_txs_simulated_fail += 1; - trace!(target: "payload_builder", ?tx, "reverted transaction"); - } - - // add gas used by the transaction to cumulative gas used, before creating the receipt - let gas_used = result.gas_used(); - info.cumulative_gas_used += gas_used; - - let ctx = ReceiptBuilderCtx { - tx: tx.inner(), - evm: &evm, - result, - state: &state, - cumulative_gas_used: info.cumulative_gas_used, - }; - info.receipts.push(self.build_receipt(ctx, None)); - - // commit changes - evm.db_mut().commit(state); - - // update add to total fees - let miner_fee = tx - .effective_tip_per_gas(base_fee) - .expect("fee is always valid; execution succeeded"); - info.total_fees += U256::from(miner_fee) * U256::from(gas_used); - - // append sender and transaction to the respective lists - info.executed_senders.push(tx.signer()); - info.executed_transactions.push(tx.into_inner()); - } - - self.metrics - .payload_num_tx_considered - .record(num_txs_considered as f64); - self.metrics - .payload_num_tx_simulated - .record(num_txs_simulated as f64); - self.metrics - 
.payload_num_tx_simulated_success - .record(num_txs_simulated_success as f64); - self.metrics - .payload_num_tx_simulated_fail - .record(num_txs_simulated_fail as f64); - - Ok(None) - } -} diff --git a/crates/op-rbuilder/src/payload_builder_vanilla.rs b/crates/op-rbuilder/src/payload_builder_vanilla.rs deleted file mode 100644 index 377aafce4..000000000 --- a/crates/op-rbuilder/src/payload_builder_vanilla.rs +++ /dev/null @@ -1,1221 +0,0 @@ -use crate::{ - generator::BuildArguments, - metrics::OpRBuilderMetrics, - primitives::reth::ExecutionInfo, - tx::{FBPoolTransaction, MaybeRevertingTransaction}, - tx_signer::Signer, -}; -use alloy_consensus::{ - constants::EMPTY_WITHDRAWALS, transaction::Recovered, Eip658Value, Header, Transaction, - TxEip1559, Typed2718, EMPTY_OMMER_ROOT_HASH, -}; -use alloy_eips::{eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE}; -use alloy_op_evm::block::receipt_builder::OpReceiptBuilder; -use alloy_primitives::{private::alloy_rlp::Encodable, Address, Bytes, TxKind, U256}; -use alloy_rpc_types_engine::PayloadId; -use alloy_rpc_types_eth::Withdrawals; -use op_alloy_consensus::{OpDepositReceipt, OpTypedTransaction}; -use op_revm::OpSpecId; -use reth::{ - builder::{components::PayloadBuilderBuilder, node::FullNodeTypes, BuilderContext}, - core::primitives::InMemorySize, -}; -use reth_basic_payload_builder::{ - BuildOutcome, BuildOutcomeKind, MissingPayloadBehaviour, PayloadConfig, -}; -use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates}; -use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; -use reth_evm::{ - env::EvmEnv, eth::receipt_builder::ReceiptBuilderCtx, execute::BlockBuilder, ConfigureEvm, - Database, Evm, EvmError, InvalidTxError, -}; -use reth_execution_types::ExecutionOutcome; -use reth_node_api::{NodePrimitives, NodeTypes, PrimitivesTy}; -use reth_node_builder::components::BasicPayloadServiceBuilder; -use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_consensus::{calculate_receipt_root_no_memo_optimism, isthmus}; -use reth_optimism_evm::{OpEvmConfig, OpNextBlockEnvAttributes}; -use reth_optimism_forks::OpHardforks; -use reth_optimism_node::OpEngineTypes; -use reth_optimism_payload_builder::{ - config::{OpBuilderConfig, OpDAConfig}, - error::OpPayloadBuilderError, - payload::{OpBuiltPayload, OpPayloadBuilderAttributes}, - OpPayloadPrimitives, -}; -use reth_optimism_primitives::{OpPrimitives, OpReceipt, OpTransactionSigned}; -use reth_payload_builder_primitives::PayloadBuilderError; -use reth_payload_primitives::PayloadBuilderAttributes; -use reth_payload_util::{BestPayloadTransactions, NoopPayloadTransactions, PayloadTransactions}; -use reth_primitives::{BlockBody, SealedHeader}; -use reth_primitives_traits::{proofs, Block as _, RecoveredBlock, SignedTransaction}; -use reth_provider::{ - HashedPostStateProvider, ProviderError, StateProviderFactory, StateRootProvider, - StorageRootProvider, -}; -use reth_revm::database::StateProviderDatabase; -use reth_transaction_pool::{BestTransactionsAttributes, PoolTransaction, TransactionPool}; -use revm::{ - context::{result::ResultAndState, Block as _}, - database::{states::bundle_state::BundleRetention, State}, - DatabaseCommit, -}; -use std::{sync::Arc, time::Instant}; -use tokio_util::sync::CancellationToken; -use tracing::*; - -// From https://eips.ethereum.org/EIPS/eip-7623 -const TOTAL_COST_FLOOR_PER_TOKEN: u64 = 10; - -/// Holds the state after execution -#[derive(Debug)] -pub struct ExecutedPayload { - /// Tracked execution info - pub info: ExecutionInfo, -} - 
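`TOTAL_COST_FLOOR_PER_TOKEN` above comes from EIP-7623: a zero calldata byte counts as one token, a non-zero byte as four, and the charged intrinsic gas is at least `21_000 + 10 * tokens` even when the legacy 4/16-gas-per-byte cost is lower. A worked sketch mirroring the deleted `estimate_gas_for_builder_tx` helper (the function name here is illustrative):

```rust
const TOTAL_COST_FLOOR_PER_TOKEN: u64 = 10; // from EIP-7623

/// Illustrative re-statement of the gas estimate for the end-of-block builder
/// transaction: legacy intrinsic cost vs. the EIP-7623 calldata floor.
fn builder_tx_gas(input: &[u8]) -> u64 {
    let zero_bytes = input.iter().filter(|&&b| b == 0).count() as u64;
    let nonzero_bytes = input.len() as u64 - zero_bytes;

    let legacy_cost = 21_000 + zero_bytes * 4 + nonzero_bytes * 16;
    let tokens_in_calldata = zero_bytes + nonzero_bytes * 4;
    let floor_cost = 21_000 + tokens_in_calldata * TOTAL_COST_FLOOR_PER_TOKEN;

    legacy_cost.max(floor_cost)
}

fn main() {
    // 10 non-zero bytes: legacy = 21_160, floor = 21_000 + 40 * 10 = 21_400
    assert_eq!(builder_tx_gas(&[1u8; 10]), 21_400);
    // 10 zero bytes: legacy = 21_040, floor = 21_000 + 10 * 10 = 21_100
    assert_eq!(builder_tx_gas(&[0u8; 10]), 21_100);
}
```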
-#[derive(Debug, Clone, Default)] -#[non_exhaustive] -pub struct CustomOpPayloadBuilder { - builder_signer: Option, - #[allow(dead_code)] - extra_block_deadline: std::time::Duration, - #[cfg(feature = "flashblocks")] - flashblocks_ws_url: String, - #[cfg(feature = "flashblocks")] - chain_block_time: u64, - #[cfg(feature = "flashblocks")] - flashblock_block_time: u64, -} - -impl CustomOpPayloadBuilder { - #[cfg(feature = "flashblocks")] - pub fn new( - builder_signer: Option, - flashblocks_ws_url: String, - chain_block_time: u64, - flashblock_block_time: u64, - ) -> Self { - Self { - builder_signer, - flashblocks_ws_url, - chain_block_time, - flashblock_block_time, - } - } - - #[cfg(not(feature = "flashblocks"))] - pub fn new( - builder_signer: Option, - extra_block_deadline: std::time::Duration, - _flashblocks_ws_url: String, - _chain_block_time: u64, - _flashblock_block_time: u64, - ) -> BasicPayloadServiceBuilder { - BasicPayloadServiceBuilder::new(CustomOpPayloadBuilder { - builder_signer, - extra_block_deadline, - }) - } -} - -impl PayloadBuilderBuilder for CustomOpPayloadBuilder -where - Node: FullNodeTypes< - Types: NodeTypes< - Payload = OpEngineTypes, - ChainSpec = OpChainSpec, - Primitives = OpPrimitives, - >, - >, - Pool: TransactionPool> - + Unpin - + 'static, - ::Transaction: FBPoolTransaction, - Evm: ConfigureEvm< - Primitives = PrimitivesTy, - NextBlockEnvCtx = OpNextBlockEnvAttributes, - > + 'static, -{ - type PayloadBuilder = OpPayloadBuilderVanilla; - - async fn build_payload_builder( - self, - ctx: &BuilderContext, - pool: Pool, - _evm_config: Evm, - ) -> eyre::Result { - Ok(OpPayloadBuilderVanilla::new( - OpEvmConfig::optimism(ctx.chain_spec()), - self.builder_signer, - pool, - ctx.provider().clone(), - )) - } -} - -impl reth_basic_payload_builder::PayloadBuilder - for OpPayloadBuilderVanilla -where - Pool: TransactionPool>, - Client: StateProviderFactory + ChainSpecProvider + Clone, - Txs: OpPayloadTransactions, -{ - type Attributes = OpPayloadBuilderAttributes; - type BuiltPayload = OpBuiltPayload; - - fn try_build( - &self, - args: reth_basic_payload_builder::BuildArguments, - ) -> Result, PayloadBuilderError> { - let pool = self.pool.clone(); - - let reth_basic_payload_builder::BuildArguments { - cached_reads, - config, - cancel: _, // TODO - best_payload: _, - } = args; - - let args = BuildArguments { - cached_reads, - config, - cancel: CancellationToken::new(), - }; - - self.build_payload(args, |attrs| { - #[allow(clippy::unit_arg)] - self.best_transactions - .best_transactions(pool.clone(), attrs) - }) - } - - fn on_missing_payload( - &self, - _args: reth_basic_payload_builder::BuildArguments, - ) -> MissingPayloadBehaviour { - MissingPayloadBehaviour::AwaitInProgress - } - - fn build_empty_payload( - &self, - config: reth_basic_payload_builder::PayloadConfig< - Self::Attributes, - reth_basic_payload_builder::HeaderForPayload, - >, - ) -> Result { - let args = BuildArguments { - config, - cached_reads: Default::default(), - cancel: Default::default(), - }; - self.build_payload(args, |_| { - NoopPayloadTransactions::::default() - })? - .into_payload() - .ok_or_else(|| PayloadBuilderError::MissingPayload) - } -} - -/// Optimism's payload builder -#[derive(Debug, Clone)] -pub struct OpPayloadBuilderVanilla { - /// The type responsible for creating the evm. 
- pub evm_config: OpEvmConfig, - /// The builder's signer key to use for an end of block tx - pub builder_signer: Option, - /// The transaction pool - pub pool: Pool, - /// Node client - pub client: Client, - /// Settings for the builder, e.g. DA settings. - pub config: OpBuilderConfig, - /// The type responsible for yielding the best transactions for the payload if mempool - /// transactions are allowed. - pub best_transactions: Txs, - /// The metrics for the builder - pub metrics: OpRBuilderMetrics, -} - -impl OpPayloadBuilderVanilla { - /// `OpPayloadBuilder` constructor. - pub fn new( - evm_config: OpEvmConfig, - builder_signer: Option, - pool: Pool, - client: Client, - ) -> Self { - Self::with_builder_config(evm_config, builder_signer, pool, client, Default::default()) - } - - pub fn with_builder_config( - evm_config: OpEvmConfig, - builder_signer: Option, - pool: Pool, - client: Client, - config: OpBuilderConfig, - ) -> Self { - Self { - pool, - client, - config, - evm_config, - best_transactions: (), - metrics: Default::default(), - builder_signer, - } - } -} - -impl OpPayloadBuilderVanilla -where - Pool: TransactionPool>, - Client: StateProviderFactory + ChainSpecProvider, -{ - /// Constructs an Optimism payload from the transactions sent via the - /// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in - /// the payload attributes, the transaction pool will be ignored and the only transactions - /// included in the payload will be those sent through the attributes. - /// - /// Given build arguments including an Optimism client, transaction pool, - /// and configuration, this function creates a transaction payload. Returns - /// a result indicating success with the payload or an error in case of failure. - fn build_payload<'a, Txs>( - &self, - args: BuildArguments, OpBuiltPayload>, - best: impl FnOnce(BestTransactionsAttributes) -> Txs + Send + Sync + 'a, - ) -> Result, PayloadBuilderError> - where - Txs: PayloadTransactions>, - { - let BuildArguments { - mut cached_reads, - config, - cancel, - } = args; - - let chain_spec = self.client.chain_spec(); - let timestamp = config.attributes.timestamp(); - let block_env_attributes = OpNextBlockEnvAttributes { - timestamp, - suggested_fee_recipient: config.attributes.suggested_fee_recipient(), - prev_randao: config.attributes.prev_randao(), - gas_limit: config - .attributes - .gas_limit - .unwrap_or(config.parent_header.gas_limit), - parent_beacon_block_root: config - .attributes - .payload_attributes - .parent_beacon_block_root, - extra_data: if chain_spec.is_holocene_active_at_timestamp(timestamp) { - config - .attributes - .get_holocene_extra_data(chain_spec.base_fee_params_at_timestamp(timestamp)) - .map_err(PayloadBuilderError::other)? 
- } else { - Default::default() - }, - }; - - let evm_env = self - .evm_config - .next_evm_env(&config.parent_header, &block_env_attributes) - .map_err(PayloadBuilderError::other)?; - - let ctx = OpPayloadBuilderCtx { - evm_config: self.evm_config.clone(), - da_config: self.config.da_config.clone(), - chain_spec, - config, - evm_env, - block_env_attributes, - cancel, - builder_signer: self.builder_signer, - metrics: self.metrics.clone(), - }; - - let builder = OpBuilder::new(best); - - let state_provider = self.client.state_by_block_hash(ctx.parent().hash())?; - let state = StateProviderDatabase::new(state_provider); - - if ctx.attributes().no_tx_pool { - let db = State::builder() - .with_database(state) - .with_bundle_update() - .build(); - builder.build(db, ctx) - } else { - // sequencer mode we can reuse cachedreads from previous runs - let db = State::builder() - .with_database(cached_reads.as_db_mut(state)) - .with_bundle_update() - .build(); - builder.build(db, ctx) - } - .map(|out| out.with_cached_reads(cached_reads)) - } -} - -/// The type that builds the payload. -/// -/// Payload building for optimism is composed of several steps. -/// The first steps are mandatory and defined by the protocol. -/// -/// 1. first all System calls are applied. -/// 2. After canyon the forced deployed `create2deployer` must be loaded -/// 3. all sequencer transactions are executed (part of the payload attributes) -/// -/// Depending on whether the node acts as a sequencer and is allowed to include additional -/// transactions (`no_tx_pool == false`): -/// 4. include additional transactions -/// -/// And finally -/// 5. build the block: compute all roots (txs, state) -#[derive(derive_more::Debug)] -pub struct OpBuilder<'a, Txs> { - /// Yields the best transaction to include if transactions from the mempool are allowed. - best: Box Txs + 'a>, -} - -impl<'a, Txs> OpBuilder<'a, Txs> { - fn new(best: impl FnOnce(BestTransactionsAttributes) -> Txs + Send + Sync + 'a) -> Self { - Self { - best: Box::new(best), - } - } -} - -impl OpBuilder<'_, Txs> { - /// Executes the payload and returns the outcome. - pub fn execute( - self, - state: &mut State, - ctx: &OpPayloadBuilderCtx, - ) -> Result>, PayloadBuilderError> - where - N: OpPayloadPrimitives<_TX = OpTransactionSigned>, - Txs: PayloadTransactions>, - ChainSpec: EthChainSpec + OpHardforks, - DB: Database + AsRef
<P>
, - P: StorageRootProvider, - { - let Self { best } = self; - info!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number, "building new payload"); - - // 1. apply pre-execution changes - ctx.evm_config - .builder_for_next_block(state, ctx.parent(), ctx.block_env_attributes.clone()) - .map_err(PayloadBuilderError::other)? - .apply_pre_execution_changes()?; - - let sequencer_tx_start_time = Instant::now(); - - // 3. execute sequencer transactions - let mut info = ctx.execute_sequencer_transactions(state)?; - - ctx.metrics - .sequencer_tx_duration - .record(sequencer_tx_start_time.elapsed()); - - // 4. if mem pool transactions are requested we execute them - - // gas reserved for builder tx - let message = format!("Block Number: {}", ctx.block_number()) - .as_bytes() - .to_vec(); - let builder_tx_gas = ctx - .builder_signer() - .map_or(0, |_| estimate_gas_for_builder_tx(message.clone())); - let block_gas_limit = ctx.block_gas_limit() - builder_tx_gas; - // Save some space in the block_da_limit for builder tx - let builder_tx_da_size = ctx - .estimate_builder_tx_da_size(state, builder_tx_gas, message.clone()) - .unwrap_or(0); - let block_da_limit = ctx - .da_config - .max_da_block_size() - .map(|da_size| da_size - builder_tx_da_size as u64); - // Check that it's possible to create builder tx, considering max_da_tx_size, otherwise panic - if let Some(tx_da_limit) = ctx.da_config.max_da_tx_size() { - // Panic indicate max_da_tx_size misconfiguration - assert!( - tx_da_limit >= builder_tx_da_size as u64, - "The configured da_config.max_da_tx_size is too small to accommodate builder tx." - ); - } - - if !ctx.attributes().no_tx_pool { - let best_txs_start_time = Instant::now(); - let best_txs = best(ctx.best_transaction_attributes()); - ctx.metrics - .transaction_pool_fetch_duration - .record(best_txs_start_time.elapsed()); - if ctx - .execute_best_transactions( - &mut info, - state, - best_txs, - block_gas_limit, - block_da_limit, - )? - .is_some() - { - return Ok(BuildOutcomeKind::Cancelled); - } - } - - // Add builder tx to the block - ctx.add_builder_tx(&mut info, state, builder_tx_gas, message); - - let state_merge_start_time = Instant::now(); - - // merge all transitions into bundle state, this would apply the withdrawal balance changes - // and 4788 contract call - state.merge_transitions(BundleRetention::Reverts); - - ctx.metrics - .state_transition_merge_duration - .record(state_merge_start_time.elapsed()); - ctx.metrics - .payload_num_tx - .record(info.executed_transactions.len() as f64); - - let payload = ExecutedPayload { info }; - - ctx.metrics.block_built_success.increment(1); - Ok(BuildOutcomeKind::Better { payload }) - } - - /// Builds the payload on top of the state. - pub fn build( - self, - mut state: State, - ctx: OpPayloadBuilderCtx, - ) -> Result, PayloadBuilderError> - where - ChainSpec: EthChainSpec + OpHardforks, - Txs: PayloadTransactions>, - DB: Database + AsRef
<P>
, - P: StateRootProvider + HashedPostStateProvider + StorageRootProvider, - { - let ExecutedPayload { info } = match self.execute(&mut state, &ctx)? { - BuildOutcomeKind::Better { payload } | BuildOutcomeKind::Freeze(payload) => payload, - BuildOutcomeKind::Cancelled => return Ok(BuildOutcomeKind::Cancelled), - BuildOutcomeKind::Aborted { fees } => return Ok(BuildOutcomeKind::Aborted { fees }), - }; - - let block_number = ctx.block_number(); - let execution_outcome = ExecutionOutcome::new( - state.take_bundle(), - vec![info.receipts], - block_number, - Vec::new(), - ); - let receipts_root = execution_outcome - .generic_receipts_root_slow(block_number, |receipts| { - calculate_receipt_root_no_memo_optimism( - receipts, - &ctx.chain_spec, - ctx.attributes().timestamp(), - ) - }) - .expect("Number is in range"); - let logs_bloom = execution_outcome - .block_logs_bloom(block_number) - .expect("Number is in range"); - - // calculate the state root - let state_root_start_time = Instant::now(); - - let state_provider = state.database.as_ref(); - let hashed_state = state_provider.hashed_post_state(execution_outcome.state()); - let (state_root, trie_output) = { - state - .database - .as_ref() - .state_root_with_updates(hashed_state.clone()) - .inspect_err(|err| { - warn!(target: "payload_builder", - parent_header=%ctx.parent().hash(), - %err, - "failed to calculate state root for payload" - ); - })? - }; - - ctx.metrics - .state_root_calculation_duration - .record(state_root_start_time.elapsed()); - - let (withdrawals_root, requests_hash) = if ctx.is_isthmus_active() { - // withdrawals root field in block header is used for storage root of L2 predeploy - // `l2tol1-message-passer` - ( - Some( - isthmus::withdrawals_root(execution_outcome.state(), state.database.as_ref()) - .map_err(PayloadBuilderError::other)?, - ), - Some(EMPTY_REQUESTS_HASH), - ) - } else if ctx.is_canyon_active() { - (Some(EMPTY_WITHDRAWALS), None) - } else { - (None, None) - }; - - // create the block header - let transactions_root = proofs::calculate_transaction_root(&info.executed_transactions); - - // OP doesn't support blobs/EIP-4844. - // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions - // Need [Some] or [None] based on hardfork to match block hash. 
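The `(withdrawals_root, requests_hash)` selection above is a three-way hardfork switch: Isthmus repurposes `withdrawals_root` for the storage root of the L2-to-L1 message-passer predeploy and sets an empty requests hash, Canyon uses the empty-withdrawals root, and earlier forks leave both unset. A simplified sketch with fixed-size byte arrays standing in for the real hash types (all names illustrative):

```rust
type Hash = [u8; 32]; // stand-in for B256

/// Illustrative three-way selection for the header's withdrawals_root and
/// requests_hash fields, keyed on which hardforks are active.
fn withdrawals_fields(
    is_isthmus: bool,
    is_canyon: bool,
    message_passer_storage_root: Hash, // storage root of the L2-to-L1 message passer
    empty_withdrawals_root: Hash,      // EMPTY_WITHDRAWALS constant
    empty_requests_hash: Hash,         // EMPTY_REQUESTS_HASH constant
) -> (Option<Hash>, Option<Hash>) {
    if is_isthmus {
        (Some(message_passer_storage_root), Some(empty_requests_hash))
    } else if is_canyon {
        (Some(empty_withdrawals_root), None)
    } else {
        (None, None)
    }
}

fn main() {
    let (withdrawals_root, requests_hash) =
        withdrawals_fields(false, true, [1; 32], [2; 32], [3; 32]);
    assert_eq!(withdrawals_root, Some([2; 32])); // Canyon: empty-withdrawals root
    assert_eq!(requests_hash, None);
}
```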
- let (excess_blob_gas, blob_gas_used) = ctx.blob_fields(); - let extra_data = ctx.extra_data()?; - - let header = Header { - parent_hash: ctx.parent().hash(), - ommers_hash: EMPTY_OMMER_ROOT_HASH, - beneficiary: ctx.evm_env.block_env.beneficiary, - state_root, - transactions_root, - receipts_root, - withdrawals_root, - logs_bloom, - timestamp: ctx.attributes().payload_attributes.timestamp, - mix_hash: ctx.attributes().payload_attributes.prev_randao, - nonce: BEACON_NONCE.into(), - base_fee_per_gas: Some(ctx.base_fee()), - number: ctx.parent().number + 1, - gas_limit: ctx.block_gas_limit(), - difficulty: U256::ZERO, - gas_used: info.cumulative_gas_used, - extra_data, - parent_beacon_block_root: ctx.attributes().payload_attributes.parent_beacon_block_root, - blob_gas_used, - excess_blob_gas, - requests_hash, - }; - - // seal the block - let block = alloy_consensus::Block::::new( - header, - BlockBody { - transactions: info.executed_transactions, - ommers: vec![], - withdrawals: ctx.withdrawals().cloned(), - }, - ); - - let sealed_block = Arc::new(block.seal_slow()); - info!(target: "payload_builder", id=%ctx.attributes().payload_id(), "sealed built block"); - - // create the executed block data - let executed: ExecutedBlockWithTrieUpdates = ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(RecoveredBlock::< - alloy_consensus::Block, - >::new_sealed( - sealed_block.as_ref().clone(), info.executed_senders - )), - execution_output: Arc::new(execution_outcome), - hashed_state: Arc::new(hashed_state), - }, - trie: Arc::new(trie_output), - }; - - let no_tx_pool = ctx.attributes().no_tx_pool; - - let payload = OpBuiltPayload::new( - ctx.payload_id(), - sealed_block, - info.total_fees, - Some(executed), - ); - - ctx.metrics - .payload_byte_size - .record(payload.block().size() as f64); - - if no_tx_pool { - // if `no_tx_pool` is set only transactions from the payload attributes will be included - // in the payload. In other words, the payload is deterministic and we can - // freeze it once we've successfully built it. - Ok(BuildOutcomeKind::Freeze(payload)) - } else { - Ok(BuildOutcomeKind::Better { payload }) - } - } -} - -/// A type that returns a the [`PayloadTransactions`] that should be included in the pool. -pub trait OpPayloadTransactions: Clone + Send + Sync + Unpin + 'static { - /// Returns an iterator that yields the transaction in the order they should get included in the - /// new payload. - fn best_transactions>( - &self, - pool: Pool, - attr: BestTransactionsAttributes, - ) -> impl PayloadTransactions; -} - -impl OpPayloadTransactions for () { - fn best_transactions>( - &self, - pool: Pool, - attr: BestTransactionsAttributes, - ) -> impl PayloadTransactions { - BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr)) - } -} - -/// Container type that holds all necessities to build a new payload. -#[derive(Debug)] -pub struct OpPayloadBuilderCtx { - /// The type that knows how to perform system calls and configure the evm. - pub evm_config: OpEvmConfig, - /// The DA config for the payload builder - pub da_config: OpDAConfig, - /// The chainspec - pub chain_spec: Arc, - /// How to build the payload. - pub config: PayloadConfig>, - /// Evm Settings - pub evm_env: EvmEnv, - /// Block env attributes for the current block. - pub block_env_attributes: OpNextBlockEnvAttributes, - /// Marker to check whether the job has been cancelled. 
- pub cancel: CancellationToken, - /// The builder signer - pub builder_signer: Option, - /// The metrics for the builder - pub metrics: OpRBuilderMetrics, -} - -impl OpPayloadBuilderCtx -where - ChainSpec: EthChainSpec + OpHardforks, - N: NodePrimitives, -{ - /// Returns the parent block the payload will be build on. - pub fn parent(&self) -> &SealedHeader { - &self.config.parent_header - } - - /// Returns the builder attributes. - pub const fn attributes(&self) -> &OpPayloadBuilderAttributes { - &self.config.attributes - } - - /// Returns the withdrawals if shanghai is active. - pub fn withdrawals(&self) -> Option<&Withdrawals> { - self.chain_spec - .is_shanghai_active_at_timestamp(self.attributes().timestamp()) - .then(|| &self.attributes().payload_attributes.withdrawals) - } - - /// Returns the block gas limit to target. - pub fn block_gas_limit(&self) -> u64 { - self.attributes() - .gas_limit - .unwrap_or(self.evm_env.block_env.gas_limit) - } - - /// Returns the block number for the block. - pub fn block_number(&self) -> u64 { - self.evm_env.block_env.number - } - - /// Returns the current base fee - pub fn base_fee(&self) -> u64 { - self.evm_env.block_env.basefee - } - - /// Returns the current blob gas price. - pub fn get_blob_gasprice(&self) -> Option { - self.evm_env - .block_env - .blob_gasprice() - .map(|gasprice| gasprice as u64) - } - - /// Returns the blob fields for the header. - /// - /// This will always return `Some(0)` after ecotone. - pub fn blob_fields(&self) -> (Option, Option) { - // OP doesn't support blobs/EIP-4844. - // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions - // Need [Some] or [None] based on hardfork to match block hash. - if self.is_ecotone_active() { - (Some(0), Some(0)) - } else { - (None, None) - } - } - - /// Returns the extra data for the block. - /// - /// After holocene this extracts the extradata from the paylpad - pub fn extra_data(&self) -> Result { - if self.is_holocene_active() { - self.attributes() - .get_holocene_extra_data( - self.chain_spec.base_fee_params_at_timestamp( - self.attributes().payload_attributes.timestamp, - ), - ) - .map_err(PayloadBuilderError::other) - } else { - Ok(Default::default()) - } - } - - /// Returns the current fee settings for transactions from the mempool - pub fn best_transaction_attributes(&self) -> BestTransactionsAttributes { - BestTransactionsAttributes::new(self.base_fee(), self.get_blob_gasprice()) - } - - /// Returns the unique id for this payload job. - pub fn payload_id(&self) -> PayloadId { - self.attributes().payload_id() - } - - /// Returns true if regolith is active for the payload. - pub fn is_regolith_active(&self) -> bool { - self.chain_spec - .is_regolith_active_at_timestamp(self.attributes().timestamp()) - } - - /// Returns true if ecotone is active for the payload. - pub fn is_ecotone_active(&self) -> bool { - self.chain_spec - .is_ecotone_active_at_timestamp(self.attributes().timestamp()) - } - - /// Returns true if canyon is active for the payload. - pub fn is_canyon_active(&self) -> bool { - self.chain_spec - .is_canyon_active_at_timestamp(self.attributes().timestamp()) - } - - /// Returns true if holocene is active for the payload. - pub fn is_holocene_active(&self) -> bool { - self.chain_spec - .is_holocene_active_at_timestamp(self.attributes().timestamp()) - } - - /// Returns true if isthmus is active for the payload. 
- pub fn is_isthmus_active(&self) -> bool { - self.chain_spec - .is_isthmus_active_at_timestamp(self.attributes().timestamp()) - } - - /// Returns the chain id - pub fn chain_id(&self) -> u64 { - self.chain_spec.chain_id() - } - - /// Returns the builder signer - pub fn builder_signer(&self) -> Option { - self.builder_signer - } -} - -impl OpPayloadBuilderCtx -where - ChainSpec: EthChainSpec + OpHardforks, - N: OpPayloadPrimitives<_TX = OpTransactionSigned>, -{ - /// Constructs a receipt for the given transaction. - fn build_receipt( - &self, - ctx: ReceiptBuilderCtx<'_, OpTransactionSigned, E>, - deposit_nonce: Option, - ) -> OpReceipt { - let receipt_builder = self.evm_config.block_executor_factory().receipt_builder(); - match receipt_builder.build_receipt(ctx) { - Ok(receipt) => receipt, - Err(ctx) => { - let receipt = alloy_consensus::Receipt { - // Success flag was added in `EIP-658: Embedding transaction status code - // in receipts`. - status: Eip658Value::Eip658(ctx.result.is_success()), - cumulative_gas_used: ctx.cumulative_gas_used, - logs: ctx.result.into_logs(), - }; - - receipt_builder.build_deposit_receipt(OpDepositReceipt { - inner: receipt, - deposit_nonce, - // The deposit receipt version was introduced in Canyon to indicate an - // update to how receipt hashes should be computed - // when set. The state transition process ensures - // this is only set for post-Canyon deposit - // transactions. - deposit_receipt_version: self.is_canyon_active().then_some(1), - }) - } - } - } - - /// Executes all sequencer transactions that are included in the payload attributes. - pub fn execute_sequencer_transactions( - &self, - db: &mut State, - ) -> Result, PayloadBuilderError> - where - DB: Database, - { - let mut info = ExecutionInfo::with_capacity(self.attributes().transactions.len()); - - let mut evm = self.evm_config.evm_with_env(&mut *db, self.evm_env.clone()); - - for sequencer_tx in &self.attributes().transactions { - // A sequencer's block should never contain blob transactions. - if sequencer_tx.value().is_eip4844() { - return Err(PayloadBuilderError::other( - OpPayloadBuilderError::BlobTransactionRejected, - )); - } - - // Convert the transaction to a [Recovered]. This is - // purely for the purposes of utilizing the `evm_config.tx_env`` function. - // Deposit transactions do not have signatures, so if the tx is a deposit, this - // will just pull in its `from` address. - let sequencer_tx = sequencer_tx - .value() - .try_clone_into_recovered() - .map_err(|_| { - PayloadBuilderError::other(OpPayloadBuilderError::TransactionEcRecoverFailed) - })?; - - // Cache the depositor account prior to the state transition for the deposit nonce. - // - // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces - // were not introduced in Bedrock. In addition, regular transactions don't have deposit - // nonces, so we don't need to touch the DB for those. 
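The depositor-nonce caching that follows this comment uses `bool::then` plus `Option::transpose` so the database is only touched for post-Regolith deposit transactions while load failures still surface as errors. A minimal sketch of that gate with an illustrative `load_nonce` closure standing in for `load_cache_account`:

```rust
/// Illustrative gate: only touch the database for post-Regolith deposit
/// transactions, and keep any load error as an error via transpose().
fn cached_deposit_nonce(
    is_regolith: bool,
    is_deposit: bool,
    load_nonce: impl Fn() -> Result<u64, String>, // stand-in for load_cache_account(..)
) -> Result<Option<u64>, String> {
    (is_regolith && is_deposit)
        .then(|| load_nonce()) // Option<Result<u64, String>>
        .transpose()           // Result<Option<u64>, String>
}

fn main() {
    let ok = || -> Result<u64, String> { Ok(7) };
    assert_eq!(cached_deposit_nonce(true, true, ok), Ok(Some(7)));
    assert_eq!(cached_deposit_nonce(true, false, ok), Ok(None)); // DB never touched
    let fail = || -> Result<u64, String> { Err("account load failed".into()) };
    assert!(cached_deposit_nonce(true, true, fail).is_err());
    assert_eq!(cached_deposit_nonce(false, true, fail), Ok(None));
}
```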
- let depositor_nonce = (self.is_regolith_active() && sequencer_tx.is_deposit()) - .then(|| { - evm.db_mut() - .load_cache_account(sequencer_tx.signer()) - .map(|acc| acc.account_info().unwrap_or_default().nonce) - }) - .transpose() - .map_err(|_| { - PayloadBuilderError::other(OpPayloadBuilderError::AccountLoadFailed( - sequencer_tx.signer(), - )) - })?; - - let ResultAndState { result, state } = match evm.transact(&sequencer_tx) { - Ok(res) => res, - Err(err) => { - if err.is_invalid_tx_err() { - trace!(target: "payload_builder", %err, ?sequencer_tx, "Error in sequencer transaction, skipping."); - continue; - } - // this is an error that we should treat as fatal for this attempt - return Err(PayloadBuilderError::EvmExecutionError(Box::new(err))); - } - }; - - // add gas used by the transaction to cumulative gas used, before creating the receipt - let gas_used = result.gas_used(); - info.cumulative_gas_used += gas_used; - - let ctx = ReceiptBuilderCtx { - tx: sequencer_tx.inner(), - evm: &evm, - result, - state: &state, - cumulative_gas_used: info.cumulative_gas_used, - }; - info.receipts.push(self.build_receipt(ctx, depositor_nonce)); - - // commit changes - evm.db_mut().commit(state); - - // append sender and transaction to the respective lists - info.executed_senders.push(sequencer_tx.signer()); - info.executed_transactions.push(sequencer_tx.into_inner()); - } - - Ok(info) - } - - /// Executes the given best transactions and updates the execution info. - /// - /// Returns `Ok(Some(())` if the job was cancelled. - pub fn execute_best_transactions( - &self, - info: &mut ExecutionInfo, - db: &mut State, - mut best_txs: impl PayloadTransactions< - Transaction: FBPoolTransaction, - >, - block_gas_limit: u64, - block_da_limit: Option, - ) -> Result, PayloadBuilderError> - where - DB: Database, - { - let execute_txs_start_time = Instant::now(); - let mut num_txs_considered = 0; - let mut num_txs_simulated = 0; - let mut num_txs_simulated_success = 0; - let mut num_txs_simulated_fail = 0; - let base_fee = self.base_fee(); - let tx_da_limit = self.da_config.max_da_tx_size(); - let mut evm = self.evm_config.evm_with_env(&mut *db, self.evm_env.clone()); - - while let Some(tx) = best_txs.next(()) { - let exclude_reverting_txs = tx.exclude_reverting_txs(); - - let tx = tx.into_consensus(); - num_txs_considered += 1; - // ensure we still have capacity for this transaction - if info.is_tx_over_limits(tx.inner(), block_gas_limit, tx_da_limit, block_da_limit) { - // we can't fit this transaction into the block, so we need to mark it as - // invalid which also removes all dependent transaction from - // the iterator before we can continue - best_txs.mark_invalid(tx.signer(), tx.nonce()); - continue; - } - - // A sequencer's block should never contain blob or deposit transactions from the pool. 
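The capacity check used above (`is_tx_over_limits`) rejects a pool transaction if it would exceed the block gas limit or the optional per-transaction / per-block data-availability byte budgets, and the rejection also drops the sender's dependent transactions via `mark_invalid`. A simplified sketch of the limit logic only; the signature here is illustrative, not the real helper's:

```rust
/// Illustrative capacity check; the real helper takes the transaction itself
/// and derives its gas and encoded size internally.
fn is_tx_over_limits(
    tx_gas: u64,
    tx_da_bytes: u64,
    cumulative_gas: u64,
    cumulative_da_bytes: u64,
    block_gas_limit: u64,
    tx_da_limit: Option<u64>,
    block_da_limit: Option<u64>,
) -> bool {
    let over_gas = cumulative_gas + tx_gas > block_gas_limit;
    let over_tx_da = tx_da_limit.is_some_and(|limit| tx_da_bytes > limit);
    let over_block_da =
        block_da_limit.is_some_and(|limit| cumulative_da_bytes + tx_da_bytes > limit);
    over_gas || over_tx_da || over_block_da
}

fn main() {
    // fits: no DA budgets configured and plenty of gas left
    assert!(!is_tx_over_limits(50_000, 800, 100_000, 0, 30_000_000, None, None));
    // rejected: the per-transaction DA budget is smaller than the encoded tx
    assert!(is_tx_over_limits(50_000, 800, 100_000, 0, 30_000_000, Some(500), None));
}
```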
- if tx.is_eip4844() || tx.is_deposit() { - best_txs.mark_invalid(tx.signer(), tx.nonce()); - continue; - } - - // check if the job was cancelled, if so we can exit early - if self.cancel.is_cancelled() { - return Ok(Some(())); - } - - let tx_simulation_start_time = Instant::now(); - let ResultAndState { result, state } = match evm.transact(&tx) { - Ok(res) => res, - Err(err) => { - if let Some(err) = err.as_invalid_tx_err() { - if err.is_nonce_too_low() { - // if the nonce is too low, we can skip this transaction - trace!(target: "payload_builder", %err, ?tx, "skipping nonce too low transaction"); - } else { - // if the transaction is invalid, we can skip it and all of its - // descendants - trace!(target: "payload_builder", %err, ?tx, "skipping invalid transaction and its descendants"); - best_txs.mark_invalid(tx.signer(), tx.nonce()); - } - - continue; - } - // this is an error that we should treat as fatal for this attempt - return Err(PayloadBuilderError::EvmExecutionError(Box::new(err))); - } - }; - - self.metrics - .tx_simulation_duration - .record(tx_simulation_start_time.elapsed()); - self.metrics.tx_byte_size.record(tx.inner().size() as f64); - num_txs_simulated += 1; - if result.is_success() { - num_txs_simulated_success += 1; - } else { - num_txs_simulated_fail += 1; - if exclude_reverting_txs { - info!(target: "payload_builder", tx_hash = ?tx.tx_hash(), "skipping reverted transaction"); - best_txs.mark_invalid(tx.signer(), tx.nonce()); - continue; - } - } - - // add gas used by the transaction to cumulative gas used, before creating the - // receipt - let gas_used = result.gas_used(); - info.cumulative_gas_used += gas_used; - - // Push transaction changeset and calculate header bloom filter for receipt. - let ctx = ReceiptBuilderCtx { - tx: tx.inner(), - evm: &evm, - result, - state: &state, - cumulative_gas_used: info.cumulative_gas_used, - }; - info.receipts.push(self.build_receipt(ctx, None)); - - // commit changes - evm.db_mut().commit(state); - - // update add to total fees - let miner_fee = tx - .effective_tip_per_gas(base_fee) - .expect("fee is always valid; execution succeeded"); - info.total_fees += U256::from(miner_fee) * U256::from(gas_used); - - // append sender and transaction to the respective lists - info.executed_senders.push(tx.signer()); - info.executed_transactions.push(tx.into_inner()); - } - - self.metrics - .payload_tx_simulation_duration - .record(execute_txs_start_time.elapsed()); - self.metrics - .payload_num_tx_considered - .record(num_txs_considered as f64); - self.metrics - .payload_num_tx_simulated - .record(num_txs_simulated as f64); - self.metrics - .payload_num_tx_simulated_success - .record(num_txs_simulated_success as f64); - self.metrics - .payload_num_tx_simulated_fail - .record(num_txs_simulated_fail as f64); - - Ok(None) - } - - pub fn add_builder_tx( - &self, - info: &mut ExecutionInfo, - db: &mut State, - builder_tx_gas: u64, - message: Vec, - ) -> Option<()> - where - DB: Database, - { - self.builder_signer() - .map(|signer| { - let base_fee = self.base_fee(); - let chain_id = self.chain_id(); - // Create and sign the transaction - let builder_tx = - signed_builder_tx(db, builder_tx_gas, message, signer, base_fee, chain_id)?; - - let mut evm = self.evm_config.evm_with_env(&mut *db, self.evm_env.clone()); - - let ResultAndState { result, state } = evm - .transact(&builder_tx) - .map_err(|err| PayloadBuilderError::EvmExecutionError(Box::new(err)))?; - - // Add gas used by the transaction to cumulative gas used, before creating the receipt 
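Each successful pool transaction also accrues builder revenue above: the EIP-1559 effective tip per gas (the priority fee capped by `max_fee - base_fee`) times the gas actually used is added to `total_fees`. A small sketch of that arithmetic with `u128` standing in for `U256` (the function name is illustrative):

```rust
/// Illustrative fee bookkeeping; u128 stands in for U256 and the EIP-1559
/// effective tip is computed inline instead of via effective_tip_per_gas().
fn accrue_fees(
    total_fees: &mut u128,
    max_fee_per_gas: u128,
    max_priority_fee_per_gas: u128,
    base_fee: u128,
    gas_used: u64,
) {
    // effective tip = min(max_priority_fee, max_fee - base_fee)
    let effective_tip =
        max_priority_fee_per_gas.min(max_fee_per_gas.saturating_sub(base_fee));
    *total_fees += effective_tip * gas_used as u128;
}

fn main() {
    let mut total_fees = 0u128;
    accrue_fees(&mut total_fees, 100, 2, 99, 21_000); // tip capped at 100 - 99 = 1
    assert_eq!(total_fees, 21_000);
    accrue_fees(&mut total_fees, 100, 2, 50, 21_000); // tip = priority fee = 2
    assert_eq!(total_fees, 21_000 + 42_000);
}
```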
- let gas_used = result.gas_used(); - info.cumulative_gas_used += gas_used; - - let ctx = ReceiptBuilderCtx { - tx: builder_tx.inner(), - evm: &evm, - result, - state: &state, - cumulative_gas_used: info.cumulative_gas_used, - }; - info.receipts.push(self.build_receipt(ctx, None)); - - // Release the db reference by dropping evm - drop(evm); - // Commit changes - db.commit(state); - - // Append sender and transaction to the respective lists - info.executed_senders.push(builder_tx.signer()); - info.executed_transactions.push(builder_tx.into_inner()); - Ok(()) - }) - .transpose() - .unwrap_or_else(|err: PayloadBuilderError| { - warn!(target: "payload_builder", %err, "Failed to add builder transaction"); - None - }) - } - - /// Calculates EIP 2718 builder transaction size - pub fn estimate_builder_tx_da_size( - &self, - db: &mut State, - builder_tx_gas: u64, - message: Vec, - ) -> Option - where - DB: Database, - { - self.builder_signer() - .map(|signer| { - let base_fee = self.base_fee(); - let chain_id = self.chain_id(); - // Create and sign the transaction - let builder_tx = - signed_builder_tx(db, builder_tx_gas, message, signer, base_fee, chain_id)?; - Ok(builder_tx.length()) - }) - .transpose() - .unwrap_or_else(|err: PayloadBuilderError| { - warn!(target: "payload_builder", %err, "Failed to add builder transaction"); - None - }) - } -} - -/// Creates signed builder tx to Address::ZERO and specified message as input -pub fn signed_builder_tx( - db: &mut State, - builder_tx_gas: u64, - message: Vec, - signer: Signer, - base_fee: u64, - chain_id: u64, -) -> Result, PayloadBuilderError> -where - DB: Database, -{ - // Create message with block number for the builder to sign - let nonce = db - .load_cache_account(signer.address) - .map(|acc| acc.account_info().unwrap_or_default().nonce) - .map_err(|_| { - PayloadBuilderError::other(OpPayloadBuilderError::AccountLoadFailed(signer.address)) - })?; - - // Create the EIP-1559 transaction - let tx = OpTypedTransaction::Eip1559(TxEip1559 { - chain_id, - nonce, - gas_limit: builder_tx_gas, - max_fee_per_gas: base_fee.into(), - max_priority_fee_per_gas: 0, - to: TxKind::Call(Address::ZERO), - // Include the message as part of the transaction data - input: message.into(), - ..Default::default() - }); - // Sign the transaction - let builder_tx = signer.sign_tx(tx).map_err(PayloadBuilderError::other)?; - - Ok(builder_tx) -} - -fn estimate_gas_for_builder_tx(input: Vec) -> u64 { - // Count zero and non-zero bytes - let (zero_bytes, nonzero_bytes) = input.iter().fold((0, 0), |(zeros, nonzeros), &byte| { - if byte == 0 { - (zeros + 1, nonzeros) - } else { - (zeros, nonzeros + 1) - } - }); - - // Calculate gas cost (4 gas per zero byte, 16 gas per non-zero byte) - let zero_cost = zero_bytes * 4; - let nonzero_cost = nonzero_bytes * 16; - - // Tx gas should be not less than floor gas https://eips.ethereum.org/EIPS/eip-7623 - let tokens_in_calldata = zero_bytes + nonzero_bytes * 4; - let floor_gas = 21_000 + tokens_in_calldata * TOTAL_COST_FLOOR_PER_TOKEN; - - std::cmp::max(zero_cost + nonzero_cost + 21_000, floor_gas) -} diff --git a/crates/op-rbuilder/src/primitives/reth/execution.rs b/crates/op-rbuilder/src/primitives/reth/execution.rs index 3f5bf1774..1caf2a60c 100644 --- a/crates/op-rbuilder/src/primitives/reth/execution.rs +++ b/crates/op-rbuilder/src/primitives/reth/execution.rs @@ -1,13 +1,13 @@ //! 
Heavily influenced by [reth](https://github.com/paradigmxyz/reth/blob/1e965caf5fa176f244a31c0d2662ba1b590938db/crates/optimism/payload/src/builder.rs#L570) use alloy_consensus::Transaction; use alloy_primitives::{private::alloy_rlp::Encodable, Address, U256}; -use reth_node_api::NodePrimitives; -use reth_optimism_primitives::OpReceipt; +use core::fmt::Debug; +use reth_optimism_primitives::{OpReceipt, OpTransactionSigned}; #[derive(Default, Debug)] -pub struct ExecutionInfo { +pub struct ExecutionInfo { /// All executed transactions (unrecovered). - pub executed_transactions: Vec, + pub executed_transactions: Vec, /// The recovered senders for the executed transactions. pub executed_senders: Vec
<Address>
, /// The transaction receipts @@ -18,12 +18,11 @@ pub struct ExecutionInfo { pub cumulative_da_bytes_used: u64, /// Tracks fees from executed mempool transactions pub total_fees: U256, - #[cfg(feature = "flashblocks")] - /// Index of the last consumed flashblock - pub last_flashblock_index: usize, + /// Extra execution information that can be attached by individual builders. + pub extra: Extra, } -impl ExecutionInfo { +impl ExecutionInfo { /// Create a new instance with allocated slots. pub fn with_capacity(capacity: usize) -> Self { Self { @@ -33,8 +32,7 @@ impl ExecutionInfo { cumulative_gas_used: 0, cumulative_da_bytes_used: 0, total_fees: U256::ZERO, - #[cfg(feature = "flashblocks")] - last_flashblock_index: 0, + extra: Default::default(), } } @@ -46,7 +44,7 @@ impl ExecutionInfo { /// maximum allowed DA limit per block. pub fn is_tx_over_limits( &self, - tx: &N::SignedTx, + tx: &OpTransactionSigned, block_gas_limit: u64, tx_data_limit: Option, block_data_limit: Option, diff --git a/crates/op-rbuilder/src/tests/framework/op.rs b/crates/op-rbuilder/src/tests/framework/op.rs index ea66f306c..45c2daf73 100644 --- a/crates/op-rbuilder/src/tests/framework/op.rs +++ b/crates/op-rbuilder/src/tests/framework/op.rs @@ -143,6 +143,8 @@ impl Service for OpRbuilderConfig { } if let Some(flashblocks_ws_url) = &self.flashblocks_ws_url { + cmd.arg("--rollup.enable-flashblocks").arg("true"); + cmd.arg("--rollup.flashblocks-ws-url") .arg(flashblocks_ws_url); } diff --git a/crates/op-rbuilder/src/tests/framework/txs.rs b/crates/op-rbuilder/src/tests/framework/txs.rs index b4049673f..7c04f9524 100644 --- a/crates/op-rbuilder/src/tests/framework/txs.rs +++ b/crates/op-rbuilder/src/tests/framework/txs.rs @@ -142,7 +142,7 @@ impl TransactionBuilder { } pub async fn send(self) -> eyre::Result> { - let bundle_opts = self.bundle_opts.clone(); + let bundle_opts = self.bundle_opts; let provider = self.provider.clone(); let transaction = self.build().await; let transaction_encoded = transaction.encoded_2718(); diff --git a/crates/op-rbuilder/src/tests/mod.rs b/crates/op-rbuilder/src/tests/mod.rs index 05ed01b61..e3f96187c 100644 --- a/crates/op-rbuilder/src/tests/mod.rs +++ b/crates/op-rbuilder/src/tests/mod.rs @@ -2,8 +2,5 @@ mod framework; pub use framework::*; -#[cfg(not(feature = "flashblocks"))] -mod vanilla; - -#[cfg(feature = "flashblocks")] mod flashblocks; +mod vanilla; diff --git a/crates/op-rbuilder/src/traits.rs b/crates/op-rbuilder/src/traits.rs new file mode 100644 index 000000000..3c9243530 --- /dev/null +++ b/crates/op-rbuilder/src/traits.rs @@ -0,0 +1,70 @@ +use alloy_consensus::Header; +use reth_node_api::{FullNodeTypes, NodeTypes}; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_node::OpEngineTypes; +use reth_optimism_primitives::{OpPrimitives, OpTransactionSigned}; +use reth_payload_util::PayloadTransactions; +use reth_provider::{BlockReaderIdExt, ChainSpecProvider, StateProviderFactory}; +use reth_transaction_pool::TransactionPool; + +use crate::tx::FBPoolTransaction; + +pub trait NodeBounds: + FullNodeTypes< + Types: NodeTypes, +> +{ +} + +impl NodeBounds for T where + T: FullNodeTypes< + Types: NodeTypes< + Payload = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + >, + > +{ +} + +pub trait PoolBounds: + TransactionPool> + Unpin + 'static +where + ::Transaction: FBPoolTransaction, +{ +} + +impl PoolBounds for T +where + T: TransactionPool> + + Unpin + + 'static, + ::Transaction: FBPoolTransaction, +{ +} + +pub trait ClientBounds: + StateProviderFactory 
+ + ChainSpecProvider + + BlockReaderIdExt
+ + Clone +{ +} + +impl ClientBounds for T where + T: StateProviderFactory + + ChainSpecProvider + + BlockReaderIdExt
+ + Clone +{ +} + +pub trait PayloadTxsBounds: + PayloadTransactions> +{ +} + +impl PayloadTxsBounds for T where + T: PayloadTransactions> +{ +}
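The new `traits.rs` bounds (`NodeBounds`, `PoolBounds`, `ClientBounds`, `PayloadTxsBounds`) all follow the same pattern: an empty marker trait that bundles a set of bounds, plus a blanket impl so every type satisfying those bounds implements it automatically, letting downstream generics name one trait instead of repeating the whole list. A minimal, self-contained version of the pattern with illustrative names:

```rust
use std::fmt::Debug;

/// Empty marker trait bundling a set of bounds...
trait BoundsAlias: Debug + Clone + Send + 'static {}

/// ...and a blanket impl so every type satisfying the bounds gets it for free.
impl<T> BoundsAlias for T where T: Debug + Clone + Send + 'static {}

// Downstream code names one bound instead of repeating the full set.
fn describe<T: BoundsAlias>(value: &T) -> String {
    format!("{value:?}")
}

fn main() {
    assert_eq!(describe(&42_u32), "42");
    assert_eq!(describe(&String::from("builder")), "\"builder\"");
}
```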