From e4b9588584d672eeab7fec784da928a8786a7c0b Mon Sep 17 00:00:00 2001 From: panos-xyz Date: Fri, 3 Apr 2026 18:02:15 +0800 Subject: [PATCH 01/17] feat(bench): add BenchToken Solidity contract for ERC20 benchmark workload --- local-test/erc20-bench-contracts/.gitignore | 2 ++ local-test/erc20-bench-contracts/foundry.toml | 7 +++++ .../erc20-bench-contracts/src/BenchToken.sol | 29 +++++++++++++++++++ 3 files changed, 38 insertions(+) create mode 100644 local-test/erc20-bench-contracts/.gitignore create mode 100644 local-test/erc20-bench-contracts/foundry.toml create mode 100644 local-test/erc20-bench-contracts/src/BenchToken.sol diff --git a/local-test/erc20-bench-contracts/.gitignore b/local-test/erc20-bench-contracts/.gitignore new file mode 100644 index 0000000..a97499d --- /dev/null +++ b/local-test/erc20-bench-contracts/.gitignore @@ -0,0 +1,2 @@ +out/ +cache/ diff --git a/local-test/erc20-bench-contracts/foundry.toml b/local-test/erc20-bench-contracts/foundry.toml new file mode 100644 index 0000000..ab1b7b3 --- /dev/null +++ b/local-test/erc20-bench-contracts/foundry.toml @@ -0,0 +1,7 @@ +[profile.default] +src = "src" +out = "out" +libs = [] +optimizer = true +optimizer_runs = 200 +evm_version = "shanghai" diff --git a/local-test/erc20-bench-contracts/src/BenchToken.sol b/local-test/erc20-bench-contracts/src/BenchToken.sol new file mode 100644 index 0000000..5daebd5 --- /dev/null +++ b/local-test/erc20-bench-contracts/src/BenchToken.sol @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.20; + +/// @title BenchToken - Minimal ERC20 for benchmarking +/// @notice Mints entire supply to deployer. Only implements transfer + balanceOf. +contract BenchToken { + string public constant name = "BenchToken"; + string public constant symbol = "BENCH"; + uint8 public constant decimals = 18; + uint256 public totalSupply; + + mapping(address => uint256) public balanceOf; + + event Transfer(address indexed from, address indexed to, uint256 value); + + constructor(uint256 _initialSupply) { + totalSupply = _initialSupply; + balanceOf[msg.sender] = _initialSupply; + emit Transfer(address(0), msg.sender, _initialSupply); + } + + function transfer(address to, uint256 value) external returns (bool) { + require(balanceOf[msg.sender] >= value, "insufficient balance"); + balanceOf[msg.sender] -= value; + balanceOf[to] += value; + emit Transfer(msg.sender, to, value); + return true; + } +} From 5f87918ae18d6fdc70aae14c3b167c0868190d13 Mon Sep 17 00:00:00 2001 From: panos-xyz Date: Fri, 3 Apr 2026 18:04:40 +0800 Subject: [PATCH 02/17] feat(bench): add bench-block-exec crate skeleton with CLI subcommands --- Cargo.lock | 25 +++++++++++++++++ Cargo.toml | 1 + bin/bench-block-exec/Cargo.toml | 31 ++++++++++++++++++++ bin/bench-block-exec/src/engine.rs | 2 ++ bin/bench-block-exec/src/genesis.rs | 18 ++++++++++++ bin/bench-block-exec/src/main.rs | 37 ++++++++++++++++++++++++ bin/bench-block-exec/src/report.rs | 15 ++++++++++ bin/bench-block-exec/src/verify.rs | 21 ++++++++++++++ bin/bench-block-exec/src/workload.rs | 42 ++++++++++++++++++++++++++++ 9 files changed, 192 insertions(+) create mode 100644 bin/bench-block-exec/Cargo.toml create mode 100644 bin/bench-block-exec/src/engine.rs create mode 100644 bin/bench-block-exec/src/genesis.rs create mode 100644 bin/bench-block-exec/src/main.rs create mode 100644 bin/bench-block-exec/src/report.rs create mode 100644 bin/bench-block-exec/src/verify.rs create mode 100644 bin/bench-block-exec/src/workload.rs diff --git a/Cargo.lock b/Cargo.lock index 
10a2f5c..947958f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1453,6 +1453,31 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" +[[package]] +name = "bench-block-exec" +version = "0.1.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-network", + "alloy-primitives", + "alloy-rlp", + "alloy-signer", + "alloy-signer-local", + "alloy-sol-types", + "base64 0.22.1", + "bytes", + "clap", + "eyre", + "jsonrpsee", + "rand 0.8.5", + "reqwest", + "serde", + "serde_json", + "sha2", + "tokio", +] + [[package]] name = "bimap" version = "0.6.3" diff --git a/Cargo.toml b/Cargo.toml index 910611d..1ab8132 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ publish = false resolver = "3" members = [ "bin/morph-reth", + "bin/bench-block-exec", "crates/chainspec", "crates/consensus", "crates/engine-api", diff --git a/bin/bench-block-exec/Cargo.toml b/bin/bench-block-exec/Cargo.toml new file mode 100644 index 0000000..c3def5c --- /dev/null +++ b/bin/bench-block-exec/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "bench-block-exec" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +publish = false + +[dependencies] +alloy-consensus = { workspace = true, features = ["std", "k256"] } +alloy-eips = { workspace = true, features = ["std"] } +alloy-network = { workspace = true } +alloy-primitives = { workspace = true, features = ["std", "serde", "rand"] } +alloy-rlp = { workspace = true } +alloy-signer = { workspace = true } +alloy-signer-local = { workspace = true } +alloy-sol-types = { workspace = true } +base64 = { version = "0.22" } +bytes = { workspace = true } +clap = { workspace = true } +eyre = { workspace = true } +jsonrpsee = { workspace = true } +rand = { workspace = true } +reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] } +serde = { workspace = true } +serde_json = { workspace = true } +sha2 = { workspace = true } +tokio = { workspace = true } + +[lints] +workspace = true diff --git a/bin/bench-block-exec/src/engine.rs b/bin/bench-block-exec/src/engine.rs new file mode 100644 index 0000000..8bef6d1 --- /dev/null +++ b/bin/bench-block-exec/src/engine.rs @@ -0,0 +1,2 @@ +/// Authenticated Engine RPC client for Morph L2 Engine API. +pub struct EngineClient; diff --git a/bin/bench-block-exec/src/genesis.rs b/bin/bench-block-exec/src/genesis.rs new file mode 100644 index 0000000..ef699b3 --- /dev/null +++ b/bin/bench-block-exec/src/genesis.rs @@ -0,0 +1,18 @@ +use clap::Args; + +#[derive(Args)] +pub struct WriteGenesisArgs { + /// Output path for the genesis JSON file. + #[arg(long)] + pub output: String, + /// Hex-encoded sender address (0x-prefixed). + #[arg(long)] + pub sender: String, + /// Sender balance in wei (decimal string). 
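+    /// The default value corresponds to 10^27 wei, i.e. one billion ether.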
+ #[arg(long, default_value = "1000000000000000000000000000")] + pub sender_balance: String, +} + +pub fn run(_args: WriteGenesisArgs) -> eyre::Result<()> { + todo!("genesis generation not yet implemented") +} diff --git a/bin/bench-block-exec/src/main.rs b/bin/bench-block-exec/src/main.rs new file mode 100644 index 0000000..2ca43bb --- /dev/null +++ b/bin/bench-block-exec/src/main.rs @@ -0,0 +1,37 @@ +mod engine; +mod genesis; +mod report; +mod verify; +mod workload; + +use clap::{Parser, Subcommand}; + +#[derive(Parser)] +#[command(name = "bench-block-exec", about = "Geth vs Reth block execution benchmark")] +struct Cli { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand)] +enum Command { + /// Generate a custom benchmark genesis JSON. + WriteGenesis(genesis::WriteGenesisArgs), + /// Run a benchmark workload against a running node. + RunWorkload(workload::RunWorkloadArgs), + /// Verify state consistency between two nodes. + VerifyState(verify::VerifyStateArgs), + /// Aggregate benchmark results into a summary. + Summarize(report::SummarizeArgs), +} + +#[tokio::main] +async fn main() -> eyre::Result<()> { + let cli = Cli::parse(); + match cli.command { + Command::WriteGenesis(args) => genesis::run(args), + Command::RunWorkload(args) => workload::run(args).await, + Command::VerifyState(args) => verify::run(args).await, + Command::Summarize(args) => report::summarize(args), + } +} diff --git a/bin/bench-block-exec/src/report.rs b/bin/bench-block-exec/src/report.rs new file mode 100644 index 0000000..eab8595 --- /dev/null +++ b/bin/bench-block-exec/src/report.rs @@ -0,0 +1,15 @@ +use clap::Args; + +#[derive(Args)] +pub struct SummarizeArgs { + /// Directory containing per-round result files. + #[arg(long)] + pub results_dir: String, + /// Output file path for the summary. + #[arg(long)] + pub output: Option, +} + +pub fn summarize(_args: SummarizeArgs) -> eyre::Result<()> { + todo!("summarize not yet implemented") +} diff --git a/bin/bench-block-exec/src/verify.rs b/bin/bench-block-exec/src/verify.rs new file mode 100644 index 0000000..f6a6dbb --- /dev/null +++ b/bin/bench-block-exec/src/verify.rs @@ -0,0 +1,21 @@ +use clap::Args; + +#[derive(Args)] +pub struct VerifyStateArgs { + /// RPC URL for node A. + #[arg(long)] + pub rpc_a: String, + /// RPC URL for node B. + #[arg(long)] + pub rpc_b: String, + /// Number of accounts to sample for balance checks. + #[arg(long, default_value = "100")] + pub check_balances: u64, + /// Output file path for verification results. + #[arg(long)] + pub output: Option, +} + +pub async fn run(_args: VerifyStateArgs) -> eyre::Result<()> { + todo!("verify-state not yet implemented") +} diff --git a/bin/bench-block-exec/src/workload.rs b/bin/bench-block-exec/src/workload.rs new file mode 100644 index 0000000..2b3ac16 --- /dev/null +++ b/bin/bench-block-exec/src/workload.rs @@ -0,0 +1,42 @@ +use clap::Args; + +#[derive(Args)] +pub struct RunWorkloadArgs { + /// Engine API RPC URL (authenticated). + #[arg(long, default_value = "http://127.0.0.1:8551")] + pub engine_rpc: String, + /// Path to JWT secret file. + #[arg(long)] + pub jwt_secret: String, + /// HTTP RPC URL (unauthenticated, for readiness checks). + #[arg(long, default_value = "http://127.0.0.1:8545")] + pub http_rpc: String, + /// Workload layer: "eth-transfer" or "erc20-transfer". + #[arg(long)] + pub layer: String, + /// Number of transactions per block. + #[arg(long)] + pub txs_per_block: u64, + /// Number of blocks to produce. 
+    #[arg(long)]
+    pub blocks: u64,
+    /// Output file path for per-block results (JSON lines).
+    #[arg(long)]
+    pub output: String,
+    /// Engine name for tagging results (e.g., "geth" or "reth").
+    #[arg(long, default_value = "unknown")]
+    pub engine_name: String,
+    /// Path to the BenchToken contract artifact JSON (required for erc20-transfer layer).
+    #[arg(long)]
+    pub contract_artifact: Option<String>,
+    /// Hex-encoded sender private key (0x-prefixed).
+    #[arg(long)]
+    pub sender_key: String,
+    /// Chain ID.
+    #[arg(long, default_value = "99999")]
+    pub chain_id: u64,
+}
+
+pub async fn run(_args: RunWorkloadArgs) -> eyre::Result<()> {
+    todo!("run-workload not yet implemented")
+}

From 218f8b7b2049d29a746323e1540d638d0adec29a Mon Sep 17 00:00:00 2001
From: panos-xyz
Date: Fri, 3 Apr 2026 18:07:56 +0800
Subject: [PATCH 03/17] feat(bench): implement genesis generation with all Morph hardforks at genesis

---
 bin/bench-block-exec/src/genesis.rs | 128 +++++++++++++++++++++++++++-
 1 file changed, 126 insertions(+), 2 deletions(-)

diff --git a/bin/bench-block-exec/src/genesis.rs b/bin/bench-block-exec/src/genesis.rs
index ef699b3..5f7eb17 100644
--- a/bin/bench-block-exec/src/genesis.rs
+++ b/bin/bench-block-exec/src/genesis.rs
@@ -1,4 +1,7 @@
+use alloy_primitives::U256;
 use clap::Args;
+use eyre::ensure;
+use serde_json::json;
 
 #[derive(Args)]
 pub struct WriteGenesisArgs {
@@ -13,6 +16,127 @@ pub struct WriteGenesisArgs {
     pub sender_balance: String,
 }
 
-pub fn run(_args: WriteGenesisArgs) -> eyre::Result<()> {
-    todo!("genesis generation not yet implemented")
+/// Build a genesis JSON value for the benchmark chain.
+///
+/// All Morph hardforks are activated at block 0 / timestamp 0.
+/// Uses MPT mode (`useZktrie: false`) since Jade is active at genesis.
+pub fn build_genesis(sender: &str, sender_balance: &str) -> eyre::Result<serde_json::Value> {
+    // Normalize sender: strip 0x prefix, lowercase.
+    let sender_normalized = sender.strip_prefix("0x").unwrap_or(sender).to_ascii_lowercase();
+
+    // Validate: must be exactly 40 hex characters.
+    ensure!(
+        sender_normalized.len() == 40 && sender_normalized.chars().all(|c| c.is_ascii_hexdigit()),
+        "sender must be a 40-character hex address, got: {sender}"
+    );
+
+    // Parse balance as decimal U256, then format as 0x-prefixed hex.
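+    // For example, a decimal input of "1000000000000000000" (one ether in wei) formats as "0xde0b6b3a7640000".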
+ let balance = U256::from_str_radix(sender_balance, 10) + .map_err(|e| eyre::eyre!("invalid sender_balance decimal: {e}"))?; + let hex_balance = format!("{balance:#x}"); + + let genesis = json!({ + "config": { + "chainId": 99999, + "homesteadBlock": 0, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "archimedesBlock": 0, + "shanghaiBlock": 0, + "bernoulliBlock": 0, + "curieBlock": 0, + "morph203Time": 0, + "viridianTime": 0, + "emeraldTime": 0, + "jadeTime": 0, + "terminalTotalDifficulty": 0, + "morph": { + "useZktrie": false, + "maxTxPerBlock": 10000, + "maxTxPayloadBytesPerBlock": 131072000, + "feeVaultAddress": "0x530000000000000000000000000000000000000a" + } + }, + "nonce": "0x0", + "timestamp": "0x0", + "extraData": "0x", + "gasLimit": "0x30000000", + "difficulty": "0x0", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": { + (sender_normalized): { + "balance": hex_balance + } + } + }); + + Ok(genesis) +} + +pub fn run(args: WriteGenesisArgs) -> eyre::Result<()> { + let genesis = build_genesis(&args.sender, &args.sender_balance)?; + let json_str = serde_json::to_string_pretty(&genesis)?; + std::fs::write(&args.output, json_str)?; + println!("Genesis written to {}", args.output); + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn genesis_contains_sender_alloc_and_morph_config() { + let sender = "0xABcdEF0123456789abcDEF0123456789aBCDeF01"; + let balance = "1000000000000000000000000000"; + let genesis = build_genesis(sender, balance).expect("should build genesis"); + + // Sender appears in alloc (lowercase, no 0x prefix). + let sender_key = "abcdef0123456789abcdef0123456789abcdef01"; + let alloc = genesis.get("alloc").expect("alloc must exist"); + let entry = alloc.get(sender_key).expect("sender must be in alloc"); + assert!(entry.get("balance").is_some(), "balance must be set"); + + // Chain config must be present with all hardforks at 0. + let config = genesis.get("config").expect("config must exist"); + assert_eq!(config["chainId"], 99999); + assert_eq!(config["homesteadBlock"], 0); + assert_eq!(config["jadeTime"], 0); + assert_eq!(config["emeraldTime"], 0); + assert_eq!(config["viridianTime"], 0); + + // Morph section. + let morph = config.get("morph").expect("morph section must exist"); + assert_eq!(morph["useZktrie"], false); + assert_eq!(morph["maxTxPerBlock"], 10000); + assert_eq!( + morph["feeVaultAddress"], + "0x530000000000000000000000000000000000000a" + ); + + // Gas limit must be >= 800_000_000 (0x30000000 = 805306368). + let gas_limit_str = genesis["gasLimit"].as_str().expect("gasLimit must be a string"); + let gas_limit = + u64::from_str_radix(gas_limit_str.strip_prefix("0x").unwrap(), 16).unwrap(); + assert!( + gas_limit >= 800_000_000, + "gasLimit must be >= 800M, got {gas_limit}" + ); + } + + #[test] + fn genesis_rejects_invalid_sender() { + // Too short — only 38 hex chars. 
+        let result = build_genesis("0xabcdef01234567890123456789012345678901", "1000");
+        assert!(result.is_err(), "should reject short address");
+    }
 }

From 97d2da16655abbd46a5f7177e5e3eb6317d27c47 Mon Sep 17 00:00:00 2001
From: panos-xyz
Date: Fri, 3 Apr 2026 18:09:55 +0800
Subject: [PATCH 04/17] feat(bench): implement JWT-authenticated Engine RPC client with timing

---
 bin/bench-block-exec/src/engine.rs | 474 ++++++++++++++++++++++++++++-
 1 file changed, 473 insertions(+), 1 deletion(-)

diff --git a/bin/bench-block-exec/src/engine.rs b/bin/bench-block-exec/src/engine.rs
index 8bef6d1..d22d6b9 100644
--- a/bin/bench-block-exec/src/engine.rs
+++ b/bin/bench-block-exec/src/engine.rs
@@ -1,2 +1,474 @@
 /// Authenticated Engine RPC client for Morph L2 Engine API.
-pub struct EngineClient;
+///
+/// This module implements a JWT-authenticated client that talks to the Engine API
+/// (`engine_assembleL2Block`, `engine_newL2Block`) and measures per-call timing.
+use alloy_primitives::{Bytes, B256};
+use base64::{Engine as _, engine::general_purpose::URL_SAFE_NO_PAD};
+use eyre::ensure;
+use jsonrpsee::{
+    core::client::ClientT,
+    http_client::{HeaderMap, HeaderValue, HttpClient, HttpClientBuilder},
+};
+use serde::{Deserialize, Serialize};
+use sha2::{Digest, Sha256};
+use std::time::{Instant, SystemTime, UNIX_EPOCH};
+
+// ---------------------------------------------------------------------------
+// Hex-quantity serde helpers
+// ---------------------------------------------------------------------------
+
+/// Serialize/deserialize a `u64` as a `"0x…"` hex quantity string.
+mod quantity {
+    use serde::{self, Deserialize, Deserializer, Serializer};
+
+    pub fn serialize<S: Serializer>(val: &u64, s: S) -> Result<S::Ok, S::Error> {
+        s.serialize_str(&format!("{val:#x}"))
+    }
+
+    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<u64, D::Error> {
+        let s = String::deserialize(d)?;
+        let s = s.strip_prefix("0x").or_else(|| s.strip_prefix("0X")).unwrap_or(&s);
+        u64::from_str_radix(s, 16).map_err(serde::de::Error::custom)
+    }
+}
+
+/// Serialize/deserialize an `Option<u64>` as a `"0x…"` hex quantity string.
+mod quantity_opt {
+    use serde::{self, Deserialize, Deserializer, Serializer};
+
+    pub fn serialize<S: Serializer>(val: &Option<u64>, s: S) -> Result<S::Ok, S::Error> {
+        match val {
+            Some(v) => s.serialize_str(&format!("{v:#x}")),
+            None => s.serialize_none(),
+        }
+    }
+
+    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Option<u64>, D::Error> {
+        let opt: Option<String> = Option::deserialize(d)?;
+        match opt {
+            None => Ok(None),
+            Some(s) => {
+                let s =
+                    s.strip_prefix("0x").or_else(|| s.strip_prefix("0X")).unwrap_or(&s);
+                u64::from_str_radix(s, 16).map(Some).map_err(serde::de::Error::custom)
+            }
+        }
+    }
+}
+
+// ---------------------------------------------------------------------------
+// Engine API types
+// ---------------------------------------------------------------------------
+
+/// Parameters for `engine_assembleL2Block`.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct AssembleL2BlockParams {
+    /// Block number to assemble.
+    #[serde(with = "quantity")]
+    pub number: u64,
+
+    /// Transactions to include (RLP-encoded).
+    #[serde(default)]
+    pub transactions: Vec<Bytes>,
+
+    /// Optional block timestamp.
+    #[serde(
+        default,
+        skip_serializing_if = "Option::is_none",
+        with = "quantity_opt"
+    )]
+    pub timestamp: Option<u64>,
+}
+
+/// Response from `engine_assembleL2Block` / input for `engine_newL2Block`.
+///
+/// Only the fields the benchmark actively reads are strongly typed.
+/// All remaining fields (e.g.
+/// `miner`, `baseFeePerGas`, `logsBloom`, `withdrawTrieRoot`) are captured in
+/// [`extra`] via `#[serde(flatten)]` so they pass through to `engine_newL2Block`
+/// unchanged.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ExecutableL2Data {
+    /// Parent block hash.
+    pub parent_hash: B256,
+
+    /// Block number.
+    #[serde(with = "quantity")]
+    pub number: u64,
+
+    /// Gas limit.
+    #[serde(with = "quantity")]
+    pub gas_limit: u64,
+
+    /// Gas used by all transactions.
+    #[serde(with = "quantity")]
+    pub gas_used: u64,
+
+    /// Block timestamp.
+    #[serde(with = "quantity")]
+    pub timestamp: u64,
+
+    /// RLP-encoded transactions.
+    #[serde(default)]
+    pub transactions: Vec<Bytes>,
+
+    /// State root after execution.
+    pub state_root: B256,
+
+    /// Receipts root.
+    pub receipts_root: B256,
+
+    /// Block hash.
+    pub hash: B256,
+
+    /// Next L1 message queue index.
+    ///
+    /// NOTE: morph-geth serializes this as a bare JSON number, not a hex string.
+    pub next_l1_message_index: u64,
+
+    /// Remaining fields preserved for round-trip fidelity.
+    #[serde(flatten)]
+    pub extra: serde_json::Map<String, serde_json::Value>,
+}
+
+// ---------------------------------------------------------------------------
+// Per-block timing record
+// ---------------------------------------------------------------------------
+
+/// Timing data for a single block's assemble + import cycle.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct BlockTiming {
+    pub block_number: u64,
+    pub tx_count: u64,
+    pub assemble_ms: f64,
+    pub import_ms: f64,
+    pub total_ms: f64,
+    pub layer: String,
+    pub engine: String,
+}
+
+// ---------------------------------------------------------------------------
+// JWT helpers
+// ---------------------------------------------------------------------------
+
+/// HMAC-SHA256: `H((K ^ opad) || H((K ^ ipad) || message))`
+fn hmac_sha256(key: &[u8], message: &[u8]) -> [u8; 32] {
+    const BLOCK_SIZE: usize = 64;
+
+    // If key > block size, hash it first.
+    let key = if key.len() > BLOCK_SIZE {
+        Sha256::digest(key).to_vec()
+    } else {
+        key.to_vec()
+    };
+
+    // Pad key to BLOCK_SIZE.
+    let mut padded = vec![0u8; BLOCK_SIZE];
+    padded[..key.len()].copy_from_slice(&key);
+
+    // ipad = key ^ 0x36
+    let mut ipad = vec![0x36u8; BLOCK_SIZE];
+    for (i, b) in padded.iter().enumerate() {
+        ipad[i] ^= b;
+    }
+
+    // opad = key ^ 0x5c
+    let mut opad = vec![0x5cu8; BLOCK_SIZE];
+    for (i, b) in padded.iter().enumerate() {
+        opad[i] ^= b;
+    }
+
+    // H((K ^ ipad) || message)
+    let mut inner = Sha256::new();
+    inner.update(&ipad);
+    inner.update(message);
+    let inner_hash = inner.finalize();
+
+    // H((K ^ opad) || inner_hash)
+    let mut outer = Sha256::new();
+    outer.update(&opad);
+    outer.update(inner_hash);
+
+    let result = outer.finalize();
+    let mut out = [0u8; 32];
+    out.copy_from_slice(&result);
+    out
+}
+
+/// Create a JWT token signed with the given hex-encoded secret.
+///
+/// The secret must be a 64-character hex string representing 32 bytes.
+/// The token carries an `iat` (issued-at, Unix seconds) claim and uses HS256.
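+///
+/// Illustrative usage (a sketch; the secret below is the same throwaway dev key
+/// used in this module's unit tests):
+///
+/// ```ignore
+/// let token = create_jwt_token("ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80")?;
+/// assert_eq!(token.split('.').count(), 3); // header.payload.signature
+/// ```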
+pub fn create_jwt_token(secret_hex: &str) -> eyre::Result<String> {
+    let secret_hex = secret_hex.strip_prefix("0x").unwrap_or(secret_hex);
+    ensure!(
+        secret_hex.len() == 64,
+        "JWT secret must be 32 bytes (64 hex chars), got {} hex chars",
+        secret_hex.len()
+    );
+    let secret = hex_decode(secret_hex)?;
+
+    let header = URL_SAFE_NO_PAD.encode(r#"{"alg":"HS256","typ":"JWT"}"#);
+
+    let iat = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs();
+    let payload = URL_SAFE_NO_PAD.encode(format!(r#"{{"iat":{iat}}}"#));
+
+    let signing_input = format!("{header}.{payload}");
+    let signature = hmac_sha256(&secret, signing_input.as_bytes());
+    let sig_b64 = URL_SAFE_NO_PAD.encode(signature);
+
+    Ok(format!("{signing_input}.{sig_b64}"))
+}
+
+/// Decode a hex string (without `0x` prefix) into bytes.
+fn hex_decode(hex: &str) -> eyre::Result<Vec<u8>> {
+    ensure!(hex.len() % 2 == 0, "hex string has odd length");
+    (0..hex.len())
+        .step_by(2)
+        .map(|i| u8::from_str_radix(&hex[i..i + 2], 16).map_err(Into::into))
+        .collect()
+}
+
+// ---------------------------------------------------------------------------
+// EngineClient
+// ---------------------------------------------------------------------------
+
+/// JWT-authenticated client for the Morph L2 Engine API.
+pub struct EngineClient {
+    jwt_secret_hex: String,
+}
+
+impl EngineClient {
+    /// Create a new `EngineClient`.
+    ///
+    /// `engine_rpc_url` is accepted for interface symmetry but not stored;
+    /// the actual URL is passed per-call so callers can target different nodes.
+    pub fn new(_engine_rpc_url: &str, jwt_secret_hex: String) -> eyre::Result<Self> {
+        // Validate the secret eagerly.
+        let stripped = jwt_secret_hex
+            .strip_prefix("0x")
+            .unwrap_or(&jwt_secret_hex);
+        ensure!(
+            stripped.len() == 64,
+            "JWT secret must be 32 bytes (64 hex chars), got {} hex chars",
+            stripped.len()
+        );
+        Ok(Self { jwt_secret_hex })
+    }
+
+    /// Build a fresh [`HttpClient`] with a Bearer JWT Authorization header.
+    fn authed_client(&self, url: &str) -> eyre::Result<HttpClient> {
+        let token = create_jwt_token(&self.jwt_secret_hex)?;
+        let mut headers = HeaderMap::new();
+        headers.insert(
+            "Authorization",
+            HeaderValue::from_str(&format!("Bearer {token}"))?,
+        );
+        let client = HttpClientBuilder::default()
+            .set_headers(headers)
+            .build(url)?;
+        Ok(client)
+    }
+
+    /// Call `engine_assembleL2Block` and return the response along with elapsed
+    /// milliseconds.
+    pub async fn assemble_l2_block(
+        &self,
+        url: &str,
+        params: AssembleL2BlockParams,
+    ) -> eyre::Result<(ExecutableL2Data, f64)> {
+        let client = self.authed_client(url)?;
+        let start = Instant::now();
+        let data: ExecutableL2Data = client
+            .request("engine_assembleL2Block", (params,))
+            .await?;
+        let elapsed_ms = start.elapsed().as_secs_f64() * 1000.0;
+        Ok((data, elapsed_ms))
+    }
+
+    /// Call `engine_newL2Block` and return the elapsed milliseconds.
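+    ///
+    /// Illustrative assemble-then-import cycle (a sketch mirroring how the workload
+    /// runner drives this client; `url` and `txs` are placeholders):
+    ///
+    /// ```ignore
+    /// let (block, assemble_ms) = client
+    ///     .assemble_l2_block(url, AssembleL2BlockParams {
+    ///         number: 1,
+    ///         transactions: txs,
+    ///         timestamp: Some(1),
+    ///     })
+    ///     .await?;
+    /// let import_ms = client.new_l2_block(url, block).await?;
+    /// ```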
+    pub async fn new_l2_block(
+        &self,
+        url: &str,
+        data: ExecutableL2Data,
+    ) -> eyre::Result<f64> {
+        let client = self.authed_client(url)?;
+        let start = Instant::now();
+        let _: () = client.request("engine_newL2Block", (data,)).await?;
+        let elapsed_ms = start.elapsed().as_secs_f64() * 1000.0;
+        Ok(elapsed_ms)
+    }
+}
+
+// ---------------------------------------------------------------------------
+// Tests
+// ---------------------------------------------------------------------------
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn jwt_token_is_valid_format() {
+        // 32 random bytes as hex
+        let secret = "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80";
+        let token = create_jwt_token(secret).expect("should create token");
+
+        let parts: Vec<&str> = token.split('.').collect();
+        assert_eq!(parts.len(), 3, "JWT must have 3 dot-separated parts");
+
+        // Header should decode to valid JSON with alg=HS256
+        let header_bytes = URL_SAFE_NO_PAD.decode(parts[0]).expect("decode header");
+        let header: serde_json::Value =
+            serde_json::from_slice(&header_bytes).expect("parse header JSON");
+        assert_eq!(header["alg"], "HS256");
+        assert_eq!(header["typ"], "JWT");
+
+        // Payload should contain an "iat" numeric field
+        let payload_bytes = URL_SAFE_NO_PAD.decode(parts[1]).expect("decode payload");
+        let payload: serde_json::Value =
+            serde_json::from_slice(&payload_bytes).expect("parse payload JSON");
+        assert!(
+            payload["iat"].is_number(),
+            "iat must be a number, got: {payload:?}"
+        );
+    }
+
+    #[test]
+    fn jwt_rejects_short_secret() {
+        let short = "deadbeef"; // only 4 bytes
+        let result = create_jwt_token(short);
+        assert!(result.is_err(), "should reject a non-32-byte secret");
+    }
+
+    #[test]
+    fn assemble_params_serializes_to_camel_case() {
+        let params = AssembleL2BlockParams {
+            number: 100,
+            transactions: vec![],
+            timestamp: Some(0x6553f100),
+        };
+
+        let json = serde_json::to_value(&params).expect("serialize");
+        let obj = json.as_object().expect("should be object");
+
+        // camelCase field names
+        assert!(obj.contains_key("number"), "missing 'number'");
+        assert!(obj.contains_key("transactions"), "missing 'transactions'");
+        assert!(obj.contains_key("timestamp"), "missing 'timestamp'");
+
+        // Hex quantity format
+        assert_eq!(obj["number"], "0x64");
+        assert_eq!(obj["timestamp"], "0x6553f100");
+    }
+
+    #[test]
+    fn assemble_params_omits_none_timestamp() {
+        let params = AssembleL2BlockParams {
+            number: 1,
+            transactions: vec![],
+            timestamp: None,
+        };
+
+        let json = serde_json::to_value(&params).expect("serialize");
+        let obj = json.as_object().expect("should be object");
+        assert!(
+            !obj.contains_key("timestamp"),
+            "None timestamp should be omitted"
+        );
+    }
+
+    #[test]
+    fn executable_l2_data_roundtrip_with_extra_fields() {
+        let json = r#"{
+            "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000001",
+            "miner": "0x0000000000000000000000000000000000000002",
+            "number": "0x64",
+            "gasLimit": "0x1c9c380",
+            "gasUsed": "0x5208",
+            "timestamp": "0x499602d2",
+            "transactions": [],
+            "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000003",
+            "receiptsRoot": "0x0000000000000000000000000000000000000000000000000000000000000004",
+            "hash": "0x0000000000000000000000000000000000000000000000000000000000000006",
+            "nextL1MessageIndex": 10,
+            "logsBloom": "0x00",
+            "withdrawTrieRoot": "0x0000000000000000000000000000000000000000000000000000000000000005",
+            "baseFeePerGas": "0x3b9aca00"
+        }"#;
+
+        let data: ExecutableL2Data =
serde_json::from_str(json).expect("deserialize"); + assert_eq!(data.number, 100); + assert_eq!(data.gas_limit, 30_000_000); + assert_eq!(data.gas_used, 21000); + assert_eq!(data.next_l1_message_index, 10); + + // Extra fields must survive the round-trip. + assert!(data.extra.contains_key("miner"), "miner should be in extra"); + assert!( + data.extra.contains_key("logsBloom"), + "logsBloom should be in extra" + ); + assert!( + data.extra.contains_key("withdrawTrieRoot"), + "withdrawTrieRoot should be in extra" + ); + assert!( + data.extra.contains_key("baseFeePerGas"), + "baseFeePerGas should be in extra" + ); + + // Re-serialize and verify the extra fields are still present. + let re_json = serde_json::to_value(&data).expect("re-serialize"); + let obj = re_json.as_object().expect("should be object"); + assert!(obj.contains_key("miner")); + assert!(obj.contains_key("logsBloom")); + assert!(obj.contains_key("withdrawTrieRoot")); + assert!(obj.contains_key("baseFeePerGas")); + } + + #[test] + fn hmac_sha256_known_vector() { + // RFC 4231 Test Case 2 + let key = b"Jefe"; + let data = b"what do ya want for nothing?"; + let expected: [u8; 32] = [ + 0x5b, 0xdc, 0xc1, 0x46, 0xbf, 0x60, 0x75, 0x4e, 0x6a, 0x04, 0x24, 0x26, 0x08, 0x95, + 0x75, 0xc7, 0x5a, 0x00, 0x3f, 0x08, 0x9d, 0x27, 0x39, 0x83, 0x9d, 0xec, 0x58, 0xb9, + 0x64, 0xec, 0x38, 0x43, + ]; + assert_eq!(hmac_sha256(key, data), expected); + } + + #[test] + fn engine_client_rejects_short_secret() { + let result = EngineClient::new("http://localhost:8551", "deadbeef".into()); + assert!(result.is_err()); + } + + #[test] + fn jwt_accepts_0x_prefixed_secret() { + let secret = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"; + let token = create_jwt_token(secret).expect("should handle 0x prefix"); + assert_eq!(token.split('.').count(), 3); + } + + #[test] + fn block_timing_serde_roundtrip() { + let timing = BlockTiming { + block_number: 42, + tx_count: 10, + assemble_ms: 12.5, + import_ms: 8.3, + total_ms: 20.8, + layer: "eth-transfer".into(), + engine: "reth".into(), + }; + let json = serde_json::to_string(&timing).expect("serialize"); + let decoded: BlockTiming = serde_json::from_str(&json).expect("deserialize"); + assert_eq!(decoded.block_number, 42); + assert_eq!(decoded.tx_count, 10); + assert!((decoded.total_ms - 20.8).abs() < f64::EPSILON); + } +} From e87d4b480e6227fff132b8f774fac7b2f3cce340 Mon Sep 17 00:00:00 2001 From: panos-xyz Date: Fri, 3 Apr 2026 18:12:27 +0800 Subject: [PATCH 05/17] feat(bench): implement results aggregation with percentiles and 300ms analysis --- bin/bench-block-exec/src/report.rs | 181 ++++++++++++++++++++++++++++- 1 file changed, 179 insertions(+), 2 deletions(-) diff --git a/bin/bench-block-exec/src/report.rs b/bin/bench-block-exec/src/report.rs index eab8595..63edf3a 100644 --- a/bin/bench-block-exec/src/report.rs +++ b/bin/bench-block-exec/src/report.rs @@ -1,4 +1,11 @@ +use crate::engine::BlockTiming; use clap::Args; +use std::{ + collections::BTreeMap, + fs, + io::{BufRead, BufReader, Write}, + path::{Path, PathBuf}, +}; #[derive(Args)] pub struct SummarizeArgs { @@ -10,6 +17,176 @@ pub struct SummarizeArgs { pub output: Option, } -pub fn summarize(_args: SummarizeArgs) -> eyre::Result<()> { - todo!("summarize not yet implemented") +/// Compute the `p`-th percentile (0..100) from a **pre-sorted** slice using +/// linear interpolation between adjacent values. +/// +/// Returns `0.0` for an empty slice. 
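+///
+/// Worked example (consistent with the unit tests below): for the ten sorted values
+/// 10.0, 20.0, ..., 100.0, `p = 50` maps to rank 0.5 * 9 = 4.5, i.e. halfway between
+/// 50.0 and 60.0, giving 55.0.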
+pub fn percentile(sorted: &[f64], p: f64) -> f64 {
+    if sorted.is_empty() {
+        return 0.0;
+    }
+    if sorted.len() == 1 {
+        return sorted[0];
+    }
+
+    // Map percentile [0, 100] to a fractional index in [0, n-1].
+    let n = sorted.len() as f64;
+    let rank = (p / 100.0) * (n - 1.0);
+    let lower = rank.floor() as usize;
+    let upper = rank.ceil().min(n - 1.0) as usize;
+    let frac = rank - lower as f64;
+
+    sorted[lower] + frac * (sorted[upper] - sorted[lower])
+}
+
+/// Recursively list all files (not directories) under `dir`.
+fn walkdir(dir: &Path) -> eyre::Result<Vec<PathBuf>> {
+    let mut files = Vec::new();
+    if !dir.is_dir() {
+        return Ok(files);
+    }
+    for entry in fs::read_dir(dir)? {
+        let entry = entry?;
+        let path = entry.path();
+        if path.is_dir() {
+            files.extend(walkdir(&path)?);
+        } else {
+            files.push(path);
+        }
+    }
+    Ok(files)
+}
+
+pub fn summarize(args: SummarizeArgs) -> eyre::Result<()> {
+    let dir = Path::new(&args.results_dir);
+
+    // 1. Recursively find all .json files.
+    let json_files: Vec<PathBuf> = walkdir(dir)?
+        .into_iter()
+        .filter(|p| p.extension().is_some_and(|ext| ext == "json"))
+        .collect();
+
+    // 2. Parse BlockTiming from each JSON-lines file.
+    let mut all_timings: Vec<BlockTiming> = Vec::new();
+    for path in &json_files {
+        let file = fs::File::open(path)?;
+        let reader = BufReader::new(file);
+        for line in reader.lines() {
+            let line = line?;
+            let trimmed = line.trim();
+            if trimmed.is_empty() {
+                continue;
+            }
+            if let Ok(timing) = serde_json::from_str::<BlockTiming>(trimmed) {
+                all_timings.push(timing);
+            }
+        }
+    }
+
+    // 3. Group by (engine, layer, tx_count) — BTreeMap for sorted output.
+    let mut groups: BTreeMap<(String, String, u64), Vec<BlockTiming>> = BTreeMap::new();
+    for t in all_timings {
+        groups
+            .entry((t.engine.clone(), t.layer.clone(), t.tx_count))
+            .or_default()
+            .push(t);
+    }
+
+    // 4. Build the TSV output.
+    let header =
+        "engine\tlayer\ttx/blk\tavg_asm_ms\tavg_imp_ms\tavg_tot_ms\tp50_ms\tp95_ms\tp99_ms\teff_tps\t<300ms%";
+
+    let mut rows: Vec<String> = vec![header.to_string()];
+
+    for ((engine, layer, tx_count), entries) in &groups {
+        // Skip first 10 entries as warmup.
+        let data: Vec<&BlockTiming> = entries.iter().skip(10).collect();
+        if data.is_empty() {
+            continue;
+        }
+
+        let n = data.len() as f64;
+
+        let avg_assemble: f64 = data.iter().map(|t| t.assemble_ms).sum::<f64>() / n;
+        let avg_import: f64 = data.iter().map(|t| t.import_ms).sum::<f64>() / n;
+        let avg_total: f64 = data.iter().map(|t| t.total_ms).sum::<f64>() / n;
+
+        let mut totals: Vec<f64> = data.iter().map(|t| t.total_ms).collect();
+        totals.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
+
+        let p50 = percentile(&totals, 50.0);
+        let p95 = percentile(&totals, 95.0);
+        let p99 = percentile(&totals, 99.0);
+
+        let effective_tps = if avg_total > 0.0 {
+            *tx_count as f64 / (avg_total / 1000.0)
+        } else {
+            0.0
+        };
+
+        let meets_300ms_count = data.iter().filter(|t| t.total_ms < 300.0).count();
+        let meets_300ms_pct = meets_300ms_count as f64 / n * 100.0;
+
+        rows.push(format!(
+            "{}\t{}\t{}\t{:.2}\t{:.2}\t{:.2}\t{:.2}\t{:.2}\t{:.2}\t{:.1}\t{:.1}",
+            engine,
+            layer,
+            tx_count,
+            avg_assemble,
+            avg_import,
+            avg_total,
+            p50,
+            p95,
+            p99,
+            effective_tps,
+            meets_300ms_pct,
+        ));
+    }
+
+    let output_text = rows.join("\n");
+
+    // 5. Print to stdout.
+    println!("{output_text}");
+
+    // 6. Optionally write to file.
+    if let Some(ref path) = args.output {
+        let mut f = fs::File::create(path)?;
+        writeln!(f, "{output_text}")?;
+    }
+
+    Ok(())
+}
+
+// ---------------------------------------------------------------------------
+// Tests
+// ---------------------------------------------------------------------------
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn percentile_calculation_is_correct() {
+        let values = vec![10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0];
+        assert!(
+            (percentile(&values, 50.0) - 55.0).abs() < 1.0,
+            "p50 should be ~55.0, got {}",
+            percentile(&values, 50.0)
+        );
+        assert!(
+            (percentile(&values, 95.0) - 95.5).abs() < 1.0,
+            "p95 should be ~95.5, got {}",
+            percentile(&values, 95.0)
+        );
+    }
+
+    #[test]
+    fn percentile_single_value() {
+        assert_eq!(percentile(&[42.0], 50.0), 42.0);
+    }
+
+    #[test]
+    fn percentile_empty() {
+        assert_eq!(percentile(&[], 50.0), 0.0);
+    }
 }

From 36b3e50f0592be8a11639177d12ed047b9c29eff Mon Sep 17 00:00:00 2001
From: panos-xyz
Date: Fri, 3 Apr 2026 18:16:24 +0800
Subject: [PATCH 06/17] feat(bench): implement ETH and ERC20 transaction construction and workload runner

---
 bin/bench-block-exec/src/workload.rs | 432 ++++++++++++++++++++++++++-
 1 file changed, 430 insertions(+), 2 deletions(-)

diff --git a/bin/bench-block-exec/src/workload.rs b/bin/bench-block-exec/src/workload.rs
index 2b3ac16..bd2005d 100644
--- a/bin/bench-block-exec/src/workload.rs
+++ b/bin/bench-block-exec/src/workload.rs
@@ -1,5 +1,15 @@
+use alloy_consensus::{EthereumTxEnvelope, SignableTransaction, TxEip1559, TxEip4844};
+use alloy_eips::eip2718::Encodable2718;
+use alloy_primitives::{Address, Bytes, TxKind, U256};
+
+/// Concrete envelope type (EIP-4844 variant required by the generic).
+type TxEnvelope = EthereumTxEnvelope<TxEip4844>;
+use alloy_signer::SignerSync;
+use alloy_signer_local::PrivateKeySigner;
 use clap::Args;
+
+use crate::engine::{AssembleL2BlockParams, BlockTiming, EngineClient};
+
 #[derive(Args)]
 pub struct RunWorkloadArgs {
     /// Engine API RPC URL (authenticated).
@@ -37,6 +47,424 @@
     pub chain_id: u64,
 }
 
-pub async fn run(_args: RunWorkloadArgs) -> eyre::Result<()> {
-    todo!("run-workload not yet implemented")
+// ---------------------------------------------------------------------------
+// Helper functions
+// ---------------------------------------------------------------------------
+
+/// Generate a deterministic receiver address from an index.
+///
+/// Uses `0xBB` as the first byte and the index in the last 8 bytes,
+/// producing unique addresses for each index.
+pub fn receiver_address(index: u64) -> Address {
+    let mut bytes = [0u8; 20];
+    bytes[0] = 0xBB;
+    bytes[12..20].copy_from_slice(&index.to_be_bytes());
+    Address::from(bytes)
+}
+
+/// Build a batch of signed EIP-1559 ETH transfer transactions.
+///
+/// Each transfer sends 1 wei to a deterministic receiver address,
+/// with gas_limit=21000.
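+///
+/// Illustrative usage (a sketch; the key is the crate's test key, and the chain id
+/// and fee value mirror the runner's defaults):
+///
+/// ```ignore
+/// let signer: PrivateKeySigner =
+///     "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80".parse()?;
+/// // 100 transfers with nonces 0..100 on chain id 99999 at a 1_000_000 wei max fee.
+/// let txs = build_eth_transfer_batch(&signer, 100, 0, 99999, 1_000_000)?;
+/// assert_eq!(txs.len(), 100);
+/// ```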
+pub fn build_eth_transfer_batch(
+    signer: &PrivateKeySigner,
+    count: u64,
+    start_nonce: u64,
+    chain_id: u64,
+    max_fee_per_gas: u128,
+) -> eyre::Result<Vec<Bytes>> {
+    let mut txs = Vec::with_capacity(count as usize);
+    for i in 0..count {
+        let nonce = start_nonce + i;
+        let tx = TxEip1559 {
+            chain_id,
+            nonce,
+            gas_limit: 21_000,
+            max_fee_per_gas,
+            max_priority_fee_per_gas: 0,
+            to: TxKind::Call(receiver_address(i)),
+            value: U256::from(1),
+            access_list: Default::default(),
+            input: Bytes::new(),
+        };
+        let sig = signer
+            .sign_hash_sync(&tx.signature_hash())
+            .map_err(|e| eyre::eyre!("signing failed: {e}"))?;
+        let envelope: TxEnvelope = EthereumTxEnvelope::Eip1559(tx.into_signed(sig));
+        txs.push(Bytes::from(envelope.encoded_2718()));
+    }
+    Ok(txs)
+}
+
+/// Build a batch of signed EIP-1559 ERC20 `transfer(address,uint256)` transactions.
+///
+/// Each call transfers 1 token to a deterministic receiver address,
+/// with gas_limit=60000.
+pub fn build_erc20_transfer_batch(
+    signer: &PrivateKeySigner,
+    count: u64,
+    start_nonce: u64,
+    chain_id: u64,
+    max_fee_per_gas: u128,
+    contract_addr: Address,
+) -> eyre::Result<Vec<Bytes>> {
+    let mut txs = Vec::with_capacity(count as usize);
+    // ERC20 transfer(address,uint256) selector: 0xa9059cbb
+    let selector: [u8; 4] = [0xa9, 0x05, 0x9c, 0xbb];
+
+    for i in 0..count {
+        let nonce = start_nonce + i;
+        let recipient = receiver_address(i);
+
+        // ABI-encode: 32-byte padded address + 32-byte uint256(1)
+        let mut calldata = Vec::with_capacity(4 + 32 + 32);
+        calldata.extend_from_slice(&selector);
+        // address is left-padded with 12 zero bytes
+        calldata.extend_from_slice(&[0u8; 12]);
+        calldata.extend_from_slice(recipient.as_slice());
+        // uint256(1) as 32-byte big-endian
+        let mut amount = [0u8; 32];
+        amount[31] = 1;
+        calldata.extend_from_slice(&amount);
+
+        let tx = TxEip1559 {
+            chain_id,
+            nonce,
+            gas_limit: 60_000,
+            max_fee_per_gas,
+            max_priority_fee_per_gas: 0,
+            to: TxKind::Call(contract_addr),
+            value: U256::ZERO,
+            access_list: Default::default(),
+            input: Bytes::from(calldata),
+        };
+        let sig = signer
+            .sign_hash_sync(&tx.signature_hash())
+            .map_err(|e| eyre::eyre!("signing failed: {e}"))?;
+        let envelope: TxEnvelope = EthereumTxEnvelope::Eip1559(tx.into_signed(sig));
+        txs.push(Bytes::from(envelope.encoded_2718()));
+    }
+    Ok(txs)
+}
+
+/// Build a signed EIP-1559 contract deployment transaction.
+///
+/// The `bytecode` is the compiled contract init code and the `initial_supply`
+/// is ABI-encoded as a uint256 constructor argument appended to the bytecode.
+pub fn build_deploy_tx(
+    signer: &PrivateKeySigner,
+    nonce: u64,
+    chain_id: u64,
+    max_fee_per_gas: u128,
+    bytecode: &Bytes,
+    initial_supply: U256,
+) -> eyre::Result<Bytes> {
+    // Append ABI-encoded uint256 constructor arg to bytecode
+    let mut init_code = bytecode.to_vec();
+    init_code.extend_from_slice(&initial_supply.to_be_bytes::<32>());
+
+    let tx = TxEip1559 {
+        chain_id,
+        nonce,
+        gas_limit: 2_000_000,
+        max_fee_per_gas,
+        max_priority_fee_per_gas: 0,
+        to: TxKind::Create,
+        value: U256::ZERO,
+        access_list: Default::default(),
+        input: Bytes::from(init_code),
+    };
+    let sig = signer
+        .sign_hash_sync(&tx.signature_hash())
+        .map_err(|e| eyre::eyre!("signing failed: {e}"))?;
+    let envelope: TxEnvelope = EthereumTxEnvelope::Eip1559(tx.into_signed(sig));
+    Ok(Bytes::from(envelope.encoded_2718()))
+}
+
+/// Read a Foundry JSON artifact and extract the contract bytecode.
+///
+/// Expects the artifact to have `bytecode.object` as a hex string.
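+///
+/// Expected artifact shape (abridged sketch of a Foundry output JSON; only the
+/// `bytecode.object` field is actually read here):
+///
+/// ```json
+/// { "abi": [ ... ], "bytecode": { "object": "0x6080..." } }
+/// ```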
+pub fn read_contract_artifact(path: &str) -> eyre::Result<Bytes> {
+    let content = std::fs::read_to_string(path)
+        .map_err(|e| eyre::eyre!("failed to read artifact at {path}: {e}"))?;
+    let artifact: serde_json::Value = serde_json::from_str(&content)
+        .map_err(|e| eyre::eyre!("failed to parse artifact JSON: {e}"))?;
+
+    let hex_str = artifact
+        .get("bytecode")
+        .and_then(|b| b.get("object"))
+        .and_then(|o| o.as_str())
+        .ok_or_else(|| eyre::eyre!("artifact missing bytecode.object field"))?;
+
+    let hex_str = hex_str.strip_prefix("0x").unwrap_or(hex_str);
+    let bytes = hex::decode(hex_str)
+        .map_err(|e| eyre::eyre!("failed to decode bytecode hex: {e}"))?;
+    Ok(Bytes::from(bytes))
+}
+
+/// Poll `eth_chainId` via HTTP POST until the RPC endpoint is ready.
+async fn wait_for_rpc(http_rpc: &str, timeout_secs: u64) -> eyre::Result<()> {
+    let client = reqwest::Client::new();
+    let deadline = std::time::Instant::now() + std::time::Duration::from_secs(timeout_secs);
+
+    loop {
+        let result = client
+            .post(http_rpc)
+            .json(&serde_json::json!({
+                "jsonrpc": "2.0",
+                "method": "eth_chainId",
+                "params": [],
+                "id": 1
+            }))
+            .send()
+            .await;
+
+        if let Ok(resp) = result {
+            if resp.status().is_success() {
+                return Ok(());
+            }
+        }
+
+        if std::time::Instant::now() >= deadline {
+            return Err(eyre::eyre!(
+                "RPC at {http_rpc} not ready after {timeout_secs}s"
+            ));
+        }
+
+        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+    }
+}
+
+// ---------------------------------------------------------------------------
+// Hex decoding helper (avoid pulling in another crate just for this)
+// ---------------------------------------------------------------------------
+
+mod hex {
+    pub fn decode(hex: &str) -> Result<Vec<u8>, String> {
+        if hex.len() % 2 != 0 {
+            return Err("hex string has odd length".into());
+        }
+        (0..hex.len())
+            .step_by(2)
+            .map(|i| {
+                u8::from_str_radix(&hex[i..i + 2], 16)
+                    .map_err(|e| format!("invalid hex at position {i}: {e}"))
+            })
+            .collect()
+    }
+}
+
+// ---------------------------------------------------------------------------
+// Main workload runner
+// ---------------------------------------------------------------------------
+
+pub async fn run(args: RunWorkloadArgs) -> eyre::Result<()> {
+    // 1. Read JWT secret from file, create signer from sender_key
+    let jwt_secret = std::fs::read_to_string(&args.jwt_secret)
+        .map_err(|e| eyre::eyre!("failed to read JWT secret file: {e}"))?
+        .trim()
+        .to_string();
+
+    let signer: PrivateKeySigner = args
+        .sender_key
+        .parse()
+        .map_err(|e| eyre::eyre!("invalid sender_key: {e}"))?;
+    let sender_addr = signer.address();
+
+    // 2. Create EngineClient
+    let engine = EngineClient::new(&args.engine_rpc, jwt_secret)?;
+
+    // 3. Wait for RPC readiness
+    println!("Waiting for RPC at {} ...", args.http_rpc);
+    wait_for_rpc(&args.http_rpc, 60).await?;
+    println!("RPC is ready.");
+
+    let max_fee_per_gas: u128 = 1_000_000; // MORPH_BASE_FEE
+    let mut current_nonce: u64 = 0;
+    let mut block_start: u64 = 1;
+
+    // 4.
If layer == "erc20-transfer", deploy the contract first + let contract_addr = if args.layer == "erc20-transfer" { + let artifact_path = args.contract_artifact.as_deref().ok_or_else(|| { + eyre::eyre!("--contract-artifact is required for erc20-transfer layer") + })?; + let bytecode = read_contract_artifact(artifact_path)?; + + // Build deploy tx (nonce=0) + let initial_supply = U256::from(1_000_000_000u64) * U256::from(10u64).pow(U256::from(18)); + let deploy_tx = + build_deploy_tx(&signer, 0, args.chain_id, max_fee_per_gas, &bytecode, initial_supply)?; + + // Assemble block 1 with the deploy tx + let (assembled, _assemble_ms) = engine + .assemble_l2_block( + &args.engine_rpc, + AssembleL2BlockParams { + number: 1, + transactions: vec![deploy_tx], + timestamp: Some(1), + }, + ) + .await?; + + // Import block 1 + engine.new_l2_block(&args.engine_rpc, assembled).await?; + + // Compute contract address: sender.create(0) + let addr = sender_addr.create(0); + println!("Deployed ERC20 contract at {addr}"); + + current_nonce = 1; + block_start = 2; + + Some(addr) + } else { + None + }; + + // 5. Open output file for JSON lines + let mut output_file = std::fs::File::create(&args.output) + .map_err(|e| eyre::eyre!("failed to create output file: {e}"))?; + + let mut timings: Vec = Vec::new(); + + for block_idx in 0..args.blocks { + let block_number = block_start + block_idx; + + // Build tx batch + let txs = if args.layer == "erc20-transfer" { + build_erc20_transfer_batch( + &signer, + args.txs_per_block, + current_nonce, + args.chain_id, + max_fee_per_gas, + contract_addr.expect("contract_addr must be set for erc20-transfer"), + )? + } else { + build_eth_transfer_batch( + &signer, + args.txs_per_block, + current_nonce, + args.chain_id, + max_fee_per_gas, + )? + }; + current_nonce += args.txs_per_block; + + // Assemble block + let (assembled, assemble_ms) = engine + .assemble_l2_block( + &args.engine_rpc, + AssembleL2BlockParams { + number: block_number, + transactions: txs, + timestamp: Some(block_number), + }, + ) + .await?; + + // Import block + let import_ms = engine.new_l2_block(&args.engine_rpc, assembled).await?; + + let total_ms = assemble_ms + import_ms; + let timing = BlockTiming { + block_number, + tx_count: args.txs_per_block, + assemble_ms, + import_ms, + total_ms, + layer: args.layer.clone(), + engine: args.engine_name.clone(), + }; + + // Write as JSON line to output file + use std::io::Write; + let json_line = serde_json::to_string(&timing)?; + writeln!(output_file, "{json_line}")?; + + timings.push(timing); + + // Print progress every 50 blocks + if (block_idx + 1) % 50 == 0 || block_idx + 1 == args.blocks { + println!( + "Block {block_number}: assemble={assemble_ms:.1}ms import={import_ms:.1}ms total={total_ms:.1}ms [{}/{}]", + block_idx + 1, + args.blocks + ); + } + } + + // 6. 
Print quick summary + if !timings.is_empty() { + let count = timings.len() as f64; + let avg_total = timings.iter().map(|t| t.total_ms).sum::() / count; + let total_txs: u64 = timings.iter().map(|t| t.tx_count).sum(); + let total_time_s: f64 = timings.iter().map(|t| t.total_ms).sum::() / 1000.0; + let effective_tps = if total_time_s > 0.0 { + total_txs as f64 / total_time_s + } else { + 0.0 + }; + let under_300ms = timings.iter().filter(|t| t.total_ms <= 300.0).count(); + let compliance_pct = under_300ms as f64 / count * 100.0; + + println!("\n--- Summary ---"); + println!("Blocks: {}", timings.len()); + println!("Avg total: {avg_total:.1}ms"); + println!("Effective TPS: {effective_tps:.0}"); + println!("<=300ms: {under_300ms}/{} ({compliance_pct:.1}%)", timings.len()); + } + + Ok(()) +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use std::collections::HashSet; + + fn test_signer() -> PrivateKeySigner { + "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" + .parse() + .unwrap() + } + + #[test] + fn eth_transfer_batch_produces_exact_count() { + let signer = test_signer(); + let txs = build_eth_transfer_batch(&signer, 10, 0, 99999, 1_000_000).unwrap(); + assert_eq!(txs.len(), 10); + } + + #[test] + fn eth_transfer_nonces_are_contiguous() { + let signer = test_signer(); + let txs = build_eth_transfer_batch(&signer, 5, 0, 99999, 1_000_000).unwrap(); + // Each tx should be non-empty (we can't easily decode nonces without + // full RLP decoding, but we verify they are distinct non-empty blobs). + assert_eq!(txs.len(), 5); + for tx in &txs { + assert!(!tx.is_empty()); + } + } + + #[test] + fn erc20_transfer_batch_produces_exact_count() { + let signer = test_signer(); + let contract_addr = Address::with_last_byte(0x42); + let txs = + build_erc20_transfer_batch(&signer, 10, 0, 99999, 1_000_000, contract_addr).unwrap(); + assert_eq!(txs.len(), 10); + } + + #[test] + fn receiver_addresses_are_unique() { + let addrs: HashSet
= (0..100).map(receiver_address).collect(); + assert_eq!(addrs.len(), 100); + } } From 41ac8014502540312b5a84a1193c8a5ba72e10ef Mon Sep 17 00:00:00 2001 From: panos-xyz Date: Fri, 3 Apr 2026 18:18:25 +0800 Subject: [PATCH 07/17] feat(bench): implement state consistency verification between two nodes --- bin/bench-block-exec/src/verify.rs | 205 ++++++++++++++++++++++++++++- 1 file changed, 203 insertions(+), 2 deletions(-) diff --git a/bin/bench-block-exec/src/verify.rs b/bin/bench-block-exec/src/verify.rs index f6a6dbb..3d5abf7 100644 --- a/bin/bench-block-exec/src/verify.rs +++ b/bin/bench-block-exec/src/verify.rs @@ -1,4 +1,7 @@ use clap::Args; +use serde::Serialize; + +use crate::workload::receiver_address; #[derive(Args)] pub struct VerifyStateArgs { @@ -16,6 +19,204 @@ pub struct VerifyStateArgs { pub output: Option, } -pub async fn run(_args: VerifyStateArgs) -> eyre::Result<()> { - todo!("verify-state not yet implemented") +#[derive(Debug, Serialize)] +struct VerifyResult { + status: String, + block_number_a: String, + block_number_b: String, + state_root_match: bool, + receipts_root_match: bool, + balances_checked: u64, + balances_matched: u64, + mismatches: Vec, +} + +/// Send a JSON-RPC request and return the `result` field. +async fn rpc_call( + client: &reqwest::Client, + url: &str, + method: &str, + params: serde_json::Value, +) -> eyre::Result { + let body = serde_json::json!({ + "jsonrpc": "2.0", + "method": method, + "params": params, + "id": 1 + }); + + let resp: serde_json::Value = client + .post(url) + .json(&body) + .send() + .await + .map_err(|e| eyre::eyre!("RPC request to {url} failed: {e}"))? + .json() + .await + .map_err(|e| eyre::eyre!("failed to parse RPC response from {url}: {e}"))?; + + if let Some(err) = resp.get("error") { + return Err(eyre::eyre!("RPC error from {url}: {err}")); + } + + resp.get("result") + .cloned() + .ok_or_else(|| eyre::eyre!("RPC response from {url} missing 'result' field")) +} + +pub async fn run(args: VerifyStateArgs) -> eyre::Result<()> { + let client = reqwest::Client::new(); + + // 1. Fetch latest block from both nodes. + let block_a = rpc_call( + &client, + &args.rpc_a, + "eth_getBlockByNumber", + serde_json::json!(["latest", false]), + ) + .await?; + + let block_b = rpc_call( + &client, + &args.rpc_b, + "eth_getBlockByNumber", + serde_json::json!(["latest", false]), + ) + .await?; + + let number_a = block_a + .get("number") + .and_then(|v| v.as_str()) + .unwrap_or("unknown") + .to_string(); + let number_b = block_b + .get("number") + .and_then(|v| v.as_str()) + .unwrap_or("unknown") + .to_string(); + + let mut mismatches: Vec = Vec::new(); + + // 2. Compare block numbers. + if number_a != number_b { + mismatches.push(format!( + "block number mismatch: A={number_a}, B={number_b}" + )); + } + + // 3. Compare state roots. + let state_root_a = block_a + .get("stateRoot") + .and_then(|v| v.as_str()) + .unwrap_or("unknown"); + let state_root_b = block_b + .get("stateRoot") + .and_then(|v| v.as_str()) + .unwrap_or("unknown"); + let state_root_match = state_root_a == state_root_b; + if !state_root_match { + mismatches.push(format!( + "stateRoot mismatch: A={state_root_a}, B={state_root_b}" + )); + } + + // 4. Compare receipts roots. 
+ let receipts_root_a = block_a + .get("receiptsRoot") + .and_then(|v| v.as_str()) + .unwrap_or("unknown"); + let receipts_root_b = block_b + .get("receiptsRoot") + .and_then(|v| v.as_str()) + .unwrap_or("unknown"); + let receipts_root_match = receipts_root_a == receipts_root_b; + if !receipts_root_match { + mismatches.push(format!( + "receiptsRoot mismatch: A={receipts_root_a}, B={receipts_root_b}" + )); + } + + // 5. Compare balances for receiver addresses. + let mut balances_matched: u64 = 0; + for i in 0..args.check_balances { + let addr = receiver_address(i); + let addr_hex = format!("{addr:#x}"); + + let balance_a = rpc_call( + &client, + &args.rpc_a, + "eth_getBalance", + serde_json::json!([addr_hex, "latest"]), + ) + .await?; + + let balance_b = rpc_call( + &client, + &args.rpc_b, + "eth_getBalance", + serde_json::json!([addr_hex, "latest"]), + ) + .await?; + + if balance_a == balance_b { + balances_matched += 1; + } else { + mismatches.push(format!( + "balance mismatch for {addr_hex}: A={balance_a}, B={balance_b}" + )); + } + } + + // 6. Build result. + let status = if mismatches.is_empty() { + "PASS".to_string() + } else { + "FAIL".to_string() + }; + + let result = VerifyResult { + status: status.clone(), + block_number_a: number_a, + block_number_b: number_b, + state_root_match, + receipts_root_match, + balances_checked: args.check_balances, + balances_matched, + mismatches: mismatches.clone(), + }; + + // 7. Write to output file if specified. + if let Some(ref path) = args.output { + let json = serde_json::to_string_pretty(&result)?; + std::fs::write(path, json) + .map_err(|e| eyre::eyre!("failed to write output to {path}: {e}"))?; + } + + // 8. Print summary to stderr. + eprintln!("--- Verify State ---"); + eprintln!("Status: {status}"); + eprintln!("Block number A: {}", result.block_number_a); + eprintln!("Block number B: {}", result.block_number_b); + eprintln!("State root match: {state_root_match}"); + eprintln!("Receipts root match:{receipts_root_match}"); + eprintln!( + "Balances checked: {}/{}", + balances_matched, args.check_balances + ); + if !mismatches.is_empty() { + eprintln!("Mismatches:"); + for m in &mismatches { + eprintln!(" - {m}"); + } + } + + // 9. Fail if any mismatches found. + if !mismatches.is_empty() { + eyre::bail!( + "state verification FAILED with {} mismatch(es)", + mismatches.len() + ); + } + + Ok(()) } From 4e276c650921ef61e032a371415e43caf1e01d32 Mon Sep 17 00:00:00 2001 From: panos-xyz Date: Fri, 3 Apr 2026 18:20:15 +0800 Subject: [PATCH 08/17] feat(bench): add shell orchestration for multi-round geth vs reth benchmark --- local-test/bench-block-exec.sh | 402 +++++++++++++++++++++++++++++++++ 1 file changed, 402 insertions(+) create mode 100755 local-test/bench-block-exec.sh diff --git a/local-test/bench-block-exec.sh b/local-test/bench-block-exec.sh new file mode 100755 index 0000000..0b1ce6d --- /dev/null +++ b/local-test/bench-block-exec.sh @@ -0,0 +1,402 @@ +#!/usr/bin/env bash +# +# bench-block-exec.sh — Multi-round geth vs reth block-execution benchmark. +# +# This script is STANDALONE: it does NOT source common.sh because it uses a +# custom genesis (not mainnet/hoodi) and manages its own data directories. 
+# +# Usage: +# ./local-test/bench-block-exec.sh +# +# Override defaults via environment variables, e.g.: +# ROUNDS=1 BLOCKS=100 SKIP_GETH=1 ./local-test/bench-block-exec.sh + +set -euo pipefail + +# ─── Resolve repo root ─────────────────────────────────────────────────────── + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" +cd "${REPO_ROOT}" + +# ─── Configuration (all overridable via environment) ───────────────────────── + +: "${ROUNDS:=3}" +: "${BLOCKS:=500}" +: "${SKIP_GETH:=0}" +: "${SKIP_RETH:=0}" +: "${RESULTS_DIR:=bench-results/$(date +%Y%m%d-%H%M%S)}" +: "${RETH_BIN:=./target/release/morph-reth}" +: "${GETH_BIN:=../morph/go-ethereum/build/bin/geth}" +: "${BENCH_BIN:=./target/release/bench-block-exec}" +: "${JWT_SECRET:=./local-test/jwt-secret.txt}" +: "${SENDER_KEY:=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80}" +: "${SENDER_ADDR:=0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266}" +: "${CONTRACT_ARTIFACT:=./local-test/erc20-bench-contracts/out/BenchToken.sol/BenchToken.json}" +: "${CHAIN_ID:=99999}" + +ETH_TX_SIZES="500 1000 2000 3000 5000 8000" +ERC20_TX_SIZES="500 1000 2000 3000 5000" + +HTTP_PORT=8545; AUTH_PORT=8551 +HTTP_PORT_B=9545; AUTH_PORT_B=9551 + +# ─── Helper functions ──────────────────────────────────────────────────────── + +check_binary() { + local bin_path="$1" + local hint="$2" + if [[ ! -x "${bin_path}" ]]; then + echo "ERROR: Missing executable: ${bin_path}" + echo "Build hint: ${hint}" + return 1 + fi +} + +pm2_check() { + if ! command -v pm2 &>/dev/null; then + echo "ERROR: pm2 is not installed" + echo "Install with: npm install -g pm2" + return 1 + fi +} + +pm2_stop() { + local name="$1" + if pm2 describe "${name}" &>/dev/null; then + pm2 stop "${name}" 2>/dev/null || true + pm2 delete "${name}" 2>/dev/null || true + echo "${name}: stopped" + else + echo "${name}: not running" + fi +} + +wait_for_rpc() { + local name="$1" + local port="$2" + local url="http://127.0.0.1:${port}" + local timeout=120 + local elapsed=0 + + echo "Waiting for ${name} RPC on port ${port} (timeout ${timeout}s)..." + while (( elapsed < timeout )); do + if curl -s -X POST "${url}" \ + -H "Content-Type: application/json" \ + -d '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}' \ + 2>/dev/null | grep -q '"result"'; then + echo "${name} RPC is ready (${elapsed}s)." 
+ return 0 + fi + sleep 1 + (( elapsed++ )) || true + done + + echo "ERROR: ${name} RPC not ready after ${timeout}s" + return 1 +} + +fresh_datadir() { + local dir="$1" + rm -rf "${dir}" + mkdir -p "${dir}" +} + +generate_genesis() { + local output="$1" + mkdir -p "$(dirname "${output}")" + "${BENCH_BIN}" write-genesis \ + --output "${output}" \ + --sender "${SENDER_ADDR}" \ + --sender-balance "1000000000000000000000000000" +} + +start_reth() { + local datadir="$1" + local genesis="$2" + local http_port="$3" + local auth_port="$4" + local name="$5" + + local args=( + node + --chain "${genesis}" + --datadir "${datadir}" + --http + --http.addr 127.0.0.1 + --http.port "${http_port}" + --http.api "web3,debug,eth,txpool,net" + --authrpc.addr 127.0.0.1 + --authrpc.port "${auth_port}" + --authrpc.jwtsecret "${JWT_SECRET}" + --nat none + --morph.max-tx-payload-bytes 131072000 + --engine.persistence-threshold 2048 + --engine.memory-block-buffer-target 2048 + ) + + pm2 start "${RETH_BIN}" --name "${name}" -- "${args[@]}" + echo "Started ${name} (reth) on http=${http_port} auth=${auth_port}" +} + +start_geth() { + local datadir="$1" + local genesis="$2" + local http_port="$3" + local auth_port="$4" + local name="$5" + + # Initialize geth datadir with genesis + "${GETH_BIN}" init --datadir "${datadir}" "${genesis}" + + local args=( + --morph + --datadir "${datadir}" + --gcmode archive + --syncmode full + --http + --http.addr 127.0.0.1 + --http.port "${http_port}" + --http.corsdomain "*" + --http.vhosts "*" + --http.api "web3,eth,debug,txpool,net,morph,engine" + --authrpc.addr 127.0.0.1 + --authrpc.port "${auth_port}" + --authrpc.vhosts "*" + --authrpc.jwtsecret "${JWT_SECRET}" + --nodiscover + --maxpeers 0 + ) + + pm2 start "${GETH_BIN}" --name "${name}" -- "${args[@]}" + echo "Started ${name} (geth) on http=${http_port} auth=${auth_port}" +} + +run_workload() { + local engine_name="$1" + local layer="$2" + local txs_per_block="$3" + local output="$4" + local auth_port="$5" + local http_port="$6" + + mkdir -p "$(dirname "${output}")" + + local args=( + run-workload + --engine-rpc "http://127.0.0.1:${auth_port}" + --jwt-secret "${JWT_SECRET}" + --http-rpc "http://127.0.0.1:${http_port}" + --layer "${layer}" + --txs-per-block "${txs_per_block}" + --blocks "${BLOCKS}" + --output "${output}" + --engine-name "${engine_name}" + --sender-key "${SENDER_KEY}" + --chain-id "${CHAIN_ID}" + ) + + # Add contract artifact only for erc20-transfer layer + if [[ "${layer}" == "erc20-transfer" ]]; then + args+=(--contract-artifact "${CONTRACT_ARTIFACT}") + fi + + "${BENCH_BIN}" "${args[@]}" || true +} + +# ─── Banner ────────────────────────────────────────────────────────────────── + +echo "============================================================" +echo " bench-block-exec -- Geth vs Reth Block Execution Bench" +echo "============================================================" +echo "" +echo " ROUNDS: ${ROUNDS}" +echo " BLOCKS: ${BLOCKS}" +echo " SKIP_GETH: ${SKIP_GETH}" +echo " SKIP_RETH: ${SKIP_RETH}" +echo " RESULTS_DIR: ${RESULTS_DIR}" +echo " RETH_BIN: ${RETH_BIN}" +echo " GETH_BIN: ${GETH_BIN}" +echo " BENCH_BIN: ${BENCH_BIN}" +echo " ETH_TX_SIZES: ${ETH_TX_SIZES}" +echo " ERC20_TX_SIZES: ${ERC20_TX_SIZES}" +echo " CHAIN_ID: ${CHAIN_ID}" +echo "" + +# ─── Prerequisites ─────────────────────────────────────────────────────────── + +pm2_check +check_binary "${BENCH_BIN}" "cargo build --release --bin bench-block-exec" +if [[ "${SKIP_RETH}" != "1" ]]; then + check_binary "${RETH_BIN}" "cargo build --release 
--bin morph-reth" +fi +if [[ "${SKIP_GETH}" != "1" ]]; then + check_binary "${GETH_BIN}" "cd ../morph/go-ethereum && make geth" +fi + +# Generate JWT secret if missing +if [[ ! -f "${JWT_SECRET}" ]]; then + echo "Generating JWT secret at ${JWT_SECRET}..." + mkdir -p "$(dirname "${JWT_SECRET}")" + openssl rand -hex 32 > "${JWT_SECRET}" +fi + +# Build contract artifact if missing +if [[ ! -f "${CONTRACT_ARTIFACT}" ]]; then + echo "Building ERC20 contract artifact..." + (cd local-test/erc20-bench-contracts && forge build) +fi + +mkdir -p "${RESULTS_DIR}" + +# ─── Main benchmark loop ──────────────────────────────────────────────────── + +for round in $(seq 1 "${ROUNDS}"); do + echo "" + echo "============================================================" + echo " Round ${round}/${ROUNDS}" + echo "============================================================" + + # Alternate engine order: odd rounds geth first, even rounds reth first + if (( round % 2 == 1 )); then + engines=("geth" "reth") + else + engines=("reth" "geth") + fi + + for engine in "${engines[@]}"; do + # Skip logic + if [[ "${engine}" == "geth" && "${SKIP_GETH}" == "1" ]]; then + echo "Skipping geth (SKIP_GETH=1)" + continue + fi + if [[ "${engine}" == "reth" && "${SKIP_RETH}" == "1" ]]; then + echo "Skipping reth (SKIP_RETH=1)" + continue + fi + + echo "" + echo "--- Round ${round} / Engine: ${engine} ---" + + # ── Layer 1: ETH transfers ── + for txs in ${ETH_TX_SIZES}; do + echo "" + echo "[${engine}] eth-transfer txs_per_block=${txs} blocks=${BLOCKS}" + + datadir="bench-data/${engine}-eth-${txs}" + genesis_file="bench-data/${engine}-eth-${txs}-genesis.json" + output_file="${RESULTS_DIR}/round${round}-${engine}-eth-${txs}.json" + + fresh_datadir "${datadir}" + generate_genesis "${genesis_file}" + + if [[ "${engine}" == "reth" ]]; then + start_reth "${datadir}" "${genesis_file}" "${HTTP_PORT}" "${AUTH_PORT}" "morph-reth-bench" + else + start_geth "${datadir}" "${genesis_file}" "${HTTP_PORT}" "${AUTH_PORT}" "morph-geth-bench" + fi + + wait_for_rpc "${engine}" "${HTTP_PORT}" + run_workload "${engine}" "eth-transfer" "${txs}" "${output_file}" "${AUTH_PORT}" "${HTTP_PORT}" + + if [[ "${engine}" == "reth" ]]; then + pm2_stop "morph-reth-bench" || true + else + pm2_stop "morph-geth-bench" || true + fi + sleep 2 + done + + # ── Layer 2: ERC20 transfers ── + for txs in ${ERC20_TX_SIZES}; do + echo "" + echo "[${engine}] erc20-transfer txs_per_block=${txs} blocks=${BLOCKS}" + + datadir="bench-data/${engine}-erc20-${txs}" + genesis_file="bench-data/${engine}-erc20-${txs}-genesis.json" + output_file="${RESULTS_DIR}/round${round}-${engine}-erc20-${txs}.json" + + fresh_datadir "${datadir}" + generate_genesis "${genesis_file}" + + if [[ "${engine}" == "reth" ]]; then + start_reth "${datadir}" "${genesis_file}" "${HTTP_PORT}" "${AUTH_PORT}" "morph-reth-bench" + else + start_geth "${datadir}" "${genesis_file}" "${HTTP_PORT}" "${AUTH_PORT}" "morph-geth-bench" + fi + + wait_for_rpc "${engine}" "${HTTP_PORT}" + run_workload "${engine}" "erc20-transfer" "${txs}" "${output_file}" "${AUTH_PORT}" "${HTTP_PORT}" + + if [[ "${engine}" == "reth" ]]; then + pm2_stop "morph-reth-bench" || true + else + pm2_stop "morph-geth-bench" || true + fi + sleep 2 + done + done +done + +# ─── State Consistency Verification ───────────────────────────────────────── + +if [[ "${SKIP_GETH}" != "1" && "${SKIP_RETH}" != "1" ]]; then + echo "" + echo "============================================================" + echo " State Consistency Verification" + echo 
"============================================================" + + verify_datadir_reth="bench-data/verify-reth" + verify_datadir_geth="bench-data/verify-geth" + verify_genesis="bench-data/verify-genesis.json" + + fresh_datadir "${verify_datadir_reth}" + fresh_datadir "${verify_datadir_geth}" + generate_genesis "${verify_genesis}" + + # Start both nodes on different ports simultaneously + start_reth "${verify_datadir_reth}" "${verify_genesis}" "${HTTP_PORT}" "${AUTH_PORT}" "morph-reth-verify" + start_geth "${verify_datadir_geth}" "${verify_genesis}" "${HTTP_PORT_B}" "${AUTH_PORT_B}" "morph-geth-verify" + + wait_for_rpc "reth-verify" "${HTTP_PORT}" + wait_for_rpc "geth-verify" "${HTTP_PORT_B}" + + # Run identical ERC20 workload on each engine + echo "Running verification workload on reth..." + run_workload "reth" "erc20-transfer" "1000" \ + "${RESULTS_DIR}/verify-reth.json" "${AUTH_PORT}" "${HTTP_PORT}" + + echo "Running verification workload on geth..." + run_workload "geth" "erc20-transfer" "1000" \ + "${RESULTS_DIR}/verify-geth.json" "${AUTH_PORT_B}" "${HTTP_PORT_B}" + + # Verify state consistency + echo "Verifying state consistency..." + "${BENCH_BIN}" verify-state \ + --rpc-a "http://127.0.0.1:${HTTP_PORT}" \ + --rpc-b "http://127.0.0.1:${HTTP_PORT_B}" \ + --check-balances 100 \ + --output "${RESULTS_DIR}/verify-state.json" || true + + pm2_stop "morph-reth-verify" || true + pm2_stop "morph-geth-verify" || true +fi + +# ─── Generate Summary ──────────────────────────────────────────────────────── + +echo "" +echo "Generating summary..." +"${BENCH_BIN}" summarize \ + --results-dir "${RESULTS_DIR}" \ + --output "${RESULTS_DIR}/summary.tsv" || true + +# ─── Done ──────────────────────────────────────────────────────────────────── + +echo "" +echo "============================================================" +echo " Benchmark complete!" +echo "============================================================" +echo "" +echo " Results: ${RESULTS_DIR}" +echo " Summary: ${RESULTS_DIR}/summary.tsv" +echo "" From 66090dbc66a755c7ee55aa43a861ee350255895a Mon Sep 17 00:00:00 2001 From: panos-xyz Date: Fri, 3 Apr 2026 18:25:48 +0800 Subject: [PATCH 09/17] fix(bench): handle hex-encoded nextL1MessageIndex from morph-reth Engine API --- bin/bench-block-exec/src/engine.rs | 43 +++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/bin/bench-block-exec/src/engine.rs b/bin/bench-block-exec/src/engine.rs index d22d6b9..e04672d 100644 --- a/bin/bench-block-exec/src/engine.rs +++ b/bin/bench-block-exec/src/engine.rs @@ -56,6 +56,44 @@ mod quantity_opt { } } +/// Accepts both hex quantity strings (`"0x0"`) and bare JSON numbers (`0`). +/// +/// morph-reth returns hex strings for `nextL1MessageIndex`; morph-geth may +/// return bare numbers. This module handles both transparently. +mod quantity_or_number { + use serde::{self, Deserialize, Deserializer, Serializer}; + + pub fn serialize(val: &u64, serializer: S) -> Result + where + S: Serializer, + { + // Serialize as hex to be safe (reth format). 
+        serializer.serialize_str(&format!("{:#x}", val))
+    }
+
+    pub fn deserialize<'de, D>(deserializer: D) -> Result<u64, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let value = serde_json::Value::deserialize(deserializer)?;
+        match value {
+            serde_json::Value::Number(n) => n
+                .as_u64()
+                .ok_or_else(|| serde::de::Error::custom("number does not fit u64")),
+            serde_json::Value::String(s) => {
+                let s = s
+                    .strip_prefix("0x")
+                    .or_else(|| s.strip_prefix("0X"))
+                    .unwrap_or(&s);
+                u64::from_str_radix(s, 16).map_err(serde::de::Error::custom)
+            }
+            _ => Err(serde::de::Error::custom(
+                "expected number or hex string for quantity_or_number",
+            )),
+        }
+    }
+}
+
 // ---------------------------------------------------------------------------
 // Engine API types
 // ---------------------------------------------------------------------------
@@ -124,7 +162,10 @@ pub struct ExecutableL2Data {
     /// Next L1 message queue index.
     ///
-    /// NOTE: morph-geth serializes this as a bare JSON number, not a hex string.
+    /// morph-reth serializes this as a hex quantity string (like all other numeric
+    /// fields). morph-geth may use a bare JSON number. Use `quantity_or_number` to
+    /// accept both formats.
+    #[serde(with = "quantity_or_number")]
     pub next_l1_message_index: u64,
 
     /// Remaining fields preserved for round-trip fidelity.

From e36af624518f4737855b499cb28565f7c64f91c6 Mon Sep 17 00:00:00 2001
From: panos-xyz
Date: Fri, 3 Apr 2026 18:45:01 +0800
Subject: [PATCH 10/17] fix(bench): add jadeForkTime for geth compat, fix nextL1MessageIndex serde, update geth path

---
 bin/bench-block-exec/src/engine.rs  | 4 ++--
 bin/bench-block-exec/src/genesis.rs | 1 +
 local-test/bench-block-exec.sh      | 3 +--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/bin/bench-block-exec/src/engine.rs b/bin/bench-block-exec/src/engine.rs
index e04672d..8b377d2 100644
--- a/bin/bench-block-exec/src/engine.rs
+++ b/bin/bench-block-exec/src/engine.rs
@@ -67,8 +67,8 @@ mod quantity_or_number {
     where
         S: Serializer,
     {
-        // Serialize as hex to be safe (reth format).
-        serializer.serialize_str(&format!("{:#x}", val))
+        // Serialize as bare number: geth expects u64, reth accepts both.
+ serializer.serialize_u64(*val) } pub fn deserialize<'de, D>(deserializer: D) -> Result diff --git a/bin/bench-block-exec/src/genesis.rs b/bin/bench-block-exec/src/genesis.rs index 5f7eb17..6c2bb50 100644 --- a/bin/bench-block-exec/src/genesis.rs +++ b/bin/bench-block-exec/src/genesis.rs @@ -57,6 +57,7 @@ pub fn build_genesis(sender: &str, sender_balance: &str) -> eyre::Result Date: Tue, 7 Apr 2026 08:50:41 +0800 Subject: [PATCH 11/17] fix(engine-api): use resolve_kind instead of best_payload to ensure pool txs are included fix(bench): use txpool path for L2 transactions instead of direct Engine API injection - Send transactions via eth_sendRawTransaction to enter txpool - assembleL2Block with empty transactions array, node pulls from txpool - Raise max_fee_per_gas and priority_fee for txpool acceptance - Add jadeForkTime to genesis for geth compatibility --- bin/bench-block-exec/src/workload.rs | 127 ++++++++++++++++++++++++--- crates/engine-api/src/builder.rs | 5 +- local-test/bench-block-exec.sh | 4 +- 3 files changed, 123 insertions(+), 13 deletions(-) diff --git a/bin/bench-block-exec/src/workload.rs b/bin/bench-block-exec/src/workload.rs index bd2005d..a424fa6 100644 --- a/bin/bench-block-exec/src/workload.rs +++ b/bin/bench-block-exec/src/workload.rs @@ -81,7 +81,7 @@ pub fn build_eth_transfer_batch( nonce, gas_limit: 21_000, max_fee_per_gas, - max_priority_fee_per_gas: 0, + max_priority_fee_per_gas: 1_000_000_000, // 1 gwei tip to: TxKind::Call(receiver_address(i)), value: U256::from(1), access_list: Default::default(), @@ -132,7 +132,7 @@ pub fn build_erc20_transfer_batch( nonce, gas_limit: 60_000, max_fee_per_gas, - max_priority_fee_per_gas: 0, + max_priority_fee_per_gas: 1_000_000_000, // 1 gwei tip to: TxKind::Call(contract_addr), value: U256::ZERO, access_list: Default::default(), @@ -254,6 +254,90 @@ mod hex { } } +// --------------------------------------------------------------------------- +// Txpool helpers +// --------------------------------------------------------------------------- + +/// Send a batch of raw transactions to the txpool via `eth_sendRawTransaction`. +/// +/// Sends all transactions concurrently for maximum throughput. +async fn send_txs_to_txpool(http_rpc: &str, txs: &[Bytes]) -> eyre::Result<()> { + let client = reqwest::Client::new(); + let mut futures = Vec::with_capacity(txs.len()); + + for (i, tx) in txs.iter().enumerate() { + let client = client.clone(); + let url = http_rpc.to_string(); + let tx_hex = format!("0x{}", alloy_primitives::hex::encode(tx)); + futures.push(tokio::spawn(async move { + let body = serde_json::json!({ + "jsonrpc": "2.0", + "method": "eth_sendRawTransaction", + "params": [tx_hex], + "id": i + 1 + }); + let resp = client.post(&url).json(&body).send().await?; + let json: serde_json::Value = resp.json().await?; + if let Some(err) = json.get("error") { + return Err(eyre::eyre!("eth_sendRawTransaction[{}] error: {}", i, err)); + } + Ok::<(), eyre::Report>(()) + })); + } + + for f in futures { + f.await.map_err(|e| eyre::eyre!("join error: {e}"))??; + } + Ok(()) +} + +/// Wait until the txpool has at least `expected` pending transactions. +/// +/// Polls `txpool_status` (for geth) or `eth_getTransactionCount` with "pending" +/// tag (works for both geth and reth) until the pending count matches. 
+async fn wait_for_txpool(
+    http_rpc: &str,
+    sender: Address,
+    expected_nonce: u64,
+    timeout_secs: u64,
+) -> eyre::Result<()> {
+    let client = reqwest::Client::new();
+    let deadline = std::time::Instant::now() + std::time::Duration::from_secs(timeout_secs);
+    let sender_hex = format!("{:#x}", sender);
+
+    loop {
+        // Use eth_getTransactionCount with "pending" to check how many txs
+        // from this sender have been accepted (pending nonce).
+        let body = serde_json::json!({
+            "jsonrpc": "2.0",
+            "method": "eth_getTransactionCount",
+            "params": [sender_hex, "pending"],
+            "id": 1
+        });
+        if let Ok(resp) = client.post(http_rpc).json(&body).send().await {
+            if let Ok(json) = resp.json::<serde_json::Value>().await {
+                if let Some(result) = json.get("result").and_then(|r| r.as_str()) {
+                    let nonce_hex = result.strip_prefix("0x").unwrap_or(result);
+                    if let Ok(pending_nonce) = u64::from_str_radix(nonce_hex, 16) {
+                        if pending_nonce >= expected_nonce {
+                            return Ok(());
+                        }
+                    }
+                }
+            }
+        }
+
+        if std::time::Instant::now() >= deadline {
+            return Err(eyre::eyre!(
+                "txpool did not reach expected nonce {} within {}s",
+                expected_nonce,
+                timeout_secs
+            ));
+        }
+        tokio::time::sleep(std::time::Duration::from_millis(50)).await;
+    }
+}
+
 // ---------------------------------------------------------------------------
 // Main workload runner
 // ---------------------------------------------------------------------------
@@ -279,23 +363,24 @@ pub async fn run(args: RunWorkloadArgs) -> eyre::Result<()> {
     wait_for_rpc(&args.http_rpc, 60).await?;
     println!("RPC is ready.");
 
-    let max_fee_per_gas: u128 = 1_000_000; // MORPH_BASE_FEE
+    let max_fee_per_gas: u128 = 2_000_000_000; // 2 gwei — must exceed base fee + priority
     let mut current_nonce: u64 = 0;
     let mut block_start: u64 = 1;
 
-    // 4. If layer == "erc20-transfer", deploy the contract first
+    // 4. If layer == "erc20-transfer", deploy the contract first via Engine API
+    //    (deployment is a setup step, not part of the timed workload)
     let contract_addr = if args.layer == "erc20-transfer" {
         let artifact_path = args.contract_artifact.as_deref().ok_or_else(|| {
            eyre::eyre!("--contract-artifact is required for erc20-transfer layer")
         })?;
         let bytecode = read_contract_artifact(artifact_path)?;
 
-        // Build deploy tx (nonce=0)
+        // Build deploy tx (nonce=0) — sent via Engine API directly (setup step)
         let initial_supply = U256::from(1_000_000_000u64) * U256::from(10u64).pow(U256::from(18));
         let deploy_tx =
             build_deploy_tx(&signer, 0, args.chain_id, max_fee_per_gas, &bytecode, initial_supply)?;
 
-        // Assemble block 1 with the deploy tx
+        // Assemble block 1 with the deploy tx (L1-style, direct via Engine API)
         let (assembled, _assemble_ms) = engine
             .assemble_l2_block(
                 &args.engine_rpc,
@@ -328,6 +413,11 @@ pub async fn run(args: RunWorkloadArgs) -> eyre::Result<()> {
 
     let mut timings: Vec<BlockTiming> = Vec::new();
 
+    println!(
+        "Running workload: {} tx/block x {} blocks (via txpool)",
+        args.txs_per_block, args.blocks
+    );
+
     for block_idx in 0..args.blocks {
         let block_number = block_start + block_idx;
 
@@ -350,27 +440,44 @@ pub async fn run(args: RunWorkloadArgs) -> eyre::Result<()> {
                 max_fee_per_gas,
             )?
}; + + // Send transactions to txpool via eth_sendRawTransaction + send_txs_to_txpool(&args.http_rpc, &txs).await?; + + // Wait until all txs are in the txpool + let expected_nonce = current_nonce + args.txs_per_block; + wait_for_txpool(&args.http_rpc, sender_addr, expected_nonce, 60).await?; + current_nonce += args.txs_per_block; - // Assemble block + // Assemble block — empty transactions array, node pulls from txpool let (assembled, assemble_ms) = engine .assemble_l2_block( &args.engine_rpc, AssembleL2BlockParams { number: block_number, - transactions: txs, + transactions: vec![], timestamp: Some(block_number), }, ) .await?; + // Verify assembled block has the expected tx count + let assembled_tx_count = assembled.transactions.len() as u64; + if assembled_tx_count != args.txs_per_block { + eprintln!( + " WARNING: block {} assembled {} txs (expected {})", + block_number, assembled_tx_count, args.txs_per_block + ); + } + // Import block let import_ms = engine.new_l2_block(&args.engine_rpc, assembled).await?; let total_ms = assemble_ms + import_ms; let timing = BlockTiming { block_number, - tx_count: args.txs_per_block, + tx_count: assembled_tx_count, assemble_ms, import_ms, total_ms, @@ -388,7 +495,7 @@ pub async fn run(args: RunWorkloadArgs) -> eyre::Result<()> { // Print progress every 50 blocks if (block_idx + 1) % 50 == 0 || block_idx + 1 == args.blocks { println!( - "Block {block_number}: assemble={assemble_ms:.1}ms import={import_ms:.1}ms total={total_ms:.1}ms [{}/{}]", + "Block {block_number}: assemble={assemble_ms:.1}ms import={import_ms:.1}ms total={total_ms:.1}ms txs={assembled_tx_count} [{}/{}]", block_idx + 1, args.blocks ); diff --git a/crates/engine-api/src/builder.rs b/crates/engine-api/src/builder.rs index 860d27d..3a56185 100644 --- a/crates/engine-api/src/builder.rs +++ b/crates/engine-api/src/builder.rs @@ -584,8 +584,11 @@ impl RealMorphL2EngineApi { )) })?; + // Use resolve_kind to wait for the payload builder to complete, + // ensuring pool transactions are included. best_payload may return + // before pool transactions are picked up. 
self.payload_builder - .best_payload(payload_id) + .resolve_kind(payload_id, reth_node_api::PayloadKind::WaitForPending) .await .ok_or_else(|| { MorphEngineApiError::Internal(format!("no payload response for id {payload_id:?}")) diff --git a/local-test/bench-block-exec.sh b/local-test/bench-block-exec.sh index f0343d9..1797452 100755 --- a/local-test/bench-block-exec.sh +++ b/local-test/bench-block-exec.sh @@ -35,8 +35,8 @@ cd "${REPO_ROOT}" : "${CONTRACT_ARTIFACT:=./local-test/erc20-bench-contracts/out/BenchToken.sol/BenchToken.json}" : "${CHAIN_ID:=99999}" -ETH_TX_SIZES="500 1000 2000 3000 5000 8000" -ERC20_TX_SIZES="500 1000 2000 3000 5000" +: "${ETH_TX_SIZES:=500 1000 2000 3000 5000 8000}" +: "${ERC20_TX_SIZES:=500 1000 2000 3000 5000}" HTTP_PORT=8545; AUTH_PORT=8551 HTTP_PORT_B=9545; AUTH_PORT_B=9551 From 55437047a5dace38dc523125f0ca4ce9dc8c3711 Mon Sep 17 00:00:00 2001 From: panos-xyz Date: Tue, 7 Apr 2026 08:54:06 +0800 Subject: [PATCH 12/17] fix(bench): use batched JSON-RPC for txpool sends to avoid port exhaustion --- bin/bench-block-exec/src/workload.rs | 62 +++++++++++++++++----------- 1 file changed, 37 insertions(+), 25 deletions(-) diff --git a/bin/bench-block-exec/src/workload.rs b/bin/bench-block-exec/src/workload.rs index a424fa6..3db91b6 100644 --- a/bin/bench-block-exec/src/workload.rs +++ b/bin/bench-block-exec/src/workload.rs @@ -258,35 +258,47 @@ mod hex { // Txpool helpers // --------------------------------------------------------------------------- -/// Send a batch of raw transactions to the txpool via `eth_sendRawTransaction`. +/// Send a batch of raw transactions to the txpool via batched JSON-RPC. /// -/// Sends all transactions concurrently for maximum throughput. +/// Uses JSON-RPC batch requests to avoid exhausting local TCP ports. +/// Splits into chunks of up to 500 per batch to stay within request size limits. 
 async fn send_txs_to_txpool(http_rpc: &str, txs: &[Bytes]) -> eyre::Result<()> {
     let client = reqwest::Client::new();
-    let mut futures = Vec::with_capacity(txs.len());
-
-    for (i, tx) in txs.iter().enumerate() {
-        let client = client.clone();
-        let url = http_rpc.to_string();
-        let tx_hex = format!("0x{}", alloy_primitives::hex::encode(tx));
-        futures.push(tokio::spawn(async move {
-            let body = serde_json::json!({
-                "jsonrpc": "2.0",
-                "method": "eth_sendRawTransaction",
-                "params": [tx_hex],
-                "id": i + 1
-            });
-            let resp = client.post(&url).json(&body).send().await?;
-            let json: serde_json::Value = resp.json().await?;
-            if let Some(err) = json.get("error") {
-                return Err(eyre::eyre!("eth_sendRawTransaction[{}] error: {}", i, err));
-            }
-            Ok::<(), eyre::Report>(())
-        }));
-    }
+    let chunk_size = 500;
+
+    for chunk in txs.chunks(chunk_size) {
+        let batch: Vec<serde_json::Value> = chunk
+            .iter()
+            .enumerate()
+            .map(|(i, tx)| {
+                let tx_hex = format!("0x{}", alloy_primitives::hex::encode(tx));
+                serde_json::json!({
+                    "jsonrpc": "2.0",
+                    "method": "eth_sendRawTransaction",
+                    "params": [tx_hex],
+                    "id": i + 1
+                })
+            })
+            .collect();
 
-    for f in futures {
-        f.await.map_err(|e| eyre::eyre!("join error: {e}"))??;
+        let resp = client
+            .post(http_rpc)
+            .json(&batch)
+            .send()
+            .await
+            .map_err(|e| eyre::eyre!("batch send failed: {e}"))?;
+
+        let results: Vec<serde_json::Value> = resp
+            .json()
+            .await
+            .map_err(|e| eyre::eyre!("batch response parse failed: {e}"))?;
+
+        // Check for errors in the batch response
+        for result in &results {
+            if let Some(err) = result.get("error") {
+                return Err(eyre::eyre!("eth_sendRawTransaction error: {}", err));
+            }
+        }
     }
     Ok(())
 }

From 5c8db0e4979c7f1732b4b0db6a0a39f5e48a29a9 Mon Sep 17 00:00:00 2001
From: panos-xyz
Date: Tue, 7 Apr 2026 11:42:37 +0800
Subject: [PATCH 13/17] fix(bench): revert gas price to MORPH_BASE_FEE (1M wei), confirmed working with resolve_kind fix

---
 Cargo.lock                           | 2 +-
 bin/bench-block-exec/src/workload.rs | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 947958f..3ebae50 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1455,7 +1455,7 @@ checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445"
 
 [[package]]
 name = "bench-block-exec"
-version = "0.1.0"
+version = "0.2.0"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",

diff --git a/bin/bench-block-exec/src/workload.rs b/bin/bench-block-exec/src/workload.rs
index 3db91b6..6e37865 100644
--- a/bin/bench-block-exec/src/workload.rs
+++ b/bin/bench-block-exec/src/workload.rs
@@ -81,7 +81,7 @@ pub fn build_eth_transfer_batch(
             nonce,
             gas_limit: 21_000,
             max_fee_per_gas,
-            max_priority_fee_per_gas: 1_000_000_000, // 1 gwei tip
+            max_priority_fee_per_gas: 0,
             to: TxKind::Call(receiver_address(i)),
             value: U256::from(1),
             access_list: Default::default(),
@@ -132,7 +132,7 @@ pub fn build_erc20_transfer_batch(
             nonce,
             gas_limit: 60_000,
             max_fee_per_gas,
-            max_priority_fee_per_gas: 1_000_000_000, // 1 gwei tip
+            max_priority_fee_per_gas: 0,
             to: TxKind::Call(contract_addr),
             value: U256::ZERO,
             access_list: Default::default(),
@@ -375,7 +375,7 @@ pub async fn run(args: RunWorkloadArgs) -> eyre::Result<()> {
     wait_for_rpc(&args.http_rpc, 60).await?;
     println!("RPC is ready.");
 
-    let max_fee_per_gas: u128 = 2_000_000_000; // 2 gwei — must exceed base fee + priority
+    let max_fee_per_gas: u128 = 1_000_000; // MORPH_BASE_FEE (0.001 gwei)
     let mut current_nonce: u64 = 0;
     let mut block_start: u64 = 1;
 
From 46ed766e43f687b6b51324912cf0751eeca6da35 Mon Sep 17 00:00:00 2001
From:
panos-xyz Date: Tue, 7 Apr 2026 11:47:24 +0800 Subject: [PATCH 14/17] refactor(bench): route all L2 transactions through txpool, remove direct Engine API injection --- bin/bench-block-exec/src/workload.rs | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/bin/bench-block-exec/src/workload.rs b/bin/bench-block-exec/src/workload.rs index 6e37865..e0ca7b7 100644 --- a/bin/bench-block-exec/src/workload.rs +++ b/bin/bench-block-exec/src/workload.rs @@ -379,35 +379,34 @@ pub async fn run(args: RunWorkloadArgs) -> eyre::Result<()> { let mut current_nonce: u64 = 0; let mut block_start: u64 = 1; - // 4. If layer == "erc20-transfer", deploy the contract first via Engine API - // (deployment is a setup step, not part of the timed workload) + // 4. If layer == "erc20-transfer", deploy the contract first via txpool let contract_addr = if args.layer == "erc20-transfer" { let artifact_path = args.contract_artifact.as_deref().ok_or_else(|| { eyre::eyre!("--contract-artifact is required for erc20-transfer layer") })?; let bytecode = read_contract_artifact(artifact_path)?; - // Build deploy tx (nonce=0) — sent via Engine API directly (setup step) let initial_supply = U256::from(1_000_000_000u64) * U256::from(10u64).pow(U256::from(18)); let deploy_tx = build_deploy_tx(&signer, 0, args.chain_id, max_fee_per_gas, &bytecode, initial_supply)?; - // Assemble block 1 with the deploy tx (L1-style, direct via Engine API) + // Send deploy tx to txpool + send_txs_to_txpool(&args.http_rpc, &[deploy_tx]).await?; + wait_for_txpool(&args.http_rpc, sender_addr, 1, 60).await?; + + // Assemble and import block 1 (node pulls deploy tx from txpool) let (assembled, _assemble_ms) = engine .assemble_l2_block( &args.engine_rpc, AssembleL2BlockParams { number: 1, - transactions: vec![deploy_tx], + transactions: vec![], timestamp: Some(1), }, ) .await?; - - // Import block 1 engine.new_l2_block(&args.engine_rpc, assembled).await?; - // Compute contract address: sender.create(0) let addr = sender_addr.create(0); println!("Deployed ERC20 contract at {addr}"); From bed929426f3394a917b8c04162cefdd1f7d775b1 Mon Sep 17 00:00:00 2001 From: panos-xyz Date: Tue, 7 Apr 2026 13:08:51 +0800 Subject: [PATCH 15/17] docs: add maximum TPS benchmark design spec Comprehensive benchmark design covering three test modes (pure execution, end-to-end pipeline, sustained block production), three workloads (eth-transfer, erc20-transfer, uniswap-swap), multi-sender support, state degradation testing, automatic inflection point discovery, and chart generation. --- .../2026-04-07-max-tps-benchmark-design.md | 467 ++++++++++++++++++ 1 file changed, 467 insertions(+) create mode 100644 docs/superpowers/specs/2026-04-07-max-tps-benchmark-design.md diff --git a/docs/superpowers/specs/2026-04-07-max-tps-benchmark-design.md b/docs/superpowers/specs/2026-04-07-max-tps-benchmark-design.md new file mode 100644 index 0000000..2af1a9e --- /dev/null +++ b/docs/superpowers/specs/2026-04-07-max-tps-benchmark-design.md @@ -0,0 +1,467 @@ +# Maximum TPS Benchmark Design + +**Date:** 2026-04-07 + +**Goal** + +Determine the absolute maximum TPS of morph-reth and morph-geth by testing three dimensions of performance: pure EVM execution throughput, end-to-end pipeline throughput, and sustained block production under state growth. Remove all artificial limits and squeeze every bit of performance from both engines. 
+ +**Why this benchmark exists** + +The previous benchmark (2026-04-03) validated whether reth/geth can meet the 3,000 tx/block @ 300ms target. This benchmark goes further: find the breaking point of each engine under unconstrained conditions, understand where bottlenecks lie (EVM, state commit, txpool, IO), and quantify performance degradation under realistic state sizes and multi-sender contention. + +**Relation to previous benchmark** + +Extends `bin/bench-block-exec/` with new modes, workloads, and automation. The existing `run-workload` subcommand continues to work unchanged. New functionality is additive. + +--- + +## 1. Test Modes + +### Mode A: Pure Execution (`--mode exec`) + +Measures raw EVM execution + state commit speed, with all external overhead eliminated. + +**Data path:** +- Pre-generate all transactions in memory before timing starts +- Inject transactions directly via `assembleL2Block`'s `transactions` field (bypass txpool entirely) +- Measure `assemble_ms` and `import_ms` separately + +**Configuration:** +- Single sender only (nonce-sequential, no sorting overhead) +- Sweep range: 1k, 2k, 5k, 10k, 20k, 50k, 100k txs/block +- 50 blocks per data point + +**Answers:** What is the raw EVM throughput ceiling? Is the bottleneck in EVM execution or state commit? + +### Mode B: End-to-End (`--mode e2e`) + +Measures full pipeline throughput including txpool acceptance, sorting, block assembly, and import. + +**Data path:** +- N senders submit transactions concurrently via `eth_sendRawTransaction` (batched, async) +- Wait for txpool to accept all (pending nonce polling) +- `assembleL2Block` with empty transactions array (pulls from txpool) +- `newL2Block` to import + +**Per-block timing breakdown:** +- `submit_ms`: time to send all batches to txpool +- `pool_wait_ms`: time for pending nonce to reach expected value +- `assemble_ms`: assembleL2Block RPC latency +- `import_ms`: newL2Block RPC latency + +**Configuration:** +- Sender counts: 1 (baseline), 100 (default), 1000 (stress) +- 200 blocks per test +- Batched submission: chunks of 500 txs per JSON-RPC batch call + +**Answers:** What is the end-to-end sequencer TPS? Is the bottleneck in txpool or execution? + +### Mode C: Sustained (`--mode sustained`) + +Measures long-running block production stability and performance degradation under state growth. + +**Two-phase operation:** +1. **Warmup phase** (`--warmup-blocks N`): Produce N blocks that are NOT timed. Fills up the state trie to simulate a mature chain. +2. **Measurement phase** (`--blocks N`): Produce N blocks with full timing. Default 1000. + +**Tracked over time:** +- Per-block: all timing fields from Mode B +- Rolling average TPS (100-block window) +- Cumulative block/tx counters + +**Configuration:** +- Warmup: 0 (empty state) or 500 blocks (populated state) +- Sender counts: 1, 100 +- 1000 measurement blocks + +**Answers:** Does TPS degrade over time? How much does state size impact performance? + +--- + +## 2. 
Workload Types + +### eth-transfer (existing) +- EIP-1559 value transfer, 1 wei, gas limit 21,000 +- Minimal EVM work: balance debit/credit, nonce increment +- Purpose: baseline throughput ceiling + +### erc20-transfer (existing) +- BenchToken.transfer(to, 1), gas limit 60,000 +- 2 SSTORE (sender balance down, receiver balance up) + 1 LOG +- Purpose: typical contract call performance + +### uniswap-swap (new) +- BenchSwap.swap0For1(amountIn), gas limit 150,000 +- Constant-product AMM: 4 SLOAD + 4 SSTORE + arithmetic + 1 LOG +- Purpose: heavy compute + heavy storage workload + +--- + +## 3. BenchSwap Contract + +```solidity +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +contract BenchSwap { + uint256 public reserve0; + uint256 public reserve1; + + mapping(address => uint256) public balance0; + mapping(address => uint256) public balance1; + + event Swap( + address indexed sender, + uint256 amountIn, + uint256 amountOut, + uint256 reserve0After, + uint256 reserve1After + ); + + function swap0For1(uint256 amountIn) external { + uint256 r0 = reserve0; + uint256 r1 = reserve1; + uint256 bal = balance0[msg.sender]; + + require(bal >= amountIn, "insufficient balance"); + require(r0 > 0 && r1 > 0, "no liquidity"); + + uint256 amountInWithFee = amountIn * 997; + uint256 amountOut = (amountInWithFee * r1) / (r0 * 1000 + amountInWithFee); + + reserve0 = r0 + amountIn; + reserve1 = r1 - amountOut; + + balance0[msg.sender] = bal - amountIn; + balance1[msg.sender] = balance1[msg.sender] + amountOut; + + emit Swap(msg.sender, amountIn, amountOut, r0 + amountIn, r1 - amountOut); + } +} +``` + +**Gas profile:** ~120-150k per call (warm slots). 4 SLOAD + 4 SSTORE + mul/div + LOG3. + +**Genesis pre-deployment:** +- Contract deployed at deterministic address in genesis alloc +- `reserve0` and `reserve1` set to 10^24 (enough for millions of swaps) +- Each sender gets pre-allocated `balance0` sufficient for all test transactions + +--- + +## 4. Multi-Sender Design + +### Account Generation + +Deterministic derivation for reproducibility: + +``` +master_key = keccak256("bench-sender-0") +sender[i].private_key = keccak256(master_key ++ i.to_be_bytes()) +``` + +### Genesis Pre-population + +Each sender account receives: +- ETH balance: 10^27 wei (gas funding) +- BenchToken balance: 10^24 tokens (in contract storage) +- BenchSwap balance0: 10^24 tokens (in contract storage) + +### Transaction Distribution + +For `txs_per_block = T` and `senders = S`: +- Each sender generates `T / S` transactions per block +- Round-robin interleaving: `[sender0_tx0, sender1_tx0, ..., senderS_tx0, sender0_tx1, ...]` +- Each sender tracks its own nonce independently + +### Concurrent Submission (Mode B/C) + +- Spawn `min(S, 16)` tokio tasks for parallel `eth_sendRawTransaction` submission +- Each task handles a subset of senders +- All tasks must complete before proceeding to assembly + +--- + +## 5. Genesis Configuration Changes + +### Limits Removed + +```json +{ + "config": { + "morph": { + "maxTxPerBlock": 0, + "maxTxPayloadBytesPerBlock": 1073741824 + } + }, + "gasLimit": "0x2540BE400" +} +``` + +- `maxTxPerBlock`: Set to 0 (if 0 means unlimited in both reth and geth). If not supported, use 10,000,000. **Must verify during implementation.** +- `maxTxPayloadBytesPerBlock`: 1 GB (effectively unlimited) +- `gasLimit`: 10,000,000,000 (10B gas). Allows ~476k eth-transfers or ~166k erc20-transfers or ~66k swaps per block. 
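+
+The per-block ceilings in the last bullet follow directly from the raised gas limit. A quick back-of-the-envelope check (illustrative only, not part of the benchmark tooling; it uses the nominal per-tx gas costs listed in section 2):
+
+```python
+GAS_LIMIT = 10_000_000_000  # matches the 10B genesis gasLimit above
+
+# Nominal gas cost per transaction for each workload (section 2)
+WORKLOAD_GAS = {
+    "eth-transfer": 21_000,
+    "erc20-transfer": 60_000,
+    "uniswap-swap": 150_000,
+}
+
+for name, gas in WORKLOAD_GAS.items():
+    # Integer division: how many such txs fit under the block gas limit
+    print(f"{name}: ~{GAS_LIMIT // gas:,} txs/block")
+
+# eth-transfer: ~476,190 txs/block
+# erc20-transfer: ~166,666 txs/block
+# uniswap-swap: ~66,666 txs/block
+```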
+ +### Node Startup Tuning + +**Reth:** +```bash +morph-reth node \ + --chain genesis.json \ + --morph.max-tx-payload-bytes 1073741824 \ + --engine.persistence-threshold 4096 \ + --engine.memory-block-buffer-target 4096 +``` + +**Geth:** +```bash +geth \ + --gcmode archive \ + --cache 8192 \ + --txpool.globalslots 100000 \ + --txpool.accountslots 1000 +``` + +Both engines configured to maximize memory usage so that caches and buffers are not the bottleneck. + +--- + +## 6. Sweep: Automatic Inflection Point Discovery + +New subcommand: `bench-block-exec sweep` + +### Algorithm + +**Step 1: Coarse scan (exponential)** +- Test points: 1k, 2k, 5k, 10k, 20k, 50k, 100k txs/block +- 30 blocks per point, take median `assemble_ms` +- Find first point where `assemble_ms > previous × 1.5` → `rough_peak` + +**Step 2: Fine scan (linear)** +- Range: `[rough_peak / 2, rough_peak × 1.5]` +- Step size: `rough_peak / 10` +- 50 blocks per point, record p50/p95 + +**Step 3: Output** +- `peak_tps`: highest TPS before degradation +- `peak_mgas_s`: corresponding MGas/s +- `inflection_txs`: txs/block where latency begins rising sharply +- Full data points written to `sweep/*.jsonl` + +### Degradation Definition +- Mode A: per-tx assemble time (`assemble_ms / tx_count`) increases >50% from minimum +- Mode B: total latency exceeds 1 second +- Mode C: TPS drops >10% from the first 100-block average + +--- + +## 7. Metrics & Output Format + +### Per-Block JSON Line + +```json +{ + "block_number": 42, + "tx_count": 5000, + "expected_tx_count": 5000, + "engine": "reth", + "mode": "exec", + "workload": "erc20-transfer", + "senders": 100, + "warmup_blocks": 0, + + "submit_ms": 0, + "pool_wait_ms": 0, + "assemble_ms": 85.3, + "import_ms": 12.1, + "total_ms": 97.4, + + "gas_used": 300000000, + "tps": 51334.7, + "mgas_per_sec": 3082.0, + "inclusion_rate": 1.0, + + "cumulative_blocks": 42, + "cumulative_txs": 210000, + "rolling_avg_tps_100": null +} +``` + +**Key derived metrics:** +- `tps`: `tx_count / (total_ms / 1000)` +- `mgas_per_sec`: `gas_used / (total_ms / 1000) / 1_000_000` +- `inclusion_rate`: `tx_count / expected_tx_count` +- `rolling_avg_tps_100`: mean TPS of last 100 blocks (Mode C only, null if < 100 blocks) + +### Summary TSV Columns + +``` +engine | mode | workload | senders | warmup_blocks | blocks | +avg_txs_per_block | inclusion_rate | +avg_assemble_ms | avg_import_ms | avg_total_ms | +p50_total_ms | p95_total_ms | p99_total_ms | +peak_tps | avg_tps | avg_mgas_s | +degradation_pct | error_count +``` + +- `degradation_pct`: (last 100 blocks avg TPS / first 100 blocks avg TPS - 1) × 100. Only for Mode C, 0 otherwise. +- First 10 blocks skipped as warmup in summary statistics (consistent with existing behavior). + +--- + +## 8. Chart Generation + +### `bench-plot.py` + +Dependencies: `matplotlib`, `numpy` (standard, no heavy frameworks). 
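+
+To make the data flow concrete, a minimal sketch of the loading and plotting skeleton (illustrative only; it assumes the per-block JSON-line fields from section 7 and the `sweep/*.jsonl` layout from section 6, and is not the final `bench-plot.py`):
+
+```python
+import glob
+import json
+from collections import defaultdict
+
+import matplotlib.pyplot as plt
+
+
+def load_jsonl(pattern):
+    """Yield one record (dict) per JSON line across all matching result files."""
+    for path in glob.glob(pattern):
+        with open(path) as f:
+            for line in f:
+                if line.strip():
+                    yield json.loads(line)
+
+
+# Average TPS per (engine, txs-per-block) point; the real script would take medians.
+points = defaultdict(list)
+for rec in load_jsonl("bench-results/latest/sweep/*.jsonl"):
+    points[(rec["engine"], rec["tx_count"])].append(rec["tps"])
+
+fig, ax = plt.subplots()
+for engine in ("reth", "geth"):
+    xs = sorted(x for (e, x) in points if e == engine)
+    ys = [sum(points[(engine, x)]) / len(points[(engine, x)]) for x in xs]
+    ax.plot(xs, ys, marker="o", label=engine)
+ax.set_xlabel("txs per block")
+ax.set_ylabel("TPS")
+ax.legend()
+fig.savefig("charts/sweep_tps.png", dpi=150)
+```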
+ +### Chart 1: Sweep TPS / MGas Curve +- Source: `sweep/*.jsonl` +- X: txs_per_block, Y-left: TPS, Y-right: MGas/s +- Series: reth vs geth, one subplot per workload +- Annotation: inflection point marker + +### Chart 2: Latency CDF +- Source: `exec/*.jsonl` and `e2e/*.jsonl` +- X: latency (ms), Y: percentile (0-100%) +- Series: assemble_ms vs import_ms, reth vs geth +- Vertical lines at p50 / p95 / p99 + +### Chart 3: Sustained Time Series +- Source: `sustained/*.jsonl` +- X: block_number, Y: rolling_avg_tps_100 +- Series: empty state vs pre-populated, reth vs geth +- Gray mask over warmup region + +### Chart 4: Reth vs Geth Comparison Bar Chart +- Grouped bars for each workload × mode +- Metrics: peak_tps, avg_assemble_ms, p95_total_ms + +### Chart 5: Multi-Sender Impact +- Source: `e2e/*.jsonl` +- X: sender count (1, 100, 1000), Y: TPS +- One subplot per workload + +**Usage:** +```bash +python3 bench-plot.py --all --input bench-results/latest/ --output charts/ +``` + +--- + +## 9. Orchestration Script + +### `bench-block-exec.sh` Four Phases + +**Phase 1: Sweep (~30 min)** +``` +for engine in [reth, geth]: + for workload in [eth-transfer, erc20-transfer, uniswap-swap]: + sweep --mode exec --senders 1 +``` +6 sweep tasks. Output: peak txs/block per combination. + +**Phase 2: Precise Matrix (~2 hours)** +``` +Mode A (exec): engine(2) × workload(3) × senders(1) = 6 runs × 50 blocks +Mode B (e2e): engine(2) × workload(3) × senders(1,100,1000) = 18 runs × 200 blocks +Mode C (sustained): engine(2) × workload(3) × senders(1,100) = 12 runs × 1000 blocks +``` +36 test cases. txs/block selection per mode: +- Mode A: uses sweep's `inflection_txs` (the point just before degradation) +- Mode B: uses `inflection_txs × 0.8` (conservative, accounts for txpool overhead) +- Mode C: uses `inflection_txs × 0.5` (must be sustainable over 1000 blocks) + +**Phase 3: State Degradation (~1 hour)** +``` +Mode C only: engine(2) × workload(3) × senders(100) × warmup(500) +``` +6 test cases × (500 warmup + 1000 measurement) blocks. + +**Phase 4: Summarize + Plot (<1 min)** +``` +bench-block-exec summarize ... +python3 bench-plot.py --all ... +``` + +**Total: 42 test cases, ~26,000 blocks, ~3.5 hours** + +### Per-Test Lifecycle + +1. Generate genesis (multi-sender, contracts pre-deployed, limits removed) +2. Initialize datadir (geth: `geth init`, reth: `--chain` flag) +3. Start node via PM2 +4. Wait for RPC readiness (HTTP poll, 120s timeout) +5. Run benchmark (`bench-block-exec run --mode ... --workload ...`) +6. Stop node +7. Clean datadir + +Each test is fully isolated: own genesis, own datadir, own process. + +### Resume Support + +Script checks for existing result files before each test case. Completed cases are skipped. Use `--force` to re-run all. + +--- + +## 10. 
Error Handling + +### RPC Failures +- 60s timeout per call +- On assemble/import failure: log error in JSON output with `"error": true`, continue to next block +- 5 consecutive failures → terminate current test case, mark as failed + +### Txpool Rejection (Mode B/C) +- Exponential backoff retry: 100ms → 200ms → 400ms, max 3 attempts +- On persistent failure: reduce batch size and retry +- Record rejection count in metrics + +### Tx Count Mismatch +- Track `expected_tx_count` vs `actual_tx_count` +- Compute `inclusion_rate` +- Sub-100% inclusion rate is valid data (indicates txpool/gas bottleneck) + +### OOM / Node Crash +- Detected via PM2 process status +- Sweep: record as hard upper limit, stop increasing txs/block +- Matrix test: mark as failed, continue to next case + +### Data Integrity +- JSON lines ordered by block_number +- Summarize detects missing block numbers +- >10% missing → mark test case as unreliable + +--- + +## 11. File Structure Changes + +``` +bin/bench-block-exec/ +├── Cargo.toml (add: no new heavy deps) +├── src/ +│ ├── main.rs (extend: new subcommands) +│ ├── engine.rs (existing: minor extensions) +│ ├── genesis.rs (extend: multi-sender, contract pre-deploy) +│ ├── workload.rs (extend: multi-sender, uniswap workload) +│ ├── tx_factory.rs (new: unified transaction generation) +│ ├── mode_exec.rs (new: Mode A pure execution) +│ ├── mode_e2e.rs (new: Mode B end-to-end) +│ ├── mode_sustained.rs (new: Mode C sustained) +│ ├── sweep.rs (new: automatic inflection finder) +│ ├── report.rs (extend: new metrics, MGas/s) +│ └── verify.rs (existing: unchanged) + +local-test/ +├── bench-block-exec.sh (rewrite: 4-phase orchestration) +├── bench-plot.py (new: chart generation) +└── bench-contracts/ (rename from erc20-bench-contracts/) + ├── foundry.toml + ├── src/ + │ ├── BenchToken.sol (existing) + │ └── BenchSwap.sol (new) + └── test/ + └── BenchSwap.t.sol (new: gas verification) +``` + +No new Rust crate dependencies beyond what already exists in Cargo.toml. Python script requires only `matplotlib` and `numpy`. From a258098fec0dc1751604ee472927145cfc8523f5 Mon Sep 17 00:00:00 2001 From: panos-xyz Date: Tue, 7 Apr 2026 13:22:52 +0800 Subject: [PATCH 16/17] docs: add max TPS benchmark implementation plan 14-task plan covering BenchSwap contract, tx_factory module, three benchmark modes (exec/e2e/sustained), sweep automation, extended metrics, chart generation, and orchestration script. --- .../plans/2026-04-07-max-tps-benchmark.md | 2572 +++++++++++++++++ 1 file changed, 2572 insertions(+) create mode 100644 docs/superpowers/plans/2026-04-07-max-tps-benchmark.md diff --git a/docs/superpowers/plans/2026-04-07-max-tps-benchmark.md b/docs/superpowers/plans/2026-04-07-max-tps-benchmark.md new file mode 100644 index 0000000..876c739 --- /dev/null +++ b/docs/superpowers/plans/2026-04-07-max-tps-benchmark.md @@ -0,0 +1,2572 @@ +# Maximum TPS Benchmark Implementation Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Build a comprehensive benchmark suite that finds the absolute maximum TPS of morph-reth and morph-geth across three test dimensions (pure execution, end-to-end pipeline, sustained production), three workloads, and multiple sender configurations. 
+ +**Architecture:** Extends the existing `bench-block-exec` binary with three new run modes (`exec`, `e2e`, `sustained`), a `sweep` subcommand for automatic inflection-point discovery, a new `tx_factory` module for multi-sender/multi-workload transaction generation, and extended genesis generation. A Python script generates charts from JSON-line output. A rewritten shell script orchestrates the full 4-phase test matrix. + +**Tech Stack:** Rust (alloy-*, clap, tokio, serde, reqwest, jsonrpsee), Solidity/Foundry, Python (matplotlib, numpy), Bash/PM2 + +**Spec:** `docs/superpowers/specs/2026-04-07-max-tps-benchmark-design.md` + +--- + +## File Map + +### New Files +| File | Responsibility | +|------|---------------| +| `local-test/bench-contracts/src/BenchSwap.sol` | Simplified constant-product AMM contract | +| `local-test/bench-contracts/test/BenchSwap.t.sol` | Gas verification test for BenchSwap | +| `bin/bench-block-exec/src/tx_factory.rs` | Multi-sender account generation + all transaction builders | +| `bin/bench-block-exec/src/mode_exec.rs` | Mode A: pure execution benchmark | +| `bin/bench-block-exec/src/mode_e2e.rs` | Mode B: end-to-end pipeline benchmark | +| `bin/bench-block-exec/src/mode_sustained.rs` | Mode C: sustained block production benchmark | +| `bin/bench-block-exec/src/sweep.rs` | Automatic inflection point discovery | +| `local-test/bench-plot.py` | Chart generation from JSON-line results | + +### Modified Files +| File | Changes | +|------|---------| +| `local-test/erc20-bench-contracts/` | Rename to `local-test/bench-contracts/`, add BenchSwap | +| `bin/bench-block-exec/src/main.rs` | Add `Run`, `Sweep` subcommands | +| `bin/bench-block-exec/src/engine.rs` | Extend `BlockTiming` → `BlockTimingV2` with new fields | +| `bin/bench-block-exec/src/genesis.rs` | Multi-sender, contract pre-deploy, limits removed | +| `bin/bench-block-exec/src/report.rs` | New summary columns (MGas/s, degradation_pct) | +| `local-test/bench-block-exec.sh` | Rewrite: 4-phase orchestration | + +### Unchanged Files +| File | Note | +|------|------| +| `bin/bench-block-exec/src/workload.rs` | Existing `run-workload` kept working as-is | +| `bin/bench-block-exec/src/verify.rs` | No changes needed | + +--- + +## Task 1: BenchSwap Solidity Contract + +**Files:** +- Rename: `local-test/erc20-bench-contracts/` → `local-test/bench-contracts/` +- Create: `local-test/bench-contracts/src/BenchSwap.sol` +- Create: `local-test/bench-contracts/test/BenchSwap.t.sol` + +- [ ] **Step 1: Rename the contracts directory** + +```bash +cd /Users/panos/workspace/morph-reth +mv local-test/erc20-bench-contracts local-test/bench-contracts +``` + +- [ ] **Step 2: Write BenchSwap.sol** + +Create `local-test/bench-contracts/src/BenchSwap.sol`: + +```solidity +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.20; + +/// @title BenchSwap - Simplified constant-product AMM for benchmarking +/// @notice Mirrors Uniswap V2 storage access pattern: 4 SLOAD + 4 SSTORE + arithmetic + LOG +contract BenchSwap { + uint256 public reserve0; + uint256 public reserve1; + + mapping(address => uint256) public balance0; + mapping(address => uint256) public balance1; + + event Swap( + address indexed sender, + uint256 amountIn, + uint256 amountOut, + uint256 reserve0After, + uint256 reserve1After + ); + + function swap0For1(uint256 amountIn) external { + uint256 r0 = reserve0; + uint256 r1 = reserve1; + uint256 bal = balance0[msg.sender]; + + require(bal >= amountIn, "insufficient balance"); + require(r0 > 0 && r1 > 0, "no liquidity"); + + 
uint256 amountInWithFee = amountIn * 997; + uint256 amountOut = (amountInWithFee * r1) / (r0 * 1000 + amountInWithFee); + + reserve0 = r0 + amountIn; + reserve1 = r1 - amountOut; + + balance0[msg.sender] = bal - amountIn; + balance1[msg.sender] = balance1[msg.sender] + amountOut; + + emit Swap(msg.sender, amountIn, amountOut, r0 + amountIn, r1 - amountOut); + } +} +``` + +- [ ] **Step 3: Write gas verification test** + +Create `local-test/bench-contracts/test/BenchSwap.t.sol`: + +```solidity +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.20; + +import "forge-std/Test.sol"; +import "../src/BenchSwap.sol"; + +contract BenchSwapTest is Test { + BenchSwap swap; + address alice = address(0xA11CE); + + function setUp() public { + swap = new BenchSwap(); + // Set reserves via storage manipulation + vm.store(address(swap), bytes32(uint256(0)), bytes32(uint256(1e24))); // reserve0 + vm.store(address(swap), bytes32(uint256(1)), bytes32(uint256(1e24))); // reserve1 + // Set alice balance0 + bytes32 slot = keccak256(abi.encode(alice, uint256(2))); // balance0 mapping at slot 2 + vm.store(address(swap), slot, bytes32(uint256(1e24))); + } + + function test_swap_gas() public { + vm.prank(alice); + uint256 gasBefore = gasleft(); + swap.swap0For1(1000); + uint256 gasUsed = gasBefore - gasleft(); + // Should be in the 120k-150k range for warm slots + assertGt(gasUsed, 50_000, "gas too low"); + assertLt(gasUsed, 200_000, "gas too high"); + } + + function test_swap_updates_state() public { + vm.prank(alice); + swap.swap0For1(1000); + assertEq(swap.reserve0(), 1e24 + 1000); + assertLt(swap.reserve1(), 1e24); + assertEq(swap.balance0(alice), 1e24 - 1000); + assertGt(swap.balance1(alice), 0); + } + + function test_swap_insufficient_balance_reverts() public { + vm.prank(address(0xDEAD)); // no balance + vm.expectRevert("insufficient balance"); + swap.swap0For1(1); + } +} +``` + +- [ ] **Step 4: Add forge-std dependency and compile** + +```bash +cd /Users/panos/workspace/morph-reth/local-test/bench-contracts +forge install foundry-rs/forge-std --no-commit +``` + +Update `foundry.toml`: +```toml +[profile.default] +src = "src" +out = "out" +libs = ["lib"] +optimizer = true +optimizer_runs = 200 +evm_version = "shanghai" +``` + +- [ ] **Step 5: Run tests, verify gas range** + +```bash +cd /Users/panos/workspace/morph-reth/local-test/bench-contracts +forge test -vvv +``` + +Expected: all 3 tests pass, gas in 50k-200k range. + +- [ ] **Step 6: Record deployed bytecode for genesis** + +```bash +forge inspect BenchSwap deployedBytecode +forge inspect BenchToken deployedBytecode +``` + +Save both hex strings — they will be used in genesis.rs Task 5. + +- [ ] **Step 7: Commit** + +```bash +git add -f local-test/bench-contracts/ +git commit -m "feat(bench): add BenchSwap contract for uniswap-style workload" +``` + +--- + +## Task 2: Extend BlockTiming with New Metrics + +**Files:** +- Modify: `bin/bench-block-exec/src/engine.rs` + +- [ ] **Step 1: Add BlockTimingV2 struct** + +Add below the existing `BlockTiming` struct in `engine.rs`: + +```rust +/// Extended timing record for new benchmark modes. 
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct BlockTimingV2 {
+    pub block_number: u64,
+    pub tx_count: u64,
+    pub expected_tx_count: u64,
+    pub engine: String,
+    pub mode: String,
+    pub workload: String,
+    pub senders: u64,
+    pub warmup_blocks: u64,
+
+    // Timing breakdown (milliseconds)
+    pub submit_ms: f64,
+    pub pool_wait_ms: f64,
+    pub assemble_ms: f64,
+    pub import_ms: f64,
+    pub total_ms: f64,
+
+    // Derived metrics
+    pub gas_used: u64,
+    pub tps: f64,
+    pub mgas_per_sec: f64,
+    pub inclusion_rate: f64,
+
+    // Cumulative (for sustained mode)
+    pub cumulative_blocks: u64,
+    pub cumulative_txs: u64,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub rolling_avg_tps_100: Option<f64>,
+
+    // Error flag
+    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
+    pub error: bool,
+}
+
+impl BlockTimingV2 {
+    /// Compute derived fields after timing is recorded.
+    pub fn finalize(&mut self) {
+        self.total_ms = self.submit_ms + self.pool_wait_ms + self.assemble_ms + self.import_ms;
+        if self.total_ms > 0.0 {
+            let secs = self.total_ms / 1000.0;
+            self.tps = self.tx_count as f64 / secs;
+            self.mgas_per_sec = self.gas_used as f64 / secs / 1_000_000.0;
+        }
+        self.inclusion_rate = if self.expected_tx_count > 0 {
+            self.tx_count as f64 / self.expected_tx_count as f64
+        } else {
+            1.0
+        };
+    }
+}
+```
+
+- [ ] **Step 2: Verify compilation**
+
+```bash
+cd /Users/panos/workspace/morph-reth
+cargo check -p bench-block-exec
+```
+
+Expected: compiles with no errors.
+
+- [ ] **Step 3: Commit**
+
+```bash
+git add bin/bench-block-exec/src/engine.rs
+git commit -m "feat(bench): add BlockTimingV2 with extended metrics and MGas/s"
+```
+
+---
+
+## Task 3: Transaction Factory — Account Generation
+
+**Files:**
+- Create: `bin/bench-block-exec/src/tx_factory.rs`
+- Modify: `bin/bench-block-exec/src/main.rs` (add `pub mod tx_factory;`)
+
+- [ ] **Step 1: Create tx_factory.rs with sender generation**
+
+Create `bin/bench-block-exec/src/tx_factory.rs`:
+
+```rust
+use alloy_primitives::{Address, Bytes, B256, U256, keccak256};
+use alloy_signer_local::PrivateKeySigner;
+use std::str::FromStr;
+
+/// A benchmark sender with its own key, address, and nonce tracker.
+#[derive(Debug, Clone)]
+pub struct BenchSender {
+    pub signer: PrivateKeySigner,
+    pub address: Address,
+    pub nonce: u64,
+}
+
+/// Deterministic key derivation for reproducible benchmarks.
+///
+/// master_key = keccak256("bench-sender-0")
+/// sender[i].private_key = keccak256(master_key ++ i.to_be_bytes())
+pub fn generate_senders(count: u64) -> Vec<BenchSender> {
+    let master = keccak256(b"bench-sender-0");
+    (0..count)
+        .map(|i| {
+            let mut preimage = [0u8; 40];
+            preimage[..32].copy_from_slice(master.as_slice());
+            preimage[32..].copy_from_slice(&i.to_be_bytes());
+            let key_bytes = keccak256(&preimage);
+            let signer = PrivateKeySigner::from_bytes(&key_bytes)
+                .expect("valid private key from keccak256");
+            let address = signer.address();
+            BenchSender { signer, address, nonce: 0 }
+        })
+        .collect()
+}
+
+/// Known contract addresses for genesis pre-deploy.
+pub const BENCH_TOKEN_ADDR: Address = Address::new([
+    0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x01,
+]);
+
+pub const BENCH_SWAP_ADDR: Address = Address::new([
+    0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x02,
+]);
+
+/// Max fee per gas for all benchmark transactions (Morph base fee).
+pub const BENCH_MAX_FEE_PER_GAS: u128 = 1_000_000;
+
+/// Compute the storage slot for a Solidity mapping(address => uint256).
+/// slot_of(mapping[key]) = keccak256(abi.encode(key, mapping_slot))
+pub fn mapping_slot(key: Address, mapping_slot: u64) -> B256 {
+    let mut buf = [0u8; 64];
+    // Left-pad address to 32 bytes
+    buf[12..32].copy_from_slice(key.as_slice());
+    // Left-pad slot number to 32 bytes
+    buf[56..64].copy_from_slice(&mapping_slot.to_be_bytes());
+    keccak256(&buf)
+}
+
+/// Workload type for the new benchmark modes.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum Workload {
+    EthTransfer,
+    Erc20Transfer,
+    UniswapSwap,
+}
+
+impl Workload {
+    pub fn as_str(&self) -> &'static str {
+        match self {
+            Workload::EthTransfer => "eth-transfer",
+            Workload::Erc20Transfer => "erc20-transfer",
+            Workload::UniswapSwap => "uniswap-swap",
+        }
+    }
+
+    pub fn gas_per_tx(&self) -> u64 {
+        match self {
+            Workload::EthTransfer => 21_000,
+            Workload::Erc20Transfer => 60_000,
+            Workload::UniswapSwap => 150_000,
+        }
+    }
+}
+
+impl std::str::FromStr for Workload {
+    type Err = eyre::Report;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "eth-transfer" => Ok(Workload::EthTransfer),
+            "erc20-transfer" => Ok(Workload::Erc20Transfer),
+            "uniswap-swap" => Ok(Workload::UniswapSwap),
+            _ => Err(eyre::eyre!("unknown workload: {s}")),
+        }
+    }
+}
+```
+
+- [ ] **Step 2: Register module in main.rs**
+
+Add to `bin/bench-block-exec/src/main.rs` after the existing `mod` declarations:
+
+```rust
+pub mod tx_factory;
+```
+
+- [ ] **Step 3: Verify compilation**
+
+```bash
+cargo check -p bench-block-exec
+```
+
+- [ ] **Step 4: Commit**
+
+```bash
+git add bin/bench-block-exec/src/tx_factory.rs bin/bench-block-exec/src/main.rs
+git commit -m "feat(bench): add tx_factory module with multi-sender generation and workload types"
+```
+
+---
+
+## Task 4: Transaction Factory — Transaction Builders
+
+**Files:**
+- Modify: `bin/bench-block-exec/src/tx_factory.rs`
+
+- [ ] **Step 1: Add transaction building functions**
+
+Append to `tx_factory.rs`:
+
+```rust
+use alloy_consensus::{EthereumTxEnvelope, SignableTransaction, TxEip1559, TxEip4844};
+use alloy_eips::eip2718::Encodable2718;
+use alloy_primitives::TxKind;
+use alloy_signer::SignerSync;
+use alloy_sol_types::SolValue;
+
+type TxEnvelope = EthereumTxEnvelope<TxEip4844>;
+
+/// Receiver address for transfers. Deterministic: 0xBB...{index}.
+pub fn receiver_address(index: u64) -> Address {
+    let mut addr = [0u8; 20];
+    addr[0] = 0xBB;
+    addr[12..20].copy_from_slice(&index.to_be_bytes());
+    Address::new(addr)
+}
+
+fn sign_and_encode(signer: &PrivateKeySigner, mut tx: TxEip1559, chain_id: u64) -> eyre::Result<Bytes> {
+    tx.chain_id = chain_id;
+    let sig = signer.sign_transaction_sync(&mut tx)?;
+    let envelope = TxEnvelope::Eip1559(tx.into_signed(sig));
+    let mut buf = Vec::new();
+    envelope.encode_2718(&mut buf);
+    Ok(Bytes::from(buf))
+}
+
+/// Build a batch of ETH transfer transactions.
+pub fn build_eth_transfers(
+    sender: &mut BenchSender,
+    count: u64,
+    chain_id: u64,
+) -> eyre::Result<Vec<Bytes>> {
+    let mut txs = Vec::with_capacity(count as usize);
+    for i in 0..count {
+        let tx = TxEip1559 {
+            nonce: sender.nonce,
+            gas_limit: 21_000,
+            max_fee_per_gas: BENCH_MAX_FEE_PER_GAS,
+            max_priority_fee_per_gas: 0,
+            to: TxKind::Call(receiver_address(sender.nonce)),
+            value: U256::from(1),
+            ..Default::default()
+        };
+        txs.push(sign_and_encode(&sender.signer, tx, chain_id)?);
+        sender.nonce += 1;
+    }
+    Ok(txs)
+}
+
+/// Build a batch of ERC20 transfer transactions.
+pub fn build_erc20_transfers(
+    sender: &mut BenchSender,
+    count: u64,
+    chain_id: u64,
+) -> eyre::Result<Vec<Bytes>> {
+    // transfer(address,uint256) selector: 0xa9059cbb
+    let mut txs = Vec::with_capacity(count as usize);
+    for _ in 0..count {
+        let to = receiver_address(sender.nonce);
+        let mut calldata = vec![0xa9, 0x05, 0x9c, 0xbb]; // selector
+        calldata.extend_from_slice(&[0u8; 12]); // left-pad address
+        calldata.extend_from_slice(to.as_slice());
+        calldata.extend_from_slice(&U256::from(1).to_be_bytes::<32>());
+
+        let tx = TxEip1559 {
+            nonce: sender.nonce,
+            gas_limit: 60_000,
+            max_fee_per_gas: BENCH_MAX_FEE_PER_GAS,
+            max_priority_fee_per_gas: 0,
+            to: TxKind::Call(BENCH_TOKEN_ADDR),
+            input: Bytes::from(calldata),
+            ..Default::default()
+        };
+        txs.push(sign_and_encode(&sender.signer, tx, chain_id)?);
+        sender.nonce += 1;
+    }
+    Ok(txs)
+}
+
+/// Build a batch of BenchSwap swap0For1 transactions.
+pub fn build_swap_txs(
+    sender: &mut BenchSender,
+    count: u64,
+    chain_id: u64,
+) -> eyre::Result<Vec<Bytes>> {
+    // swap0For1(uint256) selector: first 4 bytes of keccak256("swap0For1(uint256)")
+    let selector = &keccak256(b"swap0For1(uint256)")[..4];
+    let mut txs = Vec::with_capacity(count as usize);
+    for _ in 0..count {
+        let mut calldata = selector.to_vec();
+        calldata.extend_from_slice(&U256::from(1).to_be_bytes::<32>());
+
+        let tx = TxEip1559 {
+            nonce: sender.nonce,
+            gas_limit: 150_000,
+            max_fee_per_gas: BENCH_MAX_FEE_PER_GAS,
+            max_priority_fee_per_gas: 0,
+            to: TxKind::Call(BENCH_SWAP_ADDR),
+            input: Bytes::from(calldata),
+            ..Default::default()
+        };
+        txs.push(sign_and_encode(&sender.signer, tx, chain_id)?);
+        sender.nonce += 1;
+    }
+    Ok(txs)
+}
+
+/// Build transactions for a block using multiple senders, round-robin interleaved.
+pub fn build_block_txs(
+    senders: &mut [BenchSender],
+    workload: Workload,
+    total_txs: u64,
+    chain_id: u64,
+) -> eyre::Result<Vec<Bytes>> {
+    let n_senders = senders.len() as u64;
+    let per_sender = total_txs / n_senders;
+    let remainder = total_txs % n_senders;
+
+    // Build per-sender batches
+    let mut per_sender_txs: Vec<Vec<Bytes>> = Vec::with_capacity(senders.len());
+    for (i, sender) in senders.iter_mut().enumerate() {
+        let count = per_sender + if (i as u64) < remainder { 1 } else { 0 };
+        let txs = match workload {
+            Workload::EthTransfer => build_eth_transfers(sender, count, chain_id)?,
+            Workload::Erc20Transfer => build_erc20_transfers(sender, count, chain_id)?,
+            Workload::UniswapSwap => build_swap_txs(sender, count, chain_id)?,
+        };
+        per_sender_txs.push(txs);
+    }
+
+    // Round-robin interleave
+    let mut result = Vec::with_capacity(total_txs as usize);
+    let max_len = per_sender_txs.iter().map(|v| v.len()).max().unwrap_or(0);
+    for tx_idx in 0..max_len {
+        for sender_txs in &per_sender_txs {
+            if tx_idx < sender_txs.len() {
+                result.push(sender_txs[tx_idx].clone());
+            }
+        }
+    }
+    Ok(result)
+}
+```
+
+- [ ] **Step 2: Verify compilation**
+
+```bash
+cargo check -p bench-block-exec
+```
+
+- [ ] **Step 3: Commit**
+
+```bash
+git add bin/bench-block-exec/src/tx_factory.rs
+git commit -m "feat(bench): add transaction builders for eth/erc20/swap with multi-sender support"
+```
+
+---
+
+## Task 5: Extend Genesis Generation
+
+**Files:**
+- Modify: `bin/bench-block-exec/src/genesis.rs`
+
+- [ ] **Step 1: Add multi-sender and contract pre-deploy support**
+
+Replace the contents of `genesis.rs` with an extended version. 
Keep the existing `WriteGenesisArgs` and `run` function signatures, but add new args and extend `build_genesis`:
+
+```rust
+use alloy_primitives::{Address, B256, U256};
+use clap::Args;
+use eyre::ensure;
+use serde_json::{json, Map, Value};
+use crate::tx_factory::{
+    generate_senders, mapping_slot, BENCH_TOKEN_ADDR, BENCH_SWAP_ADDR,
+};
+
+#[derive(Args)]
+pub struct WriteGenesisArgs {
+    #[arg(long)]
+    pub output: String,
+    /// Legacy single-sender address (hex). Ignored if --senders > 0.
+    #[arg(long)]
+    pub sender: Option<String>,
+    #[arg(long, default_value = "1000000000000000000000000000")]
+    pub sender_balance: String,
+    /// Number of benchmark senders to generate (deterministic keys).
+    #[arg(long, default_value = "0")]
+    pub senders: u64,
+    /// Gas limit for the genesis block (hex or decimal).
+    #[arg(long, default_value = "0x2540BE400")]
+    pub gas_limit: String,
+    /// Max transactions per block. 0 = use a very large value (10,000,000).
+    #[arg(long, default_value = "10000")]
+    pub max_tx_per_block: u64,
+    /// Pre-deploy BenchToken bytecode (hex). If set, deploys at BENCH_TOKEN_ADDR.
+    #[arg(long)]
+    pub bench_token_code: Option<String>,
+    /// Pre-deploy BenchSwap bytecode (hex). If set, deploys at BENCH_SWAP_ADDR.
+    #[arg(long)]
+    pub bench_swap_code: Option<String>,
+}
+
+pub fn build_genesis(args: &WriteGenesisArgs) -> eyre::Result<Value> {
+    let max_tx = if args.max_tx_per_block == 0 { 10_000_000 } else { args.max_tx_per_block };
+
+    let mut alloc = Map::new();
+
+    // Fee vault
+    alloc.insert(
+        "530000000000000000000000000000000000000a".to_string(),
+        json!({ "balance": "0x0" }),
+    );
+
+    // Legacy single sender (backward compat)
+    if let Some(sender) = &args.sender {
+        let addr = sender.strip_prefix("0x").unwrap_or(sender);
+        alloc.insert(addr.to_lowercase(), json!({ "balance": format!("0x{:x}", U256::from_str_radix(&args.sender_balance, 10).unwrap_or(U256::from(10).pow(U256::from(27)))) }));
+    }
+
+    // Multi-sender accounts
+    let bench_senders = if args.senders > 0 {
+        let senders = generate_senders(args.senders);
+        let balance_hex = format!("0x{:x}", U256::from(10).pow(U256::from(27)));
+        for s in &senders {
+            alloc.insert(
+                format!("{:x}", s.address),
+                json!({ "balance": &balance_hex }),
+            );
+        }
+        senders
+    } else {
+        vec![]
+    };
+
+    // Pre-deploy BenchToken
+    if let Some(code) = &args.bench_token_code {
+        let code_hex = if code.starts_with("0x") { code.clone() } else { format!("0x{code}") };
+        let mut storage = Map::new();
+        // totalSupply at slot 0
+        let supply = U256::from(10).pow(U256::from(30));
+        storage.insert(
+            format!("{:066x}", 0u64),
+            json!(format!("0x{:064x}", supply)),
+        );
+        // balanceOf[sender] at slot 1 for each bench sender
+        for s in &bench_senders {
+            let slot = mapping_slot(s.address, 1);
+            storage.insert(
+                format!("0x{:x}", slot),
+                json!(format!("0x{:064x}", U256::from(10).pow(U256::from(27)))),
+            );
+        }
+        alloc.insert(
+            format!("{:x}", BENCH_TOKEN_ADDR),
+            json!({
+                "code": code_hex,
+                "balance": "0x0",
+                "storage": storage,
+            }),
+        );
+    }
+
+    // Pre-deploy BenchSwap
+    if let Some(code) = &args.bench_swap_code {
+        let code_hex = if code.starts_with("0x") { code.clone() } else { format!("0x{code}") };
+        let mut storage = Map::new();
+        let reserve = U256::from(10).pow(U256::from(24));
+        // reserve0 at slot 0
+        storage.insert(format!("{:066x}", 0u64), json!(format!("0x{:064x}", reserve)));
+        // reserve1 at slot 1
+        storage.insert(format!("{:066x}", 1u64), json!(format!("0x{:064x}", reserve)));
+        // balance0[sender] at slot 2 for each bench sender
+        for s in &bench_senders {
+ let slot = mapping_slot(s.address, 2); + storage.insert( + format!("0x{:x}", slot), + json!(format!("0x{:064x}", U256::from(10).pow(U256::from(24)))), + ); + } + alloc.insert( + format!("{:x}", BENCH_SWAP_ADDR), + json!({ + "code": code_hex, + "balance": "0x0", + "storage": storage, + }), + ); + } + + let genesis = json!({ + "config": { + "chainId": 99999, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "shanghaiTime": 0, + "morph": { + "useZktrie": false, + "maxTxPerBlock": max_tx, + "maxTxPayloadBytesPerBlock": 1_073_741_824u64, + "feeVaultAddress": "0x530000000000000000000000000000000000000a", + "viridianBlock": 0, + "emeraldBlock": 0, + "jadeForkTime": 0 + } + }, + "difficulty": "0x0", + "gasLimit": &args.gas_limit, + "alloc": alloc + }); + + Ok(genesis) +} + +pub fn run(args: WriteGenesisArgs) -> eyre::Result<()> { + let genesis = build_genesis(&args)?; + let json = serde_json::to_string_pretty(&genesis)?; + std::fs::write(&args.output, json)?; + eprintln!("Genesis written to {}", args.output); + Ok(()) +} +``` + +- [ ] **Step 2: Verify compilation** + +```bash +cargo check -p bench-block-exec +``` + +Note: The existing `WriteGenesis` variant in `main.rs` uses `genesis::WriteGenesisArgs`. The old `--sender` field is now `Option` instead of required `String`. If the old CLI tests call `write-genesis --sender ...` it still works. New usage: `write-genesis --senders 100 --gas-limit 0x2540BE400 --max-tx-per-block 0 --bench-token-code 0x... --bench-swap-code 0x...`. + +- [ ] **Step 3: Commit** + +```bash +git add bin/bench-block-exec/src/genesis.rs +git commit -m "feat(bench): extend genesis with multi-sender, contract pre-deploy, configurable limits" +``` + +--- + +## Task 6: Mode A — Pure Execution + +**Files:** +- Create: `bin/bench-block-exec/src/mode_exec.rs` + +- [ ] **Step 1: Write mode_exec.rs** + +```rust +use clap::Args; +use std::fs::OpenOptions; +use std::io::Write; + +use crate::engine::{AssembleL2BlockParams, BlockTimingV2, EngineClient}; +use crate::tx_factory::{self, BenchSender, Workload}; + +#[derive(Args)] +pub struct ExecArgs { + #[arg(long, default_value = "http://127.0.0.1:8551")] + pub engine_rpc: String, + #[arg(long)] + pub jwt_secret: String, + #[arg(long)] + pub workload: String, + #[arg(long)] + pub txs_per_block: u64, + #[arg(long, default_value = "50")] + pub blocks: u64, + #[arg(long)] + pub output: String, + #[arg(long, default_value = "unknown")] + pub engine_name: String, + #[arg(long, default_value = "99999")] + pub chain_id: u64, +} + +pub async fn run(args: ExecArgs) -> eyre::Result<()> { + let workload: Workload = args.workload.parse()?; + let jwt_hex = std::fs::read_to_string(&args.jwt_secret)?.trim().to_string(); + let client = EngineClient::new(&args.engine_rpc, jwt_hex)?; + + // Single sender for pure execution mode + let mut senders = tx_factory::generate_senders(1); + + let mut file = OpenOptions::new() + .create(true).write(true).truncate(true) + .open(&args.output)?; + + // Pre-generate ALL transactions for ALL blocks upfront + eprintln!("Pre-generating {} blocks × {} txs...", args.blocks, args.txs_per_block); + let mut all_block_txs: Vec> = Vec::with_capacity(args.blocks as usize); + for _ in 0..args.blocks { + let txs = tx_factory::build_block_txs( + &mut senders, + workload, + args.txs_per_block, + args.chain_id, + )?; + 
all_block_txs.push(txs); + } + eprintln!("Pre-generation complete. Starting benchmark..."); + + let mut cumulative_txs: u64 = 0; + + let mut consecutive_errors = 0u32; + + for (i, txs) in all_block_txs.into_iter().enumerate() { + let block_number = (i + 1) as u64; + let expected_count = txs.len() as u64; + + // Assemble: inject transactions directly (bypass txpool) + let params = AssembleL2BlockParams { + number: block_number, + transactions: txs, + timestamp: Some(block_number), + }; + let result = client.assemble_l2_block(&args.engine_rpc, params).await; + let (data, assemble_ms, tx_count, gas_used, import_ms, is_error) = match result { + Ok((data, asm_ms)) => { + let tc = data.transactions.len() as u64; + let gu = data.gas_used; + match client.new_l2_block(&args.engine_rpc, data).await { + Ok(imp_ms) => { consecutive_errors = 0; (None, asm_ms, tc, gu, imp_ms, false) } + Err(e) => { + eprintln!("Import error block {}: {e}", block_number); + consecutive_errors += 1; + (None, asm_ms, tc, gu, 0.0, true) + } + } + } + Err(e) => { + eprintln!("Assemble error block {}: {e}", block_number); + consecutive_errors += 1; + (None, 0.0, 0, 0, 0.0, true) + } + }; + + if consecutive_errors >= 5 { + eprintln!("5 consecutive errors, terminating."); + break; + } + + cumulative_txs += tx_count; + + let mut timing = BlockTimingV2 { + block_number, + tx_count, + expected_tx_count: expected_count, + engine: args.engine_name.clone(), + mode: "exec".to_string(), + workload: workload.as_str().to_string(), + senders: 1, + warmup_blocks: 0, + submit_ms: 0.0, + pool_wait_ms: 0.0, + assemble_ms, + import_ms, + total_ms: 0.0, + gas_used, + error: is_error, + tps: 0.0, + mgas_per_sec: 0.0, + inclusion_rate: 0.0, + cumulative_blocks: block_number, + cumulative_txs, + rolling_avg_tps_100: None, + }; + timing.finalize(); + + let line = serde_json::to_string(&timing)?; + writeln!(file, "{}", line)?; + + if block_number % 10 == 0 || block_number == args.blocks { + eprintln!( + "Block {}/{}: {} txs, asm={:.1}ms, imp={:.1}ms, {:.0} TPS, {:.0} MGas/s", + block_number, args.blocks, tx_count, + timing.assemble_ms, timing.import_ms, + timing.tps, timing.mgas_per_sec, + ); + } + } + + eprintln!("Mode exec complete. Results: {}", args.output); + Ok(()) +} +``` + +- [ ] **Step 2: Register module in main.rs** + +Add `pub mod mode_exec;` to `main.rs`. 
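+
+Optionally, since mode A is the first consumer of `BlockTimingV2::finalize()`, a small sanity test of the derived metrics could be added next to the struct in `engine.rs`. This is only a sketch (the numbers are illustrative, and it assumes the struct exactly as defined in Task 2):
+
+```rust
+#[cfg(test)]
+mod timing_tests {
+    use super::*;
+
+    #[test]
+    fn finalize_derives_tps_mgas_and_inclusion() {
+        // 4,000 txs and 84 MGas over 500 ms should give 8,000 TPS and 168 MGas/s.
+        let mut t = BlockTimingV2 {
+            block_number: 1,
+            tx_count: 4_000,
+            expected_tx_count: 5_000,
+            engine: "reth".into(),
+            mode: "exec".into(),
+            workload: "eth-transfer".into(),
+            senders: 1,
+            warmup_blocks: 0,
+            submit_ms: 0.0,
+            pool_wait_ms: 0.0,
+            assemble_ms: 400.0,
+            import_ms: 100.0,
+            total_ms: 0.0,
+            gas_used: 84_000_000,
+            tps: 0.0,
+            mgas_per_sec: 0.0,
+            inclusion_rate: 0.0,
+            cumulative_blocks: 1,
+            cumulative_txs: 4_000,
+            rolling_avg_tps_100: None,
+            error: false,
+        };
+        t.finalize();
+        assert_eq!(t.total_ms, 500.0);
+        assert!((t.tps - 8_000.0).abs() < 1e-9);
+        assert!((t.mgas_per_sec - 168.0).abs() < 1e-9);
+        assert!((t.inclusion_rate - 0.8).abs() < 1e-9);
+    }
+}
+```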
+ +- [ ] **Step 3: Verify compilation** + +```bash +cargo check -p bench-block-exec +``` + +- [ ] **Step 4: Commit** + +```bash +git add bin/bench-block-exec/src/mode_exec.rs bin/bench-block-exec/src/main.rs +git commit -m "feat(bench): add Mode A pure execution benchmark (mode_exec)" +``` + +--- + +## Task 7: Mode B — End-to-End + +**Files:** +- Create: `bin/bench-block-exec/src/mode_e2e.rs` + +- [ ] **Step 1: Write mode_e2e.rs** + +```rust +use alloy_primitives::Bytes; +use clap::Args; +use std::fs::OpenOptions; +use std::io::Write; +use std::time::Instant; + +use crate::engine::{AssembleL2BlockParams, BlockTimingV2, EngineClient}; +use crate::tx_factory::{self, BenchSender, Workload}; + +#[derive(Args)] +pub struct E2eArgs { + #[arg(long, default_value = "http://127.0.0.1:8551")] + pub engine_rpc: String, + #[arg(long)] + pub jwt_secret: String, + #[arg(long, default_value = "http://127.0.0.1:8545")] + pub http_rpc: String, + #[arg(long)] + pub workload: String, + #[arg(long)] + pub txs_per_block: u64, + #[arg(long, default_value = "200")] + pub blocks: u64, + #[arg(long, default_value = "100")] + pub senders: u64, + #[arg(long)] + pub output: String, + #[arg(long, default_value = "unknown")] + pub engine_name: String, + #[arg(long, default_value = "99999")] + pub chain_id: u64, +} + +/// Send raw transactions to txpool in batches, using concurrent tasks. +async fn submit_to_txpool(http_rpc: &str, txs: &[Bytes], concurrency: usize) -> eyre::Result { + let start = Instant::now(); + let client = reqwest::Client::new(); + let chunk_size = 500; + + // Split into concurrent groups + let chunks: Vec<&[Bytes]> = txs.chunks(chunk_size).collect(); + let sem = std::sync::Arc::new(tokio::sync::Semaphore::new(concurrency)); + + let mut handles = Vec::new(); + for chunk in chunks { + let client = client.clone(); + let url = http_rpc.to_string(); + let sem = sem.clone(); + let batch: Vec = chunk.iter().enumerate().map(|(i, tx)| { + serde_json::json!({ + "jsonrpc": "2.0", + "method": "eth_sendRawTransaction", + "params": [format!("0x{}", hex::encode(tx))], + "id": i + 1 + }) + }).collect(); + + handles.push(tokio::spawn(async move { + let _permit = sem.acquire().await.unwrap(); + let resp = client.post(&url).json(&batch).send().await?; + resp.error_for_status()?; + Ok::<_, eyre::Report>(()) + })); + } + + for h in handles { + h.await??; + } + + Ok(start.elapsed().as_secs_f64() * 1000.0) +} + +/// Wait for all senders' pending nonces to reach expected values. +async fn wait_for_pool( + http_rpc: &str, + senders: &[BenchSender], + timeout_secs: u64, +) -> eyre::Result { + let start = Instant::now(); + let client = reqwest::Client::new(); + let deadline = start + std::time::Duration::from_secs(timeout_secs); + + for sender in senders { + loop { + if Instant::now() > deadline { + eyre::bail!("txpool wait timeout for sender {:?}", sender.address); + } + let resp: serde_json::Value = client.post(http_rpc) + .json(&serde_json::json!({ + "jsonrpc": "2.0", + "method": "eth_getTransactionCount", + "params": [format!("0x{:x}", sender.address), "pending"], + "id": 1 + })) + .send().await? 
+ .json().await?; + + let nonce_hex = resp["result"].as_str().unwrap_or("0x0"); + let nonce = u64::from_str_radix(nonce_hex.strip_prefix("0x").unwrap_or("0"), 16)?; + if nonce >= sender.nonce { + break; + } + tokio::time::sleep(std::time::Duration::from_millis(50)).await; + } + } + + Ok(start.elapsed().as_secs_f64() * 1000.0) +} + +mod hex { + pub fn encode(data: &[u8]) -> String { + data.iter().map(|b| format!("{:02x}", b)).collect() + } +} + +pub async fn run(args: E2eArgs) -> eyre::Result<()> { + let workload: Workload = args.workload.parse()?; + let jwt_hex = std::fs::read_to_string(&args.jwt_secret)?.trim().to_string(); + let client = EngineClient::new(&args.engine_rpc, jwt_hex)?; + + let mut senders = tx_factory::generate_senders(args.senders); + let concurrency = std::cmp::min(args.senders as usize, 16); + + let mut file = OpenOptions::new() + .create(true).write(true).truncate(true) + .open(&args.output)?; + + let mut cumulative_txs: u64 = 0; + + for block_idx in 0..args.blocks { + let block_number = (block_idx + 1) as u64; + + // Build transactions for this block + let txs = tx_factory::build_block_txs( + &mut senders, + workload, + args.txs_per_block, + args.chain_id, + )?; + let expected_count = txs.len() as u64; + + // Submit to txpool (timed) + let submit_ms = submit_to_txpool(&args.http_rpc, &txs, concurrency).await?; + + // Wait for txpool acceptance (timed) + let pool_wait_ms = wait_for_pool(&args.http_rpc, &senders, 60).await?; + + // Assemble block (pull from txpool, empty transactions array) + let params = AssembleL2BlockParams { + number: block_number, + transactions: vec![], + timestamp: Some(block_number), + }; + let (data, assemble_ms) = client.assemble_l2_block(&args.engine_rpc, params).await?; + let tx_count = data.transactions.len() as u64; + let gas_used = data.gas_used; + + // Import + let import_ms = client.new_l2_block(&args.engine_rpc, data).await?; + + cumulative_txs += tx_count; + + let mut timing = BlockTimingV2 { + block_number, + tx_count, + expected_tx_count: expected_count, + engine: args.engine_name.clone(), + mode: "e2e".to_string(), + workload: workload.as_str().to_string(), + senders: args.senders, + warmup_blocks: 0, + submit_ms, + pool_wait_ms, + assemble_ms, + import_ms, + total_ms: 0.0, + gas_used, + tps: 0.0, + mgas_per_sec: 0.0, + inclusion_rate: 0.0, + cumulative_blocks: block_number, + cumulative_txs, + rolling_avg_tps_100: None, + error: false, + }; + timing.finalize(); + + let line = serde_json::to_string(&timing)?; + writeln!(file, "{}", line)?; + + if block_number % 10 == 0 || block_number == args.blocks { + eprintln!( + "Block {}/{}: {} txs (incl {:.0}%), sub={:.0}ms pool={:.0}ms asm={:.1}ms imp={:.1}ms | {:.0} TPS", + block_number, args.blocks, tx_count, + timing.inclusion_rate * 100.0, + submit_ms, pool_wait_ms, + timing.assemble_ms, timing.import_ms, + timing.tps, + ); + } + } + + eprintln!("Mode e2e complete. Results: {}", args.output); + Ok(()) +} +``` + +- [ ] **Step 2: Register module, verify compilation** + +Add `pub mod mode_e2e;` to `main.rs`. 
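+
+Because e2e mode is the first path that exercises many senders at once, it may be worth guarding the `tx_factory` helpers it relies on with two quick tests appended to `tx_factory.rs`. This is a sketch (the counts and chain id are arbitrary), not part of the task's commits:
+
+```rust
+#[cfg(test)]
+mod tx_factory_tests {
+    use super::*;
+
+    #[test]
+    fn block_txs_are_split_round_robin() {
+        let mut senders = generate_senders(3);
+        let txs = build_block_txs(&mut senders, Workload::EthTransfer, 10, 99999).unwrap();
+        assert_eq!(txs.len(), 10);
+        // 10 txs over 3 senders: the first sender absorbs the remainder.
+        assert_eq!(senders[0].nonce, 4);
+        assert_eq!(senders[1].nonce, 3);
+        assert_eq!(senders[2].nonce, 3);
+    }
+
+    #[test]
+    fn sender_keys_are_deterministic() {
+        // Same count means same addresses, so genesis alloc and runtime signing agree.
+        let a = generate_senders(2);
+        let b = generate_senders(2);
+        assert_eq!(a[0].address, b[0].address);
+        assert_eq!(a[1].address, b[1].address);
+    }
+}
+```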
+ +```bash +cargo check -p bench-block-exec +``` + +- [ ] **Step 3: Commit** + +```bash +git add bin/bench-block-exec/src/mode_e2e.rs bin/bench-block-exec/src/main.rs +git commit -m "feat(bench): add Mode B end-to-end benchmark with concurrent txpool submission" +``` + +--- + +## Task 8: Mode C — Sustained Block Production + +**Files:** +- Create: `bin/bench-block-exec/src/mode_sustained.rs` + +- [ ] **Step 1: Write mode_sustained.rs** + +```rust +use alloy_primitives::Bytes; +use clap::Args; +use std::collections::VecDeque; +use std::fs::OpenOptions; +use std::io::Write; +use std::time::Instant; + +use crate::engine::{AssembleL2BlockParams, BlockTimingV2, EngineClient}; +use crate::mode_e2e::{submit_to_txpool, wait_for_pool}; +use crate::tx_factory::{self, Workload}; + +#[derive(Args)] +pub struct SustainedArgs { + #[arg(long, default_value = "http://127.0.0.1:8551")] + pub engine_rpc: String, + #[arg(long)] + pub jwt_secret: String, + #[arg(long, default_value = "http://127.0.0.1:8545")] + pub http_rpc: String, + #[arg(long)] + pub workload: String, + #[arg(long)] + pub txs_per_block: u64, + #[arg(long, default_value = "1000")] + pub blocks: u64, + #[arg(long, default_value = "0")] + pub warmup_blocks: u64, + #[arg(long, default_value = "100")] + pub senders: u64, + #[arg(long)] + pub output: String, + #[arg(long, default_value = "unknown")] + pub engine_name: String, + #[arg(long, default_value = "99999")] + pub chain_id: u64, +} + +pub async fn run(args: SustainedArgs) -> eyre::Result<()> { + let workload: Workload = args.workload.parse()?; + let jwt_hex = std::fs::read_to_string(&args.jwt_secret)?.trim().to_string(); + let client = EngineClient::new(&args.engine_rpc, jwt_hex)?; + + let mut senders = tx_factory::generate_senders(args.senders); + let concurrency = std::cmp::min(args.senders as usize, 16); + let total_blocks = args.warmup_blocks + args.blocks; + + let mut file = OpenOptions::new() + .create(true).write(true).truncate(true) + .open(&args.output)?; + + let mut cumulative_txs: u64 = 0; + let mut cumulative_measured_blocks: u64 = 0; + let mut rolling_tps: VecDeque = VecDeque::with_capacity(100); + + for block_idx in 0..total_blocks { + let block_number = (block_idx + 1) as u64; + let is_warmup = block_idx < args.warmup_blocks; + + // Build transactions + let txs = tx_factory::build_block_txs( + &mut senders, + workload, + args.txs_per_block, + args.chain_id, + )?; + let expected_count = txs.len() as u64; + + // Submit to txpool + let submit_ms = submit_to_txpool(&args.http_rpc, &txs, concurrency).await?; + let pool_wait_ms = wait_for_pool(&args.http_rpc, &senders, 60).await?; + + // Assemble + let params = AssembleL2BlockParams { + number: block_number, + transactions: vec![], + timestamp: Some(block_number), + }; + let (data, assemble_ms) = client.assemble_l2_block(&args.engine_rpc, params).await?; + let tx_count = data.transactions.len() as u64; + let gas_used = data.gas_used; + + // Import + let import_ms = client.new_l2_block(&args.engine_rpc, data).await?; + + if is_warmup { + if block_number % 50 == 0 { + eprintln!("Warmup block {}/{}", block_number, args.warmup_blocks); + } + continue; + } + + cumulative_measured_blocks += 1; + cumulative_txs += tx_count; + + let total_ms = submit_ms + pool_wait_ms + assemble_ms + import_ms; + let tps = if total_ms > 0.0 { tx_count as f64 / (total_ms / 1000.0) } else { 0.0 }; + + // Rolling average + rolling_tps.push_back(tps); + if rolling_tps.len() > 100 { + rolling_tps.pop_front(); + } + let rolling_avg = if rolling_tps.len() >= 
100 { + Some(rolling_tps.iter().sum::() / rolling_tps.len() as f64) + } else { + None + }; + + let mut timing = BlockTimingV2 { + block_number, + tx_count, + expected_tx_count: expected_count, + engine: args.engine_name.clone(), + mode: "sustained".to_string(), + workload: workload.as_str().to_string(), + senders: args.senders, + warmup_blocks: args.warmup_blocks, + submit_ms, + pool_wait_ms, + assemble_ms, + import_ms, + total_ms: 0.0, + gas_used, + tps: 0.0, + mgas_per_sec: 0.0, + inclusion_rate: 0.0, + cumulative_blocks: cumulative_measured_blocks, + cumulative_txs, + rolling_avg_tps_100: rolling_avg, + error: false, + }; + timing.finalize(); + + let line = serde_json::to_string(&timing)?; + writeln!(file, "{}", line)?; + + if cumulative_measured_blocks % 100 == 0 { + eprintln!( + "Sustained block {}/{} (abs {}): {:.0} TPS, rolling100={:.0} TPS, {:.0} MGas/s", + cumulative_measured_blocks, args.blocks, block_number, + timing.tps, + rolling_avg.unwrap_or(0.0), + timing.mgas_per_sec, + ); + } + } + + eprintln!("Mode sustained complete. Results: {}", args.output); + Ok(()) +} +``` + +- [ ] **Step 2: Make submit_to_txpool and wait_for_pool public in mode_e2e.rs** + +In `mode_e2e.rs`, change these functions from module-private to `pub`: + +```rust +pub async fn submit_to_txpool(...) -> eyre::Result { +pub async fn wait_for_pool(...) -> eyre::Result { +``` + +Also make the `hex` module `pub(crate)`. + +- [ ] **Step 3: Register module, verify compilation** + +Add `pub mod mode_sustained;` to `main.rs`. + +```bash +cargo check -p bench-block-exec +``` + +- [ ] **Step 4: Commit** + +```bash +git add bin/bench-block-exec/src/mode_sustained.rs bin/bench-block-exec/src/mode_e2e.rs bin/bench-block-exec/src/main.rs +git commit -m "feat(bench): add Mode C sustained block production with warmup and rolling TPS" +``` + +--- + +## Task 9: Sweep — Automatic Inflection Point Discovery + +**Files:** +- Create: `bin/bench-block-exec/src/sweep.rs` + +- [ ] **Step 1: Write sweep.rs** + +```rust +use clap::Args; +use crate::mode_exec; +use crate::report::percentile; +use crate::engine::BlockTimingV2; +use std::io::BufRead; + +#[derive(Args)] +pub struct SweepArgs { + #[arg(long, default_value = "http://127.0.0.1:8551")] + pub engine_rpc: String, + #[arg(long)] + pub jwt_secret: String, + #[arg(long)] + pub workload: String, + #[arg(long, default_value = "30")] + pub blocks_per_step: u64, + #[arg(long)] + pub output_dir: String, + #[arg(long, default_value = "unknown")] + pub engine_name: String, + #[arg(long, default_value = "99999")] + pub chain_id: u64, +} + +/// Coarse scan points (exponential). 
+const COARSE_POINTS: &[u64] = &[1000, 2000, 5000, 10_000, 20_000, 50_000, 100_000]; + +#[derive(Debug, serde::Serialize)] +pub struct SweepResult { + pub engine: String, + pub workload: String, + pub peak_tps: f64, + pub peak_mgas_s: f64, + pub inflection_txs: u64, + pub points: Vec, +} + +#[derive(Debug, serde::Serialize)] +pub struct SweepPoint { + pub txs_per_block: u64, + pub median_assemble_ms: f64, + pub median_total_ms: f64, + pub p95_total_ms: f64, + pub median_tps: f64, + pub median_mgas_s: f64, + pub per_tx_assemble_us: f64, +} + +fn read_timings(path: &str) -> eyre::Result> { + let file = std::fs::File::open(path)?; + let reader = std::io::BufReader::new(file); + let mut timings = Vec::new(); + for line in reader.lines() { + let t: BlockTimingV2 = serde_json::from_str(&line?)?; + if !t.error { + timings.push(t); + } + } + Ok(timings) +} + +fn analyze_point(timings: &[BlockTimingV2]) -> SweepPoint { + let n = timings.len(); + let txs_per_block = if n > 0 { timings[0].expected_tx_count } else { 0 }; + + let mut asm: Vec = timings.iter().map(|t| t.assemble_ms).collect(); + let mut tot: Vec = timings.iter().map(|t| t.total_ms).collect(); + let mut tps: Vec = timings.iter().map(|t| t.tps).collect(); + let mut mgas: Vec = timings.iter().map(|t| t.mgas_per_sec).collect(); + + asm.sort_by(|a, b| a.partial_cmp(b).unwrap()); + tot.sort_by(|a, b| a.partial_cmp(b).unwrap()); + tps.sort_by(|a, b| a.partial_cmp(b).unwrap()); + mgas.sort_by(|a, b| a.partial_cmp(b).unwrap()); + + let median_asm = percentile(&asm, 50.0); + let per_tx_us = if txs_per_block > 0 { median_asm * 1000.0 / txs_per_block as f64 } else { 0.0 }; + + SweepPoint { + txs_per_block, + median_assemble_ms: median_asm, + median_total_ms: percentile(&tot, 50.0), + p95_total_ms: percentile(&tot, 95.0), + median_tps: percentile(&tps, 50.0), + median_mgas_s: percentile(&mgas, 50.0), + per_tx_assemble_us: per_tx_us, + } +} + +pub async fn run(args: SweepArgs) -> eyre::Result<()> { + std::fs::create_dir_all(&args.output_dir)?; + + let mut points: Vec = Vec::new(); + let mut min_per_tx_us = f64::MAX; + + // Coarse scan + eprintln!("=== Sweep coarse scan: {:?}", COARSE_POINTS); + for &txs in COARSE_POINTS { + let output = format!("{}/{}-{}-{}.jsonl", + args.output_dir, args.engine_name, args.workload, txs); + + let exec_args = mode_exec::ExecArgs { + engine_rpc: args.engine_rpc.clone(), + jwt_secret: args.jwt_secret.clone(), + workload: args.workload.clone(), + txs_per_block: txs, + blocks: args.blocks_per_step, + output: output.clone(), + engine_name: args.engine_name.clone(), + chain_id: args.chain_id, + }; + + match mode_exec::run(exec_args).await { + Ok(()) => {} + Err(e) => { + eprintln!("Sweep failed at txs={}: {e}. Treating as hard limit.", txs); + break; + } + } + + let timings = read_timings(&output)?; + let point = analyze_point(&timings); + eprintln!( + " txs={}: asm_p50={:.1}ms, per_tx={:.2}us, tps={:.0}, mgas={:.0}", + txs, point.median_assemble_ms, point.per_tx_assemble_us, + point.median_tps, point.median_mgas_s, + ); + + if point.per_tx_assemble_us < min_per_tx_us { + min_per_tx_us = point.per_tx_assemble_us; + } + + points.push(point); + + // Check for degradation: per-tx time > 1.5× minimum + if point.per_tx_assemble_us > min_per_tx_us * 1.5 { + eprintln!(" Degradation detected at txs={}. 
Stopping coarse scan.", txs); + break; + } + } + + // Find inflection point + let (inflection_txs, peak_tps, peak_mgas_s) = if let Some(best) = points.iter() + .max_by(|a, b| a.median_tps.partial_cmp(&b.median_tps).unwrap()) + { + (best.txs_per_block, best.median_tps, best.median_mgas_s) + } else { + (0, 0.0, 0.0) + }; + + let result = SweepResult { + engine: args.engine_name.clone(), + workload: args.workload.clone(), + peak_tps, + peak_mgas_s, + inflection_txs, + points, + }; + + let summary_path = format!("{}/{}-{}-sweep-summary.json", + args.output_dir, args.engine_name, args.workload); + std::fs::write(&summary_path, serde_json::to_string_pretty(&result)?)?; + + eprintln!("\n=== Sweep complete: peak_tps={:.0}, peak_mgas={:.0}, inflection={}txs", + peak_tps, peak_mgas_s, inflection_txs); + eprintln!("Summary: {}", summary_path); + + Ok(()) +} +``` + +- [ ] **Step 2: Make `percentile` function public in report.rs** + +In `report.rs`, ensure `percentile` is `pub`: + +```rust +pub fn percentile(sorted: &[f64], p: f64) -> f64 { +``` + +- [ ] **Step 3: Register module, verify compilation** + +Add `pub mod sweep;` to `main.rs`. + +```bash +cargo check -p bench-block-exec +``` + +- [ ] **Step 4: Commit** + +```bash +git add bin/bench-block-exec/src/sweep.rs bin/bench-block-exec/src/report.rs bin/bench-block-exec/src/main.rs +git commit -m "feat(bench): add sweep subcommand for automatic inflection point discovery" +``` + +--- + +## Task 10: CLI Integration + +**Files:** +- Modify: `bin/bench-block-exec/src/main.rs` + +- [ ] **Step 1: Add new subcommands to CLI** + +Replace the content of `main.rs` with: + +```rust +use clap::{Parser, Subcommand}; + +pub mod engine; +pub mod genesis; +pub mod mode_e2e; +pub mod mode_exec; +pub mod mode_sustained; +pub mod report; +pub mod sweep; +pub mod tx_factory; +pub mod verify; +pub mod workload; + +#[derive(Parser)] +#[command(name = "bench-block-exec", about = "Morph block execution benchmark")] +pub struct Cli { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand)] +pub enum Command { + /// Generate a benchmark genesis file. + WriteGenesis(genesis::WriteGenesisArgs), + /// Run the legacy workload benchmark (backward compat). + RunWorkload(workload::RunWorkloadArgs), + /// Run a benchmark in the specified mode (exec, e2e, sustained). + Run { + #[command(subcommand)] + mode: RunMode, + }, + /// Automatically find the TPS inflection point. + Sweep(sweep::SweepArgs), + /// Verify state consistency between two nodes. + VerifyState(verify::VerifyStateArgs), + /// Summarize benchmark results into TSV. + Summarize(report::SummarizeArgs), +} + +#[derive(Subcommand)] +pub enum RunMode { + /// Mode A: Pure execution (bypass txpool). + Exec(mode_exec::ExecArgs), + /// Mode B: End-to-end (txpool → assembly → import). + E2e(mode_e2e::E2eArgs), + /// Mode C: Sustained block production with optional warmup. 
+ Sustained(mode_sustained::SustainedArgs), +} + +#[tokio::main] +pub async fn main() -> eyre::Result<()> { + let cli = Cli::parse(); + match cli.command { + Command::WriteGenesis(args) => genesis::run(args), + Command::RunWorkload(args) => workload::run(args).await, + Command::Run { mode } => match mode { + RunMode::Exec(args) => mode_exec::run(args).await, + RunMode::E2e(args) => mode_e2e::run(args).await, + RunMode::Sustained(args) => mode_sustained::run(args).await, + }, + Command::Sweep(args) => sweep::run(args).await, + Command::VerifyState(args) => verify::run(args).await, + Command::Summarize(args) => report::summarize(args), + } +} +``` + +- [ ] **Step 2: Verify compilation and CLI help** + +```bash +cargo check -p bench-block-exec +cargo run -p bench-block-exec -- --help +cargo run -p bench-block-exec -- run --help +cargo run -p bench-block-exec -- run exec --help +cargo run -p bench-block-exec -- sweep --help +``` + +Expected: all help texts display correctly with all arguments documented. + +- [ ] **Step 3: Commit** + +```bash +git add bin/bench-block-exec/src/main.rs +git commit -m "feat(bench): integrate all modes and sweep into CLI" +``` + +--- + +## Task 11: Extend Report Summarization + +**Files:** +- Modify: `bin/bench-block-exec/src/report.rs` + +- [ ] **Step 1: Add V2 summary support** + +Add a new function to `report.rs` that handles `BlockTimingV2` files alongside the existing `BlockTiming` summarizer. Append to the file: + +```rust +use crate::engine::BlockTimingV2; + +/// Summarize BlockTimingV2 JSON-lines files into extended TSV. +pub fn summarize_v2(results_dir: &str, output: Option<&str>) -> eyre::Result<()> { + let dir = Path::new(results_dir); + let files = walkdir(dir)?; + + let header = "engine\tmode\tworkload\tsenders\twarmup\tblocks\t\ + avg_txs\tinclusion%\t\ + avg_asm_ms\tavg_imp_ms\tavg_tot_ms\t\ + p50_ms\tp95_ms\tp99_ms\t\ + peak_tps\tavg_tps\tavg_mgas_s\t\ + degradation%\terrors"; + + let mut rows: Vec = Vec::new(); + + for file_path in &files { + let ext = file_path.extension().and_then(|e| e.to_str()); + if ext != Some("jsonl") { + continue; + } + + let reader = BufReader::new(fs::File::open(file_path)?); + let mut entries: Vec = Vec::new(); + for line in reader.lines() { + if let Ok(t) = serde_json::from_str::(&line?) 
{ + entries.push(t); + } + } + + if entries.len() <= 10 { + continue; + } + + // Skip first 10 as warmup + let data = &entries[10..]; + let n = data.len(); + let errors = data.iter().filter(|t| t.error).count(); + + let meta = &data[0]; // for grouping fields + + let avg_txs = data.iter().map(|t| t.tx_count as f64).sum::() / n as f64; + let avg_incl = data.iter().map(|t| t.inclusion_rate).sum::() / n as f64 * 100.0; + let avg_asm = data.iter().map(|t| t.assemble_ms).sum::() / n as f64; + let avg_imp = data.iter().map(|t| t.import_ms).sum::() / n as f64; + let avg_tot = data.iter().map(|t| t.total_ms).sum::() / n as f64; + + let mut tots: Vec = data.iter().map(|t| t.total_ms).collect(); + tots.sort_by(|a, b| a.partial_cmp(b).unwrap()); + let p50 = percentile(&tots, 50.0); + let p95 = percentile(&tots, 95.0); + let p99 = percentile(&tots, 99.0); + + let tps_values: Vec = data.iter().map(|t| t.tps).collect(); + let peak_tps = tps_values.iter().cloned().fold(0.0_f64, f64::max); + let avg_tps = tps_values.iter().sum::() / n as f64; + let avg_mgas = data.iter().map(|t| t.mgas_per_sec).sum::() / n as f64; + + // Degradation: last 100 blocks avg TPS vs first 100 blocks avg TPS + let degradation = if n >= 200 { + let first100: f64 = tps_values[..100].iter().sum::() / 100.0; + let last100: f64 = tps_values[n-100..].iter().sum::() / 100.0; + if first100 > 0.0 { (last100 / first100 - 1.0) * 100.0 } else { 0.0 } + } else { + 0.0 + }; + + rows.push(format!( + "{}\t{}\t{}\t{}\t{}\t{}\t\ + {:.0}\t{:.1}\t\ + {:.1}\t{:.1}\t{:.1}\t\ + {:.1}\t{:.1}\t{:.1}\t\ + {:.0}\t{:.0}\t{:.0}\t\ + {:.1}\t{}", + meta.engine, meta.mode, meta.workload, meta.senders, meta.warmup_blocks, n, + avg_txs, avg_incl, + avg_asm, avg_imp, avg_tot, + p50, p95, p99, + peak_tps, avg_tps, avg_mgas, + degradation, errors, + )); + } + + let mut out: Box = if let Some(path) = output { + Box::new(fs::File::create(path)?) + } else { + Box::new(std::io::stdout()) + }; + + writeln!(out, "{}", header)?; + for row in &rows { + writeln!(out, "{}", row)?; + } + + Ok(()) +} +``` + +- [ ] **Step 2: Update SummarizeArgs to support V2 mode** + +Add a `--v2` flag to `SummarizeArgs`: + +```rust +#[derive(Args)] +pub struct SummarizeArgs { + #[arg(long)] + pub results_dir: String, + #[arg(long)] + pub output: Option, + /// Use V2 format (for new benchmark modes). + #[arg(long, default_value = "false")] + pub v2: bool, +} +``` + +Update the `summarize` function dispatch: + +```rust +pub fn summarize(args: SummarizeArgs) -> eyre::Result<()> { + if args.v2 { + return summarize_v2(&args.results_dir, args.output.as_deref()); + } + // ... existing logic unchanged ... 
+} +``` + +- [ ] **Step 3: Verify compilation** + +```bash +cargo check -p bench-block-exec +``` + +- [ ] **Step 4: Commit** + +```bash +git add bin/bench-block-exec/src/report.rs +git commit -m "feat(bench): add V2 summarization with MGas/s, degradation, inclusion rate" +``` + +--- + +## Task 12: Chart Generation Script + +**Files:** +- Create: `local-test/bench-plot.py` + +- [ ] **Step 1: Write bench-plot.py** + +```python +#!/usr/bin/env python3 +"""Generate benchmark charts from JSON-line results.""" + +import argparse +import json +import os +import sys +from pathlib import Path + +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +import numpy as np + + +def read_jsonl(path): + """Read a JSON-lines file into a list of dicts.""" + records = [] + with open(path) as f: + for line in f: + line = line.strip() + if line: + records.append(json.loads(line)) + return records + + +def read_all_jsonl(directory, pattern="*.jsonl"): + """Read all .jsonl files in a directory tree.""" + records = [] + for p in Path(directory).rglob(pattern): + records.extend(read_jsonl(str(p))) + return records + + +def read_sweep_summaries(directory): + """Read sweep summary JSON files.""" + results = [] + for p in Path(directory).rglob("*-sweep-summary.json"): + with open(p) as f: + results.append(json.load(f)) + return results + + +# ── Chart 1: Sweep TPS / MGas Curve ────────────────────────────────────────── + +def chart_sweep(input_dir, output_dir): + summaries = read_sweep_summaries(input_dir) + if not summaries: + print("No sweep summaries found, skipping chart_sweep") + return + + workloads = sorted(set(s["workload"] for s in summaries)) + engines = sorted(set(s["engine"] for s in summaries)) + colors = {"reth": "#2196F3", "geth": "#FF9800"} + + fig, axes = plt.subplots(1, len(workloads), figsize=(6 * len(workloads), 5), squeeze=False) + + for col, wl in enumerate(workloads): + ax = axes[0][col] + ax2 = ax.twinx() + for eng in engines: + pts = [s for s in summaries if s["engine"] == eng and s["workload"] == wl] + if not pts: + continue + points = pts[0]["points"] + x = [p["txs_per_block"] for p in points] + tps = [p["median_tps"] for p in points] + mgas = [p["median_mgas_s"] for p in points] + + ax.plot(x, tps, "o-", color=colors.get(eng, "gray"), label=f"{eng} TPS") + ax2.plot(x, mgas, "s--", color=colors.get(eng, "gray"), alpha=0.6, label=f"{eng} MGas/s") + + # Mark inflection + peak_idx = tps.index(max(tps)) + ax.annotate(f"{tps[peak_idx]:.0f}", (x[peak_idx], tps[peak_idx]), + textcoords="offset points", xytext=(0, 10), ha="center", fontsize=8) + + ax.set_title(wl) + ax.set_xlabel("txs / block") + ax.set_ylabel("TPS") + ax2.set_ylabel("MGas/s") + ax.legend(loc="upper left", fontsize=8) + ax2.legend(loc="upper right", fontsize=8) + ax.grid(True, alpha=0.3) + + fig.suptitle("Sweep: TPS vs Block Size", fontsize=14) + fig.tight_layout() + fig.savefig(os.path.join(output_dir, "sweep-tps-curve.png"), dpi=150) + plt.close(fig) + print(" Generated sweep-tps-curve.png") + + +# ── Chart 2: Latency CDF ───────────────────────────────────────────────────── + +def chart_latency_cdf(input_dir, output_dir): + records = read_all_jsonl(input_dir) + if not records: + print("No records found, skipping chart_latency_cdf") + return + + modes = ["exec", "e2e"] + colors = {"reth": "#2196F3", "geth": "#FF9800"} + + fig, axes = plt.subplots(1, len(modes), figsize=(12, 5), squeeze=False) + + for col, mode in enumerate(modes): + ax = axes[0][col] + subset = [r for r in records if r.get("mode") == mode] + 
if not subset: + continue + + for eng in sorted(set(r["engine"] for r in subset)): + vals = sorted(r["assemble_ms"] for r in subset if r["engine"] == eng) + if not vals: + continue + y = np.linspace(0, 100, len(vals)) + ax.plot(vals, y, "-", color=colors.get(eng, "gray"), label=f"{eng} assemble") + + # P50, P95, P99 lines + for pct in [50, 95, 99]: + idx = int(len(vals) * pct / 100) + idx = min(idx, len(vals) - 1) + ax.axvline(vals[idx], color=colors.get(eng, "gray"), alpha=0.3, linestyle=":") + + ax.set_title(f"Mode: {mode}") + ax.set_xlabel("Assemble Latency (ms)") + ax.set_ylabel("Percentile (%)") + ax.legend(fontsize=8) + ax.grid(True, alpha=0.3) + + fig.suptitle("Latency CDF", fontsize=14) + fig.tight_layout() + fig.savefig(os.path.join(output_dir, "latency-cdf.png"), dpi=150) + plt.close(fig) + print(" Generated latency-cdf.png") + + +# ── Chart 3: Sustained Time Series ─────────────────────────────────────────── + +def chart_sustained(input_dir, output_dir): + records = [r for r in read_all_jsonl(input_dir) if r.get("mode") == "sustained"] + if not records: + print("No sustained records, skipping chart_sustained") + return + + colors = {"reth": "#2196F3", "geth": "#FF9800"} + warmups = sorted(set(r.get("warmup_blocks", 0) for r in records)) + engines = sorted(set(r["engine"] for r in records)) + workloads = sorted(set(r["workload"] for r in records)) + + fig, axes = plt.subplots(len(workloads), 1, figsize=(12, 4 * len(workloads)), squeeze=False) + + for row, wl in enumerate(workloads): + ax = axes[row][0] + for eng in engines: + for wu in warmups: + subset = [r for r in records + if r["engine"] == eng and r["workload"] == wl + and r.get("warmup_blocks", 0) == wu + and r.get("rolling_avg_tps_100") is not None] + if not subset: + continue + x = [r["cumulative_blocks"] for r in subset] + y = [r["rolling_avg_tps_100"] for r in subset] + label = f"{eng} (warmup={wu})" + ls = "-" if wu == 0 else "--" + ax.plot(x, y, ls, color=colors.get(eng, "gray"), label=label, alpha=0.8) + + ax.set_title(wl) + ax.set_xlabel("Block #") + ax.set_ylabel("Rolling Avg TPS (100-block)") + ax.legend(fontsize=8) + ax.grid(True, alpha=0.3) + + fig.suptitle("Sustained Block Production", fontsize=14) + fig.tight_layout() + fig.savefig(os.path.join(output_dir, "sustained-timeseries.png"), dpi=150) + plt.close(fig) + print(" Generated sustained-timeseries.png") + + +# ── Chart 4: Reth vs Geth Comparison ───────────────────────────────────────── + +def chart_comparison(input_dir, output_dir): + records = read_all_jsonl(input_dir) + if not records: + return + + # Group by (mode, workload) → avg TPS per engine + from collections import defaultdict + groups = defaultdict(lambda: defaultdict(list)) + for r in records: + key = (r.get("mode", ""), r.get("workload", "")) + groups[key][r["engine"]].append(r.get("tps", 0)) + + labels = [] + reth_vals = [] + geth_vals = [] + for (mode, wl), eng_data in sorted(groups.items()): + labels.append(f"{mode}\n{wl}") + reth_vals.append(np.mean(eng_data.get("reth", [0]))) + geth_vals.append(np.mean(eng_data.get("geth", [0]))) + + if not labels: + return + + x = np.arange(len(labels)) + w = 0.35 + + fig, ax = plt.subplots(figsize=(max(8, len(labels) * 1.5), 5)) + ax.bar(x - w/2, reth_vals, w, label="reth", color="#2196F3") + ax.bar(x + w/2, geth_vals, w, label="geth", color="#FF9800") + ax.set_xticks(x) + ax.set_xticklabels(labels, fontsize=8) + ax.set_ylabel("Average TPS") + ax.set_title("Reth vs Geth Comparison") + ax.legend() + ax.grid(True, alpha=0.3, axis="y") + + 
fig.tight_layout() + fig.savefig(os.path.join(output_dir, "reth-vs-geth-comparison.png"), dpi=150) + plt.close(fig) + print(" Generated reth-vs-geth-comparison.png") + + +# ── Chart 5: Multi-Sender Impact ───────────────────────────────────────────── + +def chart_multi_sender(input_dir, output_dir): + records = [r for r in read_all_jsonl(input_dir) if r.get("mode") == "e2e"] + if not records: + print("No e2e records, skipping chart_multi_sender") + return + + from collections import defaultdict + groups = defaultdict(lambda: defaultdict(list)) + for r in records: + groups[r["workload"]][(r["engine"], r.get("senders", 1))].append(r.get("tps", 0)) + + workloads = sorted(groups.keys()) + colors = {"reth": "#2196F3", "geth": "#FF9800"} + + fig, axes = plt.subplots(1, len(workloads), figsize=(6 * len(workloads), 5), squeeze=False) + + for col, wl in enumerate(workloads): + ax = axes[0][col] + data = groups[wl] + sender_counts = sorted(set(s for (_, s) in data.keys())) + + for eng in ["reth", "geth"]: + means = [] + for sc in sender_counts: + vals = data.get((eng, sc), []) + means.append(np.mean(vals) if vals else 0) + ax.plot(sender_counts, means, "o-", color=colors.get(eng, "gray"), label=eng) + + ax.set_title(wl) + ax.set_xlabel("Sender Count") + ax.set_ylabel("Average TPS") + ax.set_xscale("log") + ax.legend(fontsize=8) + ax.grid(True, alpha=0.3) + + fig.suptitle("Multi-Sender Impact on TPS", fontsize=14) + fig.tight_layout() + fig.savefig(os.path.join(output_dir, "multi-sender-impact.png"), dpi=150) + plt.close(fig) + print(" Generated multi-sender-impact.png") + + +# ── Main ────────────────────────────────────────────────────────────────────── + +def main(): + parser = argparse.ArgumentParser(description="Generate benchmark charts") + parser.add_argument("--input", required=True, help="Results directory") + parser.add_argument("--output", required=True, help="Output directory for PNG files") + parser.add_argument("--type", choices=["sweep", "cdf", "sustained", "comparison", "sender", "all"], + default="all", help="Chart type to generate") + args = parser.parse_args() + + os.makedirs(args.output, exist_ok=True) + + chart_funcs = { + "sweep": chart_sweep, + "cdf": chart_latency_cdf, + "sustained": chart_sustained, + "comparison": chart_comparison, + "sender": chart_multi_sender, + } + + if args.type == "all": + for name, func in chart_funcs.items(): + print(f"Generating {name} chart...") + func(args.input, args.output) + else: + chart_funcs[args.type](args.input, args.output) + + print(f"\nAll charts saved to {args.output}/") + + +if __name__ == "__main__": + main() +``` + +- [ ] **Step 2: Verify script syntax** + +```bash +python3 -c "import ast; ast.parse(open('local-test/bench-plot.py').read()); print('OK')" +``` + +- [ ] **Step 3: Commit** + +```bash +git add -f local-test/bench-plot.py +git commit -m "feat(bench): add bench-plot.py for chart generation (5 chart types)" +``` + +--- + +## Task 13: Orchestration Script Rewrite + +**Files:** +- Modify: `local-test/bench-block-exec.sh` + +- [ ] **Step 1: Rewrite bench-block-exec.sh with 4-phase orchestration** + +The script is large (~400 lines). 
The key changes from the existing script: + +```bash +#!/usr/bin/env bash +set -euo pipefail + +# ── Configuration ───────────────────────────────────────────────────────────── +RETH_BIN="${RETH_BIN:-./target/release/morph-reth}" +GETH_BIN="${GETH_BIN:-../go-ethereum/build/bin/geth}" +BENCH_BIN="${BENCH_BIN:-./target/release/bench-block-exec}" +JWT_SECRET="./local-test/jwt-secret.txt" +CHAIN_ID=99999 +RESULTS_DIR="bench-results/$(date +%Y%m%d-%H%M%S)" +FORCE=0 + +# Contract bytecodes (from forge inspect) +BENCH_TOKEN_CODE="$(cat local-test/bench-contracts/out/BenchToken.sol/BenchToken.json | python3 -c 'import sys,json; print(json.load(sys.stdin)["deployedBytecode"]["object"])')" +BENCH_SWAP_CODE="$(cat local-test/bench-contracts/out/BenchSwap.sol/BenchSwap.json | python3 -c 'import sys,json; print(json.load(sys.stdin)["deployedBytecode"]["object"])')" + +# Ports +HTTP_PORT=8545 +AUTH_PORT=8551 + +# ── Helpers ─────────────────────────────────────────────────────────────────── +generate_genesis() { + local senders=$1 gas_limit=$2 max_tx=$3 output=$4 + $BENCH_BIN write-genesis \ + --output "$output" \ + --senders "$senders" \ + --gas-limit "$gas_limit" \ + --max-tx-per-block "$max_tx" \ + --bench-token-code "$BENCH_TOKEN_CODE" \ + --bench-swap-code "$BENCH_SWAP_CODE" +} + +start_reth() { + local datadir=$1 genesis=$2 + pm2 start "$RETH_BIN" --name "bench-node" -- node \ + --chain "$genesis" \ + --datadir "$datadir" \ + --http --http.addr 127.0.0.1 --http.port $HTTP_PORT \ + --http.api "web3,debug,eth,txpool,net" \ + --authrpc.addr 127.0.0.1 --authrpc.port $AUTH_PORT \ + --authrpc.jwtsecret "$JWT_SECRET" \ + --morph.max-tx-payload-bytes 1073741824 \ + --engine.persistence-threshold 4096 \ + --engine.memory-block-buffer-target 4096 + wait_for_rpc +} + +start_geth() { + local datadir=$1 genesis=$2 + $GETH_BIN init --datadir "$datadir" "$genesis" + pm2 start "$GETH_BIN" --name "bench-node" -- \ + --datadir "$datadir" \ + --gcmode archive --syncmode full \ + --http --http.addr 127.0.0.1 --http.port $HTTP_PORT \ + --http.api "web3,eth,debug,txpool,net,morph,engine" \ + --authrpc.addr 127.0.0.1 --authrpc.port $AUTH_PORT \ + --authrpc.jwtsecret "$JWT_SECRET" \ + --maxpeers 0 \ + --cache 8192 \ + --txpool.globalslots 100000 \ + --txpool.accountslots 1000 + wait_for_rpc +} + +stop_node() { + pm2 delete bench-node 2>/dev/null || true + sleep 1 +} + +wait_for_rpc() { + local timeout=120 + for i in $(seq 1 $timeout); do + if curl -s -X POST -H "Content-Type: application/json" \ + --data '{"jsonrpc":"2.0","method":"web3_clientVersion","params":[],"id":1}' \ + "http://127.0.0.1:$HTTP_PORT" > /dev/null 2>&1; then + return 0 + fi + sleep 1 + done + echo "ERROR: RPC not ready after ${timeout}s" + return 1 +} + +run_test() { + local engine=$1 mode=$2 workload=$3 senders=$4 txs=$5 blocks=$6 warmup=$7 + local tag="${engine}-${workload}-s${senders}-w${warmup}" + local output_file="$RESULTS_DIR/${mode}/${tag}.jsonl" + + # Skip if exists and not forced + if [[ -f "$output_file" && "$FORCE" == "0" ]]; then + echo "SKIP (exists): $output_file" + return 0 + fi + + mkdir -p "$(dirname "$output_file")" + local datadir="bench-data/${tag}-$$" + local genesis="/tmp/bench-genesis-$$.json" + + generate_genesis "$senders" "0x2540BE400" 0 "$genesis" + + if [[ "$engine" == "reth" ]]; then + start_reth "$datadir" "$genesis" + else + start_geth "$datadir" "$genesis" + fi + + local mode_args="" + case "$mode" in + exec) + $BENCH_BIN run exec \ + --engine-rpc "http://127.0.0.1:$AUTH_PORT" \ + --jwt-secret "$JWT_SECRET" \ + 
--workload "$workload" \ + --txs-per-block "$txs" \ + --blocks "$blocks" \ + --output "$output_file" \ + --engine-name "$engine" \ + --chain-id $CHAIN_ID + ;; + e2e) + $BENCH_BIN run e2e \ + --engine-rpc "http://127.0.0.1:$AUTH_PORT" \ + --jwt-secret "$JWT_SECRET" \ + --http-rpc "http://127.0.0.1:$HTTP_PORT" \ + --workload "$workload" \ + --txs-per-block "$txs" \ + --blocks "$blocks" \ + --senders "$senders" \ + --output "$output_file" \ + --engine-name "$engine" \ + --chain-id $CHAIN_ID + ;; + sustained) + $BENCH_BIN run sustained \ + --engine-rpc "http://127.0.0.1:$AUTH_PORT" \ + --jwt-secret "$JWT_SECRET" \ + --http-rpc "http://127.0.0.1:$HTTP_PORT" \ + --workload "$workload" \ + --txs-per-block "$txs" \ + --blocks "$blocks" \ + --warmup-blocks "$warmup" \ + --senders "$senders" \ + --output "$output_file" \ + --engine-name "$engine" \ + --chain-id $CHAIN_ID + ;; + esac + + stop_node + rm -rf "$datadir" "$genesis" +} + +run_sweep() { + local engine=$1 workload=$2 + local datadir="bench-data/sweep-${engine}-${workload}-$$" + local genesis="/tmp/bench-genesis-sweep-$$.json" + + generate_genesis 1 "0x2540BE400" 0 "$genesis" + + if [[ "$engine" == "reth" ]]; then + start_reth "$datadir" "$genesis" + else + start_geth "$datadir" "$genesis" + fi + + $BENCH_BIN sweep \ + --engine-rpc "http://127.0.0.1:$AUTH_PORT" \ + --jwt-secret "$JWT_SECRET" \ + --workload "$workload" \ + --blocks-per-step 30 \ + --output-dir "$RESULTS_DIR/sweep" \ + --engine-name "$engine" \ + --chain-id $CHAIN_ID + + stop_node + rm -rf "$datadir" "$genesis" +} + +# ── Main ────────────────────────────────────────────────────────────────────── +echo "=== Max TPS Benchmark ===" +echo "Results: $RESULTS_DIR" +mkdir -p "$RESULTS_DIR" + +WORKLOADS="eth-transfer erc20-transfer uniswap-swap" +ENGINES="reth geth" + +# ── Phase 1: Sweep ──────────────────────────────────────────────────────────── +echo "" +echo "════ Phase 1: Sweep (find inflection points) ════" +for engine in $ENGINES; do + for wl in $WORKLOADS; do + echo "── Sweep: $engine / $wl" + run_sweep "$engine" "$wl" + done +done + +# Read sweep results to determine optimal txs/block per engine+workload +# Default fallback: 5000 +get_inflection_txs() { + local engine=$1 workload=$2 + local file="$RESULTS_DIR/sweep/${engine}-${workload}-sweep-summary.json" + if [[ -f "$file" ]]; then + python3 -c "import json; d=json.load(open('$file')); print(d.get('inflection_txs', 5000))" + else + echo 5000 + fi +} + +# ── Phase 2: Precise Matrix ────────────────────────────────────────────────── +echo "" +echo "════ Phase 2: Precise matrix tests ════" +for engine in $ENGINES; do + for wl in $WORKLOADS; do + PEAK=$(get_inflection_txs "$engine" "$wl") + + # Mode A: exec (single sender) + echo "── Exec: $engine / $wl / txs=$PEAK" + run_test "$engine" exec "$wl" 1 "$PEAK" 50 0 + + # Mode B: e2e (1, 100, 1000 senders) + E2E_TXS=$(python3 -c "print(int($PEAK * 0.8))") + for senders in 1 100 1000; do + echo "── E2E: $engine / $wl / senders=$senders / txs=$E2E_TXS" + run_test "$engine" e2e "$wl" "$senders" "$E2E_TXS" 200 0 + done + + # Mode C: sustained (1, 100 senders) + SUST_TXS=$(python3 -c "print(int($PEAK * 0.5))") + for senders in 1 100; do + echo "── Sustained: $engine / $wl / senders=$senders / txs=$SUST_TXS" + run_test "$engine" sustained "$wl" "$senders" "$SUST_TXS" 1000 0 + done + done +done + +# ── Phase 3: State Degradation ──────────────────────────────────────────────── +echo "" +echo "════ Phase 3: State degradation tests ════" +for engine in $ENGINES; do + for wl in $WORKLOADS; 
do
+        PEAK=$(get_inflection_txs "$engine" "$wl")
+        SUST_TXS=$(python3 -c "print(int($PEAK * 0.5))")
+        echo "── Degradation: $engine / $wl / warmup=500"
+        run_test "$engine" sustained "$wl" 100 "$SUST_TXS" 1000 500
+    done
+done
+
+# ── Phase 4: Summarize + Plot ────────────────────────────────────────────────
+echo ""
+echo "════ Phase 4: Summarize + Plot ════"
+$BENCH_BIN summarize --results-dir "$RESULTS_DIR" --output "$RESULTS_DIR/summary.tsv" --v2
+python3 local-test/bench-plot.py --type all --input "$RESULTS_DIR" --output "$RESULTS_DIR/charts"
+
+echo ""
+echo "=== COMPLETE ==="
+echo "Summary: $RESULTS_DIR/summary.tsv"
+echo "Charts: $RESULTS_DIR/charts/"
+```
+
+- [ ] **Step 2: Make script executable**
+
+```bash
+chmod +x local-test/bench-block-exec.sh
+```
+
+- [ ] **Step 3: Verify syntax**
+
+```bash
+bash -n local-test/bench-block-exec.sh
+```
+
+Expected: no syntax errors.
+
+- [ ] **Step 4: Commit**
+
+```bash
+git add local-test/bench-block-exec.sh
+git commit -m "feat(bench): rewrite orchestration script with 4-phase max TPS testing"
+```
+
+---
+
+## Task 14: Integration Smoke Test
+
+**Files:** None (testing only)
+
+- [ ] **Step 1: Build the benchmark binary**
+
+```bash
+cd /Users/panos/workspace/morph-reth
+cargo build -p bench-block-exec --release
+```
+
+- [ ] **Step 2: Test genesis generation**
+
+```bash
+./target/release/bench-block-exec write-genesis \
+  --output /tmp/test-genesis.json \
+  --senders 10 \
+  --gas-limit 0x2540BE400 \
+  --max-tx-per-block 0
+```
+
+Verify the output contains 10 sender accounts and gas limit is 10B.
+
+```bash
+python3 -c "
+import json
+g = json.load(open('/tmp/test-genesis.json'))
+print('gasLimit:', g['gasLimit'])
+print('maxTxPerBlock:', g['config']['morph']['maxTxPerBlock'])
+accts = [k for k in g['alloc'] if k != '530000000000000000000000000000000000000a']
+print('accounts:', len(accts))
+"
+```
+
+Expected:
+```
+gasLimit: 0x2540BE400
+maxTxPerBlock: 10000000
+accounts: 10
+```
+
+- [ ] **Step 3: Test CLI help for all new subcommands**
+
+```bash
+./target/release/bench-block-exec run exec --help
+./target/release/bench-block-exec run e2e --help
+./target/release/bench-block-exec run sustained --help
+./target/release/bench-block-exec sweep --help
+```
+
+Expected: all display correctly.
+
+- [ ] **Step 4: Quick smoke test with reth (exec mode, 5 blocks, 100 txs)**
+
+This requires a running reth node. 
Start one manually: + +```bash +./target/release/bench-block-exec write-genesis \ + --output /tmp/smoke-genesis.json \ + --senders 1 \ + --gas-limit 0x2540BE400 \ + --max-tx-per-block 0 + +pm2 start ./target/release/morph-reth --name smoke-reth -- node \ + --chain /tmp/smoke-genesis.json \ + --datadir /tmp/smoke-reth-data \ + --http --http.addr 127.0.0.1 --http.port 8545 \ + --http.api "web3,debug,eth,txpool,net" \ + --authrpc.addr 127.0.0.1 --authrpc.port 8551 \ + --authrpc.jwtsecret ./local-test/jwt-secret.txt \ + --engine.persistence-threshold 4096 \ + --engine.memory-block-buffer-target 4096 + +# Wait for RPC +sleep 5 + +./target/release/bench-block-exec run exec \ + --engine-rpc http://127.0.0.1:8551 \ + --jwt-secret ./local-test/jwt-secret.txt \ + --workload eth-transfer \ + --txs-per-block 100 \ + --blocks 5 \ + --output /tmp/smoke-results.jsonl \ + --engine-name reth + +# Check output +cat /tmp/smoke-results.jsonl | python3 -c " +import sys, json +for line in sys.stdin: + r = json.loads(line) + print(f\"Block {r['block_number']}: {r['tx_count']} txs, {r['tps']:.0f} TPS, {r['mgas_per_sec']:.0f} MGas/s\") +" + +pm2 delete smoke-reth +rm -rf /tmp/smoke-reth-data /tmp/smoke-genesis.json +``` + +Expected: 5 lines of output with non-zero TPS and MGas/s values. + +- [ ] **Step 5: Commit (if any fixes were needed)** + +```bash +git add -A +git commit -m "fix(bench): integration fixes from smoke test" +``` + +--- + +## Dependency Graph + +``` +Task 1 (BenchSwap contract) + │ + ├─→ Task 5 (genesis.rs: needs contract bytecodes) + │ +Task 2 (BlockTimingV2) ─→ Task 6, 7, 8, 9 (all modes use BlockTimingV2) + │ +Task 3 (tx_factory: accounts) ─→ Task 4 (tx_factory: builders) + │ │ + │ ├─→ Task 5 (genesis.rs: uses generate_senders) + │ ├─→ Task 6 (mode_exec) + │ ├─→ Task 7 (mode_e2e) + │ └─→ Task 8 (mode_sustained: reuses mode_e2e helpers) + │ +Task 9 (sweep) ─→ needs Task 6 (mode_exec) + │ +Task 10 (CLI integration) ─→ needs Tasks 6, 7, 8, 9 + │ +Task 11 (report extensions) ─→ needs Task 2 (BlockTimingV2) + │ +Task 12 (bench-plot.py) ─→ independent + │ +Task 13 (shell script) ─→ needs all above + │ +Task 14 (smoke test) ─→ needs all above +``` + +**Recommended execution order:** 1 → 2 → 3 → 4 → 5 → 6 → 7 → 8 → 9 → 10 → 11 → 12 → 13 → 14 + +Tasks 12 (Python script) can be done in parallel with Tasks 6-11. From 87f375c7ef80558adaf6d370236c7fd7ff3832cf Mon Sep 17 00:00:00 2001 From: panos-xyz Date: Thu, 9 Apr 2026 10:58:51 +0800 Subject: [PATCH 17/17] perf(txpool): reuse state provider across batch transaction validation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoid creating a new StateProvider for every transaction in validate_all(). Instead, lazily initialize one on the first call and reuse it for the rest of the batch. This leverages the existing validate_one_with_state() API from reth's EthTransactionValidator. The optimization matters as the state trie grows — opening a fresh provider per-tx becomes the dominant cost in txpool validation. 
---
 crates/txpool/src/validator.rs | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)

diff --git a/crates/txpool/src/validator.rs b/crates/txpool/src/validator.rs
index f10843c..1fdc73e 100644
--- a/crates/txpool/src/validator.rs
+++ b/crates/txpool/src/validator.rs
@@ -245,6 +245,22 @@ where
         &self,
         origin: TransactionOrigin,
         transaction: Tx,
+    ) -> TransactionValidationOutcome<Tx> {
+        self.validate_one_with_state(origin, transaction, &mut None)
+    }
+
+    /// Validates a single transaction, reusing an optional state provider.
+    ///
+    /// When `state` is `None`, a fresh provider is fetched from the database on
+    /// first use and stored back into `state` for reuse by subsequent calls.
+    /// This avoids creating a new [`StateProvider`] for every transaction in a
+    /// batch, which is the main source of txpool validation slowdown as the
+    /// state trie grows.
+    pub fn validate_one_with_state(
+        &self,
+        origin: TransactionOrigin,
+        transaction: Tx,
+        state: &mut Option<Box<dyn StateProvider>>,
     ) -> TransactionValidationOutcome<Tx> {
         // Reject EIP-4844 blob transactions - not supported on L2
         if transaction.is_eip4844() {
@@ -290,7 +306,9 @@ where
             );
         }
 
-        let outcome = self.inner.validate_one(origin, transaction);
+        let outcome = self
+            .inner
+            .validate_one_with_state(origin, transaction, state);
         if outcome.is_invalid() || outcome.is_error() {
             tracing::trace!(target: "morph::txpool", ?outcome, "tx pool validation failed");
             return outcome;
@@ -435,7 +453,7 @@ where
         Ok(result)
     }
 
-    /// Validates all given transactions.
+    /// Validates all given transactions, reusing a single state provider across the batch.
     ///
     /// Returns all outcomes for the given transactions in the same order.
     ///
@@ -444,9 +462,10 @@ where
         &self,
         transactions: Vec<(TransactionOrigin, Tx)>,
     ) -> Vec<TransactionValidationOutcome<Tx>> {
+        let mut state = None;
         transactions
             .into_iter()
-            .map(|(origin, tx)| self.validate_one(origin, tx))
+            .map(|(origin, tx)| self.validate_one_with_state(origin, tx, &mut state))
             .collect()
     }
 }
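
For intuition, here is a standalone sketch of the reuse pattern the new `validate_all` relies on. It is not part of the patch; the `&str` value and the counter are stand-ins for a real state provider and its construction cost. The point is that the factory closure runs once per batch rather than once per transaction:

```rust
fn main() {
    use std::cell::Cell;

    // Counts how many times a "provider" is opened.
    let opens = Cell::new(0u32);
    let open_provider = || {
        opens.set(opens.get() + 1);
        "state-provider" // stand-in for a real StateProvider handle
    };

    let batch = ["tx-a", "tx-b", "tx-c"];
    let mut state: Option<&str> = None;
    for tx in batch {
        // First iteration opens the provider; later iterations reuse it,
        // mirroring validate_one_with_state(.., &mut state) across a batch.
        let provider = *state.get_or_insert_with(open_provider);
        let _ = (tx, provider); // per-transaction validation would happen here
    }

    assert_eq!(opens.get(), 1);
    println!("opened {} provider for {} txs", opens.get(), batch.len());
}
```

Because `validate_all` starts each call with `state = None`, reuse never crosses batch boundaries: every batch still observes a provider opened against the latest state.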