diff --git a/LITE_MODE_RPC.md b/LITE_MODE_RPC.md
new file mode 100644
index 00000000000..e463b764488
--- /dev/null
+++ b/LITE_MODE_RPC.md
@@ -0,0 +1,416 @@
+## External Safe-Blocks RPC for Safe/Finalized Heads (op-node)
+
+> Note: This plan is written before implementation. As we learn more during development and testing, we may change or disregard specific implementation details outlined here. Naming note: the implementation landed the flag as `--lite-mode-rpc` (env `OP_NODE_LITE_MODE_RPC`, package `op-node/litemode`), not `--safe-blocks-rpc` as written below; the design is otherwise unchanged.
+
+This document describes enabling op-node to source safe and finalized L2 heads from an external RPC by configuring a new CLI option. This is not a new "mode"; the behavior is enabled implicitly when the external safe-blocks RPC is configured.
+
+### Implementation update (current)
+- Single-block advancement per tick for both safe and finalized to favor simplicity and correctness.
+- Commit-based ingestion: when a needed block is not present locally, build an `eth.ExecutionPayloadEnvelope` from RPC using `op-service/sources.RPCBlock.ExecutionPayloadEnvelope(false)` and ingest it with `Engine.CommitBlock` before applying labels.
+- Common-ancestor reconciliation: start from the local safe tip, walk back comparing remote hashes at the same height to find a common ancestor, then only ingest/apply the next block (`ancestor.Number + 1`).
+- Finalized advances conservatively: never beyond local unsafe or local safe and only when the block exists locally.
+- EngineController guards: derivation-based promotions for safe/finalized are inert while SAFE_BLOCKS_RPC is enabled; unsafe remains unchanged. Head-setting methods log changes and trigger FCU via `TryUpdateEngine`.
+
+### Usage
+Run unit tests for safeblocks (paths are relative to the repository root):
+```bash
+cd op-node/safeblocks
+go test -v -count=1
+```
+
+Run the acceptance test (external EL):
+```bash
+cd op-acceptance-tests/tests/sync_tester/sync_tester_ext_el
+OP_NODE_SAFE_BLOCKS_RPC=xxx \
+  CIRCLECI_PARAMETERS_SYNC_TEST_OP_NODE_DISPATCH=true \
+  TAILSCALE_NETWORKING=true \
+  NETWORK_PRESET=op-sepolia \
+  GOMAXPROCS=5 \
+  go test -run '^TestSyncTesterExtEL$' -v -count=1 | tee test_safe_blocks.log | cat
+```
+
+Sync-to-tip variant:
+```bash
+cd op-acceptance-tests/tests/sync_tester/sync_tester_ext_el_tip
+OP_NODE_SAFE_BLOCKS_RPC=xxx \
+  CIRCLECI_PARAMETERS_SYNC_TEST_OP_NODE_DISPATCH=true \
+  TAILSCALE_NETWORKING=true \
+  NETWORK_PRESET=op-sepolia \
+  GOMAXPROCS=5 \
+  go test -run '^TestSyncTesterExtELTip$' -v -count=1 | tee test_safe_blocks_tip.log | cat
+```
+
+Quick verification greps:
+```bash
+grep -n "SAFE_BLOCKS_RPC test env detected\|Safe-blocks RPC enabled: skipping local finalizer wiring" test_safe_blocks*.log | head -n 20 | cat
+grep -n "Applying safe block" test_safe_blocks*.log | head -n 20 | cat
+grep -n "Set finalized head\|Set safe head\|Set local safe head" test_safe_blocks*.log | head -n 20 | cat
+grep -n "NewPayloadV[0-9]\|ForkchoiceUpdatedV[0-9]" test_safe_blocks*.log | head -n 20 | cat
+```
+
+### 1) Goals and constraints
+- Disable existing safe derivation and finality logic when an external safe-blocks RPC is configured.
+- Do not touch unsafe logic/paths.
+- Source safe/finalized from an external RPC; apply locally and call FCU.
+- On mismatch with previous safe: reorg EL to the external safe (or shared ancestor if desired), then FCU.
+- Explicitly exclude Interop/indexing; the external safe-blocks RPC feature must not run there.
+- Minimize diffs: handful of if-guards + a small poller + flags/wiring.
+ +### 2) Flags and config +- Added to op-node CLI flags (implemented): + - --safe-blocks-rpc (string; stubbed; logs and exits) + - Env: OP_NODE_SAFE_BLOCKS_RPC (for in-process tests) +- Pending (not yet implemented): + - --safe-blocks-rpc-poll-interval (duration; default 2s) +- Add to op-node CLI flags: + - `--safe-blocks-rpc` (string; if set, enables external safe/finalized sourcing) + - `--safe-blocks-rpc-poll-interval` (duration; default 2s) +- Startup constraint: + - If `--safe-blocks-rpc` is set AND interop/indexing is configured (e.g., `--interop.rpc.addr` set or supervisor indexing mode), error and exit. + +Example additions in `op-node/flags/flags.go`: +```go +// new flags (add to optional flags) +SafeBlocksRPC = &cli.StringFlag{ + Name: "safe-blocks-rpc", + Usage: "External L2 RPC endpoint to query safe/finalized heads from", + EnvVars: prefixEnvVars("SAFE_BLOCKS_RPC"), + Category: RollupCategory, +} +SafeBlocksRPCPollInterval = &cli.DurationFlag{ + Name: "safe-blocks-rpc-poll-interval", + Usage: "Polling interval for external safe-blocks RPC updates", + EnvVars: prefixEnvVars("SAFE_BLOCKS_RPC_POLL_INTERVAL"), + Value: time.Second * 2, + Category: RollupCategory, +} + +// ensure to append to optionalFlags in init(): +optionalFlags = append(optionalFlags, SafeBlocksRPC, SafeBlocksRPCPollInterval) +``` + +Interop exclusion at service startup (pseudocode): +```go +if ctx.IsSet(flags.SafeBlocksRPC.Name) { + if ctx.IsSet(flags.InteropRPCAddr.Name) || indexingMode { + return fmt.Errorf("safe-blocks RPC cannot run with interop/indexing enabled") + } +} +``` + +### 3) EngineController: guard and minimal changes +- Add a field to `EngineController`: +```go +type EngineController struct { + // ... existing fields ... + safeBlocksRPCEnabled bool +} +``` +- Set in constructor wiring (see Driver wiring below). 
+- Add early-returns at the top of these methods (one-liners), so safe/finality code paths never hydrate when the safe-blocks RPC is configured: + +```go +func (e *EngineController) TryUpdatePendingSafe(ctx context.Context, ref eth.L2BlockRef, concluding bool, source eth.L1BlockRef) { + if e.safeBlocksRPCEnabled { return } + // existing body +} + +func (e *EngineController) TryUpdateLocalSafe(ctx context.Context, ref eth.L2BlockRef, concluding bool, source eth.L1BlockRef) { + if e.safeBlocksRPCEnabled { return } + // existing body +} + +func (e *EngineController) PromoteSafe(ctx context.Context, ref eth.L2BlockRef, source eth.L1BlockRef) { + if e.safeBlocksRPCEnabled { return } + // existing body +} + +func (e *EngineController) PromoteFinalized(ctx context.Context, ref eth.L2BlockRef) { + if e.safeBlocksRPCEnabled { return } + // existing body +} +``` + +Notes: +- We intentionally do not gate `TryUpdateUnsafe`, leaving unsafe behavior unchanged. +- We do not rely on any `IsInterop(...)` checks; the feature simply does not run in interop environments. + +### 4) SyncDeriver: avoid attributes hydration when using safe-blocks RPC +In `SyncDeriver.SyncStep()` guard the pending-safe poke when the safe-blocks RPC is configured: +```go +if s.Engine.SafeBlocksRPCEnabled() { // add a getter or expose the bool + return +} +s.Engine.RequestPendingSafeUpdate(s.Ctx) +``` + +### 5) Driver wiring +- In `driver.NewDriver(...)`: + - Determine `safeBlocksRPCEnabled := cfg.SafeBlocksRPC != ""` and pass it through. + - Pass `safeBlocksRPCEnabled` into the `EngineController` (via constructor arg or setter). + - If enabled (and not interop/indexing), do NOT construct/register the `Finalizer`. + - Construct the poller (below) and store it on the driver. +- In `Driver.Start()`, if enabled, start the poller goroutine. 
+ +Example driver wiring (pseudocode): +```go +ec := engine.NewEngineController(driverCtx, l2, log, metrics, cfg, syncCfg, sys.Register("engine-controller", nil)) +ec.SetSafeBlocksRPCEnabled(driverCfg.SafeBlocksRPCEnabled) + +var finalizer Finalizer +if !driverCfg.SafeBlocksRPCEnabled { + // existing finalizer wiring +} + +if driverCfg.SafeBlocksRPCEnabled { + sb := safeblocks.New(safeblocks.Config{RPC: cfg.SafeBlocksRPC, Interval: cfg.SafeBlocksRPCPollInterval}, log, ec, l2) + s.safeblocks = sb +} +``` + +### 6) Safe-Blocks RPC poller: minimal skeleton +Create `op-node/safeblocks/safeblocks.go`. + +```go +package safeblocks + +import ( + "context" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +type Engine interface { + // Minimal surface used by the poller + UnsafeL2Head() eth.L2BlockRef + SafeL2Head() eth.L2BlockRef + Finalized() eth.L2BlockRef + SetSafeHead(eth.L2BlockRef) + SetLocalSafeHead(eth.L2BlockRef) + SetFinalizedHead(eth.L2BlockRef) + TryUpdateEngine(ctx context.Context) +} + +type L2 interface { + L2BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L2BlockRef, error) +} + +type Config struct { + RPC string + Interval time.Duration +} + +type Client struct { + log log.Logger + cfg Config + eng Engine + l2 L2 + cancel context.CancelFunc +} + +func New(cfg Config, log log.Logger, eng Engine, l2 L2) *Client { + return &Client{cfg: cfg, log: log, eng: eng, l2: l2} +} + +func (c *Client) Start(ctx context.Context) error { + if c.cfg.RPC == "" { return nil } + cctx, cancel := context.WithCancel(ctx) + c.cancel = cancel + cli := client.NewBaseRPCClient(c.cfg.RPC) + ticker := time.NewTicker(c.cfg.Interval) + go func() { + defer ticker.Stop() + for { + select { + case <-cctx.Done(): + return + case <-ticker.C: + c.tick(cctx, cli) + } + } + }() + return nil +} + +func (c *Client) 
Close() { if c.cancel != nil { c.cancel() } } + +func (c *Client) tick(ctx context.Context, cli *client.BaseRPCClient) { + extSafe, ok1 := fetchBlockByTag(ctx, cli, "safe") + extFin, ok2 := fetchBlockByTag(ctx, cli, "finalized") + if !ok1 && !ok2 { return } + + // Apply finalized first, then safe; ensure finalized ≤ safe + if ok2 { c.applyFinalized(ctx, extFin) } + if ok1 { c.applySafe(ctx, extSafe) } +} + +// fetchBlockByTag queries external RPC for a block reference by tag ("safe"/"finalized"). +func fetchBlockByTag(ctx context.Context, cli *client.BaseRPCClient, tag string) (eth.L2BlockRef, bool) { + // Implement using eth_getBlockByNumber(tag,false) and map to L2BlockRef (hash, parent, number, time, l1origin fields if available) + return eth.L2BlockRef{}, false +} + +func (c *Client) applySafe(ctx context.Context, ext eth.L2BlockRef) { + localSafe := c.eng.SafeL2Head() + if localSafe.Hash == ext.Hash { return } + if _, err := c.l2.L2BlockRefByHash(ctx, ext.Hash); err == nil { + // Known locally; snap safe to ext and FCU + c.eng.SetLocalSafeHead(ext) + c.eng.SetSafeHead(ext) + c.eng.TryUpdateEngine(ctx) + return + } + // Unknown or conflicting: simplest snap to external safe + c.eng.SetLocalSafeHead(ext) + c.eng.SetSafeHead(ext) + c.eng.TryUpdateEngine(ctx) +} + +func (c *Client) applyFinalized(ctx context.Context, ext eth.L2BlockRef) { + localFin := c.eng.Finalized() + if localFin.Hash == ext.Hash { return } + // Ensure not ahead of safe; the engine controller already validates FCU state + c.eng.SetFinalizedHead(ext) + c.eng.TryUpdateEngine(ctx) +} +``` + +### 7) External RPC details +- Recommended: external EL-compatible L2 RPC supporting tags: + - `eth_getBlockByNumber("safe", false)` + - `eth_getBlockByNumber("finalized", false)` +- Alternative: external OP Node RPC (`rollup_syncStatus`) plus `eth_getBlockByNumber` for block hashes. +- Poll interval: configurable (default 2s). 
+ +### 8) Reorg logic (simple and minimal) +- If external safe hash ≠ local safe hash: + - If external block is known locally and is a descendant of local safe: set safe to external and FCU. + - Else: snap to external safe (or implement shared-ancestor search if preferred). Then FCU. +- Apply finalized similarly; enforce `finalized ≤ safe`. + +### 9) Interop exclusion +- Safe-blocks RPC must not run when: + - Interop RPC (`--interop.rpc.addr`) is configured, or + - Node is in indexing/supervisor-managed mode. +- Enforce at startup; error if `--safe-blocks-rpc` is set alongside these configs. + +### 10) Tests +- Unit tests: + - `EngineController` with safe-blocks enabled: the 4 guarded methods are inert. + - `SyncDeriver.SyncStep` with safe-blocks enabled: does not call `RequestPendingSafeUpdate`. +- Integration tests: + - Safe-blocks RPC advances safe/finalized from external source → `ForkchoiceUpdateEvent` reflects updates, FCU returns valid. + - Reorg case: external safe jumps branches → EL snaps/reorgs accordingly, FCU valid. + - Interop: enabling safe-blocks with interop settings errors at startup. +- Non-regression: unsafe payload/FCU paths unchanged. + +### 11) Estimated diff size +- `engine_controller.go`: +1 field, +4 one-line guards. +- `sync_deriver.go`: +1 guard. +- `driver.go`: wiring for safe-blocks enable + poller start, skip finalizer. +- `flags.go`: 2 flags. +- `safeblocks.go`: ~150–250 LOC. + +Total: ~180–300 LOC, mostly additive; unsafe logic untouched. + + +### End-to-end testing plan (Sepolia) + +This complements unit/integration tests with practical end-to-end checks on Sepolia before and after changes. + +- Baseline goal (pre-implementation): Run a standard op-geth + op-node, consensus-sync Sepolia to a small height, and verify correctness by comparing a specific block with a trusted remote. +- Post-change goal: Repeat the same steps with safe-blocks RPC configured; confirm behavior is preserved. 
Then extend to sync-to-tip including unsafe P2P gossip. + +Prereqs +- A non-committed env file `op-up/external-l1.env` containing at least: + - `L1_RPC_URL` (Sepolia L1 RPC) + - `REMOTE_L2_RPC_URL` (trusted external L2 RPC for block comparison) + - `L2_JWT_PATH` (path to the JWT secret used by op-geth/op-node) + - Optional ports: `OP_GETH_HTTP_PORT`, `OP_NODE_RPC_PORT` (or defaults) + +Baseline script (no safe-blocks RPC) +```bash +#!/usr/bin/env bash +set -euo pipefail + +source op-up/external-l1.env + +: "${L1_RPC_URL:?set in env}" # Sepolia L1 +: "${REMOTE_L2_RPC_URL:?set in env}" # Trusted L2 for comparison +: "${L2_JWT_PATH:?set in env}" + +OP_GETH_HTTP_PORT=${OP_GETH_HTTP_PORT:-9545} +OP_NODE_RPC_PORT=${OP_NODE_RPC_PORT:-9546} + +# 1) Start op-geth (execution) +# Example (adjust paths/flags as needed): +# op-geth \ +# --http --http.addr 127.0.0.1 --http.port ${OP_GETH_HTTP_PORT} \ +# --authrpc.addr 127.0.0.1 --authrpc.port 8551 --authrpc.jwtsecret ${L2_JWT_PATH} \ +# --sepolia + +# 2) Start op-node pointing at L1 + op-geth (no safe-blocks RPC) +# op-node \ +# --l1 ${L1_RPC_URL} \ +# --l2 http://127.0.0.1:8551 \ +# --l2.jwt-secret ${L2_JWT_PATH} \ +# --rpc.addr 0.0.0.0 --rpc.port ${OP_NODE_RPC_PORT} + +# 3) Wait until local EL reaches block 20 +TARGET_HEX=0x14 +until curl -s -X POST localhost:${OP_GETH_HTTP_PORT} \ + -H 'content-type: application/json' \ + --data '{"jsonrpc":"2.0","id":1,"method":"eth_getBlockByNumber","params":["'${TARGET_HEX}'",false]}' | jq -e '.result.hash' >/dev/null; do + sleep 1 +done + +# 4) Compare local vs remote block 20 hashes +LOCAL_HASH=$(curl -s -X POST localhost:${OP_GETH_HTTP_PORT} \ + -H 'content-type: application/json' \ + --data '{"jsonrpc":"2.0","id":1,"method":"eth_getBlockByNumber","params":["'${TARGET_HEX}'",false]}' | jq -r '.result.hash') +REMOTE_HASH=$(curl -s -X POST ${REMOTE_L2_RPC_URL} \ + -H 'content-type: application/json' \ + --data 
'{"jsonrpc":"2.0","id":1,"method":"eth_getBlockByNumber","params":["'${TARGET_HEX}'",false]}' | jq -r '.result.hash') + +test "${LOCAL_HASH}" = "${REMOTE_HASH}" || { echo "hash mismatch at block 20"; exit 1; } +echo "Baseline OK: block 20 matches (${LOCAL_HASH})" +``` + +Post-change script (safe-blocks RPC enabled) +- Repeat the baseline with the op-node started using: + - `--safe-blocks-rpc ${REMOTE_L2_RPC_URL}` + - `--safe-blocks-rpc-poll-interval 2s` (or as configured) +- Validate the same comparison at block 20 succeeds. + +Final test: sync to tip with P2P gossip +- Start op-node with P2P enabled (standard P2P flags) and ensure connection to peers. +- Let the node sync to tip, including unsafe sequencer gossip, and observe: + - Local unsafe head advancing steadily + - Safe-blocks RPC still updating safe/finalized labels without interfering with unsafe + - No regressions in FCU results (no invalid forkchoice state errors) + +Outcome +- Baseline parity at block 20 without safe-blocks RPC +- Parity at block 20 with safe-blocks RPC +- Sustained sync-to-tip with P2P gossip while safe-blocks RPC is active + + +### Milestones +- [x] Milestone 1: Baseline Sepolia sync + - Run a test that proves we can sync Sepolia + - SOLVED: ./op-acceptance-tests/tests/sync_tester/sync_tester_ext_el and sync_tester_ext_el_tip can be run to prove safe head progression. +- [x] Milestone 2: Introduce minimal guards (no behavior change yet) + - Implemented --safe-blocks-rpc (default off). When set, op-node logs and exits (stub). Baseline passes when unset. + - Add inert feature-flag plumbing; ensure baseline still passes when off and `--safe-blocks-rpc` is unset. +- [ ] Milestone 3: Enable safe-blocks RPC, preserve baseline behavior + - Start with `--safe-blocks-rpc` and re-run block 20 parity check. +- [ ] Milestone 4: Full sync-to-tip with P2P + - Enable P2P and verify sustained sync to tip, including unsafe gossip. 
+ + + diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_ext_el/sync_tester_ext_el_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_ext_el/sync_tester_ext_el_test.go index 9c4f57f2638..a45eca64c51 100644 --- a/op-acceptance-tests/tests/sync_tester/sync_tester_ext_el/sync_tester_ext_el_test.go +++ b/op-acceptance-tests/tests/sync_tester/sync_tester_ext_el/sync_tester_ext_el_test.go @@ -188,7 +188,7 @@ func setupOrchestrator(gt *testing.T, t devtest.T, blocksToSync uint64) (*sysgo. l.Info("L2_CL_SYNCMODE", "value", L2CLSyncMode) // Setup orchestrator - logger := testlog.Logger(gt, log.LevelInfo) + logger := testlog.Logger(gt, log.LevelDebug) onFail := func(now bool) { if now { gt.FailNow() diff --git a/op-devstack/sysgo/l2_cl_opnode.go b/op-devstack/sysgo/l2_cl_opnode.go index 9fab9be3ca0..03c9c9cdfdf 100644 --- a/op-devstack/sysgo/l2_cl_opnode.go +++ b/op-devstack/sysgo/l2_cl_opnode.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "flag" "fmt" + "os" "sync" "time" @@ -296,6 +297,17 @@ func WithOpNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L IgnoreMissingPectraBlobSchedule: false, ExperimentalOPStackAPI: true, } + // Minimal plumbing for experimental lite mode RPC flag in tests: + // allow configuring via OP_NODE_LITE_MODE_RPC env var to simulate CLI flag. + if v := os.Getenv("OP_NODE_LITE_MODE_RPC"); v != "" { + nodeCfg.LiteModeRPC = v + // Also wire through driver for guards/poller + nodeCfg.Driver.LiteModeRPC = v + if nodeCfg.Driver.LiteModePollInterval == 0 { + nodeCfg.Driver.LiteModePollInterval = time.Second * 2 + } + logger.Info("LITE_MODE_RPC test env detected; enabling lite mode", "endpoint", v) + } if cfg.SafeDBPath != "" { nodeCfg.SafeDBPath = cfg.SafeDBPath } diff --git a/op-node/config/config.go b/op-node/config/config.go index 80a438c1883..9169b126d27 100644 --- a/op-node/config/config.go +++ b/op-node/config/config.go @@ -87,6 +87,11 @@ type Config struct { // Experimental. Enables new opstack RPC namespace. 
Used by op-test-sequencer. ExperimentalOPStackAPI bool + + // Experimental: If non-empty, enables lite mode via external RPC + LiteModeRPC string + // Poll interval for external lite mode updates + LiteModePollInterval time.Duration } // ConductorRPCFunc retrieves the endpoint. The RPC may not immediately be available. diff --git a/op-node/flags/flags.go b/op-node/flags/flags.go index e032e14c439..c4800345e46 100644 --- a/op-node/flags/flags.go +++ b/op-node/flags/flags.go @@ -305,6 +305,20 @@ var ( EnvVars: prefixEnvVars("SAFEDB_PATH"), Category: OperationsCategory, } + // Experimental: Lite mode RPC feature flag (minimal plumbing) + LiteModeRPC = &cli.StringFlag{ + Name: "lite-mode-rpc", + Usage: "External L2 RPC endpoint to source lite-mode heads (experimental)", + EnvVars: prefixEnvVars("LITE_MODE_RPC"), + Category: RollupCategory, + } + LiteModePollInterval = &cli.DurationFlag{ + Name: "lite-mode-poll-interval", + Usage: "Polling interval for external lite-mode RPC updates", + EnvVars: prefixEnvVars("LITE_MODE_POLL_INTERVAL"), + Value: time.Second * 2, + Category: RollupCategory, + } /* Deprecated Flags */ L2EngineSyncEnabled = &cli.BoolFlag{ Name: "l2.engine-sync", @@ -456,6 +470,8 @@ var optionalFlags = []cli.Flag{ ConductorRpcFlag, ConductorRpcTimeoutFlag, SafeDBPath, + LiteModeRPC, + LiteModePollInterval, L2EngineKind, L2EngineRpcTimeout, InteropRPCAddr, diff --git a/op-node/litemode/DESIGN.md b/op-node/litemode/DESIGN.md new file mode 100644 index 00000000000..2794c8e7a51 --- /dev/null +++ b/op-node/litemode/DESIGN.md @@ -0,0 +1,37 @@ +## Lite Mode Poller Main Loop (Design) + +This component advances the local safe/finalized labels from an external L2 RPC. 
+ +Inputs: +- Endpoint: `--lite-mode-rpc` (also `OP_NODE_LITE_MODE_RPC`) +- Poll interval: `--lite-mode-poll-interval` + +Responsibilities: +- Read external safe/finalized heads periodically +- Apply finalized first, then safe +- Advance safe head strictly along parent links; on mismatch walk backwards to find a connecting ancestor + +Main loop: +1. Every `Interval`: + - Fetch `finalizedHeadNum` using `eth_getBlockByNumber("finalized", false)` + - Fetch `safeHeadNum` using `eth_getBlockByNumber("safe", false)` + - If present, apply finalized by number + - If present, advance safe to `safeHeadNum` + +Advance safe to N: +- Let `local` be the current local safe head +- While `local.Number < N`: + - Fetch `b = eth_getBlockByNumber(local.Number+1)` + - If `b.ParentHash == local.Hash`: apply `b` as safe + - Else (mismatch): walk backwards from `b.Number-1` until a block `pb` with `pb.ParentHash == local.Hash` is found + - If found: set `b = pb` and apply as safe (repeat) + - If none found before reaching `local.Number`: stop (wait for next tick) + +Applying labels: +- Finalized: `SetFinalizedHead(ext)` then `TryUpdateEngine` +- Safe: set both local-safe and cross-safe to `ext`, then `TryUpdateEngine` + +Notes: +- The loop intentionally stops on data gaps or mismatches and waits for the next tick. +- Unsafe behavior is unchanged; only safe/finalized labels are adjusted. 
+ diff --git a/op-node/litemode/litemode.go b/op-node/litemode/litemode.go new file mode 100644 index 00000000000..64749fa5bcb --- /dev/null +++ b/op-node/litemode/litemode.go @@ -0,0 +1,487 @@ +package litemode + +import ( + "context" + "crypto/sha256" + "encoding/binary" + "strconv" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + gethrpc "github.com/ethereum/go-ethereum/rpc" + + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" + opsigner "github.com/ethereum-optimism/optimism/op-service/signer" + opsources "github.com/ethereum-optimism/optimism/op-service/sources" +) + +type Engine interface { + UnsafeL2Head() eth.L2BlockRef + SafeL2Head() eth.L2BlockRef + Finalized() eth.L2BlockRef + SetUnsafeHead(eth.L2BlockRef) + SetSafeHead(eth.L2BlockRef) + SetLocalSafeHead(eth.L2BlockRef) + SetFinalizedHead(eth.L2BlockRef) + SetCrossUnsafeHead(eth.L2BlockRef) + TryUpdateEngine(ctx context.Context) + CommitBlock(ctx context.Context, signed *opsigner.SignedExecutionPayloadEnvelope) error +} + +type L2 interface { + L2BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L2BlockRef, error) + L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, error) +} + +type Config struct { + RPC string + Interval time.Duration +} + +type Client struct { + log log.Logger + cfg Config + eng Engine + l2 L2 + cancel context.CancelFunc + fetch BlockFetcher + // buildEnvelopeFn builds an ExecutionPayloadEnvelope for the given block number. + // Defaults to buildEnvelopeByNumber, and is overridden in unit tests. 
+ buildEnvelopeFn func(ctx context.Context, num uint64) (*eth.ExecutionPayloadEnvelope, bool) +} + +func New(cfg Config, log log.Logger, eng Engine, l2 L2) *Client { + c := &Client{cfg: cfg, log: log.New("module", "lite-mode"), eng: eng, l2: l2} + c.buildEnvelopeFn = c.buildEnvelopeByNumber + return c +} + +func (c *Client) Start(ctx context.Context) error { + if c.cfg.RPC == "" { + return nil + } + cctx, cancel := context.WithCancel(ctx) + c.cancel = cancel + raw, err := gethrpc.DialContext(ctx, c.cfg.RPC) + if err != nil { + return err + } + cli := client.NewBaseRPCClient(raw) + c.fetch = &rpcFetcher{cli: cli} + ticker := time.NewTicker(c.cfg.Interval) + go func() { + defer ticker.Stop() + for { + select { + case <-cctx.Done(): + return + case <-ticker.C: + c.tick(cctx) + } + } + }() + return nil +} + +func (c *Client) Close() { + if c.cancel != nil { + c.cancel() + } +} + +func (c *Client) tick(ctx context.Context) { + if c.fetch == nil { + return + } + // Ensure engine heads are initialized before attempting any advancement. 
+ if s := c.eng.SafeL2Head(); s.Number == 0 && c.eng.UnsafeL2Head().Number == 0 && c.eng.Finalized().Number == 0 { + c.log.Debug("Waiting for engine heads to initialize before advancing") + return + } + // Fetch targets first + safeNum, hasSafe := c.fetch.SafeHeadNumber(ctx) + finNum, hasFin := c.fetch.FinalizedHeadNumber(ctx) + // Step finalized by at most one, only when present locally and not beyond safe + if hasFin { + c.stepFinalizedOne(ctx, finNum) + } + // Step safe by at most one: find common ancestor, then ingest next block + if hasSafe { + c.stepSafeOne(ctx, safeNum) + } +} + +func fetchBlockByTag(ctx context.Context, cli client.RPC, tag string) (eth.L2BlockRef, bool) { + var res struct { + Hash common.Hash `json:"hash"` + Number string `json:"number"` + ParentHash common.Hash `json:"parentHash"` + Timestamp string `json:"timestamp"` + } + if err := cli.CallContext(ctx, &res, "eth_getBlockByNumber", tag, false); err != nil { + return eth.L2BlockRef{}, false + } + if res.Hash == (common.Hash{}) { + return eth.L2BlockRef{}, false + } + num, ok := parseHexUint64(res.Number) + if !ok { + return eth.L2BlockRef{}, false + } + tim, ok := parseHexUint64(res.Timestamp) + if !ok { + tim = 0 + } + return eth.L2BlockRef{Hash: res.Hash, Number: num, ParentHash: res.ParentHash, Time: tim}, true +} + +func (c *Client) applySafe(ctx context.Context, ext eth.L2BlockRef) { + localSafe := c.eng.SafeL2Head() + if localSafe.Hash == ext.Hash { + return + } + if _, err := c.l2.L2BlockRefByHash(ctx, ext.Hash); err != nil { + c.log.Debug("Skipping safe update: block not present locally", "num", ext.Number, "hash", ext.Hash) + return + } + // Also advance local unsafe to match, if behind + if u := c.eng.UnsafeL2Head(); ext.Number > u.Number { + c.eng.SetUnsafeHead(ext) + } + c.eng.SetLocalSafeHead(ext) + c.eng.SetSafeHead(ext) + c.eng.TryUpdateEngine(ctx) +} + +func (c *Client) applyFinalized(ctx context.Context, ext eth.L2BlockRef) { + localFin := c.eng.Finalized() + if 
localFin.Hash == ext.Hash { + return + } + if _, err := c.l2.L2BlockRefByHash(ctx, ext.Hash); err != nil { + c.log.Debug("Skipping finalized update: block not present locally", "num", ext.Number, "hash", ext.Hash) + return + } + c.eng.SetFinalizedHead(ext) + c.eng.TryUpdateEngine(ctx) +} + +func (c *Client) applyFinalizedByNumber(ctx context.Context, num uint64) { + if num == 0 { + return + } + if b, ok := c.fetch.BlockByNumber(ctx, num); ok { + c.applyFinalized(ctx, b) + } +} + +// stepFinalizedOne advances finalized by at most one block towards targetFinNum. +// It never moves beyond the local safe head, and only labels blocks present locally. +func (c *Client) stepFinalizedOne(ctx context.Context, targetFinNum uint64) { + if targetFinNum == 0 { + return + } + // Do not finalize beyond safe or unsafe + if u := c.eng.UnsafeL2Head(); targetFinNum > u.Number { + targetFinNum = u.Number + } + if s := c.eng.SafeL2Head(); targetFinNum > s.Number { + targetFinNum = s.Number + } + localFin := c.eng.Finalized() + if localFin.Number >= targetFinNum { + return + } + // Walk back from localFin towards a matching remote ancestor if needed + anchor := localFin + // if local finalized is zero, try to anchor at current safe head number + if anchor.Number == 0 { + anchor = c.eng.SafeL2Head() + } + // Find first height <= anchor that matches remote + for { + if rb, ok := c.fetch.BlockByNumber(ctx, anchor.Number); ok { + if rb.Hash == anchor.Hash { + break + } + } else { + return + } + if anchor.Number == 0 { + return + } + anchor.Number-- + // refresh anchor hash from local if available + if lr, err := c.l2.L2BlockRefByHash(ctx, anchor.Hash); err == nil { + anchor = lr + } + } + nextNum := anchor.Number + 1 + if nextNum > targetFinNum { + return + } + // Only set finalized if the next block exists locally (safe loop will ingest it) + if rb, ok := c.fetch.BlockByNumber(ctx, nextNum); ok { + if _, err := c.l2.L2BlockRefByHash(ctx, rb.Hash); err == nil { + c.applyFinalized(ctx, rb) + 
} + } +} + +// advanceSafeTo increments local safe towards the target using sequential block-by-number RPCs. +func (c *Client) advanceSafeTo(ctx context.Context, targetSafeNum uint64) { + local := c.eng.SafeL2Head() + if local.Number < targetSafeNum { + c.log.Info("Advancing safe head", "from", local.Number, "to", targetSafeNum) + } + for local.Number < targetSafeNum { + nextNum := local.Number + 1 + b, ok := c.fetch.BlockByNumber(ctx, nextNum) + if !ok { + return + } + // Ensure block is present locally by committing its payload if missing. + if _, err := c.l2.L2BlockRefByHash(ctx, b.Hash); err != nil { + build := c.buildEnvelopeFn + if build == nil { + build = c.buildEnvelopeByNumber + } + if env, ok := build(ctx, nextNum); ok { + signed := &opsigner.SignedExecutionPayloadEnvelope{Envelope: env} + if err := c.eng.CommitBlock(ctx, signed); err != nil { + c.log.Warn("CommitBlock failed", "num", nextNum, "err", err) + return + } + } else { + c.log.Warn("Failed to build payload envelope for block", "num", nextNum) + return + } + } + if b.ParentHash != local.Hash { + // Walk back from the target head to find a connecting block + probeNum := targetSafeNum + connected := false + for probeNum > local.Number { + pb, ok := c.fetch.BlockByNumber(ctx, probeNum) + if !ok { + return + } + if _, err := c.l2.L2BlockRefByHash(ctx, pb.Hash); err != nil { + build := c.buildEnvelopeFn + if build == nil { + build = c.buildEnvelopeByNumber + } + if env, ok := build(ctx, probeNum); ok { + signed := &opsigner.SignedExecutionPayloadEnvelope{Envelope: env} + if err := c.eng.CommitBlock(ctx, signed); err != nil { + c.log.Warn("CommitBlock failed (backtrack)", "num", probeNum, "err", err) + return + } + } else { + c.log.Warn("Failed to build payload envelope (backtrack)", "num", probeNum) + return + } + } + if pb.ParentHash == local.Hash { + b = pb + connected = true + break + } + probeNum-- + } + if !connected { + return + } + } + c.log.Info("Applying safe block", "num", b.Number, "hash", 
b.Hash) + c.applySafe(ctx, b) + local = b + } +} + +// stepSafeOne advances safe by at most one block toward targetSafeNum. +// It finds a common ancestor with the remote and commits exactly the next block. +func (c *Client) stepSafeOne(ctx context.Context, targetSafeNum uint64) { + local := c.eng.SafeL2Head() + if local.Number >= targetSafeNum { + return + } + // Find common ancestor by comparing hashes at the same height, walking back + anchor := local + for { + // Remote block at current anchor height + rb, ok := c.fetch.BlockByNumber(ctx, anchor.Number) + if !ok { + return + } + if rb.Hash == anchor.Hash { + break + } + if anchor.Number == 0 { + return + } + // step back one height locally + anchor.Number-- + // best-effort refresh of local anchor hash if available + if lr, err := c.l2.L2BlockRefByHash(ctx, anchor.Hash); err == nil { + anchor = lr + } + } + // Propose the next block after the matching anchor + nextNum := anchor.Number + 1 + if nextNum > targetSafeNum { + return + } + nb, ok := c.fetch.BlockByNumber(ctx, nextNum) + if !ok { + return + } + if nb.ParentHash != anchor.Hash { + return + } + // Compare against local EL canonical block at the same height. If the local canonical + // does not match the remote block, reorg unsafe back to the local canonical block + // and try again on the next tick. 
+ if localAtNext, err := c.l2.L2BlockRefByNumber(ctx, nextNum); err == nil { + if localAtNext.Hash != nb.Hash { + c.eng.SetUnsafeHead(localAtNext) + c.eng.TryUpdateEngine(ctx) + return + } + } + // Ensure present locally; if not, commit its payload + if _, err := c.l2.L2BlockRefByHash(ctx, nb.Hash); err != nil { + build := c.buildEnvelopeFn + if build == nil { + build = c.buildEnvelopeByNumber + } + if env, ok := build(ctx, nextNum); ok { + signed := &opsigner.SignedExecutionPayloadEnvelope{Envelope: env} + if err := c.eng.CommitBlock(ctx, signed); err != nil { + c.log.Warn("CommitBlock failed (single-step)", "num", nextNum, "err", err) + return + } + } else { + c.log.Warn("Failed to build payload envelope (single-step)", "num", nextNum) + return + } + } + c.log.Info("Applying safe block", "num", nb.Number, "hash", nb.Hash) + c.applySafe(ctx, nb) +} + +// buildEnvelopeByNumber reconstructs an ExecutionPayloadEnvelope from JSON-RPC for the given block number. +func (c *Client) buildEnvelopeByNumber(ctx context.Context, num uint64) (*eth.ExecutionPayloadEnvelope, bool) { + rf, ok := c.fetch.(*rpcFetcher) + if !ok { + return nil, false + } + hexNum := "0x" + strconv.FormatUint(num, 16) + var full opsources.RPCBlock + if err := rf.cli.CallContext(ctx, &full, "eth_getBlockByNumber", hexNum, true); err != nil { + return nil, false + } + env, err := full.ExecutionPayloadEnvelope(false) + if err != nil { + return nil, false + } + return env, true +} + +// parseHexUint64 parses 0x-prefixed hex numbers to uint64 +func parseHexUint64(s string) (uint64, bool) { + if len(s) < 3 || s[:2] != "0x" { + return 0, false + } + var v uint64 + for i := 2; i < len(s); i++ { + ch := s[i] + var d byte + switch { + case '0' <= ch && ch <= '9': + d = ch - '0' + case 'a' <= ch && ch <= 'f': + d = ch - 'a' + 10 + case 'A' <= ch && ch <= 'F': + d = ch - 'A' + 10 + default: + return 0, false + } + v = (v << 4) | uint64(d) + } + return v, true +} + +// BlockFetcher abstracts fetching blocks and 
head numbers for safe/finalized.
+type BlockFetcher interface {
+	SafeHeadNumber(ctx context.Context) (uint64, bool)
+	FinalizedHeadNumber(ctx context.Context) (uint64, bool)
+	BlockByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, bool)
+}
+
+type rpcFetcher struct{ cli client.RPC }
+
+func (r *rpcFetcher) SafeHeadNumber(ctx context.Context) (uint64, bool) {
+	if b, ok := fetchBlockByTag(ctx, r.cli, "safe"); ok {
+		return b.Number, true
+	}
+	return 0, false
+}
+
+func (r *rpcFetcher) FinalizedHeadNumber(ctx context.Context) (uint64, bool) {
+	if b, ok := fetchBlockByTag(ctx, r.cli, "finalized"); ok {
+		return b.Number, true
+	}
+	return 0, false
+}
+
+func (r *rpcFetcher) BlockByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, bool) {
+	hex := "0x" + strconv.FormatUint(num, 16)
+	var res struct {
+		Hash       common.Hash `json:"hash"`
+		Number     string      `json:"number"`
+		ParentHash common.Hash `json:"parentHash"`
+		Timestamp  string      `json:"timestamp"`
+	}
+	if err := r.cli.CallContext(ctx, &res, "eth_getBlockByNumber", hex, false); err != nil {
+		return eth.L2BlockRef{}, false
+	}
+	if res.Hash == (common.Hash{}) {
+		return eth.L2BlockRef{}, false
+	}
+	n, ok := parseHexUint64(res.Number)
+	if !ok {
+		return eth.L2BlockRef{}, false
+	}
+	t, _ := parseHexUint64(res.Timestamp)
+	return eth.L2BlockRef{Hash: res.Hash, Number: n, ParentHash: res.ParentHash, Time: t}, true
+}
+
+// computePayloadID deterministically derives a payload ID from parent hash and payload attributes (no Engine API).
+func computePayloadID(parent common.Hash, attrs *eth.PayloadAttributes) eth.PayloadID {
+	h := sha256.New()
+	h.Write(parent[:])
+	_ = binary.Write(h, binary.BigEndian, attrs.Timestamp)
+	h.Write(attrs.PrevRandao[:])
+	h.Write(attrs.SuggestedFeeRecipient[:])
+	_ = binary.Write(h, binary.BigEndian, attrs.NoTxPool)
+	_ = binary.Write(h, binary.BigEndian, uint64(len(attrs.Transactions)))
+	for _, tx := range attrs.Transactions {
+		_ = binary.Write(h, binary.BigEndian, uint64(len(tx)))
+		h.Write(tx)
+	}
+	if attrs.GasLimit != nil {
+		_ = binary.Write(h, binary.BigEndian, *attrs.GasLimit)
+	}
+	if attrs.EIP1559Params != nil {
+		h.Write(attrs.EIP1559Params[:])
+	}
+	if attrs.MinBaseFee != nil {
+		_ = binary.Write(h, binary.BigEndian, *attrs.MinBaseFee)
+	}
+	var out eth.PayloadID
+	copy(out[:], h.Sum(nil)[:8])
+	return out
+}
diff --git a/op-node/litemode/litemode_test.go b/op-node/litemode/litemode_test.go
new file mode 100644
index 00000000000..dd6d268f63c
--- /dev/null
+++ b/op-node/litemode/litemode_test.go
@@ -0,0 +1,232 @@
+package litemode
+
+import (
+	"context"
+	"testing"
+
+	"github.com/ethereum-optimism/optimism/op-service/eth"
+	opsigner "github.com/ethereum-optimism/optimism/op-service/signer"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/stretchr/testify/require"
+)
+
+type fakeEngine struct {
+	safe      eth.L2BlockRef
+	localSafe eth.L2BlockRef
+	fin       eth.L2BlockRef
+	committed []uint64
+	l2        *fakeL2
+}
+
+func (f *fakeEngine) UnsafeL2Head() eth.L2BlockRef      { return eth.L2BlockRef{} }
+func (f *fakeEngine) SafeL2Head() eth.L2BlockRef        { return f.safe }
+func (f *fakeEngine) Finalized() eth.L2BlockRef         { return f.fin }
+func (f *fakeEngine) SetUnsafeHead(r eth.L2BlockRef)    {}
+func (f *fakeEngine) SetSafeHead(r eth.L2BlockRef)      { f.safe = r }
+func (f *fakeEngine) SetLocalSafeHead(r eth.L2BlockRef) { f.localSafe = r }
+func (f *fakeEngine) SetFinalizedHead(r eth.L2BlockRef) { f.fin = r }
+func (f *fakeEngine)
SetCrossUnsafeHead(r eth.L2BlockRef) {} +func (f *fakeEngine) TryUpdateEngine(ctx context.Context) {} +func (f *fakeEngine) CommitBlock(ctx context.Context, signed *opsigner.SignedExecutionPayloadEnvelope) error { + if signed != nil && signed.Envelope != nil && signed.Envelope.ExecutionPayload != nil { + f.committed = append(f.committed, uint64(signed.Envelope.ExecutionPayload.BlockNumber)) + if f.l2 != nil { + f.l2.markPresent(signed.Envelope.ExecutionPayload.BlockHash) + } + } + return nil +} + +type fakeL2 struct{ present map[common.Hash]bool } + +func (f *fakeL2) L2BlockRefByHash(ctx context.Context, h common.Hash) (eth.L2BlockRef, error) { + if f.present != nil && f.present[h] { + return eth.L2BlockRef{Hash: h}, nil + } + return eth.L2BlockRef{}, context.DeadlineExceeded +} + +func (f *fakeL2) L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, error) { + // Create a synthetic hash from the number for tests that call by number. + // Tests that require specific hashes will ensure presence via markPresent. 
+ b := []byte{byte(num)} + h := common.BytesToHash(b) + if f.present != nil && f.present[h] { + return eth.L2BlockRef{Hash: h, Number: num}, nil + } + return eth.L2BlockRef{}, context.DeadlineExceeded +} + +func (f *fakeL2) markPresent(h common.Hash) { + if f.present == nil { + f.present = make(map[common.Hash]bool) + } + f.present[h] = true +} + +func TestApplyFinalizedAndSafe(t *testing.T) { + ctx := context.Background() + l2 := &fakeL2{} + eng := &fakeEngine{l2: l2} + c := New(Config{RPC: "", Interval: 0}, log.New(), eng, l2) + + fin := eth.L2BlockRef{Hash: common.HexToHash("0x01"), Number: 10} + l2.markPresent(fin.Hash) + c.applyFinalized(ctx, fin) + require.Equal(t, uint64(10), eng.Finalized().Number) + + safe := eth.L2BlockRef{Hash: common.HexToHash("0x02"), Number: 12} + l2.markPresent(safe.Hash) + c.applySafe(ctx, safe) + require.Equal(t, uint64(12), eng.SafeL2Head().Number) +} + +type fakeFetcher struct { + safeNum uint64 + finNum uint64 + blocks map[uint64]eth.L2BlockRef +} + +func (f *fakeFetcher) SafeHeadNumber(ctx context.Context) (uint64, bool) { + return f.safeNum, f.safeNum != 0 +} +func (f *fakeFetcher) FinalizedHeadNumber(ctx context.Context) (uint64, bool) { + return f.finNum, f.finNum != 0 +} +func (f *fakeFetcher) BlockByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, bool) { + b, ok := f.blocks[num] + return b, ok +} + +func mkRef(num int, hashHex string, parentHex string) eth.L2BlockRef { + return eth.L2BlockRef{ + Hash: common.HexToHash(hashHex), + Number: uint64(num), + ParentHash: common.HexToHash(parentHex), + } +} + +func TestAdvanceSafeStraightLine(t *testing.T) { + ctx := context.Background() + l2 := &fakeL2{} + eng := &fakeEngine{l2: l2} + c := New(Config{RPC: "", Interval: 0}, log.New(), eng, l2) + + // local safe at 0 + h0 := common.HexToHash("0x00") + eng.safe = eth.L2BlockRef{Hash: h0, Number: 0} + + ff := &fakeFetcher{ + safeNum: 3, + blocks: map[uint64]eth.L2BlockRef{ + 1: mkRef(1, "0x01", "0x00"), + 2: mkRef(2, "0x02", 
"0x01"),
+			3: mkRef(3, "0x03", "0x02"),
+		},
+	}
+	c.fetch = ff
+	// Inject a payload builder that returns envelopes with matching numbers
+	c.buildEnvelopeFn = func(ctx context.Context, num uint64) (*eth.ExecutionPayloadEnvelope, bool) {
+		h := common.HexToHash("0x" + common.Bytes2Hex([]byte{byte(num)}))
+		return &eth.ExecutionPayloadEnvelope{ExecutionPayload: &eth.ExecutionPayload{BlockNumber: eth.Uint64Quantity(num), BlockHash: h}}, true
+	}
+
+	c.advanceSafeTo(ctx, 3)
+	require.Equal(t, uint64(3), eng.safe.Number)
+	require.Equal(t, uint64(3), eng.localSafe.Number)
+	require.Equal(t, []uint64{1, 2, 3}, eng.committed)
+}
+
+func TestAdvanceSafeStopsOnMismatch(t *testing.T) {
+	ctx := context.Background()
+	eng := &fakeEngine{}
+	l2 := &fakeL2{}
+	c := New(Config{RPC: "", Interval: 0}, log.New(), eng, l2)
+
+	// local safe at 1
+	h1 := common.HexToHash("0x11")
+	eng.safe = eth.L2BlockRef{Hash: h1, Number: 1}
+
+	// external chain 2 does not build on 1
+	ff := &fakeFetcher{
+		safeNum: 3,
+		blocks: map[uint64]eth.L2BlockRef{
+			2: mkRef(2, "0x22", "0xaa"),
+			3: mkRef(3, "0x33", "0x22"),
+		},
+	}
+	c.fetch = ff
+
+	c.advanceSafeTo(ctx, 3)
+	// Should not move since cannot connect
+	require.Equal(t, uint64(1), eng.safe.Number)
+}
+
+func TestAdvanceSafeStopsOnMissingBlock(t *testing.T) {
+	ctx := context.Background()
+	l2 := &fakeL2{}
+	eng := &fakeEngine{l2: l2}
+	c := New(Config{RPC: "", Interval: 0}, log.New(), eng, l2)
+
+	// local safe at 0
+	h0 := common.HexToHash("0x00")
+	eng.safe = eth.L2BlockRef{Hash: h0, Number: 0}
+
+	// missing block 1
+	ff := &fakeFetcher{
+		safeNum: 2,
+		blocks: map[uint64]eth.L2BlockRef{
+			2: mkRef(2, "0x02", "0x01"),
+		},
+	}
+	c.fetch = ff
+	c.buildEnvelopeFn = func(ctx context.Context, num uint64) (*eth.ExecutionPayloadEnvelope, bool) {
+		if num == 1 {
+			h := common.HexToHash("0x" + common.Bytes2Hex([]byte{byte(num)}))
+			return &eth.ExecutionPayloadEnvelope{ExecutionPayload: &eth.ExecutionPayload{BlockNumber: eth.Uint64Quantity(num), BlockHash:
h}}, true
+		}
+		return nil, false
+	}
+
+	c.advanceSafeTo(ctx, 2)
+	// Should remain at 0
+	require.Equal(t, uint64(0), eng.safe.Number)
+}
+
+func TestAdvanceSafeReorgWithBacktrackCommit(t *testing.T) {
+	ctx := context.Background()
+	l2 := &fakeL2{}
+	eng := &fakeEngine{l2: l2}
+	c := New(Config{RPC: "", Interval: 0}, log.New(), eng, l2)
+
+	// local safe at 5
+	h5 := common.HexToHash("0x05")
+	eng.safe = eth.L2BlockRef{Hash: h5, Number: 5}
+
+	// external reorg: 6a (parent 5), 7a (parent 6a)
+	ff := &fakeFetcher{
+		safeNum: 7,
+		blocks: map[uint64]eth.L2BlockRef{
+			6: mkRef(6, "0x06a", "0x05"),
+			7: mkRef(7, "0x07a", "0x06a"),
+		},
+	}
+	c.fetch = ff
+	c.buildEnvelopeFn = func(ctx context.Context, num uint64) (*eth.ExecutionPayloadEnvelope, bool) {
+		switch num {
+		case 6:
+			return &eth.ExecutionPayloadEnvelope{ExecutionPayload: &eth.ExecutionPayload{BlockNumber: eth.Uint64Quantity(6), BlockHash: common.HexToHash("0x06a")}}, true
+		case 7:
+			return &eth.ExecutionPayloadEnvelope{ExecutionPayload: &eth.ExecutionPayload{BlockNumber: eth.Uint64Quantity(7), BlockHash: common.HexToHash("0x07a")}}, true
+		default:
+			return nil, false
+		}
+	}
+
+	c.advanceSafeTo(ctx, 7)
+	require.Equal(t, uint64(7), eng.safe.Number)
+	require.Equal(t, []uint64{6, 7}, eng.committed)
+}
+
+// no-op: using geth log.New() which is a valid Logger implementation
diff --git a/op-node/node/node.go b/op-node/node/node.go
index 7c0dc9dc393..40767d7d3ce 100644
--- a/op-node/node/node.go
+++ b/op-node/node/node.go
@@ -222,7 +222,9 @@ func (n *OpNode) initL1Handlers(cfg *config.Config) error {
 	// TODO(#16917) Remove Event System Refactor Comments
 	// FinalizeL1Event fan out is updated to procedural method calls
 	n.l2Driver.StatusTracker.OnL1Finalized(sig)
-	n.l2Driver.Finalizer.OnL1Finalized(sig)
+	if n.l2Driver.Finalizer != nil {
+		n.l2Driver.Finalizer.OnL1Finalized(sig)
+	}
 	n.l2Driver.SyncDeriver.OnL1Finalized(ctx)
 }
 
diff --git a/op-node/rollup/driver/config.go b/op-node/rollup/driver/config.go
index
5446e4da16a..974a1593a2b 100644 --- a/op-node/rollup/driver/config.go +++ b/op-node/rollup/driver/config.go @@ -1,5 +1,7 @@ package driver +import "time" + type Config struct { // VerifierConfDepth is the distance to keep from the L1 head when reading L1 data for L2 derivation. VerifierConfDepth uint64 `json:"verifier_conf_depth"` @@ -24,4 +26,9 @@ type Config struct { // RecoverMode forces the sequencer to select the next L1 Origin exactly, and create an empty block, // to be compatible with verifiers forcefully generating the same block while catching up the sequencing window timeout. RecoverMode bool `json:"recover_mode"` + + // Experimental: External lite mode sourcing via RPC + LiteModeRPC string `json:"lite_mode_rpc"` + // Poll interval for external lite mode updates + LiteModePollInterval time.Duration `json:"lite_mode_poll_interval"` } diff --git a/op-node/rollup/driver/driver.go b/op-node/rollup/driver/driver.go index 0d746fbca78..0fcf5e06b28 100644 --- a/op-node/rollup/driver/driver.go +++ b/op-node/rollup/driver/driver.go @@ -10,6 +10,7 @@ import ( gosync "sync" + "github.com/ethereum-optimism/optimism/op-node/litemode" "github.com/ethereum-optimism/optimism/op-node/metrics/metered" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/async" @@ -59,6 +60,8 @@ func NewDriver( verifConfDepth := confdepth.NewConfDepth(driverCfg.VerifierConfDepth, statusTracker.L1Head, l1) ec := engine.NewEngineController(driverCtx, l2, log, metrics, cfg, syncCfg, sys.Register("engine-controller", nil)) + // Enable external lite mode sourcing if configured + ec.SetLiteModeEnabled(driverCfg.LiteModeRPC != "") // TODO(#17115): Refactor dependency cycles ec.SetCrossUpdateHandler(statusTracker) @@ -70,12 +73,27 @@ func NewDriver( sys.Register("cl-sync", clSync) var finalizer Finalizer - if cfg.AltDAEnabled() { - finalizer = finality.NewAltDAFinalizer(driverCtx, log, cfg, l1, altDA, ec) + if !ec.LiteModeEnabled() { + if 
cfg.AltDAEnabled() { + finalizer = finality.NewAltDAFinalizer(driverCtx, log, cfg, l1, altDA, ec) + } else { + finalizer = finality.NewFinalizer(driverCtx, log, cfg, l1, ec) + } + sys.Register("finalizer", finalizer) } else { - finalizer = finality.NewFinalizer(driverCtx, log, cfg, l1, ec) + log.Info("Lite mode RPC enabled: skipping local finalizer wiring") + } + + // If enabled, set up lite mode poller + if ec.LiteModeEnabled() { + sbCfg := litemode.Config{RPC: driverCfg.LiteModeRPC, Interval: driverCfg.LiteModePollInterval} + poller := litemode.New(sbCfg, log, ec, l2) + sys.Register("lite-mode", event.DeriverFunc(func(evCtx context.Context, ev event.Event) bool { return false })) + // Start poller after driver start + go func() { + _ = poller.Start(driverCtx) + }() } - sys.Register("finalizer", finalizer) attrHandler := attributes.NewAttributesHandler(log, cfg, driverCtx, l2, ec) sys.Register("attributes-handler", attrHandler) diff --git a/op-node/rollup/engine/engine_controller.go b/op-node/rollup/engine/engine_controller.go index 34e824610fb..243d5b58757 100644 --- a/op-node/rollup/engine/engine_controller.go +++ b/op-node/rollup/engine/engine_controller.go @@ -129,6 +129,10 @@ type EngineController struct { // Handler for cross-unsafe and cross-safe updates crossUpdateHandler CrossUpdateHandler + + // When lite mode is enabled, safe/finalization updates are sourced externally and these + // local derivation promotion paths should be inert. + liteModeEnabled bool } func NewEngineController(ctx context.Context, engine ExecEngine, log log.Logger, m opmetrics.Metricer, @@ -153,6 +157,12 @@ func NewEngineController(ctx context.Context, engine ExecEngine, log log.Logger, } } +// LiteModeEnabled returns whether external lite mode sourcing is enabled. +func (e *EngineController) LiteModeEnabled() bool { return e.liteModeEnabled } + +// SetLiteModeEnabled toggles external lite mode sourcing behavior. 
+func (e *EngineController) SetLiteModeEnabled(enabled bool) { e.liteModeEnabled = enabled } + // State Getters func (e *EngineController) UnsafeL2Head() eth.L2BlockRef { @@ -204,6 +214,7 @@ func (e *EngineController) IsEngineSyncing() bool { // SetFinalizedHead implements LocalEngineControl. func (e *EngineController) SetFinalizedHead(r eth.L2BlockRef) { + e.log.Info("Set finalized head", "hash", r.Hash, "num", r.Number) e.metrics.RecordL2Ref("l2_finalized", r) e.finalizedHead = r e.needFCUCall = true @@ -217,12 +228,14 @@ func (e *EngineController) SetPendingSafeL2Head(r eth.L2BlockRef) { // SetLocalSafeHead sets the local-safe head. func (e *EngineController) SetLocalSafeHead(r eth.L2BlockRef) { + e.log.Info("Set local safe head", "hash", r.Hash, "num", r.Number) e.metrics.RecordL2Ref("l2_local_safe", r) e.localSafeHead = r } // SetSafeHead sets the cross-safe head. func (e *EngineController) SetSafeHead(r eth.L2BlockRef) { + e.log.Info("Set safe head", "hash", r.Hash, "num", r.Number) e.metrics.RecordL2Ref("l2_safe", r) e.safeHead = r e.needFCUCall = true @@ -733,6 +746,9 @@ func (d *EngineController) RequestPendingSafeUpdate(ctx context.Context) { // TryUpdatePendingSafe updates the pending safe head if the new reference is newer func (e *EngineController) TryUpdatePendingSafe(ctx context.Context, ref eth.L2BlockRef, concluding bool, source eth.L1BlockRef) { + if e.liteModeEnabled { + return + } // Only promote if not already stale. // Resets/overwrites happen through engine-resets, not through promotion. 
if ref.Number > e.PendingSafeL2Head().Number { @@ -747,6 +763,9 @@ func (e *EngineController) TryUpdatePendingSafe(ctx context.Context, ref eth.L2B // TryUpdateLocalSafe updates the local safe head if the new reference is newer and concluding func (e *EngineController) TryUpdateLocalSafe(ctx context.Context, ref eth.L2BlockRef, concluding bool, source eth.L1BlockRef) { + if e.liteModeEnabled { + return + } if concluding && ref.Number > e.LocalSafeL2Head().Number { // Promote to local safe e.log.Debug("Updating local safe", "local_safe", ref, "safe", e.SafeL2Head(), "unsafe", e.UnsafeL2Head()) @@ -766,6 +785,9 @@ func (e *EngineController) TryUpdateUnsafe(ctx context.Context, ref eth.L2BlockR } func (e *EngineController) PromoteSafe(ctx context.Context, ref eth.L2BlockRef, source eth.L1BlockRef) { + if e.liteModeEnabled { + return + } e.log.Debug("Updating safe", "safe", ref, "unsafe", e.UnsafeL2Head()) e.SetSafeHead(ref) // Finalizer can pick up this safe cross-block now @@ -781,6 +803,9 @@ func (e *EngineController) PromoteSafe(ctx context.Context, ref eth.L2BlockRef, } func (e *EngineController) PromoteFinalized(ctx context.Context, ref eth.L2BlockRef) { + if e.liteModeEnabled { + return + } if ref.Number < e.Finalized().Number { e.log.Error("Cannot rewind finality,", "ref", ref, "finalized", e.Finalized()) return diff --git a/op-node/service.go b/op-node/service.go index b27503f33a7..a7113797542 100644 --- a/op-node/service.go +++ b/op-node/service.go @@ -55,6 +55,9 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*config.Config, error) { configPersistence := NewConfigPersistence(ctx) driverConfig := NewDriverConfig(ctx) + // Thread lite mode RPC config into driver + driverConfig.LiteModeRPC = ctx.String(flags.LiteModeRPC.Name) + driverConfig.LiteModePollInterval = ctx.Duration(flags.LiteModePollInterval.Name) p2pSignerSetup, err := p2pcli.LoadSignerSetup(ctx, log) if err != nil { @@ -121,6 +124,15 @@ func NewConfig(ctx *cli.Context, log log.Logger) 
(*config.Config, error) { FetchWithdrawalRootFromState: ctx.Bool(flags.FetchWithdrawalRootFromState.Name), ExperimentalOPStackAPI: ctx.Bool(flags.ExperimentalOPStackAPI.Name), + LiteModeRPC: ctx.String(flags.LiteModeRPC.Name), + LiteModePollInterval: ctx.Duration(flags.LiteModePollInterval.Name), + } + + // Enforce: lite mode RPC cannot be used with interop/indexing + if cfg.LiteModeRPC != "" { + if ctx.String(flags.InteropRPCAddr.Name) != "" { + return nil, fmt.Errorf("lite mode RPC cannot run with interop/indexing enabled") + } } if err := cfg.LoadPersisted(log); err != nil {