diff --git a/op-core/derive/DESIGN.md b/op-core/derive/DESIGN.md new file mode 100644 index 0000000000000..bfadf40eada89 --- /dev/null +++ b/op-core/derive/DESIGN.md @@ -0,0 +1,172 @@ +# Derivation Iterator + +## Objective + +Derive L2 payload attributes from L1 data one block at a time, equivalent in +behavior to the existing streaming pipeline in `op-node/rollup/derive`, but +without I/O, caching, or state access beyond what the caller provides. + +```go +d, _ := NewDeriver(cfg, l1ChainConfig, lgr, safeHead, sysConfig) +d.AddL1Block(l1Blocks...) +attrs, l1Ref, err := d.Next(safeHead) +``` + +The `Deriver` iterator accepts L1 blocks incrementally and produces one +`PayloadAttributes` at a time. The caller executes each block on the engine, +then calls `Next` again with the updated safe head. + +## Motivation + +The existing derivation pipeline is streaming and pull-based: it requests L1 +data on demand, maintains internal state across steps, and interleaves I/O with +computation. This makes it difficult to test, reason about, and use in contexts +where all data is already available (ZK provers, auditing tools, replay +utilities). + +An earlier batch-mode `PureDerive` function took all L1 data upfront and +returned all derived blocks at once. This didn't match how derivation works in +practice: derive one block, execute on engine, verify, then derive the next. It +also couldn't validate parent hashes (needs L2 block hashes from execution) and +had no mechanism for L1 reorgs. + +The iterator solves both: incremental L1 ingestion, one-at-a-time derivation +with full `CheckBatch` validation including parent hash checks, and explicit +reorg handling via `Reset`. + +## Scope + +**In scope:** Post-Karst derivation only. Karst implies Holocene, Granite, +Fjord, and all prior forks. 
This simplifies the implementation: + +- Single-channel assembly (Holocene rule: one active channel at a time) +- Strict frame ordering (Holocene) +- No span batch overlap handling (Karst rejects overlapping span batches as + `BatchPast`) + +**Out of scope:** +- Pre-Karst derivation +- L2 execution (we produce attributes, not executed blocks) + +## API + +```go +var ErrNeedL1Data = errors.New("need more L1 data") +var ErrReorg = errors.New("L1 reorg detected") + +func NewDeriver(cfg, l1ChainConfig, lgr, safeHead, sysConfig) (*Deriver, error) + +// AddL1Block appends L1 blocks. Must be contiguous with previously added +// blocks. Returns ErrReorg on parent hash mismatch. +func (d *Deriver) AddL1Block(blocks ...L1Input) error + +// Next returns the next derived payload attributes and the L1 block they +// were derived from. Returns ErrNeedL1Data when more L1 blocks are needed. +func (d *Deriver) Next(safeHead eth.L2BlockRef) (*eth.PayloadAttributes, eth.L1BlockRef, error) + +// Reset clears all state back to the given safe head + system config. +// Used after L1 reorgs. The caller must re-add L1 blocks from the new chain. 
+func (d *Deriver) Reset(safeHead eth.L2BlockRef, sysConfig eth.SystemConfig) +``` + +## Architecture + +``` + AddL1Block + │ + ▼ +L1Input[] ──► frame parsing ──► channel assembly ──► batch decoding ──► CheckBatch ──► attribute building ──► PayloadAttributes + │ │ + timeout check parent hash check + (per L1 block) (via safe head) + │ + empty batch fallback + (seq window expired) +``` + +### Components + +| File | Responsibility | +|------|---------------| +| `deriver.go` | `Deriver` iterator: `NewDeriver`, `AddL1Block`, `Next`, `Reset` | +| `channels.go` | Push-based Holocene single-channel assembler | +| `batches.go` | `decodeBatches` (channel → singular batches via upstream decode) | +| `empty_batch.go` | `makeEmptyBatch` (pure function for seq window expiry) | +| `attributes.go` | `buildAttributes` (batch + L1 data → PayloadAttributes) | +| `types.go` | `L1Input`, `l2Cursor`, sentinel errors | + +### Next() Flow + +1. Try consuming from `pendingBatches`: + - `CheckBatch` → `BatchAccept`: build attributes, advance cursor, return + - `CheckBatch` → `BatchPast`: skip, try next batch + - `CheckBatch` → `BatchDrop`: discard remaining channel batches + - `CheckBatch` → `BatchUndecided`: return `ErrNeedL1Data` +2. Process more L1 blocks (`l1Pos < len(l1Blocks)`): + - Process config logs, check channel timeout + - Parse frames → assemble channel → if ready, decode into `pendingBatches` + - If got pending batches, go to step 1 + - After each L1 block, check for empty batches (seq window expired) +3. Return `ErrNeedL1Data` + +### Empty Batch Generation + +When no batcher data covers a time range and the sequencing window expires +(`currentL1.Number > cursor.L1Origin.Number + SeqWindowSize`), the pipeline +generates one empty batch to maintain L2 liveness. Epoch advancement follows +the rule: advance to the next L1 origin when the L2 timestamp >= the next L1 +block's timestamp. 
+ +## Batch Validation + +Batch validation is delegated entirely to upstream `derive.CheckBatch`, which +dispatches to `checkSingularBatch`. Since `Next` receives a full +`eth.L2BlockRef` with `Hash`, `checkSingularBatch` validates +`batch.ParentHash != l2SafeHead.Hash` — solving the parent hash problem that +the earlier batch-mode approach had to defer. + +`CheckBatch` expects `l1Blocks[0]` to match `safeHead.L1Origin`. The deriver +computes the starting index dynamically: + +```go +startIdx := safeHead.L1Origin.Number - d.firstL1Num +l1BlocksForCheck := d.l1Origins[startIdx:] +``` + +### Attribute Building Equivalence + +`buildAttributes` matches `derive.AttributesDeposited` for: +- L1 info deposit transaction (via `derive.L1InfoDeposit`) +- User deposits at epoch boundaries +- Sequencer transactions from the batch +- Canyon withdrawals, Ecotone parent beacon root +- Holocene EIP-1559 params, Jovian MinBaseFee +- Gas limit from system config +- `NoTxPool: true` + +Not included: network upgrade transactions (NUTs) for pre-Karst forks, since +all pre-Karst forks are already active. Future forks with NUTs must be added. 
+ +## Dependencies on Upstream + +The implementation reuses these upstream types and functions (aliased as +`opderive` to avoid naming conflict with this package): +- `opderive.ParseFrames`, `opderive.Channel`, `opderive.Frame` +- `opderive.BatchReader`, `opderive.GetSingularBatch`, `opderive.DeriveSpanBatch` +- `opderive.CheckBatch`, `opderive.CheckSpanBatchPrefix` +- `opderive.L1InfoDeposit` +- `opderive.ProcessSystemConfigUpdateLogEvent` +- `rollup.Config`, `rollup.ChainSpec` +- `eth.PayloadAttributes`, `eth.L1BlockRef`, `eth.L2BlockRef`, `eth.SystemConfig` + +## Testing + +Unit tests cover each component in isolation: +- `channels_test.go`: Channel assembly, timeout, frame ordering +- `attributes_test.go`: Payload attribute construction +- `types_test.go`: Cursor advancement, empty batch detection +- `batches_test.go`: Batch decoding from channel data +- `empty_batch_test.go`: Empty batch generation (same epoch, epoch advance, missing L1) +- `deriver_test.go`: Iterator integration tests (single batch, incremental L1, + empty batches, reorg detection, reorg reset, channel timeout, invalid batch + drop, parent hash check, pre-Karst rejection, multi-channel multi-epoch) diff --git a/op-core/derive/attributes.go b/op-core/derive/attributes.go new file mode 100644 index 0000000000000..e9cd4556b32fc --- /dev/null +++ b/op-core/derive/attributes.go @@ -0,0 +1,109 @@ +package derive + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + + "github.com/ethereum-optimism/optimism/op-core/predeploys" + "github.com/ethereum-optimism/optimism/op-node/rollup" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// buildAttributes constructs PayloadAttributes from a validated singular batch, +// its L1 origin, the current derivation cursor, and 
the active system config. +// +// Transaction ordering follows the OP Stack derivation spec: +// 1. L1 info deposit transaction (always first) +// 2. User deposit transactions (only at epoch boundaries) +// 3. Batch transactions from the sequencer +// +// Network upgrade transactions (NUTs) are not included because all pre-Karst +// forks are already active (the Deriver requires Karst), and Karst itself has +// no NUTs. Future forks with NUTs must be added here. +func buildAttributes( + batch *opderive.SingularBatch, + l1Block *L1Input, + cursor l2Cursor, + sysConfig eth.SystemConfig, + cfg *rollup.Config, + l1ChainConfig *params.ChainConfig, +) (*eth.PayloadAttributes, error) { + epochChanged := uint64(batch.EpochNum) != cursor.L1Origin.Number + + var seqNumber uint64 + if epochChanged { + seqNumber = 0 + } else { + seqNumber = cursor.SequenceNumber + 1 + } + + l2Timestamp := batch.Timestamp + + l1InfoTx, err := opderive.L1InfoDeposit(cfg, l1ChainConfig, sysConfig, seqNumber, eth.HeaderBlockInfo(l1Block.Header), l2Timestamp) + if err != nil { + return nil, fmt.Errorf("failed to create L1 info deposit tx: %w", err) + } + + encodedL1Info, err := types.NewTx(l1InfoTx).MarshalBinary() + if err != nil { + return nil, fmt.Errorf("failed to encode L1 info deposit tx: %w", err) + } + + txCount := 1 + len(batch.Transactions) + if epochChanged { + txCount += len(l1Block.Deposits) + } + txs := make([]hexutil.Bytes, 0, txCount) + txs = append(txs, encodedL1Info) + + if epochChanged { + for _, dep := range l1Block.Deposits { + encoded, err := types.NewTx(dep).MarshalBinary() + if err != nil { + return nil, fmt.Errorf("failed to encode user deposit tx: %w", err) + } + txs = append(txs, encoded) + } + } + + txs = append(txs, batch.Transactions...) 
+ + gasLimit := sysConfig.GasLimit + + var withdrawals *types.Withdrawals + if cfg.IsCanyon(l2Timestamp) { + withdrawals = &types.Withdrawals{} + } + + var parentBeaconRoot *common.Hash + if cfg.IsEcotone(l2Timestamp) { + parentBeaconRoot = new(common.Hash) + } + + attrs := &eth.PayloadAttributes{ + Timestamp: hexutil.Uint64(l2Timestamp), + PrevRandao: eth.Bytes32(l1Block.Header.MixDigest), + SuggestedFeeRecipient: predeploys.SequencerFeeVaultAddr, + Transactions: txs, + NoTxPool: true, + GasLimit: (*eth.Uint64Quantity)(&gasLimit), + Withdrawals: withdrawals, + ParentBeaconBlockRoot: parentBeaconRoot, + } + + if cfg.IsHolocene(l2Timestamp) { + attrs.EIP1559Params = new(eth.Bytes8) + *attrs.EIP1559Params = sysConfig.EIP1559Params + } + + if cfg.IsJovian(l2Timestamp) { + attrs.MinBaseFee = &sysConfig.MinBaseFee + } + + return attrs, nil +} diff --git a/op-core/derive/attributes_test.go b/op-core/derive/attributes_test.go new file mode 100644 index 0000000000000..a2d374fd1ab17 --- /dev/null +++ b/op-core/derive/attributes_test.go @@ -0,0 +1,245 @@ +package derive + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + + "github.com/ethereum-optimism/optimism/op-core/predeploys" + "github.com/ethereum-optimism/optimism/op-node/rollup" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/bigs" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/stretchr/testify/require" +) + +func TestBuildAttributes_EpochStart(t *testing.T) { + cfg := testRollupConfig() + sysConfig := testSystemConfig() + l1Block := makeTestL1Input(5) + l1Block.Deposits = []*types.DepositTx{makeTestDeposit(), makeTestDeposit()} + + l1Num := bigs.Uint64Strict(l1Block.Header.Number) + l1Hash := l1Block.Header.Hash() + + userTx := hexutil.Bytes{0x01, 0x02, 0x03} + batch := &opderive.SingularBatch{ +
ParentHash: common.HexToHash("0xaaaa"), + EpochNum: rollup.Epoch(l1Num), + EpochHash: l1Hash, + Timestamp: l1Block.Header.Time + cfg.BlockTime, + Transactions: []hexutil.Bytes{userTx}, + } + + // Cursor has a different L1 origin so this is an epoch boundary + cursor := l2Cursor{ + Number: 10, + Timestamp: batch.Timestamp - cfg.BlockTime, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xprev"), Number: l1Num - 1}, + SequenceNumber: 3, + } + + attrs, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) + require.NoError(t, err) + require.NotNil(t, attrs) + + // L1 info deposit + 2 user deposits + 1 batch tx = 4 + require.GreaterOrEqual(t, len(attrs.Transactions), 3) + require.Len(t, attrs.Transactions, 4) + + require.True(t, attrs.NoTxPool) + require.Equal(t, hexutil.Uint64(batch.Timestamp), attrs.Timestamp) + require.Equal(t, eth.Bytes32(l1Block.Header.MixDigest), attrs.PrevRandao) + require.Equal(t, predeploys.SequencerFeeVaultAddr, attrs.SuggestedFeeRecipient) + require.NotNil(t, attrs.GasLimit) + require.Equal(t, sysConfig.GasLimit, uint64(*attrs.GasLimit)) + require.NotNil(t, attrs.Withdrawals) + require.Empty(t, *attrs.Withdrawals) + + // The last transaction should be the batch tx + require.Equal(t, userTx, attrs.Transactions[len(attrs.Transactions)-1]) +} + +func TestBuildAttributes_SameEpoch(t *testing.T) { + cfg := testRollupConfig() + sysConfig := testSystemConfig() + l1Block := makeTestL1Input(5) + l1Block.Deposits = []*types.DepositTx{makeTestDeposit()} + + l1Num := bigs.Uint64Strict(l1Block.Header.Number) + l1Hash := l1Block.Header.Hash() + + userTx := hexutil.Bytes{0xaa, 0xbb} + batch := &opderive.SingularBatch{ + ParentHash: common.HexToHash("0xbbbb"), + EpochNum: rollup.Epoch(l1Num), + EpochHash: l1Hash, + Timestamp: l1Block.Header.Time + 2*cfg.BlockTime, + Transactions: []hexutil.Bytes{userTx}, + } + + // Same L1 origin -- not an epoch boundary + cursor := l2Cursor{ + Number: 10, + Timestamp: batch.Timestamp - 
cfg.BlockTime, + L1Origin: eth.BlockID{Hash: l1Hash, Number: l1Num}, + SequenceNumber: 2, + } + + attrs, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) + require.NoError(t, err) + require.NotNil(t, attrs) + + // L1 info deposit + 1 batch tx = 2 (no user deposits because same epoch) + require.GreaterOrEqual(t, len(attrs.Transactions), 2) + require.Len(t, attrs.Transactions, 2) + + require.True(t, attrs.NoTxPool) + require.Equal(t, hexutil.Uint64(batch.Timestamp), attrs.Timestamp) + + // The last transaction should be the batch tx + require.Equal(t, userTx, attrs.Transactions[len(attrs.Transactions)-1]) +} + +func TestBuildAttributes_EmptyBatch(t *testing.T) { + cfg := testRollupConfig() + sysConfig := testSystemConfig() + + t.Run("empty batch at epoch start", func(t *testing.T) { + l1Block := makeTestL1Input(5) + l1Block.Deposits = []*types.DepositTx{makeTestDeposit()} + + l1Num := bigs.Uint64Strict(l1Block.Header.Number) + l1Hash := l1Block.Header.Hash() + + batch := &opderive.SingularBatch{ + ParentHash: common.HexToHash("0xcccc"), + EpochNum: rollup.Epoch(l1Num), + EpochHash: l1Hash, + Timestamp: l1Block.Header.Time + cfg.BlockTime, + Transactions: nil, + } + + cursor := l2Cursor{ + Number: 10, + Timestamp: batch.Timestamp - cfg.BlockTime, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xold"), Number: l1Num - 1}, + SequenceNumber: 0, + } + + attrs, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) + require.NoError(t, err) + + // L1 info deposit + 1 user deposit = 2 (no batch txs) + require.Len(t, attrs.Transactions, 2) + }) + + t.Run("empty batch same epoch", func(t *testing.T) { + l1Block := makeTestL1Input(5) + + l1Num := bigs.Uint64Strict(l1Block.Header.Number) + l1Hash := l1Block.Header.Hash() + + batch := &opderive.SingularBatch{ + ParentHash: common.HexToHash("0xdddd"), + EpochNum: rollup.Epoch(l1Num), + EpochHash: l1Hash, + Timestamp: l1Block.Header.Time + 2*cfg.BlockTime, + 
Transactions: nil, + } + + cursor := l2Cursor{ + Number: 10, + Timestamp: batch.Timestamp - cfg.BlockTime, + L1Origin: eth.BlockID{Hash: l1Hash, Number: l1Num}, + SequenceNumber: 1, + } + + attrs, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) + require.NoError(t, err) + + // Only L1 info deposit, no user deposits, no batch txs + require.Len(t, attrs.Transactions, 1) + }) +} + +func TestBuildAttributes_HoloceneFields(t *testing.T) { + cfg := testRollupConfig() + sysConfig := testSystemConfig() + sysConfig.EIP1559Params = eth.Bytes8{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08} + + l1Block := makeTestL1Input(5) + l1Num := bigs.Uint64Strict(l1Block.Header.Number) + l1Hash := l1Block.Header.Hash() + + batch := &opderive.SingularBatch{ + ParentHash: common.HexToHash("0xeeee"), + EpochNum: rollup.Epoch(l1Num), + EpochHash: l1Hash, + Timestamp: l1Block.Header.Time + cfg.BlockTime, + } + + cursor := l2Cursor{ + Number: 10, + Timestamp: batch.Timestamp - cfg.BlockTime, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xold"), Number: l1Num - 1}, + SequenceNumber: 0, + } + + attrs, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) + require.NoError(t, err) + require.NotNil(t, attrs.EIP1559Params) + require.Equal(t, sysConfig.EIP1559Params, *attrs.EIP1559Params) + require.NotNil(t, attrs.ParentBeaconBlockRoot) + require.NotNil(t, attrs.Withdrawals) +} + +func TestBuildAttributes_SequenceNumber(t *testing.T) { + cfg := testRollupConfig() + sysConfig := testSystemConfig() + l1Block := makeTestL1Input(5) + l1Num := bigs.Uint64Strict(l1Block.Header.Number) + l1Hash := l1Block.Header.Hash() + + t.Run("epoch start resets to zero", func(t *testing.T) { + batch := &opderive.SingularBatch{ + ParentHash: common.HexToHash("0x1111"), + EpochNum: rollup.Epoch(l1Num), + EpochHash: l1Hash, + Timestamp: l1Block.Header.Time + cfg.BlockTime, + } + + cursor := l2Cursor{ + Number: 10, + Timestamp: batch.Timestamp - 
cfg.BlockTime, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xold"), Number: l1Num - 1}, + SequenceNumber: 5, + } + + attrs, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) + require.NoError(t, err) + require.NotNil(t, attrs) + }) + + t.Run("same epoch increments", func(t *testing.T) { + batch := &opderive.SingularBatch{ + ParentHash: common.HexToHash("0x2222"), + EpochNum: rollup.Epoch(l1Num), + EpochHash: l1Hash, + Timestamp: l1Block.Header.Time + 4*cfg.BlockTime, + } + + cursor := l2Cursor{ + Number: 10, + Timestamp: batch.Timestamp - cfg.BlockTime, + L1Origin: eth.BlockID{Hash: l1Hash, Number: l1Num}, + SequenceNumber: 5, + } + + attrs, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) + require.NoError(t, err) + require.NotNil(t, attrs) + }) +} diff --git a/op-core/derive/batches.go b/op-core/derive/batches.go new file mode 100644 index 0000000000000..e83c4c68d5982 --- /dev/null +++ b/op-core/derive/batches.go @@ -0,0 +1,119 @@ +package derive + +import ( + "context" + "io" + + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// decodeBatches reads all batches from a completed channel's compressed data +// and returns them as singular batches. Span batches are expanded into +// individual singular batches using the provided L1 origins and cursor. +// +// Decode errors are logged and cause the function to return whatever batches +// were successfully decoded so far. Only programming errors (bugs) would +// warrant propagating errors upward; all data-dependent failures are treated +// as bad input. +// +// Span batch prefix validation is delegated to opderive.CheckSpanBatchPrefix, +// which rejects overlapping span batches under Karst. If the prefix check +// returns BatchPast, the span batch is skipped. 
Any other non-Accept result +// causes the function to return the batches collected so far. +func decodeBatches( + lgr log.Logger, + r io.Reader, + cfg *rollup.Config, + l1Origins []eth.L1BlockRef, + cursor l2Cursor, + l1InclusionBlock eth.L1BlockRef, +) []*opderive.SingularBatch { + spec := rollup.NewChainSpec(cfg) + maxRLP := spec.MaxRLPBytesPerChannel(cursor.Timestamp) + + readBatch, err := opderive.BatchReader(r, maxRLP, true) // Fjord always active (implied by Karst) + if err != nil { + lgr.Warn("failed to create batch reader", "err", err) + return nil + } + + var batches []*opderive.SingularBatch + for { + batchData, err := readBatch() + if err == io.EOF { + break + } else if err != nil { + lgr.Warn("failed to read batch", "err", err) + return batches + } + + switch batchData.GetBatchType() { + case opderive.SingularBatchType: + singular, err := opderive.GetSingularBatch(batchData) + if err != nil { + lgr.Warn("failed to extract singular batch", "err", err) + return batches + } + batches = append(batches, singular) + + case opderive.SpanBatchType: + spanBatch, err := opderive.DeriveSpanBatch( + batchData, + cfg.BlockTime, + cfg.Genesis.L2Time, + cfg.L2ChainID, + ) + if err != nil { + lgr.Warn("failed to derive span batch", "err", err) + return batches + } + + l2SafeHead := eth.L2BlockRef{ + Number: cursor.Number, + Time: cursor.Timestamp, + L1Origin: cursor.L1Origin, + SequenceNumber: cursor.SequenceNumber, + } + + // Build l1Blocks slice starting from the cursor's epoch, as + // CheckSpanBatchPrefix expects l1Blocks[0] to be the current epoch. 
+ var l1Blocks []eth.L1BlockRef + for _, ref := range l1Origins { + if ref.Number >= cursor.L1Origin.Number { + l1Blocks = append(l1Blocks, ref) + } + } + + validity, _ := opderive.CheckSpanBatchPrefix( + context.Background(), cfg, + lgr, + l1Blocks, l2SafeHead, spanBatch, l1InclusionBlock, nil, + ) + if validity == opderive.BatchPast { + lgr.Debug("span batch is past safe head, skipping") + continue + } + if validity != opderive.BatchAccept { + lgr.Warn("span batch prefix check failed", "validity", validity) + return batches + } + + singular, err := spanBatch.GetSingularBatches(l1Origins, l2SafeHead) + if err != nil { + lgr.Warn("failed to expand span batch", "err", err) + return batches + } + batches = append(batches, singular...) + + default: + lgr.Warn("unknown batch type", "type", batchData.GetBatchType()) + return batches + } + } + + return batches +} diff --git a/op-core/derive/batches_test.go b/op-core/derive/batches_test.go new file mode 100644 index 0000000000000..0eb5849c94c06 --- /dev/null +++ b/op-core/derive/batches_test.go @@ -0,0 +1,39 @@ +package derive + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +func TestDecodeBatches_SingularBatch(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + l1Ref := testL1Ref(1) + + batch := &opderive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(l1Ref.Number), + EpochHash: l1Ref.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + } + + channelData := encodeBatchToChannelData(t, batch) + + cursor := newCursor(safeHead) + l1Origins := []eth.L1BlockRef{testL1Ref(0), l1Ref} + + batches := decodeBatches(testLogger, bytes.NewReader(channelData), cfg, l1Origins, cursor, l1Ref) + require.Len(t, batches, 1) + + decoded := batches[0] + require.Equal(t, 
batch.ParentHash, decoded.ParentHash) + require.Equal(t, batch.EpochNum, decoded.EpochNum) + require.Equal(t, batch.EpochHash, decoded.EpochHash) + require.Equal(t, batch.Timestamp, decoded.Timestamp) +} diff --git a/op-core/derive/channels.go b/op-core/derive/channels.go new file mode 100644 index 0000000000000..003f13799710e --- /dev/null +++ b/op-core/derive/channels.go @@ -0,0 +1,81 @@ +package derive + +import ( + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// readyChannel is a completed channel ready for batch decoding. +type readyChannel struct { + id opderive.ChannelID + openBlock eth.L1BlockRef + channel *opderive.Channel +} + +// channelAssembler implements Holocene single-channel strict-order assembly. +// Only one channel is active at a time. Frames must arrive in order. +// A frame for a new channel ID discards the current in-progress channel. +// +// This is intentionally separate from the existing ChannelAssembler in +// op-node/rollup/derive/channel_assembler.go. That assembler is pull-based +// (requires NextFrameProvider and Metrics interfaces) and designed for the +// streaming pipeline. Our push-based model feeds frames directly, making a +// simpler implementation appropriate. +type channelAssembler struct { + current *opderive.Channel + currentID opderive.ChannelID + openBlock eth.L1BlockRef + nextFrame uint16 +} + +func newChannelAssembler() *channelAssembler { + return &channelAssembler{} +} + +// addFrame processes a single frame. Returns a readyChannel if the channel is complete. 
+func (ca *channelAssembler) addFrame(frame opderive.Frame, l1Ref eth.L1BlockRef) *readyChannel { + if ca.current == nil || frame.ID != ca.currentID { + ca.current = opderive.NewChannel(frame.ID, l1Ref, true) + ca.currentID = frame.ID + ca.openBlock = l1Ref + ca.nextFrame = 0 + } + + if frame.FrameNumber != ca.nextFrame { + return nil + } + + if err := ca.current.AddFrame(frame, l1Ref); err != nil { + return nil + } + ca.nextFrame++ + + if ca.current.IsReady() { + ready := &readyChannel{ + id: ca.currentID, + openBlock: ca.openBlock, + channel: ca.current, + } + ca.current = nil + return ready + } + return nil +} + +// reset clears the assembler's in-progress channel state. +func (ca *channelAssembler) reset() { + ca.current = nil + ca.nextFrame = 0 +} + +// checkTimeout returns true and discards the current channel if it has timed out. +func (ca *channelAssembler) checkTimeout(current eth.L1BlockRef, channelTimeout uint64) bool { + if ca.current == nil { + return false + } + if current.Number > ca.openBlock.Number+channelTimeout { + ca.current = nil + return true + } + return false +} diff --git a/op-core/derive/channels_test.go b/op-core/derive/channels_test.go new file mode 100644 index 0000000000000..8b983fbda088e --- /dev/null +++ b/op-core/derive/channels_test.go @@ -0,0 +1,145 @@ +package derive + +import ( + "testing" + + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/stretchr/testify/require" +) + +func testChannelID(b byte) opderive.ChannelID { + var id opderive.ChannelID + id[0] = b + return id +} + +func TestChannelAssembler_SingleFrameChannel(t *testing.T) { + ca := newChannelAssembler() + l1 := testL1Ref(1) + + ready := ca.addFrame(opderive.Frame{ + ID: testChannelID(0xAA), + FrameNumber: 0, + Data: []byte("hello"), + IsLast: true, + }, l1) + + require.NotNil(t, ready, "single-frame channel should be ready immediately") + require.Equal(t, testChannelID(0xAA), ready.id) + require.Equal(t, l1, ready.openBlock) + 
require.True(t, ready.channel.IsReady()) +} + +func TestChannelAssembler_MultiFrameChannel(t *testing.T) { + ca := newChannelAssembler() + chID := testChannelID(0xBB) + l1 := testL1Ref(1) + + ready := ca.addFrame(opderive.Frame{ + ID: chID, + FrameNumber: 0, + Data: []byte("part1"), + IsLast: false, + }, l1) + require.Nil(t, ready, "channel should not be ready after first frame") + + l1b := testL1Ref(2) + ready = ca.addFrame(opderive.Frame{ + ID: chID, + FrameNumber: 1, + Data: []byte("part2"), + IsLast: true, + }, l1b) + + require.NotNil(t, ready, "channel should be ready after last frame") + require.Equal(t, chID, ready.id) + require.Equal(t, l1, ready.openBlock, "openBlock should be from the first frame") + require.True(t, ready.channel.IsReady()) +} + +func TestChannelAssembler_NewChannelDiscardsOld(t *testing.T) { + ca := newChannelAssembler() + chA := testChannelID(0xAA) + chB := testChannelID(0xBB) + l1 := testL1Ref(1) + + ready := ca.addFrame(opderive.Frame{ + ID: chA, + FrameNumber: 0, + Data: []byte("A-frame0"), + IsLast: false, + }, l1) + require.Nil(t, ready) + require.Equal(t, chA, ca.currentID) + + l1b := testL1Ref(2) + ready = ca.addFrame(opderive.Frame{ + ID: chB, + FrameNumber: 0, + Data: []byte("B-frame0"), + IsLast: true, + }, l1b) + + require.NotNil(t, ready, "new channel B should complete") + require.Equal(t, chB, ready.id) + require.Equal(t, l1b, ready.openBlock, "openBlock should be from channel B's first frame") +} + +func TestChannelAssembler_Timeout(t *testing.T) { + ca := newChannelAssembler() + chID := testChannelID(0xCC) + l1Open := testL1Ref(10) + + ca.addFrame(opderive.Frame{ + ID: chID, + FrameNumber: 0, + Data: []byte("data"), + IsLast: false, + }, l1Open) + require.NotNil(t, ca.current, "channel should be in progress") + + channelTimeout := uint64(50) + + notTimedOut := testL1Ref(10 + channelTimeout) + require.False(t, ca.checkTimeout(notTimedOut, channelTimeout), + "should not timeout at exactly openBlock + channelTimeout") + 
require.NotNil(t, ca.current) + + timedOut := testL1Ref(10 + channelTimeout + 1) + require.True(t, ca.checkTimeout(timedOut, channelTimeout), + "should timeout when current.Number > openBlock.Number + channelTimeout") + require.Nil(t, ca.current, "channel should be discarded after timeout") +} + +func TestChannelAssembler_OutOfOrderFrame(t *testing.T) { + ca := newChannelAssembler() + chID := testChannelID(0xDD) + l1 := testL1Ref(1) + + ready := ca.addFrame(opderive.Frame{ + ID: chID, + FrameNumber: 0, + Data: []byte("frame0"), + IsLast: false, + }, l1) + require.Nil(t, ready) + + ready = ca.addFrame(opderive.Frame{ + ID: chID, + FrameNumber: 2, // skip frame 1 + Data: []byte("frame2"), + IsLast: true, + }, l1) + require.Nil(t, ready, "out-of-order frame should be dropped") + + require.NotNil(t, ca.current, "channel should still be in progress") + require.Equal(t, uint16(1), ca.nextFrame, "nextFrame should still expect frame 1") + + ready = ca.addFrame(opderive.Frame{ + ID: chID, + FrameNumber: 1, + Data: []byte("frame1"), + IsLast: true, + }, l1) + require.NotNil(t, ready, "channel should complete once gap is filled") +} diff --git a/op-core/derive/deriver.go b/op-core/derive/deriver.go new file mode 100644 index 0000000000000..735def7a83ea8 --- /dev/null +++ b/op-core/derive/deriver.go @@ -0,0 +1,298 @@ +package derive + +import ( + "context" + "fmt" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// Deriver is an iterator that produces one payload attributes at a time from +// incrementally-added L1 blocks. It replaces the batch-mode PureDerive function +// with an API that matches how derivation works in practice: derive one block, +// execute on engine, verify, then derive the next. 
+type Deriver struct { + cfg *rollup.Config + l1ChainConfig *params.ChainConfig + lgr log.Logger + spec *rollup.ChainSpec + + // L1 data — appended via AddL1Block + l1Blocks []L1Input + l1Origins []eth.L1BlockRef + firstL1Num uint64 + + // System config — evolves with config logs + sysConfig eth.SystemConfig + + // L1 processing position — next L1 block index to scan for frames + l1Pos int + + // Channel assembly + assembler *channelAssembler + + // Batch buffer from completed channels + pendingBatches []*opderive.SingularBatch + batchInclusionBlock eth.L1BlockRef + + // Derivation cursor + cursor l2Cursor +} + +// NewDeriver creates a new iterator-style deriver starting from the given safe +// head. The caller must then call AddL1Block to provide L1 data and Next to +// consume derived payload attributes. +func NewDeriver( + cfg *rollup.Config, + l1ChainConfig *params.ChainConfig, + lgr log.Logger, + safeHead eth.L2BlockRef, + sysConfig eth.SystemConfig, +) (*Deriver, error) { + if !cfg.IsKarst(safeHead.Time) { + return nil, fmt.Errorf("derivation requires Karst fork (no overlapping span batches), safe head time %d is pre-Karst", safeHead.Time) + } + + return &Deriver{ + cfg: cfg, + l1ChainConfig: l1ChainConfig, + lgr: lgr, + spec: rollup.NewChainSpec(cfg), + sysConfig: sysConfig, + assembler: newChannelAssembler(), + cursor: newCursor(safeHead), + }, nil +} + +// AddL1Block appends one or more L1 blocks. Blocks must be contiguous with +// previously added blocks. Returns ErrReorg if a block's parent hash doesn't +// match the tip of the already-added chain. 
+func (d *Deriver) AddL1Block(blocks ...L1Input) error { + for i := range blocks { + ref := blocks[i].BlockRef() + + if len(d.l1Origins) > 0 { + tip := d.l1Origins[len(d.l1Origins)-1] + if ref.ParentHash != tip.Hash { + return fmt.Errorf("%w: block %d parent %s != tip %s", ErrReorg, ref.Number, ref.ParentHash, tip.Hash) + } + } + + if len(d.l1Blocks) == 0 { + d.firstL1Num = ref.Number + } + + d.l1Blocks = append(d.l1Blocks, blocks[i]) + d.l1Origins = append(d.l1Origins, ref) + } + return nil +} + +// Next returns the next derived payload attributes and the L1 block they were +// derived from. safeHead provides the current L2 safe head (including Hash) +// for parent hash validation via upstream CheckBatch. +// +// Returns ErrNeedL1Data when more L1 blocks are needed. +func (d *Deriver) Next(safeHead eth.L2BlockRef) (*eth.PayloadAttributes, eth.L1BlockRef, error) { + // Step 1: Try consuming from pending batches first. + if attrs, l1Ref, err := d.tryPendingBatch(safeHead); err != nil { + return nil, eth.L1BlockRef{}, err + } else if attrs != nil { + return attrs, l1Ref, nil + } + + // Step 2: Process more L1 blocks to find new channels/batches. + for d.l1Pos < len(d.l1Blocks) { + if err := d.processNextL1Block(); err != nil { + return nil, eth.L1BlockRef{}, err + } + + // If we got new pending batches, try them. + if attrs, l1Ref, err := d.tryPendingBatch(safeHead); err != nil { + return nil, eth.L1BlockRef{}, err + } else if attrs != nil { + return attrs, l1Ref, nil + } + + // After each L1 block, check if the seq window expired → empty batch. + if attrs, l1Ref, err := d.tryEmptyBatch(safeHead); err != nil { + return nil, eth.L1BlockRef{}, err + } else if attrs != nil { + return attrs, l1Ref, nil + } + } + + // Step 3: Nothing to do — need more L1 data. + return nil, eth.L1BlockRef{}, ErrNeedL1Data +} + +// Reset clears all internal state back to the given safe head + system config. +// Used after L1 reorgs. The caller must re-add L1 blocks from the new chain. 
+func (d *Deriver) Reset(safeHead eth.L2BlockRef, sysConfig eth.SystemConfig) { + d.l1Blocks = nil + d.l1Origins = nil + d.firstL1Num = 0 + d.l1Pos = 0 + d.sysConfig = sysConfig + d.assembler.reset() + d.pendingBatches = nil + d.batchInclusionBlock = eth.L1BlockRef{} + d.cursor = newCursor(safeHead) +} + +// findL1 does O(1) lookup by block number into the l1Blocks slice. +func (d *Deriver) findL1(number uint64) *L1Input { + if len(d.l1Blocks) == 0 { + return nil + } + idx := int(number - d.firstL1Num) + if idx >= 0 && idx < len(d.l1Blocks) { + return &d.l1Blocks[idx] + } + return nil +} + +// tryPendingBatch validates the next pending batch via CheckBatch and builds +// attributes if accepted. +func (d *Deriver) tryPendingBatch(safeHead eth.L2BlockRef) (*eth.PayloadAttributes, eth.L1BlockRef, error) { + for len(d.pendingBatches) > 0 { + batch := d.pendingBatches[0] + + // Build l1Blocks slice for CheckBatch: must start at safeHead.L1Origin. + startIdx := int(safeHead.L1Origin.Number - d.firstL1Num) + if startIdx < 0 || startIdx >= len(d.l1Origins) { + return nil, eth.L1BlockRef{}, ErrNeedL1Data + } + l1BlocksForCheck := d.l1Origins[startIdx:] + + batchWithInclusion := &opderive.BatchWithL1InclusionBlock{ + Batch: batch, + L1InclusionBlock: d.batchInclusionBlock, + } + + validity := opderive.CheckBatch( + context.Background(), d.cfg, d.lgr, + l1BlocksForCheck, safeHead, batchWithInclusion, nil, + ) + + switch validity { + case opderive.BatchAccept: + d.pendingBatches = d.pendingBatches[1:] + + epochL1 := d.findL1(uint64(batch.EpochNum)) + if epochL1 == nil { + return nil, eth.L1BlockRef{}, fmt.Errorf("missing L1 block %d for batch epoch", batch.EpochNum) + } + + attrs, err := buildAttributes(batch, epochL1, d.cursor, d.sysConfig, d.cfg, d.l1ChainConfig) + if err != nil { + return nil, eth.L1BlockRef{}, fmt.Errorf("building attributes: %w", err) + } + + epochID := eth.BlockID{Number: uint64(batch.EpochNum), Hash: batch.EpochHash} + var seqNum uint64 + if 
epochID.Number != d.cursor.L1Origin.Number { + seqNum = 0 + } else { + seqNum = d.cursor.SequenceNumber + 1 + } + d.cursor.advance(batch.Timestamp, epochID, seqNum) + + return attrs, d.batchInclusionBlock, nil + + case opderive.BatchPast: + d.pendingBatches = d.pendingBatches[1:] + continue + + case opderive.BatchUndecided: + return nil, eth.L1BlockRef{}, ErrNeedL1Data + + default: // BatchDrop, BatchFuture, etc. + d.lgr.Warn("invalid batch, discarding remaining channel batches", + "timestamp", batch.Timestamp, "epoch", batch.EpochNum, "validity", validity) + d.pendingBatches = nil + return nil, eth.L1BlockRef{}, nil + } + } + return nil, eth.L1BlockRef{}, nil +} + +// processNextL1Block processes the L1 block at l1Pos: applies config logs, +// checks channel timeout, parses frames, and decodes any completed channels +// into pendingBatches. +func (d *Deriver) processNextL1Block() error { + l1 := d.l1Blocks[d.l1Pos] + l1Ref := d.l1Origins[d.l1Pos] + d.l1Pos++ + + for _, configLog := range l1.ConfigLogs { + if err := opderive.ProcessSystemConfigUpdateLogEvent(&d.sysConfig, configLog, d.cfg, l1.Header.Time); err != nil { + return fmt.Errorf("processing system config update at L1 block %d: %w", l1Ref.Number, err) + } + } + + d.assembler.checkTimeout(l1Ref, d.spec.ChannelTimeout(l1Ref.Time)) + + for _, txData := range l1.BatcherData { + frames, err := opderive.ParseFrames(txData) + if err != nil { + d.lgr.Warn("failed to parse frames", "l1_block", l1Ref.Number, "err", err) + continue + } + + for _, frame := range frames { + ready := d.assembler.addFrame(frame, l1Ref) + if ready == nil { + continue + } + + d.lgr.Debug("channel ready", "channel", ready.id, "l1_block", l1Ref.Number) + + batches := decodeBatches(d.lgr, ready.channel.Reader(), d.cfg, d.l1Origins, d.cursor, l1Ref) + if len(batches) > 0 { + d.pendingBatches = batches + d.batchInclusionBlock = l1Ref + } + } + } + + return nil +} + +// tryEmptyBatch generates an empty batch if the sequencing window has expired 
+// for the most recently processed L1 block. +func (d *Deriver) tryEmptyBatch(safeHead eth.L2BlockRef) (*eth.PayloadAttributes, eth.L1BlockRef, error) { + if d.l1Pos == 0 { + return nil, eth.L1BlockRef{}, nil + } + currentL1 := d.l1Origins[d.l1Pos-1] + + if !d.cursor.needsEmptyBatch(currentL1, d.cfg) { + return nil, eth.L1BlockRef{}, nil + } + + batch, epochL1, newOrigin := makeEmptyBatch(d.cursor, d.findL1, d.cfg) + if batch == nil { + return nil, eth.L1BlockRef{}, nil + } + + attrs, err := buildAttributes(batch, epochL1, d.cursor, d.sysConfig, d.cfg, d.l1ChainConfig) + if err != nil { + return nil, eth.L1BlockRef{}, fmt.Errorf("building empty batch attributes: %w", err) + } + + var seqNum uint64 + if newOrigin.Number != d.cursor.L1Origin.Number { + seqNum = 0 + } else { + seqNum = d.cursor.SequenceNumber + 1 + } + d.cursor.advance(batch.Timestamp, newOrigin, seqNum) + + return attrs, currentL1, nil +} diff --git a/op-core/derive/deriver_test.go b/op-core/derive/deriver_test.go new file mode 100644 index 0000000000000..ea62ef387af77 --- /dev/null +++ b/op-core/derive/deriver_test.go @@ -0,0 +1,408 @@ +package derive + +import ( + "bytes" + "errors" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +var testLogger = log.NewLogger(log.DiscardHandler()) + +// addBatchToL1 adds a batcher transaction containing the given singular batch +// to a pre-existing L1Input. 
+func addBatchToL1(t *testing.T, l1 *L1Input, batch *opderive.SingularBatch) {
+	t.Helper()
+	channelData := encodeBatchToChannelData(t, batch)
+	var chID opderive.ChannelID
+	copy(chID[:], l1.Header.Hash().Bytes())
+	l1.BatcherData = [][]byte{wrapInFrames(channelData, chID)}
+}
+
+func TestDeriver_SingleBatch(t *testing.T) {
+	cfg := testRollupConfig()
+	safeHead := testSafeHead(cfg)
+	sysConfig := testSystemConfig()
+
+	d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig)
+	require.NoError(t, err)
+
+	chain := makeTestL1Chain(2)
+	l1Ref1 := chain[1].BlockRef()
+	addBatchToL1(t, chain[1], &opderive.SingularBatch{
+		ParentHash: safeHead.Hash,
+		EpochNum:   rollup.Epoch(l1Ref1.Number),
+		EpochHash:  l1Ref1.Hash,
+		Timestamp:  safeHead.Time + cfg.BlockTime,
+	})
+
+	require.NoError(t, d.AddL1Block(*chain[0], *chain[1]))
+
+	attrs, l1Ref, err := d.Next(safeHead)
+	require.NoError(t, err)
+	require.NotNil(t, attrs)
+	require.Equal(t, hexutil.Uint64(safeHead.Time+cfg.BlockTime), attrs.Timestamp)
+	require.True(t, attrs.NoTxPool)
+	require.Equal(t, chain[1].BlockRef(), l1Ref)
+
+	// No more to derive.
+	_, _, err = d.Next(safeHead)
+	require.True(t, errors.Is(err, ErrNeedL1Data))
+}
+
+func TestDeriver_NeedL1Data(t *testing.T) {
+	cfg := testRollupConfig()
+	safeHead := testSafeHead(cfg)
+	sysConfig := testSystemConfig()
+
+	d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig)
+	require.NoError(t, err)
+
+	_, _, err = d.Next(safeHead)
+	require.True(t, errors.Is(err, ErrNeedL1Data))
+}
+
+func TestDeriver_IncrementalL1(t *testing.T) {
+	cfg := testRollupConfig()
+	safeHead := testSafeHead(cfg)
+	sysConfig := testSystemConfig()
+
+	d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig)
+	require.NoError(t, err)
+
+	chain := makeTestL1Chain(2)
+	require.NoError(t, d.AddL1Block(*chain[0]))
+
+	// No batches in block 0, need more data.
+ _, _, err = d.Next(safeHead) + require.True(t, errors.Is(err, ErrNeedL1Data)) + + // Add block 1 with a batch. + l1Ref1 := chain[1].BlockRef() + addBatchToL1(t, chain[1], &opderive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(l1Ref1.Number), + EpochHash: l1Ref1.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + }) + require.NoError(t, d.AddL1Block(*chain[1])) + + attrs, _, err := d.Next(safeHead) + require.NoError(t, err) + require.NotNil(t, attrs) + require.Equal(t, hexutil.Uint64(safeHead.Time+cfg.BlockTime), attrs.Timestamp) +} + +func TestDeriver_EmptyBatches(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig) + require.NoError(t, err) + + // Add SeqWindowSize + 2 L1 blocks with no batcher data. + numBlocks := cfg.SeqWindowSize + 2 + chain := makeTestL1Chain(numBlocks) + for _, block := range chain { + require.NoError(t, d.AddL1Block(*block)) + } + + // Should generate empty batches when the sequencing window expires. + currentSafeHead := safeHead + var derived []*eth.PayloadAttributes + for { + attrs, _, err := d.Next(currentSafeHead) + if errors.Is(err, ErrNeedL1Data) { + break + } + require.NoError(t, err) + require.NotNil(t, attrs) + derived = append(derived, attrs) + + // Advance the safe head for the next call. 
+ currentSafeHead = eth.L2BlockRef{ + Hash: common.Hash{byte(len(derived))}, + Number: currentSafeHead.Number + 1, + Time: uint64(attrs.Timestamp), + L1Origin: currentSafeHead.L1Origin, + SequenceNumber: currentSafeHead.SequenceNumber + 1, + } + } + + require.Greater(t, len(derived), 0, "empty batches should be generated when sequencer window expires") + + expectedTimestamp := safeHead.Time + cfg.BlockTime + for _, attrs := range derived { + require.Equal(t, hexutil.Uint64(expectedTimestamp), attrs.Timestamp) + expectedTimestamp += cfg.BlockTime + } +} + +func TestDeriver_ReorgDetection(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig) + require.NoError(t, err) + + chain := makeTestL1Chain(1) + require.NoError(t, d.AddL1Block(*chain[0])) + + // Create a block that doesn't chain to block 0. + reorgedBlock := makeTestL1Input(1) + reorgedBlock.Header.ParentHash = common.HexToHash("0xbadparent") + + err = d.AddL1Block(*reorgedBlock) + require.True(t, errors.Is(err, ErrReorg)) +} + +func TestDeriver_ReorgReset(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig) + require.NoError(t, err) + + chain := makeTestL1Chain(2) + l1Ref1 := chain[1].BlockRef() + addBatchToL1(t, chain[1], &opderive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(l1Ref1.Number), + EpochHash: l1Ref1.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + }) + require.NoError(t, d.AddL1Block(*chain[0], *chain[1])) + + // Derive the first block. + attrs, _, err := d.Next(safeHead) + require.NoError(t, err) + require.NotNil(t, attrs) + + // Now reset (simulating reorg). + d.Reset(safeHead, sysConfig) + + // Need L1 data again. 
+ _, _, err = d.Next(safeHead) + require.True(t, errors.Is(err, ErrNeedL1Data)) + + // Re-add blocks, can derive again. + require.NoError(t, d.AddL1Block(*chain[0], *chain[1])) + attrs, _, err = d.Next(safeHead) + require.NoError(t, err) + require.NotNil(t, attrs) +} + +func TestDeriver_ChannelTimeout(t *testing.T) { + cfg := testRollupConfig() + cfg.ChannelTimeoutBedrock = 2 + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig) + require.NoError(t, err) + + chain := makeTestL1Chain(5) + l1Block0Ref := chain[0].BlockRef() + + // Incomplete channel at L1 block 1. + incompleteChID := testChannelID(0xAA) + channelData := encodeBatchToChannelData(t, &opderive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(l1Block0Ref.Number), + EpochHash: l1Block0Ref.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + }) + frame0 := opderive.Frame{ + ID: incompleteChID, + FrameNumber: 0, + Data: channelData, + IsLast: false, + } + var buf bytes.Buffer + buf.WriteByte(params.DerivationVersion0) + require.NoError(t, frame0.MarshalBinary(&buf)) + chain[1].BatcherData = [][]byte{buf.Bytes()} + + // Complete channel at L1 block 4 (after timeout: 4 > 1 + 2). 
+ completeChID := testChannelID(0xBB) + completeBatch := &opderive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(l1Block0Ref.Number), + EpochHash: l1Block0Ref.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + } + completeChannelData := encodeBatchToChannelData(t, completeBatch) + completeTx := wrapInFrames(completeChannelData, completeChID) + chain[4].BatcherData = [][]byte{completeTx} + + for _, block := range chain { + require.NoError(t, d.AddL1Block(*block)) + } + + attrs, _, err := d.Next(safeHead) + require.NoError(t, err) + require.NotNil(t, attrs) + require.Equal(t, hexutil.Uint64(safeHead.Time+cfg.BlockTime), attrs.Timestamp) +} + +func TestDeriver_InvalidBatchDropped(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig) + require.NoError(t, err) + + chain := makeTestL1Chain(2) + l1Ref1 := chain[1].BlockRef() + + // Batch with wrong timestamp. + addBatchToL1(t, chain[1], &opderive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(l1Ref1.Number), + EpochHash: l1Ref1.Hash, + Timestamp: safeHead.Time + cfg.BlockTime + 999, + }) + + require.NoError(t, d.AddL1Block(*chain[0], *chain[1])) + + // The invalid batch should be dropped, returning ErrNeedL1Data. + _, _, err = d.Next(safeHead) + require.True(t, errors.Is(err, ErrNeedL1Data)) +} + +func TestDeriver_ParentHashCheck(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig) + require.NoError(t, err) + + chain := makeTestL1Chain(2) + l1Ref1 := chain[1].BlockRef() + + // Create a batch with WRONG parent hash. 
+ addBatchToL1(t, chain[1], &opderive.SingularBatch{ + ParentHash: common.HexToHash("0xwrongparent"), + EpochNum: rollup.Epoch(l1Ref1.Number), + EpochHash: l1Ref1.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + }) + + require.NoError(t, d.AddL1Block(*chain[0], *chain[1])) + + // CheckBatch will reject this because ParentHash != safeHead.Hash. + _, _, err = d.Next(safeHead) + require.True(t, errors.Is(err, ErrNeedL1Data)) +} + +func TestDeriver_RejectsPreKarst(t *testing.T) { + cfg := testRollupConfig() + cfg.KarstTime = nil + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + _, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig) + require.Error(t, err) + require.Contains(t, err.Error(), "Karst fork") +} + +func TestDeriver_MultipleChannelsAndEpochs(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig) + require.NoError(t, err) + + chain := makeTestL1Chain(4) + + // Parent hashes must match the safe head hashes we'll pass to Next(). + // We use a deterministic scheme: genesis hash, then Hash{1}, Hash{2}, etc. 
+ l2Hashes := []common.Hash{ + safeHead.Hash, + {1}, + {2}, + {3}, + } + + // Block 1: batch for epoch 1 + l1Ref1 := chain[1].BlockRef() + addBatchToL1(t, chain[1], &opderive.SingularBatch{ + ParentHash: l2Hashes[0], + EpochNum: rollup.Epoch(l1Ref1.Number), + EpochHash: l1Ref1.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + }) + + // Block 2: batch for epoch 2 + l1Ref2 := chain[2].BlockRef() + chData2 := encodeBatchToChannelData(t, &opderive.SingularBatch{ + ParentHash: l2Hashes[1], + EpochNum: rollup.Epoch(l1Ref2.Number), + EpochHash: l1Ref2.Hash, + Timestamp: safeHead.Time + 2*cfg.BlockTime, + }) + var chID2 opderive.ChannelID + chID2[0] = 0x02 + chain[2].BatcherData = [][]byte{wrapInFrames(chData2, chID2)} + + // Block 3: batch for epoch 3 + l1Ref3 := chain[3].BlockRef() + chData3 := encodeBatchToChannelData(t, &opderive.SingularBatch{ + ParentHash: l2Hashes[2], + EpochNum: rollup.Epoch(l1Ref3.Number), + EpochHash: l1Ref3.Hash, + Timestamp: safeHead.Time + 3*cfg.BlockTime, + }) + var chID3 opderive.ChannelID + chID3[0] = 0x03 + chain[3].BatcherData = [][]byte{wrapInFrames(chData3, chID3)} + + for _, block := range chain { + require.NoError(t, d.AddL1Block(*block)) + } + + var derived []*eth.PayloadAttributes + currentSafeHead := safeHead + for { + attrs, _, err := d.Next(currentSafeHead) + if errors.Is(err, ErrNeedL1Data) { + break + } + require.NoError(t, err) + require.NotNil(t, attrs) + derived = append(derived, attrs) + + epochIdx := len(derived) // epoch advances 1:1 with derived blocks here + currentSafeHead = eth.L2BlockRef{ + Hash: l2Hashes[epochIdx], + Number: currentSafeHead.Number + 1, + Time: uint64(attrs.Timestamp), + L1Origin: chain[epochIdx].BlockRef().ID(), + SequenceNumber: 0, + } + } + + require.Greater(t, len(derived), 1, "should derive multiple blocks from multiple epochs") + + expectedTimestamp := safeHead.Time + cfg.BlockTime + for i, attrs := range derived { + require.Equal(t, hexutil.Uint64(expectedTimestamp), attrs.Timestamp, + 
"block %d should have timestamp %d", i, expectedTimestamp) + expectedTimestamp += cfg.BlockTime + } +} diff --git a/op-core/derive/empty_batch.go b/op-core/derive/empty_batch.go new file mode 100644 index 0000000000000..9942a40c7f0a1 --- /dev/null +++ b/op-core/derive/empty_batch.go @@ -0,0 +1,39 @@ +package derive + +import ( + "github.com/ethereum-optimism/optimism/op-node/rollup" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// makeEmptyBatch generates a single empty batch when the sequencing window has +// expired. It returns the batch, the L1 input for the batch's epoch (for +// buildAttributes), and the new L1 origin. Returns nil if no empty batch is needed. +// +// The epoch advances when the next L2 timestamp >= the next L1 block's timestamp. +func makeEmptyBatch( + cursor l2Cursor, + findL1 func(uint64) *L1Input, + cfg *rollup.Config, +) (*opderive.SingularBatch, *L1Input, eth.BlockID) { + nextTimestamp := cursor.Timestamp + cfg.BlockTime + newOrigin := cursor.L1Origin + epochL1 := findL1(cursor.L1Origin.Number) + if epochL1 == nil { + return nil, nil, eth.BlockID{} + } + + nextL1 := findL1(cursor.L1Origin.Number + 1) + if nextL1 != nil && nextTimestamp >= nextL1.Header.Time { + newOrigin = nextL1.BlockID() + epochL1 = nextL1 + } + + batch := &opderive.SingularBatch{ + EpochNum: rollup.Epoch(newOrigin.Number), + EpochHash: newOrigin.Hash, + Timestamp: nextTimestamp, + } + + return batch, epochL1, newOrigin +} diff --git a/op-core/derive/empty_batch_test.go b/op-core/derive/empty_batch_test.go new file mode 100644 index 0000000000000..f1147ef7354c9 --- /dev/null +++ b/op-core/derive/empty_batch_test.go @@ -0,0 +1,82 @@ +package derive + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/stretchr/testify/require" +) + +func TestMakeEmptyBatch_SameEpoch(t *testing.T) { + cfg := testRollupConfig() + + // Cursor at L1 origin 5, 
next L1 block at num 6 has time 12 (test L1 inputs use Time = num*2).
+	// With cursor.Timestamp = 4, the next L2 timestamp is 4 + BlockTime = 6,
+	// which is below L1 block 6's time of 12, so the L1 origin must NOT advance.
+	cursor := l2Cursor{
+		Number:    10,
+		Timestamp: 4, // nextTimestamp = 6
+		L1Origin:  testL1Ref(5).ID(),
+	}
+
+	// L1 block 6 has time 12. nextTimestamp = 6 < 12 → stays at same epoch.
+	findL1 := func(num uint64) *L1Input {
+		if num <= 6 {
+			return makeTestL1Input(num)
+		}
+		return nil
+	}
+
+	batch, epochL1, newOrigin := makeEmptyBatch(cursor, findL1, cfg)
+	require.NotNil(t, batch)
+	require.NotNil(t, epochL1)
+	require.Equal(t, uint64(5), newOrigin.Number)
+	require.Equal(t, uint64(6), batch.Timestamp)
+	require.Equal(t, cursor.L1Origin.Number, uint64(batch.EpochNum))
+}
+
+func TestMakeEmptyBatch_AdvancesEpoch(t *testing.T) {
+	cfg := testRollupConfig()
+
+	// Cursor at L1 origin 5. L1 block 5 has time 10, L1 block 6 has time 12.
+	// If cursor.Timestamp = 100, nextTimestamp = 102 >= 12 → advances epoch.
+ cursor := l2Cursor{ + Number: 10, + Timestamp: 100, + L1Origin: testL1Ref(5).ID(), + } + + findL1 := func(num uint64) *L1Input { + if num <= 6 { + return makeTestL1Input(num) + } + return nil + } + + batch, epochL1, newOrigin := makeEmptyBatch(cursor, findL1, cfg) + require.NotNil(t, batch) + require.NotNil(t, epochL1) + require.Equal(t, uint64(6), newOrigin.Number) + require.Equal(t, uint64(102), batch.Timestamp) + require.Equal(t, uint64(6), uint64(batch.EpochNum)) +} + +func TestMakeEmptyBatch_MissingL1(t *testing.T) { + cfg := testRollupConfig() + + cursor := l2Cursor{ + Number: 10, + Timestamp: 100, + L1Origin: eth.BlockID{Number: 99}, + } + + findL1 := func(num uint64) *L1Input { + return nil + } + + batch, epochL1, _ := makeEmptyBatch(cursor, findL1, cfg) + require.Nil(t, batch) + require.Nil(t, epochL1) +} diff --git a/op-core/derive/helpers_test.go b/op-core/derive/helpers_test.go new file mode 100644 index 0000000000000..8f7f423480364 --- /dev/null +++ b/op-core/derive/helpers_test.go @@ -0,0 +1,236 @@ +package derive + +import ( + "bytes" + "compress/zlib" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + gethparams "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" + "github.com/ethereum-optimism/optimism/op-service/bigs" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +func testRollupConfig() *rollup.Config { + zero := uint64(0) + return &rollup.Config{ + Genesis: rollup.Genesis{ + L1: eth.BlockID{ + Hash: common.HexToHash("0x01"), + Number: 0, + }, + L2: eth.BlockID{ + Hash: common.HexToHash("0x02"), + Number: 0, + }, + L2Time: 0, + SystemConfig: testSystemConfig(), + }, + BlockTime: 2, + 
MaxSequencerDrift: 600, + SeqWindowSize: 10, + ChannelTimeoutBedrock: 50, + L1ChainID: big.NewInt(1), + L2ChainID: big.NewInt(10), + // Activate all forks at genesis for post-Karst only pipeline + RegolithTime: &zero, + CanyonTime: &zero, + DeltaTime: &zero, + EcotoneTime: &zero, + FjordTime: &zero, + GraniteTime: &zero, + HoloceneTime: &zero, + JovianTime: &zero, + KarstTime: &zero, + BatchInboxAddress: common.HexToAddress("0xff00000000000000000000000000000000000010"), + DepositContractAddress: common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"), + } +} + +func testSystemConfig() eth.SystemConfig { + return eth.SystemConfig{ + BatcherAddr: common.HexToAddress("0xba7c4e500000000000000000000000000000ba7c"), + GasLimit: 30_000_000, + } +} + +func testL1ChainConfig() *gethparams.ChainConfig { + return gethparams.AllDevChainProtocolChanges +} + +func testSafeHead(cfg *rollup.Config) eth.L2BlockRef { + return eth.L2BlockRef{ + Hash: cfg.Genesis.L2.Hash, + Number: cfg.Genesis.L2.Number, + ParentHash: common.Hash{}, + Time: cfg.Genesis.L2Time, + L1Origin: cfg.Genesis.L1, + } +} + +func makeTestL1Input(num uint64) *L1Input { + return &L1Input{ + Header: &types.Header{ + ParentHash: common.BigToHash(new(big.Int).SetUint64(num + 0x100 - 1)), + Number: new(big.Int).SetUint64(num), + Time: num * 2, // match L2 block time for simple epoch advancement in tests + BaseFee: big.NewInt(7), + MixDigest: common.BigToHash(new(big.Int).SetUint64(num + 0x200)), + // ExcessBlobGas required for BlobBaseFee to work via HeaderBlockInfo + ExcessBlobGas: ptrTo(uint64(0)), + }, + } +} + +func makeTestDeposit() *types.DepositTx { + return &types.DepositTx{ + SourceHash: common.HexToHash("0xdead"), + From: common.HexToAddress("0x1111"), + To: ptrTo(common.HexToAddress("0x2222")), + Value: big.NewInt(0), + Gas: 100_000, + Data: nil, + } +} + +func ptrTo[T any](v T) *T { + return &v +} + +func testL1Ref(num uint64) eth.L1BlockRef { + input := makeTestL1Input(num) + return 
input.BlockRef() +} + +// makeTestL1Chain creates a sequence of properly chained L1 inputs where each +// block's ParentHash matches the previous block's actual hash. This is required +// for AddL1Block's reorg detection. +func makeTestL1Chain(count uint64) []*L1Input { + chain := make([]*L1Input, count) + for i := uint64(0); i < count; i++ { + input := makeTestL1Input(i) + if i > 0 { + input.Header.ParentHash = chain[i-1].Header.Hash() + } + chain[i] = input + } + return chain +} + +// encodeBatchToChannelData RLP-encodes a singular batch and zlib-compresses +// it into channel data (the format read by the channel reader stage). +func encodeBatchToChannelData(t *testing.T, batch *opderive.SingularBatch) []byte { + t.Helper() + + bd := opderive.NewBatchData(batch) + batchBytes, err := bd.MarshalBinary() + if err != nil { + t.Fatalf("marshal batch: %v", err) + } + + // Wrap in RLP string encoding as the channel reader expects RLP-encoded batch data + var rlpBuf bytes.Buffer + if err := rlp.Encode(&rlpBuf, batchBytes); err != nil { + t.Fatalf("rlp encode batch: %v", err) + } + + // zlib compress + var compressed bytes.Buffer + w := zlib.NewWriter(&compressed) + if _, err := w.Write(rlpBuf.Bytes()); err != nil { + t.Fatalf("zlib write: %v", err) + } + if err := w.Close(); err != nil { + t.Fatalf("zlib close: %v", err) + } + + return compressed.Bytes() +} + +// wrapInFrames wraps channel data in a single-frame batcher transaction. +// The result is a raw batcher tx data payload (DerivationVersion0 prefix + frame). 
+func wrapInFrames(channelData []byte, channelID opderive.ChannelID) []byte { + frame := opderive.Frame{ + ID: channelID, + FrameNumber: 0, + Data: channelData, + IsLast: true, + } + + var buf bytes.Buffer + buf.WriteByte(params.DerivationVersion0) + _ = frame.MarshalBinary(&buf) + return buf.Bytes() +} + +func TestHelpers(t *testing.T) { + cfg := testRollupConfig() + require.NotNil(t, cfg) + require.Equal(t, uint64(2), cfg.BlockTime) + require.Equal(t, uint64(10), cfg.SeqWindowSize) + require.Equal(t, uint64(50), cfg.ChannelTimeoutBedrock) + require.NotNil(t, cfg.HoloceneTime) + require.NotNil(t, cfg.KarstTime) + + sysCfg := testSystemConfig() + require.Equal(t, uint64(30_000_000), sysCfg.GasLimit) + + safeHead := testSafeHead(cfg) + require.Equal(t, cfg.Genesis.L2.Hash, safeHead.Hash) + require.Equal(t, cfg.Genesis.L2.Number, safeHead.Number) + + l1 := makeTestL1Input(5) + require.Equal(t, uint64(5), bigs.Uint64Strict(l1.Header.Number)) + require.Equal(t, uint64(5*2), l1.Header.Time) + + dep := makeTestDeposit() + require.NotNil(t, dep) + require.NotNil(t, dep.To) + + ref := testL1Ref(10) + require.Equal(t, uint64(10), ref.Number) + + l1WithBatch := makeL1WithBatch(t, cfg, 1, safeHead, sysCfg) + require.Len(t, l1WithBatch.BatcherData, 1) + require.NotEmpty(t, l1WithBatch.BatcherData[0]) + + // Verify the batcher tx can be parsed as frames + frames, err := opderive.ParseFrames(l1WithBatch.BatcherData[0]) + require.NoError(t, err) + require.Len(t, frames, 1) + require.True(t, frames[0].IsLast) +} + +// makeL1WithBatch creates an L1Input containing a batcher tx with one singular batch +// targeting the given safe head as parent. 
+func makeL1WithBatch(t *testing.T, cfg *rollup.Config, l1Num uint64, safeHead eth.L2BlockRef, sysCfg eth.SystemConfig) *L1Input { + t.Helper() + _ = sysCfg // reserved for future use in batch construction + + l1 := makeTestL1Input(l1Num) + l1Ref := l1.BlockRef() + + batch := &opderive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(l1Ref.Number), + EpochHash: l1Ref.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + } + + channelData := encodeBatchToChannelData(t, batch) + + var chID opderive.ChannelID + copy(chID[:], common.Hex2Bytes("deadbeefdeadbeefdeadbeefdeadbeef")) + batcherTx := wrapInFrames(channelData, chID) + + l1.BatcherData = [][]byte{batcherTx} + return l1 +} diff --git a/op-core/derive/types.go b/op-core/derive/types.go new file mode 100644 index 0000000000000..7e4241fb6e067 --- /dev/null +++ b/op-core/derive/types.go @@ -0,0 +1,70 @@ +package derive + +import ( + "errors" + + "github.com/ethereum/go-ethereum/core/types" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +var ( + ErrNeedL1Data = errors.New("need more L1 data") + ErrReorg = errors.New("L1 reorg detected") +) + +// L1Input is a pre-processed L1 block containing only derivation-relevant data. +// The caller is responsible for filtering batcher transactions, extracting deposits +// from receipts, and extracting system config update logs. +// +// Header contains the full L1 block header. Callers will typically already have +// the header at hand when constructing an L1Input. +type L1Input struct { + Header *types.Header + + BatcherData [][]byte // raw batcher transaction data (calldata or blob content) + Deposits []*types.DepositTx // user deposit transactions extracted from receipts + ConfigLogs []*types.Log // system config update logs, pre-filtered +} + +// BlockRef converts the L1 header to an eth.L1BlockRef. 
+func (l *L1Input) BlockRef() eth.L1BlockRef { + return *eth.BlockRefFromHeader(l.Header) +} + +// BlockID returns the block's ID (hash + number). +func (l *L1Input) BlockID() eth.BlockID { + return eth.HeaderBlockID(l.Header) +} + +// l2Cursor tracks the derivation position without knowing the L2 block hash. +type l2Cursor struct { + Number uint64 + Timestamp uint64 + L1Origin eth.BlockID + SequenceNumber uint64 +} + +func newCursor(safeHead eth.L2BlockRef) l2Cursor { + return l2Cursor{ + Number: safeHead.Number, + Timestamp: safeHead.Time, + L1Origin: safeHead.L1Origin, + SequenceNumber: safeHead.SequenceNumber, + } +} + +func (c *l2Cursor) advance(timestamp uint64, l1Origin eth.BlockID, seqNum uint64) { + c.Number++ + c.Timestamp = timestamp + c.L1Origin = l1Origin + c.SequenceNumber = seqNum +} + +// needsEmptyBatch returns true when the sequencing window has expired, +// meaning the cursor's L1 origin is more than SeqWindowSize blocks behind +// the current L1 block. +func (c l2Cursor) needsEmptyBatch(currentL1 eth.L1BlockRef, cfg *rollup.Config) bool { + return currentL1.Number > c.L1Origin.Number+cfg.SeqWindowSize +} diff --git a/op-core/derive/types_test.go b/op-core/derive/types_test.go new file mode 100644 index 0000000000000..a6d4649296894 --- /dev/null +++ b/op-core/derive/types_test.go @@ -0,0 +1,96 @@ +package derive + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +func TestL1InputBlockRef(t *testing.T) { + input := L1Input{ + Header: &types.Header{ + ParentHash: common.HexToHash("0x99"), + Number: big.NewInt(100), + Time: 1000, + BaseFee: big.NewInt(1), + }, + } + ref := input.BlockRef() + require.Equal(t, input.Header.Hash(), ref.Hash) + require.Equal(t, uint64(100), ref.Number) + require.Equal(t, uint64(1000), ref.Time) + require.Equal(t, input.Header.ParentHash, 
ref.ParentHash)
+}
+
+func TestL1InputBlockID(t *testing.T) {
+	input := L1Input{
+		Header: &types.Header{
+			Number: big.NewInt(42),
+		},
+	}
+	id := input.BlockID()
+	require.Equal(t, input.Header.Hash(), id.Hash)
+	require.Equal(t, uint64(42), id.Number)
+}
+
+func TestCursorAdvance(t *testing.T) {
+	c := newCursor(eth.L2BlockRef{
+		Number:         10,
+		Time:           100,
+		L1Origin:       eth.BlockID{Number: 5},
+		SequenceNumber: 2,
+	})
+	require.Equal(t, uint64(10), c.Number)
+	require.Equal(t, uint64(100), c.Timestamp)
+	require.Equal(t, uint64(2), c.SequenceNumber)
+
+	c.advance(102, eth.BlockID{Number: 5}, 3)
+	require.Equal(t, uint64(11), c.Number)
+	require.Equal(t, uint64(102), c.Timestamp)
+	require.Equal(t, uint64(3), c.SequenceNumber)
+}
+
+func TestL1InputHeaderBlockInfo(t *testing.T) {
+	header := &types.Header{
+		ParentHash:    common.HexToHash("0x99"),
+		Number:        big.NewInt(100),
+		Time:          1000,
+		MixDigest:     common.HexToHash("0xdd"),
+		BaseFee:       big.NewInt(7),
+		ExcessBlobGas: ptrTo(uint64(0)),
+	}
+	info := eth.HeaderBlockInfo(header)
+
+	require.Equal(t, header.Hash(), info.Hash())
+	require.Equal(t, header.ParentHash, info.ParentHash())
+	require.Equal(t, uint64(100), info.NumberU64())
+	require.Equal(t, uint64(1000), info.Time())
+	require.Equal(t, header.MixDigest, info.MixDigest())
+	require.Equal(t, header.BaseFee, info.BaseFee())
+
+	require.Equal(t, header, info.Header())
+	_, err := info.HeaderRLP()
+	require.NoError(t, err)
+}
+
+func TestCursorNeedsEmptyBatch(t *testing.T) {
+	cfg := testRollupConfig() // SeqWindowSize = 10
+
+	cursor := l2Cursor{
+		Number:    10,
+		Timestamp: 100,
+		L1Origin:  eth.BlockID{Number: 5},
+	}
+
+	// currentL1.Number (15) == cursor.L1Origin.Number (5) + SeqWindowSize (10):
+	// not strictly greater, so the sequencing window has not expired yet.
+	require.False(t, cursor.needsEmptyBatch(eth.L1BlockRef{Number: 15}, cfg))
+
+	// currentL1.Number (16) > cursor.L1Origin.Number (5) + SeqWindowSize (10)
+	require.True(t, cursor.needsEmptyBatch(eth.L1BlockRef{Number:
16}, cfg))
+}
diff --git a/op-node/rollup/derive/batch_stage.go b/op-node/rollup/derive/batch_stage.go
index 6014da7b9dcf4..65c68aa17e313 100644
--- a/op-node/rollup/derive/batch_stage.go
+++ b/op-node/rollup/derive/batch_stage.go
@@ -136,7 +136,7 @@ func (bs *BatchStage) nextSingularBatchCandidate(ctx context.Context, parent eth
 		return nil, NewCriticalError(errors.New("failed type assertion to SpanBatch"))
 	}
 
-	validity, _ := checkSpanBatchPrefix(ctx, bs.config, bs.Log(), bs.l1Blocks, parent, spanBatch, bs.origin, bs.l2)
+	validity, _ := CheckSpanBatchPrefix(ctx, bs.config, bs.Log(), bs.l1Blocks, parent, spanBatch, bs.origin, bs.l2)
 	switch validity {
 	case BatchAccept: // continue
 		spanBatch.LogContext(bs.Log()).Info("Found next valid span batch")
diff --git a/op-node/rollup/derive/batches.go b/op-node/rollup/derive/batches.go
index 4db6d69f1a9b1..95c11f28e37b5 100644
--- a/op-node/rollup/derive/batches.go
+++ b/op-node/rollup/derive/batches.go
@@ -191,10 +191,14 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo
 	return BatchAccept
 }
 
-// checkSpanBatchPrefix performs the span batch prefix rules for Holocene.
+// CheckSpanBatchPrefix performs the span batch prefix rules for Holocene.
 // Next to the validity, it also returns the parent L2 block as determined during the checks for
 // further consumption.
-func checkSpanBatchPrefix(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef,
+//
+// Under Karst, overlapping span batches (batch timestamp earlier than the next expected L2 timestamp) are dropped as BatchDrop. NOTE(review): DESIGN.md says Karst rejects these as BatchPast — confirm which validity is intended.
+// When l2Fetcher is nil, the parent hash check is skipped (for pure derivation
+// where L2 block hashes are not available).
+func CheckSpanBatchPrefix(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef,
 	batch *SpanBatch, l1InclusionBlock eth.L1BlockRef, l2Fetcher SafeBlockFetcher,
 ) (BatchValidity, eth.L2BlockRef) {
 	// add details to the log
@@ -240,9 +244,14 @@ func checkSpanBatchPrefix(ctx context.Context, cfg *rollup.Config, log log.Logge
 	}
 
 	// finding parent block of the span batch.
-	// if the span batch does not overlap the current safe chain, parentBLock should be l2SafeHead.
+	// if the span batch does not overlap the current safe chain, parentBlock should be l2SafeHead.
 	parentBlock := l2SafeHead
 	if batch.GetTimestamp() < nextTimestamp {
+		// Karst forbids overlapping span batches, so drop instead of re-deriving the overlap. NOTE(review): DESIGN.md specifies BatchPast for this case; this returns BatchDrop — confirm.
+		if cfg.IsKarst(l2SafeHead.Time) {
+			log.Warn("dropping overlapping span batch under Karst")
+			return BatchDrop, eth.L2BlockRef{}
+		}
 		if batch.GetTimestamp() > l2SafeHead.Time {
 			// batch timestamp cannot be between safe head and next timestamp
 			log.Warn("batch has misaligned timestamp, block time is too short")
@@ -261,7 +270,9 @@ func checkSpanBatchPrefix(ctx context.Context, cfg *rollup.Config, log log.Logge
 			return BatchUndecided, eth.L2BlockRef{}
 		}
 	}
-	if !batch.CheckParentHash(parentBlock.Hash) {
+	// Skip parent hash check when l2Fetcher is nil (pure derivation mode
+	// where L2 block hashes are not available).
+	if l2Fetcher != nil && !batch.CheckParentHash(parentBlock.Hash) {
 		log.Warn("ignoring batch with mismatching parent hash", "parent_block", parentBlock.Hash)
 		return BatchDrop, parentBlock
 	}
@@ -308,7 +319,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef,
 	batch *SpanBatch, l1InclusionBlock eth.L1BlockRef, l2Fetcher SafeBlockFetcher,
 ) BatchValidity {
-	prefixValidity, parentBlock := checkSpanBatchPrefix(ctx, cfg, log, l1Blocks, l2SafeHead, batch, l1InclusionBlock, l2Fetcher)
+	prefixValidity, parentBlock := CheckSpanBatchPrefix(ctx, cfg, log, l1Blocks, l2SafeHead, batch, l1InclusionBlock, l2Fetcher)
 	if prefixValidity != BatchAccept {
 		return prefixValidity
 	}