From cf240df193a378791af1ac164939743d7c3b34fc Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Tue, 24 Feb 2026 22:37:52 -0500 Subject: [PATCH 01/15] pure: add types and test helpers for pure derivation pipeline Co-Authored-By: Claude Opus 4.6 --- op-core/pure/helpers_test.go | 210 +++++++++++++++++++++++++++++++++++ op-core/pure/types.go | 126 +++++++++++++++++++++ op-core/pure/types_test.go | 107 ++++++++++++++++++ 3 files changed, 443 insertions(+) create mode 100644 op-core/pure/helpers_test.go create mode 100644 op-core/pure/types.go create mode 100644 op-core/pure/types_test.go diff --git a/op-core/pure/helpers_test.go b/op-core/pure/helpers_test.go new file mode 100644 index 0000000000000..bd8292c74e471 --- /dev/null +++ b/op-core/pure/helpers_test.go @@ -0,0 +1,210 @@ +package pure + +import ( + "bytes" + "compress/zlib" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +func testRollupConfig() *rollup.Config { + zero := uint64(0) + return &rollup.Config{ + Genesis: rollup.Genesis{ + L1: eth.BlockID{ + Hash: common.HexToHash("0x01"), + Number: 0, + }, + L2: eth.BlockID{ + Hash: common.HexToHash("0x02"), + Number: 0, + }, + L2Time: 0, + SystemConfig: testSystemConfig(), + }, + BlockTime: 2, + MaxSequencerDrift: 600, + SeqWindowSize: 10, + ChannelTimeoutBedrock: 50, + L1ChainID: big.NewInt(1), + L2ChainID: big.NewInt(10), + // Activate all forks at genesis for post-Holocene only pipeline + RegolithTime: &zero, + CanyonTime: &zero, + DeltaTime: &zero, + EcotoneTime: &zero, + FjordTime: &zero, + GraniteTime: &zero, + HoloceneTime: &zero, + BatchInboxAddress: 
common.HexToAddress("0xff00000000000000000000000000000000000010"),
		DepositContractAddress: common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"),
	}
}

// testSystemConfig returns a minimal system config carrying only the fields
// the tests read: a fixed batcher address and the L2 gas limit.
func testSystemConfig() eth.SystemConfig {
	return eth.SystemConfig{
		BatcherAddr: common.HexToAddress("0xba7c4e500000000000000000000000000000ba7c"),
		GasLimit:    30_000_000,
	}
}

// testSafeHead returns an L2 block ref positioned at the L2 genesis of the
// given rollup config, used as the starting safe head in derivation tests.
func testSafeHead(cfg *rollup.Config) eth.L2BlockRef {
	return eth.L2BlockRef{
		Hash:       cfg.Genesis.L2.Hash,
		Number:     cfg.Genesis.L2.Number,
		ParentHash: common.Hash{}, // genesis block has no parent
		Time:       cfg.Genesis.L2Time,
		L1Origin:   cfg.Genesis.L1,
	}
}

// makeTestL1Input builds a deterministic L1Input for block number num.
// Hashes are derived from num (hash = num+0x100, parent = num+0x100-1) so
// consecutive numbers form a consistent hash chain; the timestamp advances
// by 12 per block from a base of 1000.
func makeTestL1Input(num uint64) *L1Input {
	return &L1Input{
		Hash:        common.BigToHash(new(big.Int).SetUint64(num + 0x100)),
		Number:      num,
		Timestamp:   1000 + num*12,
		BaseFee:     big.NewInt(7),
		BlobBaseFee: big.NewInt(1),
		ParentHash:  common.BigToHash(new(big.Int).SetUint64(num + 0x100 - 1)),
		MixDigest:   common.BigToHash(new(big.Int).SetUint64(num + 0x200)),
	}
}

// makeTestDeposit returns a minimal user deposit transaction with fixed
// dummy source/from/to values, zero value, and no calldata.
func makeTestDeposit() *types.DepositTx {
	return &types.DepositTx{
		SourceHash: common.HexToHash("0xdead"),
		From:       common.HexToAddress("0x1111"),
		To:         ptrTo(common.HexToAddress("0x2222")),
		Value:      big.NewInt(0),
		Gas:        100_000,
		Data:       nil,
	}
}

// ptrTo returns a pointer to a copy of v.
func ptrTo[T any](v T) *T {
	return &v
}

// testL1Ref returns the L1BlockRef of the deterministic test L1 block num,
// consistent with makeTestL1Input.
func testL1Ref(num uint64) eth.L1BlockRef {
	input := makeTestL1Input(num)
	return input.BlockRef()
}

// encodeBatchToChannelData RLP-encodes a singular batch and zlib-compresses
// it into channel data (the format read by the channel reader stage).
+func encodeBatchToChannelData(t *testing.T, batch *derive.SingularBatch) []byte { + t.Helper() + + bd := derive.NewBatchData(batch) + batchBytes, err := bd.MarshalBinary() + if err != nil { + t.Fatalf("marshal batch: %v", err) + } + + // Wrap in RLP string encoding as the channel reader expects RLP-encoded batch data + var rlpBuf bytes.Buffer + if err := rlp.Encode(&rlpBuf, batchBytes); err != nil { + t.Fatalf("rlp encode batch: %v", err) + } + + // zlib compress + var compressed bytes.Buffer + w := zlib.NewWriter(&compressed) + if _, err := w.Write(rlpBuf.Bytes()); err != nil { + t.Fatalf("zlib write: %v", err) + } + if err := w.Close(); err != nil { + t.Fatalf("zlib close: %v", err) + } + + return compressed.Bytes() +} + +// wrapInFrames wraps channel data in a single-frame batcher transaction. +// The result is a raw batcher tx data payload (DerivationVersion0 prefix + frame). +func wrapInFrames(channelData []byte, channelID derive.ChannelID) []byte { + frame := derive.Frame{ + ID: channelID, + FrameNumber: 0, + Data: channelData, + IsLast: true, + } + + var buf bytes.Buffer + buf.WriteByte(params.DerivationVersion0) + _ = frame.MarshalBinary(&buf) + return buf.Bytes() +} + +func TestHelpers(t *testing.T) { + cfg := testRollupConfig() + require.NotNil(t, cfg) + require.Equal(t, uint64(2), cfg.BlockTime) + require.Equal(t, uint64(10), cfg.SeqWindowSize) + require.Equal(t, uint64(50), cfg.ChannelTimeoutBedrock) + require.NotNil(t, cfg.HoloceneTime) + + sysCfg := testSystemConfig() + require.Equal(t, uint64(30_000_000), sysCfg.GasLimit) + + safeHead := testSafeHead(cfg) + require.Equal(t, cfg.Genesis.L2.Hash, safeHead.Hash) + require.Equal(t, cfg.Genesis.L2.Number, safeHead.Number) + + l1 := makeTestL1Input(5) + require.Equal(t, uint64(5), l1.Number) + require.Equal(t, uint64(1000+5*12), l1.Timestamp) + + dep := makeTestDeposit() + require.NotNil(t, dep) + require.NotNil(t, dep.To) + + ref := testL1Ref(10) + require.Equal(t, uint64(10), ref.Number) + + l1WithBatch 
:= makeL1WithBatch(t, cfg, 1, safeHead, sysCfg) + require.Len(t, l1WithBatch.BatcherData, 1) + require.NotEmpty(t, l1WithBatch.BatcherData[0]) + + // Verify the batcher tx can be parsed as frames + frames, err := derive.ParseFrames(l1WithBatch.BatcherData[0]) + require.NoError(t, err) + require.Len(t, frames, 1) + require.True(t, frames[0].IsLast) +} + +// makeL1WithBatch creates an L1Input containing a batcher tx with one singular batch +// targeting the given safe head as parent. +func makeL1WithBatch(t *testing.T, cfg *rollup.Config, l1Num uint64, safeHead eth.L2BlockRef, sysCfg eth.SystemConfig) *L1Input { + t.Helper() + _ = sysCfg // reserved for future use in batch construction + + l1 := makeTestL1Input(l1Num) + l1Ref := l1.BlockRef() + + batch := &derive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(l1Ref.Number), + EpochHash: l1Ref.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + } + + channelData := encodeBatchToChannelData(t, batch) + + var chID derive.ChannelID + copy(chID[:], common.Hex2Bytes("deadbeefdeadbeefdeadbeefdeadbeef")) + batcherTx := wrapInFrames(channelData, chID) + + l1.BatcherData = [][]byte{batcherTx} + return l1 +} diff --git a/op-core/pure/types.go b/op-core/pure/types.go new file mode 100644 index 0000000000000..ad34ef9ac372f --- /dev/null +++ b/op-core/pure/types.go @@ -0,0 +1,126 @@ +package pure + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// L1Input is a pre-processed L1 block containing only derivation-relevant data. +// The caller is responsible for filtering batcher transactions, extracting deposits +// from receipts, and extracting system config update logs. 
type L1Input struct {
	Hash        common.Hash
	Number      uint64
	Timestamp   uint64
	BaseFee     *big.Int
	BlobBaseFee *big.Int // may be nil; the BlockInfo adapter then reports no blob fields
	ParentHash  common.Hash
	MixDigest   common.Hash // prevrandao

	BatcherData [][]byte // raw batcher transaction data (calldata or blob content)
	Deposits    []*types.DepositTx
	ConfigLogs  []*types.Log // system config update logs, pre-filtered
}

// BlockRef converts L1Input header fields to an eth.L1BlockRef.
func (l *L1Input) BlockRef() eth.L1BlockRef {
	return eth.L1BlockRef{
		Hash:       l.Hash,
		Number:     l.Number,
		ParentHash: l.ParentHash,
		Time:       l.Timestamp,
	}
}

// BlockID returns the block's ID (hash + number).
func (l *L1Input) BlockID() eth.BlockID {
	return eth.BlockID{Hash: l.Hash, Number: l.Number}
}

// DerivedBlock is a single derived L2 block -- payload attributes ready for execution.
type DerivedBlock struct {
	Attributes         *eth.PayloadAttributes
	ExpectedParentHash common.Hash // from batch ParentHash field; zero if unavailable
	DerivedFrom        eth.L1BlockRef
}

// l2Cursor tracks the derivation position without knowing the L2 block hash.
type l2Cursor struct {
	Number         uint64
	Timestamp      uint64
	L1Origin       eth.BlockID
	SequenceNumber uint64
}

// newCursor initializes a cursor from the current L2 safe head, copying its
// number, timestamp, L1 origin, and sequence number.
func newCursor(safeHead eth.L2BlockRef) l2Cursor {
	return l2Cursor{
		Number:         safeHead.Number,
		Timestamp:      safeHead.Time,
		L1Origin:       safeHead.L1Origin,
		SequenceNumber: safeHead.SequenceNumber,
	}
}

// advance moves the cursor to the next L2 block: the block number is
// incremented while timestamp, L1 origin, and sequence number are set to the
// caller-provided values for the new block.
func (c *l2Cursor) advance(timestamp uint64, l1Origin eth.BlockID, seqNum uint64) {
	c.Number++
	c.Timestamp = timestamp
	c.L1Origin = l1Origin
	c.SequenceNumber = seqNum
}

// l1InputInfo adapts L1Input to the eth.BlockInfo interface
// needed by derive.L1InfoDeposit.
+type l1InputInfo struct { + *L1Input +} + +var _ eth.BlockInfo = (*l1InputInfo)(nil) + +func (i *l1InputInfo) Hash() common.Hash { return i.L1Input.Hash } +func (i *l1InputInfo) ParentHash() common.Hash { return i.L1Input.ParentHash } +func (i *l1InputInfo) Coinbase() common.Address { return common.Address{} } +func (i *l1InputInfo) Root() common.Hash { return common.Hash{} } +func (i *l1InputInfo) NumberU64() uint64 { return i.L1Input.Number } +func (i *l1InputInfo) Time() uint64 { return i.L1Input.Timestamp } +func (i *l1InputInfo) MixDigest() common.Hash { return i.L1Input.MixDigest } +func (i *l1InputInfo) BaseFee() *big.Int { return i.L1Input.BaseFee } +func (i *l1InputInfo) ReceiptHash() common.Hash { return common.Hash{} } +func (i *l1InputInfo) GasUsed() uint64 { return 0 } +func (i *l1InputInfo) GasLimit() uint64 { return 0 } +func (i *l1InputInfo) ParentBeaconRoot() *common.Hash { return nil } +func (i *l1InputInfo) WithdrawalsRoot() *common.Hash { return nil } + +func (i *l1InputInfo) BlobBaseFee(_ *params.ChainConfig) *big.Int { + return i.L1Input.BlobBaseFee +} + +func (i *l1InputInfo) ExcessBlobGas() *uint64 { + if i.L1Input.BlobBaseFee != nil { + zero := uint64(0) + return &zero + } + return nil +} + +func (i *l1InputInfo) BlobGasUsed() *uint64 { return nil } + +func (i *l1InputInfo) HeaderRLP() ([]byte, error) { + h := i.Header() + return rlp.EncodeToBytes(h) +} + +func (i *l1InputInfo) Header() *types.Header { + return &types.Header{ + ParentHash: i.L1Input.ParentHash, + Number: new(big.Int).SetUint64(i.L1Input.Number), + Time: i.L1Input.Timestamp, + BaseFee: i.L1Input.BaseFee, + MixDigest: i.L1Input.MixDigest, + } +} diff --git a/op-core/pure/types_test.go b/op-core/pure/types_test.go new file mode 100644 index 0000000000000..8630a75e1ec1a --- /dev/null +++ b/op-core/pure/types_test.go @@ -0,0 +1,107 @@ +package pure + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + 
"github.com/ethereum-optimism/optimism/op-service/eth" +) + +func TestL1InputBlockRef(t *testing.T) { + input := L1Input{ + Hash: common.HexToHash("0xaa"), + Number: 100, + Timestamp: 1000, + ParentHash: common.HexToHash("0x99"), + BaseFee: big.NewInt(1), + BlobBaseFee: big.NewInt(1), + } + ref := input.BlockRef() + require.Equal(t, input.Hash, ref.Hash) + require.Equal(t, input.Number, ref.Number) + require.Equal(t, input.Timestamp, ref.Time) + require.Equal(t, input.ParentHash, ref.ParentHash) +} + +func TestL1InputBlockID(t *testing.T) { + input := L1Input{ + Hash: common.HexToHash("0xbb"), + Number: 42, + } + id := input.BlockID() + require.Equal(t, input.Hash, id.Hash) + require.Equal(t, input.Number, id.Number) +} + +func TestCursorAdvance(t *testing.T) { + c := newCursor(eth.L2BlockRef{ + Number: 10, + Time: 100, + L1Origin: eth.BlockID{Number: 5}, + SequenceNumber: 2, + }) + require.Equal(t, uint64(10), c.Number) + require.Equal(t, uint64(100), c.Timestamp) + require.Equal(t, uint64(2), c.SequenceNumber) + + c.advance(102, eth.BlockID{Number: 5}, 3) + require.Equal(t, uint64(11), c.Number) + require.Equal(t, uint64(102), c.Timestamp) + require.Equal(t, uint64(3), c.SequenceNumber) +} + +func TestL1InputInfoBlockInfo(t *testing.T) { + input := &L1Input{ + Hash: common.HexToHash("0xaa"), + Number: 100, + Timestamp: 1000, + ParentHash: common.HexToHash("0x99"), + MixDigest: common.HexToHash("0xdd"), + BaseFee: big.NewInt(7), + BlobBaseFee: big.NewInt(3), + } + info := &l1InputInfo{input} + + require.Equal(t, input.Hash, info.Hash()) + require.Equal(t, input.ParentHash, info.ParentHash()) + require.Equal(t, input.Number, info.NumberU64()) + require.Equal(t, input.Timestamp, info.Time()) + require.Equal(t, input.MixDigest, info.MixDigest()) + require.Equal(t, input.BaseFee, info.BaseFee()) + require.Equal(t, input.BlobBaseFee, info.BlobBaseFee(nil)) + + // Zero-value methods + require.Equal(t, common.Address{}, info.Coinbase()) + require.Equal(t, common.Hash{}, 
info.Root()) + require.Equal(t, common.Hash{}, info.ReceiptHash()) + require.Equal(t, uint64(0), info.GasUsed()) + require.Equal(t, uint64(0), info.GasLimit()) + require.Nil(t, info.ParentBeaconRoot()) + require.Nil(t, info.WithdrawalsRoot()) + + // ExcessBlobGas is non-nil when BlobBaseFee is set + require.NotNil(t, info.ExcessBlobGas()) + + // Header returns a valid header + h := info.Header() + require.Equal(t, input.ParentHash, h.ParentHash) + require.Equal(t, input.Number, h.Number.Uint64()) + + // HeaderRLP doesn't error + _, err := info.HeaderRLP() + require.NoError(t, err) +} + +func TestL1InputInfoNilBlobBaseFee(t *testing.T) { + input := &L1Input{ + Hash: common.HexToHash("0xaa"), + Number: 100, + BaseFee: big.NewInt(7), + } + info := &l1InputInfo{input} + require.Nil(t, info.BlobBaseFee(nil)) + require.Nil(t, info.ExcessBlobGas()) +} From 550047aa497acb92c97b39975b8290de44e0ce87 Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Tue, 24 Feb 2026 22:41:14 -0500 Subject: [PATCH 02/15] pure: add Holocene channel assembler Co-Authored-By: Claude Opus 4.6 --- op-core/pure/channels.go | 69 ++++++++++++++++ op-core/pure/channels_test.go | 145 ++++++++++++++++++++++++++++++++++ 2 files changed, 214 insertions(+) create mode 100644 op-core/pure/channels.go create mode 100644 op-core/pure/channels_test.go diff --git a/op-core/pure/channels.go b/op-core/pure/channels.go new file mode 100644 index 0000000000000..ffce0e12d5b69 --- /dev/null +++ b/op-core/pure/channels.go @@ -0,0 +1,69 @@ +package pure + +import ( + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// readyChannel is a completed channel ready for batch decoding. +type readyChannel struct { + id derive.ChannelID + openBlock eth.L1BlockRef + channel *derive.Channel +} + +// channelAssembler implements Holocene single-channel strict-order assembly. +// Only one channel is active at a time. Frames must arrive in order. 
// A frame for a new channel ID discards the current in-progress channel.
type channelAssembler struct {
	current   *derive.Channel  // in-progress channel; nil when none is open
	currentID derive.ChannelID // ID of the in-progress channel
	openBlock eth.L1BlockRef   // L1 block at which the current channel was opened
	nextFrame uint16           // next expected frame number for the current channel
}

// newChannelAssembler returns an assembler with no channel in progress.
func newChannelAssembler() *channelAssembler {
	return &channelAssembler{}
}

// addFrame processes a single frame. Returns a readyChannel if the channel is complete.
func (ca *channelAssembler) addFrame(frame derive.Frame, l1Ref eth.L1BlockRef) *readyChannel {
	// A frame for a different channel ID (or with no channel open) starts a
	// fresh channel, discarding any in-progress one.
	// NOTE(review): this opens a channel even when the arriving frame's
	// FrameNumber is non-zero (the frame itself is then dropped below),
	// pinning openBlock to this L1 block -- confirm against the Holocene
	// frame-ordering rules.
	if ca.current == nil || frame.ID != ca.currentID {
		ca.current = derive.NewChannel(frame.ID, l1Ref, true)
		ca.currentID = frame.ID
		ca.openBlock = l1Ref
		ca.nextFrame = 0
	}

	// Out-of-order frames are dropped; the channel stays open and keeps
	// waiting for the expected frame number.
	if frame.FrameNumber != ca.nextFrame {
		return nil
	}

	// Frames the channel itself rejects are dropped without advancing.
	if err := ca.current.AddFrame(frame, l1Ref); err != nil {
		return nil
	}
	ca.nextFrame++

	// A ready channel (last frame seen) is handed off and the assembler is
	// cleared so the next frame opens a new channel.
	if ca.current.IsReady() {
		ready := &readyChannel{
			id:        ca.currentID,
			openBlock: ca.openBlock,
			channel:   ca.current,
		}
		ca.current = nil
		return ready
	}
	return nil
}

// checkTimeout returns true and discards the current channel if it has timed out.
+func (ca *channelAssembler) checkTimeout(current eth.L1BlockRef, channelTimeout uint64) bool { + if ca.current == nil { + return false + } + if current.Number > ca.openBlock.Number+channelTimeout { + ca.current = nil + return true + } + return false +} diff --git a/op-core/pure/channels_test.go b/op-core/pure/channels_test.go new file mode 100644 index 0000000000000..054f0bcb289c5 --- /dev/null +++ b/op-core/pure/channels_test.go @@ -0,0 +1,145 @@ +package pure + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/stretchr/testify/require" +) + +func testChannelID(b byte) derive.ChannelID { + var id derive.ChannelID + id[0] = b + return id +} + +func TestChannelAssembler_SingleFrameChannel(t *testing.T) { + ca := newChannelAssembler() + l1 := testL1Ref(1) + + ready := ca.addFrame(derive.Frame{ + ID: testChannelID(0xAA), + FrameNumber: 0, + Data: []byte("hello"), + IsLast: true, + }, l1) + + require.NotNil(t, ready, "single-frame channel should be ready immediately") + require.Equal(t, testChannelID(0xAA), ready.id) + require.Equal(t, l1, ready.openBlock) + require.True(t, ready.channel.IsReady()) +} + +func TestChannelAssembler_MultiFrameChannel(t *testing.T) { + ca := newChannelAssembler() + chID := testChannelID(0xBB) + l1 := testL1Ref(1) + + ready := ca.addFrame(derive.Frame{ + ID: chID, + FrameNumber: 0, + Data: []byte("part1"), + IsLast: false, + }, l1) + require.Nil(t, ready, "channel should not be ready after first frame") + + l1b := testL1Ref(2) + ready = ca.addFrame(derive.Frame{ + ID: chID, + FrameNumber: 1, + Data: []byte("part2"), + IsLast: true, + }, l1b) + + require.NotNil(t, ready, "channel should be ready after last frame") + require.Equal(t, chID, ready.id) + require.Equal(t, l1, ready.openBlock, "openBlock should be from the first frame") + require.True(t, ready.channel.IsReady()) +} + +func TestChannelAssembler_NewChannelDiscardsOld(t *testing.T) { + ca := newChannelAssembler() + chA := 
testChannelID(0xAA) + chB := testChannelID(0xBB) + l1 := testL1Ref(1) + + ready := ca.addFrame(derive.Frame{ + ID: chA, + FrameNumber: 0, + Data: []byte("A-frame0"), + IsLast: false, + }, l1) + require.Nil(t, ready) + require.Equal(t, chA, ca.currentID) + + l1b := testL1Ref(2) + ready = ca.addFrame(derive.Frame{ + ID: chB, + FrameNumber: 0, + Data: []byte("B-frame0"), + IsLast: true, + }, l1b) + + require.NotNil(t, ready, "new channel B should complete") + require.Equal(t, chB, ready.id) + require.Equal(t, l1b, ready.openBlock, "openBlock should be from channel B's first frame") +} + +func TestChannelAssembler_Timeout(t *testing.T) { + ca := newChannelAssembler() + chID := testChannelID(0xCC) + l1Open := testL1Ref(10) + + ca.addFrame(derive.Frame{ + ID: chID, + FrameNumber: 0, + Data: []byte("data"), + IsLast: false, + }, l1Open) + require.NotNil(t, ca.current, "channel should be in progress") + + channelTimeout := uint64(50) + + notTimedOut := testL1Ref(10 + channelTimeout) + require.False(t, ca.checkTimeout(notTimedOut, channelTimeout), + "should not timeout at exactly openBlock + channelTimeout") + require.NotNil(t, ca.current) + + timedOut := testL1Ref(10 + channelTimeout + 1) + require.True(t, ca.checkTimeout(timedOut, channelTimeout), + "should timeout when current.Number > openBlock.Number + channelTimeout") + require.Nil(t, ca.current, "channel should be discarded after timeout") +} + +func TestChannelAssembler_OutOfOrderFrame(t *testing.T) { + ca := newChannelAssembler() + chID := testChannelID(0xDD) + l1 := testL1Ref(1) + + ready := ca.addFrame(derive.Frame{ + ID: chID, + FrameNumber: 0, + Data: []byte("frame0"), + IsLast: false, + }, l1) + require.Nil(t, ready) + + ready = ca.addFrame(derive.Frame{ + ID: chID, + FrameNumber: 2, // skip frame 1 + Data: []byte("frame2"), + IsLast: true, + }, l1) + require.Nil(t, ready, "out-of-order frame should be dropped") + + require.NotNil(t, ca.current, "channel should still be in progress") + require.Equal(t, 
uint16(1), ca.nextFrame, "nextFrame should still expect frame 1") + + ready = ca.addFrame(derive.Frame{ + ID: chID, + FrameNumber: 1, + Data: []byte("frame1"), + IsLast: true, + }, l1) + require.NotNil(t, ready, "channel should complete once gap is filled") +} From e3d19e8ab543d064ff77728feecac9a1b1c10a48 Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Tue, 24 Feb 2026 22:47:37 -0500 Subject: [PATCH 03/15] pure: add batch decoding, validation, and empty batch generation Co-Authored-By: Claude Opus 4.6 --- op-core/pure/batches.go | 124 ++++++++++++++++++++++ op-core/pure/batches_test.go | 197 +++++++++++++++++++++++++++++++++++ 2 files changed, 321 insertions(+) create mode 100644 op-core/pure/batches.go create mode 100644 op-core/pure/batches_test.go diff --git a/op-core/pure/batches.go b/op-core/pure/batches.go new file mode 100644 index 0000000000000..fc1f82cf018dc --- /dev/null +++ b/op-core/pure/batches.go @@ -0,0 +1,124 @@ +package pure + +import ( + "fmt" + "io" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// decodeBatches reads all batches from a completed channel's compressed data +// and returns them as singular batches. Span batches are expanded into +// individual singular batches using the provided L1 origins and cursor. 
func decodeBatches(
	r io.Reader,
	cfg *rollup.Config,
	l1Origins []eth.L1BlockRef,
	cursor l2Cursor,
) ([]*derive.SingularBatch, error) {
	// Size limits and encoding flags depend on the L2 time at the cursor.
	spec := rollup.NewChainSpec(cfg)
	maxRLP := spec.MaxRLPBytesPerChannel(cursor.Timestamp)
	isFjord := cfg.IsFjord(cursor.Timestamp)

	readBatch, err := derive.BatchReader(r, maxRLP, isFjord)
	if err != nil {
		return nil, fmt.Errorf("creating batch reader: %w", err)
	}

	var batches []*derive.SingularBatch
	for {
		batchData, err := readBatch()
		if err != nil {
			// io.EOF marks the clean end of the channel's batch stream.
			if err == io.EOF {
				break
			}
			return nil, fmt.Errorf("reading batch: %w", err)
		}

		switch batchData.GetBatchType() {
		case derive.SingularBatchType:
			// Singular batches are appended as-is.
			singular, err := derive.GetSingularBatch(batchData)
			if err != nil {
				return nil, fmt.Errorf("extracting singular batch: %w", err)
			}
			batches = append(batches, singular)

		case derive.SpanBatchType:
			// Span batches cover a range of consecutive L2 blocks; expand
			// them into singular batches relative to the cursor (acting as
			// the L2 safe head) and the known L1 origins.
			spanBatch, err := derive.DeriveSpanBatch(
				batchData,
				cfg.BlockTime,
				cfg.Genesis.L2Time,
				cfg.L2ChainID,
			)
			if err != nil {
				return nil, fmt.Errorf("deriving span batch: %w", err)
			}
			l2SafeHead := eth.L2BlockRef{
				Number:         cursor.Number,
				Time:           cursor.Timestamp,
				L1Origin:       cursor.L1Origin,
				SequenceNumber: cursor.SequenceNumber,
			}
			singular, err := spanBatch.GetSingularBatches(l1Origins, l2SafeHead)
			if err != nil {
				return nil, fmt.Errorf("expanding span batch: %w", err)
			}
			batches = append(batches, singular...)

		default:
			// Unknown batch types abort decoding of the whole channel.
			return nil, fmt.Errorf("unknown batch type: %d", batchData.GetBatchType())
		}
	}

	return batches, nil
}

// validateBatch checks whether a singular batch is valid given the current
// derivation cursor and known L1 origins.
+func validateBatch(batch *derive.SingularBatch, cursor l2Cursor, l1Origins []eth.L1BlockRef, cfg *rollup.Config) bool { + expectedTimestamp := cursor.Timestamp + cfg.BlockTime + if batch.Timestamp != expectedTimestamp { + return false + } + + epochNum := uint64(batch.EpochNum) + + if epochNum < cursor.L1Origin.Number { + return false + } + + if len(l1Origins) == 0 { + return false + } + latestOrigin := l1Origins[len(l1Origins)-1] + if epochNum > latestOrigin.Number { + return false + } + + for _, origin := range l1Origins { + if origin.Number == epochNum { + return batch.EpochHash == origin.Hash + } + } + + return false +} + +// needsEmptyBatch returns true when the sequencing window has expired, +// meaning the cursor's L1 origin is more than SeqWindowSize blocks behind +// the current L1 block. +func needsEmptyBatch(cursor l2Cursor, currentL1 eth.L1BlockRef, cfg *rollup.Config) bool { + return currentL1.Number > cursor.L1Origin.Number+cfg.SeqWindowSize +} + +// makeEmptyBatch creates a batch with no transactions at the next expected +// timestamp, advancing from the current cursor position. 
+func makeEmptyBatch(cursor l2Cursor, cfg *rollup.Config) *derive.SingularBatch { + return &derive.SingularBatch{ + EpochNum: rollup.Epoch(cursor.L1Origin.Number), + EpochHash: cursor.L1Origin.Hash, + Timestamp: cursor.Timestamp + cfg.BlockTime, + } +} diff --git a/op-core/pure/batches_test.go b/op-core/pure/batches_test.go new file mode 100644 index 0000000000000..f0850b411edc0 --- /dev/null +++ b/op-core/pure/batches_test.go @@ -0,0 +1,197 @@ +package pure + +import ( + "bytes" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +func TestDecodeBatches_SingularBatch(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + l1Ref := testL1Ref(1) + + batch := &derive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(l1Ref.Number), + EpochHash: l1Ref.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + } + + channelData := encodeBatchToChannelData(t, batch) + + cursor := newCursor(safeHead) + l1Origins := []eth.L1BlockRef{testL1Ref(0), l1Ref} + + batches, err := decodeBatches(bytes.NewReader(channelData), cfg, l1Origins, cursor) + require.NoError(t, err) + require.Len(t, batches, 1) + + decoded := batches[0] + require.Equal(t, batch.ParentHash, decoded.ParentHash) + require.Equal(t, batch.EpochNum, decoded.EpochNum) + require.Equal(t, batch.EpochHash, decoded.EpochHash) + require.Equal(t, batch.Timestamp, decoded.Timestamp) +} + +func TestValidateBatch_ValidSingular(t *testing.T) { + cfg := testRollupConfig() + l1Origin := testL1Ref(5) + + cursor := l2Cursor{ + Number: 10, + Timestamp: 100, + L1Origin: l1Origin.ID(), + } + + batch := &derive.SingularBatch{ + EpochNum: rollup.Epoch(l1Origin.Number), + EpochHash: l1Origin.Hash, + Timestamp: cursor.Timestamp + cfg.BlockTime, + } + + l1Origins := 
[]eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} + + require.True(t, validateBatch(batch, cursor, l1Origins, cfg)) +} + +func TestValidateBatch_WrongTimestamp(t *testing.T) { + cfg := testRollupConfig() + l1Origin := testL1Ref(5) + + cursor := l2Cursor{ + Number: 10, + Timestamp: 100, + L1Origin: l1Origin.ID(), + } + + batch := &derive.SingularBatch{ + EpochNum: rollup.Epoch(l1Origin.Number), + EpochHash: l1Origin.Hash, + Timestamp: cursor.Timestamp + cfg.BlockTime + 1, // wrong + } + + l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} + + require.False(t, validateBatch(batch, cursor, l1Origins, cfg)) +} + +func TestValidateBatch_SpanBatchNoOverlap(t *testing.T) { + cfg := testRollupConfig() + l1Origin := testL1Ref(5) + + cursor := l2Cursor{ + Number: 10, + Timestamp: 100, + L1Origin: l1Origin.ID(), + } + + // Timestamp before cursor (overlap) -- this will fail the timestamp == cursor + blockTime check + batch := &derive.SingularBatch{ + EpochNum: rollup.Epoch(l1Origin.Number), + EpochHash: l1Origin.Hash, + Timestamp: cursor.Timestamp - 2, + } + + l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} + + require.False(t, validateBatch(batch, cursor, l1Origins, cfg)) +} + +func TestValidateBatch_EpochTooOld(t *testing.T) { + cfg := testRollupConfig() + l1Origin := testL1Ref(5) + + cursor := l2Cursor{ + Number: 10, + Timestamp: 100, + L1Origin: l1Origin.ID(), + } + + oldOrigin := testL1Ref(3) + batch := &derive.SingularBatch{ + EpochNum: rollup.Epoch(oldOrigin.Number), // before cursor's L1 origin + EpochHash: oldOrigin.Hash, + Timestamp: cursor.Timestamp + cfg.BlockTime, + } + + l1Origins := []eth.L1BlockRef{oldOrigin, testL1Ref(4), l1Origin, testL1Ref(6)} + + require.False(t, validateBatch(batch, cursor, l1Origins, cfg)) +} + +func TestValidateBatch_EpochTooNew(t *testing.T) { + cfg := testRollupConfig() + l1Origin := testL1Ref(5) + + cursor := l2Cursor{ + Number: 10, + Timestamp: 100, + L1Origin: l1Origin.ID(), + } + + batch 
:= &derive.SingularBatch{ + EpochNum: rollup.Epoch(100), // way beyond latest L1 origin + EpochHash: common.Hash{0xab}, + Timestamp: cursor.Timestamp + cfg.BlockTime, + } + + l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} + + require.False(t, validateBatch(batch, cursor, l1Origins, cfg)) +} + +func TestNeedEmptyBatch_WindowNotExpired(t *testing.T) { + cfg := testRollupConfig() // SeqWindowSize = 10 + + cursor := l2Cursor{ + Number: 10, + Timestamp: 100, + L1Origin: eth.BlockID{Number: 5}, + } + + // currentL1.Number (15) == cursor.L1Origin.Number (5) + SeqWindowSize (10) + // Not strictly greater, so window not expired + currentL1 := eth.L1BlockRef{Number: 15} + + require.False(t, needsEmptyBatch(cursor, currentL1, cfg)) +} + +func TestNeedEmptyBatch_WindowExpired(t *testing.T) { + cfg := testRollupConfig() // SeqWindowSize = 10 + + cursor := l2Cursor{ + Number: 10, + Timestamp: 100, + L1Origin: eth.BlockID{Number: 5}, + } + + // currentL1.Number (16) > cursor.L1Origin.Number (5) + SeqWindowSize (10) + currentL1 := eth.L1BlockRef{Number: 16} + + require.True(t, needsEmptyBatch(cursor, currentL1, cfg)) +} + +func TestMakeEmptyBatch(t *testing.T) { + cfg := testRollupConfig() + origin := testL1Ref(5) + + cursor := l2Cursor{ + Number: 10, + Timestamp: 100, + L1Origin: origin.ID(), + } + + batch := makeEmptyBatch(cursor, cfg) + + require.Equal(t, rollup.Epoch(origin.Number), batch.EpochNum) + require.Equal(t, origin.Hash, batch.EpochHash) + require.Equal(t, cursor.Timestamp+cfg.BlockTime, batch.Timestamp) + require.Empty(t, batch.Transactions) +} From 4435847b71fd7bef79587bf209b3ae657d68b72e Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Tue, 24 Feb 2026 22:52:47 -0500 Subject: [PATCH 04/15] pure: add payload attributes assembly Co-Authored-By: Claude Opus 4.6 --- op-core/pure/attributes.go | 109 +++++++++++++++ op-core/pure/attributes_test.go | 237 ++++++++++++++++++++++++++++++++ 2 files changed, 346 insertions(+) create mode 100644 
op-core/pure/attributes.go create mode 100644 op-core/pure/attributes_test.go diff --git a/op-core/pure/attributes.go b/op-core/pure/attributes.go new file mode 100644 index 0000000000000..350f08e6b7957 --- /dev/null +++ b/op-core/pure/attributes.go @@ -0,0 +1,109 @@ +package pure + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + + "github.com/ethereum-optimism/optimism/op-core/predeploys" + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// buildAttributes constructs a DerivedBlock (PayloadAttributes + metadata) from +// a validated singular batch, its L1 origin, the current derivation cursor, +// and the active system config. +// +// Transaction ordering follows the OP Stack derivation spec: +// 1. L1 info deposit transaction (always first) +// 2. User deposit transactions (only at epoch boundaries) +// 3. 
Batch transactions from the sequencer
func buildAttributes(
	batch *derive.SingularBatch,
	l1Block *L1Input,
	cursor l2Cursor,
	sysConfig eth.SystemConfig,
	cfg *rollup.Config,
) (*DerivedBlock, error) {
	// The epoch changes when the batch references a different L1 origin than
	// the cursor currently sits on.
	epochChanged := uint64(batch.EpochNum) != cursor.L1Origin.Number

	// The sequence number restarts at 0 on an epoch change, otherwise it
	// continues from the cursor's last value.
	var seqNumber uint64
	if epochChanged {
		seqNumber = 0
	} else {
		seqNumber = cursor.SequenceNumber + 1
	}

	l2Timestamp := batch.Timestamp
	blockInfo := &l1InputInfo{l1Block}

	// The L1 info deposit is always the first transaction of the L2 block.
	l1InfoTx, err := derive.L1InfoDeposit(cfg, nil, sysConfig, seqNumber, blockInfo, l2Timestamp)
	if err != nil {
		return nil, fmt.Errorf("failed to create L1 info deposit tx: %w", err)
	}

	encodedL1Info, err := types.NewTx(l1InfoTx).MarshalBinary()
	if err != nil {
		return nil, fmt.Errorf("failed to encode L1 info deposit tx: %w", err)
	}

	// Pre-size: L1 info tx + (deposits at epoch boundaries) + batch txs.
	txCount := 1 + len(batch.Transactions)
	if epochChanged {
		txCount += len(l1Block.Deposits)
	}
	txs := make([]hexutil.Bytes, 0, txCount)
	txs = append(txs, encodedL1Info)

	// User deposits are only included in the first L2 block of an epoch.
	if epochChanged {
		for _, dep := range l1Block.Deposits {
			encoded, err := types.NewTx(dep).MarshalBinary()
			if err != nil {
				return nil, fmt.Errorf("failed to encode user deposit tx: %w", err)
			}
			txs = append(txs, encoded)
		}
	}

	// Sequencer batch transactions come last.
	txs = append(txs, batch.Transactions...)
+ + gasLimit := sysConfig.GasLimit + + var withdrawals *types.Withdrawals + if cfg.IsCanyon(l2Timestamp) { + withdrawals = &types.Withdrawals{} + } + + var parentBeaconRoot *common.Hash + if cfg.IsEcotone(l2Timestamp) { + parentBeaconRoot = new(common.Hash) + } + + attrs := ð.PayloadAttributes{ + Timestamp: hexutil.Uint64(l2Timestamp), + PrevRandao: eth.Bytes32(l1Block.MixDigest), + SuggestedFeeRecipient: predeploys.SequencerFeeVaultAddr, + Transactions: txs, + NoTxPool: true, + GasLimit: (*eth.Uint64Quantity)(&gasLimit), + Withdrawals: withdrawals, + ParentBeaconBlockRoot: parentBeaconRoot, + } + + if cfg.IsHolocene(l2Timestamp) { + attrs.EIP1559Params = new(eth.Bytes8) + *attrs.EIP1559Params = sysConfig.EIP1559Params + } + + if cfg.IsJovian(l2Timestamp) { + attrs.MinBaseFee = &sysConfig.MinBaseFee + } + + return &DerivedBlock{ + Attributes: attrs, + ExpectedParentHash: batch.ParentHash, + DerivedFrom: l1Block.BlockRef(), + }, nil +} diff --git a/op-core/pure/attributes_test.go b/op-core/pure/attributes_test.go new file mode 100644 index 0000000000000..028c9d1023dbf --- /dev/null +++ b/op-core/pure/attributes_test.go @@ -0,0 +1,237 @@ +package pure + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + + "github.com/ethereum-optimism/optimism/op-core/predeploys" + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/stretchr/testify/require" +) + +func TestBuildAttributes_EpochStart(t *testing.T) { + cfg := testRollupConfig() + sysConfig := testSystemConfig() + l1Block := makeTestL1Input(5) + l1Block.Deposits = []*types.DepositTx{makeTestDeposit(), makeTestDeposit()} + + userTx := hexutil.Bytes{0x01, 0x02, 0x03} + batch := &derive.SingularBatch{ + ParentHash: common.HexToHash("0xaaaa"), + EpochNum: 
rollup.Epoch(l1Block.Number), + EpochHash: l1Block.Hash, + Timestamp: l1Block.Timestamp + cfg.BlockTime, + Transactions: []hexutil.Bytes{userTx}, + } + + // Cursor has a different L1 origin so this is an epoch boundary + cursor := l2Cursor{ + Number: 10, + Timestamp: batch.Timestamp - cfg.BlockTime, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xprev"), Number: l1Block.Number - 1}, + SequenceNumber: 3, + } + + result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg) + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Attributes) + + attrs := result.Attributes + + // L1 info deposit + 2 user deposits + 1 batch tx = 4 + require.GreaterOrEqual(t, len(attrs.Transactions), 3) + require.Len(t, attrs.Transactions, 4) + + require.True(t, attrs.NoTxPool) + require.Equal(t, hexutil.Uint64(batch.Timestamp), attrs.Timestamp) + require.Equal(t, eth.Bytes32(l1Block.MixDigest), attrs.PrevRandao) + require.Equal(t, predeploys.SequencerFeeVaultAddr, attrs.SuggestedFeeRecipient) + require.NotNil(t, attrs.GasLimit) + require.Equal(t, sysConfig.GasLimit, uint64(*attrs.GasLimit)) + require.NotNil(t, attrs.Withdrawals) + require.Empty(t, *attrs.Withdrawals) + + // The last transaction should be the batch tx + require.Equal(t, userTx, attrs.Transactions[len(attrs.Transactions)-1]) + + require.Equal(t, batch.ParentHash, result.ExpectedParentHash) + require.Equal(t, l1Block.BlockRef(), result.DerivedFrom) +} + +func TestBuildAttributes_SameEpoch(t *testing.T) { + cfg := testRollupConfig() + sysConfig := testSystemConfig() + l1Block := makeTestL1Input(5) + l1Block.Deposits = []*types.DepositTx{makeTestDeposit()} + + userTx := hexutil.Bytes{0xaa, 0xbb} + batch := &derive.SingularBatch{ + ParentHash: common.HexToHash("0xbbbb"), + EpochNum: rollup.Epoch(l1Block.Number), + EpochHash: l1Block.Hash, + Timestamp: l1Block.Timestamp + 2*cfg.BlockTime, + Transactions: []hexutil.Bytes{userTx}, + } + + // Same L1 origin -- not an epoch boundary + cursor := 
l2Cursor{ + Number: 10, + Timestamp: batch.Timestamp - cfg.BlockTime, + L1Origin: eth.BlockID{Hash: l1Block.Hash, Number: l1Block.Number}, + SequenceNumber: 2, + } + + result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg) + require.NoError(t, err) + require.NotNil(t, result) + + attrs := result.Attributes + + // L1 info deposit + 1 batch tx = 2 (no user deposits because same epoch) + require.GreaterOrEqual(t, len(attrs.Transactions), 2) + require.Len(t, attrs.Transactions, 2) + + require.True(t, attrs.NoTxPool) + require.Equal(t, hexutil.Uint64(batch.Timestamp), attrs.Timestamp) + + // The last transaction should be the batch tx + require.Equal(t, userTx, attrs.Transactions[len(attrs.Transactions)-1]) +} + +func TestBuildAttributes_EmptyBatch(t *testing.T) { + cfg := testRollupConfig() + sysConfig := testSystemConfig() + + t.Run("empty batch at epoch start", func(t *testing.T) { + l1Block := makeTestL1Input(5) + l1Block.Deposits = []*types.DepositTx{makeTestDeposit()} + + batch := &derive.SingularBatch{ + ParentHash: common.HexToHash("0xcccc"), + EpochNum: rollup.Epoch(l1Block.Number), + EpochHash: l1Block.Hash, + Timestamp: l1Block.Timestamp + cfg.BlockTime, + Transactions: nil, + } + + cursor := l2Cursor{ + Number: 10, + Timestamp: batch.Timestamp - cfg.BlockTime, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xold"), Number: l1Block.Number - 1}, + SequenceNumber: 0, + } + + result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg) + require.NoError(t, err) + + // L1 info deposit + 1 user deposit = 2 (no batch txs) + require.Len(t, result.Attributes.Transactions, 2) + }) + + t.Run("empty batch same epoch", func(t *testing.T) { + l1Block := makeTestL1Input(5) + + batch := &derive.SingularBatch{ + ParentHash: common.HexToHash("0xdddd"), + EpochNum: rollup.Epoch(l1Block.Number), + EpochHash: l1Block.Hash, + Timestamp: l1Block.Timestamp + 2*cfg.BlockTime, + Transactions: nil, + } + + cursor := l2Cursor{ + Number: 10, + Timestamp: 
batch.Timestamp - cfg.BlockTime, + L1Origin: eth.BlockID{Hash: l1Block.Hash, Number: l1Block.Number}, + SequenceNumber: 1, + } + + result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg) + require.NoError(t, err) + + // Only L1 info deposit, no user deposits, no batch txs + require.Len(t, result.Attributes.Transactions, 1) + }) +} + +func TestBuildAttributes_HoloceneFields(t *testing.T) { + cfg := testRollupConfig() + sysConfig := testSystemConfig() + sysConfig.EIP1559Params = eth.Bytes8{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08} + + l1Block := makeTestL1Input(5) + batch := &derive.SingularBatch{ + ParentHash: common.HexToHash("0xeeee"), + EpochNum: rollup.Epoch(l1Block.Number), + EpochHash: l1Block.Hash, + Timestamp: l1Block.Timestamp + cfg.BlockTime, + } + + cursor := l2Cursor{ + Number: 10, + Timestamp: batch.Timestamp - cfg.BlockTime, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xold"), Number: l1Block.Number - 1}, + SequenceNumber: 0, + } + + result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg) + require.NoError(t, err) + require.NotNil(t, result.Attributes.EIP1559Params) + require.Equal(t, sysConfig.EIP1559Params, *result.Attributes.EIP1559Params) + require.NotNil(t, result.Attributes.ParentBeaconBlockRoot) + require.NotNil(t, result.Attributes.Withdrawals) +} + +func TestBuildAttributes_SequenceNumber(t *testing.T) { + cfg := testRollupConfig() + sysConfig := testSystemConfig() + l1Block := makeTestL1Input(5) + + t.Run("epoch start resets to zero", func(t *testing.T) { + batch := &derive.SingularBatch{ + ParentHash: common.HexToHash("0x1111"), + EpochNum: rollup.Epoch(l1Block.Number), + EpochHash: l1Block.Hash, + Timestamp: l1Block.Timestamp + cfg.BlockTime, + } + + cursor := l2Cursor{ + Number: 10, + Timestamp: batch.Timestamp - cfg.BlockTime, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xold"), Number: l1Block.Number - 1}, + SequenceNumber: 5, + } + + // Sequence number 0 is used internally; we verify the 
result is valid + result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg) + require.NoError(t, err) + require.NotNil(t, result) + }) + + t.Run("same epoch increments", func(t *testing.T) { + batch := &derive.SingularBatch{ + ParentHash: common.HexToHash("0x2222"), + EpochNum: rollup.Epoch(l1Block.Number), + EpochHash: l1Block.Hash, + Timestamp: l1Block.Timestamp + 4*cfg.BlockTime, + } + + cursor := l2Cursor{ + Number: 10, + Timestamp: batch.Timestamp - cfg.BlockTime, + L1Origin: eth.BlockID{Hash: l1Block.Hash, Number: l1Block.Number}, + SequenceNumber: 5, + } + + // Sequence number 6 is used internally; we verify the result is valid + result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg) + require.NoError(t, err) + require.NotNil(t, result) + }) +} From 57b0fb998e9ef82584ec77c88b06cb32a17e962a Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Tue, 24 Feb 2026 23:00:46 -0500 Subject: [PATCH 05/15] pure: implement PureDerive main function Co-Authored-By: Claude Opus 4.6 --- op-core/pure/derive.go | 132 ++++++++++++++++++++ op-core/pure/derive_test.go | 237 ++++++++++++++++++++++++++++++++++++ 2 files changed, 369 insertions(+) create mode 100644 op-core/pure/derive.go create mode 100644 op-core/pure/derive_test.go diff --git a/op-core/pure/derive.go b/op-core/pure/derive.go new file mode 100644 index 0000000000000..56bfaf3af623a --- /dev/null +++ b/op-core/pure/derive.go @@ -0,0 +1,132 @@ +package pure + +import ( + "fmt" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// PureDerive is the main entry point for pure derivation. It takes an L2 safe +// head, system config, and a sequence of L1 blocks and produces the derived L2 +// blocks (as payload attributes) that follow from those inputs. 
+// +// The function is stateless and deterministic: given the same inputs it always +// produces the same outputs. No network access, no caching, no side effects. +func PureDerive( + cfg *rollup.Config, + safeHead eth.L2BlockRef, + sysConfig eth.SystemConfig, + l1Blocks []L1Input, +) ([]DerivedBlock, error) { + cursor := newCursor(safeHead) + assembler := newChannelAssembler() + + l1Origins := make([]eth.L1BlockRef, len(l1Blocks)) + for i := range l1Blocks { + l1Origins[i] = l1Blocks[i].BlockRef() + } + + var derived []DerivedBlock + + for i := range l1Blocks { + l1 := l1Blocks[i] + l1Ref := l1.BlockRef() + + for _, log := range l1.ConfigLogs { + if err := derive.ProcessSystemConfigUpdateLogEvent(&sysConfig, log, cfg, l1.Timestamp); err != nil { + return nil, fmt.Errorf("processing system config update at L1 block %d: %w", l1.Number, err) + } + } + + assembler.checkTimeout(l1Ref, cfg.ChannelTimeoutBedrock) + + for _, txData := range l1.BatcherData { + frames, err := derive.ParseFrames(txData) + if err != nil { + continue + } + + for _, frame := range frames { + ready := assembler.addFrame(frame, l1Ref) + if ready == nil { + continue + } + + batches, err := decodeBatches(ready.channel.Reader(), cfg, l1Origins, cursor) + if err != nil { + continue + } + + for _, batch := range batches { + if !validateBatch(batch, cursor, l1Origins, cfg) { + continue + } + + epochL1 := findL1Origin(l1Blocks, uint64(batch.EpochNum)) + if epochL1 == nil { + epochL1 = &l1 + } + + block, err := buildAttributes(batch, epochL1, cursor, sysConfig, cfg) + if err != nil { + return nil, fmt.Errorf("building attributes at L1 block %d: %w", l1.Number, err) + } + derived = append(derived, *block) + + epochID := eth.BlockID{Number: uint64(batch.EpochNum), Hash: batch.EpochHash} + var seqNum uint64 + if epochID.Number != cursor.L1Origin.Number { + seqNum = 0 + } else { + seqNum = cursor.SequenceNumber + 1 + } + cursor.advance(batch.Timestamp, epochID, seqNum) + } + } + } + + for 
needsEmptyBatch(cursor, l1Ref, cfg) { + nextTimestamp := cursor.Timestamp + cfg.BlockTime + newOrigin := cursor.L1Origin + newSeqNum := cursor.SequenceNumber + 1 + + // Advance epoch if the next L2 timestamp >= next L1 block's timestamp. + nextL1 := findL1Origin(l1Blocks, cursor.L1Origin.Number+1) + if nextL1 != nil && nextTimestamp >= nextL1.Timestamp { + newOrigin = nextL1.BlockID() + newSeqNum = 0 + } + + emptyBatch := &derive.SingularBatch{ + EpochNum: rollup.Epoch(newOrigin.Number), + EpochHash: newOrigin.Hash, + Timestamp: nextTimestamp, + } + + epochL1 := findL1Origin(l1Blocks, newOrigin.Number) + if epochL1 == nil { + epochL1 = &l1 + } + block, err := buildAttributes(emptyBatch, epochL1, cursor, sysConfig, cfg) + if err != nil { + return nil, fmt.Errorf("building empty batch attributes at L1 block %d: %w", l1.Number, err) + } + derived = append(derived, *block) + cursor.advance(emptyBatch.Timestamp, newOrigin, newSeqNum) + } + } + + return derived, nil +} + +// findL1Origin looks up an L1Input by block number from the provided slice. 
+func findL1Origin(l1Blocks []L1Input, number uint64) *L1Input { + for i := range l1Blocks { + if l1Blocks[i].Number == number { + return &l1Blocks[i] + } + } + return nil +} diff --git a/op-core/pure/derive_test.go b/op-core/pure/derive_test.go new file mode 100644 index 0000000000000..5ee5908b4e286 --- /dev/null +++ b/op-core/pure/derive_test.go @@ -0,0 +1,237 @@ +package pure + +import ( + "bytes" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +func TestPureDerive_SingleBatch(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + l1 := makeL1WithBatch(t, cfg, 1, safeHead, sysConfig) + + derived, err := PureDerive(cfg, safeHead, sysConfig, []L1Input{*l1}) + require.NoError(t, err) + require.Len(t, derived, 1) + + block := derived[0] + require.Equal(t, hexutil.Uint64(safeHead.Time+cfg.BlockTime), block.Attributes.Timestamp) + require.True(t, block.Attributes.NoTxPool) + require.Equal(t, l1.BlockRef(), block.DerivedFrom) +} + +func TestPureDerive_EmptyEpoch(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + // Create SeqWindowSize + 2 L1 blocks with no batcher data. + // The sequencer window expires once we get far enough ahead of the cursor's L1 origin. 
+ numBlocks := cfg.SeqWindowSize + 2 + l1Blocks := make([]L1Input, numBlocks) + for i := uint64(0); i < numBlocks; i++ { + l1Blocks[i] = *makeTestL1Input(i) + } + + derived, err := PureDerive(cfg, safeHead, sysConfig, l1Blocks) + require.NoError(t, err) + require.Greater(t, len(derived), 0, "empty batches should be generated when sequencer window expires") + + // Each derived block should have sequential timestamps. + expectedTimestamp := safeHead.Time + cfg.BlockTime + for _, block := range derived { + require.Equal(t, hexutil.Uint64(expectedTimestamp), block.Attributes.Timestamp) + expectedTimestamp += cfg.BlockTime + } +} + +func TestPureDerive_MultipleChannelsAndEpochs(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + l1Blocks := makeMultiEpochL1Inputs(t, cfg, safeHead, sysConfig) + + derived, err := PureDerive(cfg, safeHead, sysConfig, l1Blocks) + require.NoError(t, err) + require.Greater(t, len(derived), 1, "should derive multiple blocks from multiple epochs") + + // Each derived block should have sequential timestamps. + expectedTimestamp := safeHead.Time + cfg.BlockTime + for i, block := range derived { + require.Equal(t, hexutil.Uint64(expectedTimestamp), block.Attributes.Timestamp, + "block %d should have timestamp %d", i, expectedTimestamp) + expectedTimestamp += cfg.BlockTime + } +} + +func TestPureDerive_ChannelTimeout(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + // Create an incomplete channel at L1 block 1 (frame 0 of 2, not last). + incompleteL1 := makeTestL1Input(1) + incompleteChID := testChannelID(0xAA) + + batch := &derive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(incompleteL1.Number), + EpochHash: incompleteL1.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + } + channelData := encodeBatchToChannelData(t, batch) + + // Split into two frames but only include the first (non-last) frame. 
+ frame0 := derive.Frame{ + ID: incompleteChID, + FrameNumber: 0, + Data: channelData, + IsLast: false, + } + var buf bytes.Buffer + buf.WriteByte(params.DerivationVersion0) + require.NoError(t, frame0.MarshalBinary(&buf)) + incompleteL1.BatcherData = [][]byte{buf.Bytes()} + + // Fill gap L1 blocks until timeout. Channel timeout is 50, so we need + // blocks 2..52 to cause timeout at block 52. + var l1Blocks []L1Input + l1Blocks = append(l1Blocks, *incompleteL1) + for i := uint64(2); i <= cfg.ChannelTimeoutBedrock+2; i++ { + l1Blocks = append(l1Blocks, *makeTestL1Input(i)) + } + + // After timeout, add a complete channel. + completeL1Num := cfg.ChannelTimeoutBedrock + 3 + completeL1 := makeTestL1Input(completeL1Num) + completeChID := testChannelID(0xBB) + + // The batch must reference an L1 block we have. Use block 1's ref as epoch. + completeBatch := &derive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(1), + EpochHash: incompleteL1.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + } + completeChannelData := encodeBatchToChannelData(t, completeBatch) + completeTx := wrapInFrames(completeChannelData, completeChID) + completeL1.BatcherData = [][]byte{completeTx} + l1Blocks = append(l1Blocks, *completeL1) + + derived, err := PureDerive(cfg, safeHead, sysConfig, l1Blocks) + require.NoError(t, err) + + // We should get at least one derived block from the complete channel. + // The incomplete channel should have timed out and produced nothing. 
+ foundFromComplete := false + for _, block := range derived { + if uint64(block.Attributes.Timestamp) == safeHead.Time+cfg.BlockTime { + foundFromComplete = true + break + } + } + require.True(t, foundFromComplete, "should have a derived block from the complete channel after timeout") +} + +func TestPureDerive_InvalidBatchSkipped(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + l1 := makeTestL1Input(1) + l1Ref := l1.BlockRef() + + // Create a batch with wrong timestamp (should be safeHead.Time + BlockTime). + invalidBatch := &derive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(l1Ref.Number), + EpochHash: l1Ref.Hash, + Timestamp: safeHead.Time + cfg.BlockTime + 999, // wrong timestamp + } + + channelData := encodeBatchToChannelData(t, invalidBatch) + var chID derive.ChannelID + copy(chID[:], common.Hex2Bytes("cccccccccccccccccccccccccccccccc")) + batcherTx := wrapInFrames(channelData, chID) + l1.BatcherData = [][]byte{batcherTx} + + derived, err := PureDerive(cfg, safeHead, sysConfig, []L1Input{*l1}) + require.NoError(t, err) + require.Empty(t, derived, "invalid batch should be skipped without error") +} + +func TestFindL1Origin(t *testing.T) { + l1Blocks := []L1Input{ + *makeTestL1Input(5), + *makeTestL1Input(10), + *makeTestL1Input(15), + } + + found := findL1Origin(l1Blocks, 10) + require.NotNil(t, found) + require.Equal(t, uint64(10), found.Number) + + notFound := findL1Origin(l1Blocks, 99) + require.Nil(t, notFound) +} + +// makeMultiEpochL1Inputs builds several L1 blocks with batches at different +// epochs, suitable for testing multi-channel, multi-epoch derivation. 
+func makeMultiEpochL1Inputs(t *testing.T, cfg *rollup.Config, safeHead eth.L2BlockRef, sysConfig eth.SystemConfig) []L1Input { + t.Helper() + _ = sysConfig + + // Block 1: batch for epoch 1, timestamp = safeHead.Time + BlockTime + l1Block1 := makeTestL1Input(1) + l1Ref1 := l1Block1.BlockRef() + batch1 := &derive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(l1Ref1.Number), + EpochHash: l1Ref1.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + } + chData1 := encodeBatchToChannelData(t, batch1) + var chID1 derive.ChannelID + chID1[0] = 0x01 + l1Block1.BatcherData = [][]byte{wrapInFrames(chData1, chID1)} + + // Block 2: batch for epoch 2, timestamp = safeHead.Time + 2*BlockTime + l1Block2 := makeTestL1Input(2) + l1Ref2 := l1Block2.BlockRef() + batch2 := &derive.SingularBatch{ + EpochNum: rollup.Epoch(l1Ref2.Number), + EpochHash: l1Ref2.Hash, + Timestamp: safeHead.Time + 2*cfg.BlockTime, + } + chData2 := encodeBatchToChannelData(t, batch2) + var chID2 derive.ChannelID + chID2[0] = 0x02 + l1Block2.BatcherData = [][]byte{wrapInFrames(chData2, chID2)} + + // Block 3: batch for epoch 3, timestamp = safeHead.Time + 3*BlockTime + l1Block3 := makeTestL1Input(3) + l1Ref3 := l1Block3.BlockRef() + batch3 := &derive.SingularBatch{ + EpochNum: rollup.Epoch(l1Ref3.Number), + EpochHash: l1Ref3.Hash, + Timestamp: safeHead.Time + 3*cfg.BlockTime, + } + chData3 := encodeBatchToChannelData(t, batch3) + var chID3 derive.ChannelID + chID3[0] = 0x03 + l1Block3.BatcherData = [][]byte{wrapInFrames(chData3, chID3)} + + return []L1Input{*l1Block1, *l1Block2, *l1Block3} +} From 73a2125b688262a7f7f1e169f8bc4e8d5a97df51 Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Wed, 25 Feb 2026 08:24:30 -0500 Subject: [PATCH 06/15] pure: address PR review feedback - Refactor L1Input to embed *types.Header instead of individual fields - Replace l1InputInfo adapter with l1BlockInfoAdapter wrapping eth.HeaderBlockInfo - Add Karst fork gate: PureDerive requires Karst to be 
active - Add span batch overlap rejection under Karst - Move needsEmptyBatch to l2Cursor method - Optimize findL1Origin with map-based O(1) index - Return error instead of fallback when L1 block is missing - Validate l1Blocks start from safe head L1 origin - Add channelAssembler comment explaining design vs upstream - Add Jovian network upgrade transactions in attributes.go - Add comprehensive doc comment on PureDerive listing skipped checks - Add batch validation comments referencing upstream functions - Add JovianTime and KarstTime to test rollup config - Fix all lint issues (bigs.Uint64Strict, goimports) - Add new tests: RejectsPreKarst, ValidatesL1BlockRange, EmptyL1Blocks Co-Authored-By: Claude Opus 4.6 --- op-core/pure/attributes.go | 27 ++++-- op-core/pure/attributes_test.go | 78 +++++++++++------- op-core/pure/batches.go | 37 ++++++--- op-core/pure/batches_test.go | 31 ------- op-core/pure/channels.go | 6 ++ op-core/pure/derive.go | 74 ++++++++++++----- op-core/pure/derive_test.go | 74 +++++++++++++---- op-core/pure/helpers_test.go | 40 +++++---- op-core/pure/types.go | 142 +++++++++++++++++--------------- op-core/pure/types_test.go | 105 +++++++++++------------ 10 files changed, 356 insertions(+), 258 deletions(-) diff --git a/op-core/pure/attributes.go b/op-core/pure/attributes.go index 350f08e6b7957..ea852b5e20654 100644 --- a/op-core/pure/attributes.go +++ b/op-core/pure/attributes.go @@ -20,7 +20,8 @@ import ( // Transaction ordering follows the OP Stack derivation spec: // 1. L1 info deposit transaction (always first) // 2. User deposit transactions (only at epoch boundaries) -// 3. Batch transactions from the sequencer +// 3. Network upgrade transactions (at fork activation blocks) +// 4. 
Batch transactions from the sequencer func buildAttributes( batch *derive.SingularBatch, l1Block *L1Input, @@ -38,9 +39,8 @@ func buildAttributes( } l2Timestamp := batch.Timestamp - blockInfo := &l1InputInfo{l1Block} - l1InfoTx, err := derive.L1InfoDeposit(cfg, nil, sysConfig, seqNumber, blockInfo, l2Timestamp) + l1InfoTx, err := derive.L1InfoDeposit(cfg, nil, sysConfig, seqNumber, l1Block.blockInfo(), l2Timestamp) if err != nil { return nil, fmt.Errorf("failed to create L1 info deposit tx: %w", err) } @@ -50,7 +50,23 @@ func buildAttributes( return nil, fmt.Errorf("failed to encode L1 info deposit tx: %w", err) } - txCount := 1 + len(batch.Transactions) + // Network upgrade transactions (NUTs). Only forks from Jovian onward are + // included; earlier forks (Ecotone, Fjord, Isthmus) cannot be activation + // blocks since PureDerive requires Karst to already be active. + var upgradeTxs []hexutil.Bytes + + if cfg.IsJovianActivationBlock(l2Timestamp) { + jovianTxs, err := derive.JovianNetworkUpgradeTransactions() + if err != nil { + return nil, fmt.Errorf("failed to build Jovian network upgrade txs: %w", err) + } + upgradeTxs = append(upgradeTxs, jovianTxs...) + } + + // TODO: Add Karst NUTs here once KarstNetworkUpgradeTransactions() exists. + // Karst currently has no network upgrade transactions. + + txCount := 1 + len(upgradeTxs) + len(batch.Transactions) if epochChanged { txCount += len(l1Block.Deposits) } @@ -67,6 +83,7 @@ func buildAttributes( } } + txs = append(txs, upgradeTxs...) txs = append(txs, batch.Transactions...) 
gasLimit := sysConfig.GasLimit @@ -83,7 +100,7 @@ func buildAttributes( attrs := ð.PayloadAttributes{ Timestamp: hexutil.Uint64(l2Timestamp), - PrevRandao: eth.Bytes32(l1Block.MixDigest), + PrevRandao: eth.Bytes32(l1Block.Header.MixDigest), SuggestedFeeRecipient: predeploys.SequencerFeeVaultAddr, Transactions: txs, NoTxPool: true, diff --git a/op-core/pure/attributes_test.go b/op-core/pure/attributes_test.go index 028c9d1023dbf..f284f91ae6326 100644 --- a/op-core/pure/attributes_test.go +++ b/op-core/pure/attributes_test.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/stretchr/testify/require" ) @@ -20,12 +21,15 @@ func TestBuildAttributes_EpochStart(t *testing.T) { l1Block := makeTestL1Input(5) l1Block.Deposits = []*types.DepositTx{makeTestDeposit(), makeTestDeposit()} + l1Num := bigs.Uint64Strict(l1Block.Header.Number) + l1Hash := l1Block.Header.Hash() + userTx := hexutil.Bytes{0x01, 0x02, 0x03} batch := &derive.SingularBatch{ ParentHash: common.HexToHash("0xaaaa"), - EpochNum: rollup.Epoch(l1Block.Number), - EpochHash: l1Block.Hash, - Timestamp: l1Block.Timestamp + cfg.BlockTime, + EpochNum: rollup.Epoch(l1Num), + EpochHash: l1Hash, + Timestamp: l1Block.Header.Time + cfg.BlockTime, Transactions: []hexutil.Bytes{userTx}, } @@ -33,7 +37,7 @@ func TestBuildAttributes_EpochStart(t *testing.T) { cursor := l2Cursor{ Number: 10, Timestamp: batch.Timestamp - cfg.BlockTime, - L1Origin: eth.BlockID{Hash: common.HexToHash("0xprev"), Number: l1Block.Number - 1}, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xprev"), Number: l1Num - 1}, SequenceNumber: 3, } @@ -50,7 +54,7 @@ func TestBuildAttributes_EpochStart(t *testing.T) { require.True(t, attrs.NoTxPool) require.Equal(t, 
hexutil.Uint64(batch.Timestamp), attrs.Timestamp) - require.Equal(t, eth.Bytes32(l1Block.MixDigest), attrs.PrevRandao) + require.Equal(t, eth.Bytes32(l1Block.Header.MixDigest), attrs.PrevRandao) require.Equal(t, predeploys.SequencerFeeVaultAddr, attrs.SuggestedFeeRecipient) require.NotNil(t, attrs.GasLimit) require.Equal(t, sysConfig.GasLimit, uint64(*attrs.GasLimit)) @@ -70,12 +74,15 @@ func TestBuildAttributes_SameEpoch(t *testing.T) { l1Block := makeTestL1Input(5) l1Block.Deposits = []*types.DepositTx{makeTestDeposit()} + l1Num := bigs.Uint64Strict(l1Block.Header.Number) + l1Hash := l1Block.Header.Hash() + userTx := hexutil.Bytes{0xaa, 0xbb} batch := &derive.SingularBatch{ ParentHash: common.HexToHash("0xbbbb"), - EpochNum: rollup.Epoch(l1Block.Number), - EpochHash: l1Block.Hash, - Timestamp: l1Block.Timestamp + 2*cfg.BlockTime, + EpochNum: rollup.Epoch(l1Num), + EpochHash: l1Hash, + Timestamp: l1Block.Header.Time + 2*cfg.BlockTime, Transactions: []hexutil.Bytes{userTx}, } @@ -83,7 +90,7 @@ func TestBuildAttributes_SameEpoch(t *testing.T) { cursor := l2Cursor{ Number: 10, Timestamp: batch.Timestamp - cfg.BlockTime, - L1Origin: eth.BlockID{Hash: l1Block.Hash, Number: l1Block.Number}, + L1Origin: eth.BlockID{Hash: l1Hash, Number: l1Num}, SequenceNumber: 2, } @@ -112,18 +119,21 @@ func TestBuildAttributes_EmptyBatch(t *testing.T) { l1Block := makeTestL1Input(5) l1Block.Deposits = []*types.DepositTx{makeTestDeposit()} + l1Num := bigs.Uint64Strict(l1Block.Header.Number) + l1Hash := l1Block.Header.Hash() + batch := &derive.SingularBatch{ ParentHash: common.HexToHash("0xcccc"), - EpochNum: rollup.Epoch(l1Block.Number), - EpochHash: l1Block.Hash, - Timestamp: l1Block.Timestamp + cfg.BlockTime, + EpochNum: rollup.Epoch(l1Num), + EpochHash: l1Hash, + Timestamp: l1Block.Header.Time + cfg.BlockTime, Transactions: nil, } cursor := l2Cursor{ Number: 10, Timestamp: batch.Timestamp - cfg.BlockTime, - L1Origin: eth.BlockID{Hash: common.HexToHash("0xold"), Number: l1Block.Number 
- 1}, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xold"), Number: l1Num - 1}, SequenceNumber: 0, } @@ -137,18 +147,21 @@ func TestBuildAttributes_EmptyBatch(t *testing.T) { t.Run("empty batch same epoch", func(t *testing.T) { l1Block := makeTestL1Input(5) + l1Num := bigs.Uint64Strict(l1Block.Header.Number) + l1Hash := l1Block.Header.Hash() + batch := &derive.SingularBatch{ ParentHash: common.HexToHash("0xdddd"), - EpochNum: rollup.Epoch(l1Block.Number), - EpochHash: l1Block.Hash, - Timestamp: l1Block.Timestamp + 2*cfg.BlockTime, + EpochNum: rollup.Epoch(l1Num), + EpochHash: l1Hash, + Timestamp: l1Block.Header.Time + 2*cfg.BlockTime, Transactions: nil, } cursor := l2Cursor{ Number: 10, Timestamp: batch.Timestamp - cfg.BlockTime, - L1Origin: eth.BlockID{Hash: l1Block.Hash, Number: l1Block.Number}, + L1Origin: eth.BlockID{Hash: l1Hash, Number: l1Num}, SequenceNumber: 1, } @@ -166,17 +179,20 @@ func TestBuildAttributes_HoloceneFields(t *testing.T) { sysConfig.EIP1559Params = eth.Bytes8{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08} l1Block := makeTestL1Input(5) + l1Num := bigs.Uint64Strict(l1Block.Header.Number) + l1Hash := l1Block.Header.Hash() + batch := &derive.SingularBatch{ ParentHash: common.HexToHash("0xeeee"), - EpochNum: rollup.Epoch(l1Block.Number), - EpochHash: l1Block.Hash, - Timestamp: l1Block.Timestamp + cfg.BlockTime, + EpochNum: rollup.Epoch(l1Num), + EpochHash: l1Hash, + Timestamp: l1Block.Header.Time + cfg.BlockTime, } cursor := l2Cursor{ Number: 10, Timestamp: batch.Timestamp - cfg.BlockTime, - L1Origin: eth.BlockID{Hash: common.HexToHash("0xold"), Number: l1Block.Number - 1}, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xold"), Number: l1Num - 1}, SequenceNumber: 0, } @@ -192,23 +208,24 @@ func TestBuildAttributes_SequenceNumber(t *testing.T) { cfg := testRollupConfig() sysConfig := testSystemConfig() l1Block := makeTestL1Input(5) + l1Num := bigs.Uint64Strict(l1Block.Header.Number) + l1Hash := l1Block.Header.Hash() t.Run("epoch start 
resets to zero", func(t *testing.T) { batch := &derive.SingularBatch{ ParentHash: common.HexToHash("0x1111"), - EpochNum: rollup.Epoch(l1Block.Number), - EpochHash: l1Block.Hash, - Timestamp: l1Block.Timestamp + cfg.BlockTime, + EpochNum: rollup.Epoch(l1Num), + EpochHash: l1Hash, + Timestamp: l1Block.Header.Time + cfg.BlockTime, } cursor := l2Cursor{ Number: 10, Timestamp: batch.Timestamp - cfg.BlockTime, - L1Origin: eth.BlockID{Hash: common.HexToHash("0xold"), Number: l1Block.Number - 1}, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xold"), Number: l1Num - 1}, SequenceNumber: 5, } - // Sequence number 0 is used internally; we verify the result is valid result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg) require.NoError(t, err) require.NotNil(t, result) @@ -217,19 +234,18 @@ func TestBuildAttributes_SequenceNumber(t *testing.T) { t.Run("same epoch increments", func(t *testing.T) { batch := &derive.SingularBatch{ ParentHash: common.HexToHash("0x2222"), - EpochNum: rollup.Epoch(l1Block.Number), - EpochHash: l1Block.Hash, - Timestamp: l1Block.Timestamp + 4*cfg.BlockTime, + EpochNum: rollup.Epoch(l1Num), + EpochHash: l1Hash, + Timestamp: l1Block.Header.Time + 4*cfg.BlockTime, } cursor := l2Cursor{ Number: 10, Timestamp: batch.Timestamp - cfg.BlockTime, - L1Origin: eth.BlockID{Hash: l1Block.Hash, Number: l1Block.Number}, + L1Origin: eth.BlockID{Hash: l1Hash, Number: l1Num}, SequenceNumber: 5, } - // Sequence number 6 is used internally; we verify the result is valid result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg) require.NoError(t, err) require.NotNil(t, result) diff --git a/op-core/pure/batches.go b/op-core/pure/batches.go index fc1f82cf018dc..326a4f2642bff 100644 --- a/op-core/pure/batches.go +++ b/op-core/pure/batches.go @@ -12,6 +12,11 @@ import ( // decodeBatches reads all batches from a completed channel's compressed data // and returns them as singular batches. 
Span batches are expanded into // individual singular batches using the provided L1 origins and cursor. +// +// With Karst active, span batches must not overlap the safe chain. If the first +// batch in a span has timestamp <= cursor.Timestamp, the entire span is rejected. +// See checkSpanBatchPrefix in op-node/rollup/derive/batches.go for the full +// upstream overlap handling. func decodeBatches( r io.Reader, cfg *rollup.Config, @@ -55,10 +60,19 @@ func decodeBatches( if err != nil { return nil, fmt.Errorf("deriving span batch: %w", err) } + + // Reject overlapping span batches. Under Karst, span batches that start + // at or before the safe head are invalid. This mirrors the overlap rejection + // in checkSpanBatchPrefix (op-node/rollup/derive/batches.go). + if spanBatch.GetTimestamp() <= cursor.Timestamp { + return nil, fmt.Errorf("span batch timestamp %d overlaps safe head at %d (rejected under Karst)", + spanBatch.GetTimestamp(), cursor.Timestamp) + } + l2SafeHead := eth.L2BlockRef{ - Number: cursor.Number, - Time: cursor.Timestamp, - L1Origin: cursor.L1Origin, + Number: cursor.Number, + Time: cursor.Timestamp, + L1Origin: cursor.L1Origin, SequenceNumber: cursor.SequenceNumber, } singular, err := spanBatch.GetSingularBatches(l1Origins, l2SafeHead) @@ -75,8 +89,14 @@ func decodeBatches( return batches, nil } -// validateBatch checks whether a singular batch is valid given the current -// derivation cursor and known L1 origins. +// validateBatch performs simplified batch validation suitable for Karst and later. +// It checks timestamp sequencing, epoch bounds, and epoch hash consistency. +// +// This is a subset of the full validation in op-node/rollup/derive/batches.go +// (checkSingularBatch / CheckBatch). The upstream functions are unexported and +// require an l2Fetcher for L2 state lookups that we intentionally avoid. 
+// With Karst active, overlapping span batches are already rejected in decodeBatches, +// so the remaining checks here are sufficient for correctness. func validateBatch(batch *derive.SingularBatch, cursor l2Cursor, l1Origins []eth.L1BlockRef, cfg *rollup.Config) bool { expectedTimestamp := cursor.Timestamp + cfg.BlockTime if batch.Timestamp != expectedTimestamp { @@ -106,13 +126,6 @@ func validateBatch(batch *derive.SingularBatch, cursor l2Cursor, l1Origins []eth return false } -// needsEmptyBatch returns true when the sequencing window has expired, -// meaning the cursor's L1 origin is more than SeqWindowSize blocks behind -// the current L1 block. -func needsEmptyBatch(cursor l2Cursor, currentL1 eth.L1BlockRef, cfg *rollup.Config) bool { - return currentL1.Number > cursor.L1Origin.Number+cfg.SeqWindowSize -} - // makeEmptyBatch creates a batch with no transactions at the next expected // timestamp, advancing from the current cursor position. func makeEmptyBatch(cursor l2Cursor, cfg *rollup.Config) *derive.SingularBatch { diff --git a/op-core/pure/batches_test.go b/op-core/pure/batches_test.go index f0850b411edc0..0229e811744df 100644 --- a/op-core/pure/batches_test.go +++ b/op-core/pure/batches_test.go @@ -147,37 +147,6 @@ func TestValidateBatch_EpochTooNew(t *testing.T) { require.False(t, validateBatch(batch, cursor, l1Origins, cfg)) } -func TestNeedEmptyBatch_WindowNotExpired(t *testing.T) { - cfg := testRollupConfig() // SeqWindowSize = 10 - - cursor := l2Cursor{ - Number: 10, - Timestamp: 100, - L1Origin: eth.BlockID{Number: 5}, - } - - // currentL1.Number (15) == cursor.L1Origin.Number (5) + SeqWindowSize (10) - // Not strictly greater, so window not expired - currentL1 := eth.L1BlockRef{Number: 15} - - require.False(t, needsEmptyBatch(cursor, currentL1, cfg)) -} - -func TestNeedEmptyBatch_WindowExpired(t *testing.T) { - cfg := testRollupConfig() // SeqWindowSize = 10 - - cursor := l2Cursor{ - Number: 10, - Timestamp: 100, - L1Origin: eth.BlockID{Number: 
5}, - } - - // currentL1.Number (16) > cursor.L1Origin.Number (5) + SeqWindowSize (10) - currentL1 := eth.L1BlockRef{Number: 16} - - require.True(t, needsEmptyBatch(cursor, currentL1, cfg)) -} - func TestMakeEmptyBatch(t *testing.T) { cfg := testRollupConfig() origin := testL1Ref(5) diff --git a/op-core/pure/channels.go b/op-core/pure/channels.go index ffce0e12d5b69..ea1b8d9e6edff 100644 --- a/op-core/pure/channels.go +++ b/op-core/pure/channels.go @@ -15,6 +15,12 @@ type readyChannel struct { // channelAssembler implements Holocene single-channel strict-order assembly. // Only one channel is active at a time. Frames must arrive in order. // A frame for a new channel ID discards the current in-progress channel. +// +// This is intentionally separate from the existing ChannelAssembler in +// op-node/rollup/derive/channel_assembler.go. That assembler is pull-based +// (requires NextFrameProvider and Metrics interfaces) and designed for the +// streaming pipeline. Our push-based model feeds frames directly, making a +// simpler implementation appropriate. type channelAssembler struct { current *derive.Channel currentID derive.ChannelID diff --git a/op-core/pure/derive.go b/op-core/pure/derive.go index 56bfaf3af623a..55b95c9e60570 100644 --- a/op-core/pure/derive.go +++ b/op-core/pure/derive.go @@ -5,6 +5,7 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -14,20 +15,61 @@ import ( // // The function is stateless and deterministic: given the same inputs it always // produces the same outputs. No network access, no caching, no side effects. +// +// l1Blocks should contain L1 blocks starting from safeHead.L1Origin.Number. +// The caller must ensure this range is complete. +// +// Requires the Karst fork to be active at the safe head timestamp. 
Before Karst, +// span batches may overlap the safe chain, which this implementation does not support. +// +// Compared to the legacy pipeline (op-node/rollup/derive), this implementation +// intentionally skips the following checks: +// - Parent hash validation against the actual L2 chain (deferred to post-execution) +// - L2 block hash verification (no L2 state access) +// - Span batch overlap comparison (rejected by Karst; overlaps are invalid) +// - Pipeline reset / reorg handling (caller is responsible for providing correct inputs) +// - Sequencer drift checks requiring L2 state lookups +// +// See op-node/rollup/derive/batches.go for the full upstream validation logic. func PureDerive( cfg *rollup.Config, safeHead eth.L2BlockRef, sysConfig eth.SystemConfig, l1Blocks []L1Input, ) ([]DerivedBlock, error) { + if !cfg.IsKarst(safeHead.Time) { + return nil, fmt.Errorf("pure derivation requires Karst fork (no overlapping span batches), safe head time %d is pre-Karst", safeHead.Time) + } + + if len(l1Blocks) == 0 { + return nil, nil + } + + // Validate that l1Blocks start from the safe head's L1 origin. + firstL1Num := bigs.Uint64Strict(l1Blocks[0].Header.Number) + if firstL1Num > safeHead.L1Origin.Number { + return nil, fmt.Errorf("l1Blocks start at %d but safe head L1 origin is %d", firstL1Num, safeHead.L1Origin.Number) + } + cursor := newCursor(safeHead) assembler := newChannelAssembler() + // Build index for O(1) lookups by L1 block number. 
+ l1ByNumber := make(map[uint64]int, len(l1Blocks)) l1Origins := make([]eth.L1BlockRef, len(l1Blocks)) for i := range l1Blocks { + num := bigs.Uint64Strict(l1Blocks[i].Header.Number) + l1ByNumber[num] = i l1Origins[i] = l1Blocks[i].BlockRef() } + findL1 := func(number uint64) *L1Input { + if idx, ok := l1ByNumber[number]; ok { + return &l1Blocks[idx] + } + return nil + } + var derived []DerivedBlock for i := range l1Blocks { @@ -35,8 +77,8 @@ func PureDerive( l1Ref := l1.BlockRef() for _, log := range l1.ConfigLogs { - if err := derive.ProcessSystemConfigUpdateLogEvent(&sysConfig, log, cfg, l1.Timestamp); err != nil { - return nil, fmt.Errorf("processing system config update at L1 block %d: %w", l1.Number, err) + if err := derive.ProcessSystemConfigUpdateLogEvent(&sysConfig, log, cfg, l1.Header.Time); err != nil { + return nil, fmt.Errorf("processing system config update at L1 block %d: %w", l1Ref.Number, err) } } @@ -64,14 +106,14 @@ func PureDerive( continue } - epochL1 := findL1Origin(l1Blocks, uint64(batch.EpochNum)) + epochL1 := findL1(uint64(batch.EpochNum)) if epochL1 == nil { - epochL1 = &l1 + return nil, fmt.Errorf("missing L1 block %d for batch epoch", batch.EpochNum) } block, err := buildAttributes(batch, epochL1, cursor, sysConfig, cfg) if err != nil { - return nil, fmt.Errorf("building attributes at L1 block %d: %w", l1.Number, err) + return nil, fmt.Errorf("building attributes at L1 block %d: %w", l1Ref.Number, err) } derived = append(derived, *block) @@ -87,14 +129,14 @@ func PureDerive( } } - for needsEmptyBatch(cursor, l1Ref, cfg) { + for cursor.needsEmptyBatch(l1Ref, cfg) { nextTimestamp := cursor.Timestamp + cfg.BlockTime newOrigin := cursor.L1Origin newSeqNum := cursor.SequenceNumber + 1 // Advance epoch if the next L2 timestamp >= next L1 block's timestamp. 
- nextL1 := findL1Origin(l1Blocks, cursor.L1Origin.Number+1) - if nextL1 != nil && nextTimestamp >= nextL1.Timestamp { + nextL1 := findL1(cursor.L1Origin.Number + 1) + if nextL1 != nil && nextTimestamp >= nextL1.Header.Time { newOrigin = nextL1.BlockID() newSeqNum = 0 } @@ -105,13 +147,13 @@ func PureDerive( Timestamp: nextTimestamp, } - epochL1 := findL1Origin(l1Blocks, newOrigin.Number) + epochL1 := findL1(newOrigin.Number) if epochL1 == nil { - epochL1 = &l1 + return nil, fmt.Errorf("missing L1 block %d for empty batch epoch", newOrigin.Number) } block, err := buildAttributes(emptyBatch, epochL1, cursor, sysConfig, cfg) if err != nil { - return nil, fmt.Errorf("building empty batch attributes at L1 block %d: %w", l1.Number, err) + return nil, fmt.Errorf("building empty batch attributes at L1 block %d: %w", l1Ref.Number, err) } derived = append(derived, *block) cursor.advance(emptyBatch.Timestamp, newOrigin, newSeqNum) @@ -120,13 +162,3 @@ func PureDerive( return derived, nil } - -// findL1Origin looks up an L1Input by block number from the provided slice. 
-func findL1Origin(l1Blocks []L1Input, number uint64) *L1Input { - for i := range l1Blocks { - if l1Blocks[i].Number == number { - return &l1Blocks[i] - } - } - return nil -} diff --git a/op-core/pure/derive_test.go b/op-core/pure/derive_test.go index 5ee5908b4e286..c42d049d4dca9 100644 --- a/op-core/pure/derive_test.go +++ b/op-core/pure/derive_test.go @@ -11,6 +11,7 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" + "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -19,9 +20,10 @@ func TestPureDerive_SingleBatch(t *testing.T) { safeHead := testSafeHead(cfg) sysConfig := testSystemConfig() + l1Origin := makeTestL1Input(0) // safe head's L1 origin l1 := makeL1WithBatch(t, cfg, 1, safeHead, sysConfig) - derived, err := PureDerive(cfg, safeHead, sysConfig, []L1Input{*l1}) + derived, err := PureDerive(cfg, safeHead, sysConfig, []L1Input{*l1Origin, *l1}) require.NoError(t, err) require.Len(t, derived, 1) @@ -83,12 +85,13 @@ func TestPureDerive_ChannelTimeout(t *testing.T) { // Create an incomplete channel at L1 block 1 (frame 0 of 2, not last). incompleteL1 := makeTestL1Input(1) + incompleteL1Ref := incompleteL1.BlockRef() incompleteChID := testChannelID(0xAA) batch := &derive.SingularBatch{ ParentHash: safeHead.Hash, - EpochNum: rollup.Epoch(incompleteL1.Number), - EpochHash: incompleteL1.Hash, + EpochNum: rollup.Epoch(incompleteL1Ref.Number), + EpochHash: incompleteL1Ref.Hash, Timestamp: safeHead.Time + cfg.BlockTime, } channelData := encodeBatchToChannelData(t, batch) @@ -108,6 +111,7 @@ func TestPureDerive_ChannelTimeout(t *testing.T) { // Fill gap L1 blocks until timeout. Channel timeout is 50, so we need // blocks 2..52 to cause timeout at block 52. 
var l1Blocks []L1Input + l1Blocks = append(l1Blocks, *makeTestL1Input(0)) // safe head's L1 origin l1Blocks = append(l1Blocks, *incompleteL1) for i := uint64(2); i <= cfg.ChannelTimeoutBedrock+2; i++ { l1Blocks = append(l1Blocks, *makeTestL1Input(i)) @@ -122,7 +126,7 @@ func TestPureDerive_ChannelTimeout(t *testing.T) { completeBatch := &derive.SingularBatch{ ParentHash: safeHead.Hash, EpochNum: rollup.Epoch(1), - EpochHash: incompleteL1.Hash, + EpochHash: incompleteL1Ref.Hash, Timestamp: safeHead.Time + cfg.BlockTime, } completeChannelData := encodeBatchToChannelData(t, completeBatch) @@ -167,24 +171,44 @@ func TestPureDerive_InvalidBatchSkipped(t *testing.T) { batcherTx := wrapInFrames(channelData, chID) l1.BatcherData = [][]byte{batcherTx} - derived, err := PureDerive(cfg, safeHead, sysConfig, []L1Input{*l1}) + l1Origin := makeTestL1Input(0) // safe head's L1 origin + derived, err := PureDerive(cfg, safeHead, sysConfig, []L1Input{*l1Origin, *l1}) require.NoError(t, err) require.Empty(t, derived, "invalid batch should be skipped without error") } -func TestFindL1Origin(t *testing.T) { - l1Blocks := []L1Input{ - *makeTestL1Input(5), - *makeTestL1Input(10), - *makeTestL1Input(15), - } +func TestPureDerive_RejectsPreKarst(t *testing.T) { + cfg := testRollupConfig() + cfg.KarstTime = nil // disable Karst + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + _, err := PureDerive(cfg, safeHead, sysConfig, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "Karst fork") +} + +func TestPureDerive_ValidatesL1BlockRange(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() - found := findL1Origin(l1Blocks, 10) - require.NotNil(t, found) - require.Equal(t, uint64(10), found.Number) + // Start L1 blocks after the safe head's L1 origin (gap) + l1Blocks := []L1Input{*makeTestL1Input(5)} - notFound := findL1Origin(l1Blocks, 99) - require.Nil(t, notFound) + _, err := PureDerive(cfg, safeHead, 
sysConfig, l1Blocks) + require.Error(t, err) + require.Contains(t, err.Error(), "l1Blocks start at") +} + +func TestPureDerive_EmptyL1Blocks(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + derived, err := PureDerive(cfg, safeHead, sysConfig, nil) + require.NoError(t, err) + require.Nil(t, derived) } // makeMultiEpochL1Inputs builds several L1 blocks with batches at different @@ -233,5 +257,21 @@ func makeMultiEpochL1Inputs(t *testing.T, cfg *rollup.Config, safeHead eth.L2Blo chID3[0] = 0x03 l1Block3.BatcherData = [][]byte{wrapInFrames(chData3, chID3)} - return []L1Input{*l1Block1, *l1Block2, *l1Block3} + // Include block 0 (safe head's L1 origin) at the start. + l1Block0 := makeTestL1Input(0) + return []L1Input{*l1Block0, *l1Block1, *l1Block2, *l1Block3} +} + +// Verify that test inputs are constructed correctly through BlockRef/BlockID. +func TestL1InputIntegration(t *testing.T) { + l1 := makeTestL1Input(10) + ref := l1.BlockRef() + require.Equal(t, bigs.Uint64Strict(l1.Header.Number), ref.Number) + require.Equal(t, l1.Header.Hash(), ref.Hash) + require.Equal(t, l1.Header.ParentHash, ref.ParentHash) + require.Equal(t, l1.Header.Time, ref.Time) + + id := l1.BlockID() + require.Equal(t, ref.Hash, id.Hash) + require.Equal(t, ref.Number, id.Number) } diff --git a/op-core/pure/helpers_test.go b/op-core/pure/helpers_test.go index bd8292c74e471..254b1c8b9c021 100644 --- a/op-core/pure/helpers_test.go +++ b/op-core/pure/helpers_test.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" + "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -38,14 +39,16 @@ func testRollupConfig() *rollup.Config { ChannelTimeoutBedrock: 50, L1ChainID: big.NewInt(1), L2ChainID: big.NewInt(10), - // 
Activate all forks at genesis for post-Holocene only pipeline - RegolithTime: &zero, - CanyonTime: &zero, - DeltaTime: &zero, - EcotoneTime: &zero, - FjordTime: &zero, - GraniteTime: &zero, - HoloceneTime: &zero, + // Activate all forks at genesis for post-Karst only pipeline + RegolithTime: &zero, + CanyonTime: &zero, + DeltaTime: &zero, + EcotoneTime: &zero, + FjordTime: &zero, + GraniteTime: &zero, + HoloceneTime: &zero, + JovianTime: &zero, + KarstTime: &zero, BatchInboxAddress: common.HexToAddress("0xff00000000000000000000000000000000000010"), DepositContractAddress: common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"), } @@ -70,13 +73,15 @@ func testSafeHead(cfg *rollup.Config) eth.L2BlockRef { func makeTestL1Input(num uint64) *L1Input { return &L1Input{ - Hash: common.BigToHash(new(big.Int).SetUint64(num + 0x100)), - Number: num, - Timestamp: 1000 + num*12, - BaseFee: big.NewInt(7), - BlobBaseFee: big.NewInt(1), - ParentHash: common.BigToHash(new(big.Int).SetUint64(num + 0x100 - 1)), - MixDigest: common.BigToHash(new(big.Int).SetUint64(num + 0x200)), + Header: &types.Header{ + ParentHash: common.BigToHash(new(big.Int).SetUint64(num + 0x100 - 1)), + Number: new(big.Int).SetUint64(num), + Time: 1000 + num*12, + BaseFee: big.NewInt(7), + MixDigest: common.BigToHash(new(big.Int).SetUint64(num + 0x200)), + // ExcessBlobGas required for BlobBaseFee to work via HeaderBlockInfo + ExcessBlobGas: ptrTo(uint64(0)), + }, } } @@ -153,6 +158,7 @@ func TestHelpers(t *testing.T) { require.Equal(t, uint64(10), cfg.SeqWindowSize) require.Equal(t, uint64(50), cfg.ChannelTimeoutBedrock) require.NotNil(t, cfg.HoloceneTime) + require.NotNil(t, cfg.KarstTime) sysCfg := testSystemConfig() require.Equal(t, uint64(30_000_000), sysCfg.GasLimit) @@ -162,8 +168,8 @@ func TestHelpers(t *testing.T) { require.Equal(t, cfg.Genesis.L2.Number, safeHead.Number) l1 := makeTestL1Input(5) - require.Equal(t, uint64(5), l1.Number) - require.Equal(t, uint64(1000+5*12), l1.Timestamp) + 
require.Equal(t, uint64(5), bigs.Uint64Strict(l1.Header.Number)) + require.Equal(t, uint64(1000+5*12), l1.Header.Time) dep := makeTestDeposit() require.NotNil(t, dep) diff --git a/op-core/pure/types.go b/op-core/pure/types.go index ad34ef9ac372f..3aafc6bec1255 100644 --- a/op-core/pure/types.go +++ b/op-core/pure/types.go @@ -6,41 +6,93 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" ) // L1Input is a pre-processed L1 block containing only derivation-relevant data. // The caller is responsible for filtering batcher transactions, extracting deposits // from receipts, and extracting system config update logs. +// +// Header contains the full L1 block header. Callers will typically already have +// the header at hand when constructing an L1Input. type L1Input struct { - Hash common.Hash - Number uint64 - Timestamp uint64 - BaseFee *big.Int - BlobBaseFee *big.Int - ParentHash common.Hash - MixDigest common.Hash // prevrandao - - BatcherData [][]byte // raw batcher transaction data (calldata or blob content) - Deposits []*types.DepositTx - ConfigLogs []*types.Log // system config update logs, pre-filtered + Header *types.Header + + BatcherData [][]byte // raw batcher transaction data (calldata or blob content) + Deposits []*types.DepositTx // user deposit transactions extracted from receipts + ConfigLogs []*types.Log // system config update logs, pre-filtered } -// BlockRef converts L1Input header fields to an eth.L1BlockRef. +// BlockRef converts the L1 header to an eth.L1BlockRef. 
func (l *L1Input) BlockRef() eth.L1BlockRef { return eth.L1BlockRef{ - Hash: l.Hash, - Number: l.Number, - ParentHash: l.ParentHash, - Time: l.Timestamp, + Hash: l.Header.Hash(), + Number: bigs.Uint64Strict(l.Header.Number), + ParentHash: l.Header.ParentHash, + Time: l.Header.Time, } } // BlockID returns the block's ID (hash + number). func (l *L1Input) BlockID() eth.BlockID { - return eth.BlockID{Hash: l.Hash, Number: l.Number} + return eth.BlockID{Hash: l.Header.Hash(), Number: bigs.Uint64Strict(l.Header.Number)} +} + +// blockInfo returns an eth.BlockInfo adapter for the L1Input's header, +// suitable for derive.L1InfoDeposit and similar consumers. +// +// L1InfoDeposit is called with a nil L1 chain config, so the standard +// HeaderBlockInfo cannot be used directly (its BlobBaseFee method calls +// eip4844.CalcBlobFee which requires a non-nil chain config). This wrapper +// delegates everything to HeaderBlockInfo except BlobBaseFee, which returns +// nil to let L1InfoDeposit apply its own fallback logic. +func (l *L1Input) blockInfo() eth.BlockInfo { + return &l1BlockInfoAdapter{inner: eth.HeaderBlockInfo(l.Header)} +} + +// l1BlockInfoAdapter wraps eth.BlockInfo to handle the nil-chainConfig case +// in BlobBaseFee. L1InfoDeposit computes blob base fee from ExcessBlobGas +// when BlobBaseFee returns nil, so we return nil here and let it handle the +// computation with its own chain config awareness. 
+type l1BlockInfoAdapter struct { + inner eth.BlockInfo +} + +var _ eth.BlockInfo = (*l1BlockInfoAdapter)(nil) + +func (a *l1BlockInfoAdapter) Hash() common.Hash { return a.inner.Hash() } +func (a *l1BlockInfoAdapter) ParentHash() common.Hash { return a.inner.ParentHash() } +func (a *l1BlockInfoAdapter) Coinbase() common.Address { return a.inner.Coinbase() } +func (a *l1BlockInfoAdapter) Root() common.Hash { return a.inner.Root() } +func (a *l1BlockInfoAdapter) NumberU64() uint64 { return a.inner.NumberU64() } +func (a *l1BlockInfoAdapter) Time() uint64 { return a.inner.Time() } +func (a *l1BlockInfoAdapter) MixDigest() common.Hash { return a.inner.MixDigest() } +func (a *l1BlockInfoAdapter) BaseFee() *big.Int { return a.inner.BaseFee() } +func (a *l1BlockInfoAdapter) ReceiptHash() common.Hash { return a.inner.ReceiptHash() } +func (a *l1BlockInfoAdapter) GasUsed() uint64 { return a.inner.GasUsed() } +func (a *l1BlockInfoAdapter) GasLimit() uint64 { return a.inner.GasLimit() } +func (a *l1BlockInfoAdapter) ParentBeaconRoot() *common.Hash { return a.inner.ParentBeaconRoot() } +func (a *l1BlockInfoAdapter) WithdrawalsRoot() *common.Hash { return a.inner.WithdrawalsRoot() } +func (a *l1BlockInfoAdapter) ExcessBlobGas() *uint64 { return a.inner.ExcessBlobGas() } +func (a *l1BlockInfoAdapter) BlobGasUsed() *uint64 { return a.inner.BlobGasUsed() } +func (a *l1BlockInfoAdapter) HeaderRLP() ([]byte, error) { return a.inner.HeaderRLP() } +func (a *l1BlockInfoAdapter) Header() *types.Header { return a.inner.Header() } + +// BlobBaseFee computes the blob base fee from the header's ExcessBlobGas +// without requiring an L1 chain config. L1InfoDeposit is called with a nil +// L1 chain config in pure derivation, so we cannot delegate to the standard +// HeaderBlockInfo.BlobBaseFee (which calls eip4844.CalcBlobFee with the +// chain config). Instead we use CalcBlobFeeCancun which only needs the +// excess blob gas value. 
+func (a *l1BlockInfoAdapter) BlobBaseFee(_ *params.ChainConfig) *big.Int { + ebg := a.inner.ExcessBlobGas() + if ebg == nil { + return nil + } + return eth.CalcBlobFeeCancun(*ebg) } // DerivedBlock is a single derived L2 block -- payload attributes ready for execution. @@ -74,53 +126,9 @@ func (c *l2Cursor) advance(timestamp uint64, l1Origin eth.BlockID, seqNum uint64 c.SequenceNumber = seqNum } -// l1InputInfo adapts L1Input to the eth.BlockInfo interface -// needed by derive.L1InfoDeposit. -type l1InputInfo struct { - *L1Input -} - -var _ eth.BlockInfo = (*l1InputInfo)(nil) - -func (i *l1InputInfo) Hash() common.Hash { return i.L1Input.Hash } -func (i *l1InputInfo) ParentHash() common.Hash { return i.L1Input.ParentHash } -func (i *l1InputInfo) Coinbase() common.Address { return common.Address{} } -func (i *l1InputInfo) Root() common.Hash { return common.Hash{} } -func (i *l1InputInfo) NumberU64() uint64 { return i.L1Input.Number } -func (i *l1InputInfo) Time() uint64 { return i.L1Input.Timestamp } -func (i *l1InputInfo) MixDigest() common.Hash { return i.L1Input.MixDigest } -func (i *l1InputInfo) BaseFee() *big.Int { return i.L1Input.BaseFee } -func (i *l1InputInfo) ReceiptHash() common.Hash { return common.Hash{} } -func (i *l1InputInfo) GasUsed() uint64 { return 0 } -func (i *l1InputInfo) GasLimit() uint64 { return 0 } -func (i *l1InputInfo) ParentBeaconRoot() *common.Hash { return nil } -func (i *l1InputInfo) WithdrawalsRoot() *common.Hash { return nil } - -func (i *l1InputInfo) BlobBaseFee(_ *params.ChainConfig) *big.Int { - return i.L1Input.BlobBaseFee -} - -func (i *l1InputInfo) ExcessBlobGas() *uint64 { - if i.L1Input.BlobBaseFee != nil { - zero := uint64(0) - return &zero - } - return nil -} - -func (i *l1InputInfo) BlobGasUsed() *uint64 { return nil } - -func (i *l1InputInfo) HeaderRLP() ([]byte, error) { - h := i.Header() - return rlp.EncodeToBytes(h) -} - -func (i *l1InputInfo) Header() *types.Header { - return &types.Header{ - ParentHash: 
i.L1Input.ParentHash, - Number: new(big.Int).SetUint64(i.L1Input.Number), - Time: i.L1Input.Timestamp, - BaseFee: i.L1Input.BaseFee, - MixDigest: i.L1Input.MixDigest, - } +// needsEmptyBatch returns true when the sequencing window has expired, +// meaning the cursor's L1 origin is more than SeqWindowSize blocks behind +// the current L1 block. +func (c l2Cursor) needsEmptyBatch(currentL1 eth.L1BlockRef, cfg *rollup.Config) bool { + return currentL1.Number > c.L1Origin.Number+cfg.SeqWindowSize } diff --git a/op-core/pure/types_test.go b/op-core/pure/types_test.go index 8630a75e1ec1a..5647c28c9ca20 100644 --- a/op-core/pure/types_test.go +++ b/op-core/pure/types_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -12,28 +13,29 @@ import ( func TestL1InputBlockRef(t *testing.T) { input := L1Input{ - Hash: common.HexToHash("0xaa"), - Number: 100, - Timestamp: 1000, - ParentHash: common.HexToHash("0x99"), - BaseFee: big.NewInt(1), - BlobBaseFee: big.NewInt(1), + Header: &types.Header{ + ParentHash: common.HexToHash("0x99"), + Number: big.NewInt(100), + Time: 1000, + BaseFee: big.NewInt(1), + }, } ref := input.BlockRef() - require.Equal(t, input.Hash, ref.Hash) - require.Equal(t, input.Number, ref.Number) - require.Equal(t, input.Timestamp, ref.Time) - require.Equal(t, input.ParentHash, ref.ParentHash) + require.Equal(t, input.Header.Hash(), ref.Hash) + require.Equal(t, uint64(100), ref.Number) + require.Equal(t, uint64(1000), ref.Time) + require.Equal(t, input.Header.ParentHash, ref.ParentHash) } func TestL1InputBlockID(t *testing.T) { input := L1Input{ - Hash: common.HexToHash("0xbb"), - Number: 42, + Header: &types.Header{ + Number: big.NewInt(42), + }, } id := input.BlockID() - require.Equal(t, input.Hash, id.Hash) - require.Equal(t, input.Number, id.Number) + require.Equal(t, 
input.Header.Hash(), id.Hash) + require.Equal(t, uint64(42), id.Number) } func TestCursorAdvance(t *testing.T) { @@ -53,55 +55,44 @@ func TestCursorAdvance(t *testing.T) { require.Equal(t, uint64(3), c.SequenceNumber) } -func TestL1InputInfoBlockInfo(t *testing.T) { - input := &L1Input{ - Hash: common.HexToHash("0xaa"), - Number: 100, - Timestamp: 1000, - ParentHash: common.HexToHash("0x99"), - MixDigest: common.HexToHash("0xdd"), - BaseFee: big.NewInt(7), - BlobBaseFee: big.NewInt(3), +func TestL1InputBlockInfo(t *testing.T) { + header := &types.Header{ + ParentHash: common.HexToHash("0x99"), + Number: big.NewInt(100), + Time: 1000, + MixDigest: common.HexToHash("0xdd"), + BaseFee: big.NewInt(7), + ExcessBlobGas: ptrTo(uint64(0)), } - info := &l1InputInfo{input} + input := &L1Input{Header: header} + info := input.blockInfo() - require.Equal(t, input.Hash, info.Hash()) - require.Equal(t, input.ParentHash, info.ParentHash()) - require.Equal(t, input.Number, info.NumberU64()) - require.Equal(t, input.Timestamp, info.Time()) - require.Equal(t, input.MixDigest, info.MixDigest()) - require.Equal(t, input.BaseFee, info.BaseFee()) - require.Equal(t, input.BlobBaseFee, info.BlobBaseFee(nil)) + require.Equal(t, header.Hash(), info.Hash()) + require.Equal(t, header.ParentHash, info.ParentHash()) + require.Equal(t, uint64(100), info.NumberU64()) + require.Equal(t, uint64(1000), info.Time()) + require.Equal(t, header.MixDigest, info.MixDigest()) + require.Equal(t, header.BaseFee, info.BaseFee()) - // Zero-value methods - require.Equal(t, common.Address{}, info.Coinbase()) - require.Equal(t, common.Hash{}, info.Root()) - require.Equal(t, common.Hash{}, info.ReceiptHash()) - require.Equal(t, uint64(0), info.GasUsed()) - require.Equal(t, uint64(0), info.GasLimit()) - require.Nil(t, info.ParentBeaconRoot()) - require.Nil(t, info.WithdrawalsRoot()) - - // ExcessBlobGas is non-nil when BlobBaseFee is set - require.NotNil(t, info.ExcessBlobGas()) - - // Header returns a valid header 
- h := info.Header() - require.Equal(t, input.ParentHash, h.ParentHash) - require.Equal(t, input.Number, h.Number.Uint64()) - - // HeaderRLP doesn't error + // Header and HeaderRLP delegate to the underlying header + require.Equal(t, header, info.Header()) _, err := info.HeaderRLP() require.NoError(t, err) } -func TestL1InputInfoNilBlobBaseFee(t *testing.T) { - input := &L1Input{ - Hash: common.HexToHash("0xaa"), - Number: 100, - BaseFee: big.NewInt(7), +func TestCursorNeedsEmptyBatch(t *testing.T) { + cfg := testRollupConfig() // SeqWindowSize = 10 + + cursor := l2Cursor{ + Number: 10, + Timestamp: 100, + L1Origin: eth.BlockID{Number: 5}, } - info := &l1InputInfo{input} - require.Nil(t, info.BlobBaseFee(nil)) - require.Nil(t, info.ExcessBlobGas()) + + // currentL1.Number (15) == cursor.L1Origin.Number (5) + SeqWindowSize (10) + // Not strictly greater, so window not expired + require.False(t, cursor.needsEmptyBatch(eth.L1BlockRef{Number: 15}, cfg)) + + // currentL1.Number (16) > cursor.L1Origin.Number (5) + SeqWindowSize (10) + require.True(t, cursor.needsEmptyBatch(eth.L1BlockRef{Number: 16}, cfg)) } From 223092d567d33405fa7b715252a7d834346a1ef4 Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Wed, 25 Feb 2026 11:01:46 -0500 Subject: [PATCH 07/15] pure: complete batch validation, remove pre-Karst NUTs, cleanup - Add full batch validation matching upstream checkSingularBatch: sequence window, epoch advancement (current/next only), timestamp >= L1 origin, max sequencer drift, fork activation blocks, and transaction validation (no empty txs, no deposits) - Remove Jovian NUTs (pre-Karst, can never be activation block) - Remove unused makeEmptyBatch - Remove isFjord variable (always true under Karst) - Simplify channel timeout integration test - Fix test L1 block times for realistic epoch advancement Co-Authored-By: Claude Opus 4.6 --- op-core/pure/attributes.go | 26 +++------- op-core/pure/batches.go | 93 ++++++++++++++++++++++++++---------- 
op-core/pure/batches_test.go | 75 ++++++++++++++++++++++++----- op-core/pure/derive.go | 6 +-- op-core/pure/derive_test.go | 44 ++++++++--------- op-core/pure/helpers_test.go | 4 +- 6 files changed, 162 insertions(+), 86 deletions(-) diff --git a/op-core/pure/attributes.go b/op-core/pure/attributes.go index ea852b5e20654..896ac5a06b8cb 100644 --- a/op-core/pure/attributes.go +++ b/op-core/pure/attributes.go @@ -20,8 +20,11 @@ import ( // Transaction ordering follows the OP Stack derivation spec: // 1. L1 info deposit transaction (always first) // 2. User deposit transactions (only at epoch boundaries) -// 3. Network upgrade transactions (at fork activation blocks) -// 4. Batch transactions from the sequencer +// 3. Batch transactions from the sequencer +// +// Network upgrade transactions (NUTs) are not included because all pre-Karst +// forks are already active (PureDerive requires Karst), and Karst itself has +// no NUTs. Future forks with NUTs must be added here. func buildAttributes( batch *derive.SingularBatch, l1Block *L1Input, @@ -50,23 +53,7 @@ func buildAttributes( return nil, fmt.Errorf("failed to encode L1 info deposit tx: %w", err) } - // Network upgrade transactions (NUTs). Only forks from Jovian onward are - // included; earlier forks (Ecotone, Fjord, Isthmus) cannot be activation - // blocks since PureDerive requires Karst to already be active. - var upgradeTxs []hexutil.Bytes - - if cfg.IsJovianActivationBlock(l2Timestamp) { - jovianTxs, err := derive.JovianNetworkUpgradeTransactions() - if err != nil { - return nil, fmt.Errorf("failed to build Jovian network upgrade txs: %w", err) - } - upgradeTxs = append(upgradeTxs, jovianTxs...) - } - - // TODO: Add Karst NUTs here once KarstNetworkUpgradeTransactions() exists. - // Karst currently has no network upgrade transactions. 
- - txCount := 1 + len(upgradeTxs) + len(batch.Transactions) + txCount := 1 + len(batch.Transactions) if epochChanged { txCount += len(l1Block.Deposits) } @@ -83,7 +70,6 @@ func buildAttributes( } } - txs = append(txs, upgradeTxs...) txs = append(txs, batch.Transactions...) gasLimit := sysConfig.GasLimit diff --git a/op-core/pure/batches.go b/op-core/pure/batches.go index 326a4f2642bff..312e8262de21c 100644 --- a/op-core/pure/batches.go +++ b/op-core/pure/batches.go @@ -4,6 +4,8 @@ import ( "fmt" "io" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -25,9 +27,8 @@ func decodeBatches( ) ([]*derive.SingularBatch, error) { spec := rollup.NewChainSpec(cfg) maxRLP := spec.MaxRLPBytesPerChannel(cursor.Timestamp) - isFjord := cfg.IsFjord(cursor.Timestamp) - readBatch, err := derive.BatchReader(r, maxRLP, isFjord) + readBatch, err := derive.BatchReader(r, maxRLP, true) // Fjord always active (implied by Karst) if err != nil { return nil, fmt.Errorf("creating batch reader: %w", err) } @@ -89,15 +90,15 @@ func decodeBatches( return batches, nil } -// validateBatch performs simplified batch validation suitable for Karst and later. -// It checks timestamp sequencing, epoch bounds, and epoch hash consistency. +// validateBatch performs batch validation matching the checks in +// op-node/rollup/derive/batches.go (checkSingularBatch), minus checks that +// require L2 state access: +// - Parent hash validation (deferred to post-execution via DerivedBlock.ExpectedParentHash) // -// This is a subset of the full validation in op-node/rollup/derive/batches.go -// (checkSingularBatch / CheckBatch). The upstream functions are unexported and -// require an l2Fetcher for L2 state lookups that we intentionally avoid. 
-// With Karst active, overlapping span batches are already rejected in decodeBatches, -// so the remaining checks here are sufficient for correctness. -func validateBatch(batch *derive.SingularBatch, cursor l2Cursor, l1Origins []eth.L1BlockRef, cfg *rollup.Config) bool { +// All other checks from checkSingularBatch are replicated here. With Karst active, +// overlapping span batches are already rejected in decodeBatches. +func validateBatch(batch *derive.SingularBatch, cursor l2Cursor, l1Origins []eth.L1BlockRef, cfg *rollup.Config, l1InclusionNum uint64) bool { + // Timestamp must be the next expected L2 timestamp. expectedTimestamp := cursor.Timestamp + cfg.BlockTime if batch.Timestamp != expectedTimestamp { return false @@ -105,33 +106,75 @@ func validateBatch(batch *derive.SingularBatch, cursor l2Cursor, l1Origins []eth epochNum := uint64(batch.EpochNum) + // Sequence window: batch must be included within SeqWindowSize of its epoch. + if epochNum+cfg.SeqWindowSize < l1InclusionNum { + return false + } + + // Epoch must be current or next (cannot skip epochs). if epochNum < cursor.L1Origin.Number { return false } + if epochNum > cursor.L1Origin.Number+1 { + return false + } - if len(l1Origins) == 0 { + // Find the batch's L1 origin and verify epoch hash. + var batchOrigin *eth.L1BlockRef + for i := range l1Origins { + if l1Origins[i].Number == epochNum { + batchOrigin = &l1Origins[i] + break + } + } + if batchOrigin == nil { return false } - latestOrigin := l1Origins[len(l1Origins)-1] - if epochNum > latestOrigin.Number { + if batch.EpochHash != batchOrigin.Hash { return false } - for _, origin := range l1Origins { - if origin.Number == epochNum { - return batch.EpochHash == origin.Hash + // Batch timestamp must be >= L1 origin timestamp. + if batch.Timestamp < batchOrigin.Time { + return false + } + + // Sequencer time drift: L2 time must not exceed L1 time + MaxSequencerDrift. 
+ spec := rollup.NewChainSpec(cfg) + maxDrift := batchOrigin.Time + spec.MaxSequencerDrift(batchOrigin.Time) + if batch.Timestamp > maxDrift { + if len(batch.Transactions) == 0 { + // Empty batches may exceed drift to maintain L2 time >= L1 time invariant, + // but only if they don't advance the epoch and the next origin isn't available. + if epochNum == cursor.L1Origin.Number { + for i := range l1Origins { + if l1Origins[i].Number == epochNum+1 { + if batch.Timestamp >= l1Origins[i].Time { + return false // should have adopted next origin + } + break + } + } + } + } else { + return false } } - return false -} + // Fork activation blocks must not contain user transactions. + if cfg.IsKarstActivationBlock(batch.Timestamp) && len(batch.Transactions) > 0 { + return false + } -// makeEmptyBatch creates a batch with no transactions at the next expected -// timestamp, advancing from the current cursor position. -func makeEmptyBatch(cursor l2Cursor, cfg *rollup.Config) *derive.SingularBatch { - return &derive.SingularBatch{ - EpochNum: rollup.Epoch(cursor.L1Origin.Number), - EpochHash: cursor.L1Origin.Hash, - Timestamp: cursor.Timestamp + cfg.BlockTime, + // Transaction validation. 
+ for _, txBytes := range batch.Transactions { + if len(txBytes) == 0 { + return false + } + if txBytes[0] == types.DepositTxType { + return false + } } + + return true } diff --git a/op-core/pure/batches_test.go b/op-core/pure/batches_test.go index 0229e811744df..92b1c31645939 100644 --- a/op-core/pure/batches_test.go +++ b/op-core/pure/batches_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/op-node/rollup" @@ -58,7 +59,7 @@ func TestValidateBatch_ValidSingular(t *testing.T) { l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - require.True(t, validateBatch(batch, cursor, l1Origins, cfg)) + require.True(t, validateBatch(batch, cursor, l1Origins, cfg, l1Origin.Number)) } func TestValidateBatch_WrongTimestamp(t *testing.T) { @@ -79,7 +80,7 @@ func TestValidateBatch_WrongTimestamp(t *testing.T) { l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - require.False(t, validateBatch(batch, cursor, l1Origins, cfg)) + require.False(t, validateBatch(batch, cursor, l1Origins, cfg, l1Origin.Number)) } func TestValidateBatch_SpanBatchNoOverlap(t *testing.T) { @@ -101,7 +102,7 @@ func TestValidateBatch_SpanBatchNoOverlap(t *testing.T) { l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - require.False(t, validateBatch(batch, cursor, l1Origins, cfg)) + require.False(t, validateBatch(batch, cursor, l1Origins, cfg, l1Origin.Number)) } func TestValidateBatch_EpochTooOld(t *testing.T) { @@ -123,7 +124,7 @@ func TestValidateBatch_EpochTooOld(t *testing.T) { l1Origins := []eth.L1BlockRef{oldOrigin, testL1Ref(4), l1Origin, testL1Ref(6)} - require.False(t, validateBatch(batch, cursor, l1Origins, cfg)) + require.False(t, validateBatch(batch, cursor, l1Origins, cfg, l1Origin.Number)) } func TestValidateBatch_EpochTooNew(t *testing.T) { @@ -144,23 +145,71 @@ func 
TestValidateBatch_EpochTooNew(t *testing.T) { l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - require.False(t, validateBatch(batch, cursor, l1Origins, cfg)) + require.False(t, validateBatch(batch, cursor, l1Origins, cfg, l1Origin.Number)) } -func TestMakeEmptyBatch(t *testing.T) { +func TestValidateBatch_SequenceWindowExpired(t *testing.T) { cfg := testRollupConfig() - origin := testL1Ref(5) + l1Origin := testL1Ref(5) cursor := l2Cursor{ Number: 10, Timestamp: 100, - L1Origin: origin.ID(), + L1Origin: l1Origin.ID(), + } + + batch := &derive.SingularBatch{ + EpochNum: rollup.Epoch(l1Origin.Number), + EpochHash: l1Origin.Hash, + Timestamp: cursor.Timestamp + cfg.BlockTime, + } + + l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} + + // Inclusion at block 16: epochNum(5) + SeqWindowSize(10) = 15 < 16 → expired + require.False(t, validateBatch(batch, cursor, l1Origins, cfg, 16)) +} + +func TestValidateBatch_EpochSkip(t *testing.T) { + cfg := testRollupConfig() + l1Origin := testL1Ref(5) + + cursor := l2Cursor{ + Number: 10, + Timestamp: 100, + L1Origin: l1Origin.ID(), } - batch := makeEmptyBatch(cursor, cfg) + // Epoch 7 skips over epoch 6 (cursor is at 5, can only go to 6) + batch := &derive.SingularBatch{ + EpochNum: rollup.Epoch(7), + EpochHash: testL1Ref(7).Hash, + Timestamp: cursor.Timestamp + cfg.BlockTime, + } + + l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6), testL1Ref(7)} + + require.False(t, validateBatch(batch, cursor, l1Origins, cfg, l1Origin.Number)) +} + +func TestValidateBatch_DepositTxRejected(t *testing.T) { + cfg := testRollupConfig() + l1Origin := testL1Ref(5) + + cursor := l2Cursor{ + Number: 10, + Timestamp: 100, + L1Origin: l1Origin.ID(), + } + + batch := &derive.SingularBatch{ + EpochNum: rollup.Epoch(l1Origin.Number), + EpochHash: l1Origin.Hash, + Timestamp: cursor.Timestamp + cfg.BlockTime, + Transactions: []hexutil.Bytes{{0x7e, 0x01, 0x02}}, // deposit tx type + } + + l1Origins := 
[]eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - require.Equal(t, rollup.Epoch(origin.Number), batch.EpochNum) - require.Equal(t, origin.Hash, batch.EpochHash) - require.Equal(t, cursor.Timestamp+cfg.BlockTime, batch.Timestamp) - require.Empty(t, batch.Transactions) + require.False(t, validateBatch(batch, cursor, l1Origins, cfg, l1Origin.Number)) } diff --git a/op-core/pure/derive.go b/op-core/pure/derive.go index 55b95c9e60570..ce5e2db700e76 100644 --- a/op-core/pure/derive.go +++ b/op-core/pure/derive.go @@ -24,11 +24,11 @@ import ( // // Compared to the legacy pipeline (op-node/rollup/derive), this implementation // intentionally skips the following checks: -// - Parent hash validation against the actual L2 chain (deferred to post-execution) +// - Parent hash validation against the actual L2 chain (deferred to post-execution +// via DerivedBlock.ExpectedParentHash) // - L2 block hash verification (no L2 state access) // - Span batch overlap comparison (rejected by Karst; overlaps are invalid) // - Pipeline reset / reorg handling (caller is responsible for providing correct inputs) -// - Sequencer drift checks requiring L2 state lookups // // See op-node/rollup/derive/batches.go for the full upstream validation logic. 
func PureDerive( @@ -102,7 +102,7 @@ func PureDerive( } for _, batch := range batches { - if !validateBatch(batch, cursor, l1Origins, cfg) { + if !validateBatch(batch, cursor, l1Origins, cfg, l1Ref.Number) { continue } diff --git a/op-core/pure/derive_test.go b/op-core/pure/derive_test.go index c42d049d4dca9..f655b4f92114b 100644 --- a/op-core/pure/derive_test.go +++ b/op-core/pure/derive_test.go @@ -80,23 +80,24 @@ func TestPureDerive_MultipleChannelsAndEpochs(t *testing.T) { func TestPureDerive_ChannelTimeout(t *testing.T) { cfg := testRollupConfig() + cfg.ChannelTimeoutBedrock = 2 // short timeout for testing safeHead := testSafeHead(cfg) sysConfig := testSystemConfig() - // Create an incomplete channel at L1 block 1 (frame 0 of 2, not last). + l1Block0 := makeTestL1Input(0) // safe head's L1 origin + l1Block0Ref := l1Block0.BlockRef() + + // Create an incomplete channel at L1 block 1 (frame 0, not last). incompleteL1 := makeTestL1Input(1) - incompleteL1Ref := incompleteL1.BlockRef() incompleteChID := testChannelID(0xAA) - batch := &derive.SingularBatch{ + channelData := encodeBatchToChannelData(t, &derive.SingularBatch{ ParentHash: safeHead.Hash, - EpochNum: rollup.Epoch(incompleteL1Ref.Number), - EpochHash: incompleteL1Ref.Hash, + EpochNum: rollup.Epoch(l1Block0Ref.Number), + EpochHash: l1Block0Ref.Hash, Timestamp: safeHead.Time + cfg.BlockTime, - } - channelData := encodeBatchToChannelData(t, batch) + }) - // Split into two frames but only include the first (non-last) frame. frame0 := derive.Frame{ ID: incompleteChID, FrameNumber: 0, @@ -108,25 +109,23 @@ func TestPureDerive_ChannelTimeout(t *testing.T) { require.NoError(t, frame0.MarshalBinary(&buf)) incompleteL1.BatcherData = [][]byte{buf.Bytes()} - // Fill gap L1 blocks until timeout. Channel timeout is 50, so we need - // blocks 2..52 to cause timeout at block 52. + // L1 blocks: 0 (origin), 1 (incomplete channel), 2, 3, 4 (complete channel). + // Timeout fires at block 4 (4 > 1 + 2). 
No empty batches generated + // because we're well within SeqWindowSize (10). var l1Blocks []L1Input - l1Blocks = append(l1Blocks, *makeTestL1Input(0)) // safe head's L1 origin + l1Blocks = append(l1Blocks, *l1Block0) l1Blocks = append(l1Blocks, *incompleteL1) - for i := uint64(2); i <= cfg.ChannelTimeoutBedrock+2; i++ { - l1Blocks = append(l1Blocks, *makeTestL1Input(i)) - } + l1Blocks = append(l1Blocks, *makeTestL1Input(2)) + l1Blocks = append(l1Blocks, *makeTestL1Input(3)) - // After timeout, add a complete channel. - completeL1Num := cfg.ChannelTimeoutBedrock + 3 - completeL1 := makeTestL1Input(completeL1Num) + // Complete channel at L1 block 4 (after timeout). + completeL1 := makeTestL1Input(4) completeChID := testChannelID(0xBB) - // The batch must reference an L1 block we have. Use block 1's ref as epoch. completeBatch := &derive.SingularBatch{ ParentHash: safeHead.Hash, - EpochNum: rollup.Epoch(1), - EpochHash: incompleteL1Ref.Hash, + EpochNum: rollup.Epoch(l1Block0Ref.Number), + EpochHash: l1Block0Ref.Hash, Timestamp: safeHead.Time + cfg.BlockTime, } completeChannelData := encodeBatchToChannelData(t, completeBatch) @@ -137,8 +136,7 @@ func TestPureDerive_ChannelTimeout(t *testing.T) { derived, err := PureDerive(cfg, safeHead, sysConfig, l1Blocks) require.NoError(t, err) - // We should get at least one derived block from the complete channel. - // The incomplete channel should have timed out and produced nothing. + // The incomplete channel timed out. Only the complete channel produces a block. 
foundFromComplete := false for _, block := range derived { if uint64(block.Attributes.Timestamp) == safeHead.Time+cfg.BlockTime { @@ -146,7 +144,7 @@ func TestPureDerive_ChannelTimeout(t *testing.T) { break } } - require.True(t, foundFromComplete, "should have a derived block from the complete channel after timeout") + require.True(t, foundFromComplete, "should derive block from complete channel after timeout") } func TestPureDerive_InvalidBatchSkipped(t *testing.T) { diff --git a/op-core/pure/helpers_test.go b/op-core/pure/helpers_test.go index 254b1c8b9c021..3ab5c77e2d7b1 100644 --- a/op-core/pure/helpers_test.go +++ b/op-core/pure/helpers_test.go @@ -76,7 +76,7 @@ func makeTestL1Input(num uint64) *L1Input { Header: &types.Header{ ParentHash: common.BigToHash(new(big.Int).SetUint64(num + 0x100 - 1)), Number: new(big.Int).SetUint64(num), - Time: 1000 + num*12, + Time: num * 2, // match L2 block time for simple epoch advancement in tests BaseFee: big.NewInt(7), MixDigest: common.BigToHash(new(big.Int).SetUint64(num + 0x200)), // ExcessBlobGas required for BlobBaseFee to work via HeaderBlockInfo @@ -169,7 +169,7 @@ func TestHelpers(t *testing.T) { l1 := makeTestL1Input(5) require.Equal(t, uint64(5), bigs.Uint64Strict(l1.Header.Number)) - require.Equal(t, uint64(1000+5*12), l1.Header.Time) + require.Equal(t, uint64(5*2), l1.Header.Time) dep := makeTestDeposit() require.NotNil(t, dep) From f557d526a6eadbf0beefa501ecd6e71513bc3caf Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Wed, 25 Feb 2026 13:26:31 -0500 Subject: [PATCH 08/15] pure: address review comments - types.go: use eth.BlockRefFromHeader and eth.HeaderBlockID - batches.go: export CheckSpanBatchPrefix with Karst overlap rejection and nil l2Fetcher support, call from pure code - Remove inline span batch overlap check in favor of upstream Co-Authored-By: Claude Opus 4.6 --- op-core/pure/batches.go | 36 ++++++++++++++++++---------- op-core/pure/batches_test.go | 2 +- op-core/pure/derive.go | 2 +- 
op-core/pure/types.go | 10 ++------ op-node/rollup/derive/batch_stage.go | 2 +- op-node/rollup/derive/batches.go | 21 ++++++++++++---- 6 files changed, 45 insertions(+), 28 deletions(-) diff --git a/op-core/pure/batches.go b/op-core/pure/batches.go index 312e8262de21c..64a28c3813b87 100644 --- a/op-core/pure/batches.go +++ b/op-core/pure/batches.go @@ -1,10 +1,12 @@ package pure import ( + "context" "fmt" "io" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" @@ -15,15 +17,14 @@ import ( // and returns them as singular batches. Span batches are expanded into // individual singular batches using the provided L1 origins and cursor. // -// With Karst active, span batches must not overlap the safe chain. If the first -// batch in a span has timestamp <= cursor.Timestamp, the entire span is rejected. -// See checkSpanBatchPrefix in op-node/rollup/derive/batches.go for the full -// upstream overlap handling. +// Span batch prefix validation is delegated to derive.CheckSpanBatchPrefix, +// which rejects overlapping span batches under Karst. func decodeBatches( r io.Reader, cfg *rollup.Config, l1Origins []eth.L1BlockRef, cursor l2Cursor, + l1InclusionBlock eth.L1BlockRef, ) ([]*derive.SingularBatch, error) { spec := rollup.NewChainSpec(cfg) maxRLP := spec.MaxRLPBytesPerChannel(cursor.Timestamp) @@ -62,20 +63,31 @@ func decodeBatches( return nil, fmt.Errorf("deriving span batch: %w", err) } - // Reject overlapping span batches. Under Karst, span batches that start - // at or before the safe head are invalid. This mirrors the overlap rejection - // in checkSpanBatchPrefix (op-node/rollup/derive/batches.go). 
- if spanBatch.GetTimestamp() <= cursor.Timestamp { - return nil, fmt.Errorf("span batch timestamp %d overlaps safe head at %d (rejected under Karst)", - spanBatch.GetTimestamp(), cursor.Timestamp) - } - l2SafeHead := eth.L2BlockRef{ Number: cursor.Number, Time: cursor.Timestamp, L1Origin: cursor.L1Origin, SequenceNumber: cursor.SequenceNumber, } + + // Build l1Blocks slice starting from the cursor's epoch, as + // CheckSpanBatchPrefix expects l1Blocks[0] to be the current epoch. + var l1Blocks []eth.L1BlockRef + for _, ref := range l1Origins { + if ref.Number >= cursor.L1Origin.Number { + l1Blocks = append(l1Blocks, ref) + } + } + + validity, _ := derive.CheckSpanBatchPrefix( + context.Background(), cfg, + log.NewLogger(log.DiscardHandler()), + l1Blocks, l2SafeHead, spanBatch, l1InclusionBlock, nil, + ) + if validity != derive.BatchAccept { + return nil, fmt.Errorf("span batch prefix check failed (validity=%d)", validity) + } + singular, err := spanBatch.GetSingularBatches(l1Origins, l2SafeHead) if err != nil { return nil, fmt.Errorf("expanding span batch: %w", err) diff --git a/op-core/pure/batches_test.go b/op-core/pure/batches_test.go index 92b1c31645939..8cd105252ee2e 100644 --- a/op-core/pure/batches_test.go +++ b/op-core/pure/batches_test.go @@ -30,7 +30,7 @@ func TestDecodeBatches_SingularBatch(t *testing.T) { cursor := newCursor(safeHead) l1Origins := []eth.L1BlockRef{testL1Ref(0), l1Ref} - batches, err := decodeBatches(bytes.NewReader(channelData), cfg, l1Origins, cursor) + batches, err := decodeBatches(bytes.NewReader(channelData), cfg, l1Origins, cursor, l1Ref) require.NoError(t, err) require.Len(t, batches, 1) diff --git a/op-core/pure/derive.go b/op-core/pure/derive.go index ce5e2db700e76..70b344b4ba5c2 100644 --- a/op-core/pure/derive.go +++ b/op-core/pure/derive.go @@ -96,7 +96,7 @@ func PureDerive( continue } - batches, err := decodeBatches(ready.channel.Reader(), cfg, l1Origins, cursor) + batches, err := decodeBatches(ready.channel.Reader(), cfg, 
l1Origins, cursor, ready.openBlock) if err != nil { continue } diff --git a/op-core/pure/types.go b/op-core/pure/types.go index 3aafc6bec1255..2b4f05fc11256 100644 --- a/op-core/pure/types.go +++ b/op-core/pure/types.go @@ -8,7 +8,6 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -28,17 +27,12 @@ type L1Input struct { // BlockRef converts the L1 header to an eth.L1BlockRef. func (l *L1Input) BlockRef() eth.L1BlockRef { - return eth.L1BlockRef{ - Hash: l.Header.Hash(), - Number: bigs.Uint64Strict(l.Header.Number), - ParentHash: l.Header.ParentHash, - Time: l.Header.Time, - } + return *eth.BlockRefFromHeader(l.Header) } // BlockID returns the block's ID (hash + number). func (l *L1Input) BlockID() eth.BlockID { - return eth.BlockID{Hash: l.Header.Hash(), Number: bigs.Uint64Strict(l.Header.Number)} + return eth.HeaderBlockID(l.Header) } // blockInfo returns an eth.BlockInfo adapter for the L1Input's header, diff --git a/op-node/rollup/derive/batch_stage.go b/op-node/rollup/derive/batch_stage.go index 6014da7b9dcf4..65c68aa17e313 100644 --- a/op-node/rollup/derive/batch_stage.go +++ b/op-node/rollup/derive/batch_stage.go @@ -136,7 +136,7 @@ func (bs *BatchStage) nextSingularBatchCandidate(ctx context.Context, parent eth return nil, NewCriticalError(errors.New("failed type assertion to SpanBatch")) } - validity, _ := checkSpanBatchPrefix(ctx, bs.config, bs.Log(), bs.l1Blocks, parent, spanBatch, bs.origin, bs.l2) + validity, _ := CheckSpanBatchPrefix(ctx, bs.config, bs.Log(), bs.l1Blocks, parent, spanBatch, bs.origin, bs.l2) switch validity { case BatchAccept: // continue spanBatch.LogContext(bs.Log()).Info("Found next valid span batch") diff --git a/op-node/rollup/derive/batches.go b/op-node/rollup/derive/batches.go index 4db6d69f1a9b1..95c11f28e37b5 100644 --- a/op-node/rollup/derive/batches.go 
+++ b/op-node/rollup/derive/batches.go @@ -191,10 +191,14 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo return BatchAccept } -// checkSpanBatchPrefix performs the span batch prefix rules for Holocene. +// CheckSpanBatchPrefix performs the span batch prefix rules for Holocene. // Next to the validity, it also returns the parent L2 block as determined during the checks for // further consumption. -func checkSpanBatchPrefix(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef, +// +// Under Karst, overlapping span batches (timestamp < next expected) are dropped. +// When l2Fetcher is nil, the parent hash check is skipped (for pure derivation +// where L2 block hashes are not available). +func CheckSpanBatchPrefix(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef, batch *SpanBatch, l1InclusionBlock eth.L1BlockRef, l2Fetcher SafeBlockFetcher, ) (BatchValidity, eth.L2BlockRef) { // add details to the log @@ -240,9 +244,14 @@ func checkSpanBatchPrefix(ctx context.Context, cfg *rollup.Config, log log.Logge } // finding parent block of the span batch. - // if the span batch does not overlap the current safe chain, parentBLock should be l2SafeHead. + // if the span batch does not overlap the current safe chain, parentBlock should be l2SafeHead. parentBlock := l2SafeHead if batch.GetTimestamp() < nextTimestamp { + // Under Karst, overlapping span batches are invalid. 
+ if cfg.IsKarst(l2SafeHead.Time) { + log.Warn("dropping overlapping span batch under Karst") + return BatchDrop, eth.L2BlockRef{} + } if batch.GetTimestamp() > l2SafeHead.Time { // batch timestamp cannot be between safe head and next timestamp log.Warn("batch has misaligned timestamp, block time is too short") @@ -261,7 +270,9 @@ func checkSpanBatchPrefix(ctx context.Context, cfg *rollup.Config, log log.Logge return BatchUndecided, eth.L2BlockRef{} } } - if !batch.CheckParentHash(parentBlock.Hash) { + // Skip parent hash check when l2Fetcher is nil (pure derivation mode + // where L2 block hashes are not available). + if l2Fetcher != nil && !batch.CheckParentHash(parentBlock.Hash) { log.Warn("ignoring batch with mismatching parent hash", "parent_block", parentBlock.Hash) return BatchDrop, parentBlock } @@ -308,7 +319,7 @@ func checkSpanBatchPrefix(ctx context.Context, cfg *rollup.Config, log log.Logge func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef, batch *SpanBatch, l1InclusionBlock eth.L1BlockRef, l2Fetcher SafeBlockFetcher, ) BatchValidity { - prefixValidity, parentBlock := checkSpanBatchPrefix(ctx, cfg, log, l1Blocks, l2SafeHead, batch, l1InclusionBlock, l2Fetcher) + prefixValidity, parentBlock := CheckSpanBatchPrefix(ctx, cfg, log, l1Blocks, l2SafeHead, batch, l1InclusionBlock, l2Fetcher) if prefixValidity != BatchAccept { return prefixValidity } From 35b9eb09c49414b13048d482ce4932cfb69f9db6 Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Wed, 25 Feb 2026 13:44:31 -0500 Subject: [PATCH 09/15] pure: pass L1 chain config through to L1InfoDeposit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove l1BlockInfoAdapter — pass the L1 chain config as an explicit dependency to PureDerive and thread it through to L1InfoDeposit, which needs it for BlobBaseFee computation. 
Co-Authored-By: Claude Opus 4.6 --- op-core/pure/attributes.go | 4 ++- op-core/pure/attributes_test.go | 14 ++++---- op-core/pure/derive.go | 7 ++-- op-core/pure/derive_test.go | 16 ++++----- op-core/pure/helpers_test.go | 5 +++ op-core/pure/types.go | 57 --------------------------------- op-core/pure/types_test.go | 6 ++-- 7 files changed, 30 insertions(+), 79 deletions(-) diff --git a/op-core/pure/attributes.go b/op-core/pure/attributes.go index 896ac5a06b8cb..065b8a0f9c9e7 100644 --- a/op-core/pure/attributes.go +++ b/op-core/pure/attributes.go @@ -6,6 +6,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-node/rollup" @@ -31,6 +32,7 @@ func buildAttributes( cursor l2Cursor, sysConfig eth.SystemConfig, cfg *rollup.Config, + l1ChainConfig *params.ChainConfig, ) (*DerivedBlock, error) { epochChanged := uint64(batch.EpochNum) != cursor.L1Origin.Number @@ -43,7 +45,7 @@ func buildAttributes( l2Timestamp := batch.Timestamp - l1InfoTx, err := derive.L1InfoDeposit(cfg, nil, sysConfig, seqNumber, l1Block.blockInfo(), l2Timestamp) + l1InfoTx, err := derive.L1InfoDeposit(cfg, l1ChainConfig, sysConfig, seqNumber, eth.HeaderBlockInfo(l1Block.Header), l2Timestamp) if err != nil { return nil, fmt.Errorf("failed to create L1 info deposit tx: %w", err) } diff --git a/op-core/pure/attributes_test.go b/op-core/pure/attributes_test.go index f284f91ae6326..8a296d1d4d2c8 100644 --- a/op-core/pure/attributes_test.go +++ b/op-core/pure/attributes_test.go @@ -41,7 +41,7 @@ func TestBuildAttributes_EpochStart(t *testing.T) { SequenceNumber: 3, } - result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg) + result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) require.NoError(t, err) require.NotNil(t, 
result) require.NotNil(t, result.Attributes) @@ -94,7 +94,7 @@ func TestBuildAttributes_SameEpoch(t *testing.T) { SequenceNumber: 2, } - result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg) + result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) require.NoError(t, err) require.NotNil(t, result) @@ -137,7 +137,7 @@ func TestBuildAttributes_EmptyBatch(t *testing.T) { SequenceNumber: 0, } - result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg) + result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) require.NoError(t, err) // L1 info deposit + 1 user deposit = 2 (no batch txs) @@ -165,7 +165,7 @@ func TestBuildAttributes_EmptyBatch(t *testing.T) { SequenceNumber: 1, } - result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg) + result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) require.NoError(t, err) // Only L1 info deposit, no user deposits, no batch txs @@ -196,7 +196,7 @@ func TestBuildAttributes_HoloceneFields(t *testing.T) { SequenceNumber: 0, } - result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg) + result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) require.NoError(t, err) require.NotNil(t, result.Attributes.EIP1559Params) require.Equal(t, sysConfig.EIP1559Params, *result.Attributes.EIP1559Params) @@ -226,7 +226,7 @@ func TestBuildAttributes_SequenceNumber(t *testing.T) { SequenceNumber: 5, } - result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg) + result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) require.NoError(t, err) require.NotNil(t, result) }) @@ -246,7 +246,7 @@ func TestBuildAttributes_SequenceNumber(t *testing.T) { SequenceNumber: 5, } - result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg) + result, err := buildAttributes(batch, l1Block, cursor, 
sysConfig, cfg, testL1ChainConfig()) require.NoError(t, err) require.NotNil(t, result) }) diff --git a/op-core/pure/derive.go b/op-core/pure/derive.go index 70b344b4ba5c2..91f971a97076b 100644 --- a/op-core/pure/derive.go +++ b/op-core/pure/derive.go @@ -3,6 +3,8 @@ package pure import ( "fmt" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/bigs" @@ -33,6 +35,7 @@ import ( // See op-node/rollup/derive/batches.go for the full upstream validation logic. func PureDerive( cfg *rollup.Config, + l1ChainConfig *params.ChainConfig, safeHead eth.L2BlockRef, sysConfig eth.SystemConfig, l1Blocks []L1Input, @@ -111,7 +114,7 @@ func PureDerive( return nil, fmt.Errorf("missing L1 block %d for batch epoch", batch.EpochNum) } - block, err := buildAttributes(batch, epochL1, cursor, sysConfig, cfg) + block, err := buildAttributes(batch, epochL1, cursor, sysConfig, cfg, l1ChainConfig) if err != nil { return nil, fmt.Errorf("building attributes at L1 block %d: %w", l1Ref.Number, err) } @@ -151,7 +154,7 @@ func PureDerive( if epochL1 == nil { return nil, fmt.Errorf("missing L1 block %d for empty batch epoch", newOrigin.Number) } - block, err := buildAttributes(emptyBatch, epochL1, cursor, sysConfig, cfg) + block, err := buildAttributes(emptyBatch, epochL1, cursor, sysConfig, cfg, l1ChainConfig) if err != nil { return nil, fmt.Errorf("building empty batch attributes at L1 block %d: %w", l1Ref.Number, err) } diff --git a/op-core/pure/derive_test.go b/op-core/pure/derive_test.go index f655b4f92114b..3e19235894496 100644 --- a/op-core/pure/derive_test.go +++ b/op-core/pure/derive_test.go @@ -23,7 +23,7 @@ func TestPureDerive_SingleBatch(t *testing.T) { l1Origin := makeTestL1Input(0) // safe head's L1 origin l1 := makeL1WithBatch(t, cfg, 1, safeHead, sysConfig) - derived, err := PureDerive(cfg, safeHead, sysConfig, 
[]L1Input{*l1Origin, *l1}) + derived, err := PureDerive(cfg, testL1ChainConfig(), safeHead, sysConfig, []L1Input{*l1Origin, *l1}) require.NoError(t, err) require.Len(t, derived, 1) @@ -46,7 +46,7 @@ func TestPureDerive_EmptyEpoch(t *testing.T) { l1Blocks[i] = *makeTestL1Input(i) } - derived, err := PureDerive(cfg, safeHead, sysConfig, l1Blocks) + derived, err := PureDerive(cfg, testL1ChainConfig(), safeHead, sysConfig, l1Blocks) require.NoError(t, err) require.Greater(t, len(derived), 0, "empty batches should be generated when sequencer window expires") @@ -65,7 +65,7 @@ func TestPureDerive_MultipleChannelsAndEpochs(t *testing.T) { l1Blocks := makeMultiEpochL1Inputs(t, cfg, safeHead, sysConfig) - derived, err := PureDerive(cfg, safeHead, sysConfig, l1Blocks) + derived, err := PureDerive(cfg, testL1ChainConfig(), safeHead, sysConfig, l1Blocks) require.NoError(t, err) require.Greater(t, len(derived), 1, "should derive multiple blocks from multiple epochs") @@ -133,7 +133,7 @@ func TestPureDerive_ChannelTimeout(t *testing.T) { completeL1.BatcherData = [][]byte{completeTx} l1Blocks = append(l1Blocks, *completeL1) - derived, err := PureDerive(cfg, safeHead, sysConfig, l1Blocks) + derived, err := PureDerive(cfg, testL1ChainConfig(), safeHead, sysConfig, l1Blocks) require.NoError(t, err) // The incomplete channel timed out. Only the complete channel produces a block. 
@@ -170,7 +170,7 @@ func TestPureDerive_InvalidBatchSkipped(t *testing.T) { l1.BatcherData = [][]byte{batcherTx} l1Origin := makeTestL1Input(0) // safe head's L1 origin - derived, err := PureDerive(cfg, safeHead, sysConfig, []L1Input{*l1Origin, *l1}) + derived, err := PureDerive(cfg, testL1ChainConfig(), safeHead, sysConfig, []L1Input{*l1Origin, *l1}) require.NoError(t, err) require.Empty(t, derived, "invalid batch should be skipped without error") } @@ -181,7 +181,7 @@ func TestPureDerive_RejectsPreKarst(t *testing.T) { safeHead := testSafeHead(cfg) sysConfig := testSystemConfig() - _, err := PureDerive(cfg, safeHead, sysConfig, nil) + _, err := PureDerive(cfg, testL1ChainConfig(), safeHead, sysConfig, nil) require.Error(t, err) require.Contains(t, err.Error(), "Karst fork") } @@ -194,7 +194,7 @@ func TestPureDerive_ValidatesL1BlockRange(t *testing.T) { // Start L1 blocks after the safe head's L1 origin (gap) l1Blocks := []L1Input{*makeTestL1Input(5)} - _, err := PureDerive(cfg, safeHead, sysConfig, l1Blocks) + _, err := PureDerive(cfg, testL1ChainConfig(), safeHead, sysConfig, l1Blocks) require.Error(t, err) require.Contains(t, err.Error(), "l1Blocks start at") } @@ -204,7 +204,7 @@ func TestPureDerive_EmptyL1Blocks(t *testing.T) { safeHead := testSafeHead(cfg) sysConfig := testSystemConfig() - derived, err := PureDerive(cfg, safeHead, sysConfig, nil) + derived, err := PureDerive(cfg, testL1ChainConfig(), safeHead, sysConfig, nil) require.NoError(t, err) require.Nil(t, derived) } diff --git a/op-core/pure/helpers_test.go b/op-core/pure/helpers_test.go index 3ab5c77e2d7b1..30cefcbb4c476 100644 --- a/op-core/pure/helpers_test.go +++ b/op-core/pure/helpers_test.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + gethparams "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/require" @@ -61,6 +62,10 @@ func testSystemConfig() eth.SystemConfig { } } 
+func testL1ChainConfig() *gethparams.ChainConfig { + return gethparams.AllDevChainProtocolChanges +} + func testSafeHead(cfg *rollup.Config) eth.L2BlockRef { return eth.L2BlockRef{ Hash: cfg.Genesis.L2.Hash, diff --git a/op-core/pure/types.go b/op-core/pure/types.go index 2b4f05fc11256..34497e0c7f6d0 100644 --- a/op-core/pure/types.go +++ b/op-core/pure/types.go @@ -1,11 +1,8 @@ package pure import ( - "math/big" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/params" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -35,60 +32,6 @@ func (l *L1Input) BlockID() eth.BlockID { return eth.HeaderBlockID(l.Header) } -// blockInfo returns an eth.BlockInfo adapter for the L1Input's header, -// suitable for derive.L1InfoDeposit and similar consumers. -// -// L1InfoDeposit is called with a nil L1 chain config, so the standard -// HeaderBlockInfo cannot be used directly (its BlobBaseFee method calls -// eip4844.CalcBlobFee which requires a non-nil chain config). This wrapper -// delegates everything to HeaderBlockInfo except BlobBaseFee, which returns -// nil to let L1InfoDeposit apply its own fallback logic. -func (l *L1Input) blockInfo() eth.BlockInfo { - return &l1BlockInfoAdapter{inner: eth.HeaderBlockInfo(l.Header)} -} - -// l1BlockInfoAdapter wraps eth.BlockInfo to handle the nil-chainConfig case -// in BlobBaseFee. L1InfoDeposit computes blob base fee from ExcessBlobGas -// when BlobBaseFee returns nil, so we return nil here and let it handle the -// computation with its own chain config awareness. 
-type l1BlockInfoAdapter struct { - inner eth.BlockInfo -} - -var _ eth.BlockInfo = (*l1BlockInfoAdapter)(nil) - -func (a *l1BlockInfoAdapter) Hash() common.Hash { return a.inner.Hash() } -func (a *l1BlockInfoAdapter) ParentHash() common.Hash { return a.inner.ParentHash() } -func (a *l1BlockInfoAdapter) Coinbase() common.Address { return a.inner.Coinbase() } -func (a *l1BlockInfoAdapter) Root() common.Hash { return a.inner.Root() } -func (a *l1BlockInfoAdapter) NumberU64() uint64 { return a.inner.NumberU64() } -func (a *l1BlockInfoAdapter) Time() uint64 { return a.inner.Time() } -func (a *l1BlockInfoAdapter) MixDigest() common.Hash { return a.inner.MixDigest() } -func (a *l1BlockInfoAdapter) BaseFee() *big.Int { return a.inner.BaseFee() } -func (a *l1BlockInfoAdapter) ReceiptHash() common.Hash { return a.inner.ReceiptHash() } -func (a *l1BlockInfoAdapter) GasUsed() uint64 { return a.inner.GasUsed() } -func (a *l1BlockInfoAdapter) GasLimit() uint64 { return a.inner.GasLimit() } -func (a *l1BlockInfoAdapter) ParentBeaconRoot() *common.Hash { return a.inner.ParentBeaconRoot() } -func (a *l1BlockInfoAdapter) WithdrawalsRoot() *common.Hash { return a.inner.WithdrawalsRoot() } -func (a *l1BlockInfoAdapter) ExcessBlobGas() *uint64 { return a.inner.ExcessBlobGas() } -func (a *l1BlockInfoAdapter) BlobGasUsed() *uint64 { return a.inner.BlobGasUsed() } -func (a *l1BlockInfoAdapter) HeaderRLP() ([]byte, error) { return a.inner.HeaderRLP() } -func (a *l1BlockInfoAdapter) Header() *types.Header { return a.inner.Header() } - -// BlobBaseFee computes the blob base fee from the header's ExcessBlobGas -// without requiring an L1 chain config. L1InfoDeposit is called with a nil -// L1 chain config in pure derivation, so we cannot delegate to the standard -// HeaderBlockInfo.BlobBaseFee (which calls eip4844.CalcBlobFee with the -// chain config). Instead we use CalcBlobFeeCancun which only needs the -// excess blob gas value. 
-func (a *l1BlockInfoAdapter) BlobBaseFee(_ *params.ChainConfig) *big.Int { - ebg := a.inner.ExcessBlobGas() - if ebg == nil { - return nil - } - return eth.CalcBlobFeeCancun(*ebg) -} - // DerivedBlock is a single derived L2 block -- payload attributes ready for execution. type DerivedBlock struct { Attributes *eth.PayloadAttributes diff --git a/op-core/pure/types_test.go b/op-core/pure/types_test.go index 5647c28c9ca20..9e2a8202d0574 100644 --- a/op-core/pure/types_test.go +++ b/op-core/pure/types_test.go @@ -55,7 +55,7 @@ func TestCursorAdvance(t *testing.T) { require.Equal(t, uint64(3), c.SequenceNumber) } -func TestL1InputBlockInfo(t *testing.T) { +func TestL1InputHeaderBlockInfo(t *testing.T) { header := &types.Header{ ParentHash: common.HexToHash("0x99"), Number: big.NewInt(100), @@ -64,8 +64,7 @@ func TestL1InputBlockInfo(t *testing.T) { BaseFee: big.NewInt(7), ExcessBlobGas: ptrTo(uint64(0)), } - input := &L1Input{Header: header} - info := input.blockInfo() + info := eth.HeaderBlockInfo(header) require.Equal(t, header.Hash(), info.Hash()) require.Equal(t, header.ParentHash, info.ParentHash()) @@ -74,7 +73,6 @@ func TestL1InputBlockInfo(t *testing.T) { require.Equal(t, header.MixDigest, info.MixDigest()) require.Equal(t, header.BaseFee, info.BaseFee()) - // Header and HeaderRLP delegate to the underlying header require.Equal(t, header, info.Header()) _, err := info.HeaderRLP() require.NoError(t, err) From 417a7a2a7d706da7f571992244865cdabc2851ae Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Fri, 27 Feb 2026 14:01:56 -0500 Subject: [PATCH 10/15] pure: add logger, simplify L1 lookups, fix error propagation and batch validity MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Address review feedback: - Pass log.Logger through PureDerive → decodeBatches → validateBatch - Replace map-based L1 lookup with O(1) index arithmetic - Require l1Blocks to start ChannelTimeoutBedrock before safe head origin - decodeBatches 
no longer returns error; bad input is logged and returns partial results - Restructure EOF handling: if err == io.EOF { break } else if err != nil - Span batch: BatchPast → skip, other non-Accept → return collected batches - Invalid batch in derive loop → break (flush channel) instead of continue Co-Authored-By: Claude Opus 4.6 --- op-core/pure/batches.go | 66 +++++++++++++++++++++++++----------- op-core/pure/batches_test.go | 22 ++++++------ op-core/pure/derive.go | 52 +++++++++++++++++----------- op-core/pure/derive_test.go | 16 ++++----- 4 files changed, 99 insertions(+), 57 deletions(-) diff --git a/op-core/pure/batches.go b/op-core/pure/batches.go index 64a28c3813b87..c20c3726d8d1c 100644 --- a/op-core/pure/batches.go +++ b/op-core/pure/batches.go @@ -2,7 +2,6 @@ package pure import ( "context" - "fmt" "io" "github.com/ethereum/go-ethereum/core/types" @@ -17,38 +16,48 @@ import ( // and returns them as singular batches. Span batches are expanded into // individual singular batches using the provided L1 origins and cursor. // +// Decode errors are logged and cause the function to return whatever batches +// were successfully decoded so far. Only programming errors (bugs) would +// warrant propagating errors upward; all data-dependent failures are treated +// as bad input. +// // Span batch prefix validation is delegated to derive.CheckSpanBatchPrefix, -// which rejects overlapping span batches under Karst. +// which rejects overlapping span batches under Karst. If the prefix check +// returns BatchPast, the span batch is skipped. Any other non-Accept result +// causes the function to return the batches collected so far. 
func decodeBatches( + lgr log.Logger, r io.Reader, cfg *rollup.Config, l1Origins []eth.L1BlockRef, cursor l2Cursor, l1InclusionBlock eth.L1BlockRef, -) ([]*derive.SingularBatch, error) { +) []*derive.SingularBatch { spec := rollup.NewChainSpec(cfg) maxRLP := spec.MaxRLPBytesPerChannel(cursor.Timestamp) readBatch, err := derive.BatchReader(r, maxRLP, true) // Fjord always active (implied by Karst) if err != nil { - return nil, fmt.Errorf("creating batch reader: %w", err) + lgr.Warn("failed to create batch reader", "err", err) + return nil } var batches []*derive.SingularBatch for { batchData, err := readBatch() - if err != nil { - if err == io.EOF { - break - } - return nil, fmt.Errorf("reading batch: %w", err) + if err == io.EOF { + break + } else if err != nil { + lgr.Warn("failed to read batch", "err", err) + return batches } switch batchData.GetBatchType() { case derive.SingularBatchType: singular, err := derive.GetSingularBatch(batchData) if err != nil { - return nil, fmt.Errorf("extracting singular batch: %w", err) + lgr.Warn("failed to extract singular batch", "err", err) + return batches } batches = append(batches, singular) @@ -60,7 +69,8 @@ func decodeBatches( cfg.L2ChainID, ) if err != nil { - return nil, fmt.Errorf("deriving span batch: %w", err) + lgr.Warn("failed to derive span batch", "err", err) + return batches } l2SafeHead := eth.L2BlockRef{ @@ -81,25 +91,32 @@ func decodeBatches( validity, _ := derive.CheckSpanBatchPrefix( context.Background(), cfg, - log.NewLogger(log.DiscardHandler()), + lgr, l1Blocks, l2SafeHead, spanBatch, l1InclusionBlock, nil, ) + if validity == derive.BatchPast { + lgr.Debug("span batch is past safe head, skipping") + continue + } if validity != derive.BatchAccept { - return nil, fmt.Errorf("span batch prefix check failed (validity=%d)", validity) + lgr.Warn("span batch prefix check failed", "validity", validity) + return batches } singular, err := spanBatch.GetSingularBatches(l1Origins, l2SafeHead) if err != nil { - return 
nil, fmt.Errorf("expanding span batch: %w", err) + lgr.Warn("failed to expand span batch", "err", err) + return batches } batches = append(batches, singular...) default: - return nil, fmt.Errorf("unknown batch type: %d", batchData.GetBatchType()) + lgr.Warn("unknown batch type", "type", batchData.GetBatchType()) + return batches } } - return batches, nil + return batches } // validateBatch performs batch validation matching the checks in @@ -109,10 +126,10 @@ func decodeBatches( // // All other checks from checkSingularBatch are replicated here. With Karst active, // overlapping span batches are already rejected in decodeBatches. -func validateBatch(batch *derive.SingularBatch, cursor l2Cursor, l1Origins []eth.L1BlockRef, cfg *rollup.Config, l1InclusionNum uint64) bool { - // Timestamp must be the next expected L2 timestamp. +func validateBatch(lgr log.Logger, batch *derive.SingularBatch, cursor l2Cursor, l1Origins []eth.L1BlockRef, cfg *rollup.Config, l1InclusionNum uint64) bool { expectedTimestamp := cursor.Timestamp + cfg.BlockTime if batch.Timestamp != expectedTimestamp { + lgr.Warn("batch has wrong timestamp", "expected", expectedTimestamp, "got", batch.Timestamp) return false } @@ -120,14 +137,17 @@ func validateBatch(batch *derive.SingularBatch, cursor l2Cursor, l1Origins []eth // Sequence window: batch must be included within SeqWindowSize of its epoch. if epochNum+cfg.SeqWindowSize < l1InclusionNum { + lgr.Warn("batch sequence window expired", "epoch", epochNum, "inclusion", l1InclusionNum, "window", cfg.SeqWindowSize) return false } // Epoch must be current or next (cannot skip epochs). 
if epochNum < cursor.L1Origin.Number { + lgr.Warn("batch epoch too old", "epoch", epochNum, "cursor_origin", cursor.L1Origin.Number) return false } if epochNum > cursor.L1Origin.Number+1 { + lgr.Warn("batch epoch too new", "epoch", epochNum, "cursor_origin", cursor.L1Origin.Number) return false } @@ -140,14 +160,17 @@ func validateBatch(batch *derive.SingularBatch, cursor l2Cursor, l1Origins []eth } } if batchOrigin == nil { + lgr.Warn("batch epoch L1 origin not found", "epoch", epochNum) return false } if batch.EpochHash != batchOrigin.Hash { + lgr.Warn("batch epoch hash mismatch", "epoch", epochNum, "expected", batchOrigin.Hash, "got", batch.EpochHash) return false } // Batch timestamp must be >= L1 origin timestamp. if batch.Timestamp < batchOrigin.Time { + lgr.Warn("batch timestamp before L1 origin", "batch_time", batch.Timestamp, "l1_time", batchOrigin.Time) return false } @@ -162,28 +185,33 @@ func validateBatch(batch *derive.SingularBatch, cursor l2Cursor, l1Origins []eth for i := range l1Origins { if l1Origins[i].Number == epochNum+1 { if batch.Timestamp >= l1Origins[i].Time { - return false // should have adopted next origin + lgr.Warn("empty batch exceeds drift but should have adopted next origin") + return false } break } } } } else { + lgr.Warn("batch exceeds sequencer drift", "batch_time", batch.Timestamp, "max_drift", maxDrift) return false } } // Fork activation blocks must not contain user transactions. if cfg.IsKarstActivationBlock(batch.Timestamp) && len(batch.Transactions) > 0 { + lgr.Warn("batch has transactions at Karst activation block") return false } // Transaction validation. 
for _, txBytes := range batch.Transactions { if len(txBytes) == 0 { + lgr.Warn("batch contains empty transaction") return false } if txBytes[0] == types.DepositTxType { + lgr.Warn("batch contains deposit transaction") return false } } diff --git a/op-core/pure/batches_test.go b/op-core/pure/batches_test.go index 8cd105252ee2e..04d1066a5d3f9 100644 --- a/op-core/pure/batches_test.go +++ b/op-core/pure/batches_test.go @@ -6,6 +6,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/log" "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/op-node/rollup" @@ -13,6 +14,8 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" ) +var testLogger = log.NewLogger(log.DiscardHandler()) + func TestDecodeBatches_SingularBatch(t *testing.T) { cfg := testRollupConfig() safeHead := testSafeHead(cfg) @@ -30,8 +33,7 @@ func TestDecodeBatches_SingularBatch(t *testing.T) { cursor := newCursor(safeHead) l1Origins := []eth.L1BlockRef{testL1Ref(0), l1Ref} - batches, err := decodeBatches(bytes.NewReader(channelData), cfg, l1Origins, cursor, l1Ref) - require.NoError(t, err) + batches := decodeBatches(testLogger, bytes.NewReader(channelData), cfg, l1Origins, cursor, l1Ref) require.Len(t, batches, 1) decoded := batches[0] @@ -59,7 +61,7 @@ func TestValidateBatch_ValidSingular(t *testing.T) { l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - require.True(t, validateBatch(batch, cursor, l1Origins, cfg, l1Origin.Number)) + require.True(t, validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) } func TestValidateBatch_WrongTimestamp(t *testing.T) { @@ -80,7 +82,7 @@ func TestValidateBatch_WrongTimestamp(t *testing.T) { l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - require.False(t, validateBatch(batch, cursor, l1Origins, cfg, l1Origin.Number)) + require.False(t, validateBatch(testLogger, batch, cursor, l1Origins, 
cfg, l1Origin.Number)) } func TestValidateBatch_SpanBatchNoOverlap(t *testing.T) { @@ -102,7 +104,7 @@ func TestValidateBatch_SpanBatchNoOverlap(t *testing.T) { l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - require.False(t, validateBatch(batch, cursor, l1Origins, cfg, l1Origin.Number)) + require.False(t, validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) } func TestValidateBatch_EpochTooOld(t *testing.T) { @@ -124,7 +126,7 @@ func TestValidateBatch_EpochTooOld(t *testing.T) { l1Origins := []eth.L1BlockRef{oldOrigin, testL1Ref(4), l1Origin, testL1Ref(6)} - require.False(t, validateBatch(batch, cursor, l1Origins, cfg, l1Origin.Number)) + require.False(t, validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) } func TestValidateBatch_EpochTooNew(t *testing.T) { @@ -145,7 +147,7 @@ func TestValidateBatch_EpochTooNew(t *testing.T) { l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - require.False(t, validateBatch(batch, cursor, l1Origins, cfg, l1Origin.Number)) + require.False(t, validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) } func TestValidateBatch_SequenceWindowExpired(t *testing.T) { @@ -167,7 +169,7 @@ func TestValidateBatch_SequenceWindowExpired(t *testing.T) { l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} // Inclusion at block 16: epochNum(5) + SeqWindowSize(10) = 15 < 16 → expired - require.False(t, validateBatch(batch, cursor, l1Origins, cfg, 16)) + require.False(t, validateBatch(testLogger, batch, cursor, l1Origins, cfg, 16)) } func TestValidateBatch_EpochSkip(t *testing.T) { @@ -189,7 +191,7 @@ func TestValidateBatch_EpochSkip(t *testing.T) { l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6), testL1Ref(7)} - require.False(t, validateBatch(batch, cursor, l1Origins, cfg, l1Origin.Number)) + require.False(t, validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) } func 
TestValidateBatch_DepositTxRejected(t *testing.T) { @@ -211,5 +213,5 @@ func TestValidateBatch_DepositTxRejected(t *testing.T) { l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - require.False(t, validateBatch(batch, cursor, l1Origins, cfg, l1Origin.Number)) + require.False(t, validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) } diff --git a/op-core/pure/derive.go b/op-core/pure/derive.go index 91f971a97076b..7100f9634d643 100644 --- a/op-core/pure/derive.go +++ b/op-core/pure/derive.go @@ -3,11 +3,11 @@ package pure import ( "fmt" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" - "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -18,8 +18,9 @@ import ( // The function is stateless and deterministic: given the same inputs it always // produces the same outputs. No network access, no caching, no side effects. // -// l1Blocks should contain L1 blocks starting from safeHead.L1Origin.Number. -// The caller must ensure this range is complete. +// l1Blocks must be contiguous and strictly ordered by number. They should start +// at least ChannelTimeoutBedrock blocks before safeHead.L1Origin.Number to +// ensure channels opened before the safe head can still be decoded. // // Requires the Karst fork to be active at the safe head timestamp. Before Karst, // span batches may overlap the safe chain, which this implementation does not support. @@ -36,6 +37,7 @@ import ( func PureDerive( cfg *rollup.Config, l1ChainConfig *params.ChainConfig, + lgr log.Logger, safeHead eth.L2BlockRef, sysConfig eth.SystemConfig, l1Blocks []L1Input, @@ -48,26 +50,34 @@ func PureDerive( return nil, nil } - // Validate that l1Blocks start from the safe head's L1 origin. 
- firstL1Num := bigs.Uint64Strict(l1Blocks[0].Header.Number) - if firstL1Num > safeHead.L1Origin.Number { - return nil, fmt.Errorf("l1Blocks start at %d but safe head L1 origin is %d", firstL1Num, safeHead.L1Origin.Number) + // L1 blocks must be contiguous and strictly ordered. Compute the base + // number so we can do O(1) lookups by index arithmetic. + firstL1Num := l1Blocks[0].Header.Number.Uint64() + + // Require l1Blocks to start at least ChannelTimeoutBedrock before the safe + // head's L1 origin so that channels opened before the safe head are available. + requiredStart := safeHead.L1Origin.Number + if requiredStart > cfg.ChannelTimeoutBedrock { + requiredStart -= cfg.ChannelTimeoutBedrock + } else { + requiredStart = 0 + } + if firstL1Num > requiredStart { + return nil, fmt.Errorf("l1Blocks start at %d but must start at or before %d (safe head origin %d minus channel timeout %d)", + firstL1Num, requiredStart, safeHead.L1Origin.Number, cfg.ChannelTimeoutBedrock) } cursor := newCursor(safeHead) assembler := newChannelAssembler() - // Build index for O(1) lookups by L1 block number. 
- l1ByNumber := make(map[uint64]int, len(l1Blocks)) l1Origins := make([]eth.L1BlockRef, len(l1Blocks)) for i := range l1Blocks { - num := bigs.Uint64Strict(l1Blocks[i].Header.Number) - l1ByNumber[num] = i l1Origins[i] = l1Blocks[i].BlockRef() } findL1 := func(number uint64) *L1Input { - if idx, ok := l1ByNumber[number]; ok { + idx := int(number - firstL1Num) + if idx >= 0 && idx < len(l1Blocks) { return &l1Blocks[idx] } return nil @@ -79,8 +89,8 @@ func PureDerive( l1 := l1Blocks[i] l1Ref := l1.BlockRef() - for _, log := range l1.ConfigLogs { - if err := derive.ProcessSystemConfigUpdateLogEvent(&sysConfig, log, cfg, l1.Header.Time); err != nil { + for _, configLog := range l1.ConfigLogs { + if err := derive.ProcessSystemConfigUpdateLogEvent(&sysConfig, configLog, cfg, l1.Header.Time); err != nil { return nil, fmt.Errorf("processing system config update at L1 block %d: %w", l1Ref.Number, err) } } @@ -90,6 +100,7 @@ func PureDerive( for _, txData := range l1.BatcherData { frames, err := derive.ParseFrames(txData) if err != nil { + lgr.Warn("failed to parse frames", "l1_block", l1Ref.Number, "err", err) continue } @@ -99,14 +110,15 @@ func PureDerive( continue } - batches, err := decodeBatches(ready.channel.Reader(), cfg, l1Origins, cursor, ready.openBlock) - if err != nil { - continue - } + lgr.Debug("channel ready", "channel", ready.id, "l1_block", l1Ref.Number) + + batches := decodeBatches(lgr, ready.channel.Reader(), cfg, l1Origins, cursor, ready.openBlock) for _, batch := range batches { - if !validateBatch(batch, cursor, l1Origins, cfg, l1Ref.Number) { - continue + if !validateBatch(lgr, batch, cursor, l1Origins, cfg, l1Ref.Number) { + lgr.Warn("invalid batch, flushing channel", + "timestamp", batch.Timestamp, "epoch", batch.EpochNum, "l1_block", l1Ref.Number) + break } epochL1 := findL1(uint64(batch.EpochNum)) diff --git a/op-core/pure/derive_test.go b/op-core/pure/derive_test.go index 3e19235894496..fcf5ebc809f3c 100644 --- a/op-core/pure/derive_test.go +++ 
b/op-core/pure/derive_test.go @@ -23,7 +23,7 @@ func TestPureDerive_SingleBatch(t *testing.T) { l1Origin := makeTestL1Input(0) // safe head's L1 origin l1 := makeL1WithBatch(t, cfg, 1, safeHead, sysConfig) - derived, err := PureDerive(cfg, testL1ChainConfig(), safeHead, sysConfig, []L1Input{*l1Origin, *l1}) + derived, err := PureDerive(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig, []L1Input{*l1Origin, *l1}) require.NoError(t, err) require.Len(t, derived, 1) @@ -46,7 +46,7 @@ func TestPureDerive_EmptyEpoch(t *testing.T) { l1Blocks[i] = *makeTestL1Input(i) } - derived, err := PureDerive(cfg, testL1ChainConfig(), safeHead, sysConfig, l1Blocks) + derived, err := PureDerive(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig, l1Blocks) require.NoError(t, err) require.Greater(t, len(derived), 0, "empty batches should be generated when sequencer window expires") @@ -65,7 +65,7 @@ func TestPureDerive_MultipleChannelsAndEpochs(t *testing.T) { l1Blocks := makeMultiEpochL1Inputs(t, cfg, safeHead, sysConfig) - derived, err := PureDerive(cfg, testL1ChainConfig(), safeHead, sysConfig, l1Blocks) + derived, err := PureDerive(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig, l1Blocks) require.NoError(t, err) require.Greater(t, len(derived), 1, "should derive multiple blocks from multiple epochs") @@ -133,7 +133,7 @@ func TestPureDerive_ChannelTimeout(t *testing.T) { completeL1.BatcherData = [][]byte{completeTx} l1Blocks = append(l1Blocks, *completeL1) - derived, err := PureDerive(cfg, testL1ChainConfig(), safeHead, sysConfig, l1Blocks) + derived, err := PureDerive(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig, l1Blocks) require.NoError(t, err) // The incomplete channel timed out. Only the complete channel produces a block. 
@@ -170,7 +170,7 @@ func TestPureDerive_InvalidBatchSkipped(t *testing.T) { l1.BatcherData = [][]byte{batcherTx} l1Origin := makeTestL1Input(0) // safe head's L1 origin - derived, err := PureDerive(cfg, testL1ChainConfig(), safeHead, sysConfig, []L1Input{*l1Origin, *l1}) + derived, err := PureDerive(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig, []L1Input{*l1Origin, *l1}) require.NoError(t, err) require.Empty(t, derived, "invalid batch should be skipped without error") } @@ -181,7 +181,7 @@ func TestPureDerive_RejectsPreKarst(t *testing.T) { safeHead := testSafeHead(cfg) sysConfig := testSystemConfig() - _, err := PureDerive(cfg, testL1ChainConfig(), safeHead, sysConfig, nil) + _, err := PureDerive(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig, nil) require.Error(t, err) require.Contains(t, err.Error(), "Karst fork") } @@ -194,7 +194,7 @@ func TestPureDerive_ValidatesL1BlockRange(t *testing.T) { // Start L1 blocks after the safe head's L1 origin (gap) l1Blocks := []L1Input{*makeTestL1Input(5)} - _, err := PureDerive(cfg, testL1ChainConfig(), safeHead, sysConfig, l1Blocks) + _, err := PureDerive(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig, l1Blocks) require.Error(t, err) require.Contains(t, err.Error(), "l1Blocks start at") } @@ -204,7 +204,7 @@ func TestPureDerive_EmptyL1Blocks(t *testing.T) { safeHead := testSafeHead(cfg) sysConfig := testSystemConfig() - derived, err := PureDerive(cfg, testL1ChainConfig(), safeHead, sysConfig, nil) + derived, err := PureDerive(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig, nil) require.NoError(t, err) require.Nil(t, derived) } From ba0b42d0cc274c2e110ff46827752cbbdb5141c3 Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Mon, 2 Mar 2026 14:15:55 -0400 Subject: [PATCH 11/15] pure: address review round 4 - Use spec.ChannelTimeout(l1Ref.Time) instead of cfg.ChannelTimeoutBedrock for both the initial L1 range check and per-block timeout check. 
- Distinguish BatchPast (continue) from BatchDrop (break/flush channel) in the main derivation loop. - Add Jovian and Interop fork activation block checks alongside Karst. - Add SetCode (EIP-7702) transaction rejection before Isthmus. - validateBatch returns derive.BatchValidity instead of bool. - Remove redundant findL1 call in empty batch epoch advancement. Co-Authored-By: Claude Opus 4.6 --- op-core/pure/batches.go | 63 +++++++++++++++++++++++------------- op-core/pure/batches_test.go | 23 +++++++------ op-core/pure/derive.go | 35 +++++++++++++------- 3 files changed, 74 insertions(+), 47 deletions(-) diff --git a/op-core/pure/batches.go b/op-core/pure/batches.go index c20c3726d8d1c..18cb0c60e0c5f 100644 --- a/op-core/pure/batches.go +++ b/op-core/pure/batches.go @@ -124,13 +124,22 @@ func decodeBatches( // require L2 state access: // - Parent hash validation (deferred to post-execution via DerivedBlock.ExpectedParentHash) // -// All other checks from checkSingularBatch are replicated here. With Karst active, -// overlapping span batches are already rejected in decodeBatches. -func validateBatch(lgr log.Logger, batch *derive.SingularBatch, cursor l2Cursor, l1Origins []eth.L1BlockRef, cfg *rollup.Config, l1InclusionNum uint64) bool { +// Returns BatchAccept, BatchPast, or BatchDrop. With Karst active (implying +// Holocene), past batches return BatchPast instead of BatchDrop. +// +// Overlapping span batches are already rejected in decodeBatches via +// CheckSpanBatchPrefix. +func validateBatch(lgr log.Logger, batch *derive.SingularBatch, cursor l2Cursor, l1Origins []eth.L1BlockRef, cfg *rollup.Config, l1InclusionNum uint64) derive.BatchValidity { expectedTimestamp := cursor.Timestamp + cfg.BlockTime - if batch.Timestamp != expectedTimestamp { - lgr.Warn("batch has wrong timestamp", "expected", expectedTimestamp, "got", batch.Timestamp) - return false + + // Holocene (implied by Karst): past batches are BatchPast, future batches are BatchDrop. 
+ if batch.Timestamp > expectedTimestamp { + lgr.Warn("batch timestamp too new", "expected", expectedTimestamp, "got", batch.Timestamp) + return derive.BatchDrop + } + if batch.Timestamp < expectedTimestamp { + lgr.Debug("batch is past safe head", "expected", expectedTimestamp, "got", batch.Timestamp) + return derive.BatchPast } epochNum := uint64(batch.EpochNum) @@ -138,17 +147,17 @@ func validateBatch(lgr log.Logger, batch *derive.SingularBatch, cursor l2Cursor, // Sequence window: batch must be included within SeqWindowSize of its epoch. if epochNum+cfg.SeqWindowSize < l1InclusionNum { lgr.Warn("batch sequence window expired", "epoch", epochNum, "inclusion", l1InclusionNum, "window", cfg.SeqWindowSize) - return false + return derive.BatchDrop } // Epoch must be current or next (cannot skip epochs). if epochNum < cursor.L1Origin.Number { lgr.Warn("batch epoch too old", "epoch", epochNum, "cursor_origin", cursor.L1Origin.Number) - return false + return derive.BatchDrop } if epochNum > cursor.L1Origin.Number+1 { lgr.Warn("batch epoch too new", "epoch", epochNum, "cursor_origin", cursor.L1Origin.Number) - return false + return derive.BatchDrop } // Find the batch's L1 origin and verify epoch hash. @@ -161,17 +170,26 @@ func validateBatch(lgr log.Logger, batch *derive.SingularBatch, cursor l2Cursor, } if batchOrigin == nil { lgr.Warn("batch epoch L1 origin not found", "epoch", epochNum) - return false + return derive.BatchDrop } if batch.EpochHash != batchOrigin.Hash { lgr.Warn("batch epoch hash mismatch", "epoch", epochNum, "expected", batchOrigin.Hash, "got", batch.EpochHash) - return false + return derive.BatchDrop } // Batch timestamp must be >= L1 origin timestamp. if batch.Timestamp < batchOrigin.Time { lgr.Warn("batch timestamp before L1 origin", "batch_time", batch.Timestamp, "l1_time", batchOrigin.Time) - return false + return derive.BatchDrop + } + + // Fork activation blocks must not contain user transactions. 
+ if (cfg.IsJovianActivationBlock(batch.Timestamp) || + cfg.IsKarstActivationBlock(batch.Timestamp) || + cfg.IsInteropActivationBlock(batch.Timestamp)) && + len(batch.Transactions) > 0 { + lgr.Warn("batch has transactions at fork activation block") + return derive.BatchDrop } // Sequencer time drift: L2 time must not exceed L1 time + MaxSequencerDrift. @@ -186,7 +204,7 @@ func validateBatch(lgr log.Logger, batch *derive.SingularBatch, cursor l2Cursor, if l1Origins[i].Number == epochNum+1 { if batch.Timestamp >= l1Origins[i].Time { lgr.Warn("empty batch exceeds drift but should have adopted next origin") - return false + return derive.BatchDrop } break } @@ -194,27 +212,26 @@ func validateBatch(lgr log.Logger, batch *derive.SingularBatch, cursor l2Cursor, } } else { lgr.Warn("batch exceeds sequencer drift", "batch_time", batch.Timestamp, "max_drift", maxDrift) - return false + return derive.BatchDrop } } - // Fork activation blocks must not contain user transactions. - if cfg.IsKarstActivationBlock(batch.Timestamp) && len(batch.Transactions) > 0 { - lgr.Warn("batch has transactions at Karst activation block") - return false - } - // Transaction validation. 
+ isIsthmus := cfg.IsIsthmus(batch.Timestamp) for _, txBytes := range batch.Transactions { if len(txBytes) == 0 { lgr.Warn("batch contains empty transaction") - return false + return derive.BatchDrop } if txBytes[0] == types.DepositTxType { lgr.Warn("batch contains deposit transaction") - return false + return derive.BatchDrop + } + if !isIsthmus && txBytes[0] == types.SetCodeTxType { + lgr.Warn("batch contains SetCode transaction before Isthmus") + return derive.BatchDrop } } - return true + return derive.BatchAccept } diff --git a/op-core/pure/batches_test.go b/op-core/pure/batches_test.go index 04d1066a5d3f9..86bba7d9945f2 100644 --- a/op-core/pure/batches_test.go +++ b/op-core/pure/batches_test.go @@ -61,7 +61,7 @@ func TestValidateBatch_ValidSingular(t *testing.T) { l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - require.True(t, validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) + require.Equal(t, derive.BatchValidity(derive.BatchAccept), validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) } func TestValidateBatch_WrongTimestamp(t *testing.T) { @@ -77,15 +77,15 @@ func TestValidateBatch_WrongTimestamp(t *testing.T) { batch := &derive.SingularBatch{ EpochNum: rollup.Epoch(l1Origin.Number), EpochHash: l1Origin.Hash, - Timestamp: cursor.Timestamp + cfg.BlockTime + 1, // wrong + Timestamp: cursor.Timestamp + cfg.BlockTime + 1, // too new } l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - require.False(t, validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) + require.Equal(t, derive.BatchValidity(derive.BatchDrop), validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) } -func TestValidateBatch_SpanBatchNoOverlap(t *testing.T) { +func TestValidateBatch_PastTimestamp(t *testing.T) { cfg := testRollupConfig() l1Origin := testL1Ref(5) @@ -95,16 +95,15 @@ func TestValidateBatch_SpanBatchNoOverlap(t *testing.T) { L1Origin: l1Origin.ID(), } 
- // Timestamp before cursor (overlap) -- this will fail the timestamp == cursor + blockTime check batch := &derive.SingularBatch{ EpochNum: rollup.Epoch(l1Origin.Number), EpochHash: l1Origin.Hash, - Timestamp: cursor.Timestamp - 2, + Timestamp: cursor.Timestamp - 2, // in the past } l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - require.False(t, validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) + require.Equal(t, derive.BatchValidity(derive.BatchPast), validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) } func TestValidateBatch_EpochTooOld(t *testing.T) { @@ -126,7 +125,7 @@ func TestValidateBatch_EpochTooOld(t *testing.T) { l1Origins := []eth.L1BlockRef{oldOrigin, testL1Ref(4), l1Origin, testL1Ref(6)} - require.False(t, validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) + require.Equal(t, derive.BatchValidity(derive.BatchDrop), validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) } func TestValidateBatch_EpochTooNew(t *testing.T) { @@ -147,7 +146,7 @@ func TestValidateBatch_EpochTooNew(t *testing.T) { l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - require.False(t, validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) + require.Equal(t, derive.BatchValidity(derive.BatchDrop), validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) } func TestValidateBatch_SequenceWindowExpired(t *testing.T) { @@ -169,7 +168,7 @@ func TestValidateBatch_SequenceWindowExpired(t *testing.T) { l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} // Inclusion at block 16: epochNum(5) + SeqWindowSize(10) = 15 < 16 → expired - require.False(t, validateBatch(testLogger, batch, cursor, l1Origins, cfg, 16)) + require.Equal(t, derive.BatchValidity(derive.BatchDrop), validateBatch(testLogger, batch, cursor, l1Origins, cfg, 16)) } func TestValidateBatch_EpochSkip(t *testing.T) { @@ -191,7 +190,7 @@ func 
TestValidateBatch_EpochSkip(t *testing.T) { l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6), testL1Ref(7)} - require.False(t, validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) + require.Equal(t, derive.BatchValidity(derive.BatchDrop), validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) } func TestValidateBatch_DepositTxRejected(t *testing.T) { @@ -213,5 +212,5 @@ func TestValidateBatch_DepositTxRejected(t *testing.T) { l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - require.False(t, validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) + require.Equal(t, derive.BatchValidity(derive.BatchDrop), validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) } diff --git a/op-core/pure/derive.go b/op-core/pure/derive.go index 7100f9634d643..31d426be7233f 100644 --- a/op-core/pure/derive.go +++ b/op-core/pure/derive.go @@ -19,8 +19,8 @@ import ( // produces the same outputs. No network access, no caching, no side effects. // // l1Blocks must be contiguous and strictly ordered by number. They should start -// at least ChannelTimeoutBedrock blocks before safeHead.L1Origin.Number to -// ensure channels opened before the safe head can still be decoded. +// at least ChannelTimeout blocks before safeHead.L1Origin.Number to ensure +// channels opened before the safe head can still be decoded. // // Requires the Karst fork to be active at the safe head timestamp. Before Karst, // span batches may overlap the safe chain, which this implementation does not support. @@ -50,21 +50,24 @@ func PureDerive( return nil, nil } + spec := rollup.NewChainSpec(cfg) + // L1 blocks must be contiguous and strictly ordered. Compute the base // number so we can do O(1) lookups by index arithmetic. 
firstL1Num := l1Blocks[0].Header.Number.Uint64() - // Require l1Blocks to start at least ChannelTimeoutBedrock before the safe + // Require l1Blocks to start at least ChannelTimeout before the safe // head's L1 origin so that channels opened before the safe head are available. + channelTimeout := spec.ChannelTimeout(safeHead.Time) requiredStart := safeHead.L1Origin.Number - if requiredStart > cfg.ChannelTimeoutBedrock { - requiredStart -= cfg.ChannelTimeoutBedrock + if requiredStart > channelTimeout { + requiredStart -= channelTimeout } else { requiredStart = 0 } if firstL1Num > requiredStart { return nil, fmt.Errorf("l1Blocks start at %d but must start at or before %d (safe head origin %d minus channel timeout %d)", - firstL1Num, requiredStart, safeHead.L1Origin.Number, cfg.ChannelTimeoutBedrock) + firstL1Num, requiredStart, safeHead.L1Origin.Number, channelTimeout) } cursor := newCursor(safeHead) @@ -95,7 +98,7 @@ func PureDerive( } } - assembler.checkTimeout(l1Ref, cfg.ChannelTimeoutBedrock) + assembler.checkTimeout(l1Ref, spec.ChannelTimeout(l1Ref.Time)) for _, txData := range l1.BatcherData { frames, err := derive.ParseFrames(txData) @@ -115,7 +118,13 @@ func PureDerive( batches := decodeBatches(lgr, ready.channel.Reader(), cfg, l1Origins, cursor, ready.openBlock) for _, batch := range batches { - if !validateBatch(lgr, batch, cursor, l1Origins, cfg, l1Ref.Number) { + validity := validateBatch(lgr, batch, cursor, l1Origins, cfg, l1Ref.Number) + if validity == derive.BatchPast { + lgr.Debug("batch is past, skipping", + "timestamp", batch.Timestamp, "epoch", batch.EpochNum) + continue + } + if validity != derive.BatchAccept { lgr.Warn("invalid batch, flushing channel", "timestamp", batch.Timestamp, "epoch", batch.EpochNum, "l1_block", l1Ref.Number) break @@ -149,11 +158,17 @@ func PureDerive( newOrigin := cursor.L1Origin newSeqNum := cursor.SequenceNumber + 1 + epochL1 := findL1(cursor.L1Origin.Number) + if epochL1 == nil { + return nil, fmt.Errorf("missing L1 
block %d for empty batch epoch", cursor.L1Origin.Number) + } + // Advance epoch if the next L2 timestamp >= next L1 block's timestamp. nextL1 := findL1(cursor.L1Origin.Number + 1) if nextL1 != nil && nextTimestamp >= nextL1.Header.Time { newOrigin = nextL1.BlockID() newSeqNum = 0 + epochL1 = nextL1 } emptyBatch := &derive.SingularBatch{ @@ -162,10 +177,6 @@ func PureDerive( Timestamp: nextTimestamp, } - epochL1 := findL1(newOrigin.Number) - if epochL1 == nil { - return nil, fmt.Errorf("missing L1 block %d for empty batch epoch", newOrigin.Number) - } block, err := buildAttributes(emptyBatch, epochL1, cursor, sysConfig, cfg, l1ChainConfig) if err != nil { return nil, fmt.Errorf("building empty batch attributes at L1 block %d: %w", l1Ref.Number, err) From 07223e313029d1569e538d224b808cdbe0a0521b Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Mon, 2 Mar 2026 14:19:09 -0400 Subject: [PATCH 12/15] pure: add design doc with behavioral equivalence mapping Documents the objective, architecture, and a check-by-check comparison between validateBatch and upstream checkSingularBatch. Co-Authored-By: Claude Opus 4.6 --- op-core/pure/DESIGN.md | 159 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 159 insertions(+) create mode 100644 op-core/pure/DESIGN.md diff --git a/op-core/pure/DESIGN.md b/op-core/pure/DESIGN.md new file mode 100644 index 0000000000000..c5b5b9566fbc8 --- /dev/null +++ b/op-core/pure/DESIGN.md @@ -0,0 +1,159 @@ +# Pure Derivation Pipeline + +## Objective + +Implement a pure function that derives L2 payload attributes from L1 data, +equivalent in behavior to the existing streaming pipeline in +`op-node/rollup/derive`, but without I/O, caching, or state access. + +``` +PureDerive(cfg, l1ChainConfig, logger, safeHead, sysConfig, l1Blocks) → []DerivedBlock +``` + +Given the same inputs, the function always produces the same outputs. The caller +provides all L1 data upfront; the function never fetches anything. 
+ +## Motivation + +The existing derivation pipeline is streaming and pull-based: it requests L1 +data on demand, maintains internal state across steps, and interleaves I/O with +computation. This makes it difficult to test, reason about, and use in contexts +where all data is already available (ZK provers, auditing tools, replay +utilities). + +A pure function is deterministic, composable, and trivially testable. + +## Scope + +**In scope:** Post-Karst derivation only. Karst implies Holocene, Granite, +Fjord, and all prior forks. This simplifies the implementation: + +- No `BatchFuture` or `BatchUndecided` (Holocene semantics: future → drop, + undecided conditions don't arise with complete L1 data) +- No span batch overlap handling (Karst rejects overlapping span batches as + `BatchPast`) +- Single-channel assembly (Holocene rule: one active channel at a time) +- Strict frame ordering (Holocene) + +**Out of scope:** +- Pre-Karst derivation +- Pipeline reset / reorg detection (caller responsibility) +- L2 execution (we produce attributes, not executed blocks) + +## Architecture + +``` +L1Input[] ──► frame parsing ──► channel assembly ──► batch decoding ──► batch validation ──► attribute building ──► DerivedBlock[] + │ + timeout check + (per L1 block) +``` + +### Components + +| File | Responsibility | +|------|---------------| +| `derive.go` | `PureDerive` entry point, main loop over L1 blocks | +| `channels.go` | Push-based Holocene single-channel assembler | +| `batches.go` | `decodeBatches` (channel → singular batches), `validateBatch` | +| `attributes.go` | `buildAttributes` (batch + L1 data → PayloadAttributes) | +| `types.go` | `L1Input`, `DerivedBlock`, `l2Cursor` | + +### Main Loop (derive.go) + +For each L1 block: +1. Process system config update logs +2. Check channel timeout (fork-aware via `spec.ChannelTimeout`) +3. Parse frames from batcher transactions +4. Assemble frames into channels +5. 
When a channel completes: decode batches, validate each, build attributes +6. After processing all channels: generate empty batches if the sequencing + window has expired + +### Empty Batch Generation + +When no batcher data covers a time range and the sequencing window expires +(`currentL1.Number > cursor.L1Origin.Number + SeqWindowSize`), the pipeline +generates empty batches to maintain L2 liveness. Epoch advancement follows the +rule: advance to the next L1 origin when the L2 timestamp >= the next L1 +block's timestamp. + +## Behavioral Equivalence + +The implementation must match `checkSingularBatch` in +`op-node/rollup/derive/batches.go` for all checks that don't require L2 state. + +### Upstream Check Mapping + +| # | Upstream Check | Pure Implementation | Notes | +|---|---------------|-------------------|-------| +| 1 | `len(l1Blocks) == 0` → `BatchUndecided` | N/A | We always have all L1 data | +| 2 | `timestamp > next` → `BatchFuture`/`BatchDrop` | `BatchDrop` | Holocene always active (implied by Karst) | +| 3 | `timestamp < next` → `BatchDrop`/`BatchPast` | `BatchPast` | Holocene always active | +| 4 | Parent hash mismatch → `BatchDrop` | Deferred | Stored in `DerivedBlock.ExpectedParentHash` for post-execution verification | +| 5 | Sequence window expired → `BatchDrop` | `epochNum + SeqWindowSize < l1InclusionNum` → `BatchDrop` | Equivalent | +| 6a | Epoch too old → `BatchDrop` | `epochNum < cursor.L1Origin.Number` → `BatchDrop` | Equivalent | +| 6b | Epoch is next but no L1 data → `BatchUndecided` | N/A | We always have all L1 data | +| 6c | Epoch too far ahead → `BatchDrop` | `epochNum > cursor.L1Origin.Number+1` → `BatchDrop` | Equivalent | +| 7 | Epoch hash mismatch → `BatchDrop` | Look up origin, compare hash → `BatchDrop` | Equivalent | +| 8 | Timestamp < L1 origin time → `BatchDrop` | `batch.Timestamp < batchOrigin.Time` → `BatchDrop` | Equivalent | +| 9 | Fork activation block with txs → `BatchDrop` | Jovian, Karst, Interop checks → `BatchDrop` 
| Equivalent | +| 10 | Sequencer drift exceeded → `BatchDrop` | Same logic with empty batch exception → `BatchDrop` | Equivalent | +| 11a | Empty transaction → `BatchDrop` | `len(txBytes) == 0` → `BatchDrop` | Equivalent | +| 11b | Deposit transaction → `BatchDrop` | `txBytes[0] == DepositTxType` → `BatchDrop` | Equivalent | +| 11c | SetCode before Isthmus → `BatchDrop` | `!isIsthmus && txBytes[0] == SetCodeTxType` → `BatchDrop` | Equivalent | +| 12 | All pass → `BatchAccept` | → `BatchAccept` | Equivalent | + +### Intentional Differences + +1. **Parent hash validation (check #4):** Deferred to post-execution. The pure + function has no L2 block hashes. The caller can verify + `DerivedBlock.ExpectedParentHash` against actual execution results. + +2. **No `BatchUndecided` or `BatchFuture`:** With Holocene active and all L1 + data provided, these states cannot occur. + +3. **Span batch overlaps:** Under Karst, `CheckSpanBatchPrefix` rejects + overlapping span batches as `BatchPast` (upstream treats them as errors + pre-Karst). This is the one behavioral change vs pre-Karst upstream. + +4. **`BatchPast` handling:** In the main loop, `BatchPast` batches are skipped + (`continue`), not flushed. `BatchDrop` and other non-accept results cause a + `break` that flushes the remaining batches from the current channel. This + matches Holocene semantics where past batches are harmless leftovers. + +### Attribute Building Equivalence + +`buildAttributes` matches `derive.AttributesDeposited` for: +- L1 info deposit transaction (via `derive.L1InfoDeposit`) +- User deposits at epoch boundaries +- Sequencer transactions from the batch +- Canyon withdrawals, Ecotone parent beacon root +- Holocene EIP-1559 params, Jovian MinBaseFee +- Gas limit from system config +- `NoTxPool: true` + +Not included: network upgrade transactions (NUTs) for pre-Karst forks, since +all pre-Karst forks are already active. Future forks with NUTs must be added. 
+ +## Dependencies on Upstream + +The implementation reuses these upstream types and functions: +- `derive.ParseFrames`, `derive.Channel`, `derive.Frame` +- `derive.BatchReader`, `derive.GetSingularBatch`, `derive.DeriveSpanBatch` +- `derive.CheckSpanBatchPrefix` +- `derive.L1InfoDeposit` +- `derive.ProcessSystemConfigUpdateLogEvent` +- `rollup.Config`, `rollup.ChainSpec` +- `eth.PayloadAttributes`, `eth.L1BlockRef`, `eth.L2BlockRef`, `eth.SystemConfig` + +## Testing + +Unit tests cover each component in isolation: +- `batches_test.go`: Batch decoding and all `validateBatch` rejection paths +- `channels_test.go`: Channel assembly, timeout, frame ordering +- `attributes_test.go`: Payload attribute construction +- `types_test.go`: Cursor advancement, empty batch detection +- `derive_test.go`: Integration tests for `PureDerive` (single batch, empty + epochs, multi-channel, channel timeout, invalid batch skip, pre-Karst + rejection, L1 range validation) From e792afc4f652ae0bc2e5288af586af785559a119 Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Mon, 2 Mar 2026 14:22:51 -0400 Subject: [PATCH 13/15] pure: fix span batch inclusion block Pass l1Ref (where the channel completed) instead of ready.openBlock (where the channel opened) to decodeBatches. The inclusion block is used by CheckSpanBatchPrefix for the sequence window check and must be the L1 block where the batch became available for derivation. 
Co-Authored-By: Claude Opus 4.6 --- op-core/pure/derive.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/op-core/pure/derive.go b/op-core/pure/derive.go index 31d426be7233f..eeb6d657320d1 100644 --- a/op-core/pure/derive.go +++ b/op-core/pure/derive.go @@ -115,7 +115,7 @@ func PureDerive( lgr.Debug("channel ready", "channel", ready.id, "l1_block", l1Ref.Number) - batches := decodeBatches(lgr, ready.channel.Reader(), cfg, l1Origins, cursor, ready.openBlock) + batches := decodeBatches(lgr, ready.channel.Reader(), cfg, l1Origins, cursor, l1Ref) for _, batch := range batches { validity := validateBatch(lgr, batch, cursor, l1Origins, cfg, l1Ref.Number) From af368d199a6175e2815d0015dfa8f6e0d4af3a63 Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Mon, 2 Mar 2026 19:44:08 -0400 Subject: [PATCH 14/15] derive: replace batch-mode PureDerive with iterator-style Deriver Introduces a Deriver iterator that produces one PayloadAttributes at a time from incrementally-added L1 blocks, replacing the batch-mode PureDerive function. 
Key changes: - NewDeriver/AddL1Block/Next/Reset API for incremental derivation - Upstream CheckBatch for batch validation (including parent hash checks) - ErrNeedL1Data/ErrReorg sentinels for iterator control flow - Empty batch generation extracted as pure makeEmptyBatch function - Package renamed from op-core/pure to op-core/derive (upstream aliased as opderive) - Removed PureDerive, validateBatch, and DerivedBlock Co-Authored-By: Claude Opus 4.6 --- op-core/{pure => derive}/DESIGN.md | 0 op-core/{pure => derive}/attributes.go | 23 +- op-core/{pure => derive}/attributes_test.go | 60 ++- op-core/derive/batches.go | 119 ++++++ op-core/derive/batches_test.go | 39 ++ op-core/{pure => derive}/channels.go | 22 +- op-core/{pure => derive}/channels_test.go | 26 +- op-core/derive/deriver.go | 298 ++++++++++++++ op-core/derive/deriver_test.go | 408 ++++++++++++++++++++ op-core/derive/empty_batch.go | 39 ++ op-core/derive/empty_batch_test.go | 82 ++++ op-core/{pure => derive}/helpers_test.go | 33 +- op-core/{pure => derive}/types.go | 17 +- op-core/{pure => derive}/types_test.go | 2 +- op-core/pure/batches.go | 237 ------------ op-core/pure/batches_test.go | 216 ----------- op-core/pure/derive.go | 190 --------- op-core/pure/derive_test.go | 275 ------------- 18 files changed, 1080 insertions(+), 1006 deletions(-) rename op-core/{pure => derive}/DESIGN.md (100%) rename op-core/{pure => derive}/attributes.go (79%) rename op-core/{pure => derive}/attributes_test.go (80%) create mode 100644 op-core/derive/batches.go create mode 100644 op-core/derive/batches_test.go rename op-core/{pure => derive}/channels.go (78%) rename op-core/{pure => derive}/channels_test.go (87%) create mode 100644 op-core/derive/deriver.go create mode 100644 op-core/derive/deriver_test.go create mode 100644 op-core/derive/empty_batch.go create mode 100644 op-core/derive/empty_batch_test.go rename op-core/{pure => derive}/helpers_test.go (87%) rename op-core/{pure => derive}/types.go (85%) rename 
op-core/{pure => derive}/types_test.go (99%) delete mode 100644 op-core/pure/batches.go delete mode 100644 op-core/pure/batches_test.go delete mode 100644 op-core/pure/derive.go delete mode 100644 op-core/pure/derive_test.go diff --git a/op-core/pure/DESIGN.md b/op-core/derive/DESIGN.md similarity index 100% rename from op-core/pure/DESIGN.md rename to op-core/derive/DESIGN.md diff --git a/op-core/pure/attributes.go b/op-core/derive/attributes.go similarity index 79% rename from op-core/pure/attributes.go rename to op-core/derive/attributes.go index 065b8a0f9c9e7..e9cd4556b32fc 100644 --- a/op-core/pure/attributes.go +++ b/op-core/derive/attributes.go @@ -1,4 +1,4 @@ -package pure +package derive import ( "fmt" @@ -10,13 +10,12 @@ import ( "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/eth" ) -// buildAttributes constructs a DerivedBlock (PayloadAttributes + metadata) from -// a validated singular batch, its L1 origin, the current derivation cursor, -// and the active system config. +// buildAttributes constructs PayloadAttributes from a validated singular batch, +// its L1 origin, the current derivation cursor, and the active system config. // // Transaction ordering follows the OP Stack derivation spec: // 1. L1 info deposit transaction (always first) @@ -24,16 +23,16 @@ import ( // 3. Batch transactions from the sequencer // // Network upgrade transactions (NUTs) are not included because all pre-Karst -// forks are already active (PureDerive requires Karst), and Karst itself has +// forks are already active (the Deriver requires Karst), and Karst itself has // no NUTs. Future forks with NUTs must be added here. 
func buildAttributes( - batch *derive.SingularBatch, + batch *opderive.SingularBatch, l1Block *L1Input, cursor l2Cursor, sysConfig eth.SystemConfig, cfg *rollup.Config, l1ChainConfig *params.ChainConfig, -) (*DerivedBlock, error) { +) (*eth.PayloadAttributes, error) { epochChanged := uint64(batch.EpochNum) != cursor.L1Origin.Number var seqNumber uint64 @@ -45,7 +44,7 @@ func buildAttributes( l2Timestamp := batch.Timestamp - l1InfoTx, err := derive.L1InfoDeposit(cfg, l1ChainConfig, sysConfig, seqNumber, eth.HeaderBlockInfo(l1Block.Header), l2Timestamp) + l1InfoTx, err := opderive.L1InfoDeposit(cfg, l1ChainConfig, sysConfig, seqNumber, eth.HeaderBlockInfo(l1Block.Header), l2Timestamp) if err != nil { return nil, fmt.Errorf("failed to create L1 info deposit tx: %w", err) } @@ -106,9 +105,5 @@ func buildAttributes( attrs.MinBaseFee = &sysConfig.MinBaseFee } - return &DerivedBlock{ - Attributes: attrs, - ExpectedParentHash: batch.ParentHash, - DerivedFrom: l1Block.BlockRef(), - }, nil + return attrs, nil } diff --git a/op-core/pure/attributes_test.go b/op-core/derive/attributes_test.go similarity index 80% rename from op-core/pure/attributes_test.go rename to op-core/derive/attributes_test.go index 8a296d1d4d2c8..a2d374fd1ab17 100644 --- a/op-core/pure/attributes_test.go +++ b/op-core/derive/attributes_test.go @@ -1,4 +1,4 @@ -package pure +package derive import ( "testing" @@ -9,7 +9,7 @@ import ( "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/stretchr/testify/require" @@ -25,7 +25,7 @@ func TestBuildAttributes_EpochStart(t *testing.T) { l1Hash := l1Block.Header.Hash() userTx := hexutil.Bytes{0x01, 0x02, 0x03} - batch := &derive.SingularBatch{ + 
batch := &opderive.SingularBatch{ ParentHash: common.HexToHash("0xaaaa"), EpochNum: rollup.Epoch(l1Num), EpochHash: l1Hash, @@ -41,12 +41,9 @@ func TestBuildAttributes_EpochStart(t *testing.T) { SequenceNumber: 3, } - result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) + attrs, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) require.NoError(t, err) - require.NotNil(t, result) - require.NotNil(t, result.Attributes) - - attrs := result.Attributes + require.NotNil(t, attrs) // L1 info deposit + 2 user deposits + 1 batch tx = 4 require.GreaterOrEqual(t, len(attrs.Transactions), 3) @@ -63,9 +60,6 @@ func TestBuildAttributes_EpochStart(t *testing.T) { // The last transaction should be the batch tx require.Equal(t, userTx, attrs.Transactions[len(attrs.Transactions)-1]) - - require.Equal(t, batch.ParentHash, result.ExpectedParentHash) - require.Equal(t, l1Block.BlockRef(), result.DerivedFrom) } func TestBuildAttributes_SameEpoch(t *testing.T) { @@ -78,7 +72,7 @@ func TestBuildAttributes_SameEpoch(t *testing.T) { l1Hash := l1Block.Header.Hash() userTx := hexutil.Bytes{0xaa, 0xbb} - batch := &derive.SingularBatch{ + batch := &opderive.SingularBatch{ ParentHash: common.HexToHash("0xbbbb"), EpochNum: rollup.Epoch(l1Num), EpochHash: l1Hash, @@ -94,11 +88,9 @@ func TestBuildAttributes_SameEpoch(t *testing.T) { SequenceNumber: 2, } - result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) + attrs, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) require.NoError(t, err) - require.NotNil(t, result) - - attrs := result.Attributes + require.NotNil(t, attrs) // L1 info deposit + 1 batch tx = 2 (no user deposits because same epoch) require.GreaterOrEqual(t, len(attrs.Transactions), 2) @@ -122,7 +114,7 @@ func TestBuildAttributes_EmptyBatch(t *testing.T) { l1Num := bigs.Uint64Strict(l1Block.Header.Number) l1Hash := l1Block.Header.Hash() - 
batch := &derive.SingularBatch{ + batch := &opderive.SingularBatch{ ParentHash: common.HexToHash("0xcccc"), EpochNum: rollup.Epoch(l1Num), EpochHash: l1Hash, @@ -137,11 +129,11 @@ func TestBuildAttributes_EmptyBatch(t *testing.T) { SequenceNumber: 0, } - result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) + attrs, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) require.NoError(t, err) // L1 info deposit + 1 user deposit = 2 (no batch txs) - require.Len(t, result.Attributes.Transactions, 2) + require.Len(t, attrs.Transactions, 2) }) t.Run("empty batch same epoch", func(t *testing.T) { @@ -150,7 +142,7 @@ func TestBuildAttributes_EmptyBatch(t *testing.T) { l1Num := bigs.Uint64Strict(l1Block.Header.Number) l1Hash := l1Block.Header.Hash() - batch := &derive.SingularBatch{ + batch := &opderive.SingularBatch{ ParentHash: common.HexToHash("0xdddd"), EpochNum: rollup.Epoch(l1Num), EpochHash: l1Hash, @@ -165,11 +157,11 @@ func TestBuildAttributes_EmptyBatch(t *testing.T) { SequenceNumber: 1, } - result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) + attrs, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) require.NoError(t, err) // Only L1 info deposit, no user deposits, no batch txs - require.Len(t, result.Attributes.Transactions, 1) + require.Len(t, attrs.Transactions, 1) }) } @@ -182,7 +174,7 @@ func TestBuildAttributes_HoloceneFields(t *testing.T) { l1Num := bigs.Uint64Strict(l1Block.Header.Number) l1Hash := l1Block.Header.Hash() - batch := &derive.SingularBatch{ + batch := &opderive.SingularBatch{ ParentHash: common.HexToHash("0xeeee"), EpochNum: rollup.Epoch(l1Num), EpochHash: l1Hash, @@ -196,12 +188,12 @@ func TestBuildAttributes_HoloceneFields(t *testing.T) { SequenceNumber: 0, } - result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) + attrs, err := buildAttributes(batch, l1Block, 
cursor, sysConfig, cfg, testL1ChainConfig()) require.NoError(t, err) - require.NotNil(t, result.Attributes.EIP1559Params) - require.Equal(t, sysConfig.EIP1559Params, *result.Attributes.EIP1559Params) - require.NotNil(t, result.Attributes.ParentBeaconBlockRoot) - require.NotNil(t, result.Attributes.Withdrawals) + require.NotNil(t, attrs.EIP1559Params) + require.Equal(t, sysConfig.EIP1559Params, *attrs.EIP1559Params) + require.NotNil(t, attrs.ParentBeaconBlockRoot) + require.NotNil(t, attrs.Withdrawals) } func TestBuildAttributes_SequenceNumber(t *testing.T) { @@ -212,7 +204,7 @@ func TestBuildAttributes_SequenceNumber(t *testing.T) { l1Hash := l1Block.Header.Hash() t.Run("epoch start resets to zero", func(t *testing.T) { - batch := &derive.SingularBatch{ + batch := &opderive.SingularBatch{ ParentHash: common.HexToHash("0x1111"), EpochNum: rollup.Epoch(l1Num), EpochHash: l1Hash, @@ -226,13 +218,13 @@ func TestBuildAttributes_SequenceNumber(t *testing.T) { SequenceNumber: 5, } - result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) + attrs, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) require.NoError(t, err) - require.NotNil(t, result) + require.NotNil(t, attrs) }) t.Run("same epoch increments", func(t *testing.T) { - batch := &derive.SingularBatch{ + batch := &opderive.SingularBatch{ ParentHash: common.HexToHash("0x2222"), EpochNum: rollup.Epoch(l1Num), EpochHash: l1Hash, @@ -246,8 +238,8 @@ func TestBuildAttributes_SequenceNumber(t *testing.T) { SequenceNumber: 5, } - result, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) + attrs, err := buildAttributes(batch, l1Block, cursor, sysConfig, cfg, testL1ChainConfig()) require.NoError(t, err) - require.NotNil(t, result) + require.NotNil(t, attrs) }) } diff --git a/op-core/derive/batches.go b/op-core/derive/batches.go new file mode 100644 index 0000000000000..e83c4c68d5982 --- /dev/null +++ 
b/op-core/derive/batches.go @@ -0,0 +1,119 @@ +package derive + +import ( + "context" + "io" + + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// decodeBatches reads all batches from a completed channel's compressed data +// and returns them as singular batches. Span batches are expanded into +// individual singular batches using the provided L1 origins and cursor. +// +// Decode errors are logged and cause the function to return whatever batches +// were successfully decoded so far. Only programming errors (bugs) would +// warrant propagating errors upward; all data-dependent failures are treated +// as bad input. +// +// Span batch prefix validation is delegated to opderive.CheckSpanBatchPrefix, +// which rejects overlapping span batches under Karst. If the prefix check +// returns BatchPast, the span batch is skipped. Any other non-Accept result +// causes the function to return the batches collected so far. 
+func decodeBatches( + lgr log.Logger, + r io.Reader, + cfg *rollup.Config, + l1Origins []eth.L1BlockRef, + cursor l2Cursor, + l1InclusionBlock eth.L1BlockRef, +) []*opderive.SingularBatch { + spec := rollup.NewChainSpec(cfg) + maxRLP := spec.MaxRLPBytesPerChannel(cursor.Timestamp) + + readBatch, err := opderive.BatchReader(r, maxRLP, true) // Fjord always active (implied by Karst) + if err != nil { + lgr.Warn("failed to create batch reader", "err", err) + return nil + } + + var batches []*opderive.SingularBatch + for { + batchData, err := readBatch() + if err == io.EOF { + break + } else if err != nil { + lgr.Warn("failed to read batch", "err", err) + return batches + } + + switch batchData.GetBatchType() { + case opderive.SingularBatchType: + singular, err := opderive.GetSingularBatch(batchData) + if err != nil { + lgr.Warn("failed to extract singular batch", "err", err) + return batches + } + batches = append(batches, singular) + + case opderive.SpanBatchType: + spanBatch, err := opderive.DeriveSpanBatch( + batchData, + cfg.BlockTime, + cfg.Genesis.L2Time, + cfg.L2ChainID, + ) + if err != nil { + lgr.Warn("failed to derive span batch", "err", err) + return batches + } + + l2SafeHead := eth.L2BlockRef{ + Number: cursor.Number, + Time: cursor.Timestamp, + L1Origin: cursor.L1Origin, + SequenceNumber: cursor.SequenceNumber, + } + + // Build l1Blocks slice starting from the cursor's epoch, as + // CheckSpanBatchPrefix expects l1Blocks[0] to be the current epoch. 
+ var l1Blocks []eth.L1BlockRef + for _, ref := range l1Origins { + if ref.Number >= cursor.L1Origin.Number { + l1Blocks = append(l1Blocks, ref) + } + } + + validity, _ := opderive.CheckSpanBatchPrefix( + context.Background(), cfg, + lgr, + l1Blocks, l2SafeHead, spanBatch, l1InclusionBlock, nil, + ) + if validity == opderive.BatchPast { + lgr.Debug("span batch is past safe head, skipping") + continue + } + if validity != opderive.BatchAccept { + lgr.Warn("span batch prefix check failed", "validity", validity) + return batches + } + + singular, err := spanBatch.GetSingularBatches(l1Origins, l2SafeHead) + if err != nil { + lgr.Warn("failed to expand span batch", "err", err) + return batches + } + batches = append(batches, singular...) + + default: + lgr.Warn("unknown batch type", "type", batchData.GetBatchType()) + return batches + } + } + + return batches +} diff --git a/op-core/derive/batches_test.go b/op-core/derive/batches_test.go new file mode 100644 index 0000000000000..0eb5849c94c06 --- /dev/null +++ b/op-core/derive/batches_test.go @@ -0,0 +1,39 @@ +package derive + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +func TestDecodeBatches_SingularBatch(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + l1Ref := testL1Ref(1) + + batch := &opderive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(l1Ref.Number), + EpochHash: l1Ref.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + } + + channelData := encodeBatchToChannelData(t, batch) + + cursor := newCursor(safeHead) + l1Origins := []eth.L1BlockRef{testL1Ref(0), l1Ref} + + batches := decodeBatches(testLogger, bytes.NewReader(channelData), cfg, l1Origins, cursor, l1Ref) + require.Len(t, batches, 1) + + decoded := batches[0] + require.Equal(t, 
batch.ParentHash, decoded.ParentHash) + require.Equal(t, batch.EpochNum, decoded.EpochNum) + require.Equal(t, batch.EpochHash, decoded.EpochHash) + require.Equal(t, batch.Timestamp, decoded.Timestamp) +} diff --git a/op-core/pure/channels.go b/op-core/derive/channels.go similarity index 78% rename from op-core/pure/channels.go rename to op-core/derive/channels.go index ea1b8d9e6edff..003f13799710e 100644 --- a/op-core/pure/channels.go +++ b/op-core/derive/channels.go @@ -1,15 +1,15 @@ -package pure +package derive import ( - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/eth" ) // readyChannel is a completed channel ready for batch decoding. type readyChannel struct { - id derive.ChannelID + id opderive.ChannelID openBlock eth.L1BlockRef - channel *derive.Channel + channel *opderive.Channel } // channelAssembler implements Holocene single-channel strict-order assembly. @@ -22,8 +22,8 @@ type readyChannel struct { // streaming pipeline. Our push-based model feeds frames directly, making a // simpler implementation appropriate. type channelAssembler struct { - current *derive.Channel - currentID derive.ChannelID + current *opderive.Channel + currentID opderive.ChannelID openBlock eth.L1BlockRef nextFrame uint16 } @@ -33,9 +33,9 @@ func newChannelAssembler() *channelAssembler { } // addFrame processes a single frame. Returns a readyChannel if the channel is complete. 
-func (ca *channelAssembler) addFrame(frame derive.Frame, l1Ref eth.L1BlockRef) *readyChannel { +func (ca *channelAssembler) addFrame(frame opderive.Frame, l1Ref eth.L1BlockRef) *readyChannel { if ca.current == nil || frame.ID != ca.currentID { - ca.current = derive.NewChannel(frame.ID, l1Ref, true) + ca.current = opderive.NewChannel(frame.ID, l1Ref, true) ca.currentID = frame.ID ca.openBlock = l1Ref ca.nextFrame = 0 @@ -62,6 +62,12 @@ func (ca *channelAssembler) addFrame(frame derive.Frame, l1Ref eth.L1BlockRef) * return nil } +// reset clears the assembler's in-progress channel state. +func (ca *channelAssembler) reset() { + ca.current = nil + ca.nextFrame = 0 +} + // checkTimeout returns true and discards the current channel if it has timed out. func (ca *channelAssembler) checkTimeout(current eth.L1BlockRef, channelTimeout uint64) bool { if ca.current == nil { diff --git a/op-core/pure/channels_test.go b/op-core/derive/channels_test.go similarity index 87% rename from op-core/pure/channels_test.go rename to op-core/derive/channels_test.go index 054f0bcb289c5..8b983fbda088e 100644 --- a/op-core/pure/channels_test.go +++ b/op-core/derive/channels_test.go @@ -1,14 +1,14 @@ -package pure +package derive import ( "testing" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/stretchr/testify/require" ) -func testChannelID(b byte) derive.ChannelID { - var id derive.ChannelID +func testChannelID(b byte) opderive.ChannelID { + var id opderive.ChannelID id[0] = b return id } @@ -17,7 +17,7 @@ func TestChannelAssembler_SingleFrameChannel(t *testing.T) { ca := newChannelAssembler() l1 := testL1Ref(1) - ready := ca.addFrame(derive.Frame{ + ready := ca.addFrame(opderive.Frame{ ID: testChannelID(0xAA), FrameNumber: 0, Data: []byte("hello"), @@ -35,7 +35,7 @@ func TestChannelAssembler_MultiFrameChannel(t *testing.T) { chID := testChannelID(0xBB) l1 := testL1Ref(1) - ready := 
ca.addFrame(derive.Frame{ + ready := ca.addFrame(opderive.Frame{ ID: chID, FrameNumber: 0, Data: []byte("part1"), @@ -44,7 +44,7 @@ func TestChannelAssembler_MultiFrameChannel(t *testing.T) { require.Nil(t, ready, "channel should not be ready after first frame") l1b := testL1Ref(2) - ready = ca.addFrame(derive.Frame{ + ready = ca.addFrame(opderive.Frame{ ID: chID, FrameNumber: 1, Data: []byte("part2"), @@ -63,7 +63,7 @@ func TestChannelAssembler_NewChannelDiscardsOld(t *testing.T) { chB := testChannelID(0xBB) l1 := testL1Ref(1) - ready := ca.addFrame(derive.Frame{ + ready := ca.addFrame(opderive.Frame{ ID: chA, FrameNumber: 0, Data: []byte("A-frame0"), @@ -73,7 +73,7 @@ func TestChannelAssembler_NewChannelDiscardsOld(t *testing.T) { require.Equal(t, chA, ca.currentID) l1b := testL1Ref(2) - ready = ca.addFrame(derive.Frame{ + ready = ca.addFrame(opderive.Frame{ ID: chB, FrameNumber: 0, Data: []byte("B-frame0"), @@ -90,7 +90,7 @@ func TestChannelAssembler_Timeout(t *testing.T) { chID := testChannelID(0xCC) l1Open := testL1Ref(10) - ca.addFrame(derive.Frame{ + ca.addFrame(opderive.Frame{ ID: chID, FrameNumber: 0, Data: []byte("data"), @@ -116,7 +116,7 @@ func TestChannelAssembler_OutOfOrderFrame(t *testing.T) { chID := testChannelID(0xDD) l1 := testL1Ref(1) - ready := ca.addFrame(derive.Frame{ + ready := ca.addFrame(opderive.Frame{ ID: chID, FrameNumber: 0, Data: []byte("frame0"), @@ -124,7 +124,7 @@ func TestChannelAssembler_OutOfOrderFrame(t *testing.T) { }, l1) require.Nil(t, ready) - ready = ca.addFrame(derive.Frame{ + ready = ca.addFrame(opderive.Frame{ ID: chID, FrameNumber: 2, // skip frame 1 Data: []byte("frame2"), @@ -135,7 +135,7 @@ func TestChannelAssembler_OutOfOrderFrame(t *testing.T) { require.NotNil(t, ca.current, "channel should still be in progress") require.Equal(t, uint16(1), ca.nextFrame, "nextFrame should still expect frame 1") - ready = ca.addFrame(derive.Frame{ + ready = ca.addFrame(opderive.Frame{ ID: chID, FrameNumber: 1, Data: 
[]byte("frame1"), diff --git a/op-core/derive/deriver.go b/op-core/derive/deriver.go new file mode 100644 index 0000000000000..735def7a83ea8 --- /dev/null +++ b/op-core/derive/deriver.go @@ -0,0 +1,298 @@ +package derive + +import ( + "context" + "fmt" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// Deriver is an iterator that produces one payload attributes at a time from +// incrementally-added L1 blocks. It replaces the batch-mode PureDerive function +// with an API that matches how derivation works in practice: derive one block, +// execute on engine, verify, then derive the next. +type Deriver struct { + cfg *rollup.Config + l1ChainConfig *params.ChainConfig + lgr log.Logger + spec *rollup.ChainSpec + + // L1 data — appended via AddL1Block + l1Blocks []L1Input + l1Origins []eth.L1BlockRef + firstL1Num uint64 + + // System config — evolves with config logs + sysConfig eth.SystemConfig + + // L1 processing position — next L1 block index to scan for frames + l1Pos int + + // Channel assembly + assembler *channelAssembler + + // Batch buffer from completed channels + pendingBatches []*opderive.SingularBatch + batchInclusionBlock eth.L1BlockRef + + // Derivation cursor + cursor l2Cursor +} + +// NewDeriver creates a new iterator-style deriver starting from the given safe +// head. The caller must then call AddL1Block to provide L1 data and Next to +// consume derived payload attributes. 
+func NewDeriver( + cfg *rollup.Config, + l1ChainConfig *params.ChainConfig, + lgr log.Logger, + safeHead eth.L2BlockRef, + sysConfig eth.SystemConfig, +) (*Deriver, error) { + if !cfg.IsKarst(safeHead.Time) { + return nil, fmt.Errorf("derivation requires Karst fork (no overlapping span batches), safe head time %d is pre-Karst", safeHead.Time) + } + + return &Deriver{ + cfg: cfg, + l1ChainConfig: l1ChainConfig, + lgr: lgr, + spec: rollup.NewChainSpec(cfg), + sysConfig: sysConfig, + assembler: newChannelAssembler(), + cursor: newCursor(safeHead), + }, nil +} + +// AddL1Block appends one or more L1 blocks. Blocks must be contiguous with +// previously added blocks. Returns ErrReorg if a block's parent hash doesn't +// match the tip of the already-added chain. +func (d *Deriver) AddL1Block(blocks ...L1Input) error { + for i := range blocks { + ref := blocks[i].BlockRef() + + if len(d.l1Origins) > 0 { + tip := d.l1Origins[len(d.l1Origins)-1] + if ref.ParentHash != tip.Hash { + return fmt.Errorf("%w: block %d parent %s != tip %s", ErrReorg, ref.Number, ref.ParentHash, tip.Hash) + } + } + + if len(d.l1Blocks) == 0 { + d.firstL1Num = ref.Number + } + + d.l1Blocks = append(d.l1Blocks, blocks[i]) + d.l1Origins = append(d.l1Origins, ref) + } + return nil +} + +// Next returns the next derived payload attributes and the L1 block they were +// derived from. safeHead provides the current L2 safe head (including Hash) +// for parent hash validation via upstream CheckBatch. +// +// Returns ErrNeedL1Data when more L1 blocks are needed. +func (d *Deriver) Next(safeHead eth.L2BlockRef) (*eth.PayloadAttributes, eth.L1BlockRef, error) { + // Step 1: Try consuming from pending batches first. + if attrs, l1Ref, err := d.tryPendingBatch(safeHead); err != nil { + return nil, eth.L1BlockRef{}, err + } else if attrs != nil { + return attrs, l1Ref, nil + } + + // Step 2: Process more L1 blocks to find new channels/batches. 
+ for d.l1Pos < len(d.l1Blocks) { + if err := d.processNextL1Block(); err != nil { + return nil, eth.L1BlockRef{}, err + } + + // If we got new pending batches, try them. + if attrs, l1Ref, err := d.tryPendingBatch(safeHead); err != nil { + return nil, eth.L1BlockRef{}, err + } else if attrs != nil { + return attrs, l1Ref, nil + } + + // After each L1 block, check if the seq window expired → empty batch. + if attrs, l1Ref, err := d.tryEmptyBatch(safeHead); err != nil { + return nil, eth.L1BlockRef{}, err + } else if attrs != nil { + return attrs, l1Ref, nil + } + } + + // Step 3: Nothing to do — need more L1 data. + return nil, eth.L1BlockRef{}, ErrNeedL1Data +} + +// Reset clears all internal state back to the given safe head + system config. +// Used after L1 reorgs. The caller must re-add L1 blocks from the new chain. +func (d *Deriver) Reset(safeHead eth.L2BlockRef, sysConfig eth.SystemConfig) { + d.l1Blocks = nil + d.l1Origins = nil + d.firstL1Num = 0 + d.l1Pos = 0 + d.sysConfig = sysConfig + d.assembler.reset() + d.pendingBatches = nil + d.batchInclusionBlock = eth.L1BlockRef{} + d.cursor = newCursor(safeHead) +} + +// findL1 does O(1) lookup by block number into the l1Blocks slice. +func (d *Deriver) findL1(number uint64) *L1Input { + if len(d.l1Blocks) == 0 { + return nil + } + idx := int(number - d.firstL1Num) + if idx >= 0 && idx < len(d.l1Blocks) { + return &d.l1Blocks[idx] + } + return nil +} + +// tryPendingBatch validates the next pending batch via CheckBatch and builds +// attributes if accepted. +func (d *Deriver) tryPendingBatch(safeHead eth.L2BlockRef) (*eth.PayloadAttributes, eth.L1BlockRef, error) { + for len(d.pendingBatches) > 0 { + batch := d.pendingBatches[0] + + // Build l1Blocks slice for CheckBatch: must start at safeHead.L1Origin. 
+ startIdx := int(safeHead.L1Origin.Number - d.firstL1Num) + if startIdx < 0 || startIdx >= len(d.l1Origins) { + return nil, eth.L1BlockRef{}, ErrNeedL1Data + } + l1BlocksForCheck := d.l1Origins[startIdx:] + + batchWithInclusion := &opderive.BatchWithL1InclusionBlock{ + Batch: batch, + L1InclusionBlock: d.batchInclusionBlock, + } + + validity := opderive.CheckBatch( + context.Background(), d.cfg, d.lgr, + l1BlocksForCheck, safeHead, batchWithInclusion, nil, + ) + + switch validity { + case opderive.BatchAccept: + d.pendingBatches = d.pendingBatches[1:] + + epochL1 := d.findL1(uint64(batch.EpochNum)) + if epochL1 == nil { + return nil, eth.L1BlockRef{}, fmt.Errorf("missing L1 block %d for batch epoch", batch.EpochNum) + } + + attrs, err := buildAttributes(batch, epochL1, d.cursor, d.sysConfig, d.cfg, d.l1ChainConfig) + if err != nil { + return nil, eth.L1BlockRef{}, fmt.Errorf("building attributes: %w", err) + } + + epochID := eth.BlockID{Number: uint64(batch.EpochNum), Hash: batch.EpochHash} + var seqNum uint64 + if epochID.Number != d.cursor.L1Origin.Number { + seqNum = 0 + } else { + seqNum = d.cursor.SequenceNumber + 1 + } + d.cursor.advance(batch.Timestamp, epochID, seqNum) + + return attrs, d.batchInclusionBlock, nil + + case opderive.BatchPast: + d.pendingBatches = d.pendingBatches[1:] + continue + + case opderive.BatchUndecided: + return nil, eth.L1BlockRef{}, ErrNeedL1Data + + default: // BatchDrop, BatchFuture, etc. + d.lgr.Warn("invalid batch, discarding remaining channel batches", + "timestamp", batch.Timestamp, "epoch", batch.EpochNum, "validity", validity) + d.pendingBatches = nil + return nil, eth.L1BlockRef{}, nil + } + } + return nil, eth.L1BlockRef{}, nil +} + +// processNextL1Block processes the L1 block at l1Pos: applies config logs, +// checks channel timeout, parses frames, and decodes any completed channels +// into pendingBatches. 
+func (d *Deriver) processNextL1Block() error { + l1 := d.l1Blocks[d.l1Pos] + l1Ref := d.l1Origins[d.l1Pos] + d.l1Pos++ + + for _, configLog := range l1.ConfigLogs { + if err := opderive.ProcessSystemConfigUpdateLogEvent(&d.sysConfig, configLog, d.cfg, l1.Header.Time); err != nil { + return fmt.Errorf("processing system config update at L1 block %d: %w", l1Ref.Number, err) + } + } + + d.assembler.checkTimeout(l1Ref, d.spec.ChannelTimeout(l1Ref.Time)) + + for _, txData := range l1.BatcherData { + frames, err := opderive.ParseFrames(txData) + if err != nil { + d.lgr.Warn("failed to parse frames", "l1_block", l1Ref.Number, "err", err) + continue + } + + for _, frame := range frames { + ready := d.assembler.addFrame(frame, l1Ref) + if ready == nil { + continue + } + + d.lgr.Debug("channel ready", "channel", ready.id, "l1_block", l1Ref.Number) + + batches := decodeBatches(d.lgr, ready.channel.Reader(), d.cfg, d.l1Origins, d.cursor, l1Ref) + if len(batches) > 0 { + d.pendingBatches = batches + d.batchInclusionBlock = l1Ref + } + } + } + + return nil +} + +// tryEmptyBatch generates an empty batch if the sequencing window has expired +// for the most recently processed L1 block. 
+func (d *Deriver) tryEmptyBatch(safeHead eth.L2BlockRef) (*eth.PayloadAttributes, eth.L1BlockRef, error) { + if d.l1Pos == 0 { + return nil, eth.L1BlockRef{}, nil + } + currentL1 := d.l1Origins[d.l1Pos-1] + + if !d.cursor.needsEmptyBatch(currentL1, d.cfg) { + return nil, eth.L1BlockRef{}, nil + } + + batch, epochL1, newOrigin := makeEmptyBatch(d.cursor, d.findL1, d.cfg) + if batch == nil { + return nil, eth.L1BlockRef{}, nil + } + + attrs, err := buildAttributes(batch, epochL1, d.cursor, d.sysConfig, d.cfg, d.l1ChainConfig) + if err != nil { + return nil, eth.L1BlockRef{}, fmt.Errorf("building empty batch attributes: %w", err) + } + + var seqNum uint64 + if newOrigin.Number != d.cursor.L1Origin.Number { + seqNum = 0 + } else { + seqNum = d.cursor.SequenceNumber + 1 + } + d.cursor.advance(batch.Timestamp, newOrigin, seqNum) + + return attrs, currentL1, nil +} diff --git a/op-core/derive/deriver_test.go b/op-core/derive/deriver_test.go new file mode 100644 index 0000000000000..ea62ef387af77 --- /dev/null +++ b/op-core/derive/deriver_test.go @@ -0,0 +1,408 @@ +package derive + +import ( + "bytes" + "errors" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +var testLogger = log.NewLogger(log.DiscardHandler()) + +// addBatchToL1 adds a batcher transaction containing the given singular batch +// to a pre-existing L1Input. 
+func addBatchToL1(t *testing.T, l1 *L1Input, batch *opderive.SingularBatch) {
+	t.Helper()
+	channelData := encodeBatchToChannelData(t, batch)
+	var chID opderive.ChannelID
+	copy(chID[:], l1.Header.Hash().Bytes())
+	l1.BatcherData = [][]byte{wrapInFrames(channelData, chID)}
+}
+
+func TestDeriver_SingleBatch(t *testing.T) {
+	cfg := testRollupConfig()
+	safeHead := testSafeHead(cfg)
+	sysConfig := testSystemConfig()
+
+	d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig)
+	require.NoError(t, err)
+
+	chain := makeTestL1Chain(2)
+	l1Ref1 := chain[1].BlockRef()
+	addBatchToL1(t, chain[1], &opderive.SingularBatch{
+		ParentHash: safeHead.Hash,
+		EpochNum:   rollup.Epoch(l1Ref1.Number),
+		EpochHash:  l1Ref1.Hash,
+		Timestamp:  safeHead.Time + cfg.BlockTime,
+	})
+
+	require.NoError(t, d.AddL1Block(*chain[0], *chain[1]))
+
+	attrs, l1Ref, err := d.Next(safeHead)
+	require.NoError(t, err)
+	require.NotNil(t, attrs)
+	require.Equal(t, hexutil.Uint64(safeHead.Time+cfg.BlockTime), attrs.Timestamp)
+	require.True(t, attrs.NoTxPool)
+	require.Equal(t, chain[1].BlockRef(), l1Ref)
+
+	// No more to derive.
+	_, _, err = d.Next(safeHead)
+	require.True(t, errors.Is(err, ErrNeedL1Data))
+}
+
+func TestDeriver_NeedL1Data(t *testing.T) {
+	cfg := testRollupConfig()
+	safeHead := testSafeHead(cfg)
+	sysConfig := testSystemConfig()
+
+	d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig)
+	require.NoError(t, err)
+
+	_, _, err = d.Next(safeHead)
+	require.True(t, errors.Is(err, ErrNeedL1Data))
+}
+
+func TestDeriver_IncrementalL1(t *testing.T) {
+	cfg := testRollupConfig()
+	safeHead := testSafeHead(cfg)
+	sysConfig := testSystemConfig()
+
+	d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig)
+	require.NoError(t, err)
+
+	chain := makeTestL1Chain(2)
+	require.NoError(t, d.AddL1Block(*chain[0]))
+
+	// No batches in block 0, need more data.
+ _, _, err = d.Next(safeHead) + require.True(t, errors.Is(err, ErrNeedL1Data)) + + // Add block 1 with a batch. + l1Ref1 := chain[1].BlockRef() + addBatchToL1(t, chain[1], &opderive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(l1Ref1.Number), + EpochHash: l1Ref1.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + }) + require.NoError(t, d.AddL1Block(*chain[1])) + + attrs, _, err := d.Next(safeHead) + require.NoError(t, err) + require.NotNil(t, attrs) + require.Equal(t, hexutil.Uint64(safeHead.Time+cfg.BlockTime), attrs.Timestamp) +} + +func TestDeriver_EmptyBatches(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig) + require.NoError(t, err) + + // Add SeqWindowSize + 2 L1 blocks with no batcher data. + numBlocks := cfg.SeqWindowSize + 2 + chain := makeTestL1Chain(numBlocks) + for _, block := range chain { + require.NoError(t, d.AddL1Block(*block)) + } + + // Should generate empty batches when the sequencing window expires. + currentSafeHead := safeHead + var derived []*eth.PayloadAttributes + for { + attrs, _, err := d.Next(currentSafeHead) + if errors.Is(err, ErrNeedL1Data) { + break + } + require.NoError(t, err) + require.NotNil(t, attrs) + derived = append(derived, attrs) + + // Advance the safe head for the next call. 
+ currentSafeHead = eth.L2BlockRef{ + Hash: common.Hash{byte(len(derived))}, + Number: currentSafeHead.Number + 1, + Time: uint64(attrs.Timestamp), + L1Origin: currentSafeHead.L1Origin, + SequenceNumber: currentSafeHead.SequenceNumber + 1, + } + } + + require.Greater(t, len(derived), 0, "empty batches should be generated when sequencer window expires") + + expectedTimestamp := safeHead.Time + cfg.BlockTime + for _, attrs := range derived { + require.Equal(t, hexutil.Uint64(expectedTimestamp), attrs.Timestamp) + expectedTimestamp += cfg.BlockTime + } +} + +func TestDeriver_ReorgDetection(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig) + require.NoError(t, err) + + chain := makeTestL1Chain(1) + require.NoError(t, d.AddL1Block(*chain[0])) + + // Create a block that doesn't chain to block 0. + reorgedBlock := makeTestL1Input(1) + reorgedBlock.Header.ParentHash = common.HexToHash("0xbadparent") + + err = d.AddL1Block(*reorgedBlock) + require.True(t, errors.Is(err, ErrReorg)) +} + +func TestDeriver_ReorgReset(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig) + require.NoError(t, err) + + chain := makeTestL1Chain(2) + l1Ref1 := chain[1].BlockRef() + addBatchToL1(t, chain[1], &opderive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(l1Ref1.Number), + EpochHash: l1Ref1.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + }) + require.NoError(t, d.AddL1Block(*chain[0], *chain[1])) + + // Derive the first block. + attrs, _, err := d.Next(safeHead) + require.NoError(t, err) + require.NotNil(t, attrs) + + // Now reset (simulating reorg). + d.Reset(safeHead, sysConfig) + + // Need L1 data again. 
+ _, _, err = d.Next(safeHead) + require.True(t, errors.Is(err, ErrNeedL1Data)) + + // Re-add blocks, can derive again. + require.NoError(t, d.AddL1Block(*chain[0], *chain[1])) + attrs, _, err = d.Next(safeHead) + require.NoError(t, err) + require.NotNil(t, attrs) +} + +func TestDeriver_ChannelTimeout(t *testing.T) { + cfg := testRollupConfig() + cfg.ChannelTimeoutBedrock = 2 + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig) + require.NoError(t, err) + + chain := makeTestL1Chain(5) + l1Block0Ref := chain[0].BlockRef() + + // Incomplete channel at L1 block 1. + incompleteChID := testChannelID(0xAA) + channelData := encodeBatchToChannelData(t, &opderive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(l1Block0Ref.Number), + EpochHash: l1Block0Ref.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + }) + frame0 := opderive.Frame{ + ID: incompleteChID, + FrameNumber: 0, + Data: channelData, + IsLast: false, + } + var buf bytes.Buffer + buf.WriteByte(params.DerivationVersion0) + require.NoError(t, frame0.MarshalBinary(&buf)) + chain[1].BatcherData = [][]byte{buf.Bytes()} + + // Complete channel at L1 block 4 (after timeout: 4 > 1 + 2). 
+ completeChID := testChannelID(0xBB) + completeBatch := &opderive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(l1Block0Ref.Number), + EpochHash: l1Block0Ref.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + } + completeChannelData := encodeBatchToChannelData(t, completeBatch) + completeTx := wrapInFrames(completeChannelData, completeChID) + chain[4].BatcherData = [][]byte{completeTx} + + for _, block := range chain { + require.NoError(t, d.AddL1Block(*block)) + } + + attrs, _, err := d.Next(safeHead) + require.NoError(t, err) + require.NotNil(t, attrs) + require.Equal(t, hexutil.Uint64(safeHead.Time+cfg.BlockTime), attrs.Timestamp) +} + +func TestDeriver_InvalidBatchDropped(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig) + require.NoError(t, err) + + chain := makeTestL1Chain(2) + l1Ref1 := chain[1].BlockRef() + + // Batch with wrong timestamp. + addBatchToL1(t, chain[1], &opderive.SingularBatch{ + ParentHash: safeHead.Hash, + EpochNum: rollup.Epoch(l1Ref1.Number), + EpochHash: l1Ref1.Hash, + Timestamp: safeHead.Time + cfg.BlockTime + 999, + }) + + require.NoError(t, d.AddL1Block(*chain[0], *chain[1])) + + // The invalid batch should be dropped, returning ErrNeedL1Data. + _, _, err = d.Next(safeHead) + require.True(t, errors.Is(err, ErrNeedL1Data)) +} + +func TestDeriver_ParentHashCheck(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig) + require.NoError(t, err) + + chain := makeTestL1Chain(2) + l1Ref1 := chain[1].BlockRef() + + // Create a batch with WRONG parent hash. 
+ addBatchToL1(t, chain[1], &opderive.SingularBatch{ + ParentHash: common.HexToHash("0xwrongparent"), + EpochNum: rollup.Epoch(l1Ref1.Number), + EpochHash: l1Ref1.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + }) + + require.NoError(t, d.AddL1Block(*chain[0], *chain[1])) + + // CheckBatch will reject this because ParentHash != safeHead.Hash. + _, _, err = d.Next(safeHead) + require.True(t, errors.Is(err, ErrNeedL1Data)) +} + +func TestDeriver_RejectsPreKarst(t *testing.T) { + cfg := testRollupConfig() + cfg.KarstTime = nil + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + _, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig) + require.Error(t, err) + require.Contains(t, err.Error(), "Karst fork") +} + +func TestDeriver_MultipleChannelsAndEpochs(t *testing.T) { + cfg := testRollupConfig() + safeHead := testSafeHead(cfg) + sysConfig := testSystemConfig() + + d, err := NewDeriver(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig) + require.NoError(t, err) + + chain := makeTestL1Chain(4) + + // Parent hashes must match the safe head hashes we'll pass to Next(). + // We use a deterministic scheme: genesis hash, then Hash{1}, Hash{2}, etc. 
+ l2Hashes := []common.Hash{ + safeHead.Hash, + {1}, + {2}, + {3}, + } + + // Block 1: batch for epoch 1 + l1Ref1 := chain[1].BlockRef() + addBatchToL1(t, chain[1], &opderive.SingularBatch{ + ParentHash: l2Hashes[0], + EpochNum: rollup.Epoch(l1Ref1.Number), + EpochHash: l1Ref1.Hash, + Timestamp: safeHead.Time + cfg.BlockTime, + }) + + // Block 2: batch for epoch 2 + l1Ref2 := chain[2].BlockRef() + chData2 := encodeBatchToChannelData(t, &opderive.SingularBatch{ + ParentHash: l2Hashes[1], + EpochNum: rollup.Epoch(l1Ref2.Number), + EpochHash: l1Ref2.Hash, + Timestamp: safeHead.Time + 2*cfg.BlockTime, + }) + var chID2 opderive.ChannelID + chID2[0] = 0x02 + chain[2].BatcherData = [][]byte{wrapInFrames(chData2, chID2)} + + // Block 3: batch for epoch 3 + l1Ref3 := chain[3].BlockRef() + chData3 := encodeBatchToChannelData(t, &opderive.SingularBatch{ + ParentHash: l2Hashes[2], + EpochNum: rollup.Epoch(l1Ref3.Number), + EpochHash: l1Ref3.Hash, + Timestamp: safeHead.Time + 3*cfg.BlockTime, + }) + var chID3 opderive.ChannelID + chID3[0] = 0x03 + chain[3].BatcherData = [][]byte{wrapInFrames(chData3, chID3)} + + for _, block := range chain { + require.NoError(t, d.AddL1Block(*block)) + } + + var derived []*eth.PayloadAttributes + currentSafeHead := safeHead + for { + attrs, _, err := d.Next(currentSafeHead) + if errors.Is(err, ErrNeedL1Data) { + break + } + require.NoError(t, err) + require.NotNil(t, attrs) + derived = append(derived, attrs) + + epochIdx := len(derived) // epoch advances 1:1 with derived blocks here + currentSafeHead = eth.L2BlockRef{ + Hash: l2Hashes[epochIdx], + Number: currentSafeHead.Number + 1, + Time: uint64(attrs.Timestamp), + L1Origin: chain[epochIdx].BlockRef().ID(), + SequenceNumber: 0, + } + } + + require.Greater(t, len(derived), 1, "should derive multiple blocks from multiple epochs") + + expectedTimestamp := safeHead.Time + cfg.BlockTime + for i, attrs := range derived { + require.Equal(t, hexutil.Uint64(expectedTimestamp), attrs.Timestamp, + 
"block %d should have timestamp %d", i, expectedTimestamp) + expectedTimestamp += cfg.BlockTime + } +} diff --git a/op-core/derive/empty_batch.go b/op-core/derive/empty_batch.go new file mode 100644 index 0000000000000..9942a40c7f0a1 --- /dev/null +++ b/op-core/derive/empty_batch.go @@ -0,0 +1,39 @@ +package derive + +import ( + "github.com/ethereum-optimism/optimism/op-node/rollup" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// makeEmptyBatch generates a single empty batch when the sequencing window has +// expired. It returns the batch, the L1 input for the batch's epoch (for +// buildAttributes), and the new L1 origin. Returns nil if no empty batch is needed. +// +// The epoch advances when the next L2 timestamp >= the next L1 block's timestamp. +func makeEmptyBatch( + cursor l2Cursor, + findL1 func(uint64) *L1Input, + cfg *rollup.Config, +) (*opderive.SingularBatch, *L1Input, eth.BlockID) { + nextTimestamp := cursor.Timestamp + cfg.BlockTime + newOrigin := cursor.L1Origin + epochL1 := findL1(cursor.L1Origin.Number) + if epochL1 == nil { + return nil, nil, eth.BlockID{} + } + + nextL1 := findL1(cursor.L1Origin.Number + 1) + if nextL1 != nil && nextTimestamp >= nextL1.Header.Time { + newOrigin = nextL1.BlockID() + epochL1 = nextL1 + } + + batch := &opderive.SingularBatch{ + EpochNum: rollup.Epoch(newOrigin.Number), + EpochHash: newOrigin.Hash, + Timestamp: nextTimestamp, + } + + return batch, epochL1, newOrigin +} diff --git a/op-core/derive/empty_batch_test.go b/op-core/derive/empty_batch_test.go new file mode 100644 index 0000000000000..f1147ef7354c9 --- /dev/null +++ b/op-core/derive/empty_batch_test.go @@ -0,0 +1,82 @@ +package derive + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/stretchr/testify/require" +) + +func TestMakeEmptyBatch_SameEpoch(t *testing.T) { + cfg := testRollupConfig() + + // Cursor at L1 origin 5, 
next L1 block at num 6 has time 12.
+	// Test L1 blocks have Time = num*2, so L1#6 has time 12. For the
+	// cursor to stay in the same epoch, the next L2 timestamp must be
+	// below 12, so use cursor.Timestamp = 4: nextTimestamp = 4 + 2 = 6,
+	// which is < 12, keeping the cursor at L1 origin 5.
+	cursor := l2Cursor{
+		Number:    10,
+		Timestamp: 4, // nextTimestamp = 6
+		L1Origin:  testL1Ref(5).ID(),
+	}
+
+	// L1 block 6 has time 12. nextTimestamp = 6 < 12 → stays at same epoch.
+	findL1 := func(num uint64) *L1Input {
+		if num <= 6 {
+			return makeTestL1Input(num)
+		}
+		return nil
+	}
+
+	batch, epochL1, newOrigin := makeEmptyBatch(cursor, findL1, cfg)
+	require.NotNil(t, batch)
+	require.NotNil(t, epochL1)
+	require.Equal(t, uint64(5), newOrigin.Number)
+	require.Equal(t, uint64(6), batch.Timestamp)
+	require.Equal(t, cursor.L1Origin.Number, uint64(batch.EpochNum))
+}
+
+func TestMakeEmptyBatch_AdvancesEpoch(t *testing.T) {
+	cfg := testRollupConfig()
+
+	// Cursor at L1 origin 5. L1 block 5 has time 10, L1 block 6 has time 12.
+	// If cursor.Timestamp = 100, nextTimestamp = 102 >= 12 → advances epoch.
+ cursor := l2Cursor{ + Number: 10, + Timestamp: 100, + L1Origin: testL1Ref(5).ID(), + } + + findL1 := func(num uint64) *L1Input { + if num <= 6 { + return makeTestL1Input(num) + } + return nil + } + + batch, epochL1, newOrigin := makeEmptyBatch(cursor, findL1, cfg) + require.NotNil(t, batch) + require.NotNil(t, epochL1) + require.Equal(t, uint64(6), newOrigin.Number) + require.Equal(t, uint64(102), batch.Timestamp) + require.Equal(t, uint64(6), uint64(batch.EpochNum)) +} + +func TestMakeEmptyBatch_MissingL1(t *testing.T) { + cfg := testRollupConfig() + + cursor := l2Cursor{ + Number: 10, + Timestamp: 100, + L1Origin: eth.BlockID{Number: 99}, + } + + findL1 := func(num uint64) *L1Input { + return nil + } + + batch, epochL1, _ := makeEmptyBatch(cursor, findL1, cfg) + require.Nil(t, batch) + require.Nil(t, epochL1) +} diff --git a/op-core/pure/helpers_test.go b/op-core/derive/helpers_test.go similarity index 87% rename from op-core/pure/helpers_test.go rename to op-core/derive/helpers_test.go index 30cefcbb4c476..8f7f423480364 100644 --- a/op-core/pure/helpers_test.go +++ b/op-core/derive/helpers_test.go @@ -1,4 +1,4 @@ -package pure +package derive import ( "bytes" @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + opderive "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -110,12 +110,27 @@ func testL1Ref(num uint64) eth.L1BlockRef { return input.BlockRef() } +// makeTestL1Chain creates a sequence of properly chained L1 inputs where each +// block's ParentHash matches the previous block's actual hash. This is required +// for AddL1Block's reorg detection. 
+func makeTestL1Chain(count uint64) []*L1Input { + chain := make([]*L1Input, count) + for i := uint64(0); i < count; i++ { + input := makeTestL1Input(i) + if i > 0 { + input.Header.ParentHash = chain[i-1].Header.Hash() + } + chain[i] = input + } + return chain +} + // encodeBatchToChannelData RLP-encodes a singular batch and zlib-compresses // it into channel data (the format read by the channel reader stage). -func encodeBatchToChannelData(t *testing.T, batch *derive.SingularBatch) []byte { +func encodeBatchToChannelData(t *testing.T, batch *opderive.SingularBatch) []byte { t.Helper() - bd := derive.NewBatchData(batch) + bd := opderive.NewBatchData(batch) batchBytes, err := bd.MarshalBinary() if err != nil { t.Fatalf("marshal batch: %v", err) @@ -142,8 +157,8 @@ func encodeBatchToChannelData(t *testing.T, batch *derive.SingularBatch) []byte // wrapInFrames wraps channel data in a single-frame batcher transaction. // The result is a raw batcher tx data payload (DerivationVersion0 prefix + frame). 
-func wrapInFrames(channelData []byte, channelID derive.ChannelID) []byte { - frame := derive.Frame{ +func wrapInFrames(channelData []byte, channelID opderive.ChannelID) []byte { + frame := opderive.Frame{ ID: channelID, FrameNumber: 0, Data: channelData, @@ -188,7 +203,7 @@ func TestHelpers(t *testing.T) { require.NotEmpty(t, l1WithBatch.BatcherData[0]) // Verify the batcher tx can be parsed as frames - frames, err := derive.ParseFrames(l1WithBatch.BatcherData[0]) + frames, err := opderive.ParseFrames(l1WithBatch.BatcherData[0]) require.NoError(t, err) require.Len(t, frames, 1) require.True(t, frames[0].IsLast) @@ -203,7 +218,7 @@ func makeL1WithBatch(t *testing.T, cfg *rollup.Config, l1Num uint64, safeHead et l1 := makeTestL1Input(l1Num) l1Ref := l1.BlockRef() - batch := &derive.SingularBatch{ + batch := &opderive.SingularBatch{ ParentHash: safeHead.Hash, EpochNum: rollup.Epoch(l1Ref.Number), EpochHash: l1Ref.Hash, @@ -212,7 +227,7 @@ func makeL1WithBatch(t *testing.T, cfg *rollup.Config, l1Num uint64, safeHead et channelData := encodeBatchToChannelData(t, batch) - var chID derive.ChannelID + var chID opderive.ChannelID copy(chID[:], common.Hex2Bytes("deadbeefdeadbeefdeadbeefdeadbeef")) batcherTx := wrapInFrames(channelData, chID) diff --git a/op-core/pure/types.go b/op-core/derive/types.go similarity index 85% rename from op-core/pure/types.go rename to op-core/derive/types.go index 34497e0c7f6d0..7e4241fb6e067 100644 --- a/op-core/pure/types.go +++ b/op-core/derive/types.go @@ -1,13 +1,19 @@ -package pure +package derive import ( - "github.com/ethereum/go-ethereum/common" + "errors" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" ) +var ( + ErrNeedL1Data = errors.New("need more L1 data") + ErrReorg = errors.New("L1 reorg detected") +) + // L1Input is a pre-processed L1 block containing only derivation-relevant data. 
// The caller is responsible for filtering batcher transactions, extracting deposits // from receipts, and extracting system config update logs. @@ -32,13 +38,6 @@ func (l *L1Input) BlockID() eth.BlockID { return eth.HeaderBlockID(l.Header) } -// DerivedBlock is a single derived L2 block -- payload attributes ready for execution. -type DerivedBlock struct { - Attributes *eth.PayloadAttributes - ExpectedParentHash common.Hash // from batch ParentHash field; zero if unavailable - DerivedFrom eth.L1BlockRef -} - // l2Cursor tracks the derivation position without knowing the L2 block hash. type l2Cursor struct { Number uint64 diff --git a/op-core/pure/types_test.go b/op-core/derive/types_test.go similarity index 99% rename from op-core/pure/types_test.go rename to op-core/derive/types_test.go index 9e2a8202d0574..a6d4649296894 100644 --- a/op-core/pure/types_test.go +++ b/op-core/derive/types_test.go @@ -1,4 +1,4 @@ -package pure +package derive import ( "math/big" diff --git a/op-core/pure/batches.go b/op-core/pure/batches.go deleted file mode 100644 index 18cb0c60e0c5f..0000000000000 --- a/op-core/pure/batches.go +++ /dev/null @@ -1,237 +0,0 @@ -package pure - -import ( - "context" - "io" - - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -// decodeBatches reads all batches from a completed channel's compressed data -// and returns them as singular batches. Span batches are expanded into -// individual singular batches using the provided L1 origins and cursor. -// -// Decode errors are logged and cause the function to return whatever batches -// were successfully decoded so far. Only programming errors (bugs) would -// warrant propagating errors upward; all data-dependent failures are treated -// as bad input. 
-// -// Span batch prefix validation is delegated to derive.CheckSpanBatchPrefix, -// which rejects overlapping span batches under Karst. If the prefix check -// returns BatchPast, the span batch is skipped. Any other non-Accept result -// causes the function to return the batches collected so far. -func decodeBatches( - lgr log.Logger, - r io.Reader, - cfg *rollup.Config, - l1Origins []eth.L1BlockRef, - cursor l2Cursor, - l1InclusionBlock eth.L1BlockRef, -) []*derive.SingularBatch { - spec := rollup.NewChainSpec(cfg) - maxRLP := spec.MaxRLPBytesPerChannel(cursor.Timestamp) - - readBatch, err := derive.BatchReader(r, maxRLP, true) // Fjord always active (implied by Karst) - if err != nil { - lgr.Warn("failed to create batch reader", "err", err) - return nil - } - - var batches []*derive.SingularBatch - for { - batchData, err := readBatch() - if err == io.EOF { - break - } else if err != nil { - lgr.Warn("failed to read batch", "err", err) - return batches - } - - switch batchData.GetBatchType() { - case derive.SingularBatchType: - singular, err := derive.GetSingularBatch(batchData) - if err != nil { - lgr.Warn("failed to extract singular batch", "err", err) - return batches - } - batches = append(batches, singular) - - case derive.SpanBatchType: - spanBatch, err := derive.DeriveSpanBatch( - batchData, - cfg.BlockTime, - cfg.Genesis.L2Time, - cfg.L2ChainID, - ) - if err != nil { - lgr.Warn("failed to derive span batch", "err", err) - return batches - } - - l2SafeHead := eth.L2BlockRef{ - Number: cursor.Number, - Time: cursor.Timestamp, - L1Origin: cursor.L1Origin, - SequenceNumber: cursor.SequenceNumber, - } - - // Build l1Blocks slice starting from the cursor's epoch, as - // CheckSpanBatchPrefix expects l1Blocks[0] to be the current epoch. 
- var l1Blocks []eth.L1BlockRef - for _, ref := range l1Origins { - if ref.Number >= cursor.L1Origin.Number { - l1Blocks = append(l1Blocks, ref) - } - } - - validity, _ := derive.CheckSpanBatchPrefix( - context.Background(), cfg, - lgr, - l1Blocks, l2SafeHead, spanBatch, l1InclusionBlock, nil, - ) - if validity == derive.BatchPast { - lgr.Debug("span batch is past safe head, skipping") - continue - } - if validity != derive.BatchAccept { - lgr.Warn("span batch prefix check failed", "validity", validity) - return batches - } - - singular, err := spanBatch.GetSingularBatches(l1Origins, l2SafeHead) - if err != nil { - lgr.Warn("failed to expand span batch", "err", err) - return batches - } - batches = append(batches, singular...) - - default: - lgr.Warn("unknown batch type", "type", batchData.GetBatchType()) - return batches - } - } - - return batches -} - -// validateBatch performs batch validation matching the checks in -// op-node/rollup/derive/batches.go (checkSingularBatch), minus checks that -// require L2 state access: -// - Parent hash validation (deferred to post-execution via DerivedBlock.ExpectedParentHash) -// -// Returns BatchAccept, BatchPast, or BatchDrop. With Karst active (implying -// Holocene), past batches return BatchPast instead of BatchDrop. -// -// Overlapping span batches are already rejected in decodeBatches via -// CheckSpanBatchPrefix. -func validateBatch(lgr log.Logger, batch *derive.SingularBatch, cursor l2Cursor, l1Origins []eth.L1BlockRef, cfg *rollup.Config, l1InclusionNum uint64) derive.BatchValidity { - expectedTimestamp := cursor.Timestamp + cfg.BlockTime - - // Holocene (implied by Karst): past batches are BatchPast, future batches are BatchDrop. 
- if batch.Timestamp > expectedTimestamp { - lgr.Warn("batch timestamp too new", "expected", expectedTimestamp, "got", batch.Timestamp) - return derive.BatchDrop - } - if batch.Timestamp < expectedTimestamp { - lgr.Debug("batch is past safe head", "expected", expectedTimestamp, "got", batch.Timestamp) - return derive.BatchPast - } - - epochNum := uint64(batch.EpochNum) - - // Sequence window: batch must be included within SeqWindowSize of its epoch. - if epochNum+cfg.SeqWindowSize < l1InclusionNum { - lgr.Warn("batch sequence window expired", "epoch", epochNum, "inclusion", l1InclusionNum, "window", cfg.SeqWindowSize) - return derive.BatchDrop - } - - // Epoch must be current or next (cannot skip epochs). - if epochNum < cursor.L1Origin.Number { - lgr.Warn("batch epoch too old", "epoch", epochNum, "cursor_origin", cursor.L1Origin.Number) - return derive.BatchDrop - } - if epochNum > cursor.L1Origin.Number+1 { - lgr.Warn("batch epoch too new", "epoch", epochNum, "cursor_origin", cursor.L1Origin.Number) - return derive.BatchDrop - } - - // Find the batch's L1 origin and verify epoch hash. - var batchOrigin *eth.L1BlockRef - for i := range l1Origins { - if l1Origins[i].Number == epochNum { - batchOrigin = &l1Origins[i] - break - } - } - if batchOrigin == nil { - lgr.Warn("batch epoch L1 origin not found", "epoch", epochNum) - return derive.BatchDrop - } - if batch.EpochHash != batchOrigin.Hash { - lgr.Warn("batch epoch hash mismatch", "epoch", epochNum, "expected", batchOrigin.Hash, "got", batch.EpochHash) - return derive.BatchDrop - } - - // Batch timestamp must be >= L1 origin timestamp. - if batch.Timestamp < batchOrigin.Time { - lgr.Warn("batch timestamp before L1 origin", "batch_time", batch.Timestamp, "l1_time", batchOrigin.Time) - return derive.BatchDrop - } - - // Fork activation blocks must not contain user transactions. 
- if (cfg.IsJovianActivationBlock(batch.Timestamp) || - cfg.IsKarstActivationBlock(batch.Timestamp) || - cfg.IsInteropActivationBlock(batch.Timestamp)) && - len(batch.Transactions) > 0 { - lgr.Warn("batch has transactions at fork activation block") - return derive.BatchDrop - } - - // Sequencer time drift: L2 time must not exceed L1 time + MaxSequencerDrift. - spec := rollup.NewChainSpec(cfg) - maxDrift := batchOrigin.Time + spec.MaxSequencerDrift(batchOrigin.Time) - if batch.Timestamp > maxDrift { - if len(batch.Transactions) == 0 { - // Empty batches may exceed drift to maintain L2 time >= L1 time invariant, - // but only if they don't advance the epoch and the next origin isn't available. - if epochNum == cursor.L1Origin.Number { - for i := range l1Origins { - if l1Origins[i].Number == epochNum+1 { - if batch.Timestamp >= l1Origins[i].Time { - lgr.Warn("empty batch exceeds drift but should have adopted next origin") - return derive.BatchDrop - } - break - } - } - } - } else { - lgr.Warn("batch exceeds sequencer drift", "batch_time", batch.Timestamp, "max_drift", maxDrift) - return derive.BatchDrop - } - } - - // Transaction validation. 
- isIsthmus := cfg.IsIsthmus(batch.Timestamp) - for _, txBytes := range batch.Transactions { - if len(txBytes) == 0 { - lgr.Warn("batch contains empty transaction") - return derive.BatchDrop - } - if txBytes[0] == types.DepositTxType { - lgr.Warn("batch contains deposit transaction") - return derive.BatchDrop - } - if !isIsthmus && txBytes[0] == types.SetCodeTxType { - lgr.Warn("batch contains SetCode transaction before Isthmus") - return derive.BatchDrop - } - } - - return derive.BatchAccept -} diff --git a/op-core/pure/batches_test.go b/op-core/pure/batches_test.go deleted file mode 100644 index 86bba7d9945f2..0000000000000 --- a/op-core/pure/batches_test.go +++ /dev/null @@ -1,216 +0,0 @@ -package pure - -import ( - "bytes" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" - "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -var testLogger = log.NewLogger(log.DiscardHandler()) - -func TestDecodeBatches_SingularBatch(t *testing.T) { - cfg := testRollupConfig() - safeHead := testSafeHead(cfg) - l1Ref := testL1Ref(1) - - batch := &derive.SingularBatch{ - ParentHash: safeHead.Hash, - EpochNum: rollup.Epoch(l1Ref.Number), - EpochHash: l1Ref.Hash, - Timestamp: safeHead.Time + cfg.BlockTime, - } - - channelData := encodeBatchToChannelData(t, batch) - - cursor := newCursor(safeHead) - l1Origins := []eth.L1BlockRef{testL1Ref(0), l1Ref} - - batches := decodeBatches(testLogger, bytes.NewReader(channelData), cfg, l1Origins, cursor, l1Ref) - require.Len(t, batches, 1) - - decoded := batches[0] - require.Equal(t, batch.ParentHash, decoded.ParentHash) - require.Equal(t, batch.EpochNum, decoded.EpochNum) - require.Equal(t, batch.EpochHash, decoded.EpochHash) - require.Equal(t, batch.Timestamp, decoded.Timestamp) 
-} - -func TestValidateBatch_ValidSingular(t *testing.T) { - cfg := testRollupConfig() - l1Origin := testL1Ref(5) - - cursor := l2Cursor{ - Number: 10, - Timestamp: 100, - L1Origin: l1Origin.ID(), - } - - batch := &derive.SingularBatch{ - EpochNum: rollup.Epoch(l1Origin.Number), - EpochHash: l1Origin.Hash, - Timestamp: cursor.Timestamp + cfg.BlockTime, - } - - l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - - require.Equal(t, derive.BatchValidity(derive.BatchAccept), validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) -} - -func TestValidateBatch_WrongTimestamp(t *testing.T) { - cfg := testRollupConfig() - l1Origin := testL1Ref(5) - - cursor := l2Cursor{ - Number: 10, - Timestamp: 100, - L1Origin: l1Origin.ID(), - } - - batch := &derive.SingularBatch{ - EpochNum: rollup.Epoch(l1Origin.Number), - EpochHash: l1Origin.Hash, - Timestamp: cursor.Timestamp + cfg.BlockTime + 1, // too new - } - - l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - - require.Equal(t, derive.BatchValidity(derive.BatchDrop), validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) -} - -func TestValidateBatch_PastTimestamp(t *testing.T) { - cfg := testRollupConfig() - l1Origin := testL1Ref(5) - - cursor := l2Cursor{ - Number: 10, - Timestamp: 100, - L1Origin: l1Origin.ID(), - } - - batch := &derive.SingularBatch{ - EpochNum: rollup.Epoch(l1Origin.Number), - EpochHash: l1Origin.Hash, - Timestamp: cursor.Timestamp - 2, // in the past - } - - l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - - require.Equal(t, derive.BatchValidity(derive.BatchPast), validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) -} - -func TestValidateBatch_EpochTooOld(t *testing.T) { - cfg := testRollupConfig() - l1Origin := testL1Ref(5) - - cursor := l2Cursor{ - Number: 10, - Timestamp: 100, - L1Origin: l1Origin.ID(), - } - - oldOrigin := testL1Ref(3) - batch := &derive.SingularBatch{ - EpochNum: 
rollup.Epoch(oldOrigin.Number), // before cursor's L1 origin - EpochHash: oldOrigin.Hash, - Timestamp: cursor.Timestamp + cfg.BlockTime, - } - - l1Origins := []eth.L1BlockRef{oldOrigin, testL1Ref(4), l1Origin, testL1Ref(6)} - - require.Equal(t, derive.BatchValidity(derive.BatchDrop), validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) -} - -func TestValidateBatch_EpochTooNew(t *testing.T) { - cfg := testRollupConfig() - l1Origin := testL1Ref(5) - - cursor := l2Cursor{ - Number: 10, - Timestamp: 100, - L1Origin: l1Origin.ID(), - } - - batch := &derive.SingularBatch{ - EpochNum: rollup.Epoch(100), // way beyond latest L1 origin - EpochHash: common.Hash{0xab}, - Timestamp: cursor.Timestamp + cfg.BlockTime, - } - - l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - - require.Equal(t, derive.BatchValidity(derive.BatchDrop), validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) -} - -func TestValidateBatch_SequenceWindowExpired(t *testing.T) { - cfg := testRollupConfig() - l1Origin := testL1Ref(5) - - cursor := l2Cursor{ - Number: 10, - Timestamp: 100, - L1Origin: l1Origin.ID(), - } - - batch := &derive.SingularBatch{ - EpochNum: rollup.Epoch(l1Origin.Number), - EpochHash: l1Origin.Hash, - Timestamp: cursor.Timestamp + cfg.BlockTime, - } - - l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - - // Inclusion at block 16: epochNum(5) + SeqWindowSize(10) = 15 < 16 → expired - require.Equal(t, derive.BatchValidity(derive.BatchDrop), validateBatch(testLogger, batch, cursor, l1Origins, cfg, 16)) -} - -func TestValidateBatch_EpochSkip(t *testing.T) { - cfg := testRollupConfig() - l1Origin := testL1Ref(5) - - cursor := l2Cursor{ - Number: 10, - Timestamp: 100, - L1Origin: l1Origin.ID(), - } - - // Epoch 7 skips over epoch 6 (cursor is at 5, can only go to 6) - batch := &derive.SingularBatch{ - EpochNum: rollup.Epoch(7), - EpochHash: testL1Ref(7).Hash, - Timestamp: cursor.Timestamp + cfg.BlockTime, - 
} - - l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6), testL1Ref(7)} - - require.Equal(t, derive.BatchValidity(derive.BatchDrop), validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) -} - -func TestValidateBatch_DepositTxRejected(t *testing.T) { - cfg := testRollupConfig() - l1Origin := testL1Ref(5) - - cursor := l2Cursor{ - Number: 10, - Timestamp: 100, - L1Origin: l1Origin.ID(), - } - - batch := &derive.SingularBatch{ - EpochNum: rollup.Epoch(l1Origin.Number), - EpochHash: l1Origin.Hash, - Timestamp: cursor.Timestamp + cfg.BlockTime, - Transactions: []hexutil.Bytes{{0x7e, 0x01, 0x02}}, // deposit tx type - } - - l1Origins := []eth.L1BlockRef{testL1Ref(4), l1Origin, testL1Ref(6)} - - require.Equal(t, derive.BatchValidity(derive.BatchDrop), validateBatch(testLogger, batch, cursor, l1Origins, cfg, l1Origin.Number)) -} diff --git a/op-core/pure/derive.go b/op-core/pure/derive.go deleted file mode 100644 index eeb6d657320d1..0000000000000 --- a/op-core/pure/derive.go +++ /dev/null @@ -1,190 +0,0 @@ -package pure - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/params" - - "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -// PureDerive is the main entry point for pure derivation. It takes an L2 safe -// head, system config, and a sequence of L1 blocks and produces the derived L2 -// blocks (as payload attributes) that follow from those inputs. -// -// The function is stateless and deterministic: given the same inputs it always -// produces the same outputs. No network access, no caching, no side effects. -// -// l1Blocks must be contiguous and strictly ordered by number. They should start -// at least ChannelTimeout blocks before safeHead.L1Origin.Number to ensure -// channels opened before the safe head can still be decoded. 
-// -// Requires the Karst fork to be active at the safe head timestamp. Before Karst, -// span batches may overlap the safe chain, which this implementation does not support. -// -// Compared to the legacy pipeline (op-node/rollup/derive), this implementation -// intentionally skips the following checks: -// - Parent hash validation against the actual L2 chain (deferred to post-execution -// via DerivedBlock.ExpectedParentHash) -// - L2 block hash verification (no L2 state access) -// - Span batch overlap comparison (rejected by Karst; overlaps are invalid) -// - Pipeline reset / reorg handling (caller is responsible for providing correct inputs) -// -// See op-node/rollup/derive/batches.go for the full upstream validation logic. -func PureDerive( - cfg *rollup.Config, - l1ChainConfig *params.ChainConfig, - lgr log.Logger, - safeHead eth.L2BlockRef, - sysConfig eth.SystemConfig, - l1Blocks []L1Input, -) ([]DerivedBlock, error) { - if !cfg.IsKarst(safeHead.Time) { - return nil, fmt.Errorf("pure derivation requires Karst fork (no overlapping span batches), safe head time %d is pre-Karst", safeHead.Time) - } - - if len(l1Blocks) == 0 { - return nil, nil - } - - spec := rollup.NewChainSpec(cfg) - - // L1 blocks must be contiguous and strictly ordered. Compute the base - // number so we can do O(1) lookups by index arithmetic. - firstL1Num := l1Blocks[0].Header.Number.Uint64() - - // Require l1Blocks to start at least ChannelTimeout before the safe - // head's L1 origin so that channels opened before the safe head are available. 
- channelTimeout := spec.ChannelTimeout(safeHead.Time) - requiredStart := safeHead.L1Origin.Number - if requiredStart > channelTimeout { - requiredStart -= channelTimeout - } else { - requiredStart = 0 - } - if firstL1Num > requiredStart { - return nil, fmt.Errorf("l1Blocks start at %d but must start at or before %d (safe head origin %d minus channel timeout %d)", - firstL1Num, requiredStart, safeHead.L1Origin.Number, channelTimeout) - } - - cursor := newCursor(safeHead) - assembler := newChannelAssembler() - - l1Origins := make([]eth.L1BlockRef, len(l1Blocks)) - for i := range l1Blocks { - l1Origins[i] = l1Blocks[i].BlockRef() - } - - findL1 := func(number uint64) *L1Input { - idx := int(number - firstL1Num) - if idx >= 0 && idx < len(l1Blocks) { - return &l1Blocks[idx] - } - return nil - } - - var derived []DerivedBlock - - for i := range l1Blocks { - l1 := l1Blocks[i] - l1Ref := l1.BlockRef() - - for _, configLog := range l1.ConfigLogs { - if err := derive.ProcessSystemConfigUpdateLogEvent(&sysConfig, configLog, cfg, l1.Header.Time); err != nil { - return nil, fmt.Errorf("processing system config update at L1 block %d: %w", l1Ref.Number, err) - } - } - - assembler.checkTimeout(l1Ref, spec.ChannelTimeout(l1Ref.Time)) - - for _, txData := range l1.BatcherData { - frames, err := derive.ParseFrames(txData) - if err != nil { - lgr.Warn("failed to parse frames", "l1_block", l1Ref.Number, "err", err) - continue - } - - for _, frame := range frames { - ready := assembler.addFrame(frame, l1Ref) - if ready == nil { - continue - } - - lgr.Debug("channel ready", "channel", ready.id, "l1_block", l1Ref.Number) - - batches := decodeBatches(lgr, ready.channel.Reader(), cfg, l1Origins, cursor, l1Ref) - - for _, batch := range batches { - validity := validateBatch(lgr, batch, cursor, l1Origins, cfg, l1Ref.Number) - if validity == derive.BatchPast { - lgr.Debug("batch is past, skipping", - "timestamp", batch.Timestamp, "epoch", batch.EpochNum) - continue - } - if validity != 
derive.BatchAccept { - lgr.Warn("invalid batch, flushing channel", - "timestamp", batch.Timestamp, "epoch", batch.EpochNum, "l1_block", l1Ref.Number) - break - } - - epochL1 := findL1(uint64(batch.EpochNum)) - if epochL1 == nil { - return nil, fmt.Errorf("missing L1 block %d for batch epoch", batch.EpochNum) - } - - block, err := buildAttributes(batch, epochL1, cursor, sysConfig, cfg, l1ChainConfig) - if err != nil { - return nil, fmt.Errorf("building attributes at L1 block %d: %w", l1Ref.Number, err) - } - derived = append(derived, *block) - - epochID := eth.BlockID{Number: uint64(batch.EpochNum), Hash: batch.EpochHash} - var seqNum uint64 - if epochID.Number != cursor.L1Origin.Number { - seqNum = 0 - } else { - seqNum = cursor.SequenceNumber + 1 - } - cursor.advance(batch.Timestamp, epochID, seqNum) - } - } - } - - for cursor.needsEmptyBatch(l1Ref, cfg) { - nextTimestamp := cursor.Timestamp + cfg.BlockTime - newOrigin := cursor.L1Origin - newSeqNum := cursor.SequenceNumber + 1 - - epochL1 := findL1(cursor.L1Origin.Number) - if epochL1 == nil { - return nil, fmt.Errorf("missing L1 block %d for empty batch epoch", cursor.L1Origin.Number) - } - - // Advance epoch if the next L2 timestamp >= next L1 block's timestamp. 
- nextL1 := findL1(cursor.L1Origin.Number + 1) - if nextL1 != nil && nextTimestamp >= nextL1.Header.Time { - newOrigin = nextL1.BlockID() - newSeqNum = 0 - epochL1 = nextL1 - } - - emptyBatch := &derive.SingularBatch{ - EpochNum: rollup.Epoch(newOrigin.Number), - EpochHash: newOrigin.Hash, - Timestamp: nextTimestamp, - } - - block, err := buildAttributes(emptyBatch, epochL1, cursor, sysConfig, cfg, l1ChainConfig) - if err != nil { - return nil, fmt.Errorf("building empty batch attributes at L1 block %d: %w", l1Ref.Number, err) - } - derived = append(derived, *block) - cursor.advance(emptyBatch.Timestamp, newOrigin, newSeqNum) - } - } - - return derived, nil -} diff --git a/op-core/pure/derive_test.go b/op-core/pure/derive_test.go deleted file mode 100644 index fcf5ebc809f3c..0000000000000 --- a/op-core/pure/derive_test.go +++ /dev/null @@ -1,275 +0,0 @@ -package pure - -import ( - "bytes" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" - "github.com/ethereum-optimism/optimism/op-service/bigs" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -func TestPureDerive_SingleBatch(t *testing.T) { - cfg := testRollupConfig() - safeHead := testSafeHead(cfg) - sysConfig := testSystemConfig() - - l1Origin := makeTestL1Input(0) // safe head's L1 origin - l1 := makeL1WithBatch(t, cfg, 1, safeHead, sysConfig) - - derived, err := PureDerive(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig, []L1Input{*l1Origin, *l1}) - require.NoError(t, err) - require.Len(t, derived, 1) - - block := derived[0] - require.Equal(t, hexutil.Uint64(safeHead.Time+cfg.BlockTime), block.Attributes.Timestamp) - require.True(t, block.Attributes.NoTxPool) - require.Equal(t, l1.BlockRef(), 
block.DerivedFrom) -} - -func TestPureDerive_EmptyEpoch(t *testing.T) { - cfg := testRollupConfig() - safeHead := testSafeHead(cfg) - sysConfig := testSystemConfig() - - // Create SeqWindowSize + 2 L1 blocks with no batcher data. - // The sequencer window expires once we get far enough ahead of the cursor's L1 origin. - numBlocks := cfg.SeqWindowSize + 2 - l1Blocks := make([]L1Input, numBlocks) - for i := uint64(0); i < numBlocks; i++ { - l1Blocks[i] = *makeTestL1Input(i) - } - - derived, err := PureDerive(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig, l1Blocks) - require.NoError(t, err) - require.Greater(t, len(derived), 0, "empty batches should be generated when sequencer window expires") - - // Each derived block should have sequential timestamps. - expectedTimestamp := safeHead.Time + cfg.BlockTime - for _, block := range derived { - require.Equal(t, hexutil.Uint64(expectedTimestamp), block.Attributes.Timestamp) - expectedTimestamp += cfg.BlockTime - } -} - -func TestPureDerive_MultipleChannelsAndEpochs(t *testing.T) { - cfg := testRollupConfig() - safeHead := testSafeHead(cfg) - sysConfig := testSystemConfig() - - l1Blocks := makeMultiEpochL1Inputs(t, cfg, safeHead, sysConfig) - - derived, err := PureDerive(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig, l1Blocks) - require.NoError(t, err) - require.Greater(t, len(derived), 1, "should derive multiple blocks from multiple epochs") - - // Each derived block should have sequential timestamps. 
- expectedTimestamp := safeHead.Time + cfg.BlockTime - for i, block := range derived { - require.Equal(t, hexutil.Uint64(expectedTimestamp), block.Attributes.Timestamp, - "block %d should have timestamp %d", i, expectedTimestamp) - expectedTimestamp += cfg.BlockTime - } -} - -func TestPureDerive_ChannelTimeout(t *testing.T) { - cfg := testRollupConfig() - cfg.ChannelTimeoutBedrock = 2 // short timeout for testing - safeHead := testSafeHead(cfg) - sysConfig := testSystemConfig() - - l1Block0 := makeTestL1Input(0) // safe head's L1 origin - l1Block0Ref := l1Block0.BlockRef() - - // Create an incomplete channel at L1 block 1 (frame 0, not last). - incompleteL1 := makeTestL1Input(1) - incompleteChID := testChannelID(0xAA) - - channelData := encodeBatchToChannelData(t, &derive.SingularBatch{ - ParentHash: safeHead.Hash, - EpochNum: rollup.Epoch(l1Block0Ref.Number), - EpochHash: l1Block0Ref.Hash, - Timestamp: safeHead.Time + cfg.BlockTime, - }) - - frame0 := derive.Frame{ - ID: incompleteChID, - FrameNumber: 0, - Data: channelData, - IsLast: false, - } - var buf bytes.Buffer - buf.WriteByte(params.DerivationVersion0) - require.NoError(t, frame0.MarshalBinary(&buf)) - incompleteL1.BatcherData = [][]byte{buf.Bytes()} - - // L1 blocks: 0 (origin), 1 (incomplete channel), 2, 3, 4 (complete channel). - // Timeout fires at block 4 (4 > 1 + 2). No empty batches generated - // because we're well within SeqWindowSize (10). - var l1Blocks []L1Input - l1Blocks = append(l1Blocks, *l1Block0) - l1Blocks = append(l1Blocks, *incompleteL1) - l1Blocks = append(l1Blocks, *makeTestL1Input(2)) - l1Blocks = append(l1Blocks, *makeTestL1Input(3)) - - // Complete channel at L1 block 4 (after timeout). 
- completeL1 := makeTestL1Input(4) - completeChID := testChannelID(0xBB) - - completeBatch := &derive.SingularBatch{ - ParentHash: safeHead.Hash, - EpochNum: rollup.Epoch(l1Block0Ref.Number), - EpochHash: l1Block0Ref.Hash, - Timestamp: safeHead.Time + cfg.BlockTime, - } - completeChannelData := encodeBatchToChannelData(t, completeBatch) - completeTx := wrapInFrames(completeChannelData, completeChID) - completeL1.BatcherData = [][]byte{completeTx} - l1Blocks = append(l1Blocks, *completeL1) - - derived, err := PureDerive(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig, l1Blocks) - require.NoError(t, err) - - // The incomplete channel timed out. Only the complete channel produces a block. - foundFromComplete := false - for _, block := range derived { - if uint64(block.Attributes.Timestamp) == safeHead.Time+cfg.BlockTime { - foundFromComplete = true - break - } - } - require.True(t, foundFromComplete, "should derive block from complete channel after timeout") -} - -func TestPureDerive_InvalidBatchSkipped(t *testing.T) { - cfg := testRollupConfig() - safeHead := testSafeHead(cfg) - sysConfig := testSystemConfig() - - l1 := makeTestL1Input(1) - l1Ref := l1.BlockRef() - - // Create a batch with wrong timestamp (should be safeHead.Time + BlockTime). 
- invalidBatch := &derive.SingularBatch{ - ParentHash: safeHead.Hash, - EpochNum: rollup.Epoch(l1Ref.Number), - EpochHash: l1Ref.Hash, - Timestamp: safeHead.Time + cfg.BlockTime + 999, // wrong timestamp - } - - channelData := encodeBatchToChannelData(t, invalidBatch) - var chID derive.ChannelID - copy(chID[:], common.Hex2Bytes("cccccccccccccccccccccccccccccccc")) - batcherTx := wrapInFrames(channelData, chID) - l1.BatcherData = [][]byte{batcherTx} - - l1Origin := makeTestL1Input(0) // safe head's L1 origin - derived, err := PureDerive(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig, []L1Input{*l1Origin, *l1}) - require.NoError(t, err) - require.Empty(t, derived, "invalid batch should be skipped without error") -} - -func TestPureDerive_RejectsPreKarst(t *testing.T) { - cfg := testRollupConfig() - cfg.KarstTime = nil // disable Karst - safeHead := testSafeHead(cfg) - sysConfig := testSystemConfig() - - _, err := PureDerive(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig, nil) - require.Error(t, err) - require.Contains(t, err.Error(), "Karst fork") -} - -func TestPureDerive_ValidatesL1BlockRange(t *testing.T) { - cfg := testRollupConfig() - safeHead := testSafeHead(cfg) - sysConfig := testSystemConfig() - - // Start L1 blocks after the safe head's L1 origin (gap) - l1Blocks := []L1Input{*makeTestL1Input(5)} - - _, err := PureDerive(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig, l1Blocks) - require.Error(t, err) - require.Contains(t, err.Error(), "l1Blocks start at") -} - -func TestPureDerive_EmptyL1Blocks(t *testing.T) { - cfg := testRollupConfig() - safeHead := testSafeHead(cfg) - sysConfig := testSystemConfig() - - derived, err := PureDerive(cfg, testL1ChainConfig(), testLogger, safeHead, sysConfig, nil) - require.NoError(t, err) - require.Nil(t, derived) -} - -// makeMultiEpochL1Inputs builds several L1 blocks with batches at different -// epochs, suitable for testing multi-channel, multi-epoch derivation. 
-func makeMultiEpochL1Inputs(t *testing.T, cfg *rollup.Config, safeHead eth.L2BlockRef, sysConfig eth.SystemConfig) []L1Input { - t.Helper() - _ = sysConfig - - // Block 1: batch for epoch 1, timestamp = safeHead.Time + BlockTime - l1Block1 := makeTestL1Input(1) - l1Ref1 := l1Block1.BlockRef() - batch1 := &derive.SingularBatch{ - ParentHash: safeHead.Hash, - EpochNum: rollup.Epoch(l1Ref1.Number), - EpochHash: l1Ref1.Hash, - Timestamp: safeHead.Time + cfg.BlockTime, - } - chData1 := encodeBatchToChannelData(t, batch1) - var chID1 derive.ChannelID - chID1[0] = 0x01 - l1Block1.BatcherData = [][]byte{wrapInFrames(chData1, chID1)} - - // Block 2: batch for epoch 2, timestamp = safeHead.Time + 2*BlockTime - l1Block2 := makeTestL1Input(2) - l1Ref2 := l1Block2.BlockRef() - batch2 := &derive.SingularBatch{ - EpochNum: rollup.Epoch(l1Ref2.Number), - EpochHash: l1Ref2.Hash, - Timestamp: safeHead.Time + 2*cfg.BlockTime, - } - chData2 := encodeBatchToChannelData(t, batch2) - var chID2 derive.ChannelID - chID2[0] = 0x02 - l1Block2.BatcherData = [][]byte{wrapInFrames(chData2, chID2)} - - // Block 3: batch for epoch 3, timestamp = safeHead.Time + 3*BlockTime - l1Block3 := makeTestL1Input(3) - l1Ref3 := l1Block3.BlockRef() - batch3 := &derive.SingularBatch{ - EpochNum: rollup.Epoch(l1Ref3.Number), - EpochHash: l1Ref3.Hash, - Timestamp: safeHead.Time + 3*cfg.BlockTime, - } - chData3 := encodeBatchToChannelData(t, batch3) - var chID3 derive.ChannelID - chID3[0] = 0x03 - l1Block3.BatcherData = [][]byte{wrapInFrames(chData3, chID3)} - - // Include block 0 (safe head's L1 origin) at the start. - l1Block0 := makeTestL1Input(0) - return []L1Input{*l1Block0, *l1Block1, *l1Block2, *l1Block3} -} - -// Verify that test inputs are constructed correctly through BlockRef/BlockID. 
-func TestL1InputIntegration(t *testing.T) { - l1 := makeTestL1Input(10) - ref := l1.BlockRef() - require.Equal(t, bigs.Uint64Strict(l1.Header.Number), ref.Number) - require.Equal(t, l1.Header.Hash(), ref.Hash) - require.Equal(t, l1.Header.ParentHash, ref.ParentHash) - require.Equal(t, l1.Header.Time, ref.Time) - - id := l1.BlockID() - require.Equal(t, ref.Hash, id.Hash) - require.Equal(t, ref.Number, id.Number) -} From f771927113678e81971a3ad0a3ccaa3c07ba6d02 Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Mon, 2 Mar 2026 20:29:04 -0400 Subject: [PATCH 15/15] derive: update DESIGN.md for iterator-style Deriver Co-Authored-By: Claude Opus 4.6 --- op-core/derive/DESIGN.md | 187 +++++++++++++++++++++------------------ 1 file changed, 100 insertions(+), 87 deletions(-) diff --git a/op-core/derive/DESIGN.md b/op-core/derive/DESIGN.md index c5b5b9566fbc8..bfadf40eada89 100644 --- a/op-core/derive/DESIGN.md +++ b/op-core/derive/DESIGN.md @@ -1,17 +1,20 @@ -# Pure Derivation Pipeline +# Derivation Iterator ## Objective -Implement a pure function that derives L2 payload attributes from L1 data, -equivalent in behavior to the existing streaming pipeline in -`op-node/rollup/derive`, but without I/O, caching, or state access. +Derive L2 payload attributes from L1 data one block at a time, equivalent in +behavior to the existing streaming pipeline in `op-node/rollup/derive`, but +without I/O, caching, or state access beyond what the caller provides. -``` -PureDerive(cfg, l1ChainConfig, logger, safeHead, sysConfig, l1Blocks) → []DerivedBlock +```go +d, _ := NewDeriver(cfg, l1ChainConfig, lgr, safeHead, sysConfig) +d.AddL1Block(l1Blocks...) +attrs, l1Ref, err := d.Next(safeHead) ``` -Given the same inputs, the function always produces the same outputs. The caller -provides all L1 data upfront; the function never fetches anything. +The `Deriver` iterator accepts L1 blocks incrementally and produces one +`PayloadAttributes` at a time. 
The caller executes each block on the engine, +then calls `Next` again with the updated safe head. ## Motivation @@ -21,106 +24,114 @@ computation. This makes it difficult to test, reason about, and use in contexts where all data is already available (ZK provers, auditing tools, replay utilities). -A pure function is deterministic, composable, and trivially testable. +An earlier batch-mode `PureDerive` function took all L1 data upfront and +returned all derived blocks at once. This didn't match how derivation works in +practice: derive one block, execute on engine, verify, then derive the next. It +also couldn't validate parent hashes (needs L2 block hashes from execution) and +had no mechanism for L1 reorgs. + +The iterator solves both: incremental L1 ingestion, one-at-a-time derivation +with full `CheckBatch` validation including parent hash checks, and explicit +reorg handling via `Reset`. ## Scope **In scope:** Post-Karst derivation only. Karst implies Holocene, Granite, Fjord, and all prior forks. This simplifies the implementation: -- No `BatchFuture` or `BatchUndecided` (Holocene semantics: future → drop, - undecided conditions don't arise with complete L1 data) -- No span batch overlap handling (Karst rejects overlapping span batches as - `BatchPast`) - Single-channel assembly (Holocene rule: one active channel at a time) - Strict frame ordering (Holocene) +- No span batch overlap handling (Karst rejects overlapping span batches as + `BatchPast`) **Out of scope:** - Pre-Karst derivation -- Pipeline reset / reorg detection (caller responsibility) - L2 execution (we produce attributes, not executed blocks) +## API + +```go +var ErrNeedL1Data = errors.New("need more L1 data") +var ErrReorg = errors.New("L1 reorg detected") + +func NewDeriver(cfg, l1ChainConfig, lgr, safeHead, sysConfig) (*Deriver, error) + +// AddL1Block appends L1 blocks. Must be contiguous with previously added +// blocks. Returns ErrReorg on parent hash mismatch. 
+func (d *Deriver) AddL1Block(blocks ...L1Input) error + +// Next returns the next derived payload attributes and the L1 block they +// were derived from. Returns ErrNeedL1Data when more L1 blocks are needed. +func (d *Deriver) Next(safeHead eth.L2BlockRef) (*eth.PayloadAttributes, eth.L1BlockRef, error) + +// Reset clears all state back to the given safe head + system config. +// Used after L1 reorgs. The caller must re-add L1 blocks from the new chain. +func (d *Deriver) Reset(safeHead eth.L2BlockRef, sysConfig eth.SystemConfig) +``` + ## Architecture ``` -L1Input[] ──► frame parsing ──► channel assembly ──► batch decoding ──► batch validation ──► attribute building ──► DerivedBlock[] - │ - timeout check - (per L1 block) + AddL1Block + │ + ▼ +L1Input[] ──► frame parsing ──► channel assembly ──► batch decoding ──► CheckBatch ──► attribute building ──► PayloadAttributes + │ │ + timeout check parent hash check + (per L1 block) (via safe head) + │ + empty batch fallback + (seq window expired) ``` ### Components | File | Responsibility | |------|---------------| -| `derive.go` | `PureDerive` entry point, main loop over L1 blocks | +| `deriver.go` | `Deriver` iterator: `NewDeriver`, `AddL1Block`, `Next`, `Reset` | | `channels.go` | Push-based Holocene single-channel assembler | -| `batches.go` | `decodeBatches` (channel → singular batches), `validateBatch` | +| `batches.go` | `decodeBatches` (channel → singular batches via upstream decode) | +| `empty_batch.go` | `makeEmptyBatch` (pure function for seq window expiry) | | `attributes.go` | `buildAttributes` (batch + L1 data → PayloadAttributes) | -| `types.go` | `L1Input`, `DerivedBlock`, `l2Cursor` | - -### Main Loop (derive.go) - -For each L1 block: -1. Process system config update logs -2. Check channel timeout (fork-aware via `spec.ChannelTimeout`) -3. Parse frames from batcher transactions -4. Assemble frames into channels -5. When a channel completes: decode batches, validate each, build attributes -6. 
After processing all channels: generate empty batches if the sequencing - window has expired +| `types.go` | `L1Input`, `l2Cursor`, sentinel errors | + +### Next() Flow + +1. Try consuming from `pendingBatches`: + - `CheckBatch` → `BatchAccept`: build attributes, advance cursor, return + - `CheckBatch` → `BatchPast`: skip, try next batch + - `CheckBatch` → `BatchDrop`: discard remaining channel batches + - `CheckBatch` → `BatchUndecided`: return `ErrNeedL1Data` +2. Process more L1 blocks (`l1Pos < len(l1Blocks)`): + - Process config logs, check channel timeout + - Parse frames → assemble channel → if ready, decode into `pendingBatches` + - If got pending batches, go to step 1 + - After each L1 block, check for empty batches (seq window expired) +3. Return `ErrNeedL1Data` ### Empty Batch Generation When no batcher data covers a time range and the sequencing window expires (`currentL1.Number > cursor.L1Origin.Number + SeqWindowSize`), the pipeline -generates empty batches to maintain L2 liveness. Epoch advancement follows the -rule: advance to the next L1 origin when the L2 timestamp >= the next L1 +generates one empty batch to maintain L2 liveness. Epoch advancement follows +the rule: advance to the next L1 origin when the L2 timestamp >= the next L1 block's timestamp. -## Behavioral Equivalence - -The implementation must match `checkSingularBatch` in -`op-node/rollup/derive/batches.go` for all checks that don't require L2 state. 
- -### Upstream Check Mapping - -| # | Upstream Check | Pure Implementation | Notes | -|---|---------------|-------------------|-------| -| 1 | `len(l1Blocks) == 0` → `BatchUndecided` | N/A | We always have all L1 data | -| 2 | `timestamp > next` → `BatchFuture`/`BatchDrop` | `BatchDrop` | Holocene always active (implied by Karst) | -| 3 | `timestamp < next` → `BatchDrop`/`BatchPast` | `BatchPast` | Holocene always active | -| 4 | Parent hash mismatch → `BatchDrop` | Deferred | Stored in `DerivedBlock.ExpectedParentHash` for post-execution verification | -| 5 | Sequence window expired → `BatchDrop` | `epochNum + SeqWindowSize < l1InclusionNum` → `BatchDrop` | Equivalent | -| 6a | Epoch too old → `BatchDrop` | `epochNum < cursor.L1Origin.Number` → `BatchDrop` | Equivalent | -| 6b | Epoch is next but no L1 data → `BatchUndecided` | N/A | We always have all L1 data | -| 6c | Epoch too far ahead → `BatchDrop` | `epochNum > cursor.L1Origin.Number+1` → `BatchDrop` | Equivalent | -| 7 | Epoch hash mismatch → `BatchDrop` | Look up origin, compare hash → `BatchDrop` | Equivalent | -| 8 | Timestamp < L1 origin time → `BatchDrop` | `batch.Timestamp < batchOrigin.Time` → `BatchDrop` | Equivalent | -| 9 | Fork activation block with txs → `BatchDrop` | Jovian, Karst, Interop checks → `BatchDrop` | Equivalent | -| 10 | Sequencer drift exceeded → `BatchDrop` | Same logic with empty batch exception → `BatchDrop` | Equivalent | -| 11a | Empty transaction → `BatchDrop` | `len(txBytes) == 0` → `BatchDrop` | Equivalent | -| 11b | Deposit transaction → `BatchDrop` | `txBytes[0] == DepositTxType` → `BatchDrop` | Equivalent | -| 11c | SetCode before Isthmus → `BatchDrop` | `!isIsthmus && txBytes[0] == SetCodeTxType` → `BatchDrop` | Equivalent | -| 12 | All pass → `BatchAccept` | → `BatchAccept` | Equivalent | - -### Intentional Differences - -1. **Parent hash validation (check #4):** Deferred to post-execution. The pure - function has no L2 block hashes. 
The caller can verify - `DerivedBlock.ExpectedParentHash` against actual execution results. - -2. **No `BatchUndecided` or `BatchFuture`:** With Holocene active and all L1 - data provided, these states cannot occur. - -3. **Span batch overlaps:** Under Karst, `CheckSpanBatchPrefix` rejects - overlapping span batches as `BatchPast` (upstream treats them as errors - pre-Karst). This is the one behavioral change vs pre-Karst upstream. - -4. **`BatchPast` handling:** In the main loop, `BatchPast` batches are skipped - (`continue`), not flushed. `BatchDrop` and other non-accept results cause a - `break` that flushes the remaining batches from the current channel. This - matches Holocene semantics where past batches are harmless leftovers. +## Batch Validation + +Batch validation is delegated entirely to upstream `derive.CheckBatch`, which +dispatches to `checkSingularBatch`. Since `Next` receives a full +`eth.L2BlockRef` with `Hash`, `checkSingularBatch` validates +`batch.ParentHash != l2SafeHead.Hash` — solving the parent hash problem that +the earlier batch-mode approach had to defer. + +`CheckBatch` expects `l1Blocks[0]` to match `safeHead.L1Origin`. The deriver +computes the starting index dynamically: + +```go +startIdx := safeHead.L1Origin.Number - d.firstL1Num +l1BlocksForCheck := d.l1Origins[startIdx:] +``` ### Attribute Building Equivalence @@ -138,22 +149,24 @@ all pre-Karst forks are already active. Future forks with NUTs must be added. 
## Dependencies on Upstream
 
-The implementation reuses these upstream types and functions:
-- `derive.ParseFrames`, `derive.Channel`, `derive.Frame`
-- `derive.BatchReader`, `derive.GetSingularBatch`, `derive.DeriveSpanBatch`
-- `derive.CheckSpanBatchPrefix`
-- `derive.L1InfoDeposit`
-- `derive.ProcessSystemConfigUpdateLogEvent`
+The implementation reuses these upstream types and functions (aliased as
+`opderive` to avoid a naming conflict with this package):
+- `opderive.ParseFrames`, `opderive.Channel`, `opderive.Frame`
+- `opderive.BatchReader`, `opderive.GetSingularBatch`, `opderive.DeriveSpanBatch`
+- `opderive.CheckBatch`, `opderive.CheckSpanBatchPrefix`
+- `opderive.L1InfoDeposit`
+- `opderive.ProcessSystemConfigUpdateLogEvent`
 - `rollup.Config`, `rollup.ChainSpec`
 - `eth.PayloadAttributes`, `eth.L1BlockRef`, `eth.L2BlockRef`, `eth.SystemConfig`
 
 ## Testing
 
 Unit tests cover each component in isolation:
-- `batches_test.go`: Batch decoding and all `validateBatch` rejection paths
 - `channels_test.go`: Channel assembly, timeout, frame ordering
 - `attributes_test.go`: Payload attribute construction
 - `types_test.go`: Cursor advancement, empty batch detection
-- `derive_test.go`: Integration tests for `PureDerive` (single batch, empty
-  epochs, multi-channel, channel timeout, invalid batch skip, pre-Karst
-  rejection, L1 range validation)
+- `batches_test.go`: Batch decoding from channel data
+- `empty_batch_test.go`: Empty batch generation (same epoch, epoch advance, missing L1)
+- `deriver_test.go`: Iterator integration tests (single batch, incremental L1,
+  empty batches, reorg detection, reorg reset, channel timeout, invalid batch
+  drop, parent hash check, pre-Karst rejection, multi-channel multi-epoch)