Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 28 additions & 1 deletion common/batch/batch_header.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,11 @@ type (
const (
expectedLengthV0 = 249
expectedLengthV1 = 257
// V2 reuses the V1 wire format (257 bytes). The only semantic
// difference is that the 32-byte field at offset 57 stores
// keccak256(blobhash(0) || ... || blobhash(N-1)) instead of a
// single blob versioned hash.
expectedLengthV2 = 257

BatchHeaderVersion0 = 0
BatchHeaderVersion1 = 1
Expand Down Expand Up @@ -44,7 +49,7 @@ func (b BatchHeaderBytes) validate() error {
return ErrInvalidBatchHeaderLength
}
case BatchHeaderVersion2:
if len(b) != expectedLengthV1 {
if len(b) != expectedLengthV2 {
return ErrInvalidBatchHeaderLength
}
default:
Expand Down Expand Up @@ -99,10 +104,32 @@ func (b BatchHeaderBytes) DataHash() (common.Hash, error) {
return common.BytesToHash(b[25:57]), nil
}

// BlobVersionedHash returns the EIP-4844 blob versioned hash recorded at
// offset [57:89]. This is only meaningful for V0/V1 batches, where the field
// holds the single blob's versioned hash. For V2 batches the same offset
// holds an aggregated hash; callers must use BlobHashesHash instead.
func (b BatchHeaderBytes) BlobVersionedHash() (common.Hash, error) {
	if err := b.validate(); err != nil {
		return common.Hash{}, err
	}
	// validate() has already checked the header, but propagate any error from
	// Version() instead of silently discarding it with a blank identifier.
	version, err := b.Version()
	if err != nil {
		return common.Hash{}, err
	}
	if version >= BatchHeaderVersion2 {
		return common.Hash{}, errors.New("BlobVersionedHash is not available for V2+; use BlobHashesHash")
	}
	return common.BytesToHash(b[57:89]), nil
}

// BlobHashesHash returns the aggregated blob hash recorded at offset [57:89]
// for V2+ batches, defined as keccak256(blobhash(0) || ... || blobhash(N-1)).
// V0/V1 batches do not aggregate and will return an error.
func (b BatchHeaderBytes) BlobHashesHash() (common.Hash, error) {
	if err := b.validate(); err != nil {
		return common.Hash{}, err
	}
	// validate() has already checked the header, but propagate any error from
	// Version() instead of silently discarding it with a blank identifier.
	version, err := b.Version()
	if err != nil {
		return common.Hash{}, err
	}
	if version < BatchHeaderVersion2 {
		return common.Hash{}, errors.New("BlobHashesHash is only available for V2+; use BlobVersionedHash")
	}
	return common.BytesToHash(b[57:89]), nil
}

Expand Down
86 changes: 86 additions & 0 deletions common/batch/batch_header_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
package batch

import (
"math/big"
"testing"

"github.com/morph-l2/go-ethereum/common"
"github.com/stretchr/testify/require"
)

// TestBatchHeaderV2 exercises the V2 header variant: it reuses the V1 wire
// layout (257 bytes) but the 32-byte field at offset 57 carries an aggregated
// blob hash (keccak256(blobhash(0)||...||blobhash(N-1))) rather than a single
// versioned hash. Parsing helpers must route callers accordingly.
func TestBatchHeaderV2(t *testing.T) {
	wantAgg := common.BigToHash(big.NewInt(0xABCDEF))

	// Encode a V1 header (identical byte layout) and then overwrite the
	// version byte with V2. This mirrors the on-chain behavior where
	// tx-submitter produces a V2 header with the aggregated hash at offset 57.
	header := BatchHeaderV1{
		BatchHeaderV0: BatchHeaderV0{
			BatchIndex:             42,
			L1MessagePopped:        1,
			TotalL1MessagePopped:   3,
			DataHash:               common.BigToHash(big.NewInt(0x11)),
			BlobVersionedHash:      wantAgg,
			PrevStateRoot:          common.BigToHash(big.NewInt(0x22)),
			PostStateRoot:          common.BigToHash(big.NewInt(0x33)),
			WithdrawalRoot:         common.BigToHash(big.NewInt(0x44)),
			SequencerSetVerifyHash: common.BigToHash(big.NewInt(0x55)),
			ParentBatchHash:        common.BigToHash(big.NewInt(0x66)),
		},
		LastBlockNumber: 9_876,
	}.Bytes()
	header[0] = BatchHeaderVersion2

	gotVersion, err := header.Version()
	require.NoError(t, err)
	require.EqualValues(t, BatchHeaderVersion2, gotVersion)

	gotIndex, err := header.BatchIndex()
	require.NoError(t, err)
	require.EqualValues(t, 42, gotIndex)

	gotLastBlock, err := header.LastBlockNumber()
	require.NoError(t, err)
	require.EqualValues(t, 9_876, gotLastBlock)

	// The legacy accessor must refuse V2 headers so callers are routed to
	// BlobHashesHash instead of silently misreading the aggregated field.
	_, err = header.BlobVersionedHash()
	require.Error(t, err)

	gotAgg, err := header.BlobHashesHash()
	require.NoError(t, err)
	require.EqualValues(t, wantAgg, gotAgg)

	// A V2 header with the wrong length must fail validate().
	truncated := make(BatchHeaderBytes, expectedLengthV2-1)
	truncated[0] = BatchHeaderVersion2
	_, err = truncated.BatchIndex()
	require.ErrorIs(t, err, ErrInvalidBatchHeaderLength)
}

// TestBlobHashesHashUnavailableForLegacy guards the inverse direction: V0 and
// V1 headers must reject BlobHashesHash so that callers reach for the correct
// accessor.
func TestBlobHashesHashUnavailableForLegacy(t *testing.T) {
	headerV0 := BatchHeaderV0{
		BatchIndex:        1,
		BlobVersionedHash: EmptyVersionedHash,
	}.Bytes()
	if _, err := headerV0.BlobHashesHash(); err == nil {
		t.Fatal("expected BlobHashesHash to fail for a V0 header")
	}

	headerV1 := BatchHeaderV1{
		BatchHeaderV0: BatchHeaderV0{
			BatchIndex:        2,
			BlobVersionedHash: EmptyVersionedHash,
		},
		LastBlockNumber: 10,
	}.Bytes()
	if _, err := headerV1.BlobHashesHash(); err == nil {
		t.Fatal("expected BlobHashesHash to fail for a V1 header")
	}
}
128 changes: 84 additions & 44 deletions common/batch/blob.go
Original file line number Diff line number Diff line change
Expand Up @@ -118,77 +118,117 @@ func DecodeTxsFromBytes(txsBytes []byte) (eth.Transactions, error) {
txs := make(eth.Transactions, 0)
for {
var (
firstByte byte
fullTxBytes []byte
innerTx eth.TxData
err error
typeByte byte
err error
)
if err = binary.Read(reader, binary.BigEndian, &firstByte); err != nil {
// if the blob byte array is completely consumed, then break the loop
if err = binary.Read(reader, binary.BigEndian, &typeByte); err != nil {
if err == io.EOF {
break
}
return nil, err
}
// zero byte is found after valid tx bytes, break the loop
if firstByte == 0 {
if typeByte == 0 {
break
}

switch firstByte {
case eth.AccessListTxType:
if err := binary.Read(reader, binary.BigEndian, &firstByte); err != nil {
switch typeByte {
case eth.AccessListTxType, eth.DynamicFeeTxType, eth.SetCodeTxType:
tx, err := decodeTypedTx(typeByte, reader)
if err != nil {
return nil, err
}
innerTx = new(eth.AccessListTx)
case eth.DynamicFeeTxType:
if err := binary.Read(reader, binary.BigEndian, &firstByte); err != nil {
txs = append(txs, tx)

case eth.MorphTxType:
tx, err := decodeMorphTx(reader)
if err != nil {
return nil, err
}
innerTx = new(eth.DynamicFeeTx)
case eth.SetCodeTxType:
if err := binary.Read(reader, binary.BigEndian, &firstByte); err != nil {
return nil, err
txs = append(txs, tx)

default:
if typeByte <= 0xf7 {
return nil, fmt.Errorf("not supported tx type: %d", typeByte)
}
innerTx = new(eth.SetCodeTx)
case eth.MorphTxType:
if err := binary.Read(reader, binary.BigEndian, &firstByte); err != nil {
fullTxBytes, err := extractInnerTxFullBytes(typeByte, reader)
if err != nil {
return nil, err
}
innerTx = new(eth.MorphTx)
default:
if firstByte <= 0xf7 { // legacy tx first byte must be greater than 0xf7(247)
return nil, fmt.Errorf("not supported tx type: %d", firstByte)
var inner eth.LegacyTx
if err = rlp.DecodeBytes(fullTxBytes, &inner); err != nil {
return nil, err
}
innerTx = new(eth.LegacyTx)
txs = append(txs, eth.NewTx(&inner))
}
}
return txs, nil
}

// we support the tx types of LegacyTxType/AccessListTxType/DynamicFeeTxType
//if firstByte == eth.AccessListTxType || firstByte == eth.DynamicFeeTxType {
// // the firstByte here is used to indicate tx type, so skip it
// if err := binary.Read(reader, binary.BigEndian, &firstByte); err != nil {
// return nil, err
// }
//} else if firstByte <= 0xf7 { // legacy tx first byte must be greater than 0xf7(247)
// return nil, fmt.Errorf("not supported tx type: %d", firstByte)
//}
fullTxBytes, err = extractInnerTxFullBytes(firstByte, reader)
if err != nil {
return nil, err
}
if err = rlp.DecodeBytes(fullTxBytes, innerTx); err != nil {
// decodeTypedTx decodes a standard EIP-2718 typed tx (AccessList, DynamicFee, SetCode)
// from the reader. The type byte has already been consumed; the next byte is the RLP prefix.
func decodeTypedTx(typeByte byte, reader io.Reader) (*eth.Transaction, error) {
	var prefix byte
	if err := binary.Read(reader, binary.BigEndian, &prefix); err != nil {
		return nil, err
	}
	payload, err := extractInnerTxFullBytes(prefix, reader)
	if err != nil {
		return nil, err
	}

	// Reassemble the canonical typed-tx encoding: typeByte || rlp(payload).
	encoded := make([]byte, 0, 1+len(payload))
	encoded = append(append(encoded, typeByte), payload...)

	tx := new(eth.Transaction)
	if err := tx.UnmarshalBinary(encoded); err != nil {
		return nil, err
	}
	return tx, nil
}

// decodeMorphTx decodes a MorphTx from the reader. The type byte (0x7f) has already
// been consumed. MorphTx has two wire formats:
//   - V0: type(0x7f) || RLP(fields) — next byte is RLP prefix (>= 0xC0)
//   - V1: type(0x7f) || version(0x01) || RLP(fields) — next byte is version, then RLP prefix
func decodeMorphTx(reader io.Reader) (*eth.Transaction, error) {
	var nextByte byte
	if err := binary.Read(reader, binary.BigEndian, &nextByte); err != nil {
		return nil, err
	}

	// Distinguish V0 from V1+: an RLP list prefix is always >= 0xC0, so a
	// smaller non-zero byte must be a version marker preceding the payload.
	var versionPrefix []byte
	rlpFirstByte := nextByte
	if nextByte != 0 && nextByte < 0xC0 {
		versionPrefix = []byte{nextByte}
		if err := binary.Read(reader, binary.BigEndian, &rlpFirstByte); err != nil {
			return nil, err
		}
	}

	rlpBytes, err := extractInnerTxFullBytes(rlpFirstByte, reader)
	if err != nil {
		return nil, err
	}

	// Reassemble the canonical encoding: 0x7f || [version] || RLP(fields),
	// then let the tx type's own decoder interpret it.
	txBinary := make([]byte, 0, 1+len(versionPrefix)+len(rlpBytes))
	txBinary = append(txBinary, eth.MorphTxType)
	txBinary = append(txBinary, versionPrefix...)
	txBinary = append(txBinary, rlpBytes...)

	var tx eth.Transaction
	if err := tx.UnmarshalBinary(txBinary); err != nil {
		return nil, err
	}
	return &tx, nil
}

func extractInnerTxFullBytes(firstByte byte, reader io.Reader) ([]byte, error) {
//the occupied byte length for storing the size of the following rlp encoded bytes
sizeByteLen := firstByte - 0xf7
if sizeByteLen > 4 {
return nil, fmt.Errorf("invalid RLP size byte length: %d (firstByte=0x%x)", sizeByteLen, firstByte)
}

// the size of the following rlp encoded bytes
sizeByte := make([]byte, sizeByteLen)
if err := binary.Read(reader, binary.BigEndian, sizeByte); err != nil {
return nil, err
Expand Down
9 changes: 5 additions & 4 deletions node/derivation/batch_info.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (
geth "github.com/morph-l2/go-ethereum/eth"
"github.com/morph-l2/go-ethereum/eth/catalyst"

commonbatch "morph-l2/common/batch"
"morph-l2/node/types"
"morph-l2/node/zstd"
)
Expand Down Expand Up @@ -81,7 +82,7 @@ func (bi *BatchInfo) ParseBatch(batch geth.RPCRollupBatch) error {
if len(batch.Sidecar.Blobs) == 0 {
return fmt.Errorf("blobs length can not be zero")
}
parentBatchHeader := types.BatchHeaderBytes(batch.ParentBatchHeader)
parentBatchHeader := commonbatch.BatchHeaderBytes(batch.ParentBatchHeader)
parentBatchIndex, err := parentBatchHeader.BatchIndex()
if err != nil {
return fmt.Errorf("decode batch header index error:%v", err)
Expand All @@ -103,10 +104,10 @@ func (bi *BatchInfo) ParseBatch(batch geth.RPCRollupBatch) error {
// must concatenate all blob bodies first and decompress once; per-blob
// decompression would fail on the second blob since it is not a
// standalone zstd stream.
compressed := make([]byte, 0, len(batch.Sidecar.Blobs)*types.MaxBlobBytesSize)
compressed := make([]byte, 0, len(batch.Sidecar.Blobs)*commonbatch.MaxBlobBytesSize)
for i := range batch.Sidecar.Blobs {
blobCopy := batch.Sidecar.Blobs[i]
blobData, err := types.RetrieveBlobBytes(&blobCopy)
blobData, err := commonbatch.RetrieveBlobBytes(&blobCopy)
if err != nil {
return err
}
Expand Down Expand Up @@ -166,7 +167,7 @@ func (bi *BatchInfo) ParseBatch(batch geth.RPCRollupBatch) error {
txsData = batchBytes[bcLen:]
}

data, err := types.DecodeTxsFromBytes(txsData)
data, err := commonbatch.DecodeTxsFromBytes(txsData)
if err != nil {
return err
}
Expand Down
Loading
Loading