2 changes: 1 addition & 1 deletion common/version/version.go
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )
 
-var tag = "v4.5.39"
+var tag = "v4.5.40"
 
 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
63 changes: 42 additions & 21 deletions rollup/internal/controller/relayer/l2_relayer_sanity.go
@@ -5,6 +5,7 @@ import (
 	"math/big"
 
 	"github.com/scroll-tech/da-codec/encoding"
+	"github.com/scroll-tech/go-ethereum/accounts/abi"
 	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/core/types"
 	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
@@ -16,17 +17,17 @@ import (
 // transaction data (calldata and blobs) by parsing them and comparing against database records.
 // This ensures the constructed transaction data is correct and consistent with the database state.
 func (r *Layer2Relayer) sanityChecksCommitBatchCodecV7CalldataAndBlobs(calldata []byte, blobs []*kzg4844.Blob) error {
-	calldataInfo, err := r.parseCommitBatchesCalldata(calldata)
+	calldataInfo, err := parseCommitBatchesCalldata(r.l1RollupABI, calldata)
 	if err != nil {
 		return fmt.Errorf("failed to parse calldata: %w", err)
 	}
 
-	batchesToValidate, err := r.getBatchesFromCalldata(calldataInfo)
+	batchesToValidate, l1MessagesWithBlockNumbers, err := r.getBatchesFromCalldata(calldataInfo)
 	if err != nil {
 		return fmt.Errorf("failed to get batches from database: %w", err)
 	}
 
-	if err := r.validateCalldataAndBlobsAgainstDatabase(calldataInfo, blobs, batchesToValidate); err != nil {
+	if err := r.validateCalldataAndBlobsAgainstDatabase(calldataInfo, blobs, batchesToValidate, l1MessagesWithBlockNumbers); err != nil {
 		return fmt.Errorf("calldata and blobs validation failed: %w", err)
 	}

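(Note: as a usage sketch, a caller would run this check immediately before broadcasting the commit transaction and abort on any mismatch. The `constructCalldataAndBlobs` helper below is a hypothetical stand-in for the relayer's real payload-construction step, not part of this diff.)

	// Hypothetical call site for the sanity check.
	calldata, blobs, err := r.constructCalldataAndBlobs(batchesToSubmit) // placeholder, not in this PR
	if err != nil {
		return err
	}
	// Refuse to send the commitBatches transaction if the constructed
	// payload disagrees with database records.
	if err := r.sanityChecksCommitBatchCodecV7CalldataAndBlobs(calldata, blobs); err != nil {
		return fmt.Errorf("sanity check failed, not sending commit tx: %w", err)
	}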
@@ -45,8 +46,8 @@ type CalldataInfo struct {
 }

 // parseCommitBatchesCalldata parses the commitBatches calldata and extracts key information
-func (r *Layer2Relayer) parseCommitBatchesCalldata(calldata []byte) (*CalldataInfo, error) {
-	method := r.l1RollupABI.Methods["commitBatches"]
+func parseCommitBatchesCalldata(abi *abi.ABI, calldata []byte) (*CalldataInfo, error) {
+	method := abi.Methods["commitBatches"]
 	decoded, err := method.Inputs.Unpack(calldata[4:])
 	if err != nil {
 		return nil, fmt.Errorf("failed to unpack commitBatches calldata: %w", err)
@@ -81,17 +82,17 @@ func (r *Layer2Relayer) parseCommitBatchesCalldata(calldata []byte) (*CalldataIn
 }

 // getBatchesFromCalldata retrieves the relevant batches from database based on calldata information
-func (r *Layer2Relayer) getBatchesFromCalldata(info *CalldataInfo) ([]*dbBatchWithChunks, error) {
+func (r *Layer2Relayer) getBatchesFromCalldata(info *CalldataInfo) ([]*dbBatchWithChunks, map[uint64][]*types.TransactionData, error) {
 	// Get the parent batch to determine the starting point
 	parentBatch, err := r.batchOrm.GetBatchByHash(r.ctx, info.ParentBatchHash.Hex())
 	if err != nil {
-		return nil, fmt.Errorf("failed to get parent batch by hash %s: %w", info.ParentBatchHash.Hex(), err)
+		return nil, nil, fmt.Errorf("failed to get parent batch by hash %s: %w", info.ParentBatchHash.Hex(), err)
 	}
 
 	// Get the last batch to determine the ending point
 	lastBatch, err := r.batchOrm.GetBatchByHash(r.ctx, info.LastBatchHash.Hex())
 	if err != nil {
-		return nil, fmt.Errorf("failed to get last batch by hash %s: %w", info.LastBatchHash.Hex(), err)
+		return nil, nil, fmt.Errorf("failed to get last batch by hash %s: %w", info.LastBatchHash.Hex(), err)
 	}
 
 	// Get all batches in the range (parent+1 to last)
@@ -100,29 +101,46 @@ func (r *Layer2Relayer) getBatchesFromCalldata(info *CalldataInfo) ([]*dbBatchWi

 	// Check if the range is valid
 	if firstBatchIndex > lastBatchIndex {
-		return nil, fmt.Errorf("no batches found in range: first index %d, last index %d", firstBatchIndex, lastBatchIndex)
+		return nil, nil, fmt.Errorf("no batches found in range: first index %d, last index %d", firstBatchIndex, lastBatchIndex)
 	}
 
 	var batchesToValidate []*dbBatchWithChunks
+	l1MessagesWithBlockNumbers := make(map[uint64][]*types.TransactionData)
 	for batchIndex := firstBatchIndex; batchIndex <= lastBatchIndex; batchIndex++ {
 		dbBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, batchIndex)
 		if err != nil {
-			return nil, fmt.Errorf("failed to get batch by index %d: %w", batchIndex, err)
+			return nil, nil, fmt.Errorf("failed to get batch by index %d: %w", batchIndex, err)
 		}
 
 		// Get chunks for this batch
 		dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
 		if err != nil {
-			return nil, fmt.Errorf("failed to get chunks for batch %d: %w", batchIndex, err)
+			return nil, nil, fmt.Errorf("failed to get chunks for batch %d: %w", batchIndex, err)
 		}
 
 		batchesToValidate = append(batchesToValidate, &dbBatchWithChunks{
 			Batch:  dbBatch,
			Chunks: dbChunks,
 		})
-	}
 
-	return batchesToValidate, nil
+		// If there are L1 messages in this batch, retrieve L1 messages with block numbers
+		for _, chunk := range dbChunks {
+			if chunk.TotalL1MessagesPoppedInChunk > 0 {
+				blockWithL1Messages, err := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, chunk.StartBlockNumber, chunk.EndBlockNumber)
+				if err != nil {
+					return nil, nil, fmt.Errorf("failed to get L2 blocks for chunk %d: %w", chunk.Index, err)
+				}
+				for _, block := range blockWithL1Messages {
+					for _, tx := range block.Transactions {
+						if tx.Type == types.L1MessageTxType {
+							l1MessagesWithBlockNumbers[block.Header.Number.Uint64()] = append(l1MessagesWithBlockNumbers[block.Header.Number.Uint64()], tx)
+						}
+					}
+				}
+			}
+		}
+	}
+	return batchesToValidate, l1MessagesWithBlockNumbers, nil
 }
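(Note: the second return value is keyed by L2 block number, and each entry holds that block's L1 messages in block order. A minimal, illustrative consumer, using a block number borrowed from the test below:)

	// Illustrative lookup of the L1 messages recorded for one block.
	if msgs, ok := l1MessagesWithBlockNumbers[11488527]; ok {
		for _, msg := range msgs {
			fmt.Printf("L1 message nonce=%d gas=%d\n", msg.Nonce, msg.Gas)
		}
	}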

 // validateDatabaseConsistency performs comprehensive validation of database records
@@ -299,7 +317,7 @@ func (r *Layer2Relayer) validateSingleChunkConsistency(chunk *orm.Chunk, prevChu
 }

 // validateCalldataAndBlobsAgainstDatabase validates calldata and blobs against database records
-func (r *Layer2Relayer) validateCalldataAndBlobsAgainstDatabase(calldataInfo *CalldataInfo, blobs []*kzg4844.Blob, batchesToValidate []*dbBatchWithChunks) error {
+func (r *Layer2Relayer) validateCalldataAndBlobsAgainstDatabase(calldataInfo *CalldataInfo, blobs []*kzg4844.Blob, batchesToValidate []*dbBatchWithChunks, l1MessagesWithBlockNumbers map[uint64][]*types.TransactionData) error {
 	// Validate blobs
 	if len(blobs) == 0 {
 		return fmt.Errorf("no blobs provided")
@@ -338,7 +356,7 @@ func (r *Layer2Relayer) validateCalldataAndBlobsAgainstDatabase(calldataInfo *Ca
 	// Validate each blob against its corresponding batch
 	for i, blob := range blobs {
 		dbBatch := batchesToValidate[i].Batch
-		if err := r.validateSingleBlobAgainstBatch(blob, dbBatch, codec); err != nil {
+		if err := r.validateSingleBlobAgainstBatch(blob, dbBatch, codec, l1MessagesWithBlockNumbers); err != nil {
 			return fmt.Errorf("blob validation failed for batch %d: %w", dbBatch.Index, err)
 		}
 	}
@@ -347,15 +365,15 @@
 }

 // validateSingleBlobAgainstBatch validates a single blob against its batch data
-func (r *Layer2Relayer) validateSingleBlobAgainstBatch(blob *kzg4844.Blob, dbBatch *orm.Batch, codec encoding.Codec) error {
+func (r *Layer2Relayer) validateSingleBlobAgainstBatch(blob *kzg4844.Blob, dbBatch *orm.Batch, codec encoding.Codec, l1MessagesWithBlockNumbers map[uint64][]*types.TransactionData) error {
 	// Decode blob payload
 	payload, err := codec.DecodeBlob(blob)
 	if err != nil {
 		return fmt.Errorf("failed to decode blob: %w", err)
 	}
 
 	// Validate batch hash
-	daBatch, err := assembleDABatchFromPayload(payload, dbBatch, codec)
+	daBatch, err := assembleDABatchFromPayload(payload, dbBatch, codec, l1MessagesWithBlockNumbers)
 	if err != nil {
 		return fmt.Errorf("failed to assemble batch from payload: %w", err)
 	}
@@ -401,8 +419,8 @@ func (r *Layer2Relayer) validateMessageQueueConsistency(batchIndex uint64, chunk
 	return nil
 }
 
-func assembleDABatchFromPayload(payload encoding.DABlobPayload, dbBatch *orm.Batch, codec encoding.Codec) (encoding.DABatch, error) {
-	blocks, err := assembleBlocksFromPayload(payload)
+func assembleDABatchFromPayload(payload encoding.DABlobPayload, dbBatch *orm.Batch, codec encoding.Codec, l1MessagesWithBlockNumbers map[uint64][]*types.TransactionData) (encoding.DABatch, error) {
+	blocks, err := assembleBlocksFromPayload(payload, l1MessagesWithBlockNumbers)
 	if err != nil {
 		return nil, fmt.Errorf("failed to assemble blocks from payload batch_index=%d codec_version=%d parent_batch_hash=%s: %w", dbBatch.Index, dbBatch.CodecVersion, dbBatch.ParentBatchHash, err)
 	}
@@ -427,7 +445,7 @@ func assembleDABatchFromPayload(payload encoding.DABlobPayload, dbBatch *orm.Bat
 	return daBatch, nil
 }
 
-func assembleBlocksFromPayload(payload encoding.DABlobPayload) ([]*encoding.Block, error) {
+func assembleBlocksFromPayload(payload encoding.DABlobPayload, l1MessagesWithBlockNumbers map[uint64][]*types.TransactionData) ([]*encoding.Block, error) {
 	daBlocks := payload.Blocks()
 	txns := payload.Transactions()
 	if len(daBlocks) != len(txns) {
@@ -442,8 +460,11 @@
 				BaseFee:  daBlocks[i].BaseFee(),
 				GasLimit: daBlocks[i].GasLimit(),
 			},
-			Transactions: encoding.TxsToTxsData(txns[i]),
 		}
+		if l1Messages, ok := l1MessagesWithBlockNumbers[daBlocks[i].Number()]; ok {
+			blocks[i].Transactions = l1Messages
+		}
+		blocks[i].Transactions = append(blocks[i].Transactions, encoding.TxsToTxsData(txns[i])...)
 	}
 	return blocks, nil
 }
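(Note: ordering is the point of this change: codec v7 blob payloads carry only L2 transactions, so the database-sourced L1 messages must be re-injected ahead of the decoded transactions in each rebuilt block. A minimal driver sketch under that assumption; `blob` and the surrounding error handling are illustrative:)

	// Illustrative: rebuild blocks from a decoded blob payload,
	// re-injecting L1 messages keyed by block number.
	payload, err := codec.DecodeBlob(blob)
	if err != nil {
		return err
	}
	blocks, err := assembleBlocksFromPayload(payload, l1MessagesWithBlockNumbers)
	if err != nil {
		return err
	}
	// Each rebuilt block lists its L1 messages first, followed by the
	// L2 transactions decoded from the blob.
	_ = blocks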
131 changes: 131 additions & 0 deletions rollup/internal/controller/relayer/l2_relayer_sanity_test.go
@@ -0,0 +1,131 @@
package relayer

import (
	"encoding/json"
	"fmt"
	"math/big"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"

	bridgeabi "scroll-tech/rollup/abi"
	"scroll-tech/rollup/internal/orm"
)

func TestAssembleDABatch(t *testing.T) {
	calldataHex := "0x9bbaa2ba0000000000000000000000000000000000000000000000000000000000000008146793a7d71663cd87ec9713f72242a3798d5e801050130a3e16efaa09fb803e58af2593dadc8b9fff75a2d27199cb97ec115bade109b8d691a512608ef180eb"
	blobsPath := filepath.Join("../../../testdata", "commit_batches_blobs.json")

	calldata, err := hexutil.Decode(strings.TrimSpace(calldataHex))
	assert.NoErrorf(t, err, "failed to decode calldata: %s", calldataHex)

	blobs, err := loadBlobsFromJSON(blobsPath)
	assert.NoErrorf(t, err, "failed to read blobs: %s", blobsPath)
	assert.NotEmpty(t, blobs, "no blobs provided")

	info, err := parseCommitBatchesCalldata(bridgeabi.ScrollChainABI, calldata)
	assert.NoError(t, err)

	codec, err := encoding.CodecFromVersion(encoding.CodecVersion(info.Version))
	assert.NoErrorf(t, err, "failed to get codec from version %d", info.Version)

	parentBatchHash := info.ParentBatchHash
	index := uint64(113571)

	t.Logf("calldata parsed: version=%d parentBatchHash=%s lastBatchHash=%s blobs=%d", info.Version, info.ParentBatchHash.Hex(), info.LastBatchHash.Hex(), len(blobs))

	fromAddr := common.HexToAddress("0x61d8d3e7f7c656493d1d76aaa1a836cedfcbc27b")
	toAddr := common.HexToAddress("0xba50f5340fb9f3bd074bd638c9be13ecb36e603d")
	l1MessagesWithBlockNumbers := map[uint64][]*types.TransactionData{
		11488527: {
			&types.TransactionData{
				Type:  types.L1MessageTxType,
				Nonce: 1072515,
				Gas:   340000,
				To:    &toAddr,
				Value: (*hexutil.Big)(big.NewInt(0)),
				Data:  "0x8ef1332e00000000000000000000000081f3843af1fbab046b771f0d440c04ebb2b7513f000000000000000000000000cec03800074d0ac0854bf1f34153cc4c8baeeb1e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000105d8300000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000084f03efa3700000000000000000000000000000000000000000000000000000000000024730000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000171bdb6e3062daaee1845ba4cb1902169feb5a9b9555a882d45637d3bd29eb83500000000000000000000000000000000000000000000000000000000",
				From:  fromAddr,
			},
		},
		11488622: {
			&types.TransactionData{
				Type:  types.L1MessageTxType,
				Nonce: 1072516,
				Gas:   340000,
				To:    &toAddr,
				Value: (*hexutil.Big)(big.NewInt(0)),
				Data:  "0x8ef1332e00000000000000000000000081f3843af1fbab046b771f0d440c04ebb2b7513f000000000000000000000000cec03800074d0ac0854bf1f34153cc4c8baeeb1e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000105d8400000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000084f03efa370000000000000000000000000000000000000000000000000000000000002474000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000012aeb01535c1845b689bfce22e53029ec59ec75ea20f660d7c5fcd99f55b75b6900000000000000000000000000000000000000000000000000000000",
				From:  fromAddr,
			},
		},
		11489190: {
			&types.TransactionData{
				Type:  types.L1MessageTxType,
				Nonce: 1072517,
				Gas:   168000,
				To:    &toAddr,
				Value: (*hexutil.Big)(big.NewInt(0)),
				Data:  "0x8ef1332e0000000000000000000000003b1399523f819ea4c4d3e76dddefaf4226c6ba570000000000000000000000003b1399523f819ea4c4d3e76dddefaf4226c6ba5700000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000105d8500000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000",
				From:  fromAddr,
			},
		},
	}

	for i, blob := range blobs {
		payload, decErr := codec.DecodeBlob(blob)
		assert.NoErrorf(t, decErr, "blob[%d] decode failed", i)
		if decErr != nil {
			continue
		}

		dbBatch := &orm.Batch{
			Index:           index,
			ParentBatchHash: parentBatchHash.Hex(),
		}

		daBatch, asmErr := assembleDABatchFromPayload(payload, dbBatch, codec, l1MessagesWithBlockNumbers)
		assert.NoErrorf(t, asmErr, "blob[%d] assemble failed", i)
		if asmErr != nil {
			// daBatch is nil on error; skip the hash chaining below.
			continue
		}
		t.Logf("blob[%d] DABatch hash=%s", i, daBatch.Hash().Hex())

		index++
		parentBatchHash = daBatch.Hash()
	}
}

func loadBlobsFromJSON(path string) ([]*kzg4844.Blob, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}

	var arr []hexutil.Bytes
	if err := json.Unmarshal(raw, &arr); err != nil {
		return nil, fmt.Errorf("invalid JSON; expect [\"0x...\"] array: %w", err)
	}

	out := make([]*kzg4844.Blob, 0, len(arr))
	var empty kzg4844.Blob
	want := len(empty)

	for i, b := range arr {
		if len(b) != want {
			return nil, fmt.Errorf("blob[%d] length mismatch: got %d, want %d", i, len(b), want)
		}
		blob := new(kzg4844.Blob)
		copy(blob[:], b)
		out = append(out, blob)
	}
	return out, nil
}
7 changes: 7 additions & 0 deletions rollup/testdata/commit_batches_blobs.json

Large diffs are not rendered by default.