diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 3a1f81e880..2d8b99ef5b 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022, Offchain Labs, Inc. +// Copyright 2021-2025, Offchain Labs, Inc. // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md package arbnode @@ -105,6 +105,7 @@ type BatchPoster struct { inbox *InboxTracker streamer *TransactionStreamer arbOSVersionGetter execution.ArbOSVersionGetter + chainConfig *params.ChainConfig config BatchPosterConfigFetcher seqInbox *bridgegen.SequencerInbox syncMonitor *SyncMonitor @@ -360,6 +361,7 @@ type BatchPosterOpts struct { DAPWriters []daprovider.Writer ParentChainID *big.Int DAPReaders *daprovider.DAProviderRegistry + ChainConfig *params.ChainConfig } func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, error) { @@ -406,6 +408,7 @@ func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, e inbox: opts.Inbox, streamer: opts.Streamer, arbOSVersionGetter: opts.VersionGetter, + chainConfig: opts.ChainConfig, syncMonitor: opts.SyncMonitor, config: opts.Config, seqInbox: seqInbox, @@ -904,6 +907,7 @@ type batchSegments struct { recompressionLevel int newUncompressedSize int totalUncompressedSize int + maxUncompressedSize int lastCompressedSize int trailingHeaders int // how many trailing segments are headers isDone bool @@ -984,12 +988,13 @@ func (b *BatchPoster) newBatchSegments(ctx context.Context, firstDelayed uint64, recompressionLevel = compressionLevel } return &batchSegments{ - compressedBuffer: compressedBuffer, - compressedWriter: brotli.NewWriterLevel(compressedBuffer, compressionLevel), - sizeLimit: maxSize, - recompressionLevel: recompressionLevel, - rawSegments: make([][]byte, 0, 128), - delayedMsg: firstDelayed, + compressedBuffer: compressedBuffer, + compressedWriter: brotli.NewWriterLevel(compressedBuffer, compressionLevel), + sizeLimit: maxSize, + recompressionLevel: recompressionLevel, + rawSegments: make([][]byte, 0, 128), + delayedMsg: firstDelayed, + maxUncompressedSize: int(b.chainConfig.MaxUncompressedBatchSize()), // #nosec G115 }, nil } @@ -1004,8 +1009,8 @@ func (s *batchSegments) recompressAll() error { return err } } - if s.totalUncompressedSize > arbstate.MaxDecompressedLen { - return fmt.Errorf("batch size %v exceeds maximum decompressed length %v", s.totalUncompressedSize, arbstate.MaxDecompressedLen) + if s.totalUncompressedSize > s.maxUncompressedSize { + return fmt.Errorf("batch size %v exceeds maximum uncompressed length %v", s.totalUncompressedSize, s.maxUncompressedSize) } if len(s.rawSegments) >= arbstate.MaxSegmentsPerSequencerMessage { return fmt.Errorf("number of raw segments %v excees maximum number %v", len(s.rawSegments), arbstate.MaxSegmentsPerSequencerMessage) @@ -1015,10 +1020,10 @@ func (s *batchSegments) recompressAll() error { func (s *batchSegments) testForOverflow(isHeader bool) (bool, error) { // we've reached the max decompressed size - if s.totalUncompressedSize > arbstate.MaxDecompressedLen { - log.Info("Batch full: max decompressed length exceeded", + if s.totalUncompressedSize > s.maxUncompressedSize { + log.Info("Batch full: max uncompressed length exceeded", "current", s.totalUncompressedSize, - "max", arbstate.MaxDecompressedLen, + "max", s.maxUncompressedSize, "isHeader", isHeader) return true, nil } @@ -1929,7 +1934,7 @@ func (b *BatchPoster) MaybePostSequencerBatch(ctx context.Context) (bool, error) 
b.building.muxBackend.seqMsg = seqMsg b.building.muxBackend.delayedInboxStart = batchPosition.DelayedMessageCount b.building.muxBackend.SetPositionWithinMessage(0) - simMux := arbstate.NewInboxMultiplexer(b.building.muxBackend, batchPosition.DelayedMessageCount, dapReaders, daprovider.KeysetValidate) + simMux := arbstate.NewInboxMultiplexer(b.building.muxBackend, batchPosition.DelayedMessageCount, dapReaders, daprovider.KeysetValidate, b.chainConfig) log.Debug("Begin checking the correctness of batch against inbox multiplexer", "startMsgSeqNum", batchPosition.MessageCount, "endMsgSeqNum", b.building.msgCount-1) for i := batchPosition.MessageCount; i < b.building.msgCount; i++ { msg, err := simMux.Pop(ctx) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index 32c73e60c2..7898091012 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022, Offchain Labs, Inc. +// Copyright 2021-2025, Offchain Labs, Inc. // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md package arbnode @@ -781,7 +781,13 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client *ethclien ctx: ctx, client: client, } - multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, t.dapReaders, daprovider.KeysetValidate) + multiplexer := arbstate.NewInboxMultiplexer( + backend, + prevbatchmeta.DelayedMessageCount, + t.dapReaders, + daprovider.KeysetValidate, + t.txStreamer.chainConfig, + ) batchMessageCounts := make(map[uint64]arbutil.MessageIndex) currentPos := prevbatchmeta.MessageCount + 1 for { diff --git a/arbnode/mel/extraction/message_extraction_function.go b/arbnode/mel/extraction/message_extraction_function.go index f55c414bfb..552a11d0fa 100644 --- a/arbnode/mel/extraction/message_extraction_function.go +++ b/arbnode/mel/extraction/message_extraction_function.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode/mel" "github.com/offchainlabs/nitro/arbos/arbostypes" @@ -60,11 +61,13 @@ func ExtractMessages( delayedMsgDatabase DelayedMessageDatabase, txFetcher TransactionFetcher, logsFetcher LogsFetcher, + chainConfig *params.ChainConfig, ) (*mel.State, []*arbostypes.MessageWithMetadata, []*mel.DelayedInboxMessage, []*mel.BatchMetadata, error) { return extractMessagesImpl( ctx, inputState, parentChainHeader, + chainConfig, dataProviders, delayedMsgDatabase, txFetcher, @@ -86,6 +89,7 @@ func extractMessagesImpl( ctx context.Context, inputState *mel.State, parentChainHeader *types.Header, + chainConfig *params.ChainConfig, dataProviders *daprovider.DAProviderRegistry, delayedMsgDatabase DelayedMessageDatabase, txFetcher TransactionFetcher, @@ -205,6 +209,7 @@ func extractMessagesImpl( serialized, dataProviders, daprovider.KeysetValidate, + chainConfig, ) if err != nil { return nil, nil, nil, nil, err diff --git a/arbnode/mel/extraction/message_extraction_function_test.go b/arbnode/mel/extraction/message_extraction_function_test.go index f265caca0b..8642ac8e8b 100644 --- a/arbnode/mel/extraction/message_extraction_function_test.go +++ b/arbnode/mel/extraction/message_extraction_function_test.go @@ -12,10 +12,12 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" 
"github.com/offchainlabs/nitro/arbnode/mel" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/daprovider" ) @@ -31,7 +33,7 @@ func TestExtractMessages(t *testing.T) { lookupDelayedMsgs func(context.Context, *mel.State, *types.Header, TransactionFetcher, LogsFetcher) ([]*mel.DelayedInboxMessage, error) serializer func(context.Context, *mel.SequencerInboxBatch, *types.Transaction, LogsFetcher) ([]byte, error) parseReport func(io.Reader) (*big.Int, common.Address, common.Hash, uint64, *big.Int, uint64, error) - parseSequencerMsg func(context.Context, uint64, common.Hash, []byte, arbstate.DapReaderSource, daprovider.KeysetValidationMode) (*arbstate.SequencerMessage, error) + parseSequencerMsg func(context.Context, uint64, common.Hash, []byte, arbstate.DapReaderSource, daprovider.KeysetValidationMode, *params.ChainConfig) (*arbstate.SequencerMessage, error) extractBatchMessages func(context.Context, *mel.State, *arbstate.SequencerMessage, DelayedMessageDatabase) ([]*arbostypes.MessageWithMetadata, error) expectedError string expectedMsgCount uint64 @@ -152,6 +154,7 @@ func TestExtractMessages(t *testing.T) { nil, txFetcher, blockLogsFetcher, + chaininfo.ArbitrumDevTestChainConfig(), ) } else { // Test the internal extractMessagesImpl function @@ -159,6 +162,7 @@ func TestExtractMessages(t *testing.T) { ctx, melState, header, + chaininfo.ArbitrumDevTestChainConfig(), nil, nil, txFetcher, @@ -317,6 +321,7 @@ func successfulParseSequencerMsg( data []byte, dapReaders arbstate.DapReaderSource, keysetValidationMode daprovider.KeysetValidationMode, + chainConfig *params.ChainConfig, ) (*arbstate.SequencerMessage, error) { return nil, nil } @@ -328,6 +333,7 @@ func failingParseSequencerMsg( data []byte, dapReaders arbstate.DapReaderSource, keysetValidationMode daprovider.KeysetValidationMode, + chainConfig *params.ChainConfig, ) (*arbstate.SequencerMessage, error) { return nil, errors.New("failed to parse sequencer message") } diff --git a/arbnode/mel/extraction/types.go b/arbnode/mel/extraction/types.go index 4a4bafbda8..3294e3d5ea 100644 --- a/arbnode/mel/extraction/types.go +++ b/arbnode/mel/extraction/types.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode/mel" "github.com/offchainlabs/nitro/arbos/arbostypes" @@ -62,6 +63,7 @@ type sequencerMessageParserFunc func( data []byte, dapReaders arbstate.DapReaderSource, keysetValidationMode daprovider.KeysetValidationMode, + chainConfig *params.ChainConfig, ) (*arbstate.SequencerMessage, error) // Defines a function that can extract messages from a batch. 
diff --git a/arbnode/mel/runner/mel.go b/arbnode/mel/runner/mel.go index e3c6480526..40fb278b92 100644 --- a/arbnode/mel/runner/mel.go +++ b/arbnode/mel/runner/mel.go @@ -11,6 +11,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbnode/mel" @@ -42,6 +43,7 @@ type ParentChainReader interface { type MessageExtractor struct { stopwaiter.StopWaiter parentChainReader ParentChainReader + chainConfig *params.ChainConfig addrs *chaininfo.RollupAddresses melDB *Database msgConsumer mel.MessageConsumer @@ -56,6 +58,7 @@ type MessageExtractor struct { // to be used when extracting messages from the parent chain. func NewMessageExtractor( parentChainReader ParentChainReader, + chainConfig *params.ChainConfig, rollupAddrs *chaininfo.RollupAddresses, melDB *Database, msgConsumer mel.MessageConsumer, @@ -72,6 +75,7 @@ func NewMessageExtractor( } return &MessageExtractor{ parentChainReader: parentChainReader, + chainConfig: chainConfig, addrs: rollupAddrs, melDB: melDB, msgConsumer: msgConsumer, @@ -179,6 +183,7 @@ func (m *MessageExtractor) Act(ctx context.Context) (time.Duration, error) { m.melDB, nil, // will be removed when syncing with the core mel runner code as that has been refactored into separate files nil, // will be removed when syncing with the core mel runner code as that has been refactored into separate files + m.chainConfig, ) if err != nil { return m.retryInterval, err diff --git a/arbnode/mel/runner/mel_test.go b/arbnode/mel/runner/mel_test.go index c7d25979e7..a16b3aac34 100644 --- a/arbnode/mel/runner/mel_test.go +++ b/arbnode/mel/runner/mel_test.go @@ -40,6 +40,7 @@ func TestMessageExtractor(t *testing.T) { messageConsumer := &mockMessageConsumer{} extractor, err := NewMessageExtractor( parentChainReader, + chaininfo.ArbitrumDevTestChainConfig(), &chaininfo.RollupAddresses{}, melDb, messageConsumer, diff --git a/arbnode/node.go b/arbnode/node.go index 3e28962a75..f691dab47a 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022, Offchain Labs, Inc. +// Copyright 2021-2025, Offchain Labs, Inc. 
// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md package arbnode @@ -979,6 +979,7 @@ func getBatchPoster( ctx context.Context, config *Config, configFetcher ConfigFetcher, + l2Config *params.ChainConfig, txOptsBatchPoster *bind.TransactOpts, dapWriters []daprovider.Writer, l1Reader *headerreader.HeaderReader, @@ -1018,6 +1019,7 @@ func getBatchPoster( DAPWriters: dapWriters, ParentChainID: parentChainID, DAPReaders: dapReaders, + ChainConfig: l2Config, }) if err != nil { return nil, err @@ -1214,7 +1216,7 @@ func createNodeImpl( return nil, err } - batchPoster, err := getBatchPoster(ctx, config, configFetcher, txOptsBatchPoster, dapWriters, l1Reader, inboxTracker, txStreamer, arbOSVersionGetter, arbDb, syncMonitor, deployInfo, parentChainID, dapRegistry, stakerAddr) + batchPoster, err := getBatchPoster(ctx, config, configFetcher, l2Config, txOptsBatchPoster, dapWriters, l1Reader, inboxTracker, txStreamer, arbOSVersionGetter, arbDb, syncMonitor, deployInfo, parentChainID, dapRegistry, stakerAddr) if err != nil { return nil, err } @@ -1355,7 +1357,7 @@ func CreateNodeExecutionClient( if executionClient == nil { return nil, errors.New("execution client must be non-nil") } - currentNode, err := createNodeImpl(ctx, stack, executionClient, nil, nil, nil, arbDb, configFetcher, l2Config, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan, parentChainID, blobReader, latestWasmModuleRoot) + currentNode, err := createNodeImpl(ctx, stack, executionClient, nil, nil, executionClient, arbDb, configFetcher, l2Config, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan, parentChainID, blobReader, latestWasmModuleRoot) if err != nil { return nil, err } diff --git a/arbstate/inbox.go b/arbstate/inbox.go index b9bf852b5c..c2a5fcd7db 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/offchainlabs/nitro/arbcompress" @@ -77,11 +78,9 @@ type SequencerMessage struct { Segments [][]byte } -const MaxDecompressedLen int = 1024 * 1024 * 16 // 16 MiB -const maxZeroheavyDecompressedLen = 101*MaxDecompressedLen/100 + 64 const MaxSegmentsPerSequencerMessage = 100 * 1024 -func ParseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, dapReaders DapReaderSource, keysetValidationMode daprovider.KeysetValidationMode) (*SequencerMessage, error) { +func ParseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, dapReaders DapReaderSource, keysetValidationMode daprovider.KeysetValidationMode, chainConfig *params.ChainConfig) (*SequencerMessage, error) { if len(data) < 40 { return nil, errors.New("sequencer message missing L1 header") } @@ -148,10 +147,17 @@ func ParseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash // At this point, `payload` has not been validated by the sequencer inbox at all. // It's not safe to trust any part of the payload from this point onwards. + var uncompressedBatchSizeLimit uint64 + if chainConfig != nil { + uncompressedBatchSizeLimit = chainConfig.MaxUncompressedBatchSize() + } else { // In case chainConfig is nil, fall back to params default (e.g. 
in tests or for the genesis block) + uncompressedBatchSizeLimit = params.DefaultMaxUncompressedBatchSize + } // Stage 2: If enabled, decode the zero heavy payload (saves gas based on calldata charging). if len(payload) > 0 && daprovider.IsZeroheavyEncodedHeaderByte(payload[0]) { - pl, err := io.ReadAll(io.LimitReader(zeroheavy.NewZeroheavyDecoder(bytes.NewReader(payload[1:])), int64(maxZeroheavyDecompressedLen))) + maxZeroheavyDecompressedLen := 101*uncompressedBatchSizeLimit/100 + 64 + pl, err := io.ReadAll(io.LimitReader(zeroheavy.NewZeroheavyDecoder(bytes.NewReader(payload[1:])), int64(maxZeroheavyDecompressedLen))) // #nosec G115 if err != nil { log.Warn("error reading from zeroheavy decoder", err.Error()) return parsedMsg, nil @@ -161,10 +167,10 @@ func ParseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash // Stage 3: Decompress the brotli payload and fill the parsedMsg.segments list. if len(payload) > 0 && daprovider.IsBrotliMessageHeaderByte(payload[0]) { - decompressed, err := arbcompress.Decompress(payload[1:], MaxDecompressedLen) + decompressed, err := arbcompress.Decompress(payload[1:], int(uncompressedBatchSizeLimit)) // #nosec G115 if err == nil { reader := bytes.NewReader(decompressed) - stream := rlp.NewStream(reader, uint64(MaxDecompressedLen)) + stream := rlp.NewStream(reader, 0) for { var segment []byte err := stream.Decode(&segment) @@ -200,6 +206,7 @@ func ParseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash type inboxMultiplexer struct { backend InboxBackend delayedMessagesRead uint64 + chainConfig *params.ChainConfig dapReaders DapReaderSource cachedSequencerMessage *SequencerMessage cachedSequencerMessageNum uint64 @@ -214,10 +221,11 @@ type inboxMultiplexer struct { keysetValidationMode daprovider.KeysetValidationMode } -func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dapReaders DapReaderSource, keysetValidationMode daprovider.KeysetValidationMode) arbostypes.InboxMultiplexer { +func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dapReaders DapReaderSource, keysetValidationMode daprovider.KeysetValidationMode, chainConfig *params.ChainConfig) arbostypes.InboxMultiplexer { return &inboxMultiplexer{ backend: backend, delayedMessagesRead: delayedMessagesRead, + chainConfig: chainConfig, dapReaders: dapReaders, cachedSequencerMessage: nil, cachedSequencerMessageNum: 0, @@ -245,8 +253,9 @@ func (r *inboxMultiplexer) Pop(ctx context.Context) (*arbostypes.MessageWithMeta return nil, realErr } r.cachedSequencerMessageNum = r.backend.GetSequencerInboxPosition() + var err error - r.cachedSequencerMessage, err = ParseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.dapReaders, r.keysetValidationMode) + r.cachedSequencerMessage, err = ParseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.dapReaders, r.keysetValidationMode, r.chainConfig) if err != nil { return nil, err } diff --git a/arbstate/inbox_fuzz_test.go b/arbstate/inbox_fuzz_test.go index 6b49d97288..da67cf2af2 100644 --- a/arbstate/inbox_fuzz_test.go +++ b/arbstate/inbox_fuzz_test.go @@ -1,4 +1,4 @@ -// Copyright 2022, Offchain Labs, Inc. +// Copyright 2022-2025, Offchain Labs, Inc. 
// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md package arbstate @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/arbos/arbostypes" + "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/daprovider" ) @@ -70,7 +71,13 @@ func FuzzInboxMultiplexer(f *testing.F) { delayedMessage: delayedMsg, positionWithinMessage: 0, } - multiplexer := NewInboxMultiplexer(backend, 0, nil, daprovider.KeysetValidate) + multiplexer := NewInboxMultiplexer( + backend, + 0, + nil, + daprovider.KeysetValidate, + chaininfo.ArbitrumDevTestChainConfig(), + ) _, err := multiplexer.Pop(context.TODO()) if err != nil { panic(err) diff --git a/changelog/nit-3121-configurable-batch-size.md b/changelog/nit-3121-configurable-batch-size.md new file mode 100644 index 0000000000..ada235f670 --- /dev/null +++ b/changelog/nit-3121-configurable-batch-size.md @@ -0,0 +1,2 @@ +### Added +- Make uncompressed batch size limit configurable in the chain config with a new parameter (`MaxUncompressedBatchSize`). Once set, it cannot be changed. The default value remains 16MB. diff --git a/cmd/chaininfo/chain_defaults.go b/cmd/chaininfo/chain_defaults.go index 5849bc8121..869877e732 100644 --- a/cmd/chaininfo/chain_defaults.go +++ b/cmd/chaininfo/chain_defaults.go @@ -38,6 +38,7 @@ func CopyArbitrumChainParams(arbChainParams params.ArbitrumChainParams) params.A GenesisBlockNum: arbChainParams.GenesisBlockNum, MaxCodeSize: arbChainParams.MaxCodeSize, MaxInitCodeSize: arbChainParams.MaxInitCodeSize, + MaxUncompressedBatchSize: arbChainParams.MaxUncompressedBatchSize, } } diff --git a/cmd/replay/main.go b/cmd/replay/main.go index d2358fefa4..d66b80266d 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -300,7 +300,7 @@ func main() { } return wavmio.ReadInboxMessage(batchNum), nil } - readMessage := func(anyTrustEnabled bool) *arbostypes.MessageWithMetadata { + readMessage := func(anyTrustEnabled bool, chainConfig *params.ChainConfig) *arbostypes.MessageWithMetadata { var delayedMessagesRead uint64 if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() @@ -334,7 +334,7 @@ func main() { panic(fmt.Sprintf("Failed to register DA Certificate reader: %v", err)) } - inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dapReaders, keysetValidationMode) + inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dapReaders, keysetValidationMode, chainConfig) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) if err != nil { @@ -390,7 +390,7 @@ func main() { } } - message := readMessage(chainConfig.ArbitrumChainParams.DataAvailabilityCommittee) + message := readMessage(chainConfig.ArbitrumChainParams.DataAvailabilityCommittee, chainConfig) chainContext := WavmChainContext{chainConfig: chainConfig} newBlock, _, err = arbos.ProduceBlock(message.Message, message.DelayedMessagesRead, lastBlockHeader, statedb, chainContext, false, core.NewMessageReplayContext(), false) @@ -400,7 +400,9 @@ func main() { } else { // Initialize ArbOS with this init message and create the genesis block. - message := readMessage(false) + // Currently, the only use of `chainConfig` argument is to get a limit on the uncompressed batch size. + // However, the init message is never compressed, so we can safely pass nil here. 
+ message := readMessage(false, nil) initMessage, err := message.Message.ParseInitMessage() if err != nil { diff --git a/go-ethereum b/go-ethereum index 5f92cb04db..b35fd262b6 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 5f92cb04db03f4679937133d1c631d2aa3e5a79b +Subproject commit b35fd262b6fad6bf675fd7f2c813d0937f4641c9 diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index 00c7360748..08461168e9 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -160,6 +160,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool, useRedisLock bool) { TransactOpts: &seqTxOpts, DAPWriters: nil, ParentChainID: parentChainID, + ChainConfig: builder.chainConfig, }, ) Require(t, err) @@ -300,6 +301,7 @@ func TestRedisBatchPosterHandoff(t *testing.T) { TransactOpts: &seqTxOpts, DAPWriters: nil, ParentChainID: parentChainID, + ChainConfig: builder.chainConfig, }, ) Require(t, err) diff --git a/system_tests/batch_size_limit_test.go b/system_tests/batch_size_limit_test.go new file mode 100644 index 0000000000..49d0ab92b8 --- /dev/null +++ b/system_tests/batch_size_limit_test.go @@ -0,0 +1,153 @@ +package arbtest + +import ( + "bytes" + "context" + "math/big" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + + "github.com/offchainlabs/nitro/arbcompress" + "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/solgen/go/bridgegen" +) + +const ( + SenderAccount = "Sender" + ReceiverAccount = "Receiver" + TransferAmount = 1000000 + NewUncompressedSizeLimit = params.DefaultMaxUncompressedBatchSize * 2 +) + +func TestTooBigBatchGetsRejected(t *testing.T) { + builder, ctx, cleanup := setupNodeForTestingBatchSizeLimit(t, false) + defer cleanup() + + checkReceiverAccountBalance(t, ctx, builder, 0) + + bigBatch := buildBigBatch(t, builder.L2Info) + batchNum := postBatch(t, ctx, builder, bigBatch) + + ensureBatchWasProcessed(t, builder, batchNum) + checkReceiverAccountBalance(t, ctx, builder, 0) +} + +func TestCanIncreaseBatchSizeLimit(t *testing.T) { + builder, ctx, cleanup := setupNodeForTestingBatchSizeLimit(t, true) + defer cleanup() + + checkReceiverAccountBalance(t, ctx, builder, 0) + + bigBatch := buildBigBatch(t, builder.L2Info) + batchNum := postBatch(t, ctx, builder, bigBatch) + + ensureBatchWasProcessed(t, builder, batchNum) + checkReceiverAccountBalance(t, ctx, builder, TransferAmount) +} + +// setupNodeForTestingBatchSizeLimit initializes a test node with the option to set a higher uncompressed batch size limit. +// Also, it creates genesis accounts for sender and receiver with appropriate balances. +// It returns the NodeBuilder and a cleanup function to be called after the test. 
+func setupNodeForTestingBatchSizeLimit(t *testing.T, setHighLimit bool) (*NodeBuilder, context.Context, func()) { + ctx, cancel := context.WithCancel(context.Background()) + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig.BatchPoster.Enable = false + builder.L2Info.GenerateGenesisAccount(SenderAccount, big.NewInt(1e18)) + builder.L2Info.GenerateGenesisAccount(ReceiverAccount, big.NewInt(0)) + + if setHighLimit { + builder.chainConfig.ArbitrumChainParams.MaxUncompressedBatchSize = NewUncompressedSizeLimit + } + + cleanup := builder.Build(t) + + return builder, ctx, func() { + cancel() + cleanup() + } +} + +// buildBigBatch builds a batch that: +// - consists of a valid transfer tx followed by highly compressible trash data +// - has an uncompressed size larger than DefaultMaxUncompressedBatchSize but less than NewUncompressedSizeLimit +// - has a compressed size smaller than the allowed calldata batch size for the test batch poster +// - is already compressed and has the appropriate header byte +func buildBigBatch(t *testing.T, l2Info *BlockchainTestInfo) []byte { + batchBuffer := bytes.NewBuffer([]byte{}) + + // 1. The first tx in the batch is a standard transfer tx used as an indicator whether the batch was processed successfully. + standardTx := l2Info.PrepareTx(SenderAccount, ReceiverAccount, 1000000, big.NewInt(TransferAmount), []byte{}) + err := writeTxToBatch(batchBuffer, standardTx) + Require(t, err) + + // 2. The rest of the batch is filled with highly compressible trash data. + batchBuffer.Write(bytes.Repeat([]byte{0xff}, params.DefaultMaxUncompressedBatchSize)) + + // 3. Compress the batch (as the batch poster would do). + compressed, err := arbcompress.CompressWell(batchBuffer.Bytes()) + Require(t, err) + + // 4. Ensure compressed and uncompressed sizes are as expected. + uncompressedSize, compressedSize := len(batchBuffer.Bytes()), len(compressed) + require.Greater(t, uncompressedSize, params.DefaultMaxUncompressedBatchSize) + require.Less(t, uncompressedSize, NewUncompressedSizeLimit) + require.Less(t, compressedSize, arbnode.TestBatchPosterConfig.MaxCalldataBatchSize) + + // 5. Return the compressed batch with the appropriate header byte. + return append([]byte{daprovider.BrotliMessageHeaderByte}, compressed...) +} + +// postBatch posts the given batch directly to the L1 SequencerInbox contract. Returns the batch sequence number (sequencer message index). +func postBatch(t *testing.T, ctx context.Context, builder *NodeBuilder, batch []byte) uint64 { + seqNum := new(big.Int).Lsh(common.Big1, 256) + seqNum.Sub(seqNum, common.Big1) + + seqInboxAddr := builder.L1Info.GetAddress("SequencerInbox") + seqInbox, err := bridgegen.NewSequencerInbox(seqInboxAddr, builder.L1.Client) + Require(t, err) + + sequencer := builder.L1Info.GetDefaultTransactOpts("Sequencer", ctx) + + tx, err := seqInbox.AddSequencerL2BatchFromOrigin8f111f3c(&sequencer, seqNum, batch, big.NewInt(1), common.Address{}, big.NewInt(0), big.NewInt(0)) + Require(t, err) + receipt, err := EnsureTxSucceeded(ctx, builder.L1.Client, tx) + Require(t, err) + + return getPostedBatchSequenceNumber(t, seqInbox, receipt) +} + +// getPostedBatchSequenceNumber extracts the batch sequence number from the SequencerBatchDelivered event in the given receipt. 
+func getPostedBatchSequenceNumber(t *testing.T, seqInbox *bridgegen.SequencerInbox, receipt *types.Receipt) uint64 { + for _, log := range receipt.Logs { + event, err := seqInbox.ParseSequencerBatchDelivered(*log) + if err == nil { + require.True(t, event.BatchSequenceNumber.IsUint64(), "BatchSequenceNumber is not uint64") + return event.BatchSequenceNumber.Uint64() + } + } + t.Fatal("SequencerBatchDelivered event not found in receipt logs") + return 0 +} + +// checkReceiverAccountBalance ensures that the receiver account has the expected balance. +func checkReceiverAccountBalance(t *testing.T, ctx context.Context, builder *NodeBuilder, expectedBalance int64) { + balanceBefore, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress(ReceiverAccount), nil) + Require(t, err) + require.True(t, balanceBefore.Cmp(big.NewInt(expectedBalance)) == 0) +} + +// ensureBatchWasProcessed waits until a particular batch has been processed by the L2 node. +func ensureBatchWasProcessed(t *testing.T, builder *NodeBuilder, batchNum uint64) { + require.Eventuallyf(t, func() bool { + _, err := builder.L2.ConsensusNode.InboxTracker.GetBatchMetadata(batchNum) + return err == nil + }, 5*time.Second, time.Second, "Batch %d was not processed in time", batchNum) +} diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index 5d3ad68229..7630557250 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022, Offchain Labs, Inc. +// Copyright 2021-2025, Offchain Labs, Inc. // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md package arbtest @@ -44,7 +44,13 @@ func BuildBlock( if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } - inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, daprovider.KeysetValidate) + inboxMultiplexer := arbstate.NewInboxMultiplexer( + inbox, + delayedMessagesRead, + nil, + daprovider.KeysetValidate, + getChainConfig(), + ) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) @@ -243,7 +249,7 @@ func FuzzStateTransition(f *testing.F) { runCtx = core.NewMessageGasEstimationContext() } - _, err = BuildBlock(statedb, genesis.Header(), noopChainContext{chainConfig: chaininfo.ArbitrumDevTestChainConfig()}, inbox, seqBatch, runCtx) + _, err = BuildBlock(statedb, genesis.Header(), noopChainContext{chainConfig: getChainConfig()}, inbox, seqBatch, runCtx) if err != nil { // With the fixed header it shouldn't be possible to read a delayed message, // and no other type of error should be possible. @@ -251,3 +257,7 @@ func FuzzStateTransition(f *testing.F) { } }) } + +func getChainConfig() *params.ChainConfig { + return chaininfo.ArbitrumDevTestChainConfig() +}
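
The new system test above opts into a larger limit by mutating the dev-test chain config before the node is built. For any other caller that constructs a chain config programmatically, the same opt-in looks roughly like the sketch below; the helper name is hypothetical, and per the changelog entry the value should not be changed once the chain is live:

    package arbtest // sketch only

    import (
    	"github.com/ethereum/go-ethereum/params"

    	"github.com/offchainlabs/nitro/cmd/chaininfo"
    )

    // newLargeBatchChainConfig doubles the uncompressed batch size limit on the
    // dev-test chain config, mirroring setupNodeForTestingBatchSizeLimit above.
    func newLargeBatchChainConfig() *params.ChainConfig {
    	cfg := chaininfo.ArbitrumDevTestChainConfig()
    	cfg.ArbitrumChainParams.MaxUncompressedBatchSize = 2 * params.DefaultMaxUncompressedBatchSize
    	return cfg
    }

The resulting config is what would be handed to BatchPosterOpts.ChainConfig and to NewInboxMultiplexer at the call sites touched by this diff.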