diff --git a/espresso/environment/14_batcher_fallback_test.go b/espresso/environment/14_batcher_fallback_test.go index 769bc5a7034..5cf2720c3c8 100644 --- a/espresso/environment/14_batcher_fallback_test.go +++ b/espresso/environment/14_batcher_fallback_test.go @@ -1,8 +1,13 @@ package environment_test import ( + "bytes" "context" + "errors" + "fmt" + "io" "math/big" + "sync" "testing" "time" @@ -10,13 +15,45 @@ import ( env "github.com/ethereum-optimism/optimism/espresso/environment" "github.com/ethereum-optimism/optimism/op-batcher/batcher" "github.com/ethereum-optimism/optimism/op-batcher/bindings" + "github.com/ethereum-optimism/optimism/op-batcher/compressor" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" "github.com/ethereum-optimism/optimism/op-node/config" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" + "github.com/ethereum-optimism/optimism/op-service/sources" + "github.com/ethereum-optimism/optimism/op-service/txmgr" "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/require" ) +// waitForRollupToMovePastL1Block waits until the targeted rollup cli moves +// past the reference l1BlockNumber. This indicates that the rollupCli is +// still receiveing information that indicates that the L1 has progressed +// past the desired height. +// +// For convenience, this also returns the Local Safe L2 Height of the last +// call to the Sync Status on the Rollup Client. If this wait passes, it +// will be the LocalSafeL2 height of the SyncStatus that exceeded the +// referenced l1BlockNumber. 
+func waitForRollupToMovePastL1Block(ctx context.Context, rollupCli *sources.RollupClient, l1BlockNumber uint64) (uint64, error) { + timeoutCtx, cancel := context.WithTimeout(ctx, 2*time.Minute) + var localSafeL2Height uint64 + defer cancel() + err := wait.For(timeoutCtx, 100*time.Millisecond, func() (bool, error) { + status, err := rollupCli.SyncStatus(ctx) + if err != nil { + return false, err + } + + localSafeL2Height = status.LocalSafeL2.Number + return status.CurrentL1.Number > l1BlockNumber, nil + }) + + return localSafeL2Height, err +} + func TestBatcherSwitching(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -76,26 +113,8 @@ func TestBatcherSwitching(t *testing.T) { require.NoError(t, err) // Give things time to settle - var l2Height uint64 - - ticker := time.NewTicker(100 * time.Millisecond) - timeoutCtx, cancel := context.WithTimeout(ctx, 2*time.Minute) - defer cancel() - -Loop: - for { - select { - case <-timeoutCtx.Done(): - panic("Timeout waiting for verifier derivation pipeline to advance past the fallback batcher switchoff point") - case <-ticker.C: - status, err := system.RollupClient(e2esys.RoleVerif).SyncStatus(ctx) - require.NoError(t, err) - if status.CurrentL1.Number > switchReceipt.BlockNumber.Uint64() { - l2Height = status.LocalSafeL2.Number - break Loop - } - } - } + l2Height, err := waitForRollupToMovePastL1Block(ctx, system.RollupClient(e2esys.RoleVerif), switchReceipt.BlockNumber.Uint64()) + require.NoError(t, err) espHeight, err := espClient.FetchLatestBlockHeight(ctx) require.NoError(t, err) @@ -133,3 +152,424 @@ Loop: require.Equal(t, verifBlock.Hash(), caffBlock.Hash()) } + +// TxManagerIntercept is a txmgr.TxManager that wraps another txmgr.TxManager +// and intercepts calls to Send and SendAsync. 
+// +// The purpose of this intercept is to simulate a failure in the tx submission +// process such that when activated a single frame of a multi-frame channel is +// sent to L1, and the remaining frames fail to be sent, triggering the fallback +// batcher to take over. +type TxManagerIntercept struct { + txmgr.TxManager + sync.Mutex + + // shouldFail indicates whether to simulate a failure on Send/SendAsync. + shouldFail bool + + // triggerAfterOne indicates whether to start failing after a single + // successful Send/SendAsync. + triggerAfterOne bool + + // failureCount tracks the number of failures that have occurred. + failureCount int + + successfulFrames map[derive.ChannelID][]derive.Frame + unsuccessfulFrames map[derive.ChannelID][]derive.Frame +} + +func NewTxManagerIntercept(base txmgr.TxManager) *TxManagerIntercept { + return &TxManagerIntercept{ + TxManager: base, + successfulFrames: make(map[derive.ChannelID][]derive.Frame), + unsuccessfulFrames: make(map[derive.ChannelID][]derive.Frame), + } +} + +// ErrSimulatedTxSubmissionFailure is the sentinel error returned when a +// simulated tx submission failure is triggered. +// +// We utilize this as a placeholder error to indicate that the tx submission +// failure was intentional for testing purposes. +var ErrSimulatedTxSubmissionFailure = errors.New("simulated tx submission failure") + +// decodeFrameInformation takes a txmgr.TxCandidate and attempts to decode +// frames contained within either the Blob fields, or the TxData field. +func decodeFrameInformation(candidate txmgr.TxCandidate) ([]derive.Frame, error) { + if len(candidate.TxData) > 0 { + // We have a CallData tx, so we can decode the frame information from + // the tx data. + return decodeFrameInformationFromTxData(candidate) + } + + if len(candidate.Blobs) > 0 { + // We have a Blob tx, so we can decode the frame information from + // the blobs. 
+		return decodeFrameInformationFromBlobs(candidate)
+	}
+
+	return nil, fmt.Errorf("tx candidate has neither tx data nor blobs to decode frame information from")
+}
+
+// decodeFrameInformationFromData takes a byte slice and decodes each frame
+// until it can no longer decode any frames. It returns a slice of all
+// decoded frames, and any error encountered.
+func decodeFrameInformationFromData(data []byte) ([]derive.Frame, error) {
+	if len(data) == 0 || data[0] != params.DerivationVersion0 {
+		// Empty data or not a supported derivation version.
+		return nil, fmt.Errorf("unsupported derivation version in data %x", data)
+	}
+
+	var frames []derive.Frame
+	reader := bytes.NewBuffer(data[1:])
+	for {
+		var frame derive.Frame
+		err := frame.UnmarshalBinary(reader)
+		if errors.Is(err, io.EOF) {
+			// We've consumed all of the frames.
+			break
+		}
+
+		// If this is any other error, it indicates that there was an
+		// error decoding the frame.
+		if err != nil {
+			return frames, fmt.Errorf("error decoding frame: %w", err)
+		}
+
+		frames = append(frames, frame)
+	}
+
+	return frames, nil
+}
+
+// decodeFrameInformationFromTxData takes a txmgr.TxCandidate and will assume
+// that the frame data is encoded within the TxData. This data will be taken
+// and decoded into frames and returned.
+func decodeFrameInformationFromTxData(candidate txmgr.TxCandidate) ([]derive.Frame, error) {
+	data := candidate.TxData
+
+	return decodeFrameInformationFromData(data)
+}
+
+// decodeFrameInformationFromBlobs takes a txmgr.TxCandidate and will assume
+// that the frame data is encoded within the Blobs. The blobs will be
+// converted back to txData, and the data will be decoded into frames.
+func decodeFrameInformationFromBlobs(candidate txmgr.TxCandidate) ([]derive.Frame, error) { + var frames []derive.Frame + for _, blob := range candidate.Blobs { + data, err := blob.ToData() + if err != nil { + return frames, fmt.Errorf("error converting blob to data: %w", err) + } + + newFrames, err := decodeFrameInformationFromData(data) + if err != nil { + return frames, err + } + frames = append(frames, newFrames...) + } + + return frames, nil +} + +func (t *TxManagerIntercept) markFramesAsSuccessful(frames []derive.Frame) { + t.Lock() + defer t.Unlock() + for _, frame := range frames { + t.successfulFrames[frame.ID] = append(t.successfulFrames[frame.ID], frame) + } +} + +func (t *TxManagerIntercept) markFramesAsUnsuccessful(frames []derive.Frame) { + t.Lock() + defer t.Unlock() + for _, frame := range frames { + t.unsuccessfulFrames[frame.ID] = append(t.unsuccessfulFrames[frame.ID], frame) + } +} + +// Send implements txmgr.TxManager. +// +// Send is overridden to simulate a failure when shouldFail is true, and to +// allow for one final transaction to be sent before failures begin when +// triggerAfterOne is true. +func (t *TxManagerIntercept) Send(ctx context.Context, candidate txmgr.TxCandidate) (*types.Receipt, error) { + frames, err := decodeFrameInformation(candidate) + if err != nil { + return nil, err + } + + if t.shouldFail { + t.failureCount++ + t.markFramesAsUnsuccessful(frames) + time.Sleep(50 * time.Millisecond) // Simulate some delay + return nil, ErrSimulatedTxSubmissionFailure + } + + if t.triggerAfterOne { + t.shouldFail = true + } + + t.markFramesAsSuccessful(frames) + + return t.TxManager.Send(ctx, candidate) +} + +// SendAsync implements txmgr.TxManager. +// +// SendAsync is overridden to simulate a failure when shouldFail is true, and +// to allow for one final transaction to be sent before failures begin when +// triggerAfterOne is true. 
+func (t *TxManagerIntercept) SendAsync(ctx context.Context, candidate txmgr.TxCandidate, ch chan txmgr.SendResponse) { + frames, err := decodeFrameInformation(candidate) + if err != nil { + ch <- txmgr.SendResponse{Err: fmt.Errorf("failed to decode frame information: %w", err)} + return + } + + if t.shouldFail { + t.failureCount++ + t.markFramesAsUnsuccessful(frames) + time.Sleep(50 * time.Millisecond) // Simulate some delay + ch <- txmgr.SendResponse{Err: ErrSimulatedTxSubmissionFailure} + return + } + + if t.triggerAfterOne { + t.shouldFail = true + } + + t.markFramesAsSuccessful(frames) + t.TxManager.SendAsync(ctx, candidate, ch) +} + +type partialFrameData struct { + channelID derive.ChannelID + successfulFrames []derive.Frame + unsuccessfulFrames []derive.Frame +} + +func (t *TxManagerIntercept) partialFrameData() []partialFrameData { + var partials []partialFrameData + + for channelID, unsuccessfulFrames := range t.unsuccessfulFrames { + successfulFrames, ok := t.successfulFrames[channelID] + if !ok { + continue + } + + partials = append(partials, partialFrameData{ + channelID: channelID, + successfulFrames: successfulFrames, + unsuccessfulFrames: unsuccessfulFrames, + }) + } + + return partials +} + +// Compile time assertion to ensure TxManagerIntercept implements +// txmgr.TxManager. +var _ txmgr.TxManager = (*TxManagerIntercept)(nil) + +// retryWaitNTimes retries the given function up to n times until it +// succeeds. +func retryWaitNTimes(fn func() error, n int) error { + var lastErr error + for range n { + lastErr = fn() + if lastErr == nil { + break + } + } + + return lastErr +} + +// TestFallbackMechanismIntegrationTestChannelNotClosed is a test case that is +// meant to verify the correct expected behavior in the event that the Espresso +// Batcher encounters an error mid L1 Batch submission that prevents the full +// channel from being submitted to the L1. 
+// +// In this scenario this issue is expected to send a single frame of a +// multi-frame channel to the contract. At this point the batch should be +// switched to the fallback and the fallback batcher should continue +// submitting the remaining frames of the channel without any issues. +func TestFallbackMechanismIntegrationTestChannelNotClosed(t *testing.T) { + ctx, cancel := context.WithTimeoutCause(context.Background(), time.Minute*10, fmt.Errorf("test did not complete within expected time allotment: %w", context.DeadlineExceeded)) + defer cancel() + + launcher := new(env.EspressoDevNodeLauncherDocker) + + // In order to force a multi-frame channel with the e2e system setup, + // we need to multimately modify the channel config that will be utilized + // by the batcher. + // + // This may seem a bit convoluted, but we have to contend with a few + // different settings in order to ensure that the behavior we are + // targeting is achieved. + // + // All of the options given below are utilized with the specific purpose + // of triggering multi-frame channels. + // + // NOTE: Some of the configuration options pull double-duty. They are + // utilized in both the creation of the frames, and the sending of the + // frames. In both scenarios, they may behave differently. I will make + // an effor to note them where they occur. + + system, espressoDevNode, err := launcher.StartE2eDevnet( + ctx, + t, + // Make Sure that the Batcher does not start Running + env.WithBatcherStoppedInitially(), + + // Explicitly disable using any sort of compression. This is + // necessary as we will be specifying that we will be targeting + // a specific frame size, and we don't want compression to indirectly + // interfere with this process. + env.WithBatcherCompressor(compressor.NoneKind), + + // This sets the Target Number of Frames that each channel is aiming + // to achieve. In this case we specify 3 so that we can ensure that + // our channel will always be a multi frame channel. 
+ // + // Coupling this with the max channel duration helps us to ensure that + // each channel will always aim for the same number of channels. + // + // NOTE: This has different behavior when constructing the frames on + // the Batcher preparation than it does on the L1 submission. + // Specifically concerning the Da Type DaTypeCalldata. When utilizing + // call data, the L1 Submission will **ALWAYS** utilize 1 frame instead + // of this passed value. Yet channel construction will utilize this + // provided value as appropriate. + env.WithBatcherTargetNumFrames(3), + + // We set the MaxL1TxSize to some value that will hold our L2 + // Transaction size comfortably. + env.WithBatcherMaxL1TxSize(1200), + + // We set the MaxChannelDuration to 0 specifically to disable premature + // channel closing before we have enough frames. The default behavior + // is to create a new Channel with the specified window of L1 Blocks. + // The idea is that you can prevent eager channel production when + // you are not producing blocks with transactions. + // + // Setting this to 0 explicitly disables the feature, and as a result + // it will only send the data when the previous conditions are met. + env.WithBatcherMaxChannelDuration(0), + ) + + require.NoError(t, err) + defer env.Stop(t, system) + defer env.Stop(t, espressoDevNode) + + // We create an intercept around the existing tx manager so we have + // control over when our failures start to occur. + + interceptTxManager := NewTxManagerIntercept( + system.BatchSubmitter.TxManager, + ) + + { + // Replace the existing TxManager with our intercept + system.BatchSubmitter.TestDriver().Txmgr = interceptTxManager + + // Start the Batcher again, so the publishingLoop picks up the TxManager + // when creating its queue. 
+ err = system.BatchSubmitter.TestDriver().StartBatchSubmitting() + require.NoError(t, err) + + // Wait for the Next L2 Block to be verified by ensure everything is + // working and progressing without issue + err = wait.ForProcessingFullBatch(ctx, system.RollupClient(e2esys.RoleVerif)) + require.NoError(t, err) + + // Reset TxManager as we don't want to target or interfere with the + // other aspects of the system. + system.BatchSubmitter.TestDriver().Txmgr = interceptTxManager.TxManager + } + + l2Seq := system.NodeClient(e2esys.RoleSeq) + l1Client := system.NodeClient(e2esys.RoleL1) + l2Verif := system.NodeClient(e2esys.RoleVerif) + + // Let's make sure that the system is progressing initially for both + // the Sequencer, the Verifier, and the L1Node + err = wait.ForBlock(ctx, l2Seq, 3) + require.NoError(t, err) + err = retryWaitNTimes(func() error { + return wait.ForNextBlock(ctx, l2Verif) + }, 3) + require.NoError(t, err) + + // Verify everything works + env.RunSimpleL2Burn(ctx, t, system) + + // We want to trigger the failure mode now. + interceptTxManager.triggerAfterOne = true + + // Now we need to submit a multi-frame channel to L1 to trigger the + // failure. We can do this by adjusting the batcher config to use a very + // small MaxL1TxSize such that even a small L2 transaction will result in + // multiple frames. + + // We want enough L2 Transactions to ensure we have multiple frames. + const n = 10 + + receipts, err := env.RunSimpleMultiTransactions(ctx, t, system, n) + require.NoError(t, err) + + // We want to wait until we know that the intercept tx manager has + // trigger the failure mode successfully, and that all n transactions + // have been attempted. + + // Wait until at least 2 L2 blocks have been mined (one for the + // a block with successful frames, and one for a block with failed frames). 
+ err = wait.ForNextBlock(ctx, l2Seq) + require.NoError(t, err) + err = wait.ForNextBlock(ctx, l2Seq) + require.NoError(t, err) + + // Make sure that at least one failure has occurred, as this should + // indicate that the submission process should have failed a multiframe + // channel submission. + err = wait.For(ctx, 10*time.Second, func() (bool, error) { + return interceptTxManager.failureCount >= 1, nil + }) + require.NoError(t, err) + + // Stop the "TEE" batcher + err = system.BatchSubmitter.TestDriver().StopBatchSubmitting(ctx) + require.NoError(t, err) + + // Switch active batcher + options, err := bind.NewKeyedTransactorWithChainID(system.Config().Secrets.Deployer, system.Cfg.L1ChainIDBig()) + require.NoError(t, err) + + batchAuthenticator, err := bindings.NewBatchAuthenticator(system.RollupConfig.BatchAuthenticatorAddress, l1Client) + require.NoError(t, err) + + tx, err := batchAuthenticator.SwitchBatcher(options) + require.NoError(t, err) + _, err = wait.ForReceiptOK(ctx, l1Client, tx.Hash()) + require.NoError(t, err) + + // Start the fallback batcher + err = system.FallbackBatchSubmitter.TestDriver().StartBatchSubmitting() + require.NoError(t, err) + + // There should be some failure recorded in the intercept tx manager that + // has a corresponding success in the intercept tx manager. 
+ + partialFrameData := interceptTxManager.partialFrameData() + require.Greaterf(t, len(partialFrameData), 0, "expected to find at least one partially submitted frame") + + // Verify that our previous receipts were also recorded on L1 + for i, receipt := range receipts { + _, err := wait.ForReceiptOK(ctx, l2Verif, receipt.TxHash) + require.NoError(t, err, "failed to find receipt %d for tx %s on L2 Verifier", i, receipt.TxHash) + } + + // Everything should still work + env.RunSimpleL2Burn(ctx, t, system) +} diff --git a/espresso/environment/5_batch_authentication_test.go b/espresso/environment/5_batch_authentication_test.go index f9a457fded2..b0bf92aaaee 100644 --- a/espresso/environment/5_batch_authentication_test.go +++ b/espresso/environment/5_batch_authentication_test.go @@ -9,7 +9,6 @@ import ( env "github.com/ethereum-optimism/optimism/espresso/environment" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" - "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" "github.com/ethereum/go-ethereum/crypto" ) @@ -29,9 +28,7 @@ func TestE2eDevnetWithInvalidAttestation(t *testing.T) { system, _, err := launcher.StartE2eDevnet(ctx, t, env.SetBatcherKey(*privateKey), - env.Config(func(cfg *e2esys.SystemConfig) { - cfg.DisableBatcher = true - }), + env.WithBatcherStoppedInitially(), env.WithEspressoAttestationVerifierService(), ) diff --git a/espresso/environment/6_batch_inbox_test.go b/espresso/environment/6_batch_inbox_test.go index afe12971a9c..2cb623e2305 100644 --- a/espresso/environment/6_batch_inbox_test.go +++ b/espresso/environment/6_batch_inbox_test.go @@ -43,12 +43,9 @@ func TestE2eDevnetWithoutAuthenticatingBatches(t *testing.T) { launcher := new(env.EspressoDevNodeLauncherDocker) - system, _, err := - launcher.StartE2eDevnet(ctx, t, - env.Config(func(cfg *e2esys.SystemConfig) { - cfg.DisableBatcher = true - }), - ) + system, _, err := launcher.StartE2eDevnet(ctx, t, + env.WithBatcherStoppedInitially(), + ) if have, want := err, error(nil); 
have != want { t.Fatalf("failed to start dev environment with espresso dev node:\nhave:\n\t\"%v\"\nwant:\n\t\"%v\"\n", have, want) diff --git a/espresso/environment/e2e_helpers.go b/espresso/environment/e2e_helpers.go index 1bcd7cfa5cd..7989016c9e4 100644 --- a/espresso/environment/e2e_helpers.go +++ b/espresso/environment/e2e_helpers.go @@ -4,6 +4,8 @@ import ( "math/big" "time" + bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" + "github.com/ethereum-optimism/optimism/op-batcher/flags" "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" "github.com/ethereum-optimism/optimism/op-e2e/system/helpers" "github.com/ethereum/go-ethereum/common" @@ -131,3 +133,184 @@ func WithL2BlockTime(blockTime time.Duration) E2eDevnetLauncherOption { } } } + +// WithBatcherTargetNumFrames is a E2eDevnetLauncherOption that configures the +// batcher's `TargetNumFrames` option to the provided value. +// +// This governs how many frames the batcher will attempt to utilize when +// submitting a channel to the L1. +func WithBatcherTargetNumFrames(size int) E2eDevnetLauncherOption { + return func(c *E2eDevnetLauncherContext) E2eSystemOption { + return E2eSystemOption{ + StartOptions: []e2esys.StartOption{ + { + Key: "maxL1NumFrames", + Role: e2esys.RoleSeq, + BatcherMod: func(batchConfig *bss.CLIConfig, sys *e2esys.System) { + batchConfig.TargetNumFrames = size + }, + }, + }, + } + } +} + +// WithBatcherMaxPendingTransactions is a E2eDevnetLauncherOption that +// configures the batcher's `MaxPendingTransactions` option to the provided +// value. +// +// This governs how many pending L1 transactions the batcher will allow +// before pausing new submissions. 
+func WithBatcherMaxPendingTransactions(pendingTransactions uint64) E2eDevnetLauncherOption { + return func(c *E2eDevnetLauncherContext) E2eSystemOption { + return E2eSystemOption{ + StartOptions: []e2esys.StartOption{ + { + Key: "maxPendingTransactions", + Role: e2esys.RoleSeq, + BatcherMod: func(batchConfig *bss.CLIConfig, sys *e2esys.System) { + batchConfig.MaxPendingTransactions = pendingTransactions + }, + }, + }, + } + } +} + +// WithBatcherMaxL1TxSize is a E2eDevnetLauncherOption that configures the +// batcher's `MaxL1TxSize` option to the provided value. +// +// This governs the maximum L1 transaction size that the batcher will attempt +// to submit when submitting a channel to L1. +func WithBatcherMaxL1TxSize(maxL1TxSize uint64) E2eDevnetLauncherOption { + return func(c *E2eDevnetLauncherContext) E2eSystemOption { + return E2eSystemOption{ + StartOptions: []e2esys.StartOption{ + { + Key: "maxL1TxSize", + Role: e2esys.RoleSeq, + BatcherMod: func(batchConfig *bss.CLIConfig, sys *e2esys.System) { + batchConfig.MaxL1TxSize = maxL1TxSize + + if batchConfig.DataAvailabilityType == flags.BlobsType { + // If we're setting the max data size for blobs, + // we need to also inform the batcher to use that + // setting when calculating blob sizes. + // + // Otherwise it will use the max blob size constant. + batchConfig.TestUseMaxTxSizeForBlobs = true + } + }, + }, + }, + } + } +} + +// WithBatcherMaxBlocksPerSpanBatch is a E2eDevnetLauncherOption that +// configures the batcher's `MaxBlocksPerSpanBatch` option to the provided +// value. +// +// This governs how many blocks the batcher will include in a single span +// when creating batches to submit to L1. 
+func WithBatcherMaxBlocksPerSpanBatch(maxBlocksPerSpanBatch int) E2eDevnetLauncherOption {
+	return func(c *E2eDevnetLauncherContext) E2eSystemOption {
+		return E2eSystemOption{
+			StartOptions: []e2esys.StartOption{
+				{
+					Key:  "maxBlocksPerSpanBatch",
+					Role: e2esys.RoleSeq,
+					BatcherMod: func(batchConfig *bss.CLIConfig, sys *e2esys.System) {
+						batchConfig.MaxBlocksPerSpanBatch = maxBlocksPerSpanBatch
+					},
+				},
+			},
+		}
+	}
+}
+
+// WithBatcherDataAvailabilityType is a E2eDevnetLauncherOption that configures
+// the batcher's `DataAvailabilityType` option to the provided value.
+//
+// This governs which data availability method the batcher will use when
+// submitting frames to L1.
+func WithBatcherDataAvailabilityType(daAvailabilityType flags.DataAvailabilityType) E2eDevnetLauncherOption {
+	{
+		return func(c *E2eDevnetLauncherContext) E2eSystemOption {
+			return E2eSystemOption{
+				StartOptions: []e2esys.StartOption{
+					{
+						Key:  "dataAvailabilityType",
+						Role: e2esys.RoleSeq,
+						BatcherMod: func(batchConfig *bss.CLIConfig, sys *e2esys.System) {
+							batchConfig.DataAvailabilityType = daAvailabilityType
+						},
+					},
+				},
+			}
+		}
+	}
+}
+
+// WithBatcherMaxChannelDuration is a configuration option that modifies the
+// MaxChannelDuration for the Batcher Config. This value will then be
+// utilized by the Channels created by the batcher.
+func WithBatcherMaxChannelDuration(maxChannelDuration uint64) E2eDevnetLauncherOption {
+	{
+		return func(c *E2eDevnetLauncherContext) E2eSystemOption {
+			return E2eSystemOption{
+				StartOptions: []e2esys.StartOption{
+					{
+						Key:  "maxChannelDuration",
+						Role: e2esys.RoleSeq,
+						BatcherMod: func(batchConfig *bss.CLIConfig, sys *e2esys.System) {
+							batchConfig.MaxChannelDuration = maxChannelDuration
+						},
+					},
+				},
+			}
+		}
+	}
+}
+
+// WithBatcherMaxFrameSize is a configuration option that modifies the
+// MaxFrameSize for the Batcher Config. This value will then be
+// utilized by the channels created by the batcher.
+func WithBatcherMaxFrameSize(maxFrameSize uint64) E2eDevnetLauncherOption {
+	{
+		return func(c *E2eDevnetLauncherContext) E2eSystemOption {
+			return E2eSystemOption{
+				StartOptions: []e2esys.StartOption{
+					{
+						Key:  "maxFrameSize",
+						Role: e2esys.RoleSeq,
+						BatcherMod: func(batchConfig *bss.CLIConfig, sys *e2esys.System) {
+							batchConfig.MaxFrameSize = maxFrameSize
+						},
+					},
+				},
+			}
+		}
+	}
+}
+
+// WithBatcherCompressor is a configuration option that modifies the Compressor
+// setting of the Batcher Config. This value will be utilized to determine
+// compression options for the channels created by the batcher.
+func WithBatcherCompressor(compressor string) E2eDevnetLauncherOption {
+	{
+		return func(c *E2eDevnetLauncherContext) E2eSystemOption {
+			return E2eSystemOption{
+				StartOptions: []e2esys.StartOption{
+					{
+						Key:  "compressor",
+						Role: e2esys.RoleSeq,
+						BatcherMod: func(batchConfig *bss.CLIConfig, sys *e2esys.System) {
+							batchConfig.Compressor = compressor
+						},
+					},
+				},
+			}
+		}
+	}
+}
diff --git a/espresso/environment/enclave_helpers.go b/espresso/environment/enclave_helpers.go
index 6b754a32ffa..e4a2d1b6087 100644
--- a/espresso/environment/enclave_helpers.go
+++ b/espresso/environment/enclave_helpers.go
@@ -85,13 +85,24 @@ func appendArg(args *[]string, flagName string, value any) {
 // an Enclave, as such, it is better to pre-configure the option instead of
 // allowing for the potential of an error to occur due to not including the
 // other Option.
+//
+// This LauncherOption explicitly creates a Batcher to run in the Enclave based
+// on the configuration of the batcher that would be created and started
+// locally. The locally created Batcher in the E2e System is never meant to
+// actually run with this option, and instead the External Batcher is meant
+// to be run instead.
func LaunchBatcherInEnclave() E2eDevnetLauncherOption {
 	return func(ct *E2eDevnetLauncherContext) E2eSystemOption {
 		return E2eSystemOption{
-			SystemConfigOption: func(cfg *e2esys.SystemConfig) {
-				cfg.DisableBatcher = true
-			},
-			SystemConfigOpt: e2esys.WithAllocType(config.AllocTypeEspressoWithEnclave),
+			// | NOTE: while this option initially disables the batcher for
+			// the purposes of being started later, it is the intention of
+			// this Launcher Option to tie the Batcher as an external
+			// connection, rather than the local testing one. As a result,
+			// the local Batcher should not be accessed / inspected /
+			// interacted with for the purposes of any tests that are
+			// utilizing this Launcher Option.
+			SystemConfigOption: SystemConfigOptionDisableBatcher,
+			SystemConfigOpt:    e2esys.WithAllocType(config.AllocTypeEspressoWithEnclave),
 			StartOptions: []e2esys.StartOption{
 				launchEspressoAttestationVerifierServiceDockerContainer(ct),
 				{
diff --git a/espresso/environment/optitmism_espresso_test_helpers.go b/espresso/environment/optitmism_espresso_test_helpers.go
index 799ce6a5c53..cadb203c987 100644
--- a/espresso/environment/optitmism_espresso_test_helpers.go
+++ b/espresso/environment/optitmism_espresso_test_helpers.go
@@ -570,6 +570,22 @@ func SetEspressoUrls(numGood int, numBad int, badServerUrl string) E2eDevnetLaun
 	}
 }
 
+// SystemConfigOptionDisableBatcher is a SystemConfigOption that disables
+// the Batcher.
+//
+// | NOTE: This doesn't actually stop the Batcher from being created entirely.
+//
+// Instead, it prevents the Batcher from "Starting". The Batcher still
+// exists in the local context, it just won't be running initially. But
+// it can still be started programmatically via its API. This is most
+// easily done by calling `StartBatchSubmitting` on the `TestDriver` of
+// the system.
+func SystemConfigOptionDisableBatcher(cfg *e2esys.SystemConfig) { + cfg.DisableBatcher = true +} + +// Config is a convenience function that allows for the initial modification +// of the SystemConfig only. func Config(fn func(*e2esys.SystemConfig)) E2eDevnetLauncherOption { return func(ct *E2eDevnetLauncherContext) E2eSystemOption { return E2eSystemOption{ @@ -578,6 +594,15 @@ func Config(fn func(*e2esys.SystemConfig)) E2eDevnetLauncherOption { } } +// WithBatcherStoppedInitially is an E2eDevNetLauncherOption that ensures that +// the locally created Batcher is not running initially. +// +// The Batcher can still be started locally with a call to the TestDriver's +// method: `StartBatchSubmitting`. +func WithBatcherStoppedInitially() E2eDevnetLauncherOption { + return Config(SystemConfigOptionDisableBatcher) +} + // getContainerRemappedHostPort is a helper function that takes the // containerListeningHostPort and returns the remapped host port // that the container is listening on. diff --git a/espresso/environment/tx_helpers.go b/espresso/environment/tx_helpers.go index fb114fbbd08..52c19f43e1b 100644 --- a/espresso/environment/tx_helpers.go +++ b/espresso/environment/tx_helpers.go @@ -2,16 +2,17 @@ package environment import ( "context" + "fmt" + "math/big" + "testing" + "time" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/require" - "math/big" - "testing" - "time" - "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" "github.com/ethereum-optimism/optimism/op-e2e/system/helpers" "github.com/ethereum/go-ethereum/ethclient" @@ -25,7 +26,8 @@ func RunSimpleL2Transfer( system *e2esys.System, nonce uint64, amount big.Int, - l2Seq *ethclient.Client) common.Hash { + l2Seq *ethclient.Client, +) common.Hash { _, cancel := context.WithTimeout(ctx, 2*time.Minute) defer cancel() @@ 
-46,7 +48,6 @@ func RunSimpleL2Transfer( txHash := receipt.TxHash return txHash - } // runSimpleL1TransferAndVerifier runs a simple L1 transfer and verifies it on @@ -129,3 +130,51 @@ func RunSimpleL2Burn(ctx context.Context, t *testing.T, system *e2esys.System) { cancel() } + +// RunSimpleMultiTransactions sends numTransactions simple L2 transactions +// from Bob's account and returns the receipts. +// +// This is all attempted in parallel, as it will spawn a separate goroutine +// for each transaction submission. Each transaction will be provided its +// own nonce, based on the currently understood value of the nonce for +// Bob. +// +// This will return once all receipts have been returned. +func RunSimpleMultiTransactions(ctx context.Context, t *testing.T, system *e2esys.System, numTransactions int) ([]*types.Receipt, error) { + ctx, cancel := context.WithTimeoutCause(ctx, 2*time.Minute, fmt.Errorf("failed to submit all transactions within time frame: %w", context.DeadlineExceeded)) + defer cancel() + + senderKey := system.Cfg.Secrets.Bob + senderAddress := system.Cfg.Secrets.Addresses().Bob + l2Seq := system.NodeClient(e2esys.RoleSeq) + nonce, err := l2Seq.NonceAt(ctx, senderAddress, nil) + if err != nil { + require.NoError(t, err, "failed to get nonce for account %s", senderAddress) + } + + ch := make(chan *types.Receipt, numTransactions) + defer close(ch) + for i := range numTransactions { + go (func(ch chan *types.Receipt, i int, nonce uint64) { + receipt := helpers.SendL2Tx(t, system.Cfg, l2Seq, senderKey, func(opts *helpers.TxOpts) { + opts.Nonce = nonce + uint64(i) + // We need to explicitly increase the gas beyond some threshold + // for an unknown reason. 
We'll set it high enough so that + // it hopefully won't cause a problem + opts.Gas = 100_000 + }) + ch <- receipt + })(ch, i, nonce) + } + + var receipts []*types.Receipt + for range numTransactions { + select { + case <-ctx.Done(): + return receipts, ctx.Err() + case receipt := <-ch: + receipts = append(receipts, receipt) + } + } + return receipts, nil +}