Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
38 commits
Select commit Hold shift + click to select a range
543df5b
WIP
geoknee Dec 8, 2025
ea66bee
wip
geoknee Dec 8, 2025
dbaa3ab
WIP
geoknee Dec 9, 2025
7333916
Treat NotFound next L1 origin as chain end
geoknee Dec 9, 2025
acc6c33
Use recover mode in sequence window expiry test
geoknee Dec 9, 2025
e1bfc22
Invoke fault proof earlier and fix typos
geoknee Dec 9, 2025
0aa6adc
reduce diff
geoknee Dec 9, 2025
9e95a84
Use requireL1OriginAt helper in test
geoknee Dec 9, 2025
90abe3d
Introduce L2Sequencer.ActMaybeL2StartBlock
geoknee Dec 10, 2025
16f5198
add TestRecoverModeWhenChainHealthy acceptance test
geoknee Dec 10, 2025
dd9492e
Add SetSequencerRecoverMode and enable debug logs
geoknee Dec 10, 2025
418a985
Adjust L1 block time and sequence window test
geoknee Dec 10, 2025
3b22b34
restore stub
geoknee Dec 10, 2025
8b0f3d1
WIP
geoknee Dec 10, 2025
d7d1ffa
name errs
geoknee Dec 11, 2025
1c08546
refactor
geoknee Dec 11, 2025
7187af4
fix
geoknee Dec 11, 2025
b33328a
add test
geoknee Dec 11, 2025
d164cfc
Rename error constant and add L1 origin tests
geoknee Dec 11, 2025
9fb3560
Use drift check for next L1 origin and update tests
geoknee Dec 11, 2025
84e6db9
Refactor L1 origin selection and error handling
geoknee Dec 11, 2025
3ca9243
fixes
geoknee Dec 11, 2025
581028e
fixes
geoknee Dec 11, 2025
f90208e
lint
geoknee Dec 11, 2025
c361b5c
don't use pointers
geoknee Dec 11, 2025
55d709b
handle retries without a "temporary error"
geoknee Dec 11, 2025
69bf84d
use Fjord drift constant
geoknee Dec 12, 2025
5e7e859
fix origin_selector_test
geoknee Dec 12, 2025
b04f62b
Simplify FindL1Origin
geoknee Dec 12, 2025
08dd4d3
move new pure function into a method on los
geoknee Dec 12, 2025
204f4a1
Update comment to refer to empty nextL1Origin
geoknee Dec 15, 2025
057e394
Use errors.Is for L1 origin error checks
geoknee Dec 15, 2025
099e1b3
Return L1 origin on validation errors
geoknee Dec 15, 2025
ce9fa62
Add expectedResult to origin selector tests
geoknee Dec 15, 2025
389f2b6
Add assertion message and clarify origin comments
geoknee Dec 18, 2025
4e76749
Store recoverMode and add comment period
geoknee Dec 18, 2025
fd8d7fa
Update op-node/rollup/sequencing/origin_selector.go
geoknee Dec 18, 2025
420c249
Update op-node/rollup/sequencing/origin_selector.go
geoknee Dec 18, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion justfile
Original file line number Diff line number Diff line change
Expand Up @@ -39,4 +39,3 @@ update-op-geth ref:
go mod edit -replace=github.com/ethereum/go-ethereum=github.com/ethereum-optimism/op-geth@"$ver"; \
go mod tidy; \
echo "Updated op-geth to $ver"

17 changes: 17 additions & 0 deletions op-acceptance-tests/tests/sequencer/init_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
package sequencer

import (
"log/slog"
"testing"

"github.com/ethereum-optimism/optimism/op-devstack/compat"
"github.com/ethereum-optimism/optimism/op-devstack/presets"
)

// TestMain creates the test-setups against the shared backend.
// It configures a minimal preset restricted to the in-process Go system
// (compat.SysGo) and enables debug-level logging for the suite.
func TestMain(m *testing.M) {
	presets.DoMain(m, presets.WithMinimal(),
		presets.WithCompatibleTypes(compat.SysGo),
		presets.WithLogLevel(slog.LevelDebug),
	)
}
45 changes: 45 additions & 0 deletions op-acceptance-tests/tests/sequencer/recover_mode_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
package sequencer

import (
"testing"
"time"

"github.com/ethereum-optimism/optimism/op-devstack/devtest"
"github.com/ethereum-optimism/optimism/op-devstack/presets"
"github.com/stretchr/testify/require"
)

// TestRecoverModeWhenChainHealthy checks that the chain
// can progress as normal when recover mode is activated.
// Recover mode is designed to recover from a sequencing
// window expiry when there are ample L1 blocks to eagerly
// progress the l1 origin to. But when the l1 origin is
// close to the tip of the l1 chain, the eagerness would cause
// a delay in unsafe block production while the sequencer waits
// for the next l1 origin to become available. Recover mode
// has since been patched, and the sequencer will not demand the
// next l1 origin until it is actually available. This test
// protects against a regression in that behavior.
func TestRecoverModeWhenChainHealthy(gt *testing.T) {
	t := devtest.ParallelT(gt)
	sys := presets.NewMinimal(t)
	tracer := t.Tracer()
	ctx := t.Ctx()

	// Enable recover mode up front; on a healthy chain the sequencer
	// should keep producing unsafe blocks regardless.
	err := sys.L2CL.SetSequencerRecoverMode(true)
	require.NoError(t, err)

	blockTime := sys.L2Chain.Escape().RollupConfig().BlockTime
	numL2Blocks := uint64(20)
	// Allow a small grace period (+5s) beyond the ideal production time.
	waitTime := time.Duration(blockTime*numL2Blocks+5) * time.Second

	require.Eventually(t, func() bool {
		ctx, span := tracer.Start(ctx, "check head")
		defer span.End()

		// Query sync status once per tick and log both heads for debugging.
		status := sys.L2CL.SyncStatus()
		unsafeNum := status.UnsafeL2.Number
		t.Logger().InfoContext(ctx, "unsafe head", "number", unsafeNum, "safe head", status.SafeL2.Number)
		return unsafeNum >= numL2Blocks
	}, waitTime, time.Duration(blockTime)*time.Second)
}
4 changes: 4 additions & 0 deletions op-devstack/dsl/l2_cl.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,10 @@ func (cl *L2CLNode) StopSequencer() common.Hash {
return unsafeHead
}

// SetSequencerRecoverMode toggles recover mode on the sequencer via the
// node's rollup RPC API, returning any RPC error. Unlike SyncStatus, this
// call uses the node's base context without an explicit timeout.
func (cl *L2CLNode) SetSequencerRecoverMode(b bool) error {
	return cl.inner.RollupAPI().SetRecoverMode(cl.ctx, b)
}

func (cl *L2CLNode) SyncStatus() *eth.SyncStatus {
ctx, cancel := context.WithTimeout(cl.ctx, DefaultTimeout)
defer cancel()
Expand Down
2 changes: 1 addition & 1 deletion op-e2e/actions/helpers/l2_batcher.go
Original file line number Diff line number Diff line change
Expand Up @@ -270,7 +270,7 @@ func (s *L2Batcher) Buffer(t Testing, bufferOpts ...BufferOption) error {
}
}

s.ActCreateChannel(t, s.rollupCfg.IsDelta(block.Time()), options.channelModifiers...)
s.ActCreateChannel(t, s.rollupCfg.IsDelta(block.Time()) && !s.l2BatcherCfg.ForceSubmitSingularBatch, options.channelModifiers...)

if _, err := s.L2ChannelOut.AddBlock(s.rollupCfg, block); err != nil {
return err
Expand Down
26 changes: 19 additions & 7 deletions op-e2e/actions/helpers/l2_sequencer.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,8 +40,8 @@ func (m *MockL1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bl
return m.actual.FindL1Origin(ctx, l2Head)
}

func (m *MockL1OriginSelector) SetRecoverMode(bool) {
// noop
// SetRecoverMode forwards the recover-mode flag to the wrapped (actual)
// origin selector, so recover-mode behavior is exercised in action tests
// rather than being a no-op.
func (m *MockL1OriginSelector) SetRecoverMode(b bool) {
	m.actual.SetRecoverMode(b)
}

// L2Sequencer is an actor that functions like a rollup node,
Expand Down Expand Up @@ -98,20 +98,28 @@ func NewL2Sequencer(t Testing, log log.Logger, l1 derive.L1Fetcher, blobSrc deri

// ActL2StartBlock starts building of a new L2 block on top of the head.
// It delegates to ActMaybeL2StartBlock and fails the test if block
// building could not be started.
func (s *L2Sequencer) ActL2StartBlock(t Testing) {
	require.NoError(t, s.ActMaybeL2StartBlock(t), "failed to start block building")
}

// ActMaybeL2StartBlock tries to start building a new L2 block on top of the head.
// It returns nil without starting a build when derivation is not idle or when a
// block is already being built — those cases are reported through t.InvalidAction
// rather than as errors. A non-nil error is returned only when the build-started
// event fails to arrive after the sequencer action is emitted.
func (s *L2Sequencer) ActMaybeL2StartBlock(t Testing) error {
	require.NoError(t, s.drainer.Drain()) // can't build when other work is still blocking
	if !s.L2PipelineIdle {
		t.InvalidAction("cannot start L2 build when derivation is not idle")
		return nil
	}
	if s.l2Building {
		t.InvalidAction("already started building L2 block")
		return nil
	}
	// Prompt the sequencer to act, then wait until it reports that block
	// building has actually started before marking the builder busy.
	s.synchronousEvents.Emit(t.Ctx(), sequencing.SequencerActionEvent{})

	err := s.drainer.DrainUntil(event.Is[engine.BuildStartedEvent], false)
	if err != nil {
		return err
	}
	s.l2Building = true
	return nil
}

// ActL2EndBlock completes a new L2 block and applies it to the L2 chain as new canonical unsafe head
Expand Down Expand Up @@ -272,3 +280,7 @@ func (s *L2Sequencer) ActBuildL2ToInterop(t Testing) {
s.ActL2EmptyBlock(t)
}
}

// ActSetRecoverMode toggles recover mode on the underlying sequencer.
// The Testing parameter is currently unused; presumably it is kept for
// signature consistency with the other Act* helpers.
func (s *L2Sequencer) ActSetRecoverMode(t Testing, b bool) {
	s.sequencer.SetRecoverMode(b)
}
2 changes: 1 addition & 1 deletion op-e2e/actions/helpers/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ func DefaultRollupTestParams() *e2eutils.TestParams {
MaxSequencerDrift: 40,
SequencerWindowSize: 120,
ChannelTimeout: 120,
L1BlockTime: 15,
L1BlockTime: 12, // Many of the action helpers assume a 12s L1 block time
AllocType: config.DefaultAllocType,
}
}
Expand Down
3 changes: 0 additions & 3 deletions op-e2e/actions/proofs/helpers/env.go
Original file line number Diff line number Diff line change
Expand Up @@ -243,15 +243,12 @@ func (env *L2FaultProofEnv) BatchAndMine(t helpers.Testing) {
// Returns the L2 Safe Block Reference
func (env *L2FaultProofEnv) BatchMineAndSync(t helpers.Testing) eth.L2BlockRef {
t.Helper()
id := env.Miner.UnsafeID()
env.BatchAndMine(t)
env.Sequencer.ActL1HeadSignal(t)
env.Sequencer.ActL2PipelineFull(t)

// Assertions

syncStatus := env.Sequencer.SyncStatus()
require.Equal(t, syncStatus.UnsafeL2.L1Origin, id, "UnsafeL2.L1Origin should equal L1 Unsafe ID before batch submitted")
require.Equal(t, syncStatus.UnsafeL2, syncStatus.SafeL2, "UnsafeL2 should equal SafeL2")

return syncStatus.SafeL2
Expand Down
132 changes: 125 additions & 7 deletions op-e2e/actions/proofs/sequence_window_expiry_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,26 +5,47 @@ import (

actionsHelpers "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers"
"github.com/ethereum-optimism/optimism/op-e2e/actions/proofs/helpers"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-program/client/claim"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)

// Run a test that proves a deposit-only block generated due to sequence window expiry.
// Run a test that proves a deposit-only block generated due to sequence window expiry,
// and then recovers the chain using sequencer recover mode.
func runSequenceWindowExpireTest(gt *testing.T, testCfg *helpers.TestCfg[any]) {
t := actionsHelpers.NewDefaultTesting(gt)
tp := helpers.NewTestParams()
env := helpers.NewL2FaultProofEnv(t, testCfg, tp, helpers.NewBatcherCfg())
const SEQUENCER_WINDOW_SIZE = 50 // (short, to keep test fast)
tp := helpers.NewTestParams(func(p *e2eutils.TestParams) {
p.SequencerWindowSize = SEQUENCER_WINDOW_SIZE
p.MaxSequencerDrift = 1800 // use 1800 seconds (30 minutes), which is the protocol constant since Fjord
})

// It seems more difficult (almost impossible) to recover from sequencing window expiry with span batches,
// since the singular batches within are invalidated _atomically_.
// That is to say, if the oldest batch in the span batch fails the sequencing window check
// (l1 origin + seq window < l1 inclusion)
// All following batches are invalidated / dropped as well.
// https://github.com/ethereum-optimism/optimism/blob/73339162d78a1ebf2daadab01736382eed6f4527/op-node/rollup/derive/batches.go#L96-L100
//
// If the same blocks were batched with singular batches, the validation rules are different
// https://github.com/ethereum-optimism/optimism/blob/73339162d78a1ebf2daadab01736382eed6f4527/op-node/rollup/derive/batches.go#L83-L86
// In the case of recover mode, the noTxPool=true condition means auto-derivation actually fills
// the gap with identical blocks anyway, meaning the following batches are actually still valid.
bc := helpers.NewBatcherCfg()
bc.ForceSubmitSingularBatch = true

// Mine an empty block for gas estimation purposes.
env := helpers.NewL2FaultProofEnv(t, testCfg, tp, bc)

// Mine an empty L1 block for gas estimation purposes.
env.Miner.ActEmptyBlock(t)

// Expire the sequence window by building `SequenceWindow + 1` empty blocks on L1.
for i := 0; i < int(tp.SequencerWindowSize)+1; i++ {
env.Alice.L1.ActResetTxOpts(t)
env.Alice.ActDeposit(t)

env.Miner.ActL1StartBlock(12)(t)
env.Miner.ActL1StartBlock(tp.L1BlockTime)(t)
env.Miner.ActL1IncludeTx(env.Alice.Address())(t)
env.Miner.ActL1EndBlock(t)

Expand All @@ -42,10 +63,107 @@ func runSequenceWindowExpireTest(gt *testing.T, testCfg *helpers.TestCfg[any]) {

// Ensure the safe head advanced forcefully.
l2SafeHead = env.Engine.L2Chain().CurrentSafeBlock()
require.Greater(t, l2SafeHead.Number.Uint64(), uint64(0))
require.Greater(t, l2SafeHead.Number.Uint64(), uint64(0),
"The safe head failed to progress after the sequencing window expired (expected deposit-only blocks to be derived).")

// Run the FPP on one of the auto-derived blocks.
env.RunFaultProofProgram(t, l2SafeHead.Number.Uint64()/2, testCfg.CheckResult, testCfg.InputParams...)

// Set recover mode on the sequencer:
env.Sequencer.ActSetRecoverMode(t, true)
// Since recover mode only affects the L2 CL (op-node),
// it won't stop the test environment injecting transactions
// directly into the engine. So we will force the engine
// to ignore such injections if recover mode is enabled.
env.Engine.EngineApi.SetForceEmpty(true)

// Define "lag" as the difference between the current L1 block number and the safe L2 block's L1 origin number.
computeLag := func() int {
ss := env.Sequencer.SyncStatus()
return int(ss.CurrentL1.Number - ss.SafeL2.L1Origin.Number)
}

// Define "drift" as the difference between the current L2 block's timestamp and the unsafe L2 block's L1 origin's timestamp.
computeDrift := func() int {
ss := env.Sequencer.SyncStatus()
l2header, err := env.Engine.EthClient().HeaderByHash(t.Ctx(), ss.UnsafeL2.Hash)
require.NoError(t, err)
l1header, err := env.Miner.EthClient().HeaderByHash(t.Ctx(), ss.UnsafeL2.L1Origin.Hash)
require.NoError(t, err)
t.Log("l2header.Time", l2header.Time)
t.Log("l1header.Time", l1header.Time)
return int(l2header.Time) - int(l1header.Time)
}

// Build both chains and assert the L1 origin catches back up with the tip of the L1 chain.
lag := computeLag()
t.Log("lag", lag)
drift := computeDrift()
t.Log("drift", drift)
require.GreaterOrEqual(t, uint64(lag), tp.SequencerWindowSize, "Lag is less than sequencing window size")
numL1Blocks := 0
timeout := tp.SequencerWindowSize * 50

for numL1Blocks < int(timeout) {
for range 100 * tp.L1BlockTime / env.Sd.RollupCfg.BlockTime { // go at 100x real time
err := env.Sequencer.ActMaybeL2StartBlock(t)
if err != nil {
break
}
env.Bob.L2.ActResetTxOpts(t)
env.Bob.L2.ActMakeTx(t)
env.Engine.ActL2IncludeTx(env.Bob.Address())(t)
// RecoverMode (enabled above) should prevent this
// transaction from being included in the block, which
// is critical for recover mode to work.
env.Sequencer.ActL2EndBlock(t)
drift = computeDrift()
t.Log("drift", drift)
}
env.BatchMineAndSync(t) // Mines 1 block on L1
numL1Blocks++
lag = computeLag()
t.Log("lag", lag)
drift = computeDrift()
t.Log("drift", drift)
if lag == 1 { // A lag of 1 is the minimum possible.
break
}
}

if uint64(numL1Blocks) >= timeout {
t.Fatal("L1 Origin did not catch up to tip within %d L1 blocks (lag is %d)", numL1Blocks, lag)
} else {
t.Logf("L1 Origin caught up to within %d blocks of the tip within %d L1 blocks (sequencing window size %d)",
lag, numL1Blocks, tp.SequencerWindowSize)
}

switch {
case drift == 0:
t.Fatal("drift is zero, this implies the unsafe l2 head is pinned to the l1 head")
case drift > int(tp.MaxSequencerDrift):
t.Fatal("drift is too high")
default:
t.Log("drift", drift)
}

// Disable recover mode so we can get some user transactions in again.
env.Sequencer.ActSetRecoverMode(t, false)
env.Engine.EngineApi.SetForceEmpty(false)
l2SafeBefore := env.Sequencer.L2Safe()
env.Sequencer.ActL2StartBlock(t)
env.Bob.L2.ActResetTxOpts(t)
env.Bob.L2.ActMakeTx(t)
env.Engine.ActL2IncludeTx(env.Bob.Address())(t)
env.Sequencer.ActL2EndBlock(t)
env.BatchMineAndSync(t)
l2Safe := env.Sequencer.L2Safe()
require.Equal(t, l2Safe.Number, l2SafeBefore.Number+1, "safe chain did not progress with user transactions")
l2SafeBlock, err := env.Engine.EthClient().BlockByHash(t.Ctx(), l2Safe.Hash)
require.NoError(t, err)
// Assert safe block has at least two transactions
require.GreaterOrEqual(t, len(l2SafeBlock.Transactions()), 2, "safe block did not have at least two transactions")

env.RunFaultProofProgram(t, l2Safe.Number, testCfg.CheckResult, testCfg.InputParams...)
}

// Runs a test that proves a block in a chain where the batcher opens a channel, the sequence window expires, and then the
Expand Down
Loading