From e6817fd74da9259209c04a56bdb63e9ae99f6902 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 4 Feb 2026 14:17:10 -0600 Subject: [PATCH 01/23] op-supernode: Add block invalidation and deny list to chain container - Add DenyList using bbolt for persistent storage of invalid block hashes - Add InvalidateBlock method to ChainContainer interface - InvalidateBlock adds hash to denylist and triggers rewind if current block matches - Add IsDenied helper to check if a block is on the deny list - Add acceptance test skeleton for block replacement flow --- .../invalid_message_replacement_test.go | 230 ++++++++++++++++++ .../chain_container/chain_container.go | 22 ++ .../supernode/chain_container/invalidation.go | 219 +++++++++++++++++ 3 files changed, 471 insertions(+) create mode 100644 op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go create mode 100644 op-supernode/supernode/chain_container/invalidation.go diff --git a/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go b/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go new file mode 100644 index 0000000000000..94dd5f0f99f4c --- /dev/null +++ b/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go @@ -0,0 +1,230 @@ +package interop + +import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/ethereum/go-ethereum/core/types" + + "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/eth/bigs" + "github.com/ethereum-optimism/optimism/op-service/txintent" +) + +// TestSupernodeInteropInvalidMessageReplacement tests that: +// WHEN: an invalid Executing Message is included in a chain +// 
THEN: +// - The block containing the invalid message gets reset backward +// - The chain re-derives and produces a replacement block +// - Validity eventually advances past the replaced block +// +// This test verifies the block invalidation and replacement mechanism. +func TestSupernodeInteropInvalidMessageReplacement(gt *testing.T) { + t := devtest.SerialT(gt) + sys := presets.NewTwoL2SupernodeInterop(t, 0) + + ctx := t.Ctx() + snClient := sys.SuperNodeClient() + + // Create funded EOAs on both chains + alice := sys.FunderA.NewFundedEOA(eth.OneEther) + bob := sys.FunderB.NewFundedEOA(eth.OneEther) + + // Deploy event logger on chain A + eventLoggerA := alice.DeployEventLogger() + + // Sync chains + sys.L2B.CatchUpTo(sys.L2A) + sys.L2A.CatchUpTo(sys.L2B) + + rng := rand.New(rand.NewSource(12345)) + + // Send an initiating message on chain A + initTrigger := randomInitTrigger(rng, eventLoggerA, 2, 10) + initTx, initReceipt := alice.SendInitMessage(initTrigger) + + t.Logger().Info("initiating message sent on chain A", + "block", initReceipt.BlockNumber, + "hash", initReceipt.BlockHash, + ) + + // Wait for chain B to catch up + sys.L2B.WaitForBlock() + + // Record the verified timestamp before the invalid message + // We need to know what timestamp was verified before the invalid exec message + blockTime := sys.L2A.Escape().RollupConfig().BlockTime + genesisTime := sys.L2A.Escape().RollupConfig().Genesis.L2Time + + // Wait for some timestamps to be verified first + targetTimestamp := genesisTime + blockTime*2 + t.Require().Eventually(func() bool { + resp, err := snClient.SuperRootAtTimestamp(ctx, targetTimestamp) + if err != nil { + return false + } + t.Logger().Info("super root at timestamp", "timestamp", targetTimestamp, "data", resp.Data) + return resp.Data != nil + }, 60*time.Second, time.Second, "initial timestamps should be verified") + + t.Logger().Info("initial verification confirmed", "timestamp", targetTimestamp) + + // Send an INVALID executing message on 
chain B + // Modify the message identifier to make it invalid (wrong log index) + invalidExecReceipt := sendInvalidExecMessageForReplacement(t, bob, initTx, 0) + + invalidBlockNumber := bigs.Uint64Strict(invalidExecReceipt.BlockNumber) + invalidBlockHash := invalidExecReceipt.BlockHash + invalidBlock := sys.L2ELB.BlockRefByHash(invalidExecReceipt.BlockHash) + invalidBlockTimestamp := invalidBlock.Time + + t.Logger().Info("invalid executing message sent on chain B", + "block", invalidBlockNumber, + "hash", invalidBlockHash, + "timestamp", invalidBlockTimestamp, + ) + + // Record the safety status before waiting + initialStatusA := sys.L2ACL.SyncStatus() + initialStatusB := sys.L2BCL.SyncStatus() + + t.Logger().Info("initial safety status", + "chainA_local_safe", initialStatusA.LocalSafeL2.Number, + "chainA_unsafe", initialStatusA.UnsafeL2.Number, + "chainB_local_safe", initialStatusB.LocalSafeL2.Number, + "chainB_unsafe", initialStatusB.UnsafeL2.Number, + ) + + // Now we verify the key behaviors: + // 1. The invalid block should be replaced with a different block at the same height + // 2. 
Validity should eventually advance past the replaced block + + observationDuration := 60 * time.Second + checkInterval := time.Second + + start := time.Now() + var replacementDetected bool + var replacementBlockHash [32]byte + + for time.Since(start) < observationDuration { + time.Sleep(checkInterval) + + // Check if the block at the invalid block number has changed + currentBlock := sys.L2ELB.BlockRefByNumber(invalidBlockNumber) + + if currentBlock.Hash != invalidBlockHash { + replacementDetected = true + replacementBlockHash = currentBlock.Hash + t.Logger().Info("REPLACEMENT DETECTED!", + "block_number", invalidBlockNumber, + "old_hash", invalidBlockHash, + "new_hash", currentBlock.Hash, + ) + } + + // Check current safety status + statusA := sys.L2ACL.SyncStatus() + statusB := sys.L2BCL.SyncStatus() + + // Check if the invalid block's timestamp has been verified + resp, err := snClient.SuperRootAtTimestamp(ctx, invalidBlockTimestamp) + t.Require().NoError(err, "SuperRootAtTimestamp should not error") + + t.Logger().Info("observation tick", + "elapsed", time.Since(start).Round(time.Second), + "chainA_local_safe", statusA.LocalSafeL2.Number, + "chainA_unsafe", statusA.UnsafeL2.Number, + "chainB_local_safe", statusB.LocalSafeL2.Number, + "chainB_unsafe", statusB.UnsafeL2.Number, + "invalid_block_ts", invalidBlockTimestamp, + "replacement_detected", replacementDetected, + "verified", resp.Data != nil, + ) + + // If replacement was detected and timestamp is now verified, we're done + if replacementDetected && resp.Data != nil { + t.Logger().Info("SUCCESS: replacement block is now verified", + "timestamp", invalidBlockTimestamp, + "replacement_hash", replacementBlockHash, + ) + break + } + } + + // Final assertions + + // ASSERTION: The invalid block should have been replaced + t.Require().True(replacementDetected, + "invalid block should have been replaced with a different block") + + // ASSERTION: The replacement block should be different from the invalid block + 
t.Require().NotEqual(invalidBlockHash, replacementBlockHash, + "replacement block hash should differ from invalid block hash") + + // ASSERTION: The timestamp should eventually be verified (with the replacement block) + finalResp, err := snClient.SuperRootAtTimestamp(ctx, invalidBlockTimestamp) + t.Require().NoError(err) + t.Require().NotNil(finalResp.Data, + "timestamp should be verified after block replacement") + + finalStatusA := sys.L2ACL.SyncStatus() + finalStatusB := sys.L2BCL.SyncStatus() + + t.Logger().Info("test complete: invalid block was replaced and validity advanced", + "final_chainA_local_safe", finalStatusA.LocalSafeL2.Number, + "final_chainA_unsafe", finalStatusA.UnsafeL2.Number, + "final_chainB_local_safe", finalStatusB.LocalSafeL2.Number, + "final_chainB_unsafe", finalStatusB.UnsafeL2.Number, + "invalid_block_hash", invalidBlockHash, + "replacement_block_hash", replacementBlockHash, + "invalid_block_timestamp", invalidBlockTimestamp, + ) +} + +// sendInvalidExecMessageForReplacement sends an executing message with a modified (invalid) identifier. +// This makes the message invalid because it references a non-existent log index. 
+func sendInvalidExecMessageForReplacement( + t devtest.T, + bob *dsl.EOA, + initIntent *txintent.IntentTx[*txintent.InitTrigger, *txintent.InteropOutput], + eventIdx int, +) *types.Receipt { + ctx := t.Ctx() + + // Evaluate the init result to get the message entries + result, err := initIntent.Result.Eval(ctx) + t.Require().NoError(err, "failed to evaluate init result") + t.Require().Greater(len(result.Entries), eventIdx, "event index out of range") + + // Get the message and modify it to be invalid + msg := result.Entries[eventIdx] + + // Make the message invalid by setting an impossible log index + // This creates a message that claims to reference a log that doesn't exist + msg.Identifier.LogIndex = 9999 + + // Create the exec trigger with the invalid message + execTrigger := &txintent.ExecTrigger{ + Executor: constants.CrossL2Inbox, + Msg: msg, + } + + // Create the intent with the invalid trigger + tx := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](bob.Plan()) + tx.Content.DependOn(&initIntent.Result) + tx.Content.Fn(func(ctx context.Context) (*txintent.ExecTrigger, error) { + return execTrigger, nil + }) + + receipt, err := tx.PlannedTx.Included.Eval(ctx) + t.Require().NoError(err, "invalid exec msg receipt not found") + t.Logger().Info("invalid exec message included", "chain", bob.ChainID(), "block", receipt.BlockNumber) + + return receipt +} diff --git a/op-supernode/supernode/chain_container/chain_container.go b/op-supernode/supernode/chain_container/chain_container.go index f83a6f016f713..48719cbe3de54 100644 --- a/op-supernode/supernode/chain_container/chain_container.go +++ b/op-supernode/supernode/chain_container/chain_container.go @@ -20,6 +20,7 @@ import ( "github.com/ethereum-optimism/optimism/op-supernode/supernode/chain_container/engine_controller" "github.com/ethereum-optimism/optimism/op-supernode/supernode/chain_container/virtual_node" "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" 
"github.com/ethereum/go-ethereum/core/types" gethlog "github.com/ethereum/go-ethereum/log" "github.com/prometheus/client_golang/prometheus" @@ -49,6 +50,12 @@ type ChainContainer interface { FetchReceipts(ctx context.Context, blockHash eth.BlockID) (eth.BlockInfo, types.Receipts, error) // BlockTime returns the block time in seconds for this chain. BlockTime() uint64 + // InvalidateBlock adds a block to the deny list and triggers a rewind if the chain + // currently uses that block at the specified height. + // Returns true if a rewind was triggered, false otherwise. + InvalidateBlock(ctx context.Context, height uint64, payloadHash common.Hash) (bool, error) + // IsDenied checks if a block hash is on the deny list at the given height. + IsDenied(height uint64, payloadHash common.Hash) (bool, error) } type virtualNodeFactory func(cfg *opnodecfg.Config, log gethlog.Logger, initOverrides *rollupNode.InitializationOverrides, appVersion string) virtual_node.VirtualNode @@ -58,6 +65,7 @@ type simpleChainContainer struct { vncfg *opnodecfg.Config cfg config.CLIConfig engine engine_controller.EngineController + denyList *DenyList pause atomic.Bool stop atomic.Bool stopped chan struct{} @@ -107,6 +115,13 @@ func NewChainContainer( log.Warn("failed to attach in-proc rollup client (initial)", "err", err) } } + // Initialize the deny list for block invalidation + denyListPath := c.subPath("denylist") + if denyList, err := OpenDenyList(denyListPath); err != nil { + log.Error("failed to open deny list", "err", err) + } else { + c.denyList = denyList + } // Initialize engine controller (separate connection, not an op-node override) with a short setup timeout if vncfg.L2 != nil { setupCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -227,6 +242,13 @@ func (c *simpleChainContainer) Stop(ctx context.Context) error { _ = c.engine.Close() } + // Close deny list database + if c.denyList != nil { + if err := c.denyList.Close(); err != nil { + 
c.log.Error("error closing deny list", "error", err) + } + } + select { case <-c.stopped: return nil diff --git a/op-supernode/supernode/chain_container/invalidation.go b/op-supernode/supernode/chain_container/invalidation.go new file mode 100644 index 0000000000000..d2069955adc5d --- /dev/null +++ b/op-supernode/supernode/chain_container/invalidation.go @@ -0,0 +1,219 @@ +package chain_container + +import ( + "context" + "encoding/binary" + "fmt" + "path/filepath" + "sync" + + "github.com/ethereum/go-ethereum/common" + bolt "go.etcd.io/bbolt" +) + +const ( + denyListDBName = "denylist" +) + +// denyListBucketName is the name of the bbolt bucket used to store denied block hashes. +var denyListBucketName = []byte("denied_blocks") + +// DenyList provides persistence for invalid block payload hashes using bbolt. +// Blocks are keyed by block height, with each height potentially having multiple denied hashes. +type DenyList struct { + db *bolt.DB + mu sync.RWMutex +} + +// OpenDenyList opens or creates a DenyList at the given data directory. +func OpenDenyList(dataDir string) (*DenyList, error) { + dbPath := filepath.Join(dataDir, denyListDBName+".db") + db, err := bolt.Open(dbPath, 0600, nil) + if err != nil { + return nil, fmt.Errorf("failed to open denylist bbolt at %s: %w", dbPath, err) + } + + // Ensure the bucket exists + err = db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucketIfNotExists(denyListBucketName) + return err + }) + if err != nil { + db.Close() + return nil, fmt.Errorf("failed to create denylist bucket: %w", err) + } + + return &DenyList{db: db}, nil +} + +// heightToKey converts a block height to a big-endian byte key. +// Using big-endian ensures lexicographic ordering matches numeric ordering. +func heightToKey(height uint64) []byte { + key := make([]byte, 8) + binary.BigEndian.PutUint64(key, height) + return key +} + +// Add adds a payload hash to the deny list at the given block height. 
+// Multiple hashes can be denied at the same height. +func (d *DenyList) Add(height uint64, payloadHash common.Hash) error { + d.mu.Lock() + defer d.mu.Unlock() + + key := heightToKey(height) + + return d.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(denyListBucketName) + + // Get existing hashes at this height + existing := b.Get(key) + var hashes []byte + if existing != nil { + // Check if hash already exists + for i := 0; i+common.HashLength <= len(existing); i += common.HashLength { + if common.BytesToHash(existing[i:i+common.HashLength]) == payloadHash { + // Already denied + return nil + } + } + hashes = make([]byte, len(existing), len(existing)+common.HashLength) + copy(hashes, existing) + } + + // Append the new hash + hashes = append(hashes, payloadHash.Bytes()...) + return b.Put(key, hashes) + }) +} + +// Contains checks if a payload hash is denied at the given block height. +func (d *DenyList) Contains(height uint64, payloadHash common.Hash) (bool, error) { + d.mu.RLock() + defer d.mu.RUnlock() + + key := heightToKey(height) + var found bool + + err := d.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(denyListBucketName) + existing := b.Get(key) + if existing == nil { + return nil + } + + // Search for the hash in the list + for i := 0; i+common.HashLength <= len(existing); i += common.HashLength { + if common.BytesToHash(existing[i:i+common.HashLength]) == payloadHash { + found = true + return nil + } + } + return nil + }) + + return found, err +} + +// GetDeniedHashes returns all denied payload hashes at the given block height. 
+func (d *DenyList) GetDeniedHashes(height uint64) ([]common.Hash, error) { + d.mu.RLock() + defer d.mu.RUnlock() + + key := heightToKey(height) + var hashes []common.Hash + + err := d.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(denyListBucketName) + existing := b.Get(key) + if existing == nil { + return nil + } + + for i := 0; i+common.HashLength <= len(existing); i += common.HashLength { + hashes = append(hashes, common.BytesToHash(existing[i:i+common.HashLength])) + } + return nil + }) + + return hashes, err +} + +// Close closes the database. +func (d *DenyList) Close() error { + return d.db.Close() +} + +// InvalidateBlock adds a block to the deny list and triggers a rewind if the chain +// currently uses that block at the specified height. +// Returns true if a rewind was triggered, false otherwise. +func (c *simpleChainContainer) InvalidateBlock(ctx context.Context, height uint64, payloadHash common.Hash) (bool, error) { + if c.denyList == nil { + return false, fmt.Errorf("deny list not initialized") + } + + // Add to deny list first + if err := c.denyList.Add(height, payloadHash); err != nil { + return false, fmt.Errorf("failed to add block to deny list: %w", err) + } + + c.log.Info("added block to deny list", + "height", height, + "payloadHash", payloadHash, + ) + + // Check if the current chain uses this block at this height + if c.engine == nil { + c.log.Warn("engine not initialized, cannot check current block") + return false, nil + } + + currentBlock, err := c.engine.BlockAtTimestamp(ctx, c.blockNumberToTimestamp(height), "") + if err != nil { + c.log.Warn("failed to get current block at height", "height", height, "err", err) + return false, nil + } + + // Compare the current block hash with the invalidated hash + if currentBlock.Hash != payloadHash { + c.log.Info("current block differs from invalidated block, no rewind needed", + "height", height, + "currentHash", currentBlock.Hash, + "invalidatedHash", payloadHash, + ) + return false, nil + } 
+ + c.log.Warn("current block matches invalidated block, initiating rewind", + "height", height, + "hash", payloadHash, + ) + + // Rewind to the prior block's timestamp + priorTimestamp := c.blockNumberToTimestamp(height - 1) + if err := c.RewindEngine(ctx, priorTimestamp); err != nil { + return false, fmt.Errorf("failed to rewind engine: %w", err) + } + + c.log.Info("rewind completed after block invalidation", + "invalidatedHeight", height, + "rewindToTimestamp", priorTimestamp, + ) + + return true, nil +} + +// blockNumberToTimestamp converts a block number to its timestamp using rollup config. +func (c *simpleChainContainer) blockNumberToTimestamp(blockNum uint64) uint64 { + if c.vncfg == nil { + return 0 + } + return c.vncfg.Rollup.Genesis.L2Time + (blockNum * c.vncfg.Rollup.BlockTime) +} + +// IsDenied checks if a block hash is on the deny list at the given height. +func (c *simpleChainContainer) IsDenied(height uint64, payloadHash common.Hash) (bool, error) { + if c.denyList == nil { + return false, fmt.Errorf("deny list not initialized") + } + return c.denyList.Contains(height, payloadHash) +} From c3703c2919b83b3a4ea44009e653a16cdd30c793 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 4 Feb 2026 15:18:11 -0600 Subject: [PATCH 02/23] op-supernode: Add unit tests for block invalidation - Add table-driven tests for DenyList (Add, Contains, Persistence, GetDeniedHashes) - Add tests for InvalidateBlock (rewind triggers, no-rewind cases, timestamp calculation) - Add tests for IsDenied helper - Fix OpenDenyList to create parent directories - Add InvalidateBlock/IsDenied stubs to mock ChainContainers in existing tests --- .../invalid_message_replacement_test.go | 2 +- .../activity/interop/interop_test.go | 6 + .../supernode/activity/interop/logdb_test.go | 6 + .../activity/superroot/superroot_test.go | 7 + .../supernode/chain_container/invalidation.go | 4 + .../chain_container/invalidation_test.go | 514 ++++++++++++++++++ 6 files changed, 538 insertions(+), 
1 deletion(-) create mode 100644 op-supernode/supernode/chain_container/invalidation_test.go diff --git a/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go b/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go index 94dd5f0f99f4c..036bfde56286e 100644 --- a/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go +++ b/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go @@ -12,8 +12,8 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/eth/bigs" "github.com/ethereum-optimism/optimism/op-service/txintent" ) diff --git a/op-supernode/supernode/activity/interop/interop_test.go b/op-supernode/supernode/activity/interop/interop_test.go index 88fdfebd6d7f6..bcbb4281cbb01 100644 --- a/op-supernode/supernode/activity/interop/interop_test.go +++ b/op-supernode/supernode/activity/interop/interop_test.go @@ -892,6 +892,12 @@ func (m *mockChainContainer) RewindEngine(ctx context.Context, timestamp uint64) return nil } func (m *mockChainContainer) BlockTime() uint64 { return 1 } +func (m *mockChainContainer) InvalidateBlock(ctx context.Context, height uint64, payloadHash common.Hash) (bool, error) { + return false, nil +} +func (m *mockChainContainer) IsDenied(height uint64, payloadHash common.Hash) (bool, error) { + return false, nil +} var _ cc.ChainContainer = (*mockChainContainer)(nil) diff --git a/op-supernode/supernode/activity/interop/logdb_test.go b/op-supernode/supernode/activity/interop/logdb_test.go index 47c2db75fe785..f0bed03f027f8 100644 --- a/op-supernode/supernode/activity/interop/logdb_test.go +++ b/op-supernode/supernode/activity/interop/logdb_test.go @@ 
-596,5 +596,11 @@ func (m *statefulMockChainContainer) BlockTime() uint64 { return 1 } func (m *statefulMockChainContainer) RewindEngine(ctx context.Context, timestamp uint64) error { return nil } +func (m *statefulMockChainContainer) InvalidateBlock(ctx context.Context, height uint64, payloadHash common.Hash) (bool, error) { + return false, nil +} +func (m *statefulMockChainContainer) IsDenied(height uint64, payloadHash common.Hash) (bool, error) { + return false, nil +} var _ cc.ChainContainer = (*statefulMockChainContainer)(nil) diff --git a/op-supernode/supernode/activity/superroot/superroot_test.go b/op-supernode/supernode/activity/superroot/superroot_test.go index 443162d5a46fa..d56f1d634793e 100644 --- a/op-supernode/supernode/activity/superroot/superroot_test.go +++ b/op-supernode/supernode/activity/superroot/superroot_test.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum-optimism/optimism/op-supernode/supernode/activity" cc "github.com/ethereum-optimism/optimism/op-supernode/supernode/chain_container" "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" gethlog "github.com/ethereum/go-ethereum/log" @@ -97,6 +98,12 @@ func (m *mockCC) ID() eth.ChainID { } func (m *mockCC) BlockTime() uint64 { return 1 } +func (m *mockCC) InvalidateBlock(ctx context.Context, height uint64, payloadHash common.Hash) (bool, error) { + return false, nil +} +func (m *mockCC) IsDenied(height uint64, payloadHash common.Hash) (bool, error) { + return false, nil +} var _ cc.ChainContainer = (*mockCC)(nil) diff --git a/op-supernode/supernode/chain_container/invalidation.go b/op-supernode/supernode/chain_container/invalidation.go index d2069955adc5d..475522262c88d 100644 --- a/op-supernode/supernode/chain_container/invalidation.go +++ b/op-supernode/supernode/chain_container/invalidation.go @@ -4,6 +4,7 @@ import ( "context" "encoding/binary" "fmt" + "os" 
"path/filepath" "sync" @@ -27,6 +28,9 @@ type DenyList struct { // OpenDenyList opens or creates a DenyList at the given data directory. func OpenDenyList(dataDir string) (*DenyList, error) { + if err := os.MkdirAll(dataDir, 0755); err != nil { + return nil, fmt.Errorf("failed to create denylist directory %s: %w", dataDir, err) + } dbPath := filepath.Join(dataDir, denyListDBName+".db") db, err := bolt.Open(dbPath, 0600, nil) if err != nil { diff --git a/op-supernode/supernode/chain_container/invalidation_test.go b/op-supernode/supernode/chain_container/invalidation_test.go new file mode 100644 index 0000000000000..eab587256a6c1 --- /dev/null +++ b/op-supernode/supernode/chain_container/invalidation_test.go @@ -0,0 +1,514 @@ +package chain_container + +import ( + "context" + "path/filepath" + "testing" + + opnodecfg "github.com/ethereum-optimism/optimism/op-node/config" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supernode/supernode/chain_container/virtual_node" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + gethlog "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +func TestDenyList_AddAndContains(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + setup func(t *testing.T, dl *DenyList) + check func(t *testing.T, dl *DenyList) + }{ + { + name: "single hash at height", + setup: func(t *testing.T, dl *DenyList) { + hash := common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") + require.NoError(t, dl.Add(100, hash)) + }, + check: func(t *testing.T, dl *DenyList) { + hash := common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") + found, err := dl.Contains(100, hash) + require.NoError(t, err) + require.True(t, found, "hash should be found at height 100") + }, + }, + { + name: "multiple hashes same height", + setup: func(t *testing.T, dl *DenyList) { + 
hashes := []common.Hash{ + common.HexToHash("0xaaaa"), + common.HexToHash("0xbbbb"), + common.HexToHash("0xcccc"), + } + for _, h := range hashes { + require.NoError(t, dl.Add(50, h)) + } + }, + check: func(t *testing.T, dl *DenyList) { + hashes := []common.Hash{ + common.HexToHash("0xaaaa"), + common.HexToHash("0xbbbb"), + common.HexToHash("0xcccc"), + } + for _, h := range hashes { + found, err := dl.Contains(50, h) + require.NoError(t, err) + require.True(t, found, "hash %s should be found at height 50", h) + } + }, + }, + { + name: "hash at wrong height returns false", + setup: func(t *testing.T, dl *DenyList) { + hash := common.HexToHash("0xdddd") + require.NoError(t, dl.Add(10, hash)) + }, + check: func(t *testing.T, dl *DenyList) { + hash := common.HexToHash("0xdddd") + // Check at different height + found, err := dl.Contains(11, hash) + require.NoError(t, err) + require.False(t, found, "hash should NOT be found at height 11") + + // Verify it IS at height 10 + found, err = dl.Contains(10, hash) + require.NoError(t, err) + require.True(t, found, "hash should be found at height 10") + }, + }, + { + name: "duplicate add is idempotent", + setup: func(t *testing.T, dl *DenyList) { + hash := common.HexToHash("0xeeee") + require.NoError(t, dl.Add(200, hash)) + require.NoError(t, dl.Add(200, hash)) // Add again + require.NoError(t, dl.Add(200, hash)) // And again + }, + check: func(t *testing.T, dl *DenyList) { + hash := common.HexToHash("0xeeee") + hashes, err := dl.GetDeniedHashes(200) + require.NoError(t, err) + require.Len(t, hashes, 1, "should only have one entry despite multiple adds") + require.Equal(t, hash, hashes[0]) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + dir := t.TempDir() + dl, err := OpenDenyList(dir) + require.NoError(t, err) + defer dl.Close() + + tt.setup(t, dl) + tt.check(t, dl) + }) + } +} + +func TestDenyList_Persistence(t *testing.T) { + t.Parallel() + + tests := []struct { + name 
string + setup func(t *testing.T, dir string) + check func(t *testing.T, dir string) + }{ + { + name: "survives close and reopen", + setup: func(t *testing.T, dir string) { + dl, err := OpenDenyList(dir) + require.NoError(t, err) + + hashes := []struct { + height uint64 + hash common.Hash + }{ + {100, common.HexToHash("0x1111")}, + {100, common.HexToHash("0x2222")}, + {200, common.HexToHash("0x3333")}, + {300, common.HexToHash("0x4444")}, + } + for _, h := range hashes { + require.NoError(t, dl.Add(h.height, h.hash)) + } + + require.NoError(t, dl.Close()) + }, + check: func(t *testing.T, dir string) { + dl, err := OpenDenyList(dir) + require.NoError(t, err) + defer dl.Close() + + // Verify all hashes are still present + found, err := dl.Contains(100, common.HexToHash("0x1111")) + require.NoError(t, err) + require.True(t, found) + + found, err = dl.Contains(100, common.HexToHash("0x2222")) + require.NoError(t, err) + require.True(t, found) + + found, err = dl.Contains(200, common.HexToHash("0x3333")) + require.NoError(t, err) + require.True(t, found) + + found, err = dl.Contains(300, common.HexToHash("0x4444")) + require.NoError(t, err) + require.True(t, found) + + // Verify counts + hashes100, err := dl.GetDeniedHashes(100) + require.NoError(t, err) + require.Len(t, hashes100, 2) + + hashes200, err := dl.GetDeniedHashes(200) + require.NoError(t, err) + require.Len(t, hashes200, 1) + }, + }, + { + name: "empty DB on fresh open", + setup: func(t *testing.T, dir string) { + // No setup - fresh directory + }, + check: func(t *testing.T, dir string) { + dl, err := OpenDenyList(dir) + require.NoError(t, err) + defer dl.Close() + + found, err := dl.Contains(100, common.HexToHash("0xabcd")) + require.NoError(t, err) + require.False(t, found, "fresh DB should not contain any hashes") + + hashes, err := dl.GetDeniedHashes(100) + require.NoError(t, err) + require.Empty(t, hashes, "fresh DB should return empty slice") + }, + }, + } + + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + t.Parallel() + dir := filepath.Join(t.TempDir(), "denylist") + + tt.setup(t, dir) + tt.check(t, dir) + }) + } +} + +func TestDenyList_GetDeniedHashes(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + setup func(t *testing.T, dl *DenyList) + check func(t *testing.T, dl *DenyList) + }{ + { + name: "returns all hashes at height", + setup: func(t *testing.T, dl *DenyList) { + for i := 0; i < 5; i++ { + hash := common.BigToHash(common.Big1.Add(common.Big1, common.Big0.SetInt64(int64(i)))) + require.NoError(t, dl.Add(100, hash)) + } + }, + check: func(t *testing.T, dl *DenyList) { + hashes, err := dl.GetDeniedHashes(100) + require.NoError(t, err) + require.Len(t, hashes, 5, "should return all 5 hashes") + }, + }, + { + name: "empty for clean height", + setup: func(t *testing.T, dl *DenyList) { + // Add hashes at other heights + require.NoError(t, dl.Add(10, common.HexToHash("0xaaaa"))) + require.NoError(t, dl.Add(30, common.HexToHash("0xbbbb"))) + }, + check: func(t *testing.T, dl *DenyList) { + hashes, err := dl.GetDeniedHashes(20) + require.NoError(t, err) + require.Empty(t, hashes, "height 20 should have no entries") + }, + }, + { + name: "isolated by height", + setup: func(t *testing.T, dl *DenyList) { + // Add different hashes at different heights + require.NoError(t, dl.Add(10, common.HexToHash("0x1010"))) + require.NoError(t, dl.Add(10, common.HexToHash("0x1011"))) + require.NoError(t, dl.Add(20, common.HexToHash("0x2020"))) + require.NoError(t, dl.Add(20, common.HexToHash("0x2021"))) + require.NoError(t, dl.Add(20, common.HexToHash("0x2022"))) + require.NoError(t, dl.Add(30, common.HexToHash("0x3030"))) + }, + check: func(t *testing.T, dl *DenyList) { + hashes10, err := dl.GetDeniedHashes(10) + require.NoError(t, err) + require.Len(t, hashes10, 2, "height 10 should have 2 hashes") + + hashes20, err := dl.GetDeniedHashes(20) + require.NoError(t, err) + require.Len(t, hashes20, 3, "height 20 should have 3 
hashes") + + hashes30, err := dl.GetDeniedHashes(30) + require.NoError(t, err) + require.Len(t, hashes30, 1, "height 30 should have 1 hash") + + // Verify specific hashes at height 20 + expected := map[common.Hash]bool{ + common.HexToHash("0x2020"): true, + common.HexToHash("0x2021"): true, + common.HexToHash("0x2022"): true, + } + for _, h := range hashes20 { + require.True(t, expected[h], "unexpected hash at height 20: %s", h) + } + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + dir := t.TempDir() + dl, err := OpenDenyList(dir) + require.NoError(t, err) + defer dl.Close() + + tt.setup(t, dl) + tt.check(t, dl) + }) + } +} + +// mockEngineForInvalidation implements engine_controller.EngineController for invalidation tests +type mockEngineForInvalidation struct { + blockAtTimestampFn func(ctx context.Context, ts uint64, label eth.BlockLabel) (eth.L2BlockRef, error) + rewindCalled bool + rewindTimestamp uint64 +} + +func (m *mockEngineForInvalidation) BlockAtTimestamp(ctx context.Context, ts uint64, label eth.BlockLabel) (eth.L2BlockRef, error) { + if m.blockAtTimestampFn != nil { + return m.blockAtTimestampFn(ctx, ts, label) + } + return eth.L2BlockRef{}, nil +} + +func (m *mockEngineForInvalidation) OutputV0AtBlockNumber(ctx context.Context, num uint64) (*eth.OutputV0, error) { + return nil, nil +} + +func (m *mockEngineForInvalidation) RewindToTimestamp(ctx context.Context, timestamp uint64) error { + m.rewindCalled = true + m.rewindTimestamp = timestamp + return nil +} + +func (m *mockEngineForInvalidation) FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error) { + return nil, nil, nil +} + +func (m *mockEngineForInvalidation) Close() error { + return nil +} + +// mockVNForInvalidation implements virtual_node.VirtualNode for invalidation tests +type mockVNForInvalidation struct { + stopErr error +} + +func (m *mockVNForInvalidation) Start(ctx context.Context) error { 
return nil } +func (m *mockVNForInvalidation) Stop(ctx context.Context) error { return m.stopErr } +func (m *mockVNForInvalidation) LatestSafe(ctx context.Context) (eth.BlockID, error) { + return eth.BlockID{}, nil +} +func (m *mockVNForInvalidation) SafeHeadAtL1(ctx context.Context, l1BlockNum uint64) (eth.BlockID, eth.BlockID, error) { + return eth.BlockID{}, eth.BlockID{}, nil +} +func (m *mockVNForInvalidation) L1AtSafeHead(ctx context.Context, target eth.BlockID) (eth.BlockID, error) { + return eth.BlockID{}, nil +} +func (m *mockVNForInvalidation) SyncStatus(ctx context.Context) (*eth.SyncStatus, error) { + return ð.SyncStatus{}, nil +} + +var _ virtual_node.VirtualNode = (*mockVNForInvalidation)(nil) + +func TestInvalidateBlock(t *testing.T) { + t.Parallel() + + genesisTime := uint64(1000) + blockTime := uint64(2) + + tests := []struct { + name string + height uint64 + payloadHash common.Hash + currentBlockHash common.Hash + engineAvailable bool + expectRewind bool + expectRewindTs uint64 + }{ + { + name: "current block matches triggers rewind", + height: 5, + payloadHash: common.HexToHash("0xdead"), + currentBlockHash: common.HexToHash("0xdead"), // Same hash + engineAvailable: true, + expectRewind: true, + expectRewindTs: genesisTime + (4 * blockTime), // height-1 timestamp + }, + { + name: "current block differs no rewind", + height: 5, + payloadHash: common.HexToHash("0xdead"), + currentBlockHash: common.HexToHash("0xbeef"), // Different hash + engineAvailable: true, + expectRewind: false, + }, + { + name: "engine unavailable adds to denylist only", + height: 5, + payloadHash: common.HexToHash("0xdead"), + engineAvailable: false, + expectRewind: false, + }, + { + name: "rewind to height-1 timestamp calculated correctly", + height: 10, + payloadHash: common.HexToHash("0xabcd"), + currentBlockHash: common.HexToHash("0xabcd"), + engineAvailable: true, + expectRewind: true, + expectRewindTs: genesisTime + (9 * blockTime), // height 9 + }, + } + + for _, tt 
:= range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + dir := t.TempDir() + + // Create deny list + dl, err := OpenDenyList(filepath.Join(dir, "denylist")) + require.NoError(t, err) + defer dl.Close() + + // Create mock engine + mockEng := &mockEngineForInvalidation{ + blockAtTimestampFn: func(ctx context.Context, ts uint64, label eth.BlockLabel) (eth.L2BlockRef, error) { + return eth.L2BlockRef{Hash: tt.currentBlockHash}, nil + }, + } + + // Create container with minimal config + c := &simpleChainContainer{ + denyList: dl, + log: testLogger(), + vncfg: &opnodecfg.Config{}, + vn: &mockVNForInvalidation{}, + } + c.vncfg.Rollup.Genesis.L2Time = genesisTime + c.vncfg.Rollup.BlockTime = blockTime + + if tt.engineAvailable { + c.engine = mockEng + } + + // Call InvalidateBlock + ctx := context.Background() + rewound, err := c.InvalidateBlock(ctx, tt.height, tt.payloadHash) + require.NoError(t, err) + + // Verify rewind behavior + require.Equal(t, tt.expectRewind, rewound, "rewind triggered mismatch") + + if tt.expectRewind && tt.engineAvailable { + require.True(t, mockEng.rewindCalled, "RewindToTimestamp should have been called") + require.Equal(t, tt.expectRewindTs, mockEng.rewindTimestamp, "rewind timestamp mismatch") + } + + // Verify hash was added to denylist regardless + found, err := dl.Contains(tt.height, tt.payloadHash) + require.NoError(t, err) + require.True(t, found, "hash should be in denylist after InvalidateBlock") + }) + } +} + +func TestIsDenied(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + setupHash common.Hash + setupHeight uint64 + checkHash common.Hash + checkHeight uint64 + expectFound bool + }{ + { + name: "denied block returns true", + setupHash: common.HexToHash("0x1234"), + setupHeight: 100, + checkHash: common.HexToHash("0x1234"), + checkHeight: 100, + expectFound: true, + }, + { + name: "non-denied returns false", + setupHash: common.HexToHash("0x1234"), + setupHeight: 100, + checkHash: 
common.HexToHash("0x5678"), // Different hash + checkHeight: 100, + expectFound: false, + }, + { + name: "wrong height returns false", + setupHash: common.HexToHash("0xabcd"), + setupHeight: 10, + checkHash: common.HexToHash("0xabcd"), // Same hash + checkHeight: 11, // Different height + expectFound: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + dir := t.TempDir() + + dl, err := OpenDenyList(filepath.Join(dir, "denylist")) + require.NoError(t, err) + defer dl.Close() + + // Setup + require.NoError(t, dl.Add(tt.setupHeight, tt.setupHash)) + + // Create container + c := &simpleChainContainer{ + denyList: dl, + log: testLogger(), + } + + // Check + found, err := c.IsDenied(tt.checkHeight, tt.checkHash) + require.NoError(t, err) + require.Equal(t, tt.expectFound, found) + }) + } +} + +func testLogger() gethlog.Logger { + return gethlog.New() +} From aecad4e394671c3086294964ea6f51472c4bcc50 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 4 Feb 2026 16:39:08 -0600 Subject: [PATCH 03/23] Wire up block invalidation from interop activity to chain container When the interop activity detects an invalid executing message, it now calls InvalidateBlock on the chain container. This: 1. Adds the block to the chain's denylist (persisted via bbolt) 2. Checks if the chain is currently using that block 3. Triggers a rewind to the prior timestamp if so The acceptance test is renamed to TestSupernodeInteropInvalidMessageReset and temporarily skipped due to pre-existing logsDB consistency issues blocking interop progression. 
--- .../invalid_message_replacement_test.go | 121 +++++++++--------- .../supernode/activity/interop/interop.go | 36 +++++- 2 files changed, 89 insertions(+), 68 deletions(-) diff --git a/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go b/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go index 036bfde56286e..4f7eae2fdb55d 100644 --- a/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go +++ b/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go @@ -17,15 +17,18 @@ import ( "github.com/ethereum-optimism/optimism/op-service/txintent" ) -// TestSupernodeInteropInvalidMessageReplacement tests that: +// TestSupernodeInteropInvalidMessageReset tests that: // WHEN: an invalid Executing Message is included in a chain // THEN: -// - The block containing the invalid message gets reset backward -// - The chain re-derives and produces a replacement block -// - Validity eventually advances past the replaced block +// - The interop activity detects the invalid block +// - The chain container is told to invalidate the block +// - A reset/rewind is triggered if the chain is using that block // -// This test verifies the block invalidation and replacement mechanism. -func TestSupernodeInteropInvalidMessageReplacement(gt *testing.T) { +// Note: This test observes reset behavior. Full block replacement requires +// re-derivation which is a separate mechanism. 
+func TestSupernodeInteropInvalidMessageReset(gt *testing.T) { + gt.Skip("Skipped: logsDB consistency issues blocking interop progression - see #18944") + t := devtest.SerialT(gt) sys := presets.NewTwoL2SupernodeInterop(t, 0) @@ -77,7 +80,7 @@ func TestSupernodeInteropInvalidMessageReplacement(gt *testing.T) { // Send an INVALID executing message on chain B // Modify the message identifier to make it invalid (wrong log index) - invalidExecReceipt := sendInvalidExecMessageForReplacement(t, bob, initTx, 0) + invalidExecReceipt := sendInvalidExecMessageForReset(t, bob, initTx, 0) invalidBlockNumber := bigs.Uint64Strict(invalidExecReceipt.BlockNumber) invalidBlockHash := invalidExecReceipt.BlockHash @@ -90,106 +93,96 @@ func TestSupernodeInteropInvalidMessageReplacement(gt *testing.T) { "timestamp", invalidBlockTimestamp, ) - // Record the safety status before waiting - initialStatusA := sys.L2ACL.SyncStatus() + // Record the initial unsafe head for chain B initialStatusB := sys.L2BCL.SyncStatus() + initialUnsafeB := initialStatusB.UnsafeL2.Number - t.Logger().Info("initial safety status", - "chainA_local_safe", initialStatusA.LocalSafeL2.Number, - "chainA_unsafe", initialStatusA.UnsafeL2.Number, - "chainB_local_safe", initialStatusB.LocalSafeL2.Number, - "chainB_unsafe", initialStatusB.UnsafeL2.Number, + t.Logger().Info("initial status before reset observation", + "chainB_unsafe", initialUnsafeB, + "invalid_block", invalidBlockNumber, ) - // Now we verify the key behaviors: - // 1. The invalid block should be replaced with a different block at the same height - // 2. Validity should eventually advance past the replaced block + // Observe for reset behavior: + // When the interop activity detects the invalid message and calls InvalidateBlock, + // it will trigger a rewind. We observe by watching for the unsafe head to go backwards + // or for the block at the invalid block number to change. 
observationDuration := 60 * time.Second checkInterval := time.Second start := time.Now() - var replacementDetected bool - var replacementBlockHash [32]byte + var resetDetected bool + var lastUnsafeB uint64 = initialUnsafeB for time.Since(start) < observationDuration { time.Sleep(checkInterval) - // Check if the block at the invalid block number has changed - currentBlock := sys.L2ELB.BlockRefByNumber(invalidBlockNumber) + statusB := sys.L2BCL.SyncStatus() + currentUnsafeB := statusB.UnsafeL2.Number + + // Check if the unsafe head went backwards (reset occurred) + if currentUnsafeB < lastUnsafeB && lastUnsafeB >= invalidBlockNumber { + resetDetected = true + t.Logger().Info("RESET DETECTED! Unsafe head moved backward", + "previous_unsafe", lastUnsafeB, + "current_unsafe", currentUnsafeB, + "invalid_block", invalidBlockNumber, + ) + } + // Also check if the block hash at the invalid block number changed + currentBlock := sys.L2ELB.BlockRefByNumber(invalidBlockNumber) if currentBlock.Hash != invalidBlockHash { - replacementDetected = true - replacementBlockHash = currentBlock.Hash - t.Logger().Info("REPLACEMENT DETECTED!", + resetDetected = true + t.Logger().Info("RESET DETECTED! 
Block hash changed", "block_number", invalidBlockNumber, "old_hash", invalidBlockHash, "new_hash", currentBlock.Hash, ) } - // Check current safety status - statusA := sys.L2ACL.SyncStatus() - statusB := sys.L2BCL.SyncStatus() - - // Check if the invalid block's timestamp has been verified + // Check verification status resp, err := snClient.SuperRootAtTimestamp(ctx, invalidBlockTimestamp) t.Require().NoError(err, "SuperRootAtTimestamp should not error") t.Logger().Info("observation tick", "elapsed", time.Since(start).Round(time.Second), - "chainA_local_safe", statusA.LocalSafeL2.Number, - "chainA_unsafe", statusA.UnsafeL2.Number, - "chainB_local_safe", statusB.LocalSafeL2.Number, - "chainB_unsafe", statusB.UnsafeL2.Number, + "chainB_unsafe", currentUnsafeB, "invalid_block_ts", invalidBlockTimestamp, - "replacement_detected", replacementDetected, + "reset_detected", resetDetected, "verified", resp.Data != nil, ) - // If replacement was detected and timestamp is now verified, we're done - if replacementDetected && resp.Data != nil { - t.Logger().Info("SUCCESS: replacement block is now verified", - "timestamp", invalidBlockTimestamp, - "replacement_hash", replacementBlockHash, - ) + lastUnsafeB = currentUnsafeB + + // Exit early if we detect reset + if resetDetected { + t.Logger().Info("Reset behavior confirmed") break } } - // Final assertions + // ASSERTION: Reset should have been detected + // (either unsafe head went backward or block hash changed) + t.Require().True(resetDetected, + "reset should be triggered when invalid block is detected") - // ASSERTION: The invalid block should have been replaced - t.Require().True(replacementDetected, - "invalid block should have been replaced with a different block") - - // ASSERTION: The replacement block should be different from the invalid block - t.Require().NotEqual(invalidBlockHash, replacementBlockHash, - "replacement block hash should differ from invalid block hash") - - // ASSERTION: The timestamp should 
eventually be verified (with the replacement block) + // ASSERTION: The invalid block's timestamp should NOT be verified + // (because the reset means the block is no longer valid) finalResp, err := snClient.SuperRootAtTimestamp(ctx, invalidBlockTimestamp) t.Require().NoError(err) - t.Require().NotNil(finalResp.Data, - "timestamp should be verified after block replacement") - - finalStatusA := sys.L2ACL.SyncStatus() - finalStatusB := sys.L2BCL.SyncStatus() + t.Require().Nil(finalResp.Data, + "invalid block timestamp should not be verified after reset") - t.Logger().Info("test complete: invalid block was replaced and validity advanced", - "final_chainA_local_safe", finalStatusA.LocalSafeL2.Number, - "final_chainA_unsafe", finalStatusA.UnsafeL2.Number, - "final_chainB_local_safe", finalStatusB.LocalSafeL2.Number, - "final_chainB_unsafe", finalStatusB.UnsafeL2.Number, + t.Logger().Info("test complete: reset was triggered for invalid block", + "invalid_block_number", invalidBlockNumber, "invalid_block_hash", invalidBlockHash, - "replacement_block_hash", replacementBlockHash, - "invalid_block_timestamp", invalidBlockTimestamp, ) } -// sendInvalidExecMessageForReplacement sends an executing message with a modified (invalid) identifier. +// sendInvalidExecMessageForReset sends an executing message with a modified (invalid) identifier. // This makes the message invalid because it references a non-existent log index. 
-func sendInvalidExecMessageForReplacement( +func sendInvalidExecMessageForReset( t devtest.T, bob *dsl.EOA, initIntent *txintent.IntentTx[*txintent.InitTrigger, *txintent.InteropOutput], diff --git a/op-supernode/supernode/activity/interop/interop.go b/op-supernode/supernode/activity/interop/interop.go index b9c12a818ea65..ab3e560ca6668 100644 --- a/op-supernode/supernode/activity/interop/interop.go +++ b/op-supernode/supernode/activity/interop/interop.go @@ -294,11 +294,39 @@ func (i *Interop) handleResult(result Result) error { return nil } -// invalidateBlock handles an invalid block by notifying the chain to reorg. +// invalidateBlock handles an invalid block by notifying the chain container to add it +// to the denylist and potentially rewind if the chain is currently using that block. func (i *Interop) invalidateBlock(chainID eth.ChainID, blockID eth.BlockID) error { - // TODO(#18944): Implement block invalidation - // This should trigger the chain container to reorg away from the invalid block - i.log.Warn("invalidateBlock called but not implemented", "chainID", chainID, "blockID", blockID) + chain, ok := i.chains[chainID] + if !ok { + return fmt.Errorf("chain %s not found", chainID) + } + + rewound, err := chain.InvalidateBlock(i.ctx, blockID.Number, blockID.Hash) + if err != nil { + i.log.Error("failed to invalidate block", + "chainID", chainID, + "blockNumber", blockID.Number, + "blockHash", blockID.Hash, + "err", err, + ) + return err + } + + if rewound { + i.log.Warn("chain rewound due to invalid block", + "chainID", chainID, + "blockNumber", blockID.Number, + "blockHash", blockID.Hash, + ) + } else { + i.log.Info("block added to denylist (no rewind needed)", + "chainID", chainID, + "blockNumber", blockID.Number, + "blockHash", blockID.Hash, + ) + } + return nil } From e712178e1f2c711df423ddca9b865d08db72a6b8 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 4 Feb 2026 17:36:10 -0600 Subject: [PATCH 04/23] Fix block invalidation: use eth.Unsafe 
label and improve test resilience - Use eth.Unsafe label in BlockAtTimestamp call (was using empty string) - Make acceptance test resilient to block not existing after rewind - Test now passes: detects reset when block 8 is rewound away --- .../invalid_message_replacement_test.go | 54 +++++++++---------- .../supernode/chain_container/invalidation.go | 3 +- 2 files changed, 27 insertions(+), 30 deletions(-) diff --git a/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go b/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go index 4f7eae2fdb55d..0b2a60ff3d797 100644 --- a/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go +++ b/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go @@ -27,7 +27,6 @@ import ( // Note: This test observes reset behavior. Full block replacement requires // re-derivation which is a separate mechanism. func TestSupernodeInteropInvalidMessageReset(gt *testing.T) { - gt.Skip("Skipped: logsDB consistency issues blocking interop progression - see #18944") t := devtest.SerialT(gt) sys := presets.NewTwoL2SupernodeInterop(t, 0) @@ -93,15 +92,6 @@ func TestSupernodeInteropInvalidMessageReset(gt *testing.T) { "timestamp", invalidBlockTimestamp, ) - // Record the initial unsafe head for chain B - initialStatusB := sys.L2BCL.SyncStatus() - initialUnsafeB := initialStatusB.UnsafeL2.Number - - t.Logger().Info("initial status before reset observation", - "chainB_unsafe", initialUnsafeB, - "invalid_block", invalidBlockNumber, - ) - // Observe for reset behavior: // When the interop activity detects the invalid message and calls InvalidateBlock, // it will trigger a rewind. 
We observe by watching for the unsafe head to go backwards @@ -112,27 +102,22 @@ func TestSupernodeInteropInvalidMessageReset(gt *testing.T) { start := time.Now() var resetDetected bool - var lastUnsafeB uint64 = initialUnsafeB for time.Since(start) < observationDuration { time.Sleep(checkInterval) - statusB := sys.L2BCL.SyncStatus() - currentUnsafeB := statusB.UnsafeL2.Number - - // Check if the unsafe head went backwards (reset occurred) - if currentUnsafeB < lastUnsafeB && lastUnsafeB >= invalidBlockNumber { + // Check if the block hash at the invalid block number changed or block doesn't exist + // Use the EthClient directly to handle errors (block may not exist after rewind) + currentBlock, err := sys.L2ELB.Escape().EthClient().BlockRefByNumber(ctx, invalidBlockNumber) + if err != nil { + // Block not found - this means the rewind happened and block was removed resetDetected = true - t.Logger().Info("RESET DETECTED! Unsafe head moved backward", - "previous_unsafe", lastUnsafeB, - "current_unsafe", currentUnsafeB, - "invalid_block", invalidBlockNumber, + t.Logger().Info("RESET DETECTED! Block no longer exists (rewound)", + "block_number", invalidBlockNumber, + "err", err, ) - } - - // Also check if the block hash at the invalid block number changed - currentBlock := sys.L2ELB.BlockRefByNumber(invalidBlockNumber) - if currentBlock.Hash != invalidBlockHash { + } else if currentBlock.Hash != invalidBlockHash { + // Block exists but with different hash - replaced resetDetected = true t.Logger().Info("RESET DETECTED! 
Block hash changed", "block_number", invalidBlockNumber, @@ -143,18 +128,29 @@ func TestSupernodeInteropInvalidMessageReset(gt *testing.T) { // Check verification status resp, err := snClient.SuperRootAtTimestamp(ctx, invalidBlockTimestamp) - t.Require().NoError(err, "SuperRootAtTimestamp should not error") + if err != nil { + t.Logger().Info("SuperRootAtTimestamp error (may be resetting)", + "elapsed", time.Since(start).Round(time.Second), + "err", err, + ) + continue + } + + var currentHash string + if currentBlock.Hash != ([32]byte{}) { + currentHash = currentBlock.Hash.String()[:10] + } else { + currentHash = "(none)" + } t.Logger().Info("observation tick", "elapsed", time.Since(start).Round(time.Second), - "chainB_unsafe", currentUnsafeB, "invalid_block_ts", invalidBlockTimestamp, + "current_block_hash", currentHash, "reset_detected", resetDetected, "verified", resp.Data != nil, ) - lastUnsafeB = currentUnsafeB - // Exit early if we detect reset if resetDetected { t.Logger().Info("Reset behavior confirmed") diff --git a/op-supernode/supernode/chain_container/invalidation.go b/op-supernode/supernode/chain_container/invalidation.go index 475522262c88d..b36646c35ecbb 100644 --- a/op-supernode/supernode/chain_container/invalidation.go +++ b/op-supernode/supernode/chain_container/invalidation.go @@ -8,6 +8,7 @@ import ( "path/filepath" "sync" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/common" bolt "go.etcd.io/bbolt" ) @@ -171,7 +172,7 @@ func (c *simpleChainContainer) InvalidateBlock(ctx context.Context, height uint6 return false, nil } - currentBlock, err := c.engine.BlockAtTimestamp(ctx, c.blockNumberToTimestamp(height), "") + currentBlock, err := c.engine.BlockAtTimestamp(ctx, c.blockNumberToTimestamp(height), eth.Unsafe) if err != nil { c.log.Warn("failed to get current block at height", "height", height, "err", err) return false, nil From d344c0ced4ad8975b705950d001a0c598b190655 Mon Sep 17 00:00:00 2001 From: 
axelKingsley Date: Thu, 5 Feb 2026 14:41:37 -0600 Subject: [PATCH 05/23] op-node: add SuperAuthority interface for payload denial Introduce SuperAuthority interface that allows external authority (like op-supernode) to deny payloads before they are inserted into the engine. - Define SuperAuthority interface in op-node/node/node.go with IsDenied method - Add SuperAuthority to InitializationOverrides for injection - Wire SuperAuthority through Driver to EngineController - Check IsDenied before NewPayload in payload_process.go - If denied during Holocene derivation, request deposits-only replacement - Update tests and op-program to pass nil for SuperAuthority --- op-e2e/actions/helpers/l2_verifier.go | 2 +- op-node/node/node.go | 18 +++++++++++- op-node/rollup/driver/driver.go | 3 +- op-node/rollup/engine/engine_controller.go | 15 ++++++++++ .../rollup/engine/engine_controller_test.go | 12 ++++---- op-node/rollup/engine/payload_process.go | 29 +++++++++++++++++++ op-program/client/driver/driver.go | 2 +- .../supernode/resources/super_authority.go | 23 +++++++++++++++ 8 files changed, 94 insertions(+), 10 deletions(-) create mode 100644 op-supernode/supernode/resources/super_authority.go diff --git a/op-e2e/actions/helpers/l2_verifier.go b/op-e2e/actions/helpers/l2_verifier.go index aeab9fcb99f71..08a4ee69e95b4 100644 --- a/op-e2e/actions/helpers/l2_verifier.go +++ b/op-e2e/actions/helpers/l2_verifier.go @@ -150,7 +150,7 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, supervisorEnabled := interopSys != nil metrics := &testutils.TestDerivationMetrics{} - ec := engine.NewEngineController(ctx, eng, log, opnodemetrics.NoopMetrics, cfg, syncCfg, supervisorEnabled, l1, sys.Register("engine-controller", nil, opts)) + ec := engine.NewEngineController(ctx, eng, log, opnodemetrics.NoopMetrics, cfg, syncCfg, supervisorEnabled, l1, sys.Register("engine-controller", nil, opts), nil) if mm, ok := interopSys.(*indexing.IndexingMode); ok { 
mm.SetEngineController(ec) diff --git a/op-node/node/node.go b/op-node/node/node.go index e37a9b76b38cf..3135b0f5b3fa2 100644 --- a/op-node/node/node.go +++ b/op-node/node/node.go @@ -48,6 +48,16 @@ import ( var ErrAlreadyClosed = errors.New("node is already closed") +// SuperAuthority provides supernode-level authority operations to op-node instances. +// When running inside a supernode, this allows the node to check if payloads are denied +// before applying them, enabling coordinated block invalidation across the supernode. +type SuperAuthority interface { + // IsDenied checks if a payload hash is denied at the given block number. + // Returns true if the payload should not be applied. + // The error indicates if the check could not be performed (should be logged but not fatal). + IsDenied(blockNumber uint64, payloadHash common.Hash) (bool, error) +} + // L1Client is the interface that op-node uses to interact with L1. // This allows wrapped or mocked clients to be used type L1Client interface { @@ -108,6 +118,8 @@ type OpNode struct { appVersion string metrics *metrics.Metrics + superAuthority SuperAuthority // Supernode authority for payload validation (may be nil) + l1HeadsSub ethereum.Subscription // Subscription to get L1 heads (automatically re-subscribes on error) l1SafeSub ethereum.Subscription // Subscription to get L1 safe blocks, a.k.a. justified data (polling) l1FinalizedSub ethereum.Subscription // Subscription to get L1 safe blocks, a.k.a. 
justified data (polling) @@ -200,6 +212,7 @@ type InitializationOverrides struct { Beacon L1Beacon RPCHandler *oprpc.Handler MetricsRegistry func(*prometheus.Registry) + SuperAuthority SuperAuthority // Supernode authority for payload validation } // init progressively creates and sets up all the components of the OpNode @@ -225,6 +238,9 @@ func (n *OpNode) init(ctx context.Context, cfg *config.Config, overrides Initial return fmt.Errorf("failed to init event system: %w", err) } + // Store the supernode authority for payload validation + n.superAuthority = overrides.SuperAuthority + if overrides.Beacon == nil { beacon, err := initL1BeaconAPI(ctx, cfg, n) if err != nil { @@ -607,7 +623,7 @@ func initL2(ctx context.Context, cfg *config.Config, node *OpNode) (*sources.Eng } l2Driver := driver.NewDriver(node.eventSys, node.eventDrain, &cfg.Driver, &cfg.Rollup, cfg.L1ChainConfig, cfg.DependencySet, l2Source, node.l1Source, upstreamFollowSource, - node.beacon, node, node, node.log, node.metrics, cfg.ConfigPersistence, safeDB, &cfg.Sync, sequencerConductor, altDA, indexingMode) + node.beacon, node, node, node.log, node.metrics, cfg.ConfigPersistence, safeDB, &cfg.Sync, sequencerConductor, altDA, indexingMode, node.superAuthority) // Wire up IndexingMode to engine controller for direct procedure call if sys != nil { diff --git a/op-node/rollup/driver/driver.go b/op-node/rollup/driver/driver.go index 2b23d62e5dcd3..d750151ab1fc1 100644 --- a/op-node/rollup/driver/driver.go +++ b/op-node/rollup/driver/driver.go @@ -49,6 +49,7 @@ func NewDriver( sequencerConductor conductor.SequencerConductor, altDA AltDAIface, indexingMode bool, + superAuthority engine.SuperAuthority, ) *Driver { driverCtx, driverCancel := context.WithCancel(context.Background()) @@ -60,7 +61,7 @@ func NewDriver( l1 = metered.NewMeteredL1Fetcher(l1Tracker, metrics) verifConfDepth := confdepth.NewConfDepth(driverCfg.VerifierConfDepth, statusTracker.L1Head, l1) - ec := engine.NewEngineController(driverCtx, l2, 
log, metrics, cfg, syncCfg, indexingMode, l1, sys.Register("engine-controller", nil)) + ec := engine.NewEngineController(driverCtx, l2, log, metrics, cfg, syncCfg, indexingMode, l1, sys.Register("engine-controller", nil), superAuthority) // TODO(#17115): Refactor dependency cycles ec.SetCrossUpdateHandler(statusTracker) diff --git a/op-node/rollup/engine/engine_controller.go b/op-node/rollup/engine/engine_controller.go index 7e89edd3f4a82..15c84d14230d7 100644 --- a/op-node/rollup/engine/engine_controller.go +++ b/op-node/rollup/engine/engine_controller.go @@ -93,6 +93,16 @@ type CrossUpdateHandler interface { OnCrossSafeUpdate(ctx context.Context, crossSafe eth.L2BlockRef, localSafe eth.L2BlockRef) } +// SuperAuthority provides supernode-level authority operations. +// When running inside a supernode, this allows the engine controller to check +// if payloads are denied before applying them. +type SuperAuthority interface { + // IsDenied checks if a payload hash is denied at the given block number. + // Returns true if the payload should not be applied. + // The error indicates if the check could not be performed (should be logged but not fatal). 
+ IsDenied(blockNumber uint64, payloadHash common.Hash) (bool, error) +} + type EngineController struct { engine ExecEngine // Underlying execution engine RPC log log.Logger @@ -158,6 +168,9 @@ type EngineController struct { // Handler for cross-unsafe and cross-safe updates crossUpdateHandler CrossUpdateHandler + // SuperAuthority for payload validation (may be nil when not in supernode context) + superAuthority SuperAuthority + unsafePayloads *PayloadsQueue // queue of unsafe payloads, ordered by ascending block number, may have gaps and duplicates } @@ -165,6 +178,7 @@ var _ event.Deriver = (*EngineController)(nil) func NewEngineController(ctx context.Context, engine ExecEngine, log log.Logger, m opmetrics.Metricer, rollupCfg *rollup.Config, syncCfg *sync.Config, supervisorEnabled bool, l1 sync.L1Chain, emitter event.Emitter, + superAuthority SuperAuthority, ) *EngineController { syncStatus := syncStatusCL if syncCfg.SyncMode == sync.ELSync { @@ -184,6 +198,7 @@ func NewEngineController(ctx context.Context, engine ExecEngine, log log.Logger, l1: l1, ctx: ctx, emitter: emitter, + superAuthority: superAuthority, unsafePayloads: NewPayloadsQueue(log, maxUnsafePayloadsMemory, payloadMemSize), } } diff --git a/op-node/rollup/engine/engine_controller_test.go b/op-node/rollup/engine/engine_controller_test.go index d0a0e4d646ba5..03db21584f9b7 100644 --- a/op-node/rollup/engine/engine_controller_test.go +++ b/op-node/rollup/engine/engine_controller_test.go @@ -22,7 +22,7 @@ import ( func TestInvalidPayloadDropsHead(t *testing.T) { emitter := &testutils.MockEmitter{} - ec := NewEngineController(context.Background(), nil, testlog.Logger(t, 0), metrics.NoopMetrics, &rollup.Config{}, &sync.Config{}, false, &testutils.MockL1Source{}, emitter) + ec := NewEngineController(context.Background(), nil, testlog.Logger(t, 0), metrics.NoopMetrics, &rollup.Config{}, &sync.Config{}, false, &testutils.MockL1Source{}, emitter, nil) payload := ð.ExecutionPayloadEnvelope{ExecutionPayload: 
ð.ExecutionPayload{ BlockHash: common.Hash{0x01}, @@ -110,7 +110,7 @@ func TestOnUnsafePayload_EnqueueEmit(t *testing.T) { cfg, _, _, payloadA1 := buildSimpleCfgAndPayload(t) emitter := &testutils.MockEmitter{} - ec := NewEngineController(context.Background(), nil, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{}, false, &testutils.MockL1Source{}, emitter) + ec := NewEngineController(context.Background(), nil, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{}, false, &testutils.MockL1Source{}, emitter, nil) emitter.ExpectOnce(PayloadInvalidEvent{}) emitter.ExpectOnce(ForkchoiceUpdateEvent{}) @@ -127,7 +127,7 @@ func TestOnForkchoiceUpdate_ProcessRetryAndPop(t *testing.T) { emitter := &testutils.MockEmitter{} mockEngine := &testutils.MockEngine{} - cl := NewEngineController(context.Background(), mockEngine, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{SyncMode: sync.CLSync}, false, &testutils.MockL1Source{}, emitter) + cl := NewEngineController(context.Background(), mockEngine, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{SyncMode: sync.CLSync}, false, &testutils.MockL1Source{}, emitter, nil) // queue payload A1 emitter.ExpectOnceType("UnsafeUpdateEvent") @@ -156,7 +156,7 @@ func TestPeekUnsafePayload(t *testing.T) { cfg, _, _, payloadA1 := buildSimpleCfgAndPayload(t) emitter := &testutils.MockEmitter{} - ec := NewEngineController(context.Background(), nil, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{SyncMode: sync.CLSync}, false, &testutils.MockL1Source{}, emitter) + ec := NewEngineController(context.Background(), nil, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{SyncMode: sync.CLSync}, false, &testutils.MockL1Source{}, emitter, nil) // empty -> zero _, ref := ec.PeekUnsafePayload() @@ -174,7 +174,7 @@ func TestPeekUnsafePayload(t *testing.T) { func TestPeekUnsafePayload_OnDeriveErrorReturnsZero(t *testing.T) { // missing L1-info in txs will cause derive error emitter := 
&testutils.MockEmitter{} - ec := NewEngineController(context.Background(), nil, testlog.Logger(t, 0), metrics.NoopMetrics, &rollup.Config{}, &sync.Config{SyncMode: sync.CLSync}, false, &testutils.MockL1Source{}, emitter) + ec := NewEngineController(context.Background(), nil, testlog.Logger(t, 0), metrics.NoopMetrics, &rollup.Config{}, &sync.Config{SyncMode: sync.CLSync}, false, &testutils.MockL1Source{}, emitter, nil) bad := ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{BlockNumber: 1, BlockHash: common.Hash{0xaa}}} _ = ec.unsafePayloads.Push(bad) @@ -184,7 +184,7 @@ func TestPeekUnsafePayload_OnDeriveErrorReturnsZero(t *testing.T) { func TestInvalidPayloadForNonHead_NoDrop(t *testing.T) { emitter := &testutils.MockEmitter{} - ec := NewEngineController(context.Background(), nil, testlog.Logger(t, 0), metrics.NoopMetrics, &rollup.Config{}, &sync.Config{SyncMode: sync.CLSync}, false, &testutils.MockL1Source{}, emitter) + ec := NewEngineController(context.Background(), nil, testlog.Logger(t, 0), metrics.NoopMetrics, &rollup.Config{}, &sync.Config{SyncMode: sync.CLSync}, false, &testutils.MockL1Source{}, emitter, nil) // Head payload (lower block number) head := ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{ diff --git a/op-node/rollup/engine/payload_process.go b/op-node/rollup/engine/payload_process.go index b80968167d702..65b87472f9db4 100644 --- a/op-node/rollup/engine/payload_process.go +++ b/op-node/rollup/engine/payload_process.go @@ -28,6 +28,35 @@ func (e *EngineController) onPayloadProcess(ctx context.Context, ev PayloadProce rpcCtx, cancel := context.WithTimeout(e.ctx, payloadProcessTimeout) defer cancel() + // Check SuperAuthority denylist before inserting the payload + if e.superAuthority != nil && ev.Envelope != nil && ev.Envelope.ExecutionPayload != nil { + payload := ev.Envelope.ExecutionPayload + denied, err := e.superAuthority.IsDenied(uint64(payload.BlockNumber), payload.BlockHash) + if err != nil { + 
e.log.Warn("Failed to check SuperAuthority denylist, proceeding with payload", + "blockNumber", payload.BlockNumber, + "blockHash", payload.BlockHash, + "err", err, + ) + } else if denied { + e.log.Warn("Payload denied by SuperAuthority", + "blockNumber", payload.BlockNumber, + "blockHash", payload.BlockHash, + ) + // If derived and Holocene is active, request a deposits-only replacement + if ev.DerivedFrom != (eth.L1BlockRef{}) && e.rollupCfg.IsHolocene(ev.DerivedFrom.Time) { + e.emitDepositsOnlyPayloadAttributesRequest(ctx, ev.Ref.ParentID(), ev.DerivedFrom) + return + } + // Otherwise emit invalid event + e.emitter.Emit(ctx, PayloadInvalidEvent{ + Envelope: ev.Envelope, + Err: fmt.Errorf("payload %s denied by SuperAuthority", payload.BlockHash), + }) + return + } + } + insertStart := time.Now() status, err := e.engine.NewPayload(rpcCtx, ev.Envelope.ExecutionPayload, ev.Envelope.ParentBeaconBlockRoot) diff --git a/op-program/client/driver/driver.go b/op-program/client/driver/driver.go index c2ba9372fc6fb..1810bf318c75c 100644 --- a/op-program/client/driver/driver.go +++ b/op-program/client/driver/driver.go @@ -46,7 +46,7 @@ func NewDriver(logger log.Logger, cfg *rollup.Config, depSet derive.DependencySe pipelineDeriver.AttachEmitter(d) syncCfg := &sync.Config{SyncMode: sync.CLSync} - ec := engine.NewEngineController(context.Background(), l2Source, logger, metrics.NoopMetrics, cfg, syncCfg, false, l1Source, d) + ec := engine.NewEngineController(context.Background(), l2Source, logger, metrics.NoopMetrics, cfg, syncCfg, false, l1Source, d, nil) attrHandler := attributes.NewAttributesHandler(logger, cfg, context.Background(), l2Source, ec) ec.SetAttributesResetter(attrHandler) diff --git a/op-supernode/supernode/resources/super_authority.go b/op-supernode/supernode/resources/super_authority.go new file mode 100644 index 0000000000000..bd7bcfa62f3ee --- /dev/null +++ b/op-supernode/supernode/resources/super_authority.go @@ -0,0 +1,23 @@ +package resources + +import 
"github.com/ethereum/go-ethereum/common" + +// SuperAuthority is an interface for supernode-level authority operations. +// It is passed to op-node instances during initialization to provide +// supernode-specific functionality and coordination. +type SuperAuthority interface { + // IsDenied checks if a payload hash is denied at the given block number. + // Returns true if the payload should not be applied. + // The error indicates if the check could not be performed. + IsDenied(blockNumber uint64, payloadHash common.Hash) (bool, error) +} + +// NoOpSuperAuthority is a no-op implementation that never denies any payload. +// Used when running op-node outside of a supernode context. +type NoOpSuperAuthority struct{} + +func (n *NoOpSuperAuthority) IsDenied(blockNumber uint64, payloadHash common.Hash) (bool, error) { + return false, nil +} + +var _ SuperAuthority = (*NoOpSuperAuthority)(nil) From 11942285d75ae651f140467780769422b8c0307d Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Thu, 5 Feb 2026 14:41:45 -0600 Subject: [PATCH 06/23] op-supernode: add ResetOn method to Activity interface Add ResetOn(chainID, timestamp) to Activity interface so activities can clean up cached state when a chain container rewinds. 
- Activity.ResetOn called by Supernode when any chain resets - Supernode.onChainReset distributes reset to all activities - Heartbeat and Superroot implement no-op ResetOn (no cached state) - Update test mocks to implement ResetOn --- op-supernode/supernode/activity/activity.go | 4 ++++ .../supernode/activity/heartbeat/heartbeat.go | 6 ++++++ .../supernode/activity/superroot/superroot.go | 6 ++++++ .../activity/superroot/superroot_test.go | 1 + op-supernode/supernode/supernode.go | 21 ++++++++++++++++++- .../supernode/supernode_activities_test.go | 11 +++++++--- 6 files changed, 45 insertions(+), 4 deletions(-) diff --git a/op-supernode/supernode/activity/activity.go b/op-supernode/supernode/activity/activity.go index f3020798f8145..061aaca52475d 100644 --- a/op-supernode/supernode/activity/activity.go +++ b/op-supernode/supernode/activity/activity.go @@ -8,6 +8,10 @@ import ( // Activity is an open interface to collect pluggable behaviors which satisfy sub-activitiy interfaces. type Activity interface { + // ResetOn is called when a chain container resets to a given timestamp. + // Activities should clean up any cached state for that chain at or after the timestamp. + // This is a no-op for activities that don't maintain chain-specific state. + ResetOn(chainID eth.ChainID, timestamp uint64) } // RunnableActivity is an Activity that can be started and stopped independently. 
diff --git a/op-supernode/supernode/activity/heartbeat/heartbeat.go b/op-supernode/supernode/activity/heartbeat/heartbeat.go index aab65475a7103..ba9869d1d8ca0 100644 --- a/op-supernode/supernode/activity/heartbeat/heartbeat.go +++ b/op-supernode/supernode/activity/heartbeat/heartbeat.go @@ -5,6 +5,7 @@ import ( "crypto/rand" "time" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supernode/supernode/activity" "github.com/ethereum/go-ethereum/common/hexutil" gethlog "github.com/ethereum/go-ethereum/log" @@ -55,6 +56,11 @@ func (h *Heartbeat) Stop(ctx context.Context) error { return nil } +// ResetOn is a no-op for heartbeat - it has no chain-specific state. +func (h *Heartbeat) ResetOn(chainID eth.ChainID, timestamp uint64) { + // No-op: heartbeat has no chain-specific cached state +} + // RPCNamespace returns the JSON-RPC namespace for this activity. func (h *Heartbeat) RPCNamespace() string { return "heartbeat" } diff --git a/op-supernode/supernode/activity/superroot/superroot.go b/op-supernode/supernode/activity/superroot/superroot.go index dace9b15b8647..a30b155aef5f2 100644 --- a/op-supernode/supernode/activity/superroot/superroot.go +++ b/op-supernode/supernode/activity/superroot/superroot.go @@ -30,6 +30,12 @@ func New(log gethlog.Logger, chains map[eth.ChainID]cc.ChainContainer) *Superroo func (s *Superroot) ActivityName() string { return "superroot" } +// ResetOn is a no-op for superroot - it always queries chain containers directly +// and doesn't maintain any chain-specific cached state. 
+func (s *Superroot) ResetOn(chainID eth.ChainID, timestamp uint64) { + // No-op: superroot queries chain containers directly +} + func (s *Superroot) RPCNamespace() string { return "superroot" } func (s *Superroot) RPCService() interface{} { return &superrootAPI{s: s} } diff --git a/op-supernode/supernode/activity/superroot/superroot_test.go b/op-supernode/supernode/activity/superroot/superroot_test.go index d56f1d634793e..2d8823a191b6b 100644 --- a/op-supernode/supernode/activity/superroot/superroot_test.go +++ b/op-supernode/supernode/activity/superroot/superroot_test.go @@ -104,6 +104,7 @@ func (m *mockCC) InvalidateBlock(ctx context.Context, height uint64, payloadHash func (m *mockCC) IsDenied(height uint64, payloadHash common.Hash) (bool, error) { return false, nil } +func (m *mockCC) SetResetCallback(cb cc.ResetCallback) {} var _ cc.ChainContainer = (*mockCC)(nil) diff --git a/op-supernode/supernode/supernode.go b/op-supernode/supernode/supernode.go index 91df00ca376e3..73c8c99e148aa 100644 --- a/op-supernode/supernode/supernode.go +++ b/op-supernode/supernode/supernode.go @@ -81,7 +81,8 @@ func New(ctx context.Context, log gethlog.Logger, version string, requestStop co log.Error("missing virtual node config for chain", "chain", id) continue } - s.chains[chainID] = cc.NewChainContainer(chainID, vnCfgs[chainID], log, *cfg, initOverrides, nil, s.rpcRouter.SetHandler, s.metricsFanIn.SetMetricsRegistry) + container := cc.NewChainContainer(chainID, vnCfgs[chainID], log, *cfg, initOverrides, nil, s.rpcRouter.SetHandler, s.metricsFanIn.SetMetricsRegistry) + s.chains[chainID] = container } // Initialize fixed activities @@ -100,6 +101,12 @@ func New(ctx context.Context, log gethlog.Logger, version string, requestStop co } } + // Set up reset callbacks on all chain containers + // When a chain resets, notify all activities + for _, chain := range s.chains { + chain.SetResetCallback(s.onChainReset) + } + // Set up http server addr := 
net.JoinHostPort(cfg.RPCConfig.ListenAddr, strconv.Itoa(cfg.RPCConfig.ListenPort)) s.httpServer = httputil.NewHTTPServer(addr, s.rpcRouter) @@ -221,6 +228,18 @@ func (s *Supernode) Stop(ctx context.Context) error { return nil } +// onChainReset is called when a chain container resets to a given timestamp. +// It notifies all activities about the reset so they can clean up cached state. +func (s *Supernode) onChainReset(chainID eth.ChainID, timestamp uint64) { + s.log.Info("chain reset detected, notifying activities", + "chainID", chainID, + "timestamp", timestamp, + ) + for _, a := range s.activities { + a.ResetOn(chainID, timestamp) + } +} + func (s *Supernode) Stopped() bool { return s.stopped } // RPCAddr returns the bound RPC address (host:port) if the server is listening. diff --git a/op-supernode/supernode/supernode_activities_test.go b/op-supernode/supernode/supernode_activities_test.go index 0af07a547ffa8..ded2a032386a2 100644 --- a/op-supernode/supernode/supernode_activities_test.go +++ b/op-supernode/supernode/supernode_activities_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/ethereum-optimism/optimism/op-service/eth" rpc "github.com/ethereum-optimism/optimism/op-service/rpc" "github.com/ethereum-optimism/optimism/op-supernode/supernode/activity" gethlog "github.com/ethereum/go-ethereum/log" @@ -26,7 +27,8 @@ func (m *mockRunnable) Start(ctx context.Context) error { <-ctx.Done() return ctx.Err() } -func (m *mockRunnable) Stop(ctx context.Context) error { m.stopped++; return nil } +func (m *mockRunnable) Stop(ctx context.Context) error { m.stopped++; return nil } +func (m *mockRunnable) ResetOn(chainID eth.ChainID, timestamp uint64) {} // ensure it satisfies both Activity and RunnableActivity var _ activity.Activity = (*mockRunnable)(nil) @@ -35,6 +37,8 @@ var _ activity.RunnableActivity = (*mockRunnable)(nil) // plain marker-only activity type plainActivity struct{} +func (p *plainActivity) ResetOn(chainID eth.ChainID, timestamp uint64) 
{} + var _ activity.Activity = (*plainActivity)(nil) // Start is implemented, but no Stop, so this is not runnable @@ -47,8 +51,9 @@ func (s *rpcSvc) Echo(_ context.Context) (string, error) { return "ok", nil } type rpcAct struct{} -func (a *rpcAct) RPCNamespace() string { return "act" } -func (a *rpcAct) RPCService() interface{} { return &rpcSvc{} } +func (a *rpcAct) RPCNamespace() string { return "act" } +func (a *rpcAct) RPCService() interface{} { return &rpcSvc{} } +func (a *rpcAct) ResetOn(chainID eth.ChainID, timestamp uint64) {} var _ activity.Activity = (*rpcAct)(nil) var _ activity.RPCActivity = (*rpcAct)(nil) From 8b099323b5b4f6237719b86a7148818aed51916b Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Thu, 5 Feb 2026 14:41:52 -0600 Subject: [PATCH 07/23] op-supernode: add SetResetCallback to ChainContainer Add reset callback mechanism to ChainContainer so it can notify the supernode when a chain rewinds due to block invalidation. - Add ResetCallback type and onReset field to simpleChainContainer - Add SetResetCallback to ChainContainer interface - Call onReset after successful RewindEngine in InvalidateBlock - Update test mocks to implement SetResetCallback --- .../supernode/chain_container/chain_container.go | 11 +++++++++++ .../supernode/chain_container/chain_container_test.go | 2 ++ .../supernode/chain_container/invalidation.go | 10 ++++++++++ 3 files changed, 23 insertions(+) diff --git a/op-supernode/supernode/chain_container/chain_container.go b/op-supernode/supernode/chain_container/chain_container.go index 48719cbe3de54..d38be7aa0cd1b 100644 --- a/op-supernode/supernode/chain_container/chain_container.go +++ b/op-supernode/supernode/chain_container/chain_container.go @@ -56,10 +56,17 @@ type ChainContainer interface { InvalidateBlock(ctx context.Context, height uint64, payloadHash common.Hash) (bool, error) // IsDenied checks if a block hash is on the deny list at the given height. 
IsDenied(height uint64, payloadHash common.Hash) (bool, error) + // SetResetCallback sets a callback that is invoked when the chain resets. + // The supernode uses this to notify activities about chain resets. + SetResetCallback(cb ResetCallback) } type virtualNodeFactory func(cfg *opnodecfg.Config, log gethlog.Logger, initOverrides *rollupNode.InitializationOverrides, appVersion string) virtual_node.VirtualNode +// ResetCallback is called when the chain container resets to a given timestamp. +// The supernode uses this to notify activities about the reset. +type ResetCallback func(chainID eth.ChainID, timestamp uint64) + type simpleChainContainer struct { vn virtual_node.VirtualNode vncfg *opnodecfg.Config @@ -79,10 +86,12 @@ type simpleChainContainer struct { virtualNodeFactory virtualNodeFactory // Factory function to create virtual node (for testing) rollupClient *sources.RollupClient // In-proc rollup RPC client bound to rpcHandler verifiers []activity.VerificationActivity + onReset ResetCallback // Called when chain resets to notify activities } // Interface conformance assertions var _ ChainContainer = (*simpleChainContainer)(nil) +var _ rollupNode.SuperAuthority = (*simpleChainContainer)(nil) func NewChainContainer( chainID eth.ChainID, @@ -182,6 +191,8 @@ func (c *simpleChainContainer) Start(ctx context.Context) error { c.addMetricsRegistry(c.chainID.String(), reg) } } + // Pass the chain container as SuperAuthority for payload denylist checks + c.initOverload.SuperAuthority = c } c.vn = c.virtualNodeFactory(c.vncfg, c.log, c.initOverload, c.appVersion) if c.pause.Load() { diff --git a/op-supernode/supernode/chain_container/chain_container_test.go b/op-supernode/supernode/chain_container/chain_container_test.go index 3f00cbfcc4018..a8d148d14ffba 100644 --- a/op-supernode/supernode/chain_container/chain_container_test.go +++ b/op-supernode/supernode/chain_container/chain_container_test.go @@ -167,6 +167,8 @@ func (m *mockVerificationActivity) 
VerifiedAtTimestamp(ts uint64) (bool, error) return m.verifiedAtTimestampResult, m.verifiedAtTimestampErr } +func (m *mockVerificationActivity) ResetOn(chainID eth.ChainID, timestamp uint64) {} + // Test helpers func createTestVNConfig() *opnodecfg.Config { return &opnodecfg.Config{ diff --git a/op-supernode/supernode/chain_container/invalidation.go b/op-supernode/supernode/chain_container/invalidation.go index b36646c35ecbb..8925fdd945eb4 100644 --- a/op-supernode/supernode/chain_container/invalidation.go +++ b/op-supernode/supernode/chain_container/invalidation.go @@ -204,9 +204,19 @@ func (c *simpleChainContainer) InvalidateBlock(ctx context.Context, height uint6 "rewindToTimestamp", priorTimestamp, ) + // Notify activities about the reset + if c.onReset != nil { + c.onReset(c.chainID, priorTimestamp) + } + return true, nil } +// SetResetCallback sets a callback that is invoked when the chain resets. +func (c *simpleChainContainer) SetResetCallback(cb ResetCallback) { + c.onReset = cb +} + // blockNumberToTimestamp converts a block number to its timestamp using rollup config. 
func (c *simpleChainContainer) blockNumberToTimestamp(blockNum uint64) uint64 { if c.vncfg == nil { From 9d8ae515ac040f867f38533db397810af176ab55 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Thu, 5 Feb 2026 14:42:01 -0600 Subject: [PATCH 08/23] op-supernode: implement ResetOn in Interop activity When a chain rewinds, the Interop activity must clean up its cached state: - Rewind logsDB for the chain to before the reset timestamp - Rewind verifiedDB to remove entries at or after reset timestamp - Log error if verified results were deleted (unexpected) Also adds: - Rewind/Clear methods to LogsDB interface - RewindTo method to VerifiedDB - noopInvalidator for calling logsDB.Rewind - Update test mocks for new interface methods --- .../supernode/activity/interop/algo_test.go | 5 +- .../supernode/activity/interop/interop.go | 94 +++++++++++++++++++ .../activity/interop/interop_test.go | 6 +- .../supernode/activity/interop/logdb.go | 15 +++ .../supernode/activity/interop/logdb_test.go | 8 +- .../supernode/activity/interop/verified_db.go | 46 +++++++++ 6 files changed, 169 insertions(+), 5 deletions(-) diff --git a/op-supernode/supernode/activity/interop/algo_test.go b/op-supernode/supernode/activity/interop/algo_test.go index fd03926b3a31d..c5ea078287076 100644 --- a/op-supernode/supernode/activity/interop/algo_test.go +++ b/op-supernode/supernode/activity/interop/algo_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/reads" suptypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -537,7 +538,9 @@ func (m *algoMockLogsDB) AddLog(logHash common.Hash, parentBlock eth.BlockID, lo func (m *algoMockLogsDB) SealBlock(parentHash common.Hash, block eth.BlockID, timestamp uint64) error { return nil } -func (m *algoMockLogsDB) Close() error { return nil } +func (m *algoMockLogsDB) Rewind(inv 
reads.Invalidator, newHead eth.BlockID) error { return nil } +func (m *algoMockLogsDB) Clear(inv reads.Invalidator) error { return nil } +func (m *algoMockLogsDB) Close() error { return nil } var _ LogsDB = (*algoMockLogsDB)(nil) diff --git a/op-supernode/supernode/activity/interop/interop.go b/op-supernode/supernode/activity/interop/interop.go index ab3e560ca6668..cdf54fb0d3569 100644 --- a/op-supernode/supernode/activity/interop/interop.go +++ b/op-supernode/supernode/activity/interop/interop.go @@ -390,3 +390,97 @@ func (i *Interop) VerifiedAtTimestamp(ts uint64) (bool, error) { } return i.verifiedDB.Has(ts) } + +// ResetOn is called when a chain container resets to a given timestamp. +// It clears the logsDB for that chain and removes any verified results at or after the timestamp. +func (i *Interop) ResetOn(chainID eth.ChainID, timestamp uint64) { + i.mu.Lock() + defer i.mu.Unlock() + + i.log.Warn("ResetOn called", + "chainID", chainID, + "timestamp", timestamp, + ) + + // Reset the logsDB for this chain + if db, ok := i.logsDBs[chainID]; ok { + // Find the block just before the reset timestamp + chain, chainOk := i.chains[chainID] + if !chainOk { + i.log.Error("chain not found for reset", "chainID", chainID) + return + } + + // Get the block time to calculate the previous block's timestamp + blockTime := chain.BlockTime() + if timestamp > blockTime { + prevTimestamp := timestamp - blockTime + // Try to get the block at the previous timestamp to use as rewind target + prevBlock, err := chain.BlockAtTimestamp(i.ctx, prevTimestamp, eth.Safe) + if err == nil { + i.log.Info("rewinding logsDB to previous block", + "chainID", chainID, + "newHead", prevBlock.ID(), + ) + if err := db.Rewind(&noopInvalidator{}, prevBlock.ID()); err != nil { + i.log.Error("failed to rewind logsDB", + "chainID", chainID, + "err", err, + ) + } + } else { + // If we can't get the previous block, clear the entire logsDB + i.log.Warn("could not get previous block, clearing logsDB", + 
"chainID", chainID, + "prevTimestamp", prevTimestamp, + "err", err, + ) + if err := db.Clear(&noopInvalidator{}); err != nil { + i.log.Error("failed to clear logsDB", + "chainID", chainID, + "err", err, + ) + } + } + } else { + // If timestamp is at or before blockTime, clear the entire logsDB + i.log.Info("clearing logsDB (reset timestamp at or before first block)", + "chainID", chainID, + ) + if err := db.Clear(&noopInvalidator{}); err != nil { + i.log.Error("failed to clear logsDB", + "chainID", chainID, + "err", err, + ) + } + } + } else { + i.log.Warn("no logsDB found for chain", "chainID", chainID) + } + + // Remove any verified results at or after the timestamp + if i.verifiedDB != nil { + deleted, err := i.verifiedDB.RewindTo(timestamp) + if err != nil { + i.log.Error("failed to rewind verifiedDB", + "timestamp", timestamp, + "err", err, + ) + } + if deleted { + // This is unexpected - we shouldn't have verified results at timestamps + // that are being reset. Log an error for visibility. 
+ i.log.Error("UNEXPECTED: verified results were deleted on reset", + "chainID", chainID, + "timestamp", timestamp, + ) + } else { + i.log.Info("verifiedDB rewound (no results deleted)", + "timestamp", timestamp, + ) + } + } + + // Reset the currentL1 to force re-evaluation + i.currentL1 = eth.BlockID{} +} diff --git a/op-supernode/supernode/activity/interop/interop_test.go b/op-supernode/supernode/activity/interop/interop_test.go index bcbb4281cbb01..909cec82231bb 100644 --- a/op-supernode/supernode/activity/interop/interop_test.go +++ b/op-supernode/supernode/activity/interop/interop_test.go @@ -11,6 +11,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supernode/supernode/activity" cc "github.com/ethereum-optimism/optimism/op-supernode/supernode/chain_container" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/reads" suptypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" @@ -898,6 +899,7 @@ func (m *mockChainContainer) InvalidateBlock(ctx context.Context, height uint64, func (m *mockChainContainer) IsDenied(height uint64, payloadHash common.Hash) (bool, error) { return false, nil } +func (m *mockChainContainer) SetResetCallback(cb cc.ResetCallback) {} var _ cc.ChainContainer = (*mockChainContainer)(nil) @@ -940,6 +942,8 @@ func (m *mockLogsDBForInterop) AddLog(logHash common.Hash, parentBlock eth.Block func (m *mockLogsDBForInterop) SealBlock(parentHash common.Hash, block eth.BlockID, timestamp uint64) error { return nil } -func (m *mockLogsDBForInterop) Close() error { return nil } +func (m *mockLogsDBForInterop) Rewind(inv reads.Invalidator, newHead eth.BlockID) error { return nil } +func (m *mockLogsDBForInterop) Clear(inv reads.Invalidator) error { return nil } +func (m *mockLogsDBForInterop) Close() error { return nil } var _ LogsDB = (*mockLogsDBForInterop)(nil) diff --git 
a/op-supernode/supernode/activity/interop/logdb.go b/op-supernode/supernode/activity/interop/logdb.go index 07c501734764c..b5b3799b34f12 100644 --- a/op-supernode/supernode/activity/interop/logdb.go +++ b/op-supernode/supernode/activity/interop/logdb.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/processors" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/reads" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -34,6 +35,10 @@ type LogsDB interface { AddLog(logHash common.Hash, parentBlock eth.BlockID, logIdx uint32, execMsg *types.ExecutingMessage) error // SealBlock seals a block in the database. SealBlock(parentHash common.Hash, block eth.BlockID, timestamp uint64) error + // Rewind removes all blocks after newHead from the database. + Rewind(inv reads.Invalidator, newHead eth.BlockID) error + // Clear removes all data from the database. + Clear(inv reads.Invalidator) error // Close closes the database. Close() error } @@ -47,6 +52,16 @@ type noopLogsDBMetrics struct{} func (n *noopLogsDBMetrics) RecordDBEntryCount(kind string, count int64) {} func (n *noopLogsDBMetrics) RecordDBSearchEntriesRead(count int64) {} +// noopInvalidator implements reads.Invalidator as a no-op. +// Used for rewind operations where we don't need cache invalidation. +type noopInvalidator struct{} + +func (n *noopInvalidator) TryInvalidate(rule reads.InvalidationRule) (release func(), err error) { + return func() {}, nil +} + +var _ reads.Invalidator = (*noopInvalidator)(nil) + // openLogsDB opens a logs.DB for the given chain in the data directory. 
func openLogsDB(logger log.Logger, chainID eth.ChainID, dataDir string) (LogsDB, error) { chainDir := filepath.Join(dataDir, fmt.Sprintf("chain-%s", chainID)) diff --git a/op-supernode/supernode/activity/interop/logdb_test.go b/op-supernode/supernode/activity/interop/logdb_test.go index f0bed03f027f8..5087668eafedf 100644 --- a/op-supernode/supernode/activity/interop/logdb_test.go +++ b/op-supernode/supernode/activity/interop/logdb_test.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supernode/supernode/activity" cc "github.com/ethereum-optimism/optimism/op-supernode/supernode/chain_container" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/reads" suptypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -548,9 +549,9 @@ func (m *mockLogsDB) SealBlock(parentHash common.Hash, block eth.BlockID, timest return m.sealBlockErr } -func (m *mockLogsDB) Close() error { - return nil -} +func (m *mockLogsDB) Rewind(inv reads.Invalidator, newHead eth.BlockID) error { return nil } +func (m *mockLogsDB) Clear(inv reads.Invalidator) error { return nil } +func (m *mockLogsDB) Close() error { return nil } var _ LogsDB = (*mockLogsDB)(nil) @@ -602,5 +603,6 @@ func (m *statefulMockChainContainer) InvalidateBlock(ctx context.Context, height func (m *statefulMockChainContainer) IsDenied(height uint64, payloadHash common.Hash) (bool, error) { return false, nil } +func (m *statefulMockChainContainer) SetResetCallback(cb cc.ResetCallback) {} var _ cc.ChainContainer = (*statefulMockChainContainer)(nil) diff --git a/op-supernode/supernode/activity/interop/verified_db.go b/op-supernode/supernode/activity/interop/verified_db.go index 9e7e03c4e486e..0f1fba04c020b 100644 --- a/op-supernode/supernode/activity/interop/verified_db.go +++ b/op-supernode/supernode/activity/interop/verified_db.go @@ -181,6 +181,52 @@ func (v *VerifiedDB) LastTimestamp() (uint64, bool) { 
return v.lastTimestamp, v.initialized } +// RewindTo removes all verified results at or after the given timestamp. +// Returns true if any results were deleted, false otherwise. +func (v *VerifiedDB) RewindTo(timestamp uint64) (bool, error) { + var deleted bool + + err := v.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(bucketName) + c := b.Cursor() + + // Start from the timestamp and delete all entries at or after it + startKey := timestampToKey(timestamp) + for k, _ := c.Seek(startKey); k != nil; k, _ = c.Next() { + if err := b.Delete(k); err != nil { + return err + } + deleted = true + } + return nil + }) + if err != nil { + return false, fmt.Errorf("failed to rewind verifiedDB: %w", err) + } + + // Update state + if deleted { + // Reinitialize lastTimestamp from the database + if err := v.initLastTimestamp(); err != nil { + return deleted, fmt.Errorf("failed to reinitialize lastTimestamp after rewind: %w", err) + } + // If no timestamps remain, reset initialized state + if err := v.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(bucketName) + c := b.Cursor() + if k, _ := c.First(); k == nil { + v.initialized = false + v.lastTimestamp = 0 + } + return nil + }); err != nil { + return deleted, err + } + } + + return deleted, nil +} + // Close closes the database. func (v *VerifiedDB) Close() error { return v.db.Close() From c44538e92fd54e37e8a87aaeacd3202cf0b372ad Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Thu, 5 Feb 2026 14:42:08 -0600 Subject: [PATCH 09/23] op-acceptance-tests: add replacement block assertions Add assertions to verify block replacement behavior: - Replacement block hash differs from invalid block hash - Invalid transaction is NOT present in replacement block These checks confirm the deposits-only replacement mechanism works correctly after an invalid executing message is detected. 
--- .../invalid_message_replacement_test.go | 148 ++++++++++++++++-- 1 file changed, 133 insertions(+), 15 deletions(-) diff --git a/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go b/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go index 0b2a60ff3d797..a611ffaea4daa 100644 --- a/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go +++ b/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go @@ -17,16 +17,15 @@ import ( "github.com/ethereum-optimism/optimism/op-service/txintent" ) -// TestSupernodeInteropInvalidMessageReset tests that: +// TestSupernodeInteropInvalidMessageReplacement tests that: // WHEN: an invalid Executing Message is included in a chain // THEN: // - The interop activity detects the invalid block // - The chain container is told to invalidate the block // - A reset/rewind is triggered if the chain is using that block -// -// Note: This test observes reset behavior. Full block replacement requires -// re-derivation which is a separate mechanism. 
-func TestSupernodeInteropInvalidMessageReset(gt *testing.T) { +// - A replacement block is built at the same height (deposits-only) +// - The replacement block's timestamp eventually becomes verified +func TestSupernodeInteropInvalidMessageReplacement(gt *testing.T) { t := devtest.SerialT(gt) sys := presets.NewTwoL2SupernodeInterop(t, 0) @@ -79,7 +78,7 @@ func TestSupernodeInteropInvalidMessageReset(gt *testing.T) { // Send an INVALID executing message on chain B // Modify the message identifier to make it invalid (wrong log index) - invalidExecReceipt := sendInvalidExecMessageForReset(t, bob, initTx, 0) + invalidExecReceipt := sendInvalidExecMessageForReplacement(t, bob, initTx, 0) invalidBlockNumber := bigs.Uint64Strict(invalidExecReceipt.BlockNumber) invalidBlockHash := invalidExecReceipt.BlockHash @@ -163,22 +162,141 @@ func TestSupernodeInteropInvalidMessageReset(gt *testing.T) { t.Require().True(resetDetected, "reset should be triggered when invalid block is detected") - // ASSERTION: The invalid block's timestamp should NOT be verified - // (because the reset means the block is no longer valid) - finalResp, err := snClient.SuperRootAtTimestamp(ctx, invalidBlockTimestamp) - t.Require().NoError(err) - t.Require().Nil(finalResp.Data, - "invalid block timestamp should not be verified after reset") + t.Logger().Info("reset confirmed, now waiting for replacement block", + "invalid_block_number", invalidBlockNumber, + "invalid_block_hash", invalidBlockHash, + ) + + // PHASE 2: Wait for a replacement block to appear at the same height + // After rewind, the derivation pipeline should rebuild the block with deposits-only + var replacementBlockHash eth.BlockID + var replacementDetected bool + + replacementTimeout := 60 * time.Second + replacementStart := time.Now() + + for time.Since(replacementStart) < replacementTimeout { + time.Sleep(checkInterval) + + // Try to get the block at the invalid block number + currentBlock, err := 
sys.L2ELB.Escape().EthClient().BlockRefByNumber(ctx, invalidBlockNumber) + if err != nil { + t.Logger().Debug("waiting for replacement block", + "elapsed", time.Since(replacementStart).Round(time.Second), + "err", err, + ) + continue + } + + // Check if we got a different block than the invalid one + if currentBlock.Hash != invalidBlockHash { + replacementBlockHash = currentBlock.ID() + replacementDetected = true + t.Logger().Info("REPLACEMENT DETECTED! New block at same height", + "block_number", invalidBlockNumber, + "old_hash", invalidBlockHash, + "new_hash", currentBlock.Hash, + ) + break + } + + t.Logger().Debug("block exists but still has invalid hash (waiting)", + "elapsed", time.Since(replacementStart).Round(time.Second), + "hash", currentBlock.Hash, + ) + } + + // ASSERTION: Replacement block should have been created + t.Require().True(replacementDetected, + "replacement block should be created at the same height after invalidation") + t.Require().NotEqual(invalidBlockHash, replacementBlockHash.Hash, + "replacement block should have different hash than invalid block") + + t.Logger().Info("replacement block confirmed, verifying it differs from original", + "replacement_hash", replacementBlockHash.Hash, + ) + + // ASSERTION: The replacement block is different than the original + // Fetch the replacement block with its transactions + replacementBlockInfo, replacementTxs, err := sys.L2ELB.Escape().EthClient().InfoAndTxsByNumber(ctx, invalidBlockNumber) + t.Require().NoError(err, "failed to fetch replacement block") + + t.Require().NotEqual(invalidBlockHash, replacementBlockInfo.Hash(), + "replacement block hash must differ from invalid block hash") + t.Logger().Info("confirmed replacement block differs from original", + "original_hash", invalidBlockHash, + "replacement_hash", replacementBlockInfo.Hash(), + ) + + // ASSERTION: The invalid transaction no longer exists in the chain + // The invalid exec message transaction should NOT be in the replacement block + 
invalidTxHash := invalidExecReceipt.TxHash + txInReplacementBlock := false + for _, tx := range replacementTxs { + if tx.Hash() == invalidTxHash { + txInReplacementBlock = true + break + } + } + t.Require().False(txInReplacementBlock, + "invalid transaction should NOT exist in replacement block") + + // Also verify the transaction receipt is no longer available at that block + // (the tx may have been re-included in a later block, but not at the same height) + t.Logger().Info("confirmed invalid transaction not in replacement block", + "invalid_tx_hash", invalidTxHash, + "replacement_block_tx_count", len(replacementTxs), + ) + + t.Logger().Info("replacement block validated, waiting for verification", + "replacement_hash", replacementBlockHash.Hash, + ) + + // PHASE 3: Wait for the replacement block's timestamp to become verified + var verified bool + verificationTimeout := 60 * time.Second + verificationStart := time.Now() + + for time.Since(verificationStart) < verificationTimeout { + time.Sleep(checkInterval) + + resp, err := snClient.SuperRootAtTimestamp(ctx, invalidBlockTimestamp) + if err != nil { + t.Logger().Debug("waiting for verification", + "elapsed", time.Since(verificationStart).Round(time.Second), + "err", err, + ) + continue + } + + if resp.Data != nil { + verified = true + t.Logger().Info("VERIFIED! 
Timestamp now verified with replacement block", + "timestamp", invalidBlockTimestamp, + "super_root", resp.Data.SuperRoot, + ) + break + } + + t.Logger().Debug("timestamp not yet verified", + "elapsed", time.Since(verificationStart).Round(time.Second), + ) + } + + // ASSERTION: The replacement block's timestamp should eventually be verified + t.Require().True(verified, + "replacement block timestamp should become verified") - t.Logger().Info("test complete: reset was triggered for invalid block", + t.Logger().Info("test complete: invalid block was replaced and verified", "invalid_block_number", invalidBlockNumber, "invalid_block_hash", invalidBlockHash, + "replacement_block_hash", replacementBlockHash.Hash, ) } -// sendInvalidExecMessageForReset sends an executing message with a modified (invalid) identifier. +// sendInvalidExecMessageForReplacement sends an executing message with a modified (invalid) identifier. // This makes the message invalid because it references a non-existent log index. -func sendInvalidExecMessageForReset( +func sendInvalidExecMessageForReplacement( t devtest.T, bob *dsl.EOA, initIntent *txintent.IntentTx[*txintent.InitTrigger, *txintent.InteropOutput], From c4690af94853deeac8a5114995cb0740975a127a Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Fri, 6 Feb 2026 14:36:40 -0600 Subject: [PATCH 10/23] fill in missing unit tests Add comprehensive unit tests for block invalidation sub-features: - TestDenyList_ConcurrentAccess: Verify concurrent Add/Contains operations - TestInvalidateBlock: Test invalidateBlock wiring from Interop to ChainContainer - TestResetOn: Test Interop activity reset behavior on chain rewind - TestVerifiedDB_RewindTo: Test VerifiedDB rewind functionality - TestSuperAuthority_*: Test payload denial in op-node engine controller Also includes BlockInvalidation_Feature.md diary documenting all sub-features. 
--- BlockInvalidation_Feature.md | 453 ++++++++++++++++++ .../rollup/engine/engine_controller_test.go | 147 ++++++ .../activity/interop/interop_test.go | 305 +++++++++++- .../activity/interop/verified_db_test.go | 157 ++++++ .../chain_container/invalidation_test.go | 71 +++ 5 files changed, 1129 insertions(+), 4 deletions(-) create mode 100644 BlockInvalidation_Feature.md diff --git a/BlockInvalidation_Feature.md b/BlockInvalidation_Feature.md new file mode 100644 index 0000000000000..96eea0cbad215 --- /dev/null +++ b/BlockInvalidation_Feature.md @@ -0,0 +1,453 @@ +# BlockInvalidation Feature Diary + +## Feature Overview +**Feature Name:** Block Invalidation & Replacement +**Branch:** `supernode/BlockInvalidation` +**Developer:** Axel Kingsley +**Started:** 2026-02-06 + +### Purpose +Implement a block invalidation mechanism in the op-supernode that: +1. Persists invalid payload hashes in a DenyList (keyed by block height) +2. Triggers chain rewinds when the current chain uses an invalidated block +3. Notifies activities (especially Interop) to clean up cached state on reset +4. Integrates with op-node to deny payloads before insertion and trigger deposits-only replacement + +--- + +## Diary of Interactions + +### Session 1 (Retroactive) — Prior to Skill Adoption + +**Context:** Development began before the op-feature skill was adopted. The following commits were created through iterative prompts. + +### Session 2 — Skill Adopted + +**Prompt:** Developer provided op-feature skill prompt and asked me to adopt it. + +**Action:** Created this diary file. Developer granted standing permission to update diary without approval. + +### Session 2.1 — Sub-Feature Review for PR + +**Prompt:** Developer requested detailed breakdown of Sub-Feature 1 for peer review. + +**Action:** Generated comprehensive report (see Sub-Feature 1 section below). 
+ +--- + +## Current State + +### Commits on Branch (9 total) +``` +31ea9484c6 op-acceptance-tests: add replacement block assertions +90db9cf396 op-supernode: implement ResetOn in Interop activity +dbbbc6568c op-supernode: add SetResetCallback to ChainContainer +c920528378 op-supernode: add ResetOn method to Activity interface +539e46aef0 op-node: add SuperAuthority interface for payload denial +227537f99a Fix block invalidation: use eth.Unsafe label and improve test resilience +9a63b131e9 Wire up block invalidation from interop activity to chain container +ae4358d202 op-supernode: Add unit tests for block invalidation +425bbb2d9a op-supernode: Add block invalidation and deny list to chain container +``` + +### Test Coverage Summary + +| Component | Test File | Status | +|-----------|-----------|--------| +| DenyList | `invalidation_test.go` | ✅ Implemented | +| InvalidateBlock | `invalidation_test.go` | ✅ Implemented | +| IsDenied | `invalidation_test.go` | ✅ Implemented | +| VerifiedDB.RewindTo | `verified_db_test.go` | ❌ Not yet tested | +| Interop.ResetOn | `interop_test.go` | ❌ Not yet tested | +| SuperAuthority denial | - | ❌ Not yet tested | +| Acceptance (Halt) | `invalid_message_halt_test.go` | ✅ Exists | +| Acceptance (Replace) | `invalid_message_replacement_test.go` | ✅ Exists | + +--- + +# Sub-Feature Breakdowns + +## Sub-Feature 1: DenyList & InvalidateBlock + +### Commits + +| SHA | Message | Files Changed | +|-----|---------|---------------| +| `425bbb2d9a` | op-supernode: Add block invalidation and deny list to chain container | 3 files, +471 | +| `ae4358d202` | op-supernode: Add unit tests for block invalidation | 6 files, +538 | + +### Purpose + +Provide a persistent mechanism to track invalid block payload hashes and trigger chain rewinds when the current chain uses an invalidated block. + +**Why this exists:** When the Interop activity detects an invalid cross-chain executing message, it needs to: +1. 
Remember that block is invalid (so it's never re-applied) +2. Trigger a rewind if the chain is currently using that block + +### Specification + +#### DenyList +A bbolt-backed key-value store that persists invalid payload hashes keyed by block height. + +| Method | Signature | Behavior | +|--------|-----------|----------| +| `OpenDenyList` | `(dataDir string) (*DenyList, error)` | Opens/creates DB, ensures bucket exists, creates parent dirs | +| `Add` | `(height uint64, payloadHash Hash) error` | Appends hash to height's entry. Idempotent (no duplicates) | +| `Contains` | `(height uint64, payloadHash Hash) (bool, error)` | Returns true if hash exists at height | +| `GetDeniedHashes` | `(height uint64) ([]Hash, error)` | Returns all hashes at height | +| `Close` | `() error` | Closes bbolt DB | + +**Storage format:** +- Key: `uint64` height as 8-byte big-endian +- Value: Concatenated 32-byte hashes + +#### InvalidateBlock +Added to `ChainContainer` interface. + +```go +InvalidateBlock(ctx context.Context, height uint64, payloadHash common.Hash) (bool, error) +``` + +**Behavior:** +1. Add hash to DenyList +2. If engine available, check if current block at `height` matches `payloadHash` +3. If match → call `RewindEngine(ctx, priorTimestamp)` → return `true` +4. If no match → return `false` (no rewind needed) + +#### IsDenied +Helper method on `ChainContainer`: +```go +IsDenied(height uint64, payloadHash common.Hash) (bool, error) +``` +Delegates to `denyList.Contains`. 
+ +### Test Coverage + +#### `TestDenyList_AddAndContains` — 4 subcases + +| Subcase | Setup | Assertion | +|---------|-------|-----------| +| `single hash at height` | Add 1 hash at height 100 | `Contains(100, hash)` → `true` | +| `multiple hashes same height` | Add 3 hashes at height 50 | All 3 return `true` from `Contains` | +| `hash at wrong height returns false` | Add hash at height 10 | `Contains(11, hash)` → `false`, `Contains(10, hash)` → `true` | +| `duplicate add is idempotent` | Add same hash 3 times | `GetDeniedHashes` returns exactly 1 entry | + +#### `TestDenyList_Persistence` — 2 subcases + +| Subcase | Setup | Assertion | +|---------|-------|-----------| +| `survives close and reopen` | Add 4 hashes, close DB | Reopen → all 4 hashes present, correct counts | +| `empty DB on fresh open` | (none) | `Contains` → `false`, `GetDeniedHashes` → empty | + +#### `TestDenyList_GetDeniedHashes` — 3 subcases + +| Subcase | Setup | Assertion | +|---------|-------|-----------| +| `returns all hashes at height` | Add 5 hashes at height 100 | `GetDeniedHashes(100)` returns 5 | +| `empty for clean height` | Add at heights 10, 30 | `GetDeniedHashes(20)` → empty | +| `isolated by height` | Add 2 at h10, 3 at h20, 1 at h30 | Correct counts at each height | + +#### `TestInvalidateBlock` — 4 subcases + +| Subcase | Config | Assertion | +|---------|--------|-----------| +| `current block matches triggers rewind` | currentHash == payloadHash | `rewound=true`, `RewindToTimestamp` called with correct ts | +| `current block differs no rewind` | currentHash ≠ payloadHash | `rewound=false`, no rewind call | +| `engine unavailable adds to denylist only` | engine=nil | `rewound=false`, hash still in denylist | +| `rewind to height-1 timestamp calculated correctly` | height=10 | Rewind ts = `genesis + (9 * blockTime)` | + +#### `TestIsDenied` — 3 subcases + +| Subcase | Setup | Assertion | +|---------|-------|-----------| +| `denied block returns true` | Add hash at height 100 | 
`IsDenied(100, hash)` → `true` | +| `non-denied returns false` | Add hash at height 100 | `IsDenied(100, differentHash)` → `false` | +| `wrong height returns false` | Add hash at height 10 | `IsDenied(11, sameHash)` → `false` | + +#### `TestDenyList_ConcurrentAccess` — 1 case + +| Subcase | Setup | Assertion | +|---------|-------|-----------| +| 10 concurrent accessors | 10 goroutines, 100 ops each | All writes succeed, all reads find own hashes, no errors | + +### Untested Behavior + +| Behavior | Why Untested | +|----------|--------------| +| Corrupt bbolt file recovery | Out of scope | +| Very large number of hashes at single height | Performance not tested | + +### Specified Behavior (Clarified) + +| Behavior | Status | +|----------|--------| +| `height=0` | ✅ Works fine, acceptable | +| `RewindEngine` fails | ✅ Hash remains in denylist — **intended behavior** | +| Concurrent access | ✅ Now tested (see `TestDenyList_ConcurrentAccess`) + +### Code Locations + +| Component | File | Lines | +|-----------|------|-------| +| DenyList struct | `chain_container/invalidation.go` | 23-28 | +| OpenDenyList | `chain_container/invalidation.go` | 30-52 | +| Add | `chain_container/invalidation.go` | 62-91 | +| Contains | `chain_container/invalidation.go` | 94-119 | +| GetDeniedHashes | `chain_container/invalidation.go` | 122-143 | +| InvalidateBlock | `chain_container/invalidation.go` | 149-207 | +| IsDenied | `chain_container/invalidation.go` | 218-223 | +| Tests | `chain_container/invalidation_test.go` | 1-514 | + +--- + +## Sub-Feature 2: Wire Interop → ChainContainer + +### Commits + +| SHA | Message | Files Changed | +|-----|---------|---------------| +| `9a63b131e9` | Wire up block invalidation from interop activity | 2 files | +| `227537f99a` | Fix block invalidation: use eth.Unsafe label | 2 files | + +### Purpose + +Connect the Interop activity's invalid message detection to the ChainContainer's invalidation mechanism. + +**Flow:** +1. 
Interop detects invalid executing message in `verifyInteropMessages` +2. `handleResult` sees `InvalidHeads` in the result +3. Calls `invalidateBlock(chainID, blockID)` for each invalid head +4. `invalidateBlock` calls `chain.InvalidateBlock(ctx, blockNum, hash)` + +### Specification + +```go +func (i *Interop) invalidateBlock(chainID eth.ChainID, blockID eth.BlockID) error +``` + +| Scenario | Behavior | +|----------|----------| +| Chain not found | Return error `"chain %s not found"` | +| Chain.InvalidateBlock errors | Log error, return error | +| Chain.InvalidateBlock returns `rewound=true` | Log warn "chain rewound" | +| Chain.InvalidateBlock returns `rewound=false` | Log info "block added to denylist" | + +### Test Coverage + +#### `TestInvalidateBlock` (interop_test.go) — 4 subcases + +| Subcase | Setup | Assertion | +|---------|-------|-----------| +| `calls chain.InvalidateBlock with correct args` | Call invalidateBlock(chainID, blockID) | mock tracks height=500, hash=0xBAD | +| `returns error when chain not found` | Call with unknown chainID | Error contains "not found", no mock calls | +| `returns error when chain.InvalidateBlock fails` | mock returns error | Error propagated | +| `handleResult calls invalidateBlock for each invalid head` | Result with 2 InvalidHeads | Both mocks have 1 call each with correct args | + +### Code Locations + +| Component | File | Lines | +|-----------|------|-------| +| invalidateBlock | `activity/interop/interop.go` | 293-322 | +| Tests | `activity/interop/interop_test.go` | TestInvalidateBlock | + +## Sub-Feature 3: SuperAuthority Injection + +### Commits + +| SHA | Message | Files Changed | +|-----|---------|---------------| +| `539e46aef0` | op-node: add SuperAuthority interface for payload denial | 8 files | + +### Purpose + +Allow the `op-supernode` to inject a "SuperAuthority" into the `op-node` engine controller. 
This authority can deny payloads before they are inserted, triggering deposits-only replacement during Holocene derivation. + +**Flow:** +1. `ChainContainer` implements `SuperAuthority.IsDenied(blockNumber, hash)` +2. Passed via `InitializationOverrides` when creating `VirtualNode` +3. `EngineController` calls `IsDenied` before `NewPayload` +4. If denied + Holocene + derived → request deposits-only replacement +5. If denied otherwise → emit `PayloadInvalidEvent` + +### Specification + +```go +type SuperAuthority interface { + IsDenied(blockNumber uint64, payloadHash common.Hash) (bool, error) +} +``` + +| Scenario | Behavior | +|----------|----------| +| IsDenied returns `(true, nil)` | Payload rejected, replacement requested (Holocene) or invalid event | +| IsDenied returns `(false, nil)` | Payload proceeds to engine | +| IsDenied returns `(_, error)` | Log warning, proceed with payload (graceful degradation) | +| SuperAuthority is nil | No check, proceed with payload | + +### Test Coverage + +#### `TestSuperAuthority_*` (engine_controller_test.go) — 4 tests + +| Test | Setup | Assertion | +|------|-------|-----------| +| `DeniedPayload_EmitsInvalidEvent` | sa.DenyBlock for payload | PayloadInvalidEvent emitted, NewPayload NOT called | +| `AllowedPayload_Proceeds` | sa empty (no deny) | NewPayload called, PayloadSuccessEvent emitted | +| `Error_ProceedsWithPayload` | sa.shouldError = true | NewPayload called despite error, PayloadSuccessEvent emitted | +| `NilAuthority_Proceeds` | sa = nil | NewPayload called, PayloadSuccessEvent emitted | + +### Code Locations + +| Component | File | Lines | +|-----------|------|-------| +| SuperAuthority interface | `op-node/rollup/engine/engine_controller.go` | 96-104 | +| IsDenied check | `op-node/rollup/engine/payload_process.go` | 31-58 | +| InitializationOverrides | `op-node/node/node.go` | InitializationOverrides struct | +| Tests | `op-node/rollup/engine/engine_controller_test.go` | TestSuperAuthority_* | + +## 
Sub-Feature 4: Activity Reset Notification Chain + +### Commits + +| SHA | Message | Files Changed | +|-----|---------|---------------| +| `c920528378` | op-supernode: add ResetOn method to Activity interface | 6 files | +| `dbbbc6568c` | op-supernode: add SetResetCallback to ChainContainer | 3 files | +| `90db9cf396` | op-supernode: implement ResetOn in Interop activity | 6 files | + +### Purpose + +When a chain container rewinds due to block invalidation, all activities must be notified so they can clean up cached state. The Interop activity specifically must rewind its `logsDB` and `verifiedDB`. + +**Flow:** +1. `ChainContainer.InvalidateBlock` triggers a rewind +2. After successful rewind, calls `onReset(chainID, timestamp)` callback +3. `Supernode.onChainReset` receives the notification +4. Iterates through all activities, calling `ResetOn(chainID, timestamp)` +5. Interop: rewinds logsDB and verifiedDB +6. Heartbeat/Superroot: no-op (no cached state) + +### Specification + +```go +// Activity interface +ResetOn(chainID eth.ChainID, timestamp uint64) + +// ChainContainer interface +SetResetCallback(cb ResetCallback) + +// VerifiedDB +RewindTo(timestamp uint64) (deleted bool, err error) +``` + +| Scenario | Behavior | +|----------|----------| +| Previous block available | logsDB.Rewind(prevBlockID) | +| Previous block not found | logsDB.Clear() | +| timestamp ≤ blockTime | logsDB.Clear() | +| Verified results deleted | Log ERROR (unexpected) | + +### Test Coverage + +#### `TestVerifiedDB_RewindTo` (verified_db_test.go) — 4 subcases + +| Subcase | Setup | Assertion | +|---------|-------|-----------| +| `removes entries at and after timestamp` | Commit 100-105, RewindTo(103) | 100-102 exist, 103-105 gone, lastTs=102 | +| `returns false when no entries deleted` | Commit 98-100, RewindTo(200) | All exist, deleted=false | +| `rewind all entries` | Commit 100-102, RewindTo(0) | All gone, lastTs uninitialized | +| `allows sequential commits after rewind` | Commit 
100-105, RewindTo(103), Commit 103 | New 103 data readable | + +#### `TestResetOn` (interop_test.go) — 6 subcases + +| Subcase | Setup | Assertion | +|---------|-------|-----------| +| `rewinds logsDB when previous block available` | mock returns valid block | logsDB.Rewind called with prevBlockID | +| `clears logsDB when previous block not available` | mock returns error | logsDB.Clear called | +| `clears logsDB when timestamp at or before blockTime` | timestamp=1, blockTime=1 | logsDB.Clear called | +| `rewinds verifiedDB` | Commit 98-102, ResetOn(100) | 98-99 exist, 100-102 gone | +| `resets currentL1` | currentL1={500, 0xL1} | currentL1 = {} after reset | +| `handles unknown chain gracefully` | ResetOn(unknownChain, 100) | No panic | + +### Code Locations + +| Component | File | Lines | +|-----------|------|-------| +| ResetOn (Activity interface) | `activity/activity.go` | 4-8 | +| SetResetCallback | `chain_container/chain_container.go` | SetResetCallback method | +| onReset callback | `chain_container/invalidation.go` | In InvalidateBlock | +| Supernode.onChainReset | `supernode/supernode.go` | onChainReset method | +| Interop.ResetOn | `activity/interop/interop.go` | 388-480 | +| VerifiedDB.RewindTo | `activity/interop/verified_db.go` | 184-220 | +| Tests | verified_db_test.go, interop_test.go | TestVerifiedDB_RewindTo, TestResetOn | + +## Sub-Feature 5: Acceptance Tests + +### Commits + +| SHA | Message | Files Changed | +|-----|---------|---------------| +| `31ea9484c6` | op-acceptance-tests: add replacement block assertions | 1 file | + +### Purpose + +End-to-end test verifying the complete block invalidation and replacement flow works in a full supernode environment. + +**Test Flow:** +1. Start supernode with interop chains +2. Send cross-chain message that will be invalid +3. Wait for message to be included in a block +4. Interop activity detects invalid message +5. ChainContainer invalidates block, triggers rewind +6. 
Activities are notified via ResetOn +7. op-node derives replacement block (deposits-only) +8. New block at same height has different hash +9. Invalid transaction is NOT in replacement block +10. Timestamp eventually becomes verified + +### Test: `TestSupernodeInteropInvalidMessageReplacement` + +**Location:** `op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go` + +### Phases + +| Phase | What is verified | +|-------|------------------| +| 1. Setup | Supernode running, chains synced | +| 2. Send invalid message | Cross-chain exec message sent, receipt obtained | +| 3. Observe reset | Block at invalid height changes or disappears | +| 4. Detect replacement | New block at same height with different hash | +| 5. Verify replacement | Replacement hash ≠ invalid hash, invalid tx not in replacement | +| 6. Verify timestamp | `SuperRootAtTimestamp` returns verified data | + +### Assertions + +| Assertion | Purpose | +|-----------|---------| +| `resetDetected = true` | Rewind was triggered | +| `replacementDetected = true` | New block created at same height | +| `replacementHash ≠ invalidHash` | Block was actually replaced | +| `invalidTx NOT in replacement` | Invalid transaction removed | +| `verified = true` | Replacement passes verification | + +### Code Location + +| Component | File | +|-----------|------| +| Test | `op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go` | + +--- + +## Test Summary + +All unit tests added for missing coverage: + +| Test File | Tests Added | +|-----------|-------------| +| `chain_container/invalidation_test.go` | `TestDenyList_ConcurrentAccess` | +| `activity/interop/interop_test.go` | `TestInvalidateBlock` (4 cases), `TestResetOn` (6 cases) | +| `activity/interop/verified_db_test.go` | `TestVerifiedDB_RewindTo` (4 cases) | +| `op-node/rollup/engine/engine_controller_test.go` | `TestSuperAuthority_*` (4 tests) | + +## Next Steps + +Ready to commit all tests as "fill in missing unit 
tests" commit. diff --git a/op-node/rollup/engine/engine_controller_test.go b/op-node/rollup/engine/engine_controller_test.go index 03db21584f9b7..b6a0ce73670ff 100644 --- a/op-node/rollup/engine/engine_controller_test.go +++ b/op-node/rollup/engine/engine_controller_test.go @@ -2,6 +2,7 @@ package engine import ( "context" + "fmt" "math/big" mrand "math/rand" "testing" @@ -212,3 +213,149 @@ func TestInvalidPayloadForNonHead_NoDrop(t *testing.T) { } // note: nil-envelope behavior is not tested to match current implementation + +// ============================================================================= +// SuperAuthority Tests +// ============================================================================= + +// mockSuperAuthority implements SuperAuthority for testing. +type mockSuperAuthority struct { + deniedBlocks map[uint64]common.Hash + shouldError bool +} + +func newMockSuperAuthority() *mockSuperAuthority { + return &mockSuperAuthority{ + deniedBlocks: make(map[uint64]common.Hash), + } +} + +func (m *mockSuperAuthority) DenyBlock(blockNumber uint64, hash common.Hash) { + m.deniedBlocks[blockNumber] = hash +} + +func (m *mockSuperAuthority) IsDenied(blockNumber uint64, payloadHash common.Hash) (bool, error) { + if m.shouldError { + return false, fmt.Errorf("superauthority check failed") + } + deniedHash, exists := m.deniedBlocks[blockNumber] + if exists && deniedHash == payloadHash { + return true, nil + } + return false, nil +} + +func TestSuperAuthority_DeniedPayload_EmitsInvalidEvent(t *testing.T) { + cfg, _, _, payloadA1 := buildSimpleCfgAndPayload(t) + + emitter := &testutils.MockEmitter{} + sa := newMockSuperAuthority() + // Deny the payload + sa.DenyBlock(uint64(payloadA1.ExecutionPayload.BlockNumber), payloadA1.ExecutionPayload.BlockHash) + + ec := NewEngineController(context.Background(), nil, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{}, false, &testutils.MockL1Source{}, emitter, sa) + + // Expect PayloadInvalidEvent to be 
emitted (use type match since event has dynamic fields)
+	emitter.ExpectOnceType("PayloadInvalidEvent")
+
+	// Trigger payload processing
+	blockRef := eth.L2BlockRef{
+		Hash:       payloadA1.ExecutionPayload.BlockHash,
+		Number:     uint64(payloadA1.ExecutionPayload.BlockNumber),
+		ParentHash: payloadA1.ExecutionPayload.ParentHash,
+		Time:       uint64(payloadA1.ExecutionPayload.Timestamp),
+	}
+	ec.onPayloadProcess(context.Background(), PayloadProcessEvent{
+		Envelope: payloadA1,
+		Ref:      blockRef,
+	})
+
+	emitter.AssertExpectations(t)
+}
+
+func TestSuperAuthority_AllowedPayload_Proceeds(t *testing.T) {
+	cfg, _, _, payloadA1 := buildSimpleCfgAndPayload(t)
+
+	emitter := &testutils.MockEmitter{}
+	mockEngine := &testutils.MockEngine{}
+	sa := newMockSuperAuthority()
+	// Do NOT deny the payload
+
+	ec := NewEngineController(context.Background(), mockEngine, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{}, false, &testutils.MockL1Source{}, emitter, sa)
+
+	// Expect NewPayload to be called (payload is allowed)
+	mockEngine.ExpectNewPayload(payloadA1.ExecutionPayload, nil, &eth.PayloadStatusV1{Status: eth.ExecutionValid}, nil)
+	// Expect success event
+	emitter.ExpectOnceType("PayloadSuccessEvent")
+
+	blockRef := eth.L2BlockRef{
+		Hash:       payloadA1.ExecutionPayload.BlockHash,
+		Number:     uint64(payloadA1.ExecutionPayload.BlockNumber),
+		ParentHash: payloadA1.ExecutionPayload.ParentHash,
+		Time:       uint64(payloadA1.ExecutionPayload.Timestamp),
+	}
+	ec.onPayloadProcess(context.Background(), PayloadProcessEvent{
+		Envelope: payloadA1,
+		Ref:      blockRef,
+	})
+
+	mockEngine.AssertExpectations(t)
+	emitter.AssertExpectations(t)
+}
+
+func TestSuperAuthority_Error_ProceedsWithPayload(t *testing.T) {
+	cfg, _, _, payloadA1 := buildSimpleCfgAndPayload(t)
+
+	emitter := &testutils.MockEmitter{}
+	mockEngine := &testutils.MockEngine{}
+	sa := newMockSuperAuthority()
+	sa.shouldError = true // Simulate check failure
+
+	ec := NewEngineController(context.Background(), mockEngine, 
testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{}, false, &testutils.MockL1Source{}, emitter, sa)
+
+	// Despite error, expect NewPayload to be called (graceful degradation)
+	mockEngine.ExpectNewPayload(payloadA1.ExecutionPayload, nil, &eth.PayloadStatusV1{Status: eth.ExecutionValid}, nil)
+	emitter.ExpectOnceType("PayloadSuccessEvent")
+
+	blockRef := eth.L2BlockRef{
+		Hash:       payloadA1.ExecutionPayload.BlockHash,
+		Number:     uint64(payloadA1.ExecutionPayload.BlockNumber),
+		ParentHash: payloadA1.ExecutionPayload.ParentHash,
+		Time:       uint64(payloadA1.ExecutionPayload.Timestamp),
+	}
+	ec.onPayloadProcess(context.Background(), PayloadProcessEvent{
+		Envelope: payloadA1,
+		Ref:      blockRef,
+	})
+
+	mockEngine.AssertExpectations(t)
+	emitter.AssertExpectations(t)
+}
+
+func TestSuperAuthority_NilAuthority_Proceeds(t *testing.T) {
+	cfg, _, _, payloadA1 := buildSimpleCfgAndPayload(t)
+
+	emitter := &testutils.MockEmitter{}
+	mockEngine := &testutils.MockEngine{}
+
+	// nil SuperAuthority
+	ec := NewEngineController(context.Background(), mockEngine, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{}, false, &testutils.MockL1Source{}, emitter, nil)
+
+	// Expect NewPayload to be called (no authority check)
+	mockEngine.ExpectNewPayload(payloadA1.ExecutionPayload, nil, &eth.PayloadStatusV1{Status: eth.ExecutionValid}, nil)
+	emitter.ExpectOnceType("PayloadSuccessEvent")
+
+	blockRef := eth.L2BlockRef{
+		Hash:       payloadA1.ExecutionPayload.BlockHash,
+		Number:     uint64(payloadA1.ExecutionPayload.BlockNumber),
+		ParentHash: payloadA1.ExecutionPayload.ParentHash,
+		Time:       uint64(payloadA1.ExecutionPayload.Timestamp),
+	}
+	ec.onPayloadProcess(context.Background(), PayloadProcessEvent{
+		Envelope: payloadA1,
+		Ref:      blockRef,
+	})
+
+	mockEngine.AssertExpectations(t)
+	emitter.AssertExpectations(t)
+}
diff --git a/op-supernode/supernode/activity/interop/interop_test.go b/op-supernode/supernode/activity/interop/interop_test.go
index 909cec82231bb..9ddc12f32695c 100644 
--- a/op-supernode/supernode/activity/interop/interop_test.go +++ b/op-supernode/supernode/activity/interop/interop_test.go @@ -584,6 +584,114 @@ func TestHandleResult(t *testing.T) { }) } +// ============================================================================= +// TestInvalidateBlock +// ============================================================================= + +// TestInvalidateBlock verifies the invalidateBlock method correctly calls +// ChainContainer.InvalidateBlock with the right parameters and handles errors. +func TestInvalidateBlock(t *testing.T) { + t.Parallel() + + t.Run("calls chain.InvalidateBlock with correct args", func(t *testing.T) { + t.Parallel() + dataDir := t.TempDir() + + mock := newMockChainContainer(10) + chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} + interop := New(testLogger(), 1000, chains, dataDir) + require.NotNil(t, interop) + interop.ctx = context.Background() + + blockID := eth.BlockID{Number: 500, Hash: common.HexToHash("0xBAD")} + err := interop.invalidateBlock(mock.id, blockID) + require.NoError(t, err) + + // Verify InvalidateBlock was called with correct arguments + require.Len(t, mock.invalidateBlockCalls, 1) + require.Equal(t, uint64(500), mock.invalidateBlockCalls[0].height) + require.Equal(t, common.HexToHash("0xBAD"), mock.invalidateBlockCalls[0].payloadHash) + }) + + t.Run("returns error when chain not found", func(t *testing.T) { + t.Parallel() + dataDir := t.TempDir() + + mock := newMockChainContainer(10) + chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} + interop := New(testLogger(), 1000, chains, dataDir) + require.NotNil(t, interop) + interop.ctx = context.Background() + + // Try to invalidate on a chain that doesn't exist + unknownChain := eth.ChainIDFromUInt64(999) + blockID := eth.BlockID{Number: 500, Hash: common.HexToHash("0xBAD")} + err := interop.invalidateBlock(unknownChain, blockID) + + require.Error(t, err) + require.Contains(t, err.Error(), "not found") + + // Verify 
InvalidateBlock was NOT called + require.Len(t, mock.invalidateBlockCalls, 0) + }) + + t.Run("returns error when chain.InvalidateBlock fails", func(t *testing.T) { + t.Parallel() + dataDir := t.TempDir() + + mock := newMockChainContainer(10) + mock.invalidateBlockErr = errors.New("engine failure") + chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} + interop := New(testLogger(), 1000, chains, dataDir) + require.NotNil(t, interop) + interop.ctx = context.Background() + + blockID := eth.BlockID{Number: 500, Hash: common.HexToHash("0xBAD")} + err := interop.invalidateBlock(mock.id, blockID) + + require.Error(t, err) + require.Contains(t, err.Error(), "engine failure") + }) + + t.Run("handleResult calls invalidateBlock for each invalid head", func(t *testing.T) { + t.Parallel() + dataDir := t.TempDir() + + mock1 := newMockChainContainer(10) + mock2 := newMockChainContainer(8453) + chains := map[eth.ChainID]cc.ChainContainer{mock1.id: mock1, mock2.id: mock2} + interop := New(testLogger(), 1000, chains, dataDir) + require.NotNil(t, interop) + interop.ctx = context.Background() + + // Create result with invalid heads on both chains + invalidResult := Result{ + Timestamp: 1000, + L1Head: eth.BlockID{Number: 100, Hash: common.HexToHash("0xL1")}, + L2Heads: map[eth.ChainID]eth.BlockID{ + mock1.id: {Number: 500, Hash: common.HexToHash("0xL2-1")}, + mock2.id: {Number: 600, Hash: common.HexToHash("0xL2-2")}, + }, + InvalidHeads: map[eth.ChainID]eth.BlockID{ + mock1.id: {Number: 500, Hash: common.HexToHash("0xBAD1")}, + mock2.id: {Number: 600, Hash: common.HexToHash("0xBAD2")}, + }, + } + + err := interop.handleResult(invalidResult) + require.NoError(t, err) + + // Verify both chains had InvalidateBlock called + require.Len(t, mock1.invalidateBlockCalls, 1) + require.Equal(t, uint64(500), mock1.invalidateBlockCalls[0].height) + require.Equal(t, common.HexToHash("0xBAD1"), mock1.invalidateBlockCalls[0].payloadHash) + + require.Len(t, mock2.invalidateBlockCalls, 1) + 
require.Equal(t, uint64(600), mock2.invalidateBlockCalls[0].height) + require.Equal(t, common.HexToHash("0xBAD2"), mock2.invalidateBlockCalls[0].payloadHash) + }) +} + // ============================================================================= // TestProgressAndRecord // ============================================================================= @@ -824,6 +932,16 @@ type mockChainContainer struct { lastRequestedTimestamp uint64 mu sync.Mutex + + // InvalidateBlock tracking + invalidateBlockCalls []invalidateBlockCall + invalidateBlockRet bool + invalidateBlockErr error +} + +type invalidateBlockCall struct { + height uint64 + payloadHash common.Hash } func newMockChainContainer(id uint64) *mockChainContainer { @@ -894,7 +1012,10 @@ func (m *mockChainContainer) RewindEngine(ctx context.Context, timestamp uint64) } func (m *mockChainContainer) BlockTime() uint64 { return 1 } func (m *mockChainContainer) InvalidateBlock(ctx context.Context, height uint64, payloadHash common.Hash) (bool, error) { - return false, nil + m.mu.Lock() + defer m.mu.Unlock() + m.invalidateBlockCalls = append(m.invalidateBlockCalls, invalidateBlockCall{height: height, payloadHash: payloadHash}) + return m.invalidateBlockRet, m.invalidateBlockErr } func (m *mockChainContainer) IsDenied(height uint64, payloadHash common.Hash) (bool, error) { return false, nil @@ -915,6 +1036,10 @@ type mockLogsDBForInterop struct { openBlockErr error containsSeal suptypes.BlockSeal containsErr error + + // Track calls for verification + rewindCalls []eth.BlockID + clearCalls int } func (m *mockLogsDBForInterop) LatestSealedBlock() (eth.BlockID, bool) { return eth.BlockID{}, false } @@ -942,8 +1067,180 @@ func (m *mockLogsDBForInterop) AddLog(logHash common.Hash, parentBlock eth.Block func (m *mockLogsDBForInterop) SealBlock(parentHash common.Hash, block eth.BlockID, timestamp uint64) error { return nil } -func (m *mockLogsDBForInterop) Rewind(inv reads.Invalidator, newHead eth.BlockID) error { return nil 
} -func (m *mockLogsDBForInterop) Clear(inv reads.Invalidator) error { return nil } -func (m *mockLogsDBForInterop) Close() error { return nil } +func (m *mockLogsDBForInterop) Rewind(inv reads.Invalidator, newHead eth.BlockID) error { + m.rewindCalls = append(m.rewindCalls, newHead) + return nil +} +func (m *mockLogsDBForInterop) Clear(inv reads.Invalidator) error { + m.clearCalls++ + return nil +} +func (m *mockLogsDBForInterop) Close() error { return nil } var _ LogsDB = (*mockLogsDBForInterop)(nil) + +// ============================================================================= +// TestResetOn +// ============================================================================= + +func TestResetOn(t *testing.T) { + t.Parallel() + + t.Run("rewinds logsDB when previous block available", func(t *testing.T) { + t.Parallel() + dataDir := t.TempDir() + + mock := newMockChainContainer(10) + // BlockAtTimestamp will return a valid block + mock.blockAtTimestamp = eth.L2BlockRef{ + Hash: common.HexToHash("0xPREV"), + Number: 99, + } + + mockLogsDB := &mockLogsDBForInterop{} + + chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} + interop := New(testLogger(), 1000, chains, dataDir) + require.NotNil(t, interop) + interop.ctx = context.Background() + interop.logsDBs[mock.id] = mockLogsDB + + // Reset at timestamp 100 (blockTime=1, so prev=99) + interop.ResetOn(mock.id, 100) + + // Verify logsDB.Rewind was called + require.Len(t, mockLogsDB.rewindCalls, 1) + require.Equal(t, uint64(99), mockLogsDB.rewindCalls[0].Number) + require.Equal(t, 0, mockLogsDB.clearCalls) + }) + + t.Run("clears logsDB when previous block not available", func(t *testing.T) { + t.Parallel() + dataDir := t.TempDir() + + mock := newMockChainContainer(10) + // BlockAtTimestamp returns error + mock.blockAtTimestampErr = errors.New("block not found") + + mockLogsDB := &mockLogsDBForInterop{} + + chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} + interop := New(testLogger(), 1000, chains, 
dataDir) + require.NotNil(t, interop) + interop.ctx = context.Background() + interop.logsDBs[mock.id] = mockLogsDB + + // Reset at timestamp 100 + interop.ResetOn(mock.id, 100) + + // Verify logsDB.Clear was called + require.Len(t, mockLogsDB.rewindCalls, 0) + require.Equal(t, 1, mockLogsDB.clearCalls) + }) + + t.Run("clears logsDB when timestamp at or before blockTime", func(t *testing.T) { + t.Parallel() + dataDir := t.TempDir() + + mock := newMockChainContainer(10) + mockLogsDB := &mockLogsDBForInterop{} + + chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} + interop := New(testLogger(), 1000, chains, dataDir) + require.NotNil(t, interop) + interop.ctx = context.Background() + interop.logsDBs[mock.id] = mockLogsDB + + // Reset at timestamp 1 (blockTime=1, so no previous block) + interop.ResetOn(mock.id, 1) + + // Verify logsDB.Clear was called + require.Len(t, mockLogsDB.rewindCalls, 0) + require.Equal(t, 1, mockLogsDB.clearCalls) + }) + + t.Run("rewinds verifiedDB", func(t *testing.T) { + t.Parallel() + dataDir := t.TempDir() + + mock := newMockChainContainer(10) + mock.blockAtTimestamp = eth.L2BlockRef{Number: 99} + + mockLogsDB := &mockLogsDBForInterop{} + + chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} + interop := New(testLogger(), 1000, chains, dataDir) + require.NotNil(t, interop) + interop.ctx = context.Background() + interop.logsDBs[mock.id] = mockLogsDB + + // Add some verified results + for ts := uint64(98); ts <= 102; ts++ { + err := interop.verifiedDB.Commit(VerifiedResult{ + Timestamp: ts, + L1Head: eth.BlockID{Number: ts}, + L2Heads: map[eth.ChainID]eth.BlockID{mock.id: {Number: ts}}, + }) + require.NoError(t, err) + } + + // Reset at timestamp 100 (should remove 100, 101, 102) + interop.ResetOn(mock.id, 100) + + // Verify results at 98, 99 still exist + has, _ := interop.verifiedDB.Has(98) + require.True(t, has) + has, _ = interop.verifiedDB.Has(99) + require.True(t, has) + + // Verify results at 100, 101, 102 are gone + 
has, _ = interop.verifiedDB.Has(100) + require.False(t, has) + has, _ = interop.verifiedDB.Has(101) + require.False(t, has) + has, _ = interop.verifiedDB.Has(102) + require.False(t, has) + }) + + t.Run("resets currentL1", func(t *testing.T) { + t.Parallel() + dataDir := t.TempDir() + + mock := newMockChainContainer(10) + mock.blockAtTimestamp = eth.L2BlockRef{Number: 99} + + mockLogsDB := &mockLogsDBForInterop{} + + chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} + interop := New(testLogger(), 1000, chains, dataDir) + require.NotNil(t, interop) + interop.ctx = context.Background() + interop.logsDBs[mock.id] = mockLogsDB + + // Set currentL1 to some value + interop.currentL1 = eth.BlockID{Number: 500, Hash: common.HexToHash("0xL1")} + + // Reset + interop.ResetOn(mock.id, 100) + + // Verify currentL1 is reset to zero + require.Equal(t, eth.BlockID{}, interop.currentL1) + }) + + t.Run("handles unknown chain gracefully", func(t *testing.T) { + t.Parallel() + dataDir := t.TempDir() + + mock := newMockChainContainer(10) + chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} + interop := New(testLogger(), 1000, chains, dataDir) + require.NotNil(t, interop) + interop.ctx = context.Background() + + // Reset on unknown chain (should not panic) + unknownChain := eth.ChainIDFromUInt64(999) + interop.ResetOn(unknownChain, 100) + + // Just verify it didn't panic + }) +} diff --git a/op-supernode/supernode/activity/interop/verified_db_test.go b/op-supernode/supernode/activity/interop/verified_db_test.go index 43885fd8d2eb0..48c9635c9fc3d 100644 --- a/op-supernode/supernode/activity/interop/verified_db_test.go +++ b/op-supernode/supernode/activity/interop/verified_db_test.go @@ -175,3 +175,160 @@ func TestVerifiedDB_Persistence(t *testing.T) { }) require.NoError(t, err) } + +func TestVerifiedDB_RewindTo(t *testing.T) { + t.Parallel() + + t.Run("removes entries at and after timestamp", func(t *testing.T) { + t.Parallel() + dataDir := t.TempDir() + + db, err := 
OpenVerifiedDB(dataDir) + require.NoError(t, err) + defer db.Close() + + chainID := eth.ChainIDFromUInt64(10) + + // Commit several timestamps + for ts := uint64(100); ts <= 105; ts++ { + err = db.Commit(VerifiedResult{ + Timestamp: ts, + L1Head: eth.BlockID{Hash: common.BytesToHash([]byte{byte(ts)}), Number: ts}, + L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.BytesToHash([]byte{byte(ts + 100)}), Number: ts}}, + }) + require.NoError(t, err) + } + + // Verify all exist + lastTs, _ := db.LastTimestamp() + require.Equal(t, uint64(105), lastTs) + + // Rewind to 103 (should remove 103, 104, 105) + deleted, err := db.RewindTo(103) + require.NoError(t, err) + require.True(t, deleted) + + // Verify 100, 101, 102 still exist + for ts := uint64(100); ts <= 102; ts++ { + has, err := db.Has(ts) + require.NoError(t, err) + require.True(t, has, "timestamp %d should still exist", ts) + } + + // Verify 103, 104, 105 are gone + for ts := uint64(103); ts <= 105; ts++ { + has, err := db.Has(ts) + require.NoError(t, err) + require.False(t, has, "timestamp %d should be deleted", ts) + } + + // Last timestamp should be updated to 102 + lastTs, _ = db.LastTimestamp() + require.Equal(t, uint64(102), lastTs) + }) + + t.Run("returns false when no entries deleted", func(t *testing.T) { + t.Parallel() + dataDir := t.TempDir() + + db, err := OpenVerifiedDB(dataDir) + require.NoError(t, err) + defer db.Close() + + chainID := eth.ChainIDFromUInt64(10) + + // Commit up to timestamp 100 + for ts := uint64(98); ts <= 100; ts++ { + err = db.Commit(VerifiedResult{ + Timestamp: ts, + L1Head: eth.BlockID{Hash: common.BytesToHash([]byte{byte(ts)}), Number: ts}, + L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.BytesToHash([]byte{byte(ts + 100)}), Number: ts}}, + }) + require.NoError(t, err) + } + + // Rewind to 200 (nothing to delete) + deleted, err := db.RewindTo(200) + require.NoError(t, err) + require.False(t, deleted) + + // All entries should still exist + lastTs, _ := 
db.LastTimestamp() + require.Equal(t, uint64(100), lastTs) + }) + + t.Run("rewind all entries", func(t *testing.T) { + t.Parallel() + dataDir := t.TempDir() + + db, err := OpenVerifiedDB(dataDir) + require.NoError(t, err) + defer db.Close() + + chainID := eth.ChainIDFromUInt64(10) + + // Commit a few entries + for ts := uint64(100); ts <= 102; ts++ { + err = db.Commit(VerifiedResult{ + Timestamp: ts, + L1Head: eth.BlockID{Hash: common.BytesToHash([]byte{byte(ts)}), Number: ts}, + L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.BytesToHash([]byte{byte(ts + 100)}), Number: ts}}, + }) + require.NoError(t, err) + } + + // Rewind to 0 (delete all) + deleted, err := db.RewindTo(0) + require.NoError(t, err) + require.True(t, deleted) + + // No entries should exist + for ts := uint64(100); ts <= 102; ts++ { + has, err := db.Has(ts) + require.NoError(t, err) + require.False(t, has) + } + + // Last timestamp should be reset to uninitialized + _, initialized := db.LastTimestamp() + require.False(t, initialized) + }) + + t.Run("allows sequential commits after rewind", func(t *testing.T) { + t.Parallel() + dataDir := t.TempDir() + + db, err := OpenVerifiedDB(dataDir) + require.NoError(t, err) + defer db.Close() + + chainID := eth.ChainIDFromUInt64(10) + + // Commit 100-105 + for ts := uint64(100); ts <= 105; ts++ { + err = db.Commit(VerifiedResult{ + Timestamp: ts, + L1Head: eth.BlockID{Hash: common.BytesToHash([]byte{byte(ts)}), Number: ts}, + L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.BytesToHash([]byte{byte(ts + 100)}), Number: ts}}, + }) + require.NoError(t, err) + } + + // Rewind to 103 + _, err = db.RewindTo(103) + require.NoError(t, err) + + // Should be able to commit 103 again (sequential from 102) + err = db.Commit(VerifiedResult{ + Timestamp: 103, + L1Head: eth.BlockID{Hash: common.HexToHash("0xNEW"), Number: 103}, + L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.HexToHash("0xNEW2"), Number: 103}}, + }) + require.NoError(t, 
err) + + // Verify new data + result, err := db.Get(103) + require.NoError(t, err) + require.Equal(t, common.HexToHash("0xNEW"), result.L1Head.Hash) + }) +} diff --git a/op-supernode/supernode/chain_container/invalidation_test.go b/op-supernode/supernode/chain_container/invalidation_test.go index eab587256a6c1..5cd70fa6ba28e 100644 --- a/op-supernode/supernode/chain_container/invalidation_test.go +++ b/op-supernode/supernode/chain_container/invalidation_test.go @@ -3,6 +3,7 @@ package chain_container import ( "context" "path/filepath" + "sync" "testing" opnodecfg "github.com/ethereum-optimism/optimism/op-node/config" @@ -512,3 +513,73 @@ func TestIsDenied(t *testing.T) { func testLogger() gethlog.Logger { return gethlog.New() } + +// TestDenyList_ConcurrentAccess verifies the DenyList is safe for concurrent use. +// 10 goroutines each perform 100 Add and Contains operations simultaneously. +func TestDenyList_ConcurrentAccess(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + dl, err := OpenDenyList(dir) + require.NoError(t, err) + defer dl.Close() + + const numAccessors = 10 + const opsPerAccessor = 100 + + // Helper to generate deterministic hash from accessor and op index + makeHash := func(accessorID, opIdx int) common.Hash { + var h common.Hash + h[0] = byte(accessorID) + h[1] = byte(opIdx) + h[2] = byte(opIdx >> 8) + return h + } + + // Each accessor writes to its own height range and reads from all ranges + var wg sync.WaitGroup + wg.Add(numAccessors) + + for i := 0; i < numAccessors; i++ { + go func(accessorID int) { + defer wg.Done() + + baseHeight := uint64(accessorID * opsPerAccessor) + + for j := 0; j < opsPerAccessor; j++ { + height := baseHeight + uint64(j) + hash := makeHash(accessorID, j) + + // Write + err := dl.Add(height, hash) + require.NoError(t, err) + + // Read own write + found, err := dl.Contains(height, hash) + require.NoError(t, err) + require.True(t, found, "accessor %d should find its own hash at height %d", accessorID, height) + + 
// Read from another accessor's range (may or may not exist yet) + otherAccessor := (accessorID + 1) % numAccessors + otherHeight := uint64(otherAccessor*opsPerAccessor) + uint64(j/2) + _, err = dl.Contains(otherHeight, common.Hash{}) + require.NoError(t, err) // Should not error even if not found + } + }(i) + } + + wg.Wait() + + // Verify final state: each accessor should have written opsPerAccessor hashes + for i := 0; i < numAccessors; i++ { + baseHeight := uint64(i * opsPerAccessor) + for j := 0; j < opsPerAccessor; j++ { + height := baseHeight + uint64(j) + hash := makeHash(i, j) + + found, err := dl.Contains(height, hash) + require.NoError(t, err) + require.True(t, found, "hash from accessor %d at height %d should exist after concurrent access", i, height) + } + } +} From 882d008b1cfa82f9b6f2fbdae72ea310f1f0ece9 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Fri, 6 Feb 2026 15:21:41 -0600 Subject: [PATCH 11/23] address review feedback: genesis block guard and docs - Add guard check rejecting InvalidateBlock for height=0 (genesis block) - Document SetResetCallback must only be called during initialization - Add test case for genesis block invalidation error --- .../supernode/chain_container/invalidation.go | 8 ++++++ .../chain_container/invalidation_test.go | 27 +++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/op-supernode/supernode/chain_container/invalidation.go b/op-supernode/supernode/chain_container/invalidation.go index 8925fdd945eb4..e66509b5c1ea2 100644 --- a/op-supernode/supernode/chain_container/invalidation.go +++ b/op-supernode/supernode/chain_container/invalidation.go @@ -151,11 +151,17 @@ func (d *DenyList) Close() error { // InvalidateBlock adds a block to the deny list and triggers a rewind if the chain // currently uses that block at the specified height. // Returns true if a rewind was triggered, false otherwise. +// Note: Genesis block (height=0) cannot be invalidated as there is no prior block to rewind to. 
func (c *simpleChainContainer) InvalidateBlock(ctx context.Context, height uint64, payloadHash common.Hash) (bool, error) { if c.denyList == nil { return false, fmt.Errorf("deny list not initialized") } + // Cannot invalidate genesis block - there is no prior block to rewind to + if height == 0 { + return false, fmt.Errorf("cannot invalidate genesis block (height=0)") + } + // Add to deny list first if err := c.denyList.Add(height, payloadHash); err != nil { return false, fmt.Errorf("failed to add block to deny list: %w", err) @@ -213,6 +219,8 @@ func (c *simpleChainContainer) InvalidateBlock(ctx context.Context, height uint6 } // SetResetCallback sets a callback that is invoked when the chain resets. +// This must only be called during initialization, before the chain container starts processing. +// Calling this while InvalidateBlock may be running is unsafe. func (c *simpleChainContainer) SetResetCallback(cb ResetCallback) { c.onReset = cb } diff --git a/op-supernode/supernode/chain_container/invalidation_test.go b/op-supernode/supernode/chain_container/invalidation_test.go index 5cd70fa6ba28e..6cb2c13afa910 100644 --- a/op-supernode/supernode/chain_container/invalidation_test.go +++ b/op-supernode/supernode/chain_container/invalidation_test.go @@ -395,6 +395,33 @@ func TestInvalidateBlock(t *testing.T) { }, } + // Separate test for genesis block (height=0) which should error + t.Run("genesis block invalidation returns error", func(t *testing.T) { + t.Parallel() + dir := t.TempDir() + + dl, err := OpenDenyList(filepath.Join(dir, "denylist")) + require.NoError(t, err) + defer dl.Close() + + c := &simpleChainContainer{ + denyList: dl, + log: testLogger(), + } + + ctx := context.Background() + rewound, err := c.InvalidateBlock(ctx, 0, common.HexToHash("0xgenesis")) + + require.Error(t, err) + require.Contains(t, err.Error(), "cannot invalidate genesis block") + require.False(t, rewound) + + // Genesis hash should NOT be added to denylist + found, err := 
dl.Contains(0, common.HexToHash("0xgenesis")) + require.NoError(t, err) + require.False(t, found, "genesis block should not be added to denylist") + }) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() From 4bb97f35c94fc72fd05cef9b293dddcdf2e318dc Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Fri, 6 Feb 2026 15:26:32 -0600 Subject: [PATCH 12/23] op-acceptance-tests: rename halt package to reorg, consolidate tests - Rename halt/ package to reorg/ (reflects actual behavior) - Move invalid_message_replacement_test.go into reorg package - Delete obsolete invalid_message_halt_test.go (superseded by reorg test) The halt test tested old behavior where invalid messages caused the chain to halt. With block invalidation, the chain now rewinds and replaces the invalid block with a deposits-only block. --- .../interop/halt/invalid_message_halt_test.go | 251 ------------------ .../interop/{halt => reorg}/init_test.go | 4 +- .../invalid_message_reorg_test.go} | 28 +- 3 files changed, 29 insertions(+), 254 deletions(-) delete mode 100644 op-acceptance-tests/tests/supernode/interop/halt/invalid_message_halt_test.go rename op-acceptance-tests/tests/supernode/interop/{halt => reorg}/init_test.go (71%) rename op-acceptance-tests/tests/supernode/interop/{invalid_message_replacement_test.go => reorg/invalid_message_reorg_test.go} (94%) diff --git a/op-acceptance-tests/tests/supernode/interop/halt/invalid_message_halt_test.go b/op-acceptance-tests/tests/supernode/interop/halt/invalid_message_halt_test.go deleted file mode 100644 index c6ae93bc1ff73..0000000000000 --- a/op-acceptance-tests/tests/supernode/interop/halt/invalid_message_halt_test.go +++ /dev/null @@ -1,251 +0,0 @@ -package halt - -import ( - "context" - "math/rand" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" - 
"github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-service/bigs" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/testutils" - "github.com/ethereum-optimism/optimism/op-service/txintent" -) - -// TestSupernodeInteropInvalidMessageHalt tests that: -// WHEN: an invalid Executing Message is included in a chain -// THEN: -// - Validity Never Advances to include the Invalid Block -// - Local Safety and Unsafety for both chains continue to advance -// -// This is a TDD test that starts a cycle to implement the Interop Activity's actual algorithm. -func TestSupernodeInteropInvalidMessageHalt(gt *testing.T) { - t := devtest.SerialT(gt) - sys := presets.NewTwoL2SupernodeInterop(t, 0) - - ctx := t.Ctx() - snClient := sys.SuperNodeClient() - - // Create funded EOAs on both chains - alice := sys.FunderA.NewFundedEOA(eth.OneEther) - bob := sys.FunderB.NewFundedEOA(eth.OneEther) - - // Deploy event logger on chain A - eventLoggerA := alice.DeployEventLogger() - - // Sync chains - sys.L2B.CatchUpTo(sys.L2A) - sys.L2A.CatchUpTo(sys.L2B) - - rng := rand.New(rand.NewSource(12345)) - - // Send an initiating message on chain A - initTrigger := randomInitTrigger(rng, eventLoggerA, 2, 10) - initTx, initReceipt := alice.SendInitMessage(initTrigger) - - t.Logger().Info("initiating message sent on chain A", - "block", initReceipt.BlockNumber, - "hash", initReceipt.BlockHash, - ) - - // Wait for chain B to catch up - sys.L2B.WaitForBlock() - - // Record the verified timestamp before the invalid message - // We need to know what timestamp was verified before the invalid exec message - blockTime := sys.L2A.Escape().RollupConfig().BlockTime - genesisTime := sys.L2A.Escape().RollupConfig().Genesis.L2Time - - // Wait for some timestamps to be verified first - 
targetTimestamp := genesisTime + blockTime*2 - t.Require().Eventually(func() bool { - resp, err := snClient.SuperRootAtTimestamp(ctx, targetTimestamp) - if err != nil { - return false - } - t.Logger().Info("super root at timestamp", "timestamp", targetTimestamp, "data", resp.Data) - return resp.Data != nil - }, 60*time.Second, time.Second, "initial timestamps should be verified") - - t.Logger().Info("initial verification confirmed", "timestamp", targetTimestamp) - - // Send an INVALID executing message on chain B - // Modify the message identifier to make it invalid (wrong log index) - invalidExecReceipt := sendInvalidExecMessage(t, bob, initTx, 0) - - invalidBlockNumber := bigs.Uint64Strict(invalidExecReceipt.BlockNumber) - invalidBlock := sys.L2ELB.BlockRefByHash(invalidExecReceipt.BlockHash) - invalidBlockTimestamp := invalidBlock.Time - - t.Logger().Info("invalid executing message sent on chain B", - "block", invalidExecReceipt.BlockNumber, - "hash", invalidExecReceipt.BlockHash, - "timestamp", invalidBlockTimestamp, - ) - - // Record the safety status before waiting - initialStatusA := sys.L2ACL.SyncStatus() - initialStatusB := sys.L2BCL.SyncStatus() - - t.Logger().Info("initial safety status", - "chainA_local_safe", initialStatusA.LocalSafeL2.Number, - "chainA_unsafe", initialStatusA.UnsafeL2.Number, - "chainB_local_safe", initialStatusB.LocalSafeL2.Number, - "chainB_unsafe", initialStatusB.UnsafeL2.Number, - ) - - // Now we verify the key behaviors over time: - // 1. Validity should NEVER advance to include the invalid block - // 2. 
Local Safety and Unsafety should continue to advance for both chains - - observationDuration := 30 * time.Second - checkInterval := time.Second - - start := time.Now() - var lastVerifiedTimestamp uint64 - - for time.Since(start) < observationDuration { - time.Sleep(checkInterval) - - // Check current safety status - statusA := sys.L2ACL.SyncStatus() - statusB := sys.L2BCL.SyncStatus() - - // KEY ASSERTION 1: Validity should NOT advance past the invalid block's timestamp - // Check if the invalid block's timestamp has been verified (it should NOT be) - resp, err := snClient.SuperRootAtTimestamp(ctx, invalidBlockTimestamp) - t.Require().NoError(err, "SuperRootAtTimestamp should not error") - - if resp.Data != nil { - t.Logger().Error("UNEXPECTED: invalid block timestamp was verified!", - "timestamp", invalidBlockTimestamp, - "invalid_block", invalidBlockNumber, - ) - t.FailNow() - } - - // Track the last verified timestamp (for timestamps before the invalid block) - if invalidBlockTimestamp > blockTime { - checkTs := invalidBlockTimestamp - blockTime - checkResp, _ := snClient.SuperRootAtTimestamp(ctx, checkTs) - if checkResp.Data != nil { - lastVerifiedTimestamp = checkTs - } - } - - t.Logger().Info("observation tick", - "elapsed", time.Since(start).Round(time.Second), - "chainA_local_safe", statusA.LocalSafeL2.Number, - "chainA_unsafe", statusA.UnsafeL2.Number, - "chainB_local_safe", statusB.LocalSafeL2.Number, - "chainB_unsafe", statusB.UnsafeL2.Number, - "last_verified_ts", lastVerifiedTimestamp, - "invalid_block_ts", invalidBlockTimestamp, - ) - } - - // Final assertions after observation period - - finalStatusA := sys.L2ACL.SyncStatus() - finalStatusB := sys.L2BCL.SyncStatus() - - // ASSERTION: Local Safety should have advanced for both chains - t.Require().Greater(finalStatusA.LocalSafeL2.Number, initialStatusA.LocalSafeL2.Number, - "chain A local safe head should advance") - t.Require().Greater(finalStatusB.LocalSafeL2.Number, 
initialStatusB.LocalSafeL2.Number, - "chain B local safe head should advance") - - // ASSERTION: Unsafety should have advanced for both chains - t.Require().Greater(finalStatusA.UnsafeL2.Number, initialStatusA.UnsafeL2.Number, - "chain A unsafe head should advance") - t.Require().Greater(finalStatusB.UnsafeL2.Number, initialStatusB.UnsafeL2.Number, - "chain B unsafe head should advance") - - // ASSERTION: The invalid block's timestamp should still NOT be verified - finalResp, err := snClient.SuperRootAtTimestamp(ctx, invalidBlockTimestamp) - t.Require().NoError(err) - t.Require().Nil(finalResp.Data, - "invalid block timestamp should NEVER be verified") - - t.Logger().Info("test complete: invalid message correctly halted validity advancement", - "final_chainA_local_safe", finalStatusA.LocalSafeL2.Number, - "final_chainA_unsafe", finalStatusA.UnsafeL2.Number, - "final_chainB_local_safe", finalStatusB.LocalSafeL2.Number, - "final_chainB_unsafe", finalStatusB.UnsafeL2.Number, - "invalid_block_timestamp", invalidBlockTimestamp, - "last_verified_timestamp", lastVerifiedTimestamp, - ) -} - -// sendInvalidExecMessage sends an executing message with a modified (invalid) identifier. -// This makes the message invalid because it references a non-existent log index. 
-func sendInvalidExecMessage( - t devtest.T, - bob *dsl.EOA, - initIntent *txintent.IntentTx[*txintent.InitTrigger, *txintent.InteropOutput], - eventIdx int, -) *types.Receipt { - ctx := t.Ctx() - - // Evaluate the init result to get the message entries - result, err := initIntent.Result.Eval(ctx) - t.Require().NoError(err, "failed to evaluate init result") - t.Require().Greater(len(result.Entries), eventIdx, "event index out of range") - - // Get the message and modify it to be invalid - msg := result.Entries[eventIdx] - - // Make the message invalid by setting an impossible log index - // This creates a message that claims to reference a log that doesn't exist - msg.Identifier.LogIndex = 9999 - - // Create the exec trigger with the invalid message - execTrigger := &txintent.ExecTrigger{ - Executor: constants.CrossL2Inbox, - Msg: msg, - } - - // Create the intent with the invalid trigger - tx := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](bob.Plan()) - tx.Content.DependOn(&initIntent.Result) - tx.Content.Fn(func(ctx context.Context) (*txintent.ExecTrigger, error) { - return execTrigger, nil - }) - - receipt, err := tx.PlannedTx.Included.Eval(ctx) - t.Require().NoError(err, "invalid exec msg receipt not found") - t.Logger().Info("invalid exec message included", "chain", bob.ChainID(), "block", receipt.BlockNumber) - - return receipt -} - -// randomInitTrigger creates a random init trigger for testing. 
-func randomInitTrigger(rng *rand.Rand, eventLoggerAddress common.Address, topicCount, dataLen int) *txintent.InitTrigger { - if topicCount > 4 { - topicCount = 4 // Max 4 topics in EVM logs - } - if topicCount < 1 { - topicCount = 1 - } - if dataLen < 1 { - dataLen = 1 - } - - topics := make([][32]byte, topicCount) - for i := range topics { - copy(topics[i][:], testutils.RandomData(rng, 32)) - } - - return &txintent.InitTrigger{ - Emitter: eventLoggerAddress, - Topics: topics, - OpaqueData: testutils.RandomData(rng, dataLen), - } -} diff --git a/op-acceptance-tests/tests/supernode/interop/halt/init_test.go b/op-acceptance-tests/tests/supernode/interop/reorg/init_test.go similarity index 71% rename from op-acceptance-tests/tests/supernode/interop/halt/init_test.go rename to op-acceptance-tests/tests/supernode/interop/reorg/init_test.go index e93e0029d8021..8cc9d75999607 100644 --- a/op-acceptance-tests/tests/supernode/interop/halt/init_test.go +++ b/op-acceptance-tests/tests/supernode/interop/reorg/init_test.go @@ -1,4 +1,4 @@ -package halt +package reorg import ( "os" @@ -8,7 +8,7 @@ import ( ) // TestMain creates an isolated two-L2 setup with a shared supernode that has interop enabled. -// This package tests invalid message scenarios that would pollute other tests if run on a shared devnet. +// This package tests block invalidation and reorg scenarios that would pollute other tests if run on a shared devnet. 
func TestMain(m *testing.M) { _ = os.Setenv("DEVSTACK_L2CL_KIND", "supernode") presets.DoMain(m, presets.WithTwoL2SupernodeInterop(0)) diff --git a/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go b/op-acceptance-tests/tests/supernode/interop/reorg/invalid_message_reorg_test.go similarity index 94% rename from op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go rename to op-acceptance-tests/tests/supernode/interop/reorg/invalid_message_reorg_test.go index a611ffaea4daa..99adc3f2370c8 100644 --- a/op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go +++ b/op-acceptance-tests/tests/supernode/interop/reorg/invalid_message_reorg_test.go @@ -1,4 +1,4 @@ -package interop +package reorg import ( "context" @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" @@ -14,6 +15,7 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testutils" "github.com/ethereum-optimism/optimism/op-service/txintent" ) @@ -335,3 +337,27 @@ func sendInvalidExecMessageForReplacement( return receipt } + +// randomInitTrigger creates a random init trigger for testing. 
+func randomInitTrigger(rng *rand.Rand, eventLoggerAddress common.Address, topicCount, dataLen int) *txintent.InitTrigger { + if topicCount > 4 { + topicCount = 4 // Max 4 topics in EVM logs + } + if topicCount < 1 { + topicCount = 1 + } + if dataLen < 1 { + dataLen = 1 + } + + topics := make([][32]byte, topicCount) + for i := range topics { + copy(topics[i][:], testutils.RandomData(rng, 32)) + } + + return &txintent.InitTrigger{ + Emitter: eventLoggerAddress, + Topics: topics, + OpaqueData: testutils.RandomData(rng, dataLen), + } +} From 490412e8db5c2194151005bc5071b95a70783364 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Mon, 9 Feb 2026 11:34:47 -0600 Subject: [PATCH 13/23] sub-feature 6: interop test control (PauseInterop/ResumeInterop) Add test-only control methods for the interop activity to support precise timing control in acceptance tests. Specification: - PauseInterop(ts): When called, interop activity returns early when it would process the given timestamp, without making progress - ResumeInterop(): Clears the pause, allowing normal processing - Zero value indicates 'not paused' (always process all values) - Atomic read/write for concurrent safety Implementation layers: - Interop: pauseAtTimestamp atomic.Uint64 field + check in progressInterop() - Supernode: delegates to interop activity by type assertion - sysgo.SuperNode: exposes methods, delegates to supernode.Supernode - stack.InteropTestControl: interface defining the test control contract - Orchestrator: InteropTestControl(id) method to get control for a supernode - DSL Supernode: NewSupernodeWithTestControl() + wrapper methods - Preset: wires up test control in NewTwoL2SupernodeInterop() This enables acceptance tests to: sys.Supernode.PauseInterop(targetTimestamp + 1) // ... perform test setup ... 
sys.Supernode.ResumeInterop() --- BlockInvalidation_Feature.md | 41 ++++ .../reorg/invalid_message_reorg_test.go | 197 +++--------------- op-devstack/dsl/supernode.go | 30 ++- op-devstack/presets/twol2.go | 9 +- op-devstack/stack/supernode.go | 13 ++ op-devstack/sysgo/l2_cl_supernode.go | 20 ++ op-devstack/sysgo/orchestrator.go | 11 + .../supernode/activity/interop/interop.go | 28 +++ .../chain_container/invalidation_test.go | 58 +++--- op-supernode/supernode/supernode.go | 25 +++ 10 files changed, 234 insertions(+), 198 deletions(-) diff --git a/BlockInvalidation_Feature.md b/BlockInvalidation_Feature.md index 96eea0cbad215..c5f725cf45c5e 100644 --- a/BlockInvalidation_Feature.md +++ b/BlockInvalidation_Feature.md @@ -451,3 +451,44 @@ All unit tests added for missing coverage: ## Next Steps Ready to commit all tests as "fill in missing unit tests" commit. + +--- + +## Sub-Feature 6: Test Control for Interop Activity + +### Purpose +Provide integration test control for pausing and resuming the interop activity at specific timestamps. This allows acceptance tests to precisely control when interop validation occurs. + +### Specification +- `PauseInterop(ts uint64)`: When called, the interop activity pauses at the given timestamp - if it would process that timestamp in its progress loop, it returns early without making progress. +- `ResumeInterop()`: Clears the pause, allowing normal processing to continue. +- Zero value for `ts` indicates "not paused" (always process all values). +- Values are stored atomically for concurrent read/write safety. +- This is test-only functionality, not wired at production level. 
+ +### Implementation + +| Component | Location | Changes | +|-----------|----------|---------| +| Interop Activity | `op-supernode/supernode/activity/interop/interop.go` | Added `pauseAtTimestamp atomic.Uint64` field, `PauseAt(ts)` and `Resume()` methods, check in `progressInterop()` | +| Supernode | `op-supernode/supernode/supernode.go` | Added `PauseInterop(ts)` and `ResumeInterop()` methods that delegate to interop activity | +| sysgo.SuperNode | `op-devstack/sysgo/l2_cl_supernode.go` | Added `PauseInterop(ts)` and `ResumeInterop()` methods | +| Stack Interface | `op-devstack/stack/supernode.go` | Added `InteropTestControl` interface | +| Orchestrator | `op-devstack/sysgo/orchestrator.go` | Added `InteropTestControl(id)` method to get test control for a supernode | +| DSL Supernode | `op-devstack/dsl/supernode.go` | Added `testControl` field, `NewSupernodeWithTestControl()` constructor, `PauseInterop(ts)` and `ResumeInterop()` methods | +| Preset | `op-devstack/presets/twol2.go` | Wire up `InteropTestControl` in `NewTwoL2SupernodeInterop()` | + +### Usage in Tests + +```go +// Pause interop at a specific timestamp +sys.Supernode.PauseInterop(targetTimestamp + 1) + +// ... perform test actions ... + +// Resume interop processing +sys.Supernode.ResumeInterop() +``` + +### Test Coverage +Test-only functionality - exercised through acceptance test usage. 
diff --git a/op-acceptance-tests/tests/supernode/interop/reorg/invalid_message_reorg_test.go b/op-acceptance-tests/tests/supernode/interop/reorg/invalid_message_reorg_test.go index 99adc3f2370c8..b2d787efa72a9 100644 --- a/op-acceptance-tests/tests/supernode/interop/reorg/invalid_message_reorg_test.go +++ b/op-acceptance-tests/tests/supernode/interop/reorg/invalid_message_reorg_test.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" "github.com/ethereum-optimism/optimism/op-devstack/devtest" @@ -67,6 +68,8 @@ func TestSupernodeInteropInvalidMessageReplacement(gt *testing.T) { // Wait for some timestamps to be verified first targetTimestamp := genesisTime + blockTime*2 + // set supernode to pause verification just after this timestamp + sys.Supernode.PauseInterop(targetTimestamp + 1) t.Require().Eventually(func() bool { resp, err := snClient.SuperRootAtTimestamp(ctx, targetTimestamp) if err != nil { @@ -81,7 +84,6 @@ func TestSupernodeInteropInvalidMessageReplacement(gt *testing.T) { // Send an INVALID executing message on chain B // Modify the message identifier to make it invalid (wrong log index) invalidExecReceipt := sendInvalidExecMessageForReplacement(t, bob, initTx, 0) - invalidBlockNumber := bigs.Uint64Strict(invalidExecReceipt.BlockNumber) invalidBlockHash := invalidExecReceipt.BlockHash invalidBlock := sys.L2ELB.BlockRefByHash(invalidExecReceipt.BlockHash) @@ -93,206 +95,67 @@ func TestSupernodeInteropInvalidMessageReplacement(gt *testing.T) { "timestamp", invalidBlockTimestamp, ) - // Observe for reset behavior: - // When the interop activity detects the invalid message and calls InvalidateBlock, - // it will trigger a rewind. We observe by watching for the unsafe head to go backwards - // or for the block at the invalid block number to change. 
- - observationDuration := 60 * time.Second - checkInterval := time.Second - - start := time.Now() - var resetDetected bool - - for time.Since(start) < observationDuration { - time.Sleep(checkInterval) - + // Observe the invalid block is locally safe on Chain B + require.Eventually(t, func() bool { + numSame := sys.L2BCL.SyncStatus().LocalSafeL2.Number == invalidBlockNumber + hashSame := sys.L2BCL.SyncStatus().LocalSafeL2.Hash == invalidBlockHash + return numSame && hashSame + }, 60*time.Second, time.Second, "invalid block should become locally safe") + + // Resume interop and observe reorg + // Interop activity will proceed and invalidate the block, triggering a rewind, and building a replacement block + // We observe resets and replacements, but only proceed on replacement (we may miss reset if it happens quickly) + sys.Supernode.ResumeInterop() + require.Eventually(t, func() bool { // Check if the block hash at the invalid block number changed or block doesn't exist // Use the EthClient directly to handle errors (block may not exist after rewind) currentBlock, err := sys.L2ELB.Escape().EthClient().BlockRefByNumber(ctx, invalidBlockNumber) if err != nil { // Block not found - this means the rewind happened and block was removed - resetDetected = true t.Logger().Info("RESET DETECTED! Block no longer exists (rewound)", "block_number", invalidBlockNumber, "err", err, ) } else if currentBlock.Hash != invalidBlockHash { // Block exists but with different hash - replaced - resetDetected = true t.Logger().Info("RESET DETECTED! 
Block hash changed", "block_number", invalidBlockNumber, "old_hash", invalidBlockHash, "new_hash", currentBlock.Hash, ) + return true } + return false + }, 60*time.Second, time.Second, "reset should be detected") - // Check verification status + // Wait for interop to proceed and verify the replacement block at the timestamp + require.Eventually(t, func() bool { resp, err := snClient.SuperRootAtTimestamp(ctx, invalidBlockTimestamp) if err != nil { - t.Logger().Info("SuperRootAtTimestamp error (may be resetting)", - "elapsed", time.Since(start).Round(time.Second), - "err", err, - ) - continue - } - - var currentHash string - if currentBlock.Hash != ([32]byte{}) { - currentHash = currentBlock.Hash.String()[:10] - } else { - currentHash = "(none)" - } - - t.Logger().Info("observation tick", - "elapsed", time.Since(start).Round(time.Second), - "invalid_block_ts", invalidBlockTimestamp, - "current_block_hash", currentHash, - "reset_detected", resetDetected, - "verified", resp.Data != nil, - ) - - // Exit early if we detect reset - if resetDetected { - t.Logger().Info("Reset behavior confirmed") - break - } - } - - // ASSERTION: Reset should have been detected - // (either unsafe head went backward or block hash changed) - t.Require().True(resetDetected, - "reset should be triggered when invalid block is detected") - - t.Logger().Info("reset confirmed, now waiting for replacement block", - "invalid_block_number", invalidBlockNumber, - "invalid_block_hash", invalidBlockHash, - ) - - // PHASE 2: Wait for a replacement block to appear at the same height - // After rewind, the derivation pipeline should rebuild the block with deposits-only - var replacementBlockHash eth.BlockID - var replacementDetected bool - - replacementTimeout := 60 * time.Second - replacementStart := time.Now() - - for time.Since(replacementStart) < replacementTimeout { - time.Sleep(checkInterval) - - // Try to get the block at the invalid block number - currentBlock, err := 
sys.L2ELB.Escape().EthClient().BlockRefByNumber(ctx, invalidBlockNumber) - if err != nil { - t.Logger().Debug("waiting for replacement block", - "elapsed", time.Since(replacementStart).Round(time.Second), - "err", err, - ) - continue - } - - // Check if we got a different block than the invalid one - if currentBlock.Hash != invalidBlockHash { - replacementBlockHash = currentBlock.ID() - replacementDetected = true - t.Logger().Info("REPLACEMENT DETECTED! New block at same height", - "block_number", invalidBlockNumber, - "old_hash", invalidBlockHash, - "new_hash", currentBlock.Hash, - ) - break + return false } - - t.Logger().Debug("block exists but still has invalid hash (waiting)", - "elapsed", time.Since(replacementStart).Round(time.Second), - "hash", currentBlock.Hash, - ) - } - - // ASSERTION: Replacement block should have been created - t.Require().True(replacementDetected, - "replacement block should be created at the same height after invalidation") - t.Require().NotEqual(invalidBlockHash, replacementBlockHash.Hash, - "replacement block should have different hash than invalid block") - - t.Logger().Info("replacement block confirmed, verifying it differs from original", - "replacement_hash", replacementBlockHash.Hash, - ) - - // ASSERTION: The replacement block is different than the original - // Fetch the replacement block with its transactions - replacementBlockInfo, replacementTxs, err := sys.L2ELB.Escape().EthClient().InfoAndTxsByNumber(ctx, invalidBlockNumber) - t.Require().NoError(err, "failed to fetch replacement block") - - t.Require().NotEqual(invalidBlockHash, replacementBlockInfo.Hash(), - "replacement block hash must differ from invalid block hash") - t.Logger().Info("confirmed replacement block differs from original", - "original_hash", invalidBlockHash, - "replacement_hash", replacementBlockInfo.Hash(), - ) + return resp.Data != nil + }, 60*time.Second, time.Second, "replacement should be verified") // ASSERTION: The invalid transaction no longer 
exists in the chain // The invalid exec message transaction should NOT be in the replacement block + replacementBlockInfo, replacementTxs, err := sys.L2ELB.Escape().EthClient().InfoAndTxsByNumber(ctx, invalidBlockNumber) + t.Require().NoError(err, "failed to fetch replacement block") invalidTxHash := invalidExecReceipt.TxHash - txInReplacementBlock := false for _, tx := range replacementTxs { if tx.Hash() == invalidTxHash { - txInReplacementBlock = true - break - } - } - t.Require().False(txInReplacementBlock, - "invalid transaction should NOT exist in replacement block") - - // Also verify the transaction receipt is no longer available at that block - // (the tx may have been re-included in a later block, but not at the same height) - t.Logger().Info("confirmed invalid transaction not in replacement block", - "invalid_tx_hash", invalidTxHash, - "replacement_block_tx_count", len(replacementTxs), - ) - - t.Logger().Info("replacement block validated, waiting for verification", - "replacement_hash", replacementBlockHash.Hash, - ) - - // PHASE 3: Wait for the replacement block's timestamp to become verified - var verified bool - verificationTimeout := 60 * time.Second - verificationStart := time.Now() - - for time.Since(verificationStart) < verificationTimeout { - time.Sleep(checkInterval) - - resp, err := snClient.SuperRootAtTimestamp(ctx, invalidBlockTimestamp) - if err != nil { - t.Logger().Debug("waiting for verification", - "elapsed", time.Since(verificationStart).Round(time.Second), - "err", err, + t.Logger().Error("invalid transaction should NOT exist in replacement block", + "invalid_tx_hash", invalidTxHash, + "replacement_tx_hash", tx.Hash(), ) - continue + t.FailNow() } - - if resp.Data != nil { - verified = true - t.Logger().Info("VERIFIED! 
Timestamp now verified with replacement block", - "timestamp", invalidBlockTimestamp, - "super_root", resp.Data.SuperRoot, - ) - break - } - - t.Logger().Debug("timestamp not yet verified", - "elapsed", time.Since(verificationStart).Round(time.Second), - ) } - // ASSERTION: The replacement block's timestamp should eventually be verified - t.Require().True(verified, - "replacement block timestamp should become verified") - t.Logger().Info("test complete: invalid block was replaced and verified", "invalid_block_number", invalidBlockNumber, "invalid_block_hash", invalidBlockHash, - "replacement_block_hash", replacementBlockHash.Hash, + "replacement_block_hash", replacementBlockInfo.Hash, ) } diff --git a/op-devstack/dsl/supernode.go b/op-devstack/dsl/supernode.go index 237b3eaa06db8..1752c0b2b6921 100644 --- a/op-devstack/dsl/supernode.go +++ b/op-devstack/dsl/supernode.go @@ -14,7 +14,8 @@ import ( // Supernode wraps a stack.Supernode interface for DSL operations type Supernode struct { commonImpl - inner stack.Supernode + inner stack.Supernode + testControl stack.InteropTestControl } // NewSupernode creates a new Supernode DSL wrapper @@ -25,6 +26,16 @@ func NewSupernode(inner stack.Supernode) *Supernode { } } +// NewSupernodeWithTestControl creates a new Supernode DSL wrapper with test control support. +// The testControl parameter can be nil if no test control is needed. +func NewSupernodeWithTestControl(inner stack.Supernode, testControl stack.InteropTestControl) *Supernode { + return &Supernode{ + commonImpl: commonFromT(inner.T()), + inner: inner, + testControl: testControl, + } +} + func (s *Supernode) ID() stack.SupernodeID { return s.inner.ID() } @@ -73,3 +84,20 @@ func (s *Supernode) AwaitValidatedTimestamp(timestamp uint64) { }) s.require.NoError(err, "super-root at timestamp %d was not validated in time", timestamp) } + +// PauseInterop pauses the interop activity at the given timestamp. 
+// When the interop activity attempts to process this timestamp, it returns early. +// This function is for integration test control only. +// Requires the Supernode to be created with NewSupernodeWithTestControl. +func (s *Supernode) PauseInterop(ts uint64) { + s.require.NotNil(s.testControl, "PauseInterop requires test control; use NewSupernodeWithTestControl") + s.testControl.PauseInterop(ts) +} + +// ResumeInterop clears any pause on the interop activity, allowing normal processing. +// This function is for integration test control only. +// Requires the Supernode to be created with NewSupernodeWithTestControl. +func (s *Supernode) ResumeInterop() { + s.require.NotNil(s.testControl, "ResumeInterop requires test control; use NewSupernodeWithTestControl") + s.testControl.ResumeInterop() +} diff --git a/op-devstack/presets/twol2.go b/op-devstack/presets/twol2.go index abdb9babb65bc..1584d91c5fc25 100644 --- a/op-devstack/presets/twol2.go +++ b/op-devstack/presets/twol2.go @@ -146,6 +146,13 @@ func NewTwoL2SupernodeInterop(t devtest.T, delaySeconds uint64) *TwoL2SupernodeI l2aNet := dsl.NewL2Network(l2a, orch.ControlPlane()) genesisTime := l2aNet.Escape().RollupConfig().Genesis.L2Time + // Get the supernode and its test control + stackSupernode := system.Supernode(match.Assume(t, match.FirstSupernode)) + var testControl stack.InteropTestControl + if sysgoOrch, ok := orch.(*sysgo.Orchestrator); ok { + testControl = sysgoOrch.InteropTestControl(stackSupernode.ID()) + } + out := &TwoL2SupernodeInterop{ TwoL2: TwoL2{ Log: t.Logger(), @@ -158,7 +165,7 @@ func NewTwoL2SupernodeInterop(t devtest.T, delaySeconds uint64) *TwoL2SupernodeI L2ACL: dsl.NewL2CLNode(l2aCL, orch.ControlPlane()), L2BCL: dsl.NewL2CLNode(l2bCL, orch.ControlPlane()), }, - Supernode: dsl.NewSupernode(system.Supernode(match.Assume(t, match.FirstSupernode))), + Supernode: dsl.NewSupernodeWithTestControl(stackSupernode, testControl), L2ELA: dsl.NewL2ELNode(l2a.L2ELNode(match.Assume(t, match.FirstL2EL)), 
orch.ControlPlane()), L2ELB: dsl.NewL2ELNode(l2b.L2ELNode(match.Assume(t, match.FirstL2EL)), orch.ControlPlane()), L2BatcherA: dsl.NewL2Batcher(l2a.L2Batcher(match.Assume(t, match.FirstL2Batcher))), diff --git a/op-devstack/stack/supernode.go b/op-devstack/stack/supernode.go index a88b07ef62799..ef871bfd0efb9 100644 --- a/op-devstack/stack/supernode.go +++ b/op-devstack/stack/supernode.go @@ -62,3 +62,16 @@ type Supernode interface { ID() SupernodeID QueryAPI() apis.SupernodeQueryAPI } + +// InteropTestControl provides integration test control methods for the interop activity. +// This interface is for integration test control only. +type InteropTestControl interface { + // PauseInterop pauses the interop activity at the given timestamp. + // When the interop activity attempts to process this timestamp, it returns early. + // This function is for integration test control only. + PauseInterop(ts uint64) + + // ResumeInterop clears any pause on the interop activity, allowing normal processing. + // This function is for integration test control only. + ResumeInterop() +} diff --git a/op-devstack/sysgo/l2_cl_supernode.go b/op-devstack/sysgo/l2_cl_supernode.go index 7590692381378..8fbf8bf93bbfd 100644 --- a/op-devstack/sysgo/l2_cl_supernode.go +++ b/op-devstack/sysgo/l2_cl_supernode.go @@ -143,6 +143,26 @@ func (n *SuperNode) Stop() { n.sn = nil } +// PauseInterop pauses the interop activity at the given timestamp. +// This function is for integration test control only. +func (n *SuperNode) PauseInterop(ts uint64) { + n.mu.Lock() + defer n.mu.Unlock() + if n.sn != nil { + n.sn.PauseInterop(ts) + } +} + +// ResumeInterop clears any pause on the interop activity. +// This function is for integration test control only. 
+func (n *SuperNode) ResumeInterop() { + n.mu.Lock() + defer n.mu.Unlock() + if n.sn != nil { + n.sn.ResumeInterop() + } +} + // WithSupernode constructs a Supernode-based L2 CL node func WithSupernode(supernodeID stack.SupernodeID, l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, opts ...L2CLOption) stack.Option[*Orchestrator] { args := []L2CLs{{CLID: l2CLID, ELID: l2ELID}} diff --git a/op-devstack/sysgo/orchestrator.go b/op-devstack/sysgo/orchestrator.go index db8d4c1ced9d7..9243fda3408f7 100644 --- a/op-devstack/sysgo/orchestrator.go +++ b/op-devstack/sysgo/orchestrator.go @@ -179,6 +179,17 @@ func (o *Orchestrator) RegisterL2MetricsTargets(id stack.IDWithChain, endpoints } } +// InteropTestControl returns the InteropTestControl for a given SupernodeID. +// Returns nil if the supernode doesn't exist or doesn't implement the interface. +// This function is for integration test control only. +func (o *Orchestrator) InteropTestControl(id stack.SupernodeID) stack.InteropTestControl { + sn, ok := o.supernodes.Get(id) + if !ok { + return nil + } + return sn +} + type hydrator interface { hydrate(system stack.ExtensibleSystem) } diff --git a/op-supernode/supernode/activity/interop/interop.go b/op-supernode/supernode/activity/interop/interop.go index cdf54fb0d3569..4c86f95a7036a 100644 --- a/op-supernode/supernode/activity/interop/interop.go +++ b/op-supernode/supernode/activity/interop/interop.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "sync" + "sync/atomic" "time" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -53,6 +54,11 @@ type Interop struct { currentL1 eth.BlockID verifyFn func(ts uint64, blocksAtTimestamp map[eth.ChainID]eth.BlockID) (Result, error) + + // pauseAtTimestamp is used for integration test control only. + // When non-zero, progressInterop will return early without processing + // if the next timestamp to process matches this value. 
+ pauseAtTimestamp atomic.Uint64 } func (i *Interop) Name() string { @@ -155,6 +161,22 @@ func (i *Interop) Stop(ctx context.Context) error { return nil } +// PauseAt sets a timestamp at which the interop activity should pause. +// When progressInterop encounters this timestamp, it returns early without processing. +// This function is for integration test control only. +// Pass 0 to clear the pause (equivalent to calling Resume). +func (i *Interop) PauseAt(ts uint64) { + i.pauseAtTimestamp.Store(ts) + i.log.Info("interop pause set", "pauseAtTimestamp", ts) +} + +// Resume clears any pause timestamp, allowing normal processing to continue. +// This function is for integration test control only. +func (i *Interop) Resume() { + i.pauseAtTimestamp.Store(0) + i.log.Info("interop pause cleared") +} + // progressAndRecord attempts to progress interop and record the result. // Returns (madeProgress, error) where madeProgress indicates if we advanced the verified timestamp. func (i *Interop) progressAndRecord() (bool, error) { @@ -240,6 +262,12 @@ func (i *Interop) progressInterop() (Result, error) { ts = lastTimestamp + 1 } + // Check if we're paused at this timestamp (integration test control only) + if pauseTs := i.pauseAtTimestamp.Load(); pauseTs != 0 && ts == pauseTs { + i.log.Info("interop paused at timestamp", "timestamp", ts) + return Result{}, nil + } + // 1: check if all chains are ready to process the next timestamp. 
// if all chains are ready, we can proceed to download the logs blocksAtTimestamp, err := i.checkChainsReady(ts) diff --git a/op-supernode/supernode/chain_container/invalidation_test.go b/op-supernode/supernode/chain_container/invalidation_test.go index 6cb2c13afa910..2006d7e147f3a 100644 --- a/op-supernode/supernode/chain_container/invalidation_test.go +++ b/op-supernode/supernode/chain_container/invalidation_test.go @@ -19,9 +19,9 @@ func TestDenyList_AddAndContains(t *testing.T) { t.Parallel() tests := []struct { - name string - setup func(t *testing.T, dl *DenyList) - check func(t *testing.T, dl *DenyList) + name string + setup func(t *testing.T, dl *DenyList) + check func(t *testing.T, dl *DenyList) }{ { name: "single hash at height", @@ -209,9 +209,9 @@ func TestDenyList_GetDeniedHashes(t *testing.T) { t.Parallel() tests := []struct { - name string - setup func(t *testing.T, dl *DenyList) - check func(t *testing.T, dl *DenyList) + name string + setup func(t *testing.T, dl *DenyList) + check func(t *testing.T, dl *DenyList) }{ { name: "returns all hashes at height", @@ -352,30 +352,30 @@ func TestInvalidateBlock(t *testing.T) { blockTime := uint64(2) tests := []struct { - name string - height uint64 - payloadHash common.Hash + name string + height uint64 + payloadHash common.Hash currentBlockHash common.Hash - engineAvailable bool - expectRewind bool - expectRewindTs uint64 + engineAvailable bool + expectRewind bool + expectRewindTs uint64 }{ { - name: "current block matches triggers rewind", - height: 5, - payloadHash: common.HexToHash("0xdead"), + name: "current block matches triggers rewind", + height: 5, + payloadHash: common.HexToHash("0xdead"), currentBlockHash: common.HexToHash("0xdead"), // Same hash - engineAvailable: true, - expectRewind: true, - expectRewindTs: genesisTime + (4 * blockTime), // height-1 timestamp + engineAvailable: true, + expectRewind: true, + expectRewindTs: genesisTime + (4 * blockTime), // height-1 timestamp }, { - name: 
"current block differs no rewind", - height: 5, - payloadHash: common.HexToHash("0xdead"), + name: "current block differs no rewind", + height: 5, + payloadHash: common.HexToHash("0xdead"), currentBlockHash: common.HexToHash("0xbeef"), // Different hash - engineAvailable: true, - expectRewind: false, + engineAvailable: true, + expectRewind: false, }, { name: "engine unavailable adds to denylist only", @@ -385,13 +385,13 @@ func TestInvalidateBlock(t *testing.T) { expectRewind: false, }, { - name: "rewind to height-1 timestamp calculated correctly", - height: 10, - payloadHash: common.HexToHash("0xabcd"), + name: "rewind to height-1 timestamp calculated correctly", + height: 10, + payloadHash: common.HexToHash("0xabcd"), currentBlockHash: common.HexToHash("0xabcd"), - engineAvailable: true, - expectRewind: true, - expectRewindTs: genesisTime + (9 * blockTime), // height 9 + engineAvailable: true, + expectRewind: true, + expectRewindTs: genesisTime + (9 * blockTime), // height 9 }, } diff --git a/op-supernode/supernode/supernode.go b/op-supernode/supernode/supernode.go index 73c8c99e148aa..f166e1140f1df 100644 --- a/op-supernode/supernode/supernode.go +++ b/op-supernode/supernode/supernode.go @@ -240,6 +240,31 @@ func (s *Supernode) onChainReset(chainID eth.ChainID, timestamp uint64) { } } +// PauseInterop pauses the interop activity at the given timestamp. +// When the interop activity attempts to process this timestamp, it returns early. +// This function is for integration test control only. +func (s *Supernode) PauseInterop(ts uint64) { + for _, a := range s.activities { + if ia, ok := a.(*interop.Interop); ok { + ia.PauseAt(ts) + return + } + } + s.log.Warn("PauseInterop called but no interop activity found") +} + +// ResumeInterop clears any pause on the interop activity, allowing normal processing. +// This function is for integration test control only. 
+func (s *Supernode) ResumeInterop() { + for _, a := range s.activities { + if ia, ok := a.(*interop.Interop); ok { + ia.Resume() + return + } + } + s.log.Warn("ResumeInterop called but no interop activity found") +} + func (s *Supernode) Stopped() bool { return s.stopped } // RPCAddr returns the bound RPC address (host:port) if the server is listening. From 0a814ac6131519d5b62cecd7919d14bb8bec7f8e Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Mon, 9 Feb 2026 14:25:45 -0600 Subject: [PATCH 14/23] Updates from Self Review - Consolidate SuperAuthority interface to op-node/rollup/iface.go (remove duplicate definitions from node.go, engine_controller.go, resources/) - Move onReset callback invocation from InvalidateBlock to RewindEngine (fires on any engine reset, not just block invalidation) - Move helper methods (SetResetCallback, blockNumberToTimestamp, IsDenied) from invalidation.go to chain_container.go - Delete unused op-supernode/supernode/resources/super_authority.go --- op-node/node/node.go | 14 +-- op-node/rollup/driver/driver.go | 2 +- op-node/rollup/engine/engine_controller.go | 14 +-- op-node/rollup/engine/payload_process.go | 2 +- op-node/rollup/iface.go | 16 ++- .../supernode/activity/interop/interop.go | 119 ++++++------------ .../activity/interop/interop_test.go | 23 +++- .../chain_container/chain_container.go | 31 ++++- .../supernode/chain_container/invalidation.go | 28 ----- .../supernode/resources/super_authority.go | 23 ---- 10 files changed, 110 insertions(+), 162 deletions(-) delete mode 100644 op-supernode/supernode/resources/super_authority.go diff --git a/op-node/node/node.go b/op-node/node/node.go index 3135b0f5b3fa2..3cbec3404fc06 100644 --- a/op-node/node/node.go +++ b/op-node/node/node.go @@ -48,16 +48,6 @@ import ( var ErrAlreadyClosed = errors.New("node is already closed") -// SuperAuthority provides supernode-level authority operations to op-node instances. 
-// When running inside a supernode, this allows the node to check if payloads are denied -// before applying them, enabling coordinated block invalidation across the supernode. -type SuperAuthority interface { - // IsDenied checks if a payload hash is denied at the given block number. - // Returns true if the payload should not be applied. - // The error indicates if the check could not be performed (should be logged but not fatal). - IsDenied(blockNumber uint64, payloadHash common.Hash) (bool, error) -} - // L1Client is the interface that op-node uses to interact with L1. // This allows wrapped or mocked clients to be used type L1Client interface { @@ -118,7 +108,7 @@ type OpNode struct { appVersion string metrics *metrics.Metrics - superAuthority SuperAuthority // Supernode authority for payload validation (may be nil) + superAuthority rollup.SuperAuthority // Supernode authority for payload validation (may be nil) l1HeadsSub ethereum.Subscription // Subscription to get L1 heads (automatically re-subscribes on error) l1SafeSub ethereum.Subscription // Subscription to get L1 safe blocks, a.k.a. 
justified data (polling) @@ -212,7 +202,7 @@ type InitializationOverrides struct { Beacon L1Beacon RPCHandler *oprpc.Handler MetricsRegistry func(*prometheus.Registry) - SuperAuthority SuperAuthority // Supernode authority for payload validation + SuperAuthority rollup.SuperAuthority // Supernode authority for payload validation } // init progressively creates and sets up all the components of the OpNode diff --git a/op-node/rollup/driver/driver.go b/op-node/rollup/driver/driver.go index d750151ab1fc1..4705b9385b9a5 100644 --- a/op-node/rollup/driver/driver.go +++ b/op-node/rollup/driver/driver.go @@ -49,7 +49,7 @@ func NewDriver( sequencerConductor conductor.SequencerConductor, altDA AltDAIface, indexingMode bool, - superAuthority engine.SuperAuthority, + superAuthority rollup.SuperAuthority, ) *Driver { driverCtx, driverCancel := context.WithCancel(context.Background()) diff --git a/op-node/rollup/engine/engine_controller.go b/op-node/rollup/engine/engine_controller.go index 15c84d14230d7..5e6836b53f35b 100644 --- a/op-node/rollup/engine/engine_controller.go +++ b/op-node/rollup/engine/engine_controller.go @@ -93,16 +93,6 @@ type CrossUpdateHandler interface { OnCrossSafeUpdate(ctx context.Context, crossSafe eth.L2BlockRef, localSafe eth.L2BlockRef) } -// SuperAuthority provides supernode-level authority operations. -// When running inside a supernode, this allows the engine controller to check -// if payloads are denied before applying them. -type SuperAuthority interface { - // IsDenied checks if a payload hash is denied at the given block number. - // Returns true if the payload should not be applied. - // The error indicates if the check could not be performed (should be logged but not fatal). 
- IsDenied(blockNumber uint64, payloadHash common.Hash) (bool, error) -} - type EngineController struct { engine ExecEngine // Underlying execution engine RPC log log.Logger @@ -169,7 +159,7 @@ type EngineController struct { crossUpdateHandler CrossUpdateHandler // SuperAuthority for payload validation (may be nil when not in supernode context) - superAuthority SuperAuthority + superAuthority rollup.SuperAuthority unsafePayloads *PayloadsQueue // queue of unsafe payloads, ordered by ascending block number, may have gaps and duplicates } @@ -178,7 +168,7 @@ var _ event.Deriver = (*EngineController)(nil) func NewEngineController(ctx context.Context, engine ExecEngine, log log.Logger, m opmetrics.Metricer, rollupCfg *rollup.Config, syncCfg *sync.Config, supervisorEnabled bool, l1 sync.L1Chain, emitter event.Emitter, - superAuthority SuperAuthority, + superAuthority rollup.SuperAuthority, ) *EngineController { syncStatus := syncStatusCL if syncCfg.SyncMode == sync.ELSync { diff --git a/op-node/rollup/engine/payload_process.go b/op-node/rollup/engine/payload_process.go index 65b87472f9db4..469472a7677e0 100644 --- a/op-node/rollup/engine/payload_process.go +++ b/op-node/rollup/engine/payload_process.go @@ -33,7 +33,7 @@ func (e *EngineController) onPayloadProcess(ctx context.Context, ev PayloadProce payload := ev.Envelope.ExecutionPayload denied, err := e.superAuthority.IsDenied(uint64(payload.BlockNumber), payload.BlockHash) if err != nil { - e.log.Warn("Failed to check SuperAuthority denylist, proceeding with payload", + e.log.Error("Failed to check SuperAuthority denylist, proceeding with payload", "blockNumber", payload.BlockNumber, "blockHash", payload.BlockHash, "err", err, diff --git a/op-node/rollup/iface.go b/op-node/rollup/iface.go index f6cf0882de20c..957a5c3d32098 100644 --- a/op-node/rollup/iface.go +++ b/op-node/rollup/iface.go @@ -1,6 +1,20 @@ package rollup -import "github.com/ethereum-optimism/optimism/op-service/eth" +import ( + 
"github.com/ethereum/go-ethereum/common" + + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// SuperAuthority provides payload validation functionality from a supernode. +// When running inside a supernode, this allows the engine controller to check +// if payloads are denied before applying them, enabling coordinated block invalidation. +type SuperAuthority interface { + // IsDenied checks if a payload hash is denied at the given block number. + // Returns true if the payload should not be applied. + // The error indicates if the check could not be performed (should be logged but not fatal). + IsDenied(blockNumber uint64, payloadHash common.Hash) (bool, error) +} // SafeHeadListener is called when the safe head is updated. // The safe head may advance by more than one block in a single update diff --git a/op-supernode/supernode/activity/interop/interop.go b/op-supernode/supernode/activity/interop/interop.go index 4c86f95a7036a..71bb8a2fd3b10 100644 --- a/op-supernode/supernode/activity/interop/interop.go +++ b/op-supernode/supernode/activity/interop/interop.go @@ -322,40 +322,15 @@ func (i *Interop) handleResult(result Result) error { return nil } -// invalidateBlock handles an invalid block by notifying the chain container to add it -// to the denylist and potentially rewind if the chain is currently using that block. +// invalidateBlock notifies the chain container to add the block to the denylist +// and potentially rewind if the chain is currently using that block. 
func (i *Interop) invalidateBlock(chainID eth.ChainID, blockID eth.BlockID) error { chain, ok := i.chains[chainID] if !ok { return fmt.Errorf("chain %s not found", chainID) } - - rewound, err := chain.InvalidateBlock(i.ctx, blockID.Number, blockID.Hash) - if err != nil { - i.log.Error("failed to invalidate block", - "chainID", chainID, - "blockNumber", blockID.Number, - "blockHash", blockID.Hash, - "err", err, - ) - return err - } - - if rewound { - i.log.Warn("chain rewound due to invalid block", - "chainID", chainID, - "blockNumber", blockID.Number, - "blockHash", blockID.Hash, - ) - } else { - i.log.Info("block added to denylist (no rewind needed)", - "chainID", chainID, - "blockNumber", blockID.Number, - "blockHash", blockID.Hash, - ) - } - - return nil + _, err := chain.InvalidateBlock(i.ctx, blockID.Number, blockID.Hash) + return err } // checkChainsReady checks if all chains are ready to process the next timestamp. @@ -430,60 +405,48 @@ func (i *Interop) ResetOn(chainID eth.ChainID, timestamp uint64) { "timestamp", timestamp, ) + chain, chainOk := i.chains[chainID] + if !chainOk { + i.log.Error("chain not found for reset", "chainID", chainID) + return + } + db, dbOk := i.logsDBs[chainID] + if !dbOk { + i.log.Error("logsDB not found for reset", "chainID", chainID) + return + } + // Reset the logsDB for this chain - if db, ok := i.logsDBs[chainID]; ok { - // Find the block just before the reset timestamp - chain, chainOk := i.chains[chainID] - if !chainOk { - i.log.Error("chain not found for reset", "chainID", chainID) + // Get the block time to calculate the previous block's timestamp + blockTime := chain.BlockTime() + targetTs := timestamp - blockTime + targetBlock, err := chain.BlockAtTimestamp(i.ctx, targetTs, eth.Safe) + if err != nil { + // If we can't find the target block, clear the entire logsDB + i.log.Warn("failed to get block at timestamp, clearing logsDB", "chainID", chainID, "timestamp", targetTs, "err", err) + if clearErr := 
db.Clear(&noopInvalidator{}); clearErr != nil { + i.log.Error("failed to clear logsDB", "chainID", chainID, "err", clearErr) + } + } else { + // check the first block in the logsDB + firstBlock, err := db.FirstSealedBlock() + if err != nil { + i.log.Error("failed to get first block", "chainID", chainID, "err", err) return } - - // Get the block time to calculate the previous block's timestamp - blockTime := chain.BlockTime() - if timestamp > blockTime { - prevTimestamp := timestamp - blockTime - // Try to get the block at the previous timestamp to use as rewind target - prevBlock, err := chain.BlockAtTimestamp(i.ctx, prevTimestamp, eth.Safe) - if err == nil { - i.log.Info("rewinding logsDB to previous block", - "chainID", chainID, - "newHead", prevBlock.ID(), - ) - if err := db.Rewind(&noopInvalidator{}, prevBlock.ID()); err != nil { - i.log.Error("failed to rewind logsDB", - "chainID", chainID, - "err", err, - ) - } - } else { - // If we can't get the previous block, clear the entire logsDB - i.log.Warn("could not get previous block, clearing logsDB", - "chainID", chainID, - "prevTimestamp", prevTimestamp, - "err", err, - ) - if err := db.Clear(&noopInvalidator{}); err != nil { - i.log.Error("failed to clear logsDB", - "chainID", chainID, - "err", err, - ) - } + if firstBlock.Number > targetBlock.Number { + i.log.Info("logsDB is to be cleared", "chainID", chainID) + if err := db.Clear(&noopInvalidator{}); err != nil { + i.log.Error("failed to clear logsDB", "chainID", chainID, "err", err) + return } } else { - // If timestamp is at or before blockTime, clear the entire logsDB - i.log.Info("clearing logsDB (reset timestamp at or before first block)", - "chainID", chainID, - ) - if err := db.Clear(&noopInvalidator{}); err != nil { - i.log.Error("failed to clear logsDB", - "chainID", chainID, - "err", err, - ) + i.log.Info("logsDB is to be rewound", "chainID", chainID, "targetBlock", targetBlock.Number, "firstBlock", firstBlock.Number) + if err := 
db.Rewind(&noopInvalidator{}, targetBlock.ID()); err != nil { + i.log.Error("failed to rewind logsDB", "chainID", chainID, "err", err) + return } } - } else { - i.log.Warn("no logsDB found for chain", "chainID", chainID) } // Remove any verified results at or after the timestamp @@ -502,10 +465,6 @@ func (i *Interop) ResetOn(chainID eth.ChainID, timestamp uint64) { "chainID", chainID, "timestamp", timestamp, ) - } else { - i.log.Info("verifiedDB rewound (no results deleted)", - "timestamp", timestamp, - ) } } diff --git a/op-supernode/supernode/activity/interop/interop_test.go b/op-supernode/supernode/activity/interop/interop_test.go index 9ddc12f32695c..b7598f6db4f50 100644 --- a/op-supernode/supernode/activity/interop/interop_test.go +++ b/op-supernode/supernode/activity/interop/interop_test.go @@ -80,6 +80,7 @@ func TestStartStop(t *testing.T) { chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} interop := New(testLogger(), 1000, chains, dataDir) require.NotNil(t, interop) + defer func() { _ = interop.Stop(context.Background()) }() ctx, cancel := context.WithCancel(context.Background()) done := make(chan error, 1) @@ -116,6 +117,7 @@ func TestStartStop(t *testing.T) { chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} interop := New(testLogger(), 1000, chains, dataDir) require.NotNil(t, interop) + defer func() { _ = interop.Stop(context.Background()) }() ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1040,11 +1042,14 @@ type mockLogsDBForInterop struct { // Track calls for verification rewindCalls []eth.BlockID clearCalls int + + // Configurable return value for FirstSealedBlock + firstSealedBlock suptypes.BlockSeal } func (m *mockLogsDBForInterop) LatestSealedBlock() (eth.BlockID, bool) { return eth.BlockID{}, false } func (m *mockLogsDBForInterop) FirstSealedBlock() (suptypes.BlockSeal, error) { - return suptypes.BlockSeal{}, nil + return m.firstSealedBlock, nil } func (m *mockLogsDBForInterop) FindSealedBlock(number 
uint64) (suptypes.BlockSeal, error) { return suptypes.BlockSeal{}, nil @@ -1102,6 +1107,7 @@ func TestResetOn(t *testing.T) { chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} interop := New(testLogger(), 1000, chains, dataDir) require.NotNil(t, interop) + defer func() { _ = interop.Stop(context.Background()) }() interop.ctx = context.Background() interop.logsDBs[mock.id] = mockLogsDB @@ -1127,6 +1133,7 @@ func TestResetOn(t *testing.T) { chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} interop := New(testLogger(), 1000, chains, dataDir) require.NotNil(t, interop) + defer func() { _ = interop.Stop(context.Background()) }() interop.ctx = context.Background() interop.logsDBs[mock.id] = mockLogsDB @@ -1143,15 +1150,22 @@ func TestResetOn(t *testing.T) { dataDir := t.TempDir() mock := newMockChainContainer(10) - mockLogsDB := &mockLogsDBForInterop{} + // Configure mock so firstSealedBlock.Number > targetBlock.Number + // When timestamp=1 and blockTime=1, targetTs=0, so targetBlock.Number=0 + // Setting firstSealedBlock.Number=5 means DB starts after target, triggering Clear + mockLogsDB := &mockLogsDBForInterop{ + firstSealedBlock: suptypes.BlockSeal{Number: 5}, + } chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} interop := New(testLogger(), 1000, chains, dataDir) require.NotNil(t, interop) + defer func() { _ = interop.Stop(context.Background()) }() interop.ctx = context.Background() interop.logsDBs[mock.id] = mockLogsDB - // Reset at timestamp 1 (blockTime=1, so no previous block) + // Reset at timestamp 1 (blockTime=1, so targetTs=0) + // Since firstSealedBlock.Number (5) > targetBlock.Number (0), Clear is called interop.ResetOn(mock.id, 1) // Verify logsDB.Clear was called @@ -1171,6 +1185,7 @@ func TestResetOn(t *testing.T) { chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} interop := New(testLogger(), 1000, chains, dataDir) require.NotNil(t, interop) + defer func() { _ = interop.Stop(context.Background()) }() interop.ctx = 
context.Background() interop.logsDBs[mock.id] = mockLogsDB @@ -1214,6 +1229,7 @@ func TestResetOn(t *testing.T) { chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} interop := New(testLogger(), 1000, chains, dataDir) require.NotNil(t, interop) + defer func() { _ = interop.Stop(context.Background()) }() interop.ctx = context.Background() interop.logsDBs[mock.id] = mockLogsDB @@ -1235,6 +1251,7 @@ func TestResetOn(t *testing.T) { chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} interop := New(testLogger(), 1000, chains, dataDir) require.NotNil(t, interop) + defer func() { _ = interop.Stop(context.Background()) }() interop.ctx = context.Background() // Reset on unknown chain (should not panic) diff --git a/op-supernode/supernode/chain_container/chain_container.go b/op-supernode/supernode/chain_container/chain_container.go index d38be7aa0cd1b..e13a8d25a6ce8 100644 --- a/op-supernode/supernode/chain_container/chain_container.go +++ b/op-supernode/supernode/chain_container/chain_container.go @@ -11,6 +11,7 @@ import ( opnodecfg "github.com/ethereum-optimism/optimism/op-node/config" rollupNode "github.com/ethereum-optimism/optimism/op-node/node" + "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/eth" oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" @@ -91,7 +92,7 @@ type simpleChainContainer struct { // Interface conformance assertions var _ ChainContainer = (*simpleChainContainer)(nil) -var _ rollupNode.SuperAuthority = (*simpleChainContainer)(nil) +var _ rollup.SuperAuthority = (*simpleChainContainer)(nil) func NewChainContainer( chainID eth.ChainID, @@ -501,6 +502,11 @@ retryLoop: } } + // Notify activities about the reset + if c.onReset != nil { + c.onReset(c.chainID, timestamp) + } + // resume the chain container to trigger a new vn to be started err = c.Resume(ctx) if err != nil { @@ -510,3 +516,26 @@ retryLoop: return nil } 
+ +// SetResetCallback sets a callback that is invoked when the chain resets. +// This must only be called during initialization, before the chain container starts processing. +// Calling this while InvalidateBlock may be running is unsafe. +func (c *simpleChainContainer) SetResetCallback(cb ResetCallback) { + c.onReset = cb +} + +// blockNumberToTimestamp converts a block number to its timestamp using rollup config. +func (c *simpleChainContainer) blockNumberToTimestamp(blockNum uint64) uint64 { + if c.vncfg == nil { + return 0 + } + return c.vncfg.Rollup.Genesis.L2Time + (blockNum * c.vncfg.Rollup.BlockTime) +} + +// IsDenied checks if a block hash is on the deny list at the given height. +func (c *simpleChainContainer) IsDenied(height uint64, payloadHash common.Hash) (bool, error) { + if c.denyList == nil { + return false, fmt.Errorf("deny list not initialized") + } + return c.denyList.Contains(height, payloadHash) +} diff --git a/op-supernode/supernode/chain_container/invalidation.go b/op-supernode/supernode/chain_container/invalidation.go index e66509b5c1ea2..2c8e5ad4fb7df 100644 --- a/op-supernode/supernode/chain_container/invalidation.go +++ b/op-supernode/supernode/chain_container/invalidation.go @@ -210,33 +210,5 @@ func (c *simpleChainContainer) InvalidateBlock(ctx context.Context, height uint6 "rewindToTimestamp", priorTimestamp, ) - // Notify activities about the reset - if c.onReset != nil { - c.onReset(c.chainID, priorTimestamp) - } - return true, nil } - -// SetResetCallback sets a callback that is invoked when the chain resets. -// This must only be called during initialization, before the chain container starts processing. -// Calling this while InvalidateBlock may be running is unsafe. -func (c *simpleChainContainer) SetResetCallback(cb ResetCallback) { - c.onReset = cb -} - -// blockNumberToTimestamp converts a block number to its timestamp using rollup config. 
-func (c *simpleChainContainer) blockNumberToTimestamp(blockNum uint64) uint64 { - if c.vncfg == nil { - return 0 - } - return c.vncfg.Rollup.Genesis.L2Time + (blockNum * c.vncfg.Rollup.BlockTime) -} - -// IsDenied checks if a block hash is on the deny list at the given height. -func (c *simpleChainContainer) IsDenied(height uint64, payloadHash common.Hash) (bool, error) { - if c.denyList == nil { - return false, fmt.Errorf("deny list not initialized") - } - return c.denyList.Contains(height, payloadHash) -} diff --git a/op-supernode/supernode/resources/super_authority.go b/op-supernode/supernode/resources/super_authority.go deleted file mode 100644 index bd7bcfa62f3ee..0000000000000 --- a/op-supernode/supernode/resources/super_authority.go +++ /dev/null @@ -1,23 +0,0 @@ -package resources - -import "github.com/ethereum/go-ethereum/common" - -// SuperAuthority is an interface for supernode-level authority operations. -// It is passed to op-node instances during initialization to provide -// supernode-specific functionality and coordination. -type SuperAuthority interface { - // IsDenied checks if a payload hash is denied at the given block number. - // Returns true if the payload should not be applied. - // The error indicates if the check could not be performed. - IsDenied(blockNumber uint64, payloadHash common.Hash) (bool, error) -} - -// NoOpSuperAuthority is a no-op implementation that never denies any payload. -// Used when running op-node outside of a supernode context. 
-type NoOpSuperAuthority struct{} - -func (n *NoOpSuperAuthority) IsDenied(blockNumber uint64, payloadHash common.Hash) (bool, error) { - return false, nil -} - -var _ SuperAuthority = (*NoOpSuperAuthority)(nil) From c68e3e217ede600b9318831a1f3ccf492dd6ce67 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Mon, 9 Feb 2026 15:54:53 -0600 Subject: [PATCH 15/23] lint --- op-supernode/supernode/supernode_activities_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/op-supernode/supernode/supernode_activities_test.go b/op-supernode/supernode/supernode_activities_test.go index ded2a032386a2..017697bae7365 100644 --- a/op-supernode/supernode/supernode_activities_test.go +++ b/op-supernode/supernode/supernode_activities_test.go @@ -27,7 +27,7 @@ func (m *mockRunnable) Start(ctx context.Context) error { <-ctx.Done() return ctx.Err() } -func (m *mockRunnable) Stop(ctx context.Context) error { m.stopped++; return nil } +func (m *mockRunnable) Stop(ctx context.Context) error { m.stopped++; return nil } func (m *mockRunnable) ResetOn(chainID eth.ChainID, timestamp uint64) {} // ensure it satisfies both Activity and RunnableActivity @@ -51,8 +51,8 @@ func (s *rpcSvc) Echo(_ context.Context) (string, error) { return "ok", nil } type rpcAct struct{} -func (a *rpcAct) RPCNamespace() string { return "act" } -func (a *rpcAct) RPCService() interface{} { return &rpcSvc{} } +func (a *rpcAct) RPCNamespace() string { return "act" } +func (a *rpcAct) RPCService() interface{} { return &rpcSvc{} } func (a *rpcAct) ResetOn(chainID eth.ChainID, timestamp uint64) {} var _ activity.Activity = (*rpcAct)(nil) From 7d33d82d0bd4be82f61616468acc609377bae907 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Mon, 9 Feb 2026 16:56:30 -0600 Subject: [PATCH 16/23] datadir for unit tests --- .../chain_container/chain_container_test.go | 52 ++++++++++++------- 1 file changed, 32 insertions(+), 20 deletions(-) diff --git 
a/op-supernode/supernode/chain_container/chain_container_test.go b/op-supernode/supernode/chain_container/chain_container_test.go index a8d148d14ffba..964d367d38757 100644 --- a/op-supernode/supernode/chain_container/chain_container_test.go +++ b/op-supernode/supernode/chain_container/chain_container_test.go @@ -178,9 +178,9 @@ func createTestVNConfig() *opnodecfg.Config { } } -func createTestCLIConfig() config.CLIConfig { +func createTestCLIConfig(dataDir string) config.CLIConfig { return config.CLIConfig{ - DataDir: "/tmp/test", + DataDir: dataDir, RPCConfig: oprpc.CLIConfig{ ListenAddr: "0.0.0.0", ListenPort: 8545, @@ -217,10 +217,10 @@ func TestChainContainer_Constructor(t *testing.T) { chainID := eth.ChainIDFromUInt64(420) vncfg := createTestVNConfig() log := createTestLogger(t) - cfg := createTestCLIConfig() initOverload := &rollupNode.InitializationOverrides{} t.Run("creates container with correct config", func(t *testing.T) { + cfg := createTestCLIConfig(t.TempDir()) container := NewChainContainer(chainID, vncfg, log, cfg, initOverload, nil, nil, nil) require.NotNil(t, container) @@ -237,21 +237,22 @@ func TestChainContainer_Constructor(t *testing.T) { }) t.Run("SafeDBPath uses subPath", func(t *testing.T) { + dataDir := t.TempDir() cfg := config.CLIConfig{ - DataDir: "/tmp/datadir", + DataDir: dataDir, } container := NewChainContainer(eth.ChainIDFromUInt64(420), vncfg, log, cfg, initOverload, nil, nil, nil) impl, ok := container.(*simpleChainContainer) require.True(t, ok) - expectedPath := filepath.Join("/tmp/datadir", "420", "safe_db") + expectedPath := filepath.Join(dataDir, "420", "safe_db") require.Equal(t, expectedPath, impl.vncfg.SafeDBPath) }) t.Run("RPC config inherited from supernode config", func(t *testing.T) { cfg := config.CLIConfig{ - DataDir: "/tmp/test", + DataDir: t.TempDir(), RPCConfig: oprpc.CLIConfig{ ListenAddr: "127.0.0.1", ListenPort: 9545, @@ -266,6 +267,7 @@ func TestChainContainer_Constructor(t *testing.T) { }) t.Run("appVersion 
set correctly", func(t *testing.T) { + cfg := createTestCLIConfig(t.TempDir()) container := NewChainContainer(chainID, vncfg, log, cfg, initOverload, nil, nil, nil) impl, ok := container.(*simpleChainContainer) require.True(t, ok) @@ -274,31 +276,32 @@ func TestChainContainer_Constructor(t *testing.T) { }) t.Run("subPath combines DataDir, chainID, and path correctly", func(t *testing.T) { + dataDir := t.TempDir() cfg := config.CLIConfig{ - DataDir: "/data", + DataDir: dataDir, } container := NewChainContainer(eth.ChainIDFromUInt64(420), vncfg, log, cfg, initOverload, nil, nil, nil) impl, ok := container.(*simpleChainContainer) require.True(t, ok) result := impl.subPath("safe_db") - expected := filepath.Join("/data", "420", "safe_db") + expected := filepath.Join(dataDir, "420", "safe_db") require.Equal(t, expected, result) }) t.Run("subPath works with various chain IDs", func(t *testing.T) { + dataDir := t.TempDir() cfg := config.CLIConfig{ - DataDir: "/data", + DataDir: dataDir, } testCases := []struct { - chainID eth.ChainID - path string - expected string + chainID eth.ChainID + path string }{ - {eth.ChainIDFromUInt64(10), "safe_db", "/data/10/safe_db"}, - {eth.ChainIDFromUInt64(11155420), "safe_db", "/data/11155420/safe_db"}, - {eth.ChainIDFromUInt64(8453), "peerstore", "/data/8453/peerstore"}, + {eth.ChainIDFromUInt64(10), "safe_db"}, + {eth.ChainIDFromUInt64(11155420), "safe_db"}, + {eth.ChainIDFromUInt64(8453), "peerstore"}, } for _, tc := range testCases { @@ -307,7 +310,7 @@ func TestChainContainer_Constructor(t *testing.T) { require.True(t, ok) result := impl.subPath(tc.path) - expected := filepath.Join(cfg.DataDir, tc.chainID.String(), tc.path) + expected := filepath.Join(dataDir, tc.chainID.String(), tc.path) require.Equal(t, expected, result, "subPath should work for chain %d", tc.chainID) } }) @@ -319,11 +322,11 @@ func TestChainContainer_Lifecycle(t *testing.T) { chainID := eth.ChainIDFromUInt64(420) vncfg := createTestVNConfig() - cfg := 
createTestCLIConfig() initOverload := &rollupNode.InitializationOverrides{} t.Run("Start respects stop flag", func(t *testing.T) { log := createTestLogger(t) + cfg := createTestCLIConfig(t.TempDir()) container := NewChainContainer(chainID, vncfg, log, cfg, initOverload, nil, nil, nil) impl, ok := container.(*simpleChainContainer) require.True(t, ok) @@ -350,6 +353,7 @@ func TestChainContainer_Lifecycle(t *testing.T) { t.Run("Stop sets stop flag", func(t *testing.T) { log := createTestLogger(t) + cfg := createTestCLIConfig(t.TempDir()) container := NewChainContainer(chainID, vncfg, log, cfg, initOverload, nil, nil, nil) impl, ok := container.(*simpleChainContainer) require.True(t, ok) @@ -364,6 +368,7 @@ func TestChainContainer_Lifecycle(t *testing.T) { t.Run("signals stopped channel on exit", func(t *testing.T) { log := createTestLogger(t) + cfg := createTestCLIConfig(t.TempDir()) container := NewChainContainer(chainID, vncfg, log, cfg, initOverload, nil, nil, nil) impl, ok := container.(*simpleChainContainer) require.True(t, ok) @@ -393,6 +398,7 @@ func TestChainContainer_Lifecycle(t *testing.T) { t.Run("context cancellation stops restart loop", func(t *testing.T) { log := createTestLogger(t) + cfg := createTestCLIConfig(t.TempDir()) container := NewChainContainer(chainID, vncfg, log, cfg, initOverload, nil, nil, nil) impl, ok := container.(*simpleChainContainer) require.True(t, ok) @@ -433,6 +439,7 @@ func TestChainContainer_Lifecycle(t *testing.T) { t.Run("Stop flag stops restart loop", func(t *testing.T) { log := createTestLogger(t) + cfg := createTestCLIConfig(t.TempDir()) container := NewChainContainer(chainID, vncfg, log, cfg, initOverload, nil, nil, nil) impl, ok := container.(*simpleChainContainer) require.True(t, ok) @@ -475,11 +482,11 @@ func TestChainContainer_PauseResume(t *testing.T) { chainID := eth.ChainIDFromUInt64(420) vncfg := createTestVNConfig() - cfg := createTestCLIConfig() initOverload := &rollupNode.InitializationOverrides{} t.Run("Pause 
sets pause flag", func(t *testing.T) { log := createTestLogger(t) + cfg := createTestCLIConfig(t.TempDir()) container := NewChainContainer(chainID, vncfg, log, cfg, initOverload, nil, nil, nil) impl, ok := container.(*simpleChainContainer) require.True(t, ok) @@ -493,6 +500,7 @@ func TestChainContainer_PauseResume(t *testing.T) { t.Run("Resume clears pause flag", func(t *testing.T) { log := createTestLogger(t) + cfg := createTestCLIConfig(t.TempDir()) container := NewChainContainer(chainID, vncfg, log, cfg, initOverload, nil, nil, nil) impl, ok := container.(*simpleChainContainer) require.True(t, ok) @@ -508,6 +516,7 @@ func TestChainContainer_PauseResume(t *testing.T) { t.Run("paused container doesn't start VN, resumed does", func(t *testing.T) { log := createTestLogger(t) + cfg := createTestCLIConfig(t.TempDir()) container := NewChainContainer(chainID, vncfg, log, cfg, initOverload, nil, nil, nil) impl, ok := container.(*simpleChainContainer) require.True(t, ok) @@ -747,11 +756,11 @@ func TestChainContainer_VirtualNodeIntegration(t *testing.T) { chainID := eth.ChainIDFromUInt64(420) vncfg := createTestVNConfig() - cfg := createTestCLIConfig() initOverload := &rollupNode.InitializationOverrides{} t.Run("Start creates and starts virtual node", func(t *testing.T) { log := createTestLogger(t) + cfg := createTestCLIConfig(t.TempDir()) container := NewChainContainer(chainID, vncfg, log, cfg, initOverload, nil, nil, nil) impl, ok := container.(*simpleChainContainer) require.True(t, ok) @@ -782,6 +791,7 @@ func TestChainContainer_VirtualNodeIntegration(t *testing.T) { t.Run("auto-restart virtual node on exit", func(t *testing.T) { log := createTestLogger(t) + cfg := createTestCLIConfig(t.TempDir()) container := NewChainContainer(chainID, vncfg, log, cfg, initOverload, nil, nil, nil) impl, ok := container.(*simpleChainContainer) require.True(t, ok) @@ -818,6 +828,7 @@ func TestChainContainer_VirtualNodeIntegration(t *testing.T) { t.Run("Stop calls virtual node Stop", 
func(t *testing.T) { log := createTestLogger(t) + cfg := createTestCLIConfig(t.TempDir()) container := NewChainContainer(chainID, vncfg, log, cfg, initOverload, nil, nil, nil) impl, ok := container.(*simpleChainContainer) require.True(t, ok) @@ -863,6 +874,7 @@ func TestChainContainer_VirtualNodeIntegration(t *testing.T) { } log := createTestLogger(t) + cfg := createTestCLIConfig(t.TempDir()) container := NewChainContainer(chainID, vncfg, log, cfg, initOverload, nil, setHandler, nil) impl, ok := container.(*simpleChainContainer) require.True(t, ok) @@ -895,7 +907,7 @@ func TestChainContainer_VerifiedAt(t *testing.T) { chainID := eth.ChainIDFromUInt64(420) vncfg := createTestVNConfig() log := createTestLogger(t) - cfg := createTestCLIConfig() + cfg := createTestCLIConfig(t.TempDir()) initOverload := &rollupNode.InitializationOverrides{} t.Run("returns error when verification activity reports not verified", func(t *testing.T) { From 8c57ec83669ab84a7553d29ba11d90039bf0cc8d Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Tue, 10 Feb 2026 12:28:02 -0600 Subject: [PATCH 17/23] Add functions to DSL --- .../reorg/invalid_message_reorg_test.go | 119 ++---------------- op-devstack/dsl/eoa.go | 94 ++++++++++++++ op-devstack/dsl/l2_el.go | 17 +++ 3 files changed, 119 insertions(+), 111 deletions(-) diff --git a/op-acceptance-tests/tests/supernode/interop/reorg/invalid_message_reorg_test.go b/op-acceptance-tests/tests/supernode/interop/reorg/invalid_message_reorg_test.go index b2d787efa72a9..c9eab1601902d 100644 --- a/op-acceptance-tests/tests/supernode/interop/reorg/invalid_message_reorg_test.go +++ b/op-acceptance-tests/tests/supernode/interop/reorg/invalid_message_reorg_test.go @@ -1,23 +1,16 @@ package reorg import ( - "context" "math/rand" "testing" "time" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/require" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" 
"github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/testutils" - "github.com/ethereum-optimism/optimism/op-service/txintent" ) // TestSupernodeInteropInvalidMessageReplacement tests that: @@ -34,7 +27,6 @@ func TestSupernodeInteropInvalidMessageReplacement(gt *testing.T) { sys := presets.NewTwoL2SupernodeInterop(t, 0) ctx := t.Ctx() - snClient := sys.SuperNodeClient() // Create funded EOAs on both chains alice := sys.FunderA.NewFundedEOA(eth.OneEther) @@ -50,8 +42,7 @@ func TestSupernodeInteropInvalidMessageReplacement(gt *testing.T) { rng := rand.New(rand.NewSource(12345)) // Send an initiating message on chain A - initTrigger := randomInitTrigger(rng, eventLoggerA, 2, 10) - initTx, initReceipt := alice.SendInitMessage(initTrigger) + initTx, initReceipt := alice.SendRandomInitMessage(rng, eventLoggerA, 2, 10) t.Logger().Info("initiating message sent on chain A", "block", initReceipt.BlockNumber, @@ -70,36 +61,26 @@ func TestSupernodeInteropInvalidMessageReplacement(gt *testing.T) { targetTimestamp := genesisTime + blockTime*2 // set supernode to pause verification just after this timestamp sys.Supernode.PauseInterop(targetTimestamp + 1) - t.Require().Eventually(func() bool { - resp, err := snClient.SuperRootAtTimestamp(ctx, targetTimestamp) - if err != nil { - return false - } - t.Logger().Info("super root at timestamp", "timestamp", targetTimestamp, "data", resp.Data) - return resp.Data != nil - }, 60*time.Second, time.Second, "initial timestamps should be verified") + sys.Supernode.AwaitValidatedTimestamp(targetTimestamp) t.Logger().Info("initial verification confirmed", "timestamp", targetTimestamp) // Send an INVALID executing message on chain B - // Modify the message 
identifier to make it invalid (wrong log index) - invalidExecReceipt := sendInvalidExecMessageForReplacement(t, bob, initTx, 0) + _, invalidExecReceipt := bob.SendInvalidExecMessage(initTx, 0) invalidBlockNumber := bigs.Uint64Strict(invalidExecReceipt.BlockNumber) invalidBlockHash := invalidExecReceipt.BlockHash invalidBlock := sys.L2ELB.BlockRefByHash(invalidExecReceipt.BlockHash) invalidBlockTimestamp := invalidBlock.Time - t.Logger().Info("invalid executing message sent on chain B", "block", invalidBlockNumber, "hash", invalidBlockHash, "timestamp", invalidBlockTimestamp, ) - // Observe the invalid block is locally safe on Chain B + // Wait for safety to include the invalid block require.Eventually(t, func() bool { - numSame := sys.L2BCL.SyncStatus().LocalSafeL2.Number == invalidBlockNumber - hashSame := sys.L2BCL.SyncStatus().LocalSafeL2.Hash == invalidBlockHash - return numSame && hashSame + numSafe := sys.L2BCL.SyncStatus().LocalSafeL2.Number >= invalidBlockNumber + return numSafe }, 60*time.Second, time.Second, "invalid block should become locally safe") // Resume interop and observe reorg @@ -129,98 +110,14 @@ func TestSupernodeInteropInvalidMessageReplacement(gt *testing.T) { }, 60*time.Second, time.Second, "reset should be detected") // Wait for interop to proceed and verify the replacement block at the timestamp - require.Eventually(t, func() bool { - resp, err := snClient.SuperRootAtTimestamp(ctx, invalidBlockTimestamp) - if err != nil { - return false - } - return resp.Data != nil - }, 60*time.Second, time.Second, "replacement should be verified") + sys.Supernode.AwaitValidatedTimestamp(invalidBlockTimestamp) // ASSERTION: The invalid transaction no longer exists in the chain // The invalid exec message transaction should NOT be in the replacement block - replacementBlockInfo, replacementTxs, err := sys.L2ELB.Escape().EthClient().InfoAndTxsByNumber(ctx, invalidBlockNumber) - t.Require().NoError(err, "failed to fetch replacement block") - invalidTxHash 
:= invalidExecReceipt.TxHash - for _, tx := range replacementTxs { - if tx.Hash() == invalidTxHash { - t.Logger().Error("invalid transaction should NOT exist in replacement block", - "invalid_tx_hash", invalidTxHash, - "replacement_tx_hash", tx.Hash(), - ) - t.FailNow() - } - } + sys.L2ELB.AssertTxNotInBlock(invalidBlockNumber, invalidExecReceipt.TxHash) t.Logger().Info("test complete: invalid block was replaced and verified", "invalid_block_number", invalidBlockNumber, "invalid_block_hash", invalidBlockHash, - "replacement_block_hash", replacementBlockInfo.Hash, ) } - -// sendInvalidExecMessageForReplacement sends an executing message with a modified (invalid) identifier. -// This makes the message invalid because it references a non-existent log index. -func sendInvalidExecMessageForReplacement( - t devtest.T, - bob *dsl.EOA, - initIntent *txintent.IntentTx[*txintent.InitTrigger, *txintent.InteropOutput], - eventIdx int, -) *types.Receipt { - ctx := t.Ctx() - - // Evaluate the init result to get the message entries - result, err := initIntent.Result.Eval(ctx) - t.Require().NoError(err, "failed to evaluate init result") - t.Require().Greater(len(result.Entries), eventIdx, "event index out of range") - - // Get the message and modify it to be invalid - msg := result.Entries[eventIdx] - - // Make the message invalid by setting an impossible log index - // This creates a message that claims to reference a log that doesn't exist - msg.Identifier.LogIndex = 9999 - - // Create the exec trigger with the invalid message - execTrigger := &txintent.ExecTrigger{ - Executor: constants.CrossL2Inbox, - Msg: msg, - } - - // Create the intent with the invalid trigger - tx := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](bob.Plan()) - tx.Content.DependOn(&initIntent.Result) - tx.Content.Fn(func(ctx context.Context) (*txintent.ExecTrigger, error) { - return execTrigger, nil - }) - - receipt, err := tx.PlannedTx.Included.Eval(ctx) - t.Require().NoError(err, 
"invalid exec msg receipt not found") - t.Logger().Info("invalid exec message included", "chain", bob.ChainID(), "block", receipt.BlockNumber) - - return receipt -} - -// randomInitTrigger creates a random init trigger for testing. -func randomInitTrigger(rng *rand.Rand, eventLoggerAddress common.Address, topicCount, dataLen int) *txintent.InitTrigger { - if topicCount > 4 { - topicCount = 4 // Max 4 topics in EVM logs - } - if topicCount < 1 { - topicCount = 1 - } - if dataLen < 1 { - dataLen = 1 - } - - topics := make([][32]byte, topicCount) - for i := range topics { - copy(topics[i][:], testutils.RandomData(rng, 32)) - } - - return &txintent.InitTrigger{ - Emitter: eventLoggerAddress, - Topics: topics, - OpaqueData: testutils.RandomData(rng, dataLen), - } -} diff --git a/op-devstack/dsl/eoa.go b/op-devstack/dsl/eoa.go index 49ce2063c48d4..b5a4c8dbe6e28 100644 --- a/op-devstack/dsl/eoa.go +++ b/op-devstack/dsl/eoa.go @@ -1,6 +1,7 @@ package dsl import ( + "context" "fmt" "math/big" "math/rand" @@ -12,6 +13,7 @@ import ( e2eBindings "github.com/ethereum-optimism/optimism/op-e2e/bindings" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/retry" + "github.com/ethereum-optimism/optimism/op-service/testutils" "github.com/ethereum-optimism/optimism/op-service/txintent" txIntentBindings "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" "github.com/ethereum-optimism/optimism/op-service/txintent/contractio" @@ -186,6 +188,35 @@ func (u *EOA) SendInitMessage(trigger *txintent.InitTrigger) (*txintent.IntentTx return tx, receipt } +// SendRandomInitMessage creates and sends a random initiating message using the given event logger. +// topicCount specifies the number of topics (clamped to 1-4), dataLen specifies the opaque data length (minimum 1). 
+func (u *EOA) SendRandomInitMessage(rng *rand.Rand, eventLoggerAddress common.Address, topicCount, dataLen int) (*txintent.IntentTx[*txintent.InitTrigger, *txintent.InteropOutput], *types.Receipt) { + // Clamp topicCount to valid range [1, 4] + if topicCount > 4 { + topicCount = 4 + } + if topicCount < 1 { + topicCount = 1 + } + // Ensure at least 1 byte of data + if dataLen < 1 { + dataLen = 1 + } + + // Generate random topics + topics := make([][32]byte, topicCount) + for i := range topics { + copy(topics[i][:], testutils.RandomData(rng, 32)) + } + + trigger := &txintent.InitTrigger{ + Emitter: eventLoggerAddress, + Topics: topics, + OpaqueData: testutils.RandomData(rng, dataLen), + } + return u.SendInitMessage(trigger) +} + func (u *EOA) SendExecMessage(initIntent *txintent.IntentTx[*txintent.InitTrigger, *txintent.InteropOutput], eventIdx int) (*txintent.IntentTx[*txintent.ExecTrigger, *txintent.InteropOutput], *types.Receipt) { tx := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](u.Plan()) tx.Content.DependOn(&initIntent.Result) @@ -198,6 +229,69 @@ func (u *EOA) SendExecMessage(initIntent *txintent.IntentTx[*txintent.InitTrigge return tx, receipt } +// InvalidExecOption configures how the executing message should be invalidated +type InvalidExecOption func(*invalidExecOpts) + +type invalidExecOpts struct { + logIndex *uint32 +} + +// WithInvalidLogIndex sets an invalid log index to make the message invalid +func WithInvalidLogIndex(index uint32) InvalidExecOption { + return func(o *invalidExecOpts) { + o.logIndex = &index + } +} + +// SendInvalidExecMessage sends an executing message with a modified (invalid) identifier. +// By default, sets LogIndex to 9999 to reference a non-existent log. +// The modification can be controlled via options (e.g., WithInvalidLogIndex). 
+func (u *EOA) SendInvalidExecMessage( + initIntent *txintent.IntentTx[*txintent.InitTrigger, *txintent.InteropOutput], + eventIdx int, + opts ...InvalidExecOption, +) (*txintent.IntentTx[*txintent.ExecTrigger, *txintent.InteropOutput], *types.Receipt) { + options := &invalidExecOpts{ + logIndex: ptrTo(uint32(9999)), // Default invalid log index + } + for _, opt := range opts { + opt(options) + } + + result, err := initIntent.Result.Eval(u.ctx) + u.t.Require().NoError(err, "failed to evaluate init result") + u.t.Require().Greater(len(result.Entries), eventIdx, "event index out of range") + + // Get the message and modify it to be invalid + msg := result.Entries[eventIdx] + if options.logIndex != nil { + msg.Identifier.LogIndex = *options.logIndex + } + + // Create the exec trigger with the invalid message + execTrigger := &txintent.ExecTrigger{ + Executor: constants.CrossL2Inbox, + Msg: msg, + } + + // The Fn just returns the pre-built trigger + tx := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](u.Plan()) + tx.Content.DependOn(&initIntent.Result) + tx.Content.Fn(func(ctx context.Context) (*txintent.ExecTrigger, error) { + return execTrigger, nil + }) + + receipt, err := tx.PlannedTx.Included.Eval(u.ctx) + u.t.Require().NoError(err, "invalid exec msg receipt not found") + u.log.Info("invalid exec message included", "chain", u.ChainID(), "block", receipt.BlockNumber) + return tx, receipt +} + +// ptrTo returns a pointer to the given value +func ptrTo[T any](v T) *T { + return &v +} + // SendPackedRandomInitMessages batches random messages and initiates them via a single multicall func (u *EOA) SendPackedRandomInitMessages(rng *rand.Rand, eventLoggerAddress common.Address) (*txintent.IntentTx[*txintent.MultiTrigger, *txintent.InteropOutput], *types.Receipt, error) { // Intent to initiate messages diff --git a/op-devstack/dsl/l2_el.go b/op-devstack/dsl/l2_el.go index d1d802866407d..cf9d866c216c5 100644 --- a/op-devstack/dsl/l2_el.go +++ 
b/op-devstack/dsl/l2_el.go @@ -397,6 +397,23 @@ func (el *L2ELNode) FinalizedHead() *BlockRefResult { return &BlockRefResult{T: el.t, BlockRef: el.BlockRefByLabel(eth.Finalized)} } +// AssertTxNotInBlock asserts that a transaction with the given hash does not exist in the block at the given number. +func (el *L2ELNode) AssertTxNotInBlock(blockNumber uint64, txHash common.Hash) { + ctx, cancel := context.WithTimeout(el.ctx, DefaultTimeout) + defer cancel() + + _, txs, err := el.inner.EthClient().InfoAndTxsByNumber(ctx, blockNumber) + el.require.NoError(err, "failed to fetch block %d", blockNumber) + + for _, tx := range txs { + if tx.Hash() == txHash { + el.require.Failf("transaction should not exist in block", + "tx_hash=%s found in block %d", txHash, blockNumber) + } + } + el.log.Info("confirmed transaction not in block", "blockNumber", blockNumber, "txHash", txHash) +} + type BlockRefResult struct { T devtest.T BlockRef eth.L2BlockRef From 21e224c3e21d7fcdbb73fc73b9f30d38928e635a Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 11 Feb 2026 10:50:56 -0600 Subject: [PATCH 18/23] PR comments: Rename Functions ; Remove non-holocene replacement logic --- op-devstack/dsl/supernode.go | 4 ++-- op-devstack/stack/supernode.go | 8 ++++---- op-devstack/sysgo/l2_cl_supernode.go | 12 ++++++------ op-node/rollup/engine/payload_process.go | 11 +---------- op-supernode/supernode/activity/activity.go | 4 ++-- .../supernode/activity/heartbeat/heartbeat.go | 4 ++-- .../supernode/activity/interop/interop.go | 6 +++--- .../supernode/activity/interop/interop_test.go | 16 ++++++++-------- .../supernode/activity/superroot/superroot.go | 4 ++-- .../chain_container/chain_container_test.go | 2 +- op-supernode/supernode/supernode.go | 10 +++++----- .../supernode/supernode_activities_test.go | 12 ++++++------ 12 files changed, 42 insertions(+), 51 deletions(-) diff --git a/op-devstack/dsl/supernode.go b/op-devstack/dsl/supernode.go index 1752c0b2b6921..3f2ae64489190 100644 --- 
a/op-devstack/dsl/supernode.go +++ b/op-devstack/dsl/supernode.go @@ -91,7 +91,7 @@ func (s *Supernode) AwaitValidatedTimestamp(timestamp uint64) { // Requires the Supernode to be created with NewSupernodeWithTestControl. func (s *Supernode) PauseInterop(ts uint64) { s.require.NotNil(s.testControl, "PauseInterop requires test control; use NewSupernodeWithTestControl") - s.testControl.PauseInterop(ts) + s.testControl.PauseInteropActivity(ts) } // ResumeInterop clears any pause on the interop activity, allowing normal processing. @@ -99,5 +99,5 @@ func (s *Supernode) PauseInterop(ts uint64) { // Requires the Supernode to be created with NewSupernodeWithTestControl. func (s *Supernode) ResumeInterop() { s.require.NotNil(s.testControl, "ResumeInterop requires test control; use NewSupernodeWithTestControl") - s.testControl.ResumeInterop() + s.testControl.ResumeInteropActivity() } diff --git a/op-devstack/stack/supernode.go b/op-devstack/stack/supernode.go index ef871bfd0efb9..c7b1fa6080f2c 100644 --- a/op-devstack/stack/supernode.go +++ b/op-devstack/stack/supernode.go @@ -66,12 +66,12 @@ type Supernode interface { // InteropTestControl provides integration test control methods for the interop activity. // This interface is for integration test control only. type InteropTestControl interface { - // PauseInterop pauses the interop activity at the given timestamp. + // PauseInteropActivity pauses the interop activity at the given timestamp. // When the interop activity attempts to process this timestamp, it returns early. // This function is for integration test control only. - PauseInterop(ts uint64) + PauseInteropActivity(ts uint64) - // ResumeInterop clears any pause on the interop activity, allowing normal processing. + // ResumeInteropActivity clears any pause on the interop activity, allowing normal processing. // This function is for integration test control only. 
- ResumeInterop() + ResumeInteropActivity() } diff --git a/op-devstack/sysgo/l2_cl_supernode.go b/op-devstack/sysgo/l2_cl_supernode.go index 8fbf8bf93bbfd..2da664fa20daa 100644 --- a/op-devstack/sysgo/l2_cl_supernode.go +++ b/op-devstack/sysgo/l2_cl_supernode.go @@ -143,23 +143,23 @@ func (n *SuperNode) Stop() { n.sn = nil } -// PauseInterop pauses the interop activity at the given timestamp. +// PauseInteropActivity pauses the interop activity at the given timestamp. // This function is for integration test control only. -func (n *SuperNode) PauseInterop(ts uint64) { +func (n *SuperNode) PauseInteropActivity(ts uint64) { n.mu.Lock() defer n.mu.Unlock() if n.sn != nil { - n.sn.PauseInterop(ts) + n.sn.PauseInteropActivity(ts) } } -// ResumeInterop clears any pause on the interop activity. +// ResumeInteropActivity clears any pause on the interop activity. // This function is for integration test control only. -func (n *SuperNode) ResumeInterop() { +func (n *SuperNode) ResumeInteropActivity() { n.mu.Lock() defer n.mu.Unlock() if n.sn != nil { - n.sn.ResumeInterop() + n.sn.ResumeInteropActivity() } } diff --git a/op-node/rollup/engine/payload_process.go b/op-node/rollup/engine/payload_process.go index 469472a7677e0..d741741182cb6 100644 --- a/op-node/rollup/engine/payload_process.go +++ b/op-node/rollup/engine/payload_process.go @@ -43,16 +43,7 @@ func (e *EngineController) onPayloadProcess(ctx context.Context, ev PayloadProce "blockNumber", payload.BlockNumber, "blockHash", payload.BlockHash, ) - // If derived and Holocene is active, request a deposits-only replacement - if ev.DerivedFrom != (eth.L1BlockRef{}) && e.rollupCfg.IsHolocene(ev.DerivedFrom.Time) { - e.emitDepositsOnlyPayloadAttributesRequest(ctx, ev.Ref.ParentID(), ev.DerivedFrom) - return - } - // Otherwise emit invalid event - e.emitter.Emit(ctx, PayloadInvalidEvent{ - Envelope: ev.Envelope, - Err: fmt.Errorf("payload %s denied by SuperAuthority", payload.BlockHash), - }) + 
e.emitDepositsOnlyPayloadAttributesRequest(ctx, ev.Ref.ParentID(), ev.DerivedFrom) return } } diff --git a/op-supernode/supernode/activity/activity.go b/op-supernode/supernode/activity/activity.go index 061aaca52475d..cc4bb1d5e6e81 100644 --- a/op-supernode/supernode/activity/activity.go +++ b/op-supernode/supernode/activity/activity.go @@ -8,10 +8,10 @@ import ( // Activity is an open interface to collect pluggable behaviors which satisfy sub-activitiy interfaces. type Activity interface { - // ResetOn is called when a chain container resets to a given timestamp. + // Reset is called when a chain container resets to a given timestamp. // Activities should clean up any cached state for that chain at or after the timestamp. // This is a no-op for activities that don't maintain chain-specific state. - ResetOn(chainID eth.ChainID, timestamp uint64) + Reset(chainID eth.ChainID, timestamp uint64) } // RunnableActivity is an Activity that can be started and stopped independently. diff --git a/op-supernode/supernode/activity/heartbeat/heartbeat.go b/op-supernode/supernode/activity/heartbeat/heartbeat.go index ba9869d1d8ca0..857137d39d19a 100644 --- a/op-supernode/supernode/activity/heartbeat/heartbeat.go +++ b/op-supernode/supernode/activity/heartbeat/heartbeat.go @@ -56,8 +56,8 @@ func (h *Heartbeat) Stop(ctx context.Context) error { return nil } -// ResetOn is a no-op for heartbeat - it has no chain-specific state. -func (h *Heartbeat) ResetOn(chainID eth.ChainID, timestamp uint64) { +// Reset is a no-op for heartbeat - it has no chain-specific state. 
+func (h *Heartbeat) Reset(chainID eth.ChainID, timestamp uint64) { // No-op: heartbeat has no chain-specific cached state } diff --git a/op-supernode/supernode/activity/interop/interop.go b/op-supernode/supernode/activity/interop/interop.go index 71bb8a2fd3b10..29af3a58a67c2 100644 --- a/op-supernode/supernode/activity/interop/interop.go +++ b/op-supernode/supernode/activity/interop/interop.go @@ -394,13 +394,13 @@ func (i *Interop) VerifiedAtTimestamp(ts uint64) (bool, error) { return i.verifiedDB.Has(ts) } -// ResetOn is called when a chain container resets to a given timestamp. +// Reset is called when a chain container resets to a given timestamp. // It clears the logsDB for that chain and removes any verified results at or after the timestamp. -func (i *Interop) ResetOn(chainID eth.ChainID, timestamp uint64) { +func (i *Interop) Reset(chainID eth.ChainID, timestamp uint64) { i.mu.Lock() defer i.mu.Unlock() - i.log.Warn("ResetOn called", + i.log.Warn("Reset called", "chainID", chainID, "timestamp", timestamp, ) diff --git a/op-supernode/supernode/activity/interop/interop_test.go b/op-supernode/supernode/activity/interop/interop_test.go index b7598f6db4f50..a0180d5ec81ca 100644 --- a/op-supernode/supernode/activity/interop/interop_test.go +++ b/op-supernode/supernode/activity/interop/interop_test.go @@ -1085,10 +1085,10 @@ func (m *mockLogsDBForInterop) Close() error { return nil } var _ LogsDB = (*mockLogsDBForInterop)(nil) // ============================================================================= -// TestResetOn +// TestReset // ============================================================================= -func TestResetOn(t *testing.T) { +func TestReset(t *testing.T) { t.Parallel() t.Run("rewinds logsDB when previous block available", func(t *testing.T) { @@ -1112,7 +1112,7 @@ func TestResetOn(t *testing.T) { interop.logsDBs[mock.id] = mockLogsDB // Reset at timestamp 100 (blockTime=1, so prev=99) - interop.ResetOn(mock.id, 100) + 
interop.Reset(mock.id, 100) // Verify logsDB.Rewind was called require.Len(t, mockLogsDB.rewindCalls, 1) @@ -1138,7 +1138,7 @@ func TestResetOn(t *testing.T) { interop.logsDBs[mock.id] = mockLogsDB // Reset at timestamp 100 - interop.ResetOn(mock.id, 100) + interop.Reset(mock.id, 100) // Verify logsDB.Clear was called require.Len(t, mockLogsDB.rewindCalls, 0) @@ -1166,7 +1166,7 @@ func TestResetOn(t *testing.T) { // Reset at timestamp 1 (blockTime=1, so targetTs=0) // Since firstSealedBlock.Number (5) > targetBlock.Number (0), Clear is called - interop.ResetOn(mock.id, 1) + interop.Reset(mock.id, 1) // Verify logsDB.Clear was called require.Len(t, mockLogsDB.rewindCalls, 0) @@ -1200,7 +1200,7 @@ func TestResetOn(t *testing.T) { } // Reset at timestamp 100 (should remove 100, 101, 102) - interop.ResetOn(mock.id, 100) + interop.Reset(mock.id, 100) // Verify results at 98, 99 still exist has, _ := interop.verifiedDB.Has(98) @@ -1237,7 +1237,7 @@ func TestResetOn(t *testing.T) { interop.currentL1 = eth.BlockID{Number: 500, Hash: common.HexToHash("0xL1")} // Reset - interop.ResetOn(mock.id, 100) + interop.Reset(mock.id, 100) // Verify currentL1 is reset to zero require.Equal(t, eth.BlockID{}, interop.currentL1) @@ -1256,7 +1256,7 @@ func TestResetOn(t *testing.T) { // Reset on unknown chain (should not panic) unknownChain := eth.ChainIDFromUInt64(999) - interop.ResetOn(unknownChain, 100) + interop.Reset(unknownChain, 100) // Just verify it didn't panic }) diff --git a/op-supernode/supernode/activity/superroot/superroot.go b/op-supernode/supernode/activity/superroot/superroot.go index a30b155aef5f2..618cb1e2a5609 100644 --- a/op-supernode/supernode/activity/superroot/superroot.go +++ b/op-supernode/supernode/activity/superroot/superroot.go @@ -30,9 +30,9 @@ func New(log gethlog.Logger, chains map[eth.ChainID]cc.ChainContainer) *Superroo func (s *Superroot) ActivityName() string { return "superroot" } -// ResetOn is a no-op for superroot - it always queries chain 
containers directly +// Reset is a no-op for superroot - it always queries chain containers directly // and doesn't maintain any chain-specific cached state. -func (s *Superroot) ResetOn(chainID eth.ChainID, timestamp uint64) { +func (s *Superroot) Reset(chainID eth.ChainID, timestamp uint64) { // No-op: superroot queries chain containers directly } diff --git a/op-supernode/supernode/chain_container/chain_container_test.go b/op-supernode/supernode/chain_container/chain_container_test.go index 964d367d38757..343ceff34ee97 100644 --- a/op-supernode/supernode/chain_container/chain_container_test.go +++ b/op-supernode/supernode/chain_container/chain_container_test.go @@ -167,7 +167,7 @@ func (m *mockVerificationActivity) VerifiedAtTimestamp(ts uint64) (bool, error) return m.verifiedAtTimestampResult, m.verifiedAtTimestampErr } -func (m *mockVerificationActivity) ResetOn(chainID eth.ChainID, timestamp uint64) {} +func (m *mockVerificationActivity) Reset(chainID eth.ChainID, timestamp uint64) {} // Test helpers func createTestVNConfig() *opnodecfg.Config { diff --git a/op-supernode/supernode/supernode.go b/op-supernode/supernode/supernode.go index f166e1140f1df..3048c1a207ba3 100644 --- a/op-supernode/supernode/supernode.go +++ b/op-supernode/supernode/supernode.go @@ -236,14 +236,14 @@ func (s *Supernode) onChainReset(chainID eth.ChainID, timestamp uint64) { "timestamp", timestamp, ) for _, a := range s.activities { - a.ResetOn(chainID, timestamp) + a.Reset(chainID, timestamp) } } -// PauseInterop pauses the interop activity at the given timestamp. +// PauseInteropActivity pauses the interop activity at the given timestamp. // When the interop activity attempts to process this timestamp, it returns early. // This function is for integration test control only. 
-func (s *Supernode) PauseInterop(ts uint64) { +func (s *Supernode) PauseInteropActivity(ts uint64) { for _, a := range s.activities { if ia, ok := a.(*interop.Interop); ok { ia.PauseAt(ts) @@ -253,9 +253,9 @@ func (s *Supernode) PauseInterop(ts uint64) { s.log.Warn("PauseInterop called but no interop activity found") } -// ResumeInterop clears any pause on the interop activity, allowing normal processing. +// ResumeInteropActivity clears any pause on the interop activity, allowing normal processing. // This function is for integration test control only. -func (s *Supernode) ResumeInterop() { +func (s *Supernode) ResumeInteropActivity() { for _, a := range s.activities { if ia, ok := a.(*interop.Interop); ok { ia.Resume() diff --git a/op-supernode/supernode/supernode_activities_test.go b/op-supernode/supernode/supernode_activities_test.go index 017697bae7365..552b44afbc488 100644 --- a/op-supernode/supernode/supernode_activities_test.go +++ b/op-supernode/supernode/supernode_activities_test.go @@ -27,8 +27,8 @@ func (m *mockRunnable) Start(ctx context.Context) error { <-ctx.Done() return ctx.Err() } -func (m *mockRunnable) Stop(ctx context.Context) error { m.stopped++; return nil } -func (m *mockRunnable) ResetOn(chainID eth.ChainID, timestamp uint64) {} +func (m *mockRunnable) Stop(ctx context.Context) error { m.stopped++; return nil } +func (m *mockRunnable) Reset(chainID eth.ChainID, timestamp uint64) {} // ensure it satisfies both Activity and RunnableActivity var _ activity.Activity = (*mockRunnable)(nil) @@ -37,7 +37,7 @@ var _ activity.RunnableActivity = (*mockRunnable)(nil) // plain marker-only activity type plainActivity struct{} -func (p *plainActivity) ResetOn(chainID eth.ChainID, timestamp uint64) {} +func (p *plainActivity) Reset(chainID eth.ChainID, timestamp uint64) {} var _ activity.Activity = (*plainActivity)(nil) @@ -51,9 +51,9 @@ func (s *rpcSvc) Echo(_ context.Context) (string, error) { return "ok", nil } type rpcAct struct{} -func (a *rpcAct) 
RPCNamespace() string { return "act" } -func (a *rpcAct) RPCService() interface{} { return &rpcSvc{} } -func (a *rpcAct) ResetOn(chainID eth.ChainID, timestamp uint64) {} +func (a *rpcAct) RPCNamespace() string { return "act" } +func (a *rpcAct) RPCService() interface{} { return &rpcSvc{} } +func (a *rpcAct) Reset(chainID eth.ChainID, timestamp uint64) {} var _ activity.Activity = (*rpcAct)(nil) var _ activity.RPCActivity = (*rpcAct)(nil) From 7516c450aefe9f5e743e2ba4a71a98336e259cbf Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 11 Feb 2026 13:56:53 -0600 Subject: [PATCH 19/23] refactor: cleanup DSL, tests, and interop activity DSL changes: - Add TimestampForBlockNum helper to L2Network - Simplify SendInvalidExecMessage to just increment log index Test changes: - invalid_message_reorg_test: use TimestampForBlockNum, check eth.NotFound - Move SuperAuthority tests to super_authority_deny_test.go - Fix denied payload test to expect DepositsOnlyPayloadAttributesRequestEvent - Make denyBlock private in test mock Interop activity: - Extract resetLogsDB and resetVerifiedDB helper functions --- .../reorg/invalid_message_reorg_test.go | 30 ++-- op-devstack/dsl/eoa.go | 38 +---- op-devstack/dsl/l2_network.go | 5 + .../rollup/engine/engine_controller_test.go | 148 +---------------- .../engine/super_authority_deny_test.go | 157 ++++++++++++++++++ .../supernode/activity/interop/interop.go | 86 +++++----- 6 files changed, 229 insertions(+), 235 deletions(-) create mode 100644 op-node/rollup/engine/super_authority_deny_test.go diff --git a/op-acceptance-tests/tests/supernode/interop/reorg/invalid_message_reorg_test.go b/op-acceptance-tests/tests/supernode/interop/reorg/invalid_message_reorg_test.go index c9eab1601902d..f3f290d62a201 100644 --- a/op-acceptance-tests/tests/supernode/interop/reorg/invalid_message_reorg_test.go +++ b/op-acceptance-tests/tests/supernode/interop/reorg/invalid_message_reorg_test.go @@ -1,10 +1,12 @@ package reorg import ( + "errors" "math/rand" 
"testing" "time" + "github.com/ethereum/go-ethereum" "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/op-devstack/devtest" @@ -52,13 +54,8 @@ func TestSupernodeInteropInvalidMessageReplacement(gt *testing.T) { // Wait for chain B to catch up sys.L2B.WaitForBlock() - // Record the verified timestamp before the invalid message - // We need to know what timestamp was verified before the invalid exec message - blockTime := sys.L2A.Escape().RollupConfig().BlockTime - genesisTime := sys.L2A.Escape().RollupConfig().Genesis.L2Time - // Wait for some timestamps to be verified first - targetTimestamp := genesisTime + blockTime*2 + targetTimestamp := sys.L2A.TimestampForBlockNum(2) // set supernode to pause verification just after this timestamp sys.Supernode.PauseInterop(targetTimestamp + 1) sys.Supernode.AwaitValidatedTimestamp(targetTimestamp) @@ -69,15 +66,14 @@ func TestSupernodeInteropInvalidMessageReplacement(gt *testing.T) { _, invalidExecReceipt := bob.SendInvalidExecMessage(initTx, 0) invalidBlockNumber := bigs.Uint64Strict(invalidExecReceipt.BlockNumber) invalidBlockHash := invalidExecReceipt.BlockHash - invalidBlock := sys.L2ELB.BlockRefByHash(invalidExecReceipt.BlockHash) - invalidBlockTimestamp := invalidBlock.Time + invalidBlockTimestamp := sys.L2B.TimestampForBlockNum(invalidBlockNumber) t.Logger().Info("invalid executing message sent on chain B", "block", invalidBlockNumber, "hash", invalidBlockHash, "timestamp", invalidBlockTimestamp, ) - // Wait for safety to include the invalid block + // Wait for local safety to include the invalid block require.Eventually(t, func() bool { numSafe := sys.L2BCL.SyncStatus().LocalSafeL2.Number >= invalidBlockNumber return numSafe @@ -92,13 +88,17 @@ func TestSupernodeInteropInvalidMessageReplacement(gt *testing.T) { // Use the EthClient directly to handle errors (block may not exist after rewind) currentBlock, err := sys.L2ELB.Escape().EthClient().BlockRefByNumber(ctx, invalidBlockNumber) if err 
!= nil { - // Block not found - this means the rewind happened and block was removed - t.Logger().Info("RESET DETECTED! Block no longer exists (rewound)", - "block_number", invalidBlockNumber, - "err", err, - ) + if errors.Is(eth.MaybeAsNotFoundErr(err), ethereum.NotFound) { + t.Logger().Info("RESET DETECTED! Block no longer exists (rewound)", + "block_number", invalidBlockNumber, + ) + } else { + t.Logger().Warn("unexpected error checking block", + "block_number", invalidBlockNumber, + "err", err, + ) + } } else if currentBlock.Hash != invalidBlockHash { - // Block exists but with different hash - replaced t.Logger().Info("RESET DETECTED! Block hash changed", "block_number", invalidBlockNumber, "old_hash", invalidBlockHash, diff --git a/op-devstack/dsl/eoa.go b/op-devstack/dsl/eoa.go index b5a4c8dbe6e28..1cfbd859facc4 100644 --- a/op-devstack/dsl/eoa.go +++ b/op-devstack/dsl/eoa.go @@ -229,44 +229,19 @@ func (u *EOA) SendExecMessage(initIntent *txintent.IntentTx[*txintent.InitTrigge return tx, receipt } -// InvalidExecOption configures how the executing message should be invalidated -type InvalidExecOption func(*invalidExecOpts) - -type invalidExecOpts struct { - logIndex *uint32 -} - -// WithInvalidLogIndex sets an invalid log index to make the message invalid -func WithInvalidLogIndex(index uint32) InvalidExecOption { - return func(o *invalidExecOpts) { - o.logIndex = &index - } -} - -// SendInvalidExecMessage sends an executing message with a modified (invalid) identifier. -// By default, sets LogIndex to 9999 to reference a non-existent log. -// The modification can be controlled via options (e.g., WithInvalidLogIndex). +// SendInvalidExecMessage sends an executing message with an invalid identifier. +// The log index is incremented to reference a non-existent log. 
func (u *EOA) SendInvalidExecMessage( initIntent *txintent.IntentTx[*txintent.InitTrigger, *txintent.InteropOutput], eventIdx int, - opts ...InvalidExecOption, ) (*txintent.IntentTx[*txintent.ExecTrigger, *txintent.InteropOutput], *types.Receipt) { - options := &invalidExecOpts{ - logIndex: ptrTo(uint32(9999)), // Default invalid log index - } - for _, opt := range opts { - opt(options) - } - result, err := initIntent.Result.Eval(u.ctx) u.t.Require().NoError(err, "failed to evaluate init result") u.t.Require().Greater(len(result.Entries), eventIdx, "event index out of range") - // Get the message and modify it to be invalid + // Get the message and modify it to be invalid by incrementing the log index msg := result.Entries[eventIdx] - if options.logIndex != nil { - msg.Identifier.LogIndex = *options.logIndex - } + msg.Identifier.LogIndex++ // Create the exec trigger with the invalid message execTrigger := &txintent.ExecTrigger{ @@ -287,11 +262,6 @@ func (u *EOA) SendInvalidExecMessage( return tx, receipt } -// ptrTo returns a pointer to the given value -func ptrTo[T any](v T) *T { - return &v -} - // SendPackedRandomInitMessages batches random messages and initiates them via a single multicall func (u *EOA) SendPackedRandomInitMessages(rng *rand.Rand, eventLoggerAddress common.Address) (*txintent.IntentTx[*txintent.MultiTrigger, *txintent.InteropOutput], *types.Receipt, error) { // Intent to initiate messages diff --git a/op-devstack/dsl/l2_network.go b/op-devstack/dsl/l2_network.go index d2ae3f694a0d1..9a80d2bfe2ba9 100644 --- a/op-devstack/dsl/l2_network.go +++ b/op-devstack/dsl/l2_network.go @@ -45,6 +45,11 @@ func (n *L2Network) ChainID() eth.ChainID { return n.inner.ChainID() } +// TimestampForBlockNum returns the timestamp for the given L2 block number. 
+func (n *L2Network) TimestampForBlockNum(blockNum uint64) uint64 { + return n.inner.RollupConfig().TimestampForBlock(blockNum) +} + // Escape returns the underlying stack.L2Network func (n *L2Network) Escape() stack.L2Network { return n.inner diff --git a/op-node/rollup/engine/engine_controller_test.go b/op-node/rollup/engine/engine_controller_test.go index b6a0ce73670ff..dd865974af3c1 100644 --- a/op-node/rollup/engine/engine_controller_test.go +++ b/op-node/rollup/engine/engine_controller_test.go @@ -2,7 +2,6 @@ package engine import ( "context" - "fmt" "math/big" mrand "math/rand" "testing" @@ -213,149 +212,4 @@ func TestInvalidPayloadForNonHead_NoDrop(t *testing.T) { } // note: nil-envelope behavior is not tested to match current implementation - -// ============================================================================= -// SuperAuthority Tests -// ============================================================================= - -// mockSuperAuthority implements SuperAuthority for testing. 
-type mockSuperAuthority struct { - deniedBlocks map[uint64]common.Hash - shouldError bool -} - -func newMockSuperAuthority() *mockSuperAuthority { - return &mockSuperAuthority{ - deniedBlocks: make(map[uint64]common.Hash), - } -} - -func (m *mockSuperAuthority) DenyBlock(blockNumber uint64, hash common.Hash) { - m.deniedBlocks[blockNumber] = hash -} - -func (m *mockSuperAuthority) IsDenied(blockNumber uint64, payloadHash common.Hash) (bool, error) { - if m.shouldError { - return false, fmt.Errorf("superauthority check failed") - } - deniedHash, exists := m.deniedBlocks[blockNumber] - if exists && deniedHash == payloadHash { - return true, nil - } - return false, nil -} - -func TestSuperAuthority_DeniedPayload_EmitsInvalidEvent(t *testing.T) { - cfg, _, _, payloadA1 := buildSimpleCfgAndPayload(t) - - emitter := &testutils.MockEmitter{} - sa := newMockSuperAuthority() - // Deny the payload - sa.DenyBlock(uint64(payloadA1.ExecutionPayload.BlockNumber), payloadA1.ExecutionPayload.BlockHash) - - ec := NewEngineController(context.Background(), nil, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{}, false, &testutils.MockL1Source{}, emitter, sa) - - // Expect PayloadInvalidEvent to be emitted (use type match since event has dynamic fields) - emitter.ExpectOnceType("PayloadInvalidEvent") - - // Trigger payload processing - blockRef := eth.L2BlockRef{ - Hash: payloadA1.ExecutionPayload.BlockHash, - Number: uint64(payloadA1.ExecutionPayload.BlockNumber), - ParentHash: payloadA1.ExecutionPayload.ParentHash, - Time: uint64(payloadA1.ExecutionPayload.Timestamp), - } - ec.onPayloadProcess(context.Background(), PayloadProcessEvent{ - Envelope: payloadA1, - Ref: blockRef, - }) - - emitter.AssertExpectations(t) -} - -func TestSuperAuthority_AllowedPayload_Proceeds(t *testing.T) { - cfg, _, _, payloadA1 := buildSimpleCfgAndPayload(t) - - emitter := &testutils.MockEmitter{} - mockEngine := &testutils.MockEngine{} - sa := newMockSuperAuthority() - // Do NOT deny the 
payload - - ec := NewEngineController(context.Background(), mockEngine, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{}, false, &testutils.MockL1Source{}, emitter, sa) - - // Expect NewPayload to be called (payload is allowed) - mockEngine.ExpectNewPayload(payloadA1.ExecutionPayload, nil, ð.PayloadStatusV1{Status: eth.ExecutionValid}, nil) - // Expect success event - emitter.ExpectOnceType("PayloadSuccessEvent") - - blockRef := eth.L2BlockRef{ - Hash: payloadA1.ExecutionPayload.BlockHash, - Number: uint64(payloadA1.ExecutionPayload.BlockNumber), - ParentHash: payloadA1.ExecutionPayload.ParentHash, - Time: uint64(payloadA1.ExecutionPayload.Timestamp), - } - ec.onPayloadProcess(context.Background(), PayloadProcessEvent{ - Envelope: payloadA1, - Ref: blockRef, - }) - - mockEngine.AssertExpectations(t) - emitter.AssertExpectations(t) -} - -func TestSuperAuthority_Error_ProceedsWithPayload(t *testing.T) { - cfg, _, _, payloadA1 := buildSimpleCfgAndPayload(t) - - emitter := &testutils.MockEmitter{} - mockEngine := &testutils.MockEngine{} - sa := newMockSuperAuthority() - sa.shouldError = true // Simulate check failure - - ec := NewEngineController(context.Background(), mockEngine, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{}, false, &testutils.MockL1Source{}, emitter, sa) - - // Despite error, expect NewPayload to be called (graceful degradation) - mockEngine.ExpectNewPayload(payloadA1.ExecutionPayload, nil, ð.PayloadStatusV1{Status: eth.ExecutionValid}, nil) - emitter.ExpectOnceType("PayloadSuccessEvent") - - blockRef := eth.L2BlockRef{ - Hash: payloadA1.ExecutionPayload.BlockHash, - Number: uint64(payloadA1.ExecutionPayload.BlockNumber), - ParentHash: payloadA1.ExecutionPayload.ParentHash, - Time: uint64(payloadA1.ExecutionPayload.Timestamp), - } - ec.onPayloadProcess(context.Background(), PayloadProcessEvent{ - Envelope: payloadA1, - Ref: blockRef, - }) - - mockEngine.AssertExpectations(t) - emitter.AssertExpectations(t) -} - -func 
TestSuperAuthority_NilAuthority_Proceeds(t *testing.T) { - cfg, _, _, payloadA1 := buildSimpleCfgAndPayload(t) - - emitter := &testutils.MockEmitter{} - mockEngine := &testutils.MockEngine{} - - // nil SuperAuthority - ec := NewEngineController(context.Background(), mockEngine, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{}, false, &testutils.MockL1Source{}, emitter, nil) - - // Expect NewPayload to be called (no authority check) - mockEngine.ExpectNewPayload(payloadA1.ExecutionPayload, nil, ð.PayloadStatusV1{Status: eth.ExecutionValid}, nil) - emitter.ExpectOnceType("PayloadSuccessEvent") - - blockRef := eth.L2BlockRef{ - Hash: payloadA1.ExecutionPayload.BlockHash, - Number: uint64(payloadA1.ExecutionPayload.BlockNumber), - ParentHash: payloadA1.ExecutionPayload.ParentHash, - Time: uint64(payloadA1.ExecutionPayload.Timestamp), - } - ec.onPayloadProcess(context.Background(), PayloadProcessEvent{ - Envelope: payloadA1, - Ref: blockRef, - }) - - mockEngine.AssertExpectations(t) - emitter.AssertExpectations(t) -} +// SuperAuthority tests are in super_authority_deny_test.go diff --git a/op-node/rollup/engine/super_authority_deny_test.go b/op-node/rollup/engine/super_authority_deny_test.go new file mode 100644 index 0000000000000..fa030f1d396d1 --- /dev/null +++ b/op-node/rollup/engine/super_authority_deny_test.go @@ -0,0 +1,157 @@ +package engine + +import ( + "context" + "fmt" + "testing" + + "github.com/ethereum/go-ethereum/common" + + "github.com/ethereum-optimism/optimism/op-node/metrics" + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-service/testutils" +) + +// mockSuperAuthority implements SuperAuthority for testing. 
+type mockSuperAuthority struct { + deniedBlocks map[uint64]common.Hash + shouldError bool +} + +func newMockSuperAuthority() *mockSuperAuthority { + return &mockSuperAuthority{ + deniedBlocks: make(map[uint64]common.Hash), + } +} + +func (m *mockSuperAuthority) denyBlock(blockNumber uint64, hash common.Hash) { + m.deniedBlocks[blockNumber] = hash +} + +func (m *mockSuperAuthority) IsDenied(blockNumber uint64, payloadHash common.Hash) (bool, error) { + if m.shouldError { + return false, fmt.Errorf("superauthority check failed") + } + deniedHash, exists := m.deniedBlocks[blockNumber] + if exists && deniedHash == payloadHash { + return true, nil + } + return false, nil +} + +// superAuthorityTestCase defines a test scenario for SuperAuthority behavior +type superAuthorityTestCase struct { + name string + // setup is called to configure the test scenario + // Returns: engine (nil if not needed), superAuthority (nil if testing nil case), derivedFrom + setup func(payload *eth.ExecutionPayloadEnvelope) (*testutils.MockEngine, rollup.SuperAuthority, eth.L1BlockRef) + // expectations sets up expected calls on the emitter and engine + expectations func(emitter *testutils.MockEmitter, engine *testutils.MockEngine, payload *eth.ExecutionPayloadEnvelope) +} + +func TestSuperAuthority(t *testing.T) { + tests := []superAuthorityTestCase{ + { + name: "DeniedPayload_EmitsDepositsOnlyRequest", + setup: func(payload *eth.ExecutionPayloadEnvelope) (*testutils.MockEngine, rollup.SuperAuthority, eth.L1BlockRef) { + sa := newMockSuperAuthority() + sa.denyBlock(uint64(payload.ExecutionPayload.BlockNumber), payload.ExecutionPayload.BlockHash) + // Need DerivedFrom for Holocene path + return nil, sa, eth.L1BlockRef{Number: 1} + }, + expectations: func(emitter *testutils.MockEmitter, engine *testutils.MockEngine, payload *eth.ExecutionPayloadEnvelope) { + emitter.ExpectOnceType("DepositsOnlyPayloadAttributesRequestEvent") + }, + }, + { + name: "AllowedPayload_Proceeds", + setup: 
func(payload *eth.ExecutionPayloadEnvelope) (*testutils.MockEngine, rollup.SuperAuthority, eth.L1BlockRef) { + sa := newMockSuperAuthority() + // Do NOT deny the payload + return &testutils.MockEngine{}, sa, eth.L1BlockRef{} + }, + expectations: func(emitter *testutils.MockEmitter, engine *testutils.MockEngine, payload *eth.ExecutionPayloadEnvelope) { + engine.ExpectNewPayload(payload.ExecutionPayload, nil, ð.PayloadStatusV1{Status: eth.ExecutionValid}, nil) + emitter.ExpectOnceType("PayloadSuccessEvent") + }, + }, + { + name: "Error_ProceedsWithPayload", + setup: func(payload *eth.ExecutionPayloadEnvelope) (*testutils.MockEngine, rollup.SuperAuthority, eth.L1BlockRef) { + sa := newMockSuperAuthority() + sa.shouldError = true + return &testutils.MockEngine{}, sa, eth.L1BlockRef{} + }, + expectations: func(emitter *testutils.MockEmitter, engine *testutils.MockEngine, payload *eth.ExecutionPayloadEnvelope) { + // Despite error, expect NewPayload (graceful degradation) + engine.ExpectNewPayload(payload.ExecutionPayload, nil, ð.PayloadStatusV1{Status: eth.ExecutionValid}, nil) + emitter.ExpectOnceType("PayloadSuccessEvent") + }, + }, + { + name: "NilAuthority_Proceeds", + setup: func(payload *eth.ExecutionPayloadEnvelope) (*testutils.MockEngine, rollup.SuperAuthority, eth.L1BlockRef) { + return &testutils.MockEngine{}, nil, eth.L1BlockRef{} + }, + expectations: func(emitter *testutils.MockEmitter, engine *testutils.MockEngine, payload *eth.ExecutionPayloadEnvelope) { + engine.ExpectNewPayload(payload.ExecutionPayload, nil, ð.PayloadStatusV1{Status: eth.ExecutionValid}, nil) + emitter.ExpectOnceType("PayloadSuccessEvent") + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + runSuperAuthorityTest(t, tc) + }) + } +} + +func runSuperAuthorityTest(t *testing.T, tc superAuthorityTestCase) { + cfg, _, _, payload := buildSimpleCfgAndPayload(t) + emitter := &testutils.MockEmitter{} + + engine, sa, derivedFrom := tc.setup(payload) + 
tc.expectations(emitter, engine, payload) + + ec := NewEngineController( + context.Background(), + engine, + testlog.Logger(t, 0), + metrics.NoopMetrics, + cfg, + &sync.Config{}, + false, + &testutils.MockL1Source{}, + emitter, + sa, + ) + + blockRef := eth.L2BlockRef{ + Hash: payload.ExecutionPayload.BlockHash, + Number: uint64(payload.ExecutionPayload.BlockNumber), + ParentHash: payload.ExecutionPayload.ParentHash, + Time: uint64(payload.ExecutionPayload.Timestamp), + } + + ec.onPayloadProcess(context.Background(), PayloadProcessEvent{ + Envelope: payload, + Ref: blockRef, + DerivedFrom: derivedFrom, + }) + + if engine != nil { + engine.AssertExpectations(t) + } + emitter.AssertExpectations(t) +} + +// Ensure derive.DepositsOnlyPayloadAttributesRequestEvent is referenced to verify import +var _ = derive.DepositsOnlyPayloadAttributesRequestEvent{} + +// Ensure rollup is imported (used by buildSimpleCfgAndPayload) +var _ *rollup.Config diff --git a/op-supernode/supernode/activity/interop/interop.go b/op-supernode/supernode/activity/interop/interop.go index 29af3a58a67c2..8bb68551de10e 100644 --- a/op-supernode/supernode/activity/interop/interop.go +++ b/op-supernode/supernode/activity/interop/interop.go @@ -416,8 +416,15 @@ func (i *Interop) Reset(chainID eth.ChainID, timestamp uint64) { return } - // Reset the logsDB for this chain - // Get the block time to calculate the previous block's timestamp + i.resetLogsDB(chainID, chain, db, timestamp) + i.resetVerifiedDB(chainID, timestamp) + + // Reset the currentL1 to force re-evaluation + i.currentL1 = eth.BlockID{} +} + +// resetLogsDB rewinds or clears the logsDB for a chain to the block before the given timestamp. 
+func (i *Interop) resetLogsDB(chainID eth.ChainID, chain cc.ChainContainer, db LogsDB, timestamp uint64) { blockTime := chain.BlockTime() targetTs := timestamp - blockTime targetBlock, err := chain.BlockAtTimestamp(i.ctx, targetTs, eth.Safe) @@ -427,47 +434,48 @@ func (i *Interop) Reset(chainID eth.ChainID, timestamp uint64) { if clearErr := db.Clear(&noopInvalidator{}); clearErr != nil { i.log.Error("failed to clear logsDB", "chainID", chainID, "err", clearErr) } - } else { - // check the first block in the logsDB - firstBlock, err := db.FirstSealedBlock() - if err != nil { - i.log.Error("failed to get first block", "chainID", chainID, "err", err) - return - } - if firstBlock.Number > targetBlock.Number { - i.log.Info("logsDB is to be cleared", "chainID", chainID) - if err := db.Clear(&noopInvalidator{}); err != nil { - i.log.Error("failed to clear logsDB", "chainID", chainID, "err", err) - return - } - } else { - i.log.Info("logsDB is to be rewound", "chainID", chainID, "targetBlock", targetBlock.Number, "firstBlock", firstBlock.Number) - if err := db.Rewind(&noopInvalidator{}, targetBlock.ID()); err != nil { - i.log.Error("failed to rewind logsDB", "chainID", chainID, "err", err) - return - } - } + return } - // Remove any verified results at or after the timestamp - if i.verifiedDB != nil { - deleted, err := i.verifiedDB.RewindTo(timestamp) - if err != nil { - i.log.Error("failed to rewind verifiedDB", - "timestamp", timestamp, - "err", err, - ) + // Check the first block in the logsDB to decide whether to clear or rewind + firstBlock, err := db.FirstSealedBlock() + if err != nil { + i.log.Error("failed to get first block", "chainID", chainID, "err", err) + return + } + + if firstBlock.Number > targetBlock.Number { + i.log.Info("logsDB is to be cleared", "chainID", chainID) + if err := db.Clear(&noopInvalidator{}); err != nil { + i.log.Error("failed to clear logsDB", "chainID", chainID, "err", err) } - if deleted { - // This is unexpected - we shouldn't have 
verified results at timestamps - // that are being reset. Log an error for visibility. - i.log.Error("UNEXPECTED: verified results were deleted on reset", - "chainID", chainID, - "timestamp", timestamp, - ) + } else { + i.log.Info("logsDB is to be rewound", "chainID", chainID, "targetBlock", targetBlock.Number, "firstBlock", firstBlock.Number) + if err := db.Rewind(&noopInvalidator{}, targetBlock.ID()); err != nil { + i.log.Error("failed to rewind logsDB", "chainID", chainID, "err", err) } } +} - // Reset the currentL1 to force re-evaluation - i.currentL1 = eth.BlockID{} +// resetVerifiedDB removes any verified results at or after the given timestamp. +func (i *Interop) resetVerifiedDB(chainID eth.ChainID, timestamp uint64) { + if i.verifiedDB == nil { + return + } + + deleted, err := i.verifiedDB.RewindTo(timestamp) + if err != nil { + i.log.Error("failed to rewind verifiedDB", + "timestamp", timestamp, + "err", err, + ) + } + if deleted { + // This is unexpected - we shouldn't have verified results at timestamps + // that are being reset. Log an error for visibility. 
+ i.log.Error("UNEXPECTED: verified results were deleted on reset", + "chainID", chainID, + "timestamp", timestamp, + ) + } } From af28ee6ee9ff7b969f81dd96757aad17d9911bf1 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 11 Feb 2026 14:10:22 -0600 Subject: [PATCH 20/23] Refacor Tests to Test Cases --- .../supernode/activity/interop/algo_test.go | 917 +++++++++--------- .../supernode/activity/interop/logdb.go | 2 + 2 files changed, 472 insertions(+), 447 deletions(-) diff --git a/op-supernode/supernode/activity/interop/algo_test.go b/op-supernode/supernode/activity/interop/algo_test.go index c5ea078287076..29a63c8e2a9f4 100644 --- a/op-supernode/supernode/activity/interop/algo_test.go +++ b/op-supernode/supernode/activity/interop/algo_test.go @@ -17,479 +17,502 @@ import ( ) // ============================================================================= -// TestVerifyInteropMessages_ValidBlocks +// TestVerifyInteropMessages - Table-Driven Tests // ============================================================================= -func TestVerifyInteropMessages_ValidBlocks(t *testing.T) { - t.Parallel() - - t.Run("block with no executing messages is valid", func(t *testing.T) { - t.Parallel() - - chainID := eth.ChainIDFromUInt64(10) - blockHash := common.HexToHash("0x123") - expectedBlock := eth.BlockID{Number: 100, Hash: blockHash} +// verifyInteropTestCase defines a single test case for verifyInteropMessages +type verifyInteropTestCase struct { + name string + setup func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) + expectError bool + errorMsg string + validate func(t *testing.T, result Result) +} - mockDB := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: blockHash, Number: 100, Time: 1000}, - openBlockExecMsg: nil, - } +func runVerifyInteropTest(t *testing.T, tc verifyInteropTestCase) { + t.Parallel() + interop, timestamp, blocks := tc.setup() + result, err := interop.verifyInteropMessages(timestamp, blocks) - interop := &Interop{ - log: 
gethlog.New(), - logsDBs: map[eth.ChainID]LogsDB{chainID: mockDB}, + if tc.expectError { + require.Error(t, err) + if tc.errorMsg != "" { + require.Contains(t, err.Error(), tc.errorMsg) } - - result, err := interop.verifyInteropMessages(1000, map[eth.ChainID]eth.BlockID{ - chainID: expectedBlock, - }) - + } else { require.NoError(t, err) - require.True(t, result.IsValid()) - require.Empty(t, result.InvalidHeads) - require.Equal(t, expectedBlock, result.L2Heads[chainID]) - }) - - t.Run("valid executing message passes verification", func(t *testing.T) { - t.Parallel() - - sourceChainID := eth.ChainIDFromUInt64(10) - destChainID := eth.ChainIDFromUInt64(8453) - - sourceBlockHash := common.HexToHash("0xSource") - destBlockHash := common.HexToHash("0xDest") - - sourceBlock := eth.BlockID{Number: 50, Hash: sourceBlockHash} - destBlock := eth.BlockID{Number: 100, Hash: destBlockHash} + } - execMsg := &suptypes.ExecutingMessage{ - ChainID: sourceChainID, - BlockNum: 50, - LogIdx: 0, - Timestamp: 500, // Source timestamp < dest timestamp (1000) - Checksum: suptypes.MessageChecksum{0x01}, - } + if tc.validate != nil { + tc.validate(t, result) + } +} - sourceDB := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: sourceBlockHash, Number: 50, Time: 500}, - containsSeal: suptypes.BlockSeal{Number: 50, Timestamp: 500}, - } +func TestVerifyInteropMessages(t *testing.T) { + t.Parallel() - destDB := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: destBlockHash, Number: 100, Time: 1000}, - openBlockExecMsg: map[uint32]*suptypes.ExecutingMessage{ - 0: execMsg, + tests := []verifyInteropTestCase{ + // Valid block cases + { + name: "ValidBlocks/NoExecutingMessages", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + chainID := eth.ChainIDFromUInt64(10) + blockHash := common.HexToHash("0x123") + expectedBlock := eth.BlockID{Number: 100, Hash: blockHash} + + mockDB := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: blockHash, Number: 100, Time: 1000}, + 
openBlockExecMsg: nil, + } + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{chainID: mockDB}, + } + + return interop, 1000, map[eth.ChainID]eth.BlockID{chainID: expectedBlock} }, - } - - interop := &Interop{ - log: gethlog.New(), - logsDBs: map[eth.ChainID]LogsDB{ - sourceChainID: sourceDB, - destChainID: destDB, + validate: func(t *testing.T, result Result) { + chainID := eth.ChainIDFromUInt64(10) + expectedBlock := eth.BlockID{Number: 100, Hash: common.HexToHash("0x123")} + require.True(t, result.IsValid()) + require.Empty(t, result.InvalidHeads) + require.Equal(t, expectedBlock, result.L2Heads[chainID]) }, - } - - result, err := interop.verifyInteropMessages(1000, map[eth.ChainID]eth.BlockID{ - sourceChainID: sourceBlock, - destChainID: destBlock, - }) - - require.NoError(t, err) - require.True(t, result.IsValid()) - require.Empty(t, result.InvalidHeads) - }) - - t.Run("message at expiry boundary passes verification", func(t *testing.T) { - t.Parallel() - - sourceChainID := eth.ChainIDFromUInt64(10) - destChainID := eth.ChainIDFromUInt64(8453) - - sourceBlockHash := common.HexToHash("0xSource") - destBlockHash := common.HexToHash("0xDest") - - // Message is exactly at the expiry boundary (should pass) - execTimestamp := uint64(1000000) - initTimestamp := execTimestamp - ExpiryTime // Exactly at boundary - - sourceBlock := eth.BlockID{Number: 50, Hash: sourceBlockHash} - destBlock := eth.BlockID{Number: 100, Hash: destBlockHash} - - execMsg := &suptypes.ExecutingMessage{ - ChainID: sourceChainID, - BlockNum: 50, - LogIdx: 0, - Timestamp: initTimestamp, // Exactly at expiry boundary - Checksum: suptypes.MessageChecksum{0x01}, - } - - sourceDB := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: sourceBlockHash, Number: 50, Time: initTimestamp}, - containsSeal: suptypes.BlockSeal{Number: 50, Timestamp: initTimestamp}, - } - - destDB := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: destBlockHash, Number: 100, Time: 
execTimestamp}, - openBlockExecMsg: map[uint32]*suptypes.ExecutingMessage{ - 0: execMsg, + }, + { + name: "ValidBlocks/ValidExecutingMessage", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + sourceChainID := eth.ChainIDFromUInt64(10) + destChainID := eth.ChainIDFromUInt64(8453) + + sourceBlockHash := common.HexToHash("0xSource") + destBlockHash := common.HexToHash("0xDest") + + sourceBlock := eth.BlockID{Number: 50, Hash: sourceBlockHash} + destBlock := eth.BlockID{Number: 100, Hash: destBlockHash} + + execMsg := &suptypes.ExecutingMessage{ + ChainID: sourceChainID, + BlockNum: 50, + LogIdx: 0, + Timestamp: 500, // Source timestamp < dest timestamp (1000) + Checksum: suptypes.MessageChecksum{0x01}, + } + + sourceDB := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: sourceBlockHash, Number: 50, Time: 500}, + containsSeal: suptypes.BlockSeal{Number: 50, Timestamp: 500}, + } + + destDB := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: destBlockHash, Number: 100, Time: 1000}, + openBlockExecMsg: map[uint32]*suptypes.ExecutingMessage{ + 0: execMsg, + }, + } + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{ + sourceChainID: sourceDB, + destChainID: destDB, + }, + } + + return interop, 1000, map[eth.ChainID]eth.BlockID{ + sourceChainID: sourceBlock, + destChainID: destBlock, + } }, - } - - interop := &Interop{ - log: gethlog.New(), - logsDBs: map[eth.ChainID]LogsDB{ - sourceChainID: sourceDB, - destChainID: destDB, + validate: func(t *testing.T, result Result) { + require.True(t, result.IsValid()) + require.Empty(t, result.InvalidHeads) }, - } - - result, err := interop.verifyInteropMessages(execTimestamp, map[eth.ChainID]eth.BlockID{ - sourceChainID: sourceBlock, - destChainID: destBlock, - }) - - require.NoError(t, err) - require.True(t, result.IsValid()) - require.Empty(t, result.InvalidHeads) - }) - - t.Run("unregistered chains in blocksAtTimestamp are skipped", func(t *testing.T) { - t.Parallel() - - 
registeredChain := eth.ChainIDFromUInt64(10) - unregisteredChain := eth.ChainIDFromUInt64(9999) - - mockDB := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: common.HexToHash("0x1"), Number: 100, Time: 1000}, - } - - interop := &Interop{ - log: gethlog.New(), - logsDBs: map[eth.ChainID]LogsDB{registeredChain: mockDB}, - } - - result, err := interop.verifyInteropMessages(1000, map[eth.ChainID]eth.BlockID{ - registeredChain: {Number: 100, Hash: common.HexToHash("0x1")}, - unregisteredChain: {Number: 200, Hash: common.HexToHash("0x2")}, - }) - - require.NoError(t, err) - require.True(t, result.IsValid()) - require.Contains(t, result.L2Heads, registeredChain) - require.NotContains(t, result.L2Heads, unregisteredChain) - }) -} - -// ============================================================================= -// TestVerifyInteropMessages_InvalidBlocks -// ============================================================================= - -func TestVerifyInteropMessages_InvalidBlocks(t *testing.T) { - t.Parallel() - - t.Run("block hash mismatch marked invalid", func(t *testing.T) { - t.Parallel() - - chainID := eth.ChainIDFromUInt64(10) - expectedBlock := eth.BlockID{Number: 100, Hash: common.HexToHash("0xExpected")} - - mockDB := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{ - Hash: common.HexToHash("0xActual"), // Different from expected - Number: 100, - Time: 1000, + }, + { + name: "ValidBlocks/MessageAtExpiryBoundary", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + sourceChainID := eth.ChainIDFromUInt64(10) + destChainID := eth.ChainIDFromUInt64(8453) + + sourceBlockHash := common.HexToHash("0xSource") + destBlockHash := common.HexToHash("0xDest") + + // Message is exactly at the expiry boundary (should pass) + execTimestamp := uint64(1000000) + initTimestamp := execTimestamp - ExpiryTime // Exactly at boundary + + sourceBlock := eth.BlockID{Number: 50, Hash: sourceBlockHash} + destBlock := eth.BlockID{Number: 100, Hash: destBlockHash} + + 
execMsg := &suptypes.ExecutingMessage{ + ChainID: sourceChainID, + BlockNum: 50, + LogIdx: 0, + Timestamp: initTimestamp, // Exactly at expiry boundary + Checksum: suptypes.MessageChecksum{0x01}, + } + + sourceDB := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: sourceBlockHash, Number: 50, Time: initTimestamp}, + containsSeal: suptypes.BlockSeal{Number: 50, Timestamp: initTimestamp}, + } + + destDB := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: destBlockHash, Number: 100, Time: execTimestamp}, + openBlockExecMsg: map[uint32]*suptypes.ExecutingMessage{ + 0: execMsg, + }, + } + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{ + sourceChainID: sourceDB, + destChainID: destDB, + }, + } + + return interop, execTimestamp, map[eth.ChainID]eth.BlockID{ + sourceChainID: sourceBlock, + destChainID: destBlock, + } }, - } - - interop := &Interop{ - log: gethlog.New(), - logsDBs: map[eth.ChainID]LogsDB{chainID: mockDB}, - } - - result, err := interop.verifyInteropMessages(1000, map[eth.ChainID]eth.BlockID{ - chainID: expectedBlock, - }) - - require.NoError(t, err) - require.False(t, result.IsValid()) - require.Contains(t, result.InvalidHeads, chainID) - require.Equal(t, expectedBlock, result.InvalidHeads[chainID]) - }) - - t.Run("initiating message not found marked invalid", func(t *testing.T) { - t.Parallel() - - sourceChainID := eth.ChainIDFromUInt64(10) - destChainID := eth.ChainIDFromUInt64(8453) - - destBlockHash := common.HexToHash("0xDest") - destBlock := eth.BlockID{Number: 100, Hash: destBlockHash} - - execMsg := &suptypes.ExecutingMessage{ - ChainID: sourceChainID, - BlockNum: 50, - LogIdx: 0, - Timestamp: 500, - Checksum: suptypes.MessageChecksum{0x01}, - } - - sourceDB := &algoMockLogsDB{ - containsErr: suptypes.ErrConflict, // Message not found - } - - destDB := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: destBlockHash, Number: 100, Time: 1000}, - openBlockExecMsg: map[uint32]*suptypes.ExecutingMessage{ - 0: 
execMsg, + validate: func(t *testing.T, result Result) { + require.True(t, result.IsValid()) + require.Empty(t, result.InvalidHeads) }, - } - - interop := &Interop{ - log: gethlog.New(), - logsDBs: map[eth.ChainID]LogsDB{ - sourceChainID: sourceDB, - destChainID: destDB, + }, + { + name: "ValidBlocks/UnregisteredChainsSkipped", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + registeredChain := eth.ChainIDFromUInt64(10) + unregisteredChain := eth.ChainIDFromUInt64(9999) + + mockDB := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: common.HexToHash("0x1"), Number: 100, Time: 1000}, + } + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{registeredChain: mockDB}, + } + + return interop, 1000, map[eth.ChainID]eth.BlockID{ + registeredChain: {Number: 100, Hash: common.HexToHash("0x1")}, + unregisteredChain: {Number: 200, Hash: common.HexToHash("0x2")}, + } }, - } - - result, err := interop.verifyInteropMessages(1000, map[eth.ChainID]eth.BlockID{ - destChainID: destBlock, - }) - - require.NoError(t, err) - require.False(t, result.IsValid()) - require.Contains(t, result.InvalidHeads, destChainID) - }) - - t.Run("timestamp violation (init.ts >= exec.ts) marked invalid", func(t *testing.T) { - t.Parallel() - - sourceChainID := eth.ChainIDFromUInt64(10) - destChainID := eth.ChainIDFromUInt64(8453) - - destBlockHash := common.HexToHash("0xDest") - destBlock := eth.BlockID{Number: 100, Hash: destBlockHash} - - execMsg := &suptypes.ExecutingMessage{ - ChainID: sourceChainID, - BlockNum: 50, - LogIdx: 0, - Timestamp: 1000, // Same as dest block timestamp - INVALID! 
- Checksum: suptypes.MessageChecksum{0x01}, - } - - sourceDB := &algoMockLogsDB{ - containsSeal: suptypes.BlockSeal{Number: 50, Timestamp: 1000}, - } - - destDB := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: destBlockHash, Number: 100, Time: 1000}, - openBlockExecMsg: map[uint32]*suptypes.ExecutingMessage{ - 0: execMsg, + validate: func(t *testing.T, result Result) { + registeredChain := eth.ChainIDFromUInt64(10) + unregisteredChain := eth.ChainIDFromUInt64(9999) + require.True(t, result.IsValid()) + require.Contains(t, result.L2Heads, registeredChain) + require.NotContains(t, result.L2Heads, unregisteredChain) }, - } - - interop := &Interop{ - log: gethlog.New(), - logsDBs: map[eth.ChainID]LogsDB{ - sourceChainID: sourceDB, - destChainID: destDB, + }, + // Invalid block cases + { + name: "InvalidBlocks/BlockHashMismatch", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + chainID := eth.ChainIDFromUInt64(10) + expectedBlock := eth.BlockID{Number: 100, Hash: common.HexToHash("0xExpected")} + + mockDB := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{ + Hash: common.HexToHash("0xActual"), // Different from expected + Number: 100, + Time: 1000, + }, + } + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{chainID: mockDB}, + } + + return interop, 1000, map[eth.ChainID]eth.BlockID{chainID: expectedBlock} }, - } - - result, err := interop.verifyInteropMessages(1000, map[eth.ChainID]eth.BlockID{ - destChainID: destBlock, - }) - - require.NoError(t, err) - require.False(t, result.IsValid()) - require.Contains(t, result.InvalidHeads, destChainID) - }) - - t.Run("unknown source chain marked invalid", func(t *testing.T) { - t.Parallel() - - unknownSourceChain := eth.ChainIDFromUInt64(9999) - destChainID := eth.ChainIDFromUInt64(8453) - - destBlockHash := common.HexToHash("0xDest") - destBlock := eth.BlockID{Number: 100, Hash: destBlockHash} - - execMsg := &suptypes.ExecutingMessage{ - ChainID: unknownSourceChain, // Not 
registered - BlockNum: 50, - LogIdx: 0, - Timestamp: 500, - Checksum: suptypes.MessageChecksum{0x01}, - } - - destDB := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: destBlockHash, Number: 100, Time: 1000}, - openBlockExecMsg: map[uint32]*suptypes.ExecutingMessage{ - 0: execMsg, + validate: func(t *testing.T, result Result) { + chainID := eth.ChainIDFromUInt64(10) + expectedBlock := eth.BlockID{Number: 100, Hash: common.HexToHash("0xExpected")} + require.False(t, result.IsValid()) + require.Contains(t, result.InvalidHeads, chainID) + require.Equal(t, expectedBlock, result.InvalidHeads[chainID]) }, - } - - interop := &Interop{ - log: gethlog.New(), - logsDBs: map[eth.ChainID]LogsDB{ - destChainID: destDB, - // Note: unknownSourceChain NOT in logsDBs + }, + { + name: "InvalidBlocks/InitiatingMessageNotFound", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + sourceChainID := eth.ChainIDFromUInt64(10) + destChainID := eth.ChainIDFromUInt64(8453) + + destBlockHash := common.HexToHash("0xDest") + destBlock := eth.BlockID{Number: 100, Hash: destBlockHash} + + execMsg := &suptypes.ExecutingMessage{ + ChainID: sourceChainID, + BlockNum: 50, + LogIdx: 0, + Timestamp: 500, + Checksum: suptypes.MessageChecksum{0x01}, + } + + sourceDB := &algoMockLogsDB{ + containsErr: suptypes.ErrConflict, // Message not found + } + + destDB := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: destBlockHash, Number: 100, Time: 1000}, + openBlockExecMsg: map[uint32]*suptypes.ExecutingMessage{ + 0: execMsg, + }, + } + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{ + sourceChainID: sourceDB, + destChainID: destDB, + }, + } + + return interop, 1000, map[eth.ChainID]eth.BlockID{destChainID: destBlock} }, - } - - result, err := interop.verifyInteropMessages(1000, map[eth.ChainID]eth.BlockID{ - destChainID: destBlock, - }) - - require.NoError(t, err) - require.False(t, result.IsValid()) - require.Contains(t, result.InvalidHeads, destChainID) 
- }) - - t.Run("expired message marked invalid", func(t *testing.T) { - t.Parallel() - - sourceChainID := eth.ChainIDFromUInt64(10) - destChainID := eth.ChainIDFromUInt64(8453) - - destBlockHash := common.HexToHash("0xDest") - // Executing block is at timestamp 1000000 (well after expiry) - execTimestamp := uint64(1000000) - // Initiating message timestamp is more than ExpiryTime (604800) before executing timestamp - initTimestamp := execTimestamp - ExpiryTime - 1 // 1 second past expiry - - destBlock := eth.BlockID{Number: 100, Hash: destBlockHash} - - execMsg := &suptypes.ExecutingMessage{ - ChainID: sourceChainID, - BlockNum: 50, - LogIdx: 0, - Timestamp: initTimestamp, // Expired! - Checksum: suptypes.MessageChecksum{0x01}, - } - - sourceDB := &algoMockLogsDB{ - containsSeal: suptypes.BlockSeal{Number: 50, Timestamp: initTimestamp}, - } - - destDB := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: destBlockHash, Number: 100, Time: execTimestamp}, - openBlockExecMsg: map[uint32]*suptypes.ExecutingMessage{ - 0: execMsg, + validate: func(t *testing.T, result Result) { + destChainID := eth.ChainIDFromUInt64(8453) + require.False(t, result.IsValid()) + require.Contains(t, result.InvalidHeads, destChainID) }, - } - - interop := &Interop{ - log: gethlog.New(), - logsDBs: map[eth.ChainID]LogsDB{ - sourceChainID: sourceDB, - destChainID: destDB, + }, + { + name: "InvalidBlocks/TimestampViolation", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + sourceChainID := eth.ChainIDFromUInt64(10) + destChainID := eth.ChainIDFromUInt64(8453) + + destBlockHash := common.HexToHash("0xDest") + destBlock := eth.BlockID{Number: 100, Hash: destBlockHash} + + execMsg := &suptypes.ExecutingMessage{ + ChainID: sourceChainID, + BlockNum: 50, + LogIdx: 0, + Timestamp: 1001, // Future timestamp - INVALID! 
+ Checksum: suptypes.MessageChecksum{0x01}, + } + + sourceDB := &algoMockLogsDB{ + containsSeal: suptypes.BlockSeal{Number: 50, Timestamp: 1001}, + } + + destDB := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: destBlockHash, Number: 100, Time: 1000}, + openBlockExecMsg: map[uint32]*suptypes.ExecutingMessage{ + 0: execMsg, + }, + } + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{ + sourceChainID: sourceDB, + destChainID: destDB, + }, + } + + return interop, 1000, map[eth.ChainID]eth.BlockID{destChainID: destBlock} }, - } - - result, err := interop.verifyInteropMessages(execTimestamp, map[eth.ChainID]eth.BlockID{ - destChainID: destBlock, - }) - - require.NoError(t, err) - require.False(t, result.IsValid()) - require.Contains(t, result.InvalidHeads, destChainID) - }) - - t.Run("multiple chains with one invalid", func(t *testing.T) { - t.Parallel() - - sourceChainID := eth.ChainIDFromUInt64(10) - validChainID := eth.ChainIDFromUInt64(8453) - invalidChainID := eth.ChainIDFromUInt64(420) - - validBlockHash := common.HexToHash("0xValid") - invalidBlockHash := common.HexToHash("0xInvalid") - - validBlock := eth.BlockID{Number: 100, Hash: validBlockHash} - invalidBlock := eth.BlockID{Number: 200, Hash: invalidBlockHash} - - badExecMsg := &suptypes.ExecutingMessage{ - ChainID: sourceChainID, - BlockNum: 50, - LogIdx: 0, - Timestamp: 1000, // Same as block timestamp - INVALID - Checksum: suptypes.MessageChecksum{0x01}, - } - - sourceDB := &algoMockLogsDB{ - containsSeal: suptypes.BlockSeal{Number: 50, Timestamp: 1000}, - } - - validDB := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: validBlockHash, Number: 100, Time: 1000}, - openBlockExecMsg: nil, // No executing messages - valid - } - - invalidDB := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: invalidBlockHash, Number: 200, Time: 1000}, - openBlockExecMsg: map[uint32]*suptypes.ExecutingMessage{ - 0: badExecMsg, + validate: func(t *testing.T, result Result) { + destChainID 
:= eth.ChainIDFromUInt64(8453) + require.False(t, result.IsValid()) + require.Contains(t, result.InvalidHeads, destChainID) }, - } - - interop := &Interop{ - log: gethlog.New(), - logsDBs: map[eth.ChainID]LogsDB{ - sourceChainID: sourceDB, - validChainID: validDB, - invalidChainID: invalidDB, + }, + { + name: "InvalidBlocks/UnknownSourceChain", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + unknownSourceChain := eth.ChainIDFromUInt64(9999) + destChainID := eth.ChainIDFromUInt64(8453) + + destBlockHash := common.HexToHash("0xDest") + destBlock := eth.BlockID{Number: 100, Hash: destBlockHash} + + execMsg := &suptypes.ExecutingMessage{ + ChainID: unknownSourceChain, // Not registered + BlockNum: 50, + LogIdx: 0, + Timestamp: 500, + Checksum: suptypes.MessageChecksum{0x01}, + } + + destDB := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: destBlockHash, Number: 100, Time: 1000}, + openBlockExecMsg: map[uint32]*suptypes.ExecutingMessage{ + 0: execMsg, + }, + } + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{ + destChainID: destDB, + // Note: unknownSourceChain NOT in logsDBs + }, + } + + return interop, 1000, map[eth.ChainID]eth.BlockID{destChainID: destBlock} }, - } - - result, err := interop.verifyInteropMessages(1000, map[eth.ChainID]eth.BlockID{ - validChainID: validBlock, - invalidChainID: invalidBlock, - }) - - require.NoError(t, err) - require.False(t, result.IsValid()) - // Both chains in L2Heads - require.Contains(t, result.L2Heads, validChainID) - require.Contains(t, result.L2Heads, invalidChainID) - // Only invalid in InvalidHeads - require.NotContains(t, result.InvalidHeads, validChainID) - require.Contains(t, result.InvalidHeads, invalidChainID) - }) -} - -// ============================================================================= -// TestVerifyInteropMessages_Errors -// ============================================================================= - -func TestVerifyInteropMessages_Errors(t 
*testing.T) { - t.Parallel() - - t.Run("OpenBlock error propagated", func(t *testing.T) { - t.Parallel() - - chainID := eth.ChainIDFromUInt64(10) - block := eth.BlockID{Number: 100, Hash: common.HexToHash("0x123")} - - mockDB := &algoMockLogsDB{ - openBlockErr: errors.New("database error"), - } - - interop := &Interop{ - log: gethlog.New(), - logsDBs: map[eth.ChainID]LogsDB{chainID: mockDB}, - } + validate: func(t *testing.T, result Result) { + destChainID := eth.ChainIDFromUInt64(8453) + require.False(t, result.IsValid()) + require.Contains(t, result.InvalidHeads, destChainID) + }, + }, + { + name: "InvalidBlocks/ExpiredMessage", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + sourceChainID := eth.ChainIDFromUInt64(10) + destChainID := eth.ChainIDFromUInt64(8453) + + destBlockHash := common.HexToHash("0xDest") + // Executing block is at timestamp 1000000 (well after expiry) + execTimestamp := uint64(1000000) + // Initiating message timestamp is more than ExpiryTime (604800) before executing timestamp + initTimestamp := execTimestamp - ExpiryTime - 1 // 1 second past expiry + + destBlock := eth.BlockID{Number: 100, Hash: destBlockHash} + + execMsg := &suptypes.ExecutingMessage{ + ChainID: sourceChainID, + BlockNum: 50, + LogIdx: 0, + Timestamp: initTimestamp, // Expired! 
+ Checksum: suptypes.MessageChecksum{0x01}, + } + + sourceDB := &algoMockLogsDB{ + containsSeal: suptypes.BlockSeal{Number: 50, Timestamp: initTimestamp}, + } + + destDB := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: destBlockHash, Number: 100, Time: execTimestamp}, + openBlockExecMsg: map[uint32]*suptypes.ExecutingMessage{ + 0: execMsg, + }, + } + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{ + sourceChainID: sourceDB, + destChainID: destDB, + }, + } + + return interop, execTimestamp, map[eth.ChainID]eth.BlockID{destChainID: destBlock} + }, + validate: func(t *testing.T, result Result) { + destChainID := eth.ChainIDFromUInt64(8453) + require.False(t, result.IsValid()) + require.Contains(t, result.InvalidHeads, destChainID) + }, + }, + { + name: "InvalidBlocks/MultipleChainsOneInvalid", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + sourceChainID := eth.ChainIDFromUInt64(10) + validChainID := eth.ChainIDFromUInt64(8453) + invalidChainID := eth.ChainIDFromUInt64(420) + + validBlockHash := common.HexToHash("0xValid") + invalidBlockHash := common.HexToHash("0xInvalid") + + validBlock := eth.BlockID{Number: 100, Hash: validBlockHash} + invalidBlock := eth.BlockID{Number: 200, Hash: invalidBlockHash} + + badExecMsg := &suptypes.ExecutingMessage{ + ChainID: sourceChainID, + BlockNum: 50, + LogIdx: 0, + Timestamp: 1001, // Future timestamp - INVALID + Checksum: suptypes.MessageChecksum{0x01}, + } + + sourceDB := &algoMockLogsDB{ + containsSeal: suptypes.BlockSeal{Number: 50, Timestamp: 1001}, + } + + validDB := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: validBlockHash, Number: 100, Time: 1000}, + openBlockExecMsg: nil, // No executing messages - valid + } + + invalidDB := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: invalidBlockHash, Number: 200, Time: 1000}, + openBlockExecMsg: map[uint32]*suptypes.ExecutingMessage{ + 0: badExecMsg, + }, + } + + interop := &Interop{ + log: gethlog.New(), + 
logsDBs: map[eth.ChainID]LogsDB{ + sourceChainID: sourceDB, + validChainID: validDB, + invalidChainID: invalidDB, + }, + } + + return interop, 1000, map[eth.ChainID]eth.BlockID{ + validChainID: validBlock, + invalidChainID: invalidBlock, + } + }, + validate: func(t *testing.T, result Result) { + validChainID := eth.ChainIDFromUInt64(8453) + invalidChainID := eth.ChainIDFromUInt64(420) + require.False(t, result.IsValid()) + // Both chains in L2Heads + require.Contains(t, result.L2Heads, validChainID) + require.Contains(t, result.L2Heads, invalidChainID) + // Only invalid in InvalidHeads + require.NotContains(t, result.InvalidHeads, validChainID) + require.Contains(t, result.InvalidHeads, invalidChainID) + }, + }, + // Error cases + { + name: "Errors/OpenBlockError", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + chainID := eth.ChainIDFromUInt64(10) + block := eth.BlockID{Number: 100, Hash: common.HexToHash("0x123")} + + mockDB := &algoMockLogsDB{ + openBlockErr: errors.New("database error"), + } + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{chainID: mockDB}, + } + + return interop, 1000, map[eth.ChainID]eth.BlockID{chainID: block} + }, + expectError: true, + errorMsg: "database error", + validate: func(t *testing.T, result Result) { + require.True(t, result.IsEmpty()) + }, + }, + } - result, err := interop.verifyInteropMessages(1000, map[eth.ChainID]eth.BlockID{ - chainID: block, + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + runVerifyInteropTest(t, tc) }) - - require.Error(t, err) - require.Contains(t, err.Error(), "database error") - require.True(t, result.IsEmpty()) - }) + } } // ============================================================================= diff --git a/op-supernode/supernode/activity/interop/logdb.go b/op-supernode/supernode/activity/interop/logdb.go index b5b3799b34f12..7a382b406c299 100644 --- a/op-supernode/supernode/activity/interop/logdb.go +++ 
b/op-supernode/supernode/activity/interop/logdb.go @@ -54,6 +54,8 @@ func (n *noopLogsDBMetrics) RecordDBSearchEntriesRead(count int64) {} // noopInvalidator implements reads.Invalidator as a no-op. // Used for rewind operations where we don't need cache invalidation. +// noopInvalidator is a stub needed to use the logs.DB.Rewind method. +// read-handle invalidation is not currently used type noopInvalidator struct{} func (n *noopInvalidator) TryInvalidate(rule reads.InvalidationRule) (release func(), err error) { From 7aa96dc6640c8ef44de58680917c7bf16c3f0e84 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 11 Feb 2026 14:20:51 -0600 Subject: [PATCH 21/23] Delete AI Diary --- BlockInvalidation_Feature.md | 494 ----------------------------------- 1 file changed, 494 deletions(-) delete mode 100644 BlockInvalidation_Feature.md diff --git a/BlockInvalidation_Feature.md b/BlockInvalidation_Feature.md deleted file mode 100644 index c5f725cf45c5e..0000000000000 --- a/BlockInvalidation_Feature.md +++ /dev/null @@ -1,494 +0,0 @@ -# BlockInvalidation Feature Diary - -## Feature Overview -**Feature Name:** Block Invalidation & Replacement -**Branch:** `supernode/BlockInvalidation` -**Developer:** Axel Kingsley -**Started:** 2026-02-06 - -### Purpose -Implement a block invalidation mechanism in the op-supernode that: -1. Persists invalid payload hashes in a DenyList (keyed by block height) -2. Triggers chain rewinds when the current chain uses an invalidated block -3. Notifies activities (especially Interop) to clean up cached state on reset -4. Integrates with op-node to deny payloads before insertion and trigger deposits-only replacement - ---- - -## Diary of Interactions - -### Session 1 (Retroactive) — Prior to Skill Adoption - -**Context:** Development began before the op-feature skill was adopted. The following commits were created through iterative prompts. 
- -### Session 2 — Skill Adopted - -**Prompt:** Developer provided op-feature skill prompt and asked me to adopt it. - -**Action:** Created this diary file. Developer granted standing permission to update diary without approval. - -### Session 2.1 — Sub-Feature Review for PR - -**Prompt:** Developer requested detailed breakdown of Sub-Feature 1 for peer review. - -**Action:** Generated comprehensive report (see Sub-Feature 1 section below). - ---- - -## Current State - -### Commits on Branch (9 total) -``` -31ea9484c6 op-acceptance-tests: add replacement block assertions -90db9cf396 op-supernode: implement ResetOn in Interop activity -dbbbc6568c op-supernode: add SetResetCallback to ChainContainer -c920528378 op-supernode: add ResetOn method to Activity interface -539e46aef0 op-node: add SuperAuthority interface for payload denial -227537f99a Fix block invalidation: use eth.Unsafe label and improve test resilience -9a63b131e9 Wire up block invalidation from interop activity to chain container -ae4358d202 op-supernode: Add unit tests for block invalidation -425bbb2d9a op-supernode: Add block invalidation and deny list to chain container -``` - -### Test Coverage Summary - -| Component | Test File | Status | -|-----------|-----------|--------| -| DenyList | `invalidation_test.go` | ✅ Implemented | -| InvalidateBlock | `invalidation_test.go` | ✅ Implemented | -| IsDenied | `invalidation_test.go` | ✅ Implemented | -| VerifiedDB.RewindTo | `verified_db_test.go` | ❌ Not yet tested | -| Interop.ResetOn | `interop_test.go` | ❌ Not yet tested | -| SuperAuthority denial | - | ❌ Not yet tested | -| Acceptance (Halt) | `invalid_message_halt_test.go` | ✅ Exists | -| Acceptance (Replace) | `invalid_message_replacement_test.go` | ✅ Exists | - ---- - -# Sub-Feature Breakdowns - -## Sub-Feature 1: DenyList & InvalidateBlock - -### Commits - -| SHA | Message | Files Changed | -|-----|---------|---------------| -| `425bbb2d9a` | op-supernode: Add block invalidation and deny list to 
chain container | 3 files, +471 | -| `ae4358d202` | op-supernode: Add unit tests for block invalidation | 6 files, +538 | - -### Purpose - -Provide a persistent mechanism to track invalid block payload hashes and trigger chain rewinds when the current chain uses an invalidated block. - -**Why this exists:** When the Interop activity detects an invalid cross-chain executing message, it needs to: -1. Remember that block is invalid (so it's never re-applied) -2. Trigger a rewind if the chain is currently using that block - -### Specification - -#### DenyList -A bbolt-backed key-value store that persists invalid payload hashes keyed by block height. - -| Method | Signature | Behavior | -|--------|-----------|----------| -| `OpenDenyList` | `(dataDir string) (*DenyList, error)` | Opens/creates DB, ensures bucket exists, creates parent dirs | -| `Add` | `(height uint64, payloadHash Hash) error` | Appends hash to height's entry. Idempotent (no duplicates) | -| `Contains` | `(height uint64, payloadHash Hash) (bool, error)` | Returns true if hash exists at height | -| `GetDeniedHashes` | `(height uint64) ([]Hash, error)` | Returns all hashes at height | -| `Close` | `() error` | Closes bbolt DB | - -**Storage format:** -- Key: `uint64` height as 8-byte big-endian -- Value: Concatenated 32-byte hashes - -#### InvalidateBlock -Added to `ChainContainer` interface. - -```go -InvalidateBlock(ctx context.Context, height uint64, payloadHash common.Hash) (bool, error) -``` - -**Behavior:** -1. Add hash to DenyList -2. If engine available, check if current block at `height` matches `payloadHash` -3. If match → call `RewindEngine(ctx, priorTimestamp)` → return `true` -4. If no match → return `false` (no rewind needed) - -#### IsDenied -Helper method on `ChainContainer`: -```go -IsDenied(height uint64, payloadHash common.Hash) (bool, error) -``` -Delegates to `denyList.Contains`. 
- -### Test Coverage - -#### `TestDenyList_AddAndContains` — 4 subcases - -| Subcase | Setup | Assertion | -|---------|-------|-----------| -| `single hash at height` | Add 1 hash at height 100 | `Contains(100, hash)` → `true` | -| `multiple hashes same height` | Add 3 hashes at height 50 | All 3 return `true` from `Contains` | -| `hash at wrong height returns false` | Add hash at height 10 | `Contains(11, hash)` → `false`, `Contains(10, hash)` → `true` | -| `duplicate add is idempotent` | Add same hash 3 times | `GetDeniedHashes` returns exactly 1 entry | - -#### `TestDenyList_Persistence` — 2 subcases - -| Subcase | Setup | Assertion | -|---------|-------|-----------| -| `survives close and reopen` | Add 4 hashes, close DB | Reopen → all 4 hashes present, correct counts | -| `empty DB on fresh open` | (none) | `Contains` → `false`, `GetDeniedHashes` → empty | - -#### `TestDenyList_GetDeniedHashes` — 3 subcases - -| Subcase | Setup | Assertion | -|---------|-------|-----------| -| `returns all hashes at height` | Add 5 hashes at height 100 | `GetDeniedHashes(100)` returns 5 | -| `empty for clean height` | Add at heights 10, 30 | `GetDeniedHashes(20)` → empty | -| `isolated by height` | Add 2 at h10, 3 at h20, 1 at h30 | Correct counts at each height | - -#### `TestInvalidateBlock` — 4 subcases - -| Subcase | Config | Assertion | -|---------|--------|-----------| -| `current block matches triggers rewind` | currentHash == payloadHash | `rewound=true`, `RewindToTimestamp` called with correct ts | -| `current block differs no rewind` | currentHash ≠ payloadHash | `rewound=false`, no rewind call | -| `engine unavailable adds to denylist only` | engine=nil | `rewound=false`, hash still in denylist | -| `rewind to height-1 timestamp calculated correctly` | height=10 | Rewind ts = `genesis + (9 * blockTime)` | - -#### `TestIsDenied` — 3 subcases - -| Subcase | Setup | Assertion | -|---------|-------|-----------| -| `denied block returns true` | Add hash at height 100 | 
`IsDenied(100, hash)` → `true` | -| `non-denied returns false` | Add hash at height 100 | `IsDenied(100, differentHash)` → `false` | -| `wrong height returns false` | Add hash at height 10 | `IsDenied(11, sameHash)` → `false` | - -#### `TestDenyList_ConcurrentAccess` — 1 case - -| Subcase | Setup | Assertion | -|---------|-------|-----------| -| 10 concurrent accessors | 10 goroutines, 100 ops each | All writes succeed, all reads find own hashes, no errors | - -### Untested Behavior - -| Behavior | Why Untested | -|----------|--------------| -| Corrupt bbolt file recovery | Out of scope | -| Very large number of hashes at single height | Performance not tested | - -### Specified Behavior (Clarified) - -| Behavior | Status | -|----------|--------| -| `height=0` | ✅ Works fine, acceptable | -| `RewindEngine` fails | ✅ Hash remains in denylist — **intended behavior** | -| Concurrent access | ✅ Now tested (see `TestDenyList_ConcurrentAccess`) - -### Code Locations - -| Component | File | Lines | -|-----------|------|-------| -| DenyList struct | `chain_container/invalidation.go` | 23-28 | -| OpenDenyList | `chain_container/invalidation.go` | 30-52 | -| Add | `chain_container/invalidation.go` | 62-91 | -| Contains | `chain_container/invalidation.go` | 94-119 | -| GetDeniedHashes | `chain_container/invalidation.go` | 122-143 | -| InvalidateBlock | `chain_container/invalidation.go` | 149-207 | -| IsDenied | `chain_container/invalidation.go` | 218-223 | -| Tests | `chain_container/invalidation_test.go` | 1-514 | - ---- - -## Sub-Feature 2: Wire Interop → ChainContainer - -### Commits - -| SHA | Message | Files Changed | -|-----|---------|---------------| -| `9a63b131e9` | Wire up block invalidation from interop activity | 2 files | -| `227537f99a` | Fix block invalidation: use eth.Unsafe label | 2 files | - -### Purpose - -Connect the Interop activity's invalid message detection to the ChainContainer's invalidation mechanism. - -**Flow:** -1. 
Interop detects invalid executing message in `verifyInteropMessages` -2. `handleResult` sees `InvalidHeads` in the result -3. Calls `invalidateBlock(chainID, blockID)` for each invalid head -4. `invalidateBlock` calls `chain.InvalidateBlock(ctx, blockNum, hash)` - -### Specification - -```go -func (i *Interop) invalidateBlock(chainID eth.ChainID, blockID eth.BlockID) error -``` - -| Scenario | Behavior | -|----------|----------| -| Chain not found | Return error `"chain %s not found"` | -| Chain.InvalidateBlock errors | Log error, return error | -| Chain.InvalidateBlock returns `rewound=true` | Log warn "chain rewound" | -| Chain.InvalidateBlock returns `rewound=false` | Log info "block added to denylist" | - -### Test Coverage - -#### `TestInvalidateBlock` (interop_test.go) — 4 subcases - -| Subcase | Setup | Assertion | -|---------|-------|-----------| -| `calls chain.InvalidateBlock with correct args` | Call invalidateBlock(chainID, blockID) | mock tracks height=500, hash=0xBAD | -| `returns error when chain not found` | Call with unknown chainID | Error contains "not found", no mock calls | -| `returns error when chain.InvalidateBlock fails` | mock returns error | Error propagated | -| `handleResult calls invalidateBlock for each invalid head` | Result with 2 InvalidHeads | Both mocks have 1 call each with correct args | - -### Code Locations - -| Component | File | Lines | -|-----------|------|-------| -| invalidateBlock | `activity/interop/interop.go` | 293-322 | -| Tests | `activity/interop/interop_test.go` | TestInvalidateBlock | - -## Sub-Feature 3: SuperAuthority Injection - -### Commits - -| SHA | Message | Files Changed | -|-----|---------|---------------| -| `539e46aef0` | op-node: add SuperAuthority interface for payload denial | 8 files | - -### Purpose - -Allow the `op-supernode` to inject a "SuperAuthority" into the `op-node` engine controller. 
This authority can deny payloads before they are inserted, triggering deposits-only replacement during Holocene derivation. - -**Flow:** -1. `ChainContainer` implements `SuperAuthority.IsDenied(blockNumber, hash)` -2. Passed via `InitializationOverrides` when creating `VirtualNode` -3. `EngineController` calls `IsDenied` before `NewPayload` -4. If denied + Holocene + derived → request deposits-only replacement -5. If denied otherwise → emit `PayloadInvalidEvent` - -### Specification - -```go -type SuperAuthority interface { - IsDenied(blockNumber uint64, payloadHash common.Hash) (bool, error) -} -``` - -| Scenario | Behavior | -|----------|----------| -| IsDenied returns `(true, nil)` | Payload rejected, replacement requested (Holocene) or invalid event | -| IsDenied returns `(false, nil)` | Payload proceeds to engine | -| IsDenied returns `(_, error)` | Log warning, proceed with payload (graceful degradation) | -| SuperAuthority is nil | No check, proceed with payload | - -### Test Coverage - -#### `TestSuperAuthority_*` (engine_controller_test.go) — 4 tests - -| Test | Setup | Assertion | -|------|-------|-----------| -| `DeniedPayload_EmitsInvalidEvent` | sa.DenyBlock for payload | PayloadInvalidEvent emitted, NewPayload NOT called | -| `AllowedPayload_Proceeds` | sa empty (no deny) | NewPayload called, PayloadSuccessEvent emitted | -| `Error_ProceedsWithPayload` | sa.shouldError = true | NewPayload called despite error, PayloadSuccessEvent emitted | -| `NilAuthority_Proceeds` | sa = nil | NewPayload called, PayloadSuccessEvent emitted | - -### Code Locations - -| Component | File | Lines | -|-----------|------|-------| -| SuperAuthority interface | `op-node/rollup/engine/engine_controller.go` | 96-104 | -| IsDenied check | `op-node/rollup/engine/payload_process.go` | 31-58 | -| InitializationOverrides | `op-node/node/node.go` | InitializationOverrides struct | -| Tests | `op-node/rollup/engine/engine_controller_test.go` | TestSuperAuthority_* | - -## 
Sub-Feature 4: Activity Reset Notification Chain - -### Commits - -| SHA | Message | Files Changed | -|-----|---------|---------------| -| `c920528378` | op-supernode: add ResetOn method to Activity interface | 6 files | -| `dbbbc6568c` | op-supernode: add SetResetCallback to ChainContainer | 3 files | -| `90db9cf396` | op-supernode: implement ResetOn in Interop activity | 6 files | - -### Purpose - -When a chain container rewinds due to block invalidation, all activities must be notified so they can clean up cached state. The Interop activity specifically must rewind its `logsDB` and `verifiedDB`. - -**Flow:** -1. `ChainContainer.InvalidateBlock` triggers a rewind -2. After successful rewind, calls `onReset(chainID, timestamp)` callback -3. `Supernode.onChainReset` receives the notification -4. Iterates through all activities, calling `ResetOn(chainID, timestamp)` -5. Interop: rewinds logsDB and verifiedDB -6. Heartbeat/Superroot: no-op (no cached state) - -### Specification - -```go -// Activity interface -ResetOn(chainID eth.ChainID, timestamp uint64) - -// ChainContainer interface -SetResetCallback(cb ResetCallback) - -// VerifiedDB -RewindTo(timestamp uint64) (deleted bool, err error) -``` - -| Scenario | Behavior | -|----------|----------| -| Previous block available | logsDB.Rewind(prevBlockID) | -| Previous block not found | logsDB.Clear() | -| timestamp ≤ blockTime | logsDB.Clear() | -| Verified results deleted | Log ERROR (unexpected) | - -### Test Coverage - -#### `TestVerifiedDB_RewindTo` (verified_db_test.go) — 4 subcases - -| Subcase | Setup | Assertion | -|---------|-------|-----------| -| `removes entries at and after timestamp` | Commit 100-105, RewindTo(103) | 100-102 exist, 103-105 gone, lastTs=102 | -| `returns false when no entries deleted` | Commit 98-100, RewindTo(200) | All exist, deleted=false | -| `rewind all entries` | Commit 100-102, RewindTo(0) | All gone, lastTs uninitialized | -| `allows sequential commits after rewind` | Commit 
100-105, RewindTo(103), Commit 103 | New 103 data readable | - -#### `TestResetOn` (interop_test.go) — 6 subcases - -| Subcase | Setup | Assertion | -|---------|-------|-----------| -| `rewinds logsDB when previous block available` | mock returns valid block | logsDB.Rewind called with prevBlockID | -| `clears logsDB when previous block not available` | mock returns error | logsDB.Clear called | -| `clears logsDB when timestamp at or before blockTime` | timestamp=1, blockTime=1 | logsDB.Clear called | -| `rewinds verifiedDB` | Commit 98-102, ResetOn(100) | 98-99 exist, 100-102 gone | -| `resets currentL1` | currentL1={500, 0xL1} | currentL1 = {} after reset | -| `handles unknown chain gracefully` | ResetOn(unknownChain, 100) | No panic | - -### Code Locations - -| Component | File | Lines | -|-----------|------|-------| -| ResetOn (Activity interface) | `activity/activity.go` | 4-8 | -| SetResetCallback | `chain_container/chain_container.go` | SetResetCallback method | -| onReset callback | `chain_container/invalidation.go` | In InvalidateBlock | -| Supernode.onChainReset | `supernode/supernode.go` | onChainReset method | -| Interop.ResetOn | `activity/interop/interop.go` | 388-480 | -| VerifiedDB.RewindTo | `activity/interop/verified_db.go` | 184-220 | -| Tests | verified_db_test.go, interop_test.go | TestVerifiedDB_RewindTo, TestResetOn | - -## Sub-Feature 5: Acceptance Tests - -### Commits - -| SHA | Message | Files Changed | -|-----|---------|---------------| -| `31ea9484c6` | op-acceptance-tests: add replacement block assertions | 1 file | - -### Purpose - -End-to-end test verifying the complete block invalidation and replacement flow works in a full supernode environment. - -**Test Flow:** -1. Start supernode with interop chains -2. Send cross-chain message that will be invalid -3. Wait for message to be included in a block -4. Interop activity detects invalid message -5. ChainContainer invalidates block, triggers rewind -6. 
Activities are notified via ResetOn -7. op-node derives replacement block (deposits-only) -8. New block at same height has different hash -9. Invalid transaction is NOT in replacement block -10. Timestamp eventually becomes verified - -### Test: `TestSupernodeInteropInvalidMessageReplacement` - -**Location:** `op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go` - -### Phases - -| Phase | What is verified | -|-------|------------------| -| 1. Setup | Supernode running, chains synced | -| 2. Send invalid message | Cross-chain exec message sent, receipt obtained | -| 3. Observe reset | Block at invalid height changes or disappears | -| 4. Detect replacement | New block at same height with different hash | -| 5. Verify replacement | Replacement hash ≠ invalid hash, invalid tx not in replacement | -| 6. Verify timestamp | `SuperRootAtTimestamp` returns verified data | - -### Assertions - -| Assertion | Purpose | -|-----------|---------| -| `resetDetected = true` | Rewind was triggered | -| `replacementDetected = true` | New block created at same height | -| `replacementHash ≠ invalidHash` | Block was actually replaced | -| `invalidTx NOT in replacement` | Invalid transaction removed | -| `verified = true` | Replacement passes verification | - -### Code Location - -| Component | File | -|-----------|------| -| Test | `op-acceptance-tests/tests/supernode/interop/invalid_message_replacement_test.go` | - ---- - -## Test Summary - -All unit tests added for missing coverage: - -| Test File | Tests Added | -|-----------|-------------| -| `chain_container/invalidation_test.go` | `TestDenyList_ConcurrentAccess` | -| `activity/interop/interop_test.go` | `TestInvalidateBlock` (4 cases), `TestResetOn` (6 cases) | -| `activity/interop/verified_db_test.go` | `TestVerifiedDB_RewindTo` (4 cases) | -| `op-node/rollup/engine/engine_controller_test.go` | `TestSuperAuthority_*` (4 tests) | - -## Next Steps - -Ready to commit all tests as "fill in missing unit 
tests" commit. - ---- - -## Sub-Feature 6: Test Control for Interop Activity - -### Purpose -Provide integration test control for pausing and resuming the interop activity at specific timestamps. This allows acceptance tests to precisely control when interop validation occurs. - -### Specification -- `PauseInterop(ts uint64)`: When called, the interop activity pauses at the given timestamp - if it would process that timestamp in its progress loop, it returns early without making progress. -- `ResumeInterop()`: Clears the pause, allowing normal processing to continue. -- Zero value for `ts` indicates "not paused" (always process all values). -- Values are stored atomically for concurrent read/write safety. -- This is test-only functionality, not wired at production level. - -### Implementation - -| Component | Location | Changes | -|-----------|----------|---------| -| Interop Activity | `op-supernode/supernode/activity/interop/interop.go` | Added `pauseAtTimestamp atomic.Uint64` field, `PauseAt(ts)` and `Resume()` methods, check in `progressInterop()` | -| Supernode | `op-supernode/supernode/supernode.go` | Added `PauseInterop(ts)` and `ResumeInterop()` methods that delegate to interop activity | -| sysgo.SuperNode | `op-devstack/sysgo/l2_cl_supernode.go` | Added `PauseInterop(ts)` and `ResumeInterop()` methods | -| Stack Interface | `op-devstack/stack/supernode.go` | Added `InteropTestControl` interface | -| Orchestrator | `op-devstack/sysgo/orchestrator.go` | Added `InteropTestControl(id)` method to get test control for a supernode | -| DSL Supernode | `op-devstack/dsl/supernode.go` | Added `testControl` field, `NewSupernodeWithTestControl()` constructor, `PauseInterop(ts)` and `ResumeInterop()` methods | -| Preset | `op-devstack/presets/twol2.go` | Wire up `InteropTestControl` in `NewTwoL2SupernodeInterop()` | - -### Usage in Tests - -```go -// Pause interop at a specific timestamp -sys.Supernode.PauseInterop(targetTimestamp + 1) - -// ... 
perform test actions ... - -// Resume interop processing -sys.Supernode.ResumeInterop() -``` - -### Test Coverage -Test-only functionality - exercised through acceptance test usage. From 7c2817b29ebc257138317222ea66f43e4919514e Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 11 Feb 2026 16:22:55 -0600 Subject: [PATCH 22/23] Final PR Comments --- .../supernode/activity/interop/interop.go | 9 +- .../activity/interop/interop_test.go | 1771 +++++++++-------- .../supernode/activity/interop/verified_db.go | 4 +- .../activity/interop/verified_db_test.go | 8 +- 4 files changed, 938 insertions(+), 854 deletions(-) diff --git a/op-supernode/supernode/activity/interop/interop.go b/op-supernode/supernode/activity/interop/interop.go index 8bb68551de10e..dd968f139ff76 100644 --- a/op-supernode/supernode/activity/interop/interop.go +++ b/op-supernode/supernode/activity/interop/interop.go @@ -395,7 +395,7 @@ func (i *Interop) VerifiedAtTimestamp(ts uint64) (bool, error) { } // Reset is called when a chain container resets to a given timestamp. -// It clears the logsDB for that chain and removes any verified results at or after the timestamp. +// It prunes the logsDB and verifiedDB for that chain at and after the timestamp. func (i *Interop) Reset(chainID eth.ChainID, timestamp uint64) { i.mu.Lock() defer i.mu.Unlock() @@ -417,7 +417,7 @@ func (i *Interop) Reset(chainID eth.ChainID, timestamp uint64) { } i.resetLogsDB(chainID, chain, db, timestamp) - i.resetVerifiedDB(chainID, timestamp) + i.resetVerifiedDB(timestamp) // Reset the currentL1 to force re-evaluation i.currentL1 = eth.BlockID{} @@ -458,12 +458,12 @@ func (i *Interop) resetLogsDB(chainID eth.ChainID, chain cc.ChainContainer, db L } // resetVerifiedDB removes any verified results at or after the given timestamp. 
-func (i *Interop) resetVerifiedDB(chainID eth.ChainID, timestamp uint64) { +func (i *Interop) resetVerifiedDB(timestamp uint64) { if i.verifiedDB == nil { return } - deleted, err := i.verifiedDB.RewindTo(timestamp) + deleted, err := i.verifiedDB.Rewind(timestamp) if err != nil { i.log.Error("failed to rewind verifiedDB", "timestamp", timestamp, @@ -474,7 +474,6 @@ func (i *Interop) resetVerifiedDB(chainID eth.ChainID, timestamp uint64) { // This is unexpected - we shouldn't have verified results at timestamps // that are being reset. Log an error for visibility. i.log.Error("UNEXPECTED: verified results were deleted on reset", - "chainID", chainID, "timestamp", timestamp, ) } diff --git a/op-supernode/supernode/activity/interop/interop_test.go b/op-supernode/supernode/activity/interop/interop_test.go index a0180d5ec81ca..f109680a5ffd6 100644 --- a/op-supernode/supernode/activity/interop/interop_test.go +++ b/op-supernode/supernode/activity/interop/interop_test.go @@ -22,225 +22,330 @@ import ( ) // ============================================================================= -// TestNew +// Test Harness // ============================================================================= -func TestNew(t *testing.T) { - t.Parallel() +// interopTestHarness provides a builder-pattern test setup for Interop tests. +// It reduces boilerplate by handling common setup: temp directories, mock chains, +// interop creation, context assignment, and cleanup. +type interopTestHarness struct { + t *testing.T + interop *Interop + mocks map[eth.ChainID]*mockChainContainer + activationTime uint64 + dataDir string + skipBuild bool // for tests that need custom construction +} - t.Run("valid inputs initializes all components", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() +// newInteropTestHarness creates a new test harness with sensible defaults. 
+func newInteropTestHarness(t *testing.T) *interopTestHarness { + t.Helper() + t.Parallel() + return &interopTestHarness{ + t: t, + mocks: make(map[eth.ChainID]*mockChainContainer), + activationTime: 1000, + dataDir: t.TempDir(), + } +} - chains := map[eth.ChainID]cc.ChainContainer{ - eth.ChainIDFromUInt64(10): newMockChainContainer(10), - eth.ChainIDFromUInt64(8453): newMockChainContainer(8453), - } +// WithActivation sets the interop activation timestamp. +func (h *interopTestHarness) WithActivation(ts uint64) *interopTestHarness { + h.activationTime = ts + return h +} - interop := New(testLogger(), 1000, chains, dataDir) +// WithDataDir sets a custom data directory (useful for error testing). +func (h *interopTestHarness) WithDataDir(dir string) *interopTestHarness { + h.dataDir = dir + return h +} - require.NotNil(t, interop) - require.Equal(t, uint64(1000), interop.activationTimestamp) - require.NotNil(t, interop.verifiedDB) - require.Len(t, interop.chains, 2) - require.Len(t, interop.logsDBs, 2) - require.NotNil(t, interop.verifyFn) +// WithChain adds a mock chain container with optional configuration. +func (h *interopTestHarness) WithChain(id uint64, configure func(*mockChainContainer)) *interopTestHarness { + mock := newMockChainContainer(id) + if configure != nil { + configure(mock) + } + h.mocks[mock.id] = mock + return h +} - // Verify logsDBs populated for each chain - for chainID := range chains { - require.Contains(t, interop.logsDBs, chainID) - require.NotNil(t, interop.logsDBs[chainID]) - } - }) +// SkipBuild marks that Build() should not create an Interop instance. +// Useful for tests that need to test New() directly. +func (h *interopTestHarness) SkipBuild() *interopTestHarness { + h.skipBuild = true + return h +} - t.Run("invalid dataDir returns nil", func(t *testing.T) { - t.Parallel() +// Build creates the Interop instance from configured mocks. +// Sets up context and registers cleanup. 
+func (h *interopTestHarness) Build() *interopTestHarness { + if h.skipBuild { + return h + } + chains := make(map[eth.ChainID]cc.ChainContainer) + for id, mock := range h.mocks { + chains[id] = mock + } + h.interop = New(testLogger(), h.activationTime, chains, h.dataDir) + if h.interop != nil { + h.interop.ctx = context.Background() + h.t.Cleanup(func() { _ = h.interop.Stop(context.Background()) }) + } + return h +} - interop := New(testLogger(), 1000, map[eth.ChainID]cc.ChainContainer{}, "/nonexistent/path") +// Chains returns the map of chain containers for use with New(). +func (h *interopTestHarness) Chains() map[eth.ChainID]cc.ChainContainer { + chains := make(map[eth.ChainID]cc.ChainContainer) + for id, mock := range h.mocks { + chains[id] = mock + } + return chains +} - require.Nil(t, interop) - }) +// Mock returns the mock for a given chain ID. +func (h *interopTestHarness) Mock(id uint64) *mockChainContainer { + return h.mocks[eth.ChainIDFromUInt64(id)] } // ============================================================================= -// TestStartStop +// TestNew // ============================================================================= -func TestStartStop(t *testing.T) { +func TestNew(t *testing.T) { t.Parallel() - t.Run("Start blocks until context cancelled", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - mock.currentL1 = eth.BlockRef{Number: 100, Hash: common.HexToHash("0x1")} - mock.blockAtTimestamp = eth.L2BlockRef{Number: 50} - - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - defer func() { _ = interop.Stop(context.Background()) }() - - ctx, cancel := context.WithCancel(context.Background()) - done := make(chan error, 1) - go func() { done <- interop.Start(ctx) }() - - // Wait for start - require.Eventually(t, func() bool { - interop.mu.RLock() - defer interop.mu.RUnlock() - return 
interop.started - }, 5*time.Second, 100*time.Millisecond) - - cancel() - - var err error - require.Eventually(t, func() bool { - select { - case err = <-done: - return true - default: - return false - } - }, 5*time.Second, 100*time.Millisecond) - require.ErrorIs(t, err, context.Canceled) - }) - - t.Run("double Start blocked", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - mock.currentL1 = eth.BlockRef{Number: 100, Hash: common.HexToHash("0x1")} - - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - defer func() { _ = interop.Stop(context.Background()) }() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - go func() { _ = interop.Start(ctx) }() - - require.Eventually(t, func() bool { - interop.mu.RLock() - defer interop.mu.RUnlock() - return interop.started - }, 5*time.Second, 100*time.Millisecond) - - ctx2, cancel2 := context.WithTimeout(context.Background(), 500*time.Millisecond) - defer cancel2() - - err := interop.Start(ctx2) - require.ErrorIs(t, err, context.DeadlineExceeded) - }) - - t.Run("Stop cancels running Start and closes DB", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - mock.currentL1 = eth.BlockRef{Number: 100, Hash: common.HexToHash("0x1")} - mock.blockAtTimestampErr = ethereum.NotFound - - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - - done := make(chan error, 1) - go func() { done <- interop.Start(context.Background()) }() - - require.Eventually(t, func() bool { - interop.mu.RLock() - defer interop.mu.RUnlock() - return interop.started - }, 5*time.Second, 100*time.Millisecond) - - err := interop.Stop(context.Background()) - require.NoError(t, err) + tests := []struct { + name string + setup func(h *interopTestHarness) 
*interopTestHarness + run func(t *testing.T, h *interopTestHarness) + }{ + { + name: "valid inputs initializes all components", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, nil).WithChain(8453, nil).SkipBuild() + }, + run: func(t *testing.T, h *interopTestHarness) { + interop := New(testLogger(), h.activationTime, h.Chains(), h.dataDir) + require.NotNil(t, interop) + t.Cleanup(func() { _ = interop.Stop(context.Background()) }) + + require.Equal(t, uint64(1000), interop.activationTimestamp) + require.NotNil(t, interop.verifiedDB) + require.Len(t, interop.chains, 2) + require.Len(t, interop.logsDBs, 2) + require.NotNil(t, interop.verifyFn) + + for chainID := range h.Chains() { + require.Contains(t, interop.logsDBs, chainID) + require.NotNil(t, interop.logsDBs[chainID]) + } + }, + }, + { + name: "invalid dataDir returns nil", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithDataDir("/nonexistent/path").SkipBuild() + }, + run: func(t *testing.T, h *interopTestHarness) { + interop := New(testLogger(), h.activationTime, h.Chains(), h.dataDir) + require.Nil(t, interop) + }, + }, + } - require.Eventually(t, func() bool { - select { - case <-done: - return true - default: - return false - } - }, 5*time.Second, 100*time.Millisecond) - - // Verify DB is closed - _, err = interop.verifiedDB.Has(100) - require.Error(t, err) - }) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newInteropTestHarness(t) + tc.setup(h) + tc.run(t, h) + }) + } } // ============================================================================= -// TestCollectCurrentL1 +// TestStartStop // ============================================================================= -func TestCollectCurrentL1(t *testing.T) { +func TestStartStop(t *testing.T) { t.Parallel() - t.Run("returns minimum L1 across multiple chains", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock1 := newMockChainContainer(10) - 
mock1.currentL1 = eth.BlockRef{Number: 200, Hash: common.HexToHash("0x2")} - - mock2 := newMockChainContainer(8453) - mock2.currentL1 = eth.BlockRef{Number: 100, Hash: common.HexToHash("0x1")} // minimum - - chains := map[eth.ChainID]cc.ChainContainer{mock1.id: mock1, mock2.id: mock2} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() - - l1, err := interop.collectCurrentL1() - - require.NoError(t, err) - require.Equal(t, uint64(100), l1.Number) - require.Equal(t, common.HexToHash("0x1"), l1.Hash) - }) - - t.Run("single chain returns its L1", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() + tests := []struct { + name string + setup func(h *interopTestHarness) *interopTestHarness + run func(t *testing.T, h *interopTestHarness) + }{ + { + name: "Start blocks until context cancelled", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.currentL1 = eth.BlockRef{Number: 100, Hash: common.HexToHash("0x1")} + m.blockAtTimestamp = eth.L2BlockRef{Number: 50} + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan error, 1) + go func() { done <- h.interop.Start(ctx) }() + + require.Eventually(t, func() bool { + h.interop.mu.RLock() + defer h.interop.mu.RUnlock() + return h.interop.started + }, 5*time.Second, 100*time.Millisecond) + + cancel() + + var err error + require.Eventually(t, func() bool { + select { + case err = <-done: + return true + default: + return false + } + }, 5*time.Second, 100*time.Millisecond) + require.ErrorIs(t, err, context.Canceled) + }, + }, + { + name: "double Start blocked", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.currentL1 = eth.BlockRef{Number: 100, Hash: common.HexToHash("0x1")} + }).Build() + }, + run: func(t *testing.T, h 
*interopTestHarness) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - mock := newMockChainContainer(10) - mock.currentL1 = eth.BlockRef{Number: 500, Hash: common.HexToHash("0x5")} + go func() { _ = h.interop.Start(ctx) }() - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() + require.Eventually(t, func() bool { + h.interop.mu.RLock() + defer h.interop.mu.RUnlock() + return h.interop.started + }, 5*time.Second, 100*time.Millisecond) - l1, err := interop.collectCurrentL1() + ctx2, cancel2 := context.WithTimeout(context.Background(), 500*time.Millisecond) + defer cancel2() - require.NoError(t, err) - require.Equal(t, uint64(500), l1.Number) - }) + err := h.interop.Start(ctx2) + require.ErrorIs(t, err, context.DeadlineExceeded) + }, + }, + { + name: "Stop cancels running Start and closes DB", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.currentL1 = eth.BlockRef{Number: 100, Hash: common.HexToHash("0x1")} + m.blockAtTimestampErr = ethereum.NotFound + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + done := make(chan error, 1) + go func() { done <- h.interop.Start(context.Background()) }() + + require.Eventually(t, func() bool { + h.interop.mu.RLock() + defer h.interop.mu.RUnlock() + return h.interop.started + }, 5*time.Second, 100*time.Millisecond) + + err := h.interop.Stop(context.Background()) + require.NoError(t, err) + + require.Eventually(t, func() bool { + select { + case <-done: + return true + default: + return false + } + }, 5*time.Second, 100*time.Millisecond) + + // Verify DB is closed + _, err = h.interop.verifiedDB.Has(100) + require.Error(t, err) + }, + }, + } - t.Run("chain error propagated", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() + for _, tc := range tests { + t.Run(tc.name, func(t 
*testing.T) { + h := newInteropTestHarness(t) + tc.setup(h) + tc.run(t, h) + }) + } +} - mock := newMockChainContainer(10) - mock.currentL1Err = errors.New("chain not synced") +// ============================================================================= +// TestCollectCurrentL1 +// ============================================================================= - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() +func TestCollectCurrentL1(t *testing.T) { + t.Parallel() - l1, err := interop.collectCurrentL1() + tests := []struct { + name string + setup func(h *interopTestHarness) *interopTestHarness + run func(t *testing.T, h *interopTestHarness) + }{ + { + name: "returns minimum L1 across multiple chains", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.currentL1 = eth.BlockRef{Number: 200, Hash: common.HexToHash("0x2")} + }).WithChain(8453, func(m *mockChainContainer) { + m.currentL1 = eth.BlockRef{Number: 100, Hash: common.HexToHash("0x1")} // minimum + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + l1, err := h.interop.collectCurrentL1() + require.NoError(t, err) + require.Equal(t, uint64(100), l1.Number) + require.Equal(t, common.HexToHash("0x1"), l1.Hash) + }, + }, + { + name: "single chain returns its L1", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.currentL1 = eth.BlockRef{Number: 500, Hash: common.HexToHash("0x5")} + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + l1, err := h.interop.collectCurrentL1() + require.NoError(t, err) + require.Equal(t, uint64(500), l1.Number) + }, + }, + { + name: "chain error propagated", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.currentL1Err 
= errors.New("chain not synced") + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + l1, err := h.interop.collectCurrentL1() + require.Error(t, err) + require.Contains(t, err.Error(), "not ready") + require.Equal(t, eth.BlockID{}, l1) + }, + }, + } - require.Error(t, err) - require.Contains(t, err.Error(), "not ready") - require.Equal(t, eth.BlockID{}, l1) - }) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newInteropTestHarness(t) + tc.setup(h) + tc.run(t, h) + }) + } } // ============================================================================= @@ -250,70 +355,69 @@ func TestCollectCurrentL1(t *testing.T) { func TestCheckChainsReady(t *testing.T) { t.Parallel() - t.Run("all chains ready returns blocks", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock1 := newMockChainContainer(10) - mock1.blockAtTimestamp = eth.L2BlockRef{Number: 100, Hash: common.HexToHash("0x1")} - - mock2 := newMockChainContainer(8453) - mock2.blockAtTimestamp = eth.L2BlockRef{Number: 200, Hash: common.HexToHash("0x2")} - - chains := map[eth.ChainID]cc.ChainContainer{mock1.id: mock1, mock2.id: mock2} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() - - blocks, err := interop.checkChainsReady(1000) - - require.NoError(t, err) - require.Len(t, blocks, 2) - require.NotEqual(t, common.Hash{}, blocks[mock1.id].Hash) - require.NotEqual(t, common.Hash{}, blocks[mock2.id].Hash) - }) - - t.Run("one chain not ready returns error", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock1 := newMockChainContainer(10) - mock1.blockAtTimestamp = eth.L2BlockRef{Number: 100} - - mock2 := newMockChainContainer(8453) - mock2.blockAtTimestampErr = ethereum.NotFound - - chains := map[eth.ChainID]cc.ChainContainer{mock1.id: mock1, mock2.id: mock2} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = 
context.Background() - - blocks, err := interop.checkChainsReady(1000) - - require.Error(t, err) - require.Nil(t, blocks) - }) - - t.Run("parallel execution works", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - chains := make(map[eth.ChainID]cc.ChainContainer) - for i := 0; i < 5; i++ { - mock := newMockChainContainer(uint64(10 + i)) - mock.blockAtTimestamp = eth.L2BlockRef{Number: uint64(100 + i)} - chains[mock.id] = mock - } - - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() - - blocks, err := interop.checkChainsReady(1000) + tests := []struct { + name string + setup func(h *interopTestHarness) *interopTestHarness + run func(t *testing.T, h *interopTestHarness) + }{ + { + name: "all chains ready returns blocks", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.blockAtTimestamp = eth.L2BlockRef{Number: 100, Hash: common.HexToHash("0x1")} + }).WithChain(8453, func(m *mockChainContainer) { + m.blockAtTimestamp = eth.L2BlockRef{Number: 200, Hash: common.HexToHash("0x2")} + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + blocks, err := h.interop.checkChainsReady(1000) + require.NoError(t, err) + require.Len(t, blocks, 2) + require.NotEqual(t, common.Hash{}, blocks[h.Mock(10).id].Hash) + require.NotEqual(t, common.Hash{}, blocks[h.Mock(8453).id].Hash) + }, + }, + { + name: "one chain not ready returns error", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.blockAtTimestamp = eth.L2BlockRef{Number: 100} + }).WithChain(8453, func(m *mockChainContainer) { + m.blockAtTimestampErr = ethereum.NotFound + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + blocks, err := h.interop.checkChainsReady(1000) + require.Error(t, err) + require.Nil(t, blocks) + }, + }, + { + name: "parallel execution works", + setup: 
func(h *interopTestHarness) *interopTestHarness { + for i := 0; i < 5; i++ { + idx := i // capture loop var + h.WithChain(uint64(10+idx), func(m *mockChainContainer) { + m.blockAtTimestamp = eth.L2BlockRef{Number: uint64(100 + idx)} + }) + } + return h.Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + blocks, err := h.interop.checkChainsReady(1000) + require.NoError(t, err) + require.Len(t, blocks, 5) + }, + }, + } - require.NoError(t, err) - require.Len(t, blocks, 5) - }) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newInteropTestHarness(t) + tc.setup(h) + tc.run(t, h) + }) + } } // ============================================================================= @@ -323,119 +427,112 @@ func TestCheckChainsReady(t *testing.T) { func TestProgressInterop(t *testing.T) { t.Parallel() - t.Run("not initialized uses activation timestamp", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - mock.blockAtTimestamp = eth.L2BlockRef{Number: 100, Hash: common.HexToHash("0x1")} - - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 5000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() - - var capturedTimestamp uint64 - interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { - capturedTimestamp = ts - return Result{Timestamp: ts, L2Heads: blocks}, nil - } - - result, err := interop.progressInterop() - - require.NoError(t, err) - require.Equal(t, uint64(5000), result.Timestamp) - require.Equal(t, uint64(5000), capturedTimestamp) - }) - - t.Run("initialized uses next timestamp", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - mock.blockAtTimestamp = eth.L2BlockRef{Number: 100, Hash: common.HexToHash("0x1")} - - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - 
require.NotNil(t, interop) - interop.ctx = context.Background() - interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { - return Result{Timestamp: ts, L2Heads: blocks}, nil - } - - // First progress - result1, err := interop.progressInterop() - require.NoError(t, err) - require.Equal(t, uint64(1000), result1.Timestamp) - - // Commit - err = interop.handleResult(result1) - require.NoError(t, err) - - // Second progress should use next timestamp - result2, err := interop.progressInterop() - require.NoError(t, err) - require.Equal(t, uint64(1001), result2.Timestamp) - }) - - t.Run("chains not ready returns empty result", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - mock.blockAtTimestampErr = ethereum.NotFound - - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() - - result, err := interop.progressInterop() - - require.NoError(t, err) - require.True(t, result.IsEmpty()) - }) - - t.Run("chain error propagated", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - mock.blockAtTimestampErr = errors.New("internal error") - - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() - - result, err := interop.progressInterop() - - require.Error(t, err) - require.True(t, result.IsEmpty()) - }) - - t.Run("verifyFn error propagated", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - mock.currentL1 = eth.BlockRef{Number: 1000, Hash: common.HexToHash("0xL1")} - mock.blockAtTimestamp = eth.L2BlockRef{Number: 500, Hash: common.HexToHash("0xL2")} - - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 100, 
chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() - interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { - return Result{}, errors.New("verification failed") - } - - result, err := interop.progressInterop() + tests := []struct { + name string + setup func(h *interopTestHarness) *interopTestHarness + run func(t *testing.T, h *interopTestHarness) + }{ + { + name: "not initialized uses activation timestamp", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithActivation(5000).WithChain(10, func(m *mockChainContainer) { + m.blockAtTimestamp = eth.L2BlockRef{Number: 100, Hash: common.HexToHash("0x1")} + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + var capturedTimestamp uint64 + h.interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { + capturedTimestamp = ts + return Result{Timestamp: ts, L2Heads: blocks}, nil + } + + result, err := h.interop.progressInterop() + require.NoError(t, err) + require.Equal(t, uint64(5000), result.Timestamp) + require.Equal(t, uint64(5000), capturedTimestamp) + }, + }, + { + name: "initialized uses next timestamp", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.blockAtTimestamp = eth.L2BlockRef{Number: 100, Hash: common.HexToHash("0x1")} + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + h.interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { + return Result{Timestamp: ts, L2Heads: blocks}, nil + } + + // First progress + result1, err := h.interop.progressInterop() + require.NoError(t, err) + require.Equal(t, uint64(1000), result1.Timestamp) + + // Commit + err = h.interop.handleResult(result1) + require.NoError(t, err) + + // Second progress should use next timestamp + result2, err := h.interop.progressInterop() + require.NoError(t, err) + require.Equal(t, uint64(1001), 
result2.Timestamp) + }, + }, + { + name: "chains not ready returns empty result", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.blockAtTimestampErr = ethereum.NotFound + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + result, err := h.interop.progressInterop() + require.NoError(t, err) + require.True(t, result.IsEmpty()) + }, + }, + { + name: "chain error propagated", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.blockAtTimestampErr = errors.New("internal error") + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + result, err := h.interop.progressInterop() + require.Error(t, err) + require.True(t, result.IsEmpty()) + }, + }, + { + name: "verifyFn error propagated", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithActivation(100).WithChain(10, func(m *mockChainContainer) { + m.currentL1 = eth.BlockRef{Number: 1000, Hash: common.HexToHash("0xL1")} + m.blockAtTimestamp = eth.L2BlockRef{Number: 500, Hash: common.HexToHash("0xL2")} + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + h.interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { + return Result{}, errors.New("verification failed") + } + + result, err := h.interop.progressInterop() + require.Error(t, err) + require.Contains(t, err.Error(), "verification failed") + require.True(t, result.IsEmpty()) + }, + }, + } - require.Error(t, err) - require.Contains(t, err.Error(), "verification failed") - require.True(t, result.IsEmpty()) - }) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newInteropTestHarness(t) + tc.setup(h) + tc.run(t, h) + }) + } } // ============================================================================= @@ -445,63 +542,73 @@ func TestProgressInterop(t *testing.T) { func TestVerifiedAtTimestamp(t *testing.T) { 
t.Parallel() - t.Run("before activation always verified", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - interop := New(testLogger(), 1000, map[eth.ChainID]cc.ChainContainer{}, dataDir) - require.NotNil(t, interop) - - verified, err := interop.VerifiedAtTimestamp(999) - require.NoError(t, err) - require.True(t, verified) - - verified, err = interop.VerifiedAtTimestamp(0) - require.NoError(t, err) - require.True(t, verified) - }) - - t.Run("at/after activation not verified until committed", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - interop := New(testLogger(), 1000, map[eth.ChainID]cc.ChainContainer{}, dataDir) - require.NotNil(t, interop) - - verified, err := interop.VerifiedAtTimestamp(1000) - require.NoError(t, err) - require.False(t, verified) - - verified, err = interop.VerifiedAtTimestamp(9999) - require.NoError(t, err) - require.False(t, verified) - }) - - t.Run("committed timestamp verified", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - mock.blockAtTimestamp = eth.L2BlockRef{Number: 100} + tests := []struct { + name string + setup func(h *interopTestHarness) *interopTestHarness + run func(t *testing.T, h *interopTestHarness) + }{ + { + name: "before activation always verified", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + verified, err := h.interop.VerifiedAtTimestamp(999) + require.NoError(t, err) + require.True(t, verified) + + verified, err = h.interop.VerifiedAtTimestamp(0) + require.NoError(t, err) + require.True(t, verified) + }, + }, + { + name: "at/after activation not verified until committed", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + verified, err := h.interop.VerifiedAtTimestamp(1000) + require.NoError(t, err) + require.False(t, verified) + + verified, err = 
h.interop.VerifiedAtTimestamp(9999) + require.NoError(t, err) + require.False(t, verified) + }, + }, + { + name: "committed timestamp verified", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.blockAtTimestamp = eth.L2BlockRef{Number: 100} + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + h.interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { + return Result{Timestamp: ts, L2Heads: blocks}, nil + } - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() - interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { - return Result{Timestamp: ts, L2Heads: blocks}, nil - } + result, err := h.interop.progressInterop() + require.NoError(t, err) - result, err := interop.progressInterop() - require.NoError(t, err) + err = h.interop.handleResult(result) + require.NoError(t, err) - err = interop.handleResult(result) - require.NoError(t, err) + verified, err := h.interop.VerifiedAtTimestamp(1000) + require.NoError(t, err) + require.True(t, verified) + }, + }, + } - verified, err := interop.VerifiedAtTimestamp(1000) - require.NoError(t, err) - require.True(t, verified) - }) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newInteropTestHarness(t) + tc.setup(h) + tc.run(t, h) + }) + } } // ============================================================================= @@ -511,79 +618,89 @@ func TestVerifiedAtTimestamp(t *testing.T) { func TestHandleResult(t *testing.T) { t.Parallel() - t.Run("empty result is no-op", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - interop := New(testLogger(), 1000, map[eth.ChainID]cc.ChainContainer{}, dataDir) - require.NotNil(t, interop) - - err := interop.handleResult(Result{}) - require.NoError(t, err) - - has, err := 
interop.verifiedDB.Has(0) - require.NoError(t, err) - require.False(t, has) - }) - - t.Run("valid result commits to DB with correct data", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - - validResult := Result{ - Timestamp: 1000, - L1Head: eth.BlockID{Number: 100, Hash: common.HexToHash("0xL1")}, - L2Heads: map[eth.ChainID]eth.BlockID{ - mock.id: {Number: 500, Hash: common.HexToHash("0xL2")}, + tests := []struct { + name string + setup func(h *interopTestHarness) *interopTestHarness + run func(t *testing.T, h *interopTestHarness) + }{ + { + name: "empty result is no-op", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.Build() }, - } - - err := interop.handleResult(validResult) - require.NoError(t, err) - - has, err := interop.verifiedDB.Has(1000) - require.NoError(t, err) - require.True(t, has) - - retrieved, err := interop.verifiedDB.Get(1000) - require.NoError(t, err) - require.Equal(t, validResult.Timestamp, retrieved.Timestamp) - require.Equal(t, validResult.L1Head, retrieved.L1Head) - require.Equal(t, validResult.L2Heads[mock.id], retrieved.L2Heads[mock.id]) - }) - - t.Run("invalid result does not commit", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() + run: func(t *testing.T, h *interopTestHarness) { + err := h.interop.handleResult(Result{}) + require.NoError(t, err) - mock := newMockChainContainer(10) - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - - invalidResult := Result{ - Timestamp: 1000, - L1Head: eth.BlockID{Number: 100, Hash: common.HexToHash("0xL1")}, - L2Heads: map[eth.ChainID]eth.BlockID{ - mock.id: {Number: 500, Hash: common.HexToHash("0xL2")}, + has, err := h.interop.verifiedDB.Has(0) + require.NoError(t, err) + 
require.False(t, has) }, - InvalidHeads: map[eth.ChainID]eth.BlockID{ - mock.id: {Number: 500, Hash: common.HexToHash("0xBAD")}, + }, + { + name: "valid result commits to DB with correct data", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, nil).Build() }, - } - - err := interop.handleResult(invalidResult) - require.NoError(t, err) + run: func(t *testing.T, h *interopTestHarness) { + mock := h.Mock(10) + validResult := Result{ + Timestamp: 1000, + L1Head: eth.BlockID{Number: 100, Hash: common.HexToHash("0xL1")}, + L2Heads: map[eth.ChainID]eth.BlockID{ + mock.id: {Number: 500, Hash: common.HexToHash("0xL2")}, + }, + } + + err := h.interop.handleResult(validResult) + require.NoError(t, err) + + has, err := h.interop.verifiedDB.Has(1000) + require.NoError(t, err) + require.True(t, has) + + retrieved, err := h.interop.verifiedDB.Get(1000) + require.NoError(t, err) + require.Equal(t, validResult.Timestamp, retrieved.Timestamp) + require.Equal(t, validResult.L1Head, retrieved.L1Head) + require.Equal(t, validResult.L2Heads[mock.id], retrieved.L2Heads[mock.id]) + }, + }, + { + name: "invalid result does not commit", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, nil).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + mock := h.Mock(10) + invalidResult := Result{ + Timestamp: 1000, + L1Head: eth.BlockID{Number: 100, Hash: common.HexToHash("0xL1")}, + L2Heads: map[eth.ChainID]eth.BlockID{ + mock.id: {Number: 500, Hash: common.HexToHash("0xL2")}, + }, + InvalidHeads: map[eth.ChainID]eth.BlockID{ + mock.id: {Number: 500, Hash: common.HexToHash("0xBAD")}, + }, + } + + err := h.interop.handleResult(invalidResult) + require.NoError(t, err) + + has, err := h.interop.verifiedDB.Has(1000) + require.NoError(t, err) + require.False(t, has) + }, + }, + } - has, err := interop.verifiedDB.Has(1000) - require.NoError(t, err) - require.False(t, has) - }) + for _, tc := range tests { + t.Run(tc.name, 
func(t *testing.T) { + h := newInteropTestHarness(t) + tc.setup(h) + tc.run(t, h) + }) + } } // ============================================================================= @@ -595,103 +712,102 @@ func TestHandleResult(t *testing.T) { func TestInvalidateBlock(t *testing.T) { t.Parallel() - t.Run("calls chain.InvalidateBlock with correct args", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() - - blockID := eth.BlockID{Number: 500, Hash: common.HexToHash("0xBAD")} - err := interop.invalidateBlock(mock.id, blockID) - require.NoError(t, err) - - // Verify InvalidateBlock was called with correct arguments - require.Len(t, mock.invalidateBlockCalls, 1) - require.Equal(t, uint64(500), mock.invalidateBlockCalls[0].height) - require.Equal(t, common.HexToHash("0xBAD"), mock.invalidateBlockCalls[0].payloadHash) - }) - - t.Run("returns error when chain not found", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() - - // Try to invalidate on a chain that doesn't exist - unknownChain := eth.ChainIDFromUInt64(999) - blockID := eth.BlockID{Number: 500, Hash: common.HexToHash("0xBAD")} - err := interop.invalidateBlock(unknownChain, blockID) - - require.Error(t, err) - require.Contains(t, err.Error(), "not found") - - // Verify InvalidateBlock was NOT called - require.Len(t, mock.invalidateBlockCalls, 0) - }) - - t.Run("returns error when chain.InvalidateBlock fails", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - mock.invalidateBlockErr = errors.New("engine failure") - 
chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() - - blockID := eth.BlockID{Number: 500, Hash: common.HexToHash("0xBAD")} - err := interop.invalidateBlock(mock.id, blockID) - - require.Error(t, err) - require.Contains(t, err.Error(), "engine failure") - }) - - t.Run("handleResult calls invalidateBlock for each invalid head", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock1 := newMockChainContainer(10) - mock2 := newMockChainContainer(8453) - chains := map[eth.ChainID]cc.ChainContainer{mock1.id: mock1, mock2.id: mock2} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() - - // Create result with invalid heads on both chains - invalidResult := Result{ - Timestamp: 1000, - L1Head: eth.BlockID{Number: 100, Hash: common.HexToHash("0xL1")}, - L2Heads: map[eth.ChainID]eth.BlockID{ - mock1.id: {Number: 500, Hash: common.HexToHash("0xL2-1")}, - mock2.id: {Number: 600, Hash: common.HexToHash("0xL2-2")}, - }, - InvalidHeads: map[eth.ChainID]eth.BlockID{ - mock1.id: {Number: 500, Hash: common.HexToHash("0xBAD1")}, - mock2.id: {Number: 600, Hash: common.HexToHash("0xBAD2")}, - }, - } - - err := interop.handleResult(invalidResult) - require.NoError(t, err) + tests := []struct { + name string + setup func(h *interopTestHarness) *interopTestHarness + run func(t *testing.T, h *interopTestHarness) + }{ + { + name: "calls chain.InvalidateBlock with correct args", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, nil).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + mock := h.Mock(10) + blockID := eth.BlockID{Number: 500, Hash: common.HexToHash("0xBAD")} + err := h.interop.invalidateBlock(mock.id, blockID) + require.NoError(t, err) + + require.Len(t, mock.invalidateBlockCalls, 1) + require.Equal(t, uint64(500), 
mock.invalidateBlockCalls[0].height) + require.Equal(t, common.HexToHash("0xBAD"), mock.invalidateBlockCalls[0].payloadHash) + }, + }, + { + name: "returns error when chain not found", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, nil).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + mock := h.Mock(10) + unknownChain := eth.ChainIDFromUInt64(999) + blockID := eth.BlockID{Number: 500, Hash: common.HexToHash("0xBAD")} + err := h.interop.invalidateBlock(unknownChain, blockID) + + require.Error(t, err) + require.Contains(t, err.Error(), "not found") + require.Len(t, mock.invalidateBlockCalls, 0) + }, + }, + { + name: "returns error when chain.InvalidateBlock fails", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.invalidateBlockErr = errors.New("engine failure") + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + mock := h.Mock(10) + blockID := eth.BlockID{Number: 500, Hash: common.HexToHash("0xBAD")} + err := h.interop.invalidateBlock(mock.id, blockID) - // Verify both chains had InvalidateBlock called - require.Len(t, mock1.invalidateBlockCalls, 1) - require.Equal(t, uint64(500), mock1.invalidateBlockCalls[0].height) - require.Equal(t, common.HexToHash("0xBAD1"), mock1.invalidateBlockCalls[0].payloadHash) + require.Error(t, err) + require.Contains(t, err.Error(), "engine failure") + }, + }, + { + name: "handleResult calls invalidateBlock for each invalid head", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, nil).WithChain(8453, nil).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + mock1 := h.Mock(10) + mock2 := h.Mock(8453) + + invalidResult := Result{ + Timestamp: 1000, + L1Head: eth.BlockID{Number: 100, Hash: common.HexToHash("0xL1")}, + L2Heads: map[eth.ChainID]eth.BlockID{ + mock1.id: {Number: 500, Hash: common.HexToHash("0xL2-1")}, + mock2.id: {Number: 600, Hash: 
common.HexToHash("0xL2-2")}, + }, + InvalidHeads: map[eth.ChainID]eth.BlockID{ + mock1.id: {Number: 500, Hash: common.HexToHash("0xBAD1")}, + mock2.id: {Number: 600, Hash: common.HexToHash("0xBAD2")}, + }, + } + + err := h.interop.handleResult(invalidResult) + require.NoError(t, err) + + require.Len(t, mock1.invalidateBlockCalls, 1) + require.Equal(t, uint64(500), mock1.invalidateBlockCalls[0].height) + require.Equal(t, common.HexToHash("0xBAD1"), mock1.invalidateBlockCalls[0].payloadHash) + + require.Len(t, mock2.invalidateBlockCalls, 1) + require.Equal(t, uint64(600), mock2.invalidateBlockCalls[0].height) + require.Equal(t, common.HexToHash("0xBAD2"), mock2.invalidateBlockCalls[0].payloadHash) + }, + }, + } - require.Len(t, mock2.invalidateBlockCalls, 1) - require.Equal(t, uint64(600), mock2.invalidateBlockCalls[0].height) - require.Equal(t, common.HexToHash("0xBAD2"), mock2.invalidateBlockCalls[0].payloadHash) - }) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newInteropTestHarness(t) + tc.setup(h) + tc.run(t, h) + }) + } } // ============================================================================= @@ -701,108 +817,107 @@ func TestInvalidateBlock(t *testing.T) { func TestProgressAndRecord(t *testing.T) { t.Parallel() - t.Run("empty result sets L1 to collected minimum", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock1 := newMockChainContainer(10) - mock1.currentL1 = eth.BlockRef{Number: 200, Hash: common.HexToHash("0x2")} - mock1.blockAtTimestampErr = ethereum.NotFound - - mock2 := newMockChainContainer(8453) - mock2.currentL1 = eth.BlockRef{Number: 100, Hash: common.HexToHash("0x1")} - mock2.blockAtTimestampErr = ethereum.NotFound - - chains := map[eth.ChainID]cc.ChainContainer{mock1.id: mock1, mock2.id: mock2} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() - - require.Equal(t, eth.BlockID{}, interop.currentL1) - - madeProgress, err := 
interop.progressAndRecord() - require.NoError(t, err) - require.False(t, madeProgress, "empty result should not advance verified timestamp") - - require.Equal(t, uint64(100), interop.currentL1.Number) - require.Equal(t, common.HexToHash("0x1"), interop.currentL1.Hash) - }) - - t.Run("valid result sets L1 to result L1Head", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - mock.currentL1 = eth.BlockRef{Number: 200, Hash: common.HexToHash("0x200")} - mock.blockAtTimestamp = eth.L2BlockRef{Number: 100, Hash: common.HexToHash("0xL2")} - - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() - - expectedL1Head := eth.BlockID{Number: 150, Hash: common.HexToHash("0xL1Result")} - interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { - return Result{Timestamp: ts, L1Head: expectedL1Head, L2Heads: blocks}, nil - } - - madeProgress, err := interop.progressAndRecord() - require.NoError(t, err) - require.True(t, madeProgress, "valid result should advance verified timestamp") - - require.Equal(t, expectedL1Head.Number, interop.currentL1.Number) - require.Equal(t, expectedL1Head.Hash, interop.currentL1.Hash) - }) - - t.Run("invalid result does not update L1", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - mock.currentL1 = eth.BlockRef{Number: 200, Hash: common.HexToHash("0x200")} - mock.blockAtTimestamp = eth.L2BlockRef{Number: 100, Hash: common.HexToHash("0xL2")} - - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() - - initialL1 := eth.BlockID{Number: 50, Hash: common.HexToHash("0x50")} - interop.currentL1 = initialL1 - - interop.verifyFn = func(ts uint64, blocks 
map[eth.ChainID]eth.BlockID) (Result, error) { - return Result{ - Timestamp: ts, - L1Head: eth.BlockID{Number: 999, Hash: common.HexToHash("0xShouldNotBeUsed")}, - L2Heads: blocks, - InvalidHeads: map[eth.ChainID]eth.BlockID{mock.id: {Number: 100}}, - }, nil - } - - madeProgress, err := interop.progressAndRecord() - require.NoError(t, err) - require.False(t, madeProgress, "invalid result should not advance verified timestamp") - - require.Equal(t, initialL1.Number, interop.currentL1.Number) - require.Equal(t, initialL1.Hash, interop.currentL1.Hash) - }) - - t.Run("errors propagated", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() + tests := []struct { + name string + setup func(h *interopTestHarness) *interopTestHarness + run func(t *testing.T, h *interopTestHarness) + }{ + { + name: "empty result sets L1 to collected minimum", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.currentL1 = eth.BlockRef{Number: 200, Hash: common.HexToHash("0x2")} + m.blockAtTimestampErr = ethereum.NotFound + }).WithChain(8453, func(m *mockChainContainer) { + m.currentL1 = eth.BlockRef{Number: 100, Hash: common.HexToHash("0x1")} + m.blockAtTimestampErr = ethereum.NotFound + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + require.Equal(t, eth.BlockID{}, h.interop.currentL1) - mock := newMockChainContainer(10) - mock.currentL1Err = errors.New("L1 sync error") + madeProgress, err := h.interop.progressAndRecord() + require.NoError(t, err) + require.False(t, madeProgress, "empty result should not advance verified timestamp") - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - interop.ctx = context.Background() + require.Equal(t, uint64(100), h.interop.currentL1.Number) + require.Equal(t, common.HexToHash("0x1"), h.interop.currentL1.Hash) + }, + }, + { + name: "valid result sets L1 to result 
L1Head", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.currentL1 = eth.BlockRef{Number: 200, Hash: common.HexToHash("0x200")} + m.blockAtTimestamp = eth.L2BlockRef{Number: 100, Hash: common.HexToHash("0xL2")} + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + expectedL1Head := eth.BlockID{Number: 150, Hash: common.HexToHash("0xL1Result")} + h.interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { + return Result{Timestamp: ts, L1Head: expectedL1Head, L2Heads: blocks}, nil + } + + madeProgress, err := h.interop.progressAndRecord() + require.NoError(t, err) + require.True(t, madeProgress, "valid result should advance verified timestamp") + + require.Equal(t, expectedL1Head.Number, h.interop.currentL1.Number) + require.Equal(t, expectedL1Head.Hash, h.interop.currentL1.Hash) + }, + }, + { + name: "invalid result does not update L1", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.currentL1 = eth.BlockRef{Number: 200, Hash: common.HexToHash("0x200")} + m.blockAtTimestamp = eth.L2BlockRef{Number: 100, Hash: common.HexToHash("0xL2")} + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + mock := h.Mock(10) + initialL1 := eth.BlockID{Number: 50, Hash: common.HexToHash("0x50")} + h.interop.currentL1 = initialL1 + + h.interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { + return Result{ + Timestamp: ts, + L1Head: eth.BlockID{Number: 999, Hash: common.HexToHash("0xShouldNotBeUsed")}, + L2Heads: blocks, + InvalidHeads: map[eth.ChainID]eth.BlockID{mock.id: {Number: 100}}, + }, nil + } + + madeProgress, err := h.interop.progressAndRecord() + require.NoError(t, err) + require.False(t, madeProgress, "invalid result should not advance verified timestamp") + + require.Equal(t, initialL1.Number, h.interop.currentL1.Number) + 
require.Equal(t, initialL1.Hash, h.interop.currentL1.Hash) + }, + }, + { + name: "errors propagated", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.currentL1Err = errors.New("L1 sync error") + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + madeProgress, err := h.interop.progressAndRecord() + require.Error(t, err) + require.False(t, madeProgress, "error should not advance verified timestamp") + }, + }, + } - madeProgress, err := interop.progressAndRecord() - require.Error(t, err) - require.False(t, madeProgress, "error should not advance verified timestamp") - }) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newInteropTestHarness(t) + tc.setup(h) + tc.run(t, h) + }) + } } // ============================================================================= @@ -1091,173 +1206,143 @@ var _ LogsDB = (*mockLogsDBForInterop)(nil) func TestReset(t *testing.T) { t.Parallel() - t.Run("rewinds logsDB when previous block available", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - // BlockAtTimestamp will return a valid block - mock.blockAtTimestamp = eth.L2BlockRef{ - Hash: common.HexToHash("0xPREV"), - Number: 99, - } - - mockLogsDB := &mockLogsDBForInterop{} - - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - defer func() { _ = interop.Stop(context.Background()) }() - interop.ctx = context.Background() - interop.logsDBs[mock.id] = mockLogsDB - - // Reset at timestamp 100 (blockTime=1, so prev=99) - interop.Reset(mock.id, 100) - - // Verify logsDB.Rewind was called - require.Len(t, mockLogsDB.rewindCalls, 1) - require.Equal(t, uint64(99), mockLogsDB.rewindCalls[0].Number) - require.Equal(t, 0, mockLogsDB.clearCalls) - }) - - t.Run("clears logsDB when previous block not available", func(t *testing.T) { - 
t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - // BlockAtTimestamp returns error - mock.blockAtTimestampErr = errors.New("block not found") - - mockLogsDB := &mockLogsDBForInterop{} - - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - defer func() { _ = interop.Stop(context.Background()) }() - interop.ctx = context.Background() - interop.logsDBs[mock.id] = mockLogsDB - - // Reset at timestamp 100 - interop.Reset(mock.id, 100) - - // Verify logsDB.Clear was called - require.Len(t, mockLogsDB.rewindCalls, 0) - require.Equal(t, 1, mockLogsDB.clearCalls) - }) - - t.Run("clears logsDB when timestamp at or before blockTime", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - // Configure mock so firstSealedBlock.Number > targetBlock.Number - // When timestamp=1 and blockTime=1, targetTs=0, so targetBlock.Number=0 - // Setting firstSealedBlock.Number=5 means DB starts after target, triggering Clear - mockLogsDB := &mockLogsDBForInterop{ - firstSealedBlock: suptypes.BlockSeal{Number: 5}, - } - - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - defer func() { _ = interop.Stop(context.Background()) }() - interop.ctx = context.Background() - interop.logsDBs[mock.id] = mockLogsDB - - // Reset at timestamp 1 (blockTime=1, so targetTs=0) - // Since firstSealedBlock.Number (5) > targetBlock.Number (0), Clear is called - interop.Reset(mock.id, 1) - - // Verify logsDB.Clear was called - require.Len(t, mockLogsDB.rewindCalls, 0) - require.Equal(t, 1, mockLogsDB.clearCalls) - }) - - t.Run("rewinds verifiedDB", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - mock.blockAtTimestamp = eth.L2BlockRef{Number: 99} - - mockLogsDB := &mockLogsDBForInterop{} - - chains := 
map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - defer func() { _ = interop.Stop(context.Background()) }() - interop.ctx = context.Background() - interop.logsDBs[mock.id] = mockLogsDB - - // Add some verified results - for ts := uint64(98); ts <= 102; ts++ { - err := interop.verifiedDB.Commit(VerifiedResult{ - Timestamp: ts, - L1Head: eth.BlockID{Number: ts}, - L2Heads: map[eth.ChainID]eth.BlockID{mock.id: {Number: ts}}, - }) - require.NoError(t, err) - } - - // Reset at timestamp 100 (should remove 100, 101, 102) - interop.Reset(mock.id, 100) - - // Verify results at 98, 99 still exist - has, _ := interop.verifiedDB.Has(98) - require.True(t, has) - has, _ = interop.verifiedDB.Has(99) - require.True(t, has) + tests := []struct { + name string + setup func(h *interopTestHarness) (*interopTestHarness, *mockLogsDBForInterop) + run func(t *testing.T, h *interopTestHarness, mockLogsDB *mockLogsDBForInterop) + }{ + { + name: "rewinds logsDB when previous block available", + setup: func(h *interopTestHarness) (*interopTestHarness, *mockLogsDBForInterop) { + h.WithChain(10, func(m *mockChainContainer) { + m.blockAtTimestamp = eth.L2BlockRef{Hash: common.HexToHash("0xPREV"), Number: 99} + }).Build() + mockLogsDB := &mockLogsDBForInterop{} + h.interop.logsDBs[h.Mock(10).id] = mockLogsDB + return h, mockLogsDB + }, + run: func(t *testing.T, h *interopTestHarness, mockLogsDB *mockLogsDBForInterop) { + h.interop.Reset(h.Mock(10).id, 100) + + require.Len(t, mockLogsDB.rewindCalls, 1) + require.Equal(t, uint64(99), mockLogsDB.rewindCalls[0].Number) + require.Equal(t, 0, mockLogsDB.clearCalls) + }, + }, + { + name: "clears logsDB when previous block not available", + setup: func(h *interopTestHarness) (*interopTestHarness, *mockLogsDBForInterop) { + h.WithChain(10, func(m *mockChainContainer) { + m.blockAtTimestampErr = errors.New("block not found") + }).Build() + mockLogsDB := 
&mockLogsDBForInterop{} + h.interop.logsDBs[h.Mock(10).id] = mockLogsDB + return h, mockLogsDB + }, + run: func(t *testing.T, h *interopTestHarness, mockLogsDB *mockLogsDBForInterop) { + h.interop.Reset(h.Mock(10).id, 100) + + require.Len(t, mockLogsDB.rewindCalls, 0) + require.Equal(t, 1, mockLogsDB.clearCalls) + }, + }, + { + name: "clears logsDB when timestamp at or before blockTime", + setup: func(h *interopTestHarness) (*interopTestHarness, *mockLogsDBForInterop) { + h.WithChain(10, nil).Build() + mockLogsDB := &mockLogsDBForInterop{ + firstSealedBlock: suptypes.BlockSeal{Number: 5}, + } + h.interop.logsDBs[h.Mock(10).id] = mockLogsDB + return h, mockLogsDB + }, + run: func(t *testing.T, h *interopTestHarness, mockLogsDB *mockLogsDBForInterop) { + // Reset at timestamp 1 (blockTime=1, so targetTs=0) + // Since firstSealedBlock.Number (5) > targetBlock.Number (0), Clear is called + h.interop.Reset(h.Mock(10).id, 1) + + require.Len(t, mockLogsDB.rewindCalls, 0) + require.Equal(t, 1, mockLogsDB.clearCalls) + }, + }, + { + name: "rewinds verifiedDB", + setup: func(h *interopTestHarness) (*interopTestHarness, *mockLogsDBForInterop) { + h.WithChain(10, func(m *mockChainContainer) { + m.blockAtTimestamp = eth.L2BlockRef{Number: 99} + }).Build() + mockLogsDB := &mockLogsDBForInterop{} + h.interop.logsDBs[h.Mock(10).id] = mockLogsDB + return h, mockLogsDB + }, + run: func(t *testing.T, h *interopTestHarness, mockLogsDB *mockLogsDBForInterop) { + mock := h.Mock(10) + // Add some verified results + for ts := uint64(98); ts <= 102; ts++ { + err := h.interop.verifiedDB.Commit(VerifiedResult{ + Timestamp: ts, + L1Head: eth.BlockID{Number: ts}, + L2Heads: map[eth.ChainID]eth.BlockID{mock.id: {Number: ts}}, + }) + require.NoError(t, err) + } + + // Reset at timestamp 100 (should remove 100, 101, 102) + h.interop.Reset(mock.id, 100) + + // Verify results at 98, 99 still exist + has, _ := h.interop.verifiedDB.Has(98) + require.True(t, has) + has, _ = 
h.interop.verifiedDB.Has(99) + require.True(t, has) + + // Verify results at 100, 101, 102 are gone + has, _ = h.interop.verifiedDB.Has(100) + require.False(t, has) + has, _ = h.interop.verifiedDB.Has(101) + require.False(t, has) + has, _ = h.interop.verifiedDB.Has(102) + require.False(t, has) + }, + }, + { + name: "resets currentL1", + setup: func(h *interopTestHarness) (*interopTestHarness, *mockLogsDBForInterop) { + h.WithChain(10, func(m *mockChainContainer) { + m.blockAtTimestamp = eth.L2BlockRef{Number: 99} + }).Build() + mockLogsDB := &mockLogsDBForInterop{} + h.interop.logsDBs[h.Mock(10).id] = mockLogsDB + return h, mockLogsDB + }, + run: func(t *testing.T, h *interopTestHarness, mockLogsDB *mockLogsDBForInterop) { + h.interop.currentL1 = eth.BlockID{Number: 500, Hash: common.HexToHash("0xL1")} - // Verify results at 100, 101, 102 are gone - has, _ = interop.verifiedDB.Has(100) - require.False(t, has) - has, _ = interop.verifiedDB.Has(101) - require.False(t, has) - has, _ = interop.verifiedDB.Has(102) - require.False(t, has) - }) - - t.Run("resets currentL1", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - mock.blockAtTimestamp = eth.L2BlockRef{Number: 99} - - mockLogsDB := &mockLogsDBForInterop{} - - chains := map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - defer func() { _ = interop.Stop(context.Background()) }() - interop.ctx = context.Background() - interop.logsDBs[mock.id] = mockLogsDB - - // Set currentL1 to some value - interop.currentL1 = eth.BlockID{Number: 500, Hash: common.HexToHash("0xL1")} - - // Reset - interop.Reset(mock.id, 100) - - // Verify currentL1 is reset to zero - require.Equal(t, eth.BlockID{}, interop.currentL1) - }) - - t.Run("handles unknown chain gracefully", func(t *testing.T) { - t.Parallel() - dataDir := t.TempDir() - - mock := newMockChainContainer(10) - chains := 
map[eth.ChainID]cc.ChainContainer{mock.id: mock} - interop := New(testLogger(), 1000, chains, dataDir) - require.NotNil(t, interop) - defer func() { _ = interop.Stop(context.Background()) }() - interop.ctx = context.Background() - - // Reset on unknown chain (should not panic) - unknownChain := eth.ChainIDFromUInt64(999) - interop.Reset(unknownChain, 100) - - // Just verify it didn't panic - }) + h.interop.Reset(h.Mock(10).id, 100) + + require.Equal(t, eth.BlockID{}, h.interop.currentL1) + }, + }, + { + name: "handles unknown chain gracefully", + setup: func(h *interopTestHarness) (*interopTestHarness, *mockLogsDBForInterop) { + h.WithChain(10, nil).Build() + return h, nil + }, + run: func(t *testing.T, h *interopTestHarness, mockLogsDB *mockLogsDBForInterop) { + // Reset on unknown chain (should not panic) + unknownChain := eth.ChainIDFromUInt64(999) + h.interop.Reset(unknownChain, 100) + // Just verify it didn't panic + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newInteropTestHarness(t) + h, mockLogsDB := tc.setup(h) + tc.run(t, h, mockLogsDB) + }) + } } diff --git a/op-supernode/supernode/activity/interop/verified_db.go b/op-supernode/supernode/activity/interop/verified_db.go index 0f1fba04c020b..246fdcef96865 100644 --- a/op-supernode/supernode/activity/interop/verified_db.go +++ b/op-supernode/supernode/activity/interop/verified_db.go @@ -181,9 +181,9 @@ func (v *VerifiedDB) LastTimestamp() (uint64, bool) { return v.lastTimestamp, v.initialized } -// RewindTo removes all verified results at or after the given timestamp. +// Rewind removes all verified results at or after the given timestamp. // Returns true if any results were deleted, false otherwise. 
-func (v *VerifiedDB) RewindTo(timestamp uint64) (bool, error) { +func (v *VerifiedDB) Rewind(timestamp uint64) (bool, error) { var deleted bool err := v.db.Update(func(tx *bolt.Tx) error { diff --git a/op-supernode/supernode/activity/interop/verified_db_test.go b/op-supernode/supernode/activity/interop/verified_db_test.go index 48c9635c9fc3d..3848c30b021a8 100644 --- a/op-supernode/supernode/activity/interop/verified_db_test.go +++ b/op-supernode/supernode/activity/interop/verified_db_test.go @@ -204,7 +204,7 @@ func TestVerifiedDB_RewindTo(t *testing.T) { require.Equal(t, uint64(105), lastTs) // Rewind to 103 (should remove 103, 104, 105) - deleted, err := db.RewindTo(103) + deleted, err := db.Rewind(103) require.NoError(t, err) require.True(t, deleted) @@ -248,7 +248,7 @@ func TestVerifiedDB_RewindTo(t *testing.T) { } // Rewind to 200 (nothing to delete) - deleted, err := db.RewindTo(200) + deleted, err := db.Rewind(200) require.NoError(t, err) require.False(t, deleted) @@ -278,7 +278,7 @@ func TestVerifiedDB_RewindTo(t *testing.T) { } // Rewind to 0 (delete all) - deleted, err := db.RewindTo(0) + deleted, err := db.Rewind(0) require.NoError(t, err) require.True(t, deleted) @@ -315,7 +315,7 @@ func TestVerifiedDB_RewindTo(t *testing.T) { } // Rewind to 103 - _, err = db.RewindTo(103) + _, err = db.Rewind(103) require.NoError(t, err) // Should be able to commit 103 again (sequential from 102) From 06b1cbe97d4cab2c02c01de5a92c713c2d398d10 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 11 Feb 2026 16:35:16 -0600 Subject: [PATCH 23/23] More tests to sub-cases --- .../activity/interop/interop_test.go | 90 +++++++++---------- 1 file changed, 44 insertions(+), 46 deletions(-) diff --git a/op-supernode/supernode/activity/interop/interop_test.go b/op-supernode/supernode/activity/interop/interop_test.go index f109680a5ffd6..3b2b288047a23 100644 --- a/op-supernode/supernode/activity/interop/interop_test.go +++ 
b/op-supernode/supernode/activity/interop/interop_test.go @@ -290,9 +290,9 @@ func TestCollectCurrentL1(t *testing.T) { t.Parallel() tests := []struct { - name string - setup func(h *interopTestHarness) *interopTestHarness - run func(t *testing.T, h *interopTestHarness) + name string + setup func(h *interopTestHarness) *interopTestHarness + assert func(t *testing.T, l1 eth.BlockID, err error) }{ { name: "returns minimum L1 across multiple chains", @@ -303,8 +303,7 @@ func TestCollectCurrentL1(t *testing.T) { m.currentL1 = eth.BlockRef{Number: 100, Hash: common.HexToHash("0x1")} // minimum }).Build() }, - run: func(t *testing.T, h *interopTestHarness) { - l1, err := h.interop.collectCurrentL1() + assert: func(t *testing.T, l1 eth.BlockID, err error) { require.NoError(t, err) require.Equal(t, uint64(100), l1.Number) require.Equal(t, common.HexToHash("0x1"), l1.Hash) @@ -317,8 +316,7 @@ func TestCollectCurrentL1(t *testing.T) { m.currentL1 = eth.BlockRef{Number: 500, Hash: common.HexToHash("0x5")} }).Build() }, - run: func(t *testing.T, h *interopTestHarness) { - l1, err := h.interop.collectCurrentL1() + assert: func(t *testing.T, l1 eth.BlockID, err error) { require.NoError(t, err) require.Equal(t, uint64(500), l1.Number) }, @@ -330,8 +328,7 @@ func TestCollectCurrentL1(t *testing.T) { m.currentL1Err = errors.New("chain not synced") }).Build() }, - run: func(t *testing.T, h *interopTestHarness) { - l1, err := h.interop.collectCurrentL1() + assert: func(t *testing.T, l1 eth.BlockID, err error) { require.Error(t, err) require.Contains(t, err.Error(), "not ready") require.Equal(t, eth.BlockID{}, l1) @@ -343,7 +340,8 @@ func TestCollectCurrentL1(t *testing.T) { t.Run(tc.name, func(t *testing.T) { h := newInteropTestHarness(t) tc.setup(h) - tc.run(t, h) + l1, err := h.interop.collectCurrentL1() + tc.assert(t, l1, err) }) } } @@ -356,9 +354,9 @@ func TestCheckChainsReady(t *testing.T) { t.Parallel() tests := []struct { - name string - setup func(h *interopTestHarness) 
*interopTestHarness - run func(t *testing.T, h *interopTestHarness) + name string + setup func(h *interopTestHarness) *interopTestHarness + assert func(t *testing.T, h *interopTestHarness, blocks map[eth.ChainID]eth.BlockID, err error) }{ { name: "all chains ready returns blocks", @@ -369,8 +367,7 @@ func TestCheckChainsReady(t *testing.T) { m.blockAtTimestamp = eth.L2BlockRef{Number: 200, Hash: common.HexToHash("0x2")} }).Build() }, - run: func(t *testing.T, h *interopTestHarness) { - blocks, err := h.interop.checkChainsReady(1000) + assert: func(t *testing.T, h *interopTestHarness, blocks map[eth.ChainID]eth.BlockID, err error) { require.NoError(t, err) require.Len(t, blocks, 2) require.NotEqual(t, common.Hash{}, blocks[h.Mock(10).id].Hash) @@ -386,8 +383,7 @@ func TestCheckChainsReady(t *testing.T) { m.blockAtTimestampErr = ethereum.NotFound }).Build() }, - run: func(t *testing.T, h *interopTestHarness) { - blocks, err := h.interop.checkChainsReady(1000) + assert: func(t *testing.T, h *interopTestHarness, blocks map[eth.ChainID]eth.BlockID, err error) { require.Error(t, err) require.Nil(t, blocks) }, @@ -403,8 +399,7 @@ func TestCheckChainsReady(t *testing.T) { } return h.Build() }, - run: func(t *testing.T, h *interopTestHarness) { - blocks, err := h.interop.checkChainsReady(1000) + assert: func(t *testing.T, h *interopTestHarness, blocks map[eth.ChainID]eth.BlockID, err error) { require.NoError(t, err) require.Len(t, blocks, 5) }, @@ -415,7 +410,8 @@ func TestCheckChainsReady(t *testing.T) { t.Run(tc.name, func(t *testing.T) { h := newInteropTestHarness(t) tc.setup(h) - tc.run(t, h) + blocks, err := h.interop.checkChainsReady(1000) + tc.assert(t, h, blocks, err) }) } } @@ -427,10 +423,17 @@ func TestCheckChainsReady(t *testing.T) { func TestProgressInterop(t *testing.T) { t.Parallel() + // Default verifyFn that passes through + passThroughVerifyFn := func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { + return Result{Timestamp: ts, L2Heads: 
blocks}, nil + } + tests := []struct { - name string - setup func(h *interopTestHarness) *interopTestHarness - run func(t *testing.T, h *interopTestHarness) + name string + setup func(h *interopTestHarness) *interopTestHarness + verifyFn func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) + assert func(t *testing.T, result Result, err error) + run func(t *testing.T, h *interopTestHarness) // override for complex cases }{ { name: "not initialized uses activation timestamp", @@ -439,17 +442,10 @@ func TestProgressInterop(t *testing.T) { m.blockAtTimestamp = eth.L2BlockRef{Number: 100, Hash: common.HexToHash("0x1")} }).Build() }, - run: func(t *testing.T, h *interopTestHarness) { - var capturedTimestamp uint64 - h.interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { - capturedTimestamp = ts - return Result{Timestamp: ts, L2Heads: blocks}, nil - } - - result, err := h.interop.progressInterop() + verifyFn: passThroughVerifyFn, + assert: func(t *testing.T, result Result, err error) { require.NoError(t, err) require.Equal(t, uint64(5000), result.Timestamp) - require.Equal(t, uint64(5000), capturedTimestamp) }, }, { @@ -460,9 +456,7 @@ func TestProgressInterop(t *testing.T) { }).Build() }, run: func(t *testing.T, h *interopTestHarness) { - h.interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { - return Result{Timestamp: ts, L2Heads: blocks}, nil - } + h.interop.verifyFn = passThroughVerifyFn // First progress result1, err := h.interop.progressInterop() @@ -486,8 +480,7 @@ func TestProgressInterop(t *testing.T) { m.blockAtTimestampErr = ethereum.NotFound }).Build() }, - run: func(t *testing.T, h *interopTestHarness) { - result, err := h.interop.progressInterop() + assert: func(t *testing.T, result Result, err error) { require.NoError(t, err) require.True(t, result.IsEmpty()) }, @@ -499,8 +492,7 @@ func TestProgressInterop(t *testing.T) { m.blockAtTimestampErr = errors.New("internal 
error") }).Build() }, - run: func(t *testing.T, h *interopTestHarness) { - result, err := h.interop.progressInterop() + assert: func(t *testing.T, result Result, err error) { require.Error(t, err) require.True(t, result.IsEmpty()) }, @@ -513,12 +505,10 @@ func TestProgressInterop(t *testing.T) { m.blockAtTimestamp = eth.L2BlockRef{Number: 500, Hash: common.HexToHash("0xL2")} }).Build() }, - run: func(t *testing.T, h *interopTestHarness) { - h.interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { - return Result{}, errors.New("verification failed") - } - - result, err := h.interop.progressInterop() + verifyFn: func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { + return Result{}, errors.New("verification failed") + }, + assert: func(t *testing.T, result Result, err error) { require.Error(t, err) require.Contains(t, err.Error(), "verification failed") require.True(t, result.IsEmpty()) @@ -530,7 +520,15 @@ func TestProgressInterop(t *testing.T) { t.Run(tc.name, func(t *testing.T) { h := newInteropTestHarness(t) tc.setup(h) - tc.run(t, h) + if tc.run != nil { + tc.run(t, h) + return + } + if tc.verifyFn != nil { + h.interop.verifyFn = tc.verifyFn + } + result, err := h.interop.progressInterop() + tc.assert(t, result, err) }) } }