diff --git a/op-e2e/interop/interop_test.go b/op-e2e/interop/interop_test.go index 0d593673eccea..20428e31e3882 100644 --- a/op-e2e/interop/interop_test.go +++ b/op-e2e/interop/interop_test.go @@ -2,7 +2,6 @@ package interop import ( "context" - "fmt" "math/big" "testing" "time" @@ -86,14 +85,12 @@ func TestInteropTrivial(t *testing.T) { require.Equal(t, expectedBalance, bobBalance) s2.DeployEmitterContract(chainA, "Alice") - rec := s2.EmitData(chainA, "Alice", "0x1234567890abcdef") - - fmt.Println("Result of emitting event:", rec) - s2.DeployEmitterContract(chainB, "Alice") - rec = s2.EmitData(chainB, "Alice", "0x1234567890abcdef") + for i := 0; i < 1; i++ { + s2.EmitData(chainA, "Alice", "0x1234567890abcdef") - fmt.Println("Result of emitting event:", rec) + s2.EmitData(chainB, "Alice", "0x1234567890abcdef") + } time.Sleep(60 * time.Second) diff --git a/op-service/sources/supervisor_client.go b/op-service/sources/supervisor_client.go index d9cb71fb45b2f..48063e351e54a 100644 --- a/op-service/sources/supervisor_client.go +++ b/op-service/sources/supervisor_client.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -68,7 +67,12 @@ func (cl *SupervisorClient) AddL2RPC( func (cl *SupervisorClient) UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error) { var result types.ReferenceView - err := cl.client.CallContext(ctx, &result, "supervisor_unsafeView", (*hexutil.U256)(&chainID), unsafe) + err := cl.client.CallContext( + ctx, + &result, + "supervisor_unsafeView", + chainID, + unsafe) if err != nil { return types.ReferenceView{}, fmt.Errorf("failed to share unsafe block view %s (chain %s): %w", unsafe, chainID, err) } @@ -77,7 +81,12 @@ func (cl *SupervisorClient) UnsafeView(ctx context.Context, chainID types.ChainI func (cl *SupervisorClient) SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error) { var result types.ReferenceView - err := cl.client.CallContext(ctx, &result, "supervisor_safeView", (*hexutil.U256)(&chainID), safe) + err := cl.client.CallContext( + ctx, + &result, + "supervisor_safeView", + chainID, + safe) if err != nil { return types.ReferenceView{}, fmt.Errorf("failed to share safe block view %s (chain %s): %w", safe, chainID, err) } diff --git a/op-supervisor/supervisor/backend/backend.go b/op-supervisor/supervisor/backend/backend.go index 563268ae3f3f4..fbac90534ffe5 100644 --- a/op-supervisor/supervisor/backend/backend.go +++ b/op-supervisor/supervisor/backend/backend.go @@ -4,21 +4,22 @@ import ( "context" "errors" "fmt" - "io" + "sync" "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/dial" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum-optimism/optimism/op-supervisor/config" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" - 
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/source" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/processors" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/frontend" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -29,13 +30,28 @@ type SupervisorBackend struct { m Metrics dataDir string - chainMonitors map[types.ChainID]*source.ChainMonitor - db *db.ChainsDB + // RW lock to avoid concurrent map mutations. + // Read = any chain may be used and mutated. + // Write = set of chains is changing. + mu sync.RWMutex + + // db holds on to the DB indices for each chain + db *db.ChainsDB + + // chainProcessors are notified of new unsafe blocks, and add the unsafe log events data into the events DB + chainProcessors map[types.ChainID]*processors.ChainProcessor + + // crossUnsafeProcessors are notified of new event data to cross-verify. + crossUnsafeProcessors map[types.ChainID]*processors.CrossUnsafeVerifier + + // crossSafeVerifiers are notified of new local-safe blocks and events, + // and then feed the cross-safe data into the cross-safe DB + crossSafeVerifiers map[types.ChainID]*processors.CrossSafeVerifier } var _ frontend.Backend = (*SupervisorBackend)(nil) -var _ io.Closer = (*SupervisorBackend)(nil) +var errAlreadyStopped = errors.New("already stopped") func NewSupervisorBackend(ctx context.Context, logger log.Logger, m Metrics, cfg *config.Config) (*SupervisorBackend, error) { // attempt to prepare the data directory @@ -44,18 +60,18 @@ func NewSupervisorBackend(ctx context.Context, logger log.Logger, m Metrics, cfg } // create the chains db - db := db.NewChainsDB(map[types.ChainID]db.LogStorage{}, logger) + chainsDB := db.NewChainsDB(map[types.ChainID]db.LogStorage{}, logger) // create an empty map of chain monitors - chainMonitors := make(map[types.ChainID]*source.ChainMonitor, len(cfg.L2RPCs)) + chainProcessors := make(map[types.ChainID]*processors.ChainProcessor, len(cfg.L2RPCs)) // create the supervisor backend super := &SupervisorBackend{ - logger: logger, - m: m, - dataDir: cfg.Datadir, - chainMonitors: chainMonitors, - db: db, + logger: logger, + m: m, + dataDir: cfg.Datadir, + chainProcessors: chainProcessors, + db: chainsDB, } // from the RPC strings, have the supervisor backend create a chain monitor @@ -72,9 +88,9 @@ func NewSupervisorBackend(ctx context.Context, logger log.Logger, m Metrics, cfg // addFromRPC adds a chain monitor to the supervisor backend from an rpc endpoint // it does not expect to be called after the backend has been started // it will start the monitor if shouldStart is true -func (su *SupervisorBackend) addFromRPC(ctx context.Context, logger log.Logger, rpc string, shouldStart bool) error { +func (su *SupervisorBackend) addFromRPC(ctx context.Context, logger log.Logger, rpc string, _ bool) error { // create the rpc client, which yields the chain id - rpcClient, chainID, err := createRpcClient(ctx, logger, rpc) + rpcClient, chainID, err := clientForL2(ctx, logger, rpc) if err != nil { return err } @@ -89,25 +105,29 @@ func (su *SupervisorBackend) addFromRPC(ctx context.Context, logger log.Logger, if err != nil { return fmt.Errorf("failed to create logdb for chain %v at %v: %w", chainID, path, err) } - if su.chainMonitors[chainID] != nil { + if su.chainProcessors[chainID] != nil { return fmt.Errorf("chain monitor for chain %v already exists", chainID) } - monitor, err := source.NewChainMonitor(ctx, logger, cm, chainID, rpc, rpcClient, su.db) + // create a 
client like the monitor would have + cl, err := processors.NewEthClient( + ctx, + logger, + cm, + rpc, + rpcClient, 2*time.Second, + false, + sources.RPCKindStandard) if err != nil { - return fmt.Errorf("failed to create monitor for rpc %v: %w", rpc, err) - } - // start the monitor if requested - if shouldStart { - if err := monitor.Start(); err != nil { - return fmt.Errorf("failed to start monitor for rpc %v: %w", rpc, err) - } + return err } - su.chainMonitors[chainID] = monitor + logProcessor := processors.NewLogProcessor(chainID, su.db) + chainProcessor := processors.NewChainProcessor(logger, cl, chainID, logProcessor, su.db) + su.chainProcessors[chainID] = chainProcessor su.db.AddLogDB(chainID, logDB) return nil } -func createRpcClient(ctx context.Context, logger log.Logger, rpc string) (client.RPC, types.ChainID, error) { +func clientForL2(ctx context.Context, logger log.Logger, rpc string) (client.RPC, types.ChainID, error) { ethClient, err := dial.DialEthClientWithTimeout(ctx, 10*time.Second, logger, rpc) if err != nil { return nil, types.ChainID{}, fmt.Errorf("failed to connect to rpc %v: %w", rpc, err) @@ -120,6 +140,9 @@ func createRpcClient(ctx context.Context, logger log.Logger, rpc string) (client } func (su *SupervisorBackend) Start(ctx context.Context) error { + su.mu.Lock() + defer su.mu.Unlock() + // ensure we only start once if !su.started.CompareAndSwap(false, true) { return errors.New("already started") @@ -129,68 +152,73 @@ func (su *SupervisorBackend) Start(ctx context.Context) error { if err := su.db.ResumeFromLastSealedBlock(); err != nil { return fmt.Errorf("failed to resume chains db: %w", err) } - // start chain monitors - for _, monitor := range su.chainMonitors { - if err := monitor.Start(); err != nil { - return fmt.Errorf("failed to start chain monitor: %w", err) - } - } + // TODO init processors, de-dup with constructor return nil } -var errAlreadyStopped = errors.New("already stopped") - func (su *SupervisorBackend) Stop(ctx context.Context) error { + su.mu.Lock() + defer su.mu.Unlock() + if !su.started.CompareAndSwap(true, false) { return errAlreadyStopped } - // collect errors from stopping chain monitors - var errs error - for _, monitor := range su.chainMonitors { - if err := monitor.Stop(); err != nil { - errs = errors.Join(errs, fmt.Errorf("failed to stop chain monitor: %w", err)) - } + // close all processors + for _, processor := range su.chainProcessors { + processor.Close() } - // close the database - if err := su.db.Close(); err != nil { - errs = errors.Join(errs, fmt.Errorf("failed to close database: %w", err)) + clear(su.chainProcessors) + for _, processor := range su.crossUnsafeProcessors { + processor.Close() } - return errs -} - -func (su *SupervisorBackend) Close() error { - // TODO(protocol-quest#288): close logdb of all chains - return nil + clear(su.crossUnsafeProcessors) + for _, processor := range su.crossSafeVerifiers { + processor.Close() + } + clear(su.crossSafeVerifiers) + // close the databases + return su.db.Close() } // AddL2RPC adds a new L2 chain to the supervisor backend // it stops and restarts the backend to add the new chain func (su *SupervisorBackend) AddL2RPC(ctx context.Context, rpc string) error { + su.mu.Lock() + defer su.mu.Unlock() + // start the monitor immediately, as the backend is assumed to already be running return su.addFromRPC(ctx, su.logger, rpc, true) } +// Query methods +// ---------------------------- + func (su *SupervisorBackend) CheckMessage(identifier types.Identifier, payloadHash common.Hash) 
(types.SafetyLevel, error) { + su.mu.RLock() + defer su.mu.RUnlock() + chainID := identifier.ChainID blockNum := identifier.BlockNumber logIdx := identifier.LogIndex _, err := su.db.Check(chainID, blockNum, uint32(logIdx), payloadHash) - if errors.Is(err, logs.ErrFuture) { + if errors.Is(err, entrydb.ErrFuture) { return types.LocalUnsafe, nil } - if errors.Is(err, logs.ErrConflict) { + if errors.Is(err, entrydb.ErrConflict) { return types.Invalid, nil } if err != nil { return types.Invalid, fmt.Errorf("failed to check log: %w", err) } - safest := su.db.Safest(chainID, blockNum, uint32(logIdx)) - return safest, nil + return su.db.Safest(chainID, blockNum, uint32(logIdx)) } func (su *SupervisorBackend) CheckMessages( messages []types.Message, minSafety types.SafetyLevel) error { + su.mu.RLock() + defer su.mu.RUnlock() + for _, msg := range messages { safety, err := su.CheckMessage(msg.Identifier, msg.PayloadHash) if err != nil { @@ -206,32 +234,85 @@ func (su *SupervisorBackend) CheckMessages( return nil } -// CheckBlock checks if the block is safe according to the safety level -// The block is considered safe if all logs in the block are safe -// this is decided by finding the last log in the block and -func (su *SupervisorBackend) CheckBlock(chainID *hexutil.U256, blockHash common.Hash, blockNumber hexutil.Uint64) (types.SafetyLevel, error) { - // find the last log index in the block - id := eth.BlockID{Hash: blockHash, Number: uint64(blockNumber)} - _, err := su.db.FindSealedBlock(types.ChainID(*chainID), id) - if errors.Is(err, logs.ErrFuture) { - return types.LocalUnsafe, nil +func (su *SupervisorBackend) UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error) { + su.mu.RLock() + defer su.mu.RUnlock() + + head, err := su.db.LocalUnsafe(chainID) + if err != nil { + return types.ReferenceView{}, fmt.Errorf("failed to get local-unsafe head: %w", err) } - if errors.Is(err, logs.ErrConflict) { - return types.Invalid, nil + cross, err := su.db.CrossUnsafe(chainID) + if err != nil { + return types.ReferenceView{}, fmt.Errorf("failed to get cross-unsafe head: %w", err) } + + // TODO check `unsafe` input to detect reorg conflicts + + return types.ReferenceView{ + Local: eth.BlockID{Hash: head.LastSealedBlockHash, Number: head.LastSealedBlockNum}, + Cross: eth.BlockID{Hash: cross.LastSealedBlockHash, Number: cross.LastSealedBlockNum}, + }, nil +} + +func (su *SupervisorBackend) SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error) { + su.mu.RLock() + defer su.mu.RUnlock() + + _, localSafe, err := su.db.LocalSafe(chainID) if err != nil { - su.logger.Error("failed to scan block", "err", err) - return "", err + return types.ReferenceView{}, fmt.Errorf("failed to get local-safe head: %w", err) } - safest := su.db.Safest(types.ChainID(*chainID), uint64(blockNumber), 0) - return safest, nil + _, crossSafe, err := su.db.CrossSafe(chainID) + if err != nil { + return types.ReferenceView{}, fmt.Errorf("failed to get cross-safe head: %w", err) + } + + // TODO check `safe` input to detect reorg conflicts + + return types.ReferenceView{ + Local: localSafe, + Cross: crossSafe, + }, nil } -func (su *SupervisorBackend) DerivedFrom( - ctx context.Context, - chainID types.ChainID, - blockHash common.Hash, - blockNumber uint64) (eth.BlockRef, error) { - // TODO(#12358): attach to backend - return eth.BlockRef{}, nil +func (su *SupervisorBackend) Finalized(ctx context.Context, chainID types.ChainID) 
(eth.BlockID, error) { + su.mu.RLock() + defer su.mu.RUnlock() + + return su.db.Finalized(chainID) +} + +func (su *SupervisorBackend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockID, err error) { + su.mu.RLock() + defer su.mu.RUnlock() + + return su.db.DerivedFrom(chainID, derived) +} + +// Update methods +// ---------------------------- + +func (su *SupervisorBackend) UpdateLocalUnsafe(chainID types.ChainID, head eth.BlockRef) error { + su.mu.RLock() + defer su.mu.RUnlock() + ch, ok := su.chainProcessors[chainID] + if !ok { + return db.ErrUnknownChain + } + return ch.OnNewHead(head) +} + +func (su *SupervisorBackend) UpdateLocalSafe(chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error { + su.mu.RLock() + defer su.mu.RUnlock() + + return su.db.UpdateLocalSafe(chainID, derivedFrom, lastDerived) +} + +func (su *SupervisorBackend) UpdateFinalizedL1(chainID types.ChainID, finalized eth.BlockRef) error { + su.mu.RLock() + defer su.mu.RUnlock() + + return su.db.UpdateFinalizedL1(finalized) } diff --git a/op-supervisor/supervisor/backend/db/db.go b/op-supervisor/supervisor/backend/db/db.go index c4f8296d1ce01..bc45b27203520 100644 --- a/op-supervisor/supervisor/backend/db/db.go +++ b/op-supervisor/supervisor/backend/db/db.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io" + "sync" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -11,13 +12,10 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/safety" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) -var ( - ErrUnknownChain = errors.New("unknown chain") -) +var ErrUnknownChain = errors.New("unknown chain") type LogStorage interface { io.Closer @@ -45,44 +43,69 @@ type LogStorage interface { Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (nextIndex entrydb.EntryIdx, err error) } +type LocalDerivedFromStorage interface { + AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error +} + +type CrossDerivedFromStorage interface { + LocalDerivedFromStorage + // This will start to differ with reorg support +} + var _ LogStorage = (*logs.DB)(nil) -// ChainsDB is a database that stores logs and heads for multiple chains. +// ChainsDB is a database that stores logs and derived-from data for multiple chains. // it implements the ChainsStorage interface. type ChainsDB struct { - logDBs map[types.ChainID]LogStorage - safetyIndex safety.SafetyIndex - logger log.Logger + // RW mutex: + // Read = chains can be read / mutated. + // Write = set of chains is changing. + mu sync.RWMutex + + // unsafe info: the sequence of block seals and events + logDBs map[types.ChainID]LogStorage + + // cross-unsafe: how far we have processed the unsafe data. + // TODO: not initialized yet. Should just set it to the last known cross-safe block. + crossUnsafe map[types.ChainID]types.HeadPointer + + // local-safe: index of what we optimistically know about L2 blocks being derived from L1 + localDBs map[types.ChainID]LocalDerivedFromStorage + + // cross-safe: index of L2 blocks we know to only have cross-L2 valid dependencies + crossDBs map[types.ChainID]CrossDerivedFromStorage + + // finalized: the L1 finality progress. 
This can be translated into what may be considered as finalized in L2. + // TODO: not initialized yet. Should just wait for a new signal of it. + finalizedL1 eth.L1BlockRef + + logger log.Logger } func NewChainsDB(logDBs map[types.ChainID]LogStorage, l log.Logger) *ChainsDB { - ret := &ChainsDB{ + return &ChainsDB{ logDBs: logDBs, logger: l, } - ret.safetyIndex = safety.NewSafetyIndex(l, ret) - return ret } func (db *ChainsDB) AddLogDB(chain types.ChainID, logDB LogStorage) { + db.mu.Lock() + defer db.mu.Unlock() + if db.logDBs[chain] != nil { log.Warn("overwriting existing logDB for chain", "chain", chain) } db.logDBs[chain] = logDB } -func (db *ChainsDB) IteratorStartingAt(chain types.ChainID, sealedNum uint64, logIndex uint32) (logs.Iterator, error) { - logDB, ok := db.logDBs[chain] - if !ok { - return nil, fmt.Errorf("%w: %v", ErrUnknownChain, chain) - } - return logDB.IteratorStartingAt(sealedNum, logIndex) -} - // ResumeFromLastSealedBlock prepares the chains db to resume recording events after a restart. // It rewinds the database to the last block that is guaranteed to have been fully recorded to the database, // to ensure it can resume recording from the first log of the next block. func (db *ChainsDB) ResumeFromLastSealedBlock() error { + db.mu.RLock() + defer db.mu.RUnlock() + for chain, logStore := range db.logDBs { headNum, ok := logStore.LatestSealedBlockNum() if !ok { @@ -98,100 +121,10 @@ func (db *ChainsDB) ResumeFromLastSealedBlock() error { return nil } -// Check calls the underlying logDB to determine if the given log entry is safe with respect to the checker's criteria. -func (db *ChainsDB) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (common.Hash, error) { - logDB, ok := db.logDBs[chain] - if !ok { - return common.Hash{}, fmt.Errorf("%w: %v", ErrUnknownChain, chain) - } - _, err := logDB.Contains(blockNum, logIdx, logHash) - if err != nil { - return common.Hash{}, err - } - // TODO(#11693): need to get the actual block hash for this log entry for reorg detection - return common.Hash{}, nil -} - -// Safest returns the strongest safety level that can be guaranteed for the given log entry. -// it assumes the log entry has already been checked and is valid, this funcion only checks safety levels. -func (db *ChainsDB) Safest(chainID types.ChainID, blockNum uint64, index uint32) (safest types.SafetyLevel) { - safest = types.LocalUnsafe - if crossUnsafe, err := db.safetyIndex.CrossUnsafeL2(chainID); err == nil && crossUnsafe.WithinRange(blockNum, index) { - safest = types.CrossUnsafe - } - if localSafe, err := db.safetyIndex.LocalSafeL2(chainID); err == nil && localSafe.WithinRange(blockNum, index) { - safest = types.LocalSafe - } - if crossSafe, err := db.safetyIndex.LocalSafeL2(chainID); err == nil && crossSafe.WithinRange(blockNum, index) { - safest = types.CrossSafe - } - if finalized, err := db.safetyIndex.FinalizedL2(chainID); err == nil { - if finalized.Number >= blockNum { - safest = types.Finalized - } - } - return -} - -func (db *ChainsDB) FindSealedBlock(chain types.ChainID, block eth.BlockID) (nextEntry entrydb.EntryIdx, err error) { - logDB, ok := db.logDBs[chain] - if !ok { - return 0, fmt.Errorf("%w: %v", ErrUnknownChain, chain) - } - return logDB.FindSealedBlock(block) -} - -// LatestBlockNum returns the latest fully-sealed block number that has been recorded to the logs db -// for the given chain. It does not contain safety guarantees. -// The block number might not be available (empty database, or non-existent chain). 
-func (db *ChainsDB) LatestBlockNum(chain types.ChainID) (num uint64, ok bool) { - logDB, knownChain := db.logDBs[chain] - if !knownChain { - return 0, false - } - return logDB.LatestSealedBlockNum() -} - -func (db *ChainsDB) AddLog( - chain types.ChainID, - logHash common.Hash, - parentBlock eth.BlockID, - logIdx uint32, - execMsg *types.ExecutingMessage) error { - logDB, ok := db.logDBs[chain] - if !ok { - return fmt.Errorf("%w: %v", ErrUnknownChain, chain) - } - return logDB.AddLog(logHash, parentBlock, logIdx, execMsg) -} - -func (db *ChainsDB) SealBlock( - chain types.ChainID, - block eth.BlockRef) error { - logDB, ok := db.logDBs[chain] - if !ok { - return fmt.Errorf("%w: %v", ErrUnknownChain, chain) - } - err := logDB.SealBlock(block.ParentHash, block.ID(), block.Time) - if err != nil { - return fmt.Errorf("failed to seal block %v: %w", block, err) - } - err = db.safetyIndex.UpdateLocalUnsafe(chain, block) - if err != nil { - return fmt.Errorf("failed to update local-unsafe: %w", err) - } - return nil -} - -func (db *ChainsDB) Rewind(chain types.ChainID, headBlockNum uint64) error { - logDB, ok := db.logDBs[chain] - if !ok { - return fmt.Errorf("%w: %v", ErrUnknownChain, chain) - } - return logDB.Rewind(headBlockNum) -} - func (db *ChainsDB) Close() error { + db.mu.Lock() + defer db.mu.Unlock() + var combined error for id, logDB := range db.logDBs { if err := logDB.Close(); err != nil { diff --git a/op-supervisor/supervisor/backend/db/entrydb/db.go b/op-supervisor/supervisor/backend/db/entrydb/db.go new file mode 100644 index 0000000000000..1247597aaa0e3 --- /dev/null +++ b/op-supervisor/supervisor/backend/db/entrydb/db.go @@ -0,0 +1,305 @@ +package entrydb + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/log" +) + +type EntryStore[T EntryType] interface { + Size() int64 + LastEntryIdx() EntryIdx + Read(idx EntryIdx) (Entry[T], error) + Append(entries ...Entry[T]) error + Truncate(idx EntryIdx) error + Close() error +} + +type Metrics interface { + RecordDBEntryCount(count int64) + RecordDBSearchEntriesRead(count int64) +} + +type IndexKey interface { + comparable + String() string +} + +type IndexState[T EntryType, K IndexKey] interface { + NextIndex() EntryIdx + Key() (k K, ok bool) + Incomplete() bool + ApplyEntry(entry Entry[T]) error + + Out() []Entry[T] + ClearOut() +} + +type IndexDriver[T EntryType, K IndexKey, S IndexState[T, K]] interface { + // Less compares the primary key. To allow binary search over the index. + Less(a, b K) bool + // Copy copies an index state. To allow state-snapshots without copy, for conditional iteration. + Copy(src, dst S) + // NewState creates an empty state, with the given index as next target input. + NewState(nextIndex EntryIdx) S + // KeyFromCheckpoint is called to turn an entry at a SearchCheckpointFrequency interval into a primary key. + KeyFromCheckpoint(e Entry[T]) (K, error) + // ValidEnd inspects if we can truncate the DB and leave the given entry as last entry. + ValidEnd(e Entry[T]) bool + // SearchCheckpointFrequency returns a constant, the interval of how far apart the guaranteed checkpoint entries are. 
+ SearchCheckpointFrequency() uint64 +} + +type DB[T EntryType, K IndexKey, S IndexState[T, K], D IndexDriver[T, K, S]] struct { + log log.Logger + m Metrics + store EntryStore[T] + rwLock sync.RWMutex + + HeadState S + + driver D +} + +func (db *DB[T, K, S, D]) LastEntryIdx() EntryIdx { + return db.store.LastEntryIdx() +} + +func (db *DB[T, K, S, D]) Init(trimToLastSealed bool) error { + defer db.updateEntryCountMetric() // Always update the entry count metric after init completes + if trimToLastSealed { + if err := db.trimToLastSealed(); err != nil { + return fmt.Errorf("failed to trim invalid trailing entries: %w", err) + } + } + if db.LastEntryIdx() < 0 { + // Database is empty. + // Make a state that is ready to apply the genesis block on top of as first entry. + // This will infer into a checkpoint (half of the block seal here) + // and is then followed up with canonical-hash entry of genesis. + db.HeadState = db.driver.NewState(0) + return nil + } + // start at the last checkpoint, + // and then apply any remaining changes on top, to hydrate the state. + searchCheckpointFrequency := EntryIdx(db.driver.SearchCheckpointFrequency()) + lastCheckpoint := (db.LastEntryIdx() / searchCheckpointFrequency) * searchCheckpointFrequency + i := db.newIterator(lastCheckpoint) + if err := i.End(); err != nil { + return fmt.Errorf("failed to init from remaining trailing data: %w", err) + } + db.HeadState = i.current + return nil +} + +func (db *DB[T, K, S, D]) trimToLastSealed() error { + i := db.LastEntryIdx() + for ; i >= 0; i-- { + entry, err := db.store.Read(i) + if err != nil { + return fmt.Errorf("failed to read %v to check for trailing entries: %w", i, err) + } + if db.driver.ValidEnd(entry) { + break + } + } + if i < db.LastEntryIdx() { + db.log.Warn("Truncating unexpected trailing entries", "prev", db.LastEntryIdx(), "new", i) + // trim such that the last entry is the canonical-hash we identified + return db.store.Truncate(i) + } + return nil +} + +func (db *DB[T, K, S, D]) updateEntryCountMetric() { + db.m.RecordDBEntryCount(db.store.Size()) +} + +// NewIteratorFor returns an iterator that will have traversed everything that was returned as true by the given lessFn. +// It may return an ErrSkipped if some data is known, but no data is known to be less than the requested key. +// It may return ErrFuture if no data is known at all. +func (db *DB[T, K, S, D]) NewIteratorFor(lessFn func(key K) bool) (Iterator[T, K, S], error) { + return db.newIteratorFor(lessFn) +} + +func (db *DB[T, K, S, D]) newIteratorExactlyAt(at K) (*iterator[T, K, S, D], error) { + iter, err := db.newIteratorFor(func(key K) bool { + return db.driver.Less(key, at) || key == at + }) + if err != nil { + return nil, err + } + k, ok := iter.State().Key() + if !ok { // we should have stopped at complete data + return nil, ErrDataCorruption + } + if k != at { // we found data less than the key, but not exactly equal to it + return nil, ErrFuture + } + return iter, nil +} + +func (db *DB[T, K, S, D]) newIteratorFor(lessFn func(key K) bool) (*iterator[T, K, S, D], error) { + // Find a checkpoint before (not at) the requested key, + // so we can read the value data corresponding to the key into the iterator state. + searchCheckpointIndex, err := db.searchCheckpoint(lessFn) + if errors.Is(err, io.EOF) { + // Did not find a checkpoint to start reading from so the log cannot be present. 
+		return nil, ErrFuture
+	} else if err != nil {
+		return nil, err
+	}
+	// The iterator did not consume the checkpoint yet, it's positioned right at it.
+	// So the first read will return the checkpoint entry itself.
+	iter := db.newIterator(searchCheckpointIndex)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		db.m.RecordDBSearchEntriesRead(iter.entriesRead)
+	}()
+	err = iter.TraverseConditional(func(state S) error {
+		at, ok := state.Key()
+		if !ok {
+			return errors.New("expected complete state")
+		}
+		if !lessFn(at) {
+			return ErrStop
+		}
+		return nil
+	})
+	if err == nil {
+		panic("expected any error, good or bad, on stop")
+	}
+	if errors.Is(err, ErrStop) {
+		err = nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	return iter, nil
+}
+
+// newIterator creates an iterator at the given index.
+// None of the iterator attributes will be ready for reads,
+// but the entry at the given index will be first read when using the iterator.
+func (db *DB[T, K, S, D]) newIterator(index EntryIdx) *iterator[T, K, S, D] {
+	return &iterator[T, K, S, D]{
+		db:      db,
+		current: db.driver.NewState(index),
+	}
+}
+
+// searchCheckpoint performs a binary search of the searchCheckpoint entries
+// to find the closest one at or before the target key, as determined by lessFn.
+// Returns the index of the searchCheckpoint to begin reading from or an error.
+func (db *DB[T, K, S, D]) searchCheckpoint(lessFn func(key K) bool) (EntryIdx, error) {
+	if db.HeadState.NextIndex() == 0 {
+		return 0, ErrFuture // empty DB, everything is in the future
+	}
+	searchCheckpointFrequency := EntryIdx(db.driver.SearchCheckpointFrequency())
+	n := (db.LastEntryIdx() / searchCheckpointFrequency) + 1
+	// Define: x is the array of known checkpoints
+	// Invariant: x[i] <= target, x[j] > target.
+	i, j := EntryIdx(0), n
+	for i+1 < j { // i is inclusive, j is exclusive.
+		// Get the checkpoint exactly in-between,
+		// bias towards a higher value if an even number of checkpoints.
+		// E.g. i=3 and j=4 would not run, since i+1 == j
+		// E.g. i=3 and j=5 leaves checkpoints 3, 4, and we pick 4 as pivot
+		// E.g. i=3 and j=6 leaves checkpoints 3, 4, 5, and we pick 4 as pivot
+		//
+		// The following holds: i ≤ h < j
+		h := EntryIdx((uint64(i) + uint64(j)) >> 1)
+		checkpoint, err := db.readSearchCheckpoint(h * searchCheckpointFrequency)
+		if err != nil {
+			return 0, fmt.Errorf("failed to read entry %v: %w", h, err)
+		}
+		if lessFn(checkpoint) {
+			i = h
+		} else {
+			j = h
+		}
+	}
+	if i+1 != j {
+		panic("expected to have 1 checkpoint left")
+	}
+	result := i * searchCheckpointFrequency
+	checkpoint, err := db.readSearchCheckpoint(result)
+	if err != nil {
+		return 0, fmt.Errorf("failed to read final search checkpoint result: %w", err)
+	}
+	if !lessFn(checkpoint) {
+		return 0, fmt.Errorf("missing data, earliest search checkpoint is %s, but is not before target: %w", checkpoint, ErrSkipped)
+	}
+	return result, nil
+}
+
+// Rewind the database to remove any entries after the given newHead key.
+// The entry at newHead itself is not removed.
+func (db *DB[T, K, S, D]) Rewind(newHead K) error {
+	db.rwLock.Lock()
+	defer db.rwLock.Unlock()
+	// Even if the last fully-processed entry matches newHead,
+	// we might still have trailing log events to get rid of.
+ iter, err := db.newIteratorExactlyAt(newHead) + if err != nil { + return err + } + // Truncate to contain idx+1 entries, since indices are 0 based, + // this deletes everything after idx + if err := db.store.Truncate(iter.NextIndex()); err != nil { + return fmt.Errorf("failed to truncate to %s: %w", newHead, err) + } + // Use db.init() to find the state for the new latest entry + if err := db.Init(true); err != nil { + return fmt.Errorf("failed to find new last entry context: %w", err) + } + return nil +} + +// debug util to log the last 10 entries of the chain +func (db *DB[T, K, S, D]) debugTip() { + for x := 0; x < 10; x++ { + index := db.LastEntryIdx() - EntryIdx(x) + if index < 0 { + continue + } + e, err := db.store.Read(index) + if err == nil { + db.log.Debug("tip", "index", index, "type", e.Type()) + } + } +} + +func (db *DB[T, K, S, D]) Flush() error { + out := db.HeadState.Out() + nextIndex := db.HeadState.NextIndex() + for i, e := range out { + db.log.Trace("appending entry", "type", e.Type(), "entry", hexutil.Bytes(e[:]), + "next", int(nextIndex)-len(out)+i) + } + if err := db.store.Append(out...); err != nil { + return fmt.Errorf("failed to append entries: %w", err) + } + db.HeadState.ClearOut() + db.updateEntryCountMetric() + return nil +} + +func (db *DB[T, K, S, D]) readSearchCheckpoint(entryIdx EntryIdx) (K, error) { + data, err := db.store.Read(entryIdx) + if err != nil { + var k K + return k, fmt.Errorf("failed to read entry %v: %w", entryIdx, err) + } + return db.driver.KeyFromCheckpoint(data) +} + +func (db *DB[T, K, S, D]) Close() error { + return db.store.Close() +} diff --git a/op-supervisor/supervisor/backend/db/entrydb/entry_db.go b/op-supervisor/supervisor/backend/db/entrydb/entry_db.go index a260d143ddb59..05594df7ec31d 100644 --- a/op-supervisor/supervisor/backend/db/entrydb/entry_db.go +++ b/op-supervisor/supervisor/backend/db/entrydb/entry_db.go @@ -9,71 +9,19 @@ import ( "github.com/ethereum/go-ethereum/log" ) -const ( - EntrySize = 34 -) +const EntrySize = 34 type EntryIdx int64 -type Entry [EntrySize]byte - -func (entry Entry) Type() EntryType { - return EntryType(entry[0]) +type EntryType interface { + String() string + ~uint8 } -type EntryTypeFlag uint8 - -const ( - FlagSearchCheckpoint EntryTypeFlag = 1 << TypeSearchCheckpoint - FlagCanonicalHash EntryTypeFlag = 1 << TypeCanonicalHash - FlagInitiatingEvent EntryTypeFlag = 1 << TypeInitiatingEvent - FlagExecutingLink EntryTypeFlag = 1 << TypeExecutingLink - FlagExecutingCheck EntryTypeFlag = 1 << TypeExecutingCheck - FlagPadding EntryTypeFlag = 1 << TypePadding - // for additional padding - FlagPadding2 EntryTypeFlag = FlagPadding << 1 -) - -func (ex EntryTypeFlag) Any(v EntryTypeFlag) bool { - return ex&v != 0 -} - -func (ex *EntryTypeFlag) Add(v EntryTypeFlag) { - *ex = *ex | v -} +type Entry[T EntryType] [EntrySize]byte -func (ex *EntryTypeFlag) Remove(v EntryTypeFlag) { - *ex = *ex &^ v -} - -type EntryType uint8 - -const ( - TypeSearchCheckpoint EntryType = iota - TypeCanonicalHash - TypeInitiatingEvent - TypeExecutingLink - TypeExecutingCheck - TypePadding -) - -func (d EntryType) String() string { - switch d { - case TypeSearchCheckpoint: - return "searchCheckpoint" - case TypeCanonicalHash: - return "canonicalHash" - case TypeInitiatingEvent: - return "initiatingEvent" - case TypeExecutingLink: - return "executingLink" - case TypeExecutingCheck: - return "executingCheck" - case TypePadding: - return "padding" - default: - return fmt.Sprintf("unknown-%d", uint8(d)) - } +func (entry Entry[T]) 
Type() T { + return T(entry[0]) } // dataAccess defines a minimal API required to manipulate the actual stored data. @@ -85,7 +33,7 @@ type dataAccess interface { Truncate(size int64) error } -type EntryDB struct { +type EntryDB[T EntryType] struct { data dataAccess lastEntryIdx EntryIdx @@ -97,7 +45,7 @@ type EntryDB struct { // If the file exists it will be used as the existing data. // Returns ErrRecoveryRequired if the existing file is not a valid entry db. A EntryDB is still returned but all // operations will return ErrRecoveryRequired until the Recover method is called. -func NewEntryDB(logger log.Logger, path string) (*EntryDB, error) { +func NewEntryDB[T EntryType](logger log.Logger, path string) (*EntryDB[T], error) { logger.Info("Opening entry database", "path", path) file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0o644) if err != nil { @@ -108,7 +56,7 @@ func NewEntryDB(logger log.Logger, path string) (*EntryDB, error) { return nil, fmt.Errorf("failed to stat database at %v: %w", path, err) } size := info.Size() / EntrySize - db := &EntryDB{ + db := &EntryDB[T]{ data: file, lastEntryIdx: EntryIdx(size - 1), } @@ -121,24 +69,24 @@ func NewEntryDB(logger log.Logger, path string) (*EntryDB, error) { return db, nil } -func (e *EntryDB) Size() int64 { +func (e *EntryDB[T]) Size() int64 { return int64(e.lastEntryIdx) + 1 } -func (e *EntryDB) LastEntryIdx() EntryIdx { +func (e *EntryDB[T]) LastEntryIdx() EntryIdx { return e.lastEntryIdx } // Read an entry from the database by index. Returns io.EOF iff idx is after the last entry. -func (e *EntryDB) Read(idx EntryIdx) (Entry, error) { +func (e *EntryDB[T]) Read(idx EntryIdx) (Entry[T], error) { if idx > e.lastEntryIdx { - return Entry{}, io.EOF + return Entry[T]{}, io.EOF } - var out Entry + var out Entry[T] read, err := e.data.ReadAt(out[:], int64(idx)*EntrySize) // Ignore io.EOF if we read the entire last entry as ReadAt may return io.EOF or nil when it reads the last byte if err != nil && !(errors.Is(err, io.EOF) && read == EntrySize) { - return Entry{}, fmt.Errorf("failed to read entry %v: %w", idx, err) + return Entry[T]{}, fmt.Errorf("failed to read entry %v: %w", idx, err) } return out, nil } @@ -147,7 +95,7 @@ func (e *EntryDB) Read(idx EntryIdx) (Entry, error) { // The entries are combined in memory and passed to a single Write invocation. // If the write fails, it will attempt to truncate any partially written data. // Subsequent writes to this instance will fail until partially written data is truncated. -func (e *EntryDB) Append(entries ...Entry) error { +func (e *EntryDB[T]) Append(entries ...Entry[T]) error { if e.cleanupFailedWrite { // Try to rollback partially written data from a previous Append if truncateErr := e.Truncate(e.lastEntryIdx); truncateErr != nil { @@ -177,7 +125,7 @@ func (e *EntryDB) Append(entries ...Entry) error { } // Truncate the database so that the last retained entry is idx. Any entries after idx are deleted. -func (e *EntryDB) Truncate(idx EntryIdx) error { +func (e *EntryDB[T]) Truncate(idx EntryIdx) error { if err := e.data.Truncate((int64(idx) + 1) * EntrySize); err != nil { return fmt.Errorf("failed to truncate to entry %v: %w", idx, err) } @@ -188,13 +136,13 @@ func (e *EntryDB) Truncate(idx EntryIdx) error { } // recover an invalid database by truncating back to the last complete event. 
-func (e *EntryDB) recover() error { +func (e *EntryDB[T]) recover() error { if err := e.data.Truncate((e.Size()) * EntrySize); err != nil { return fmt.Errorf("failed to truncate trailing partial entries: %w", err) } return nil } -func (e *EntryDB) Close() error { +func (e *EntryDB[T]) Close() error { return e.data.Close() } diff --git a/op-supervisor/supervisor/backend/db/entrydb/entry_db_test.go b/op-supervisor/supervisor/backend/db/entrydb/entry_db_test.go index bc9a871bea26e..866f2fb91b4b0 100644 --- a/op-supervisor/supervisor/backend/db/entrydb/entry_db_test.go +++ b/op-supervisor/supervisor/backend/db/entrydb/entry_db_test.go @@ -3,6 +3,7 @@ package entrydb import ( "bytes" "errors" + "fmt" "io" "os" "path/filepath" @@ -13,6 +14,14 @@ import ( "github.com/stretchr/testify/require" ) +type TestEntryType uint8 + +func (typ TestEntryType) String() string { + return fmt.Sprintf("%d", uint8(typ)) +} + +type TestEntry = Entry[TestEntryType] + func TestReadWrite(t *testing.T) { t.Run("BasicReadWrite", func(t *testing.T) { db := createEntryDB(t) @@ -114,7 +123,7 @@ func TestTruncateTrailingPartialEntries(t *testing.T) { copy(invalidData[EntrySize:], entry2[:]) invalidData[len(invalidData)-1] = 3 // Some invalid trailing data require.NoError(t, os.WriteFile(file, invalidData, 0o644)) - db, err := NewEntryDB(logger, file) + db, err := NewEntryDB[TestEntryType](logger, file) require.NoError(t, err) defer db.Close() @@ -177,19 +186,19 @@ func TestWriteErrors(t *testing.T) { }) } -func requireRead(t *testing.T, db *EntryDB, idx EntryIdx, expected Entry) { +func requireRead(t *testing.T, db *EntryDB[TestEntryType], idx EntryIdx, expected TestEntry) { actual, err := db.Read(idx) require.NoError(t, err) require.Equal(t, expected, actual) } -func createEntry(i byte) Entry { - return Entry(bytes.Repeat([]byte{i}, EntrySize)) +func createEntry(i byte) TestEntry { + return TestEntry(bytes.Repeat([]byte{i}, EntrySize)) } -func createEntryDB(t *testing.T) *EntryDB { +func createEntryDB(t *testing.T) *EntryDB[TestEntryType] { logger := testlog.Logger(t, log.LvlInfo) - db, err := NewEntryDB(logger, filepath.Join(t.TempDir(), "entries.db")) + db, err := NewEntryDB[TestEntryType](logger, filepath.Join(t.TempDir(), "entries.db")) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) @@ -197,9 +206,9 @@ func createEntryDB(t *testing.T) *EntryDB { return db } -func createEntryDBWithStubData() (*EntryDB, *stubDataAccess) { +func createEntryDBWithStubData() (*EntryDB[TestEntryType], *stubDataAccess) { stubData := &stubDataAccess{} - db := &EntryDB{data: stubData, lastEntryIdx: -1} + db := &EntryDB[TestEntryType]{data: stubData, lastEntryIdx: -1} return db, stubData } diff --git a/op-supervisor/supervisor/backend/db/entrydb/error.go b/op-supervisor/supervisor/backend/db/entrydb/error.go new file mode 100644 index 0000000000000..42ae26dcf1125 --- /dev/null +++ b/op-supervisor/supervisor/backend/db/entrydb/error.go @@ -0,0 +1,20 @@ +package entrydb + +import "errors" + +var ( + // ErrOutOfOrder happens when you try to add data to the DB, + // but it does not actually fit onto the latest data (by being too old or new). + ErrOutOfOrder = errors.New("data out of order") + // ErrDataCorruption happens when the underlying DB has some I/O issue + ErrDataCorruption = errors.New("data corruption") + // ErrSkipped happens when we try to retrieve data that is not available (pruned) + // It may also happen if we erroneously skip data, that was not considered a conflict, if the DB is corrupted. 
+ ErrSkipped = errors.New("skipped data") + // ErrFuture happens when data is just not yet available + ErrFuture = errors.New("future data") + // ErrConflict happens when we know for sure that there is different canonical data + ErrConflict = errors.New("conflicting data") + // ErrStop can be used in iterators to indicate iteration has to stop + ErrStop = errors.New("iter stop") +) diff --git a/op-supervisor/supervisor/backend/db/entrydb/iterator.go b/op-supervisor/supervisor/backend/db/entrydb/iterator.go new file mode 100644 index 0000000000000..fef8deb48adb9 --- /dev/null +++ b/op-supervisor/supervisor/backend/db/entrydb/iterator.go @@ -0,0 +1,76 @@ +package entrydb + +import ( + "errors" + "fmt" + "io" +) + +type Iterator[T EntryType, K IndexKey, S IndexState[T, K]] interface { + TraverseConditional(fn func(state S) error) error + State() S +} + +type iterator[T EntryType, K IndexKey, S IndexState[T, K], D IndexDriver[T, K, S]] struct { + db *DB[T, K, S, D] + current S + entriesRead int64 +} + +func (i *iterator[T, K, S, D]) State() S { + return i.current +} + +// End traverses the iterator to the end of the DB. +// It does not return io.EOF or ErrFuture. +func (i *iterator[T, K, S, D]) End() error { + for { + err := i.next() + if errors.Is(err, ErrFuture) { + return nil + } else if err != nil { + return err + } + } +} + +func (i *iterator[T, K, S, D]) TraverseConditional(fn func(state S) error) error { + snapshot := i.db.driver.NewState(0) + for { + i.db.driver.Copy(i.current, snapshot) // copy the iterator state, without allocating a new snapshot each iteration + err := i.next() + if err != nil { + i.current = snapshot + return err + } + if i.current.Incomplete() { // skip intermediate states + continue + } + if err := fn(i.current); err != nil { + i.current = snapshot + return err + } + } +} + +// Read and apply the next entry. 
+func (i *iterator[T, K, S, D]) next() error {
+	index := i.current.NextIndex()
+	entry, err := i.db.store.Read(index)
+	if err != nil {
+		if errors.Is(err, io.EOF) {
+			return ErrFuture
+		}
+		return fmt.Errorf("failed to read entry %d: %w", index, err)
+	}
+	if err := i.current.ApplyEntry(entry); err != nil {
+		return fmt.Errorf("failed to process entry %d to iterator state: %w", index, err)
+	}
+
+	i.entriesRead++
+	return nil
+}
+
+func (i *iterator[T, K, S, D]) NextIndex() EntryIdx {
+	return i.current.NextIndex()
+}
diff --git a/op-supervisor/supervisor/backend/db/fromda/db.go b/op-supervisor/supervisor/backend/db/fromda/db.go
new file mode 100644
index 0000000000000..772021eb69ba8
--- /dev/null
+++ b/op-supervisor/supervisor/backend/db/fromda/db.go
@@ -0,0 +1,127 @@
+package fromda
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/ethereum/go-ethereum/log"
+
+	"github.com/ethereum-optimism/optimism/op-service/eth"
+	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
+	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
+)
+
+type DB struct {
+	log    log.Logger
+	inner  *entrydb.DB[EntryType, Key, *state, driver]
+	rwLock sync.RWMutex
+}
+
+func (db *DB) AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error {
+	db.rwLock.Lock()
+	defer db.rwLock.Unlock()
+
+	if err := db.inner.HeadState.AddDerived(derivedFrom, derived); err != nil {
+		return fmt.Errorf("failed to add derived block derivedFrom: %s, derived: %s, err: %w", derivedFrom, derived, err)
+	}
+	db.log.Trace("Added derived block", "derivedFrom", derivedFrom, "derived", derived)
+	return db.inner.Flush()
+}
+
+func (db *DB) Rewind(derivedFrom uint64) error {
+	return db.inner.Rewind(Key{DerivedFrom: derivedFrom, Derived: 0})
+}
+
+// LatestDerivedFrom returns the last known primary key (the L1 block)
+func (db *DB) LatestDerivedFrom() (ref types.BlockSeal, ok bool) {
+	db.rwLock.Lock()
+	defer db.rwLock.Unlock()
+	state := db.inner.HeadState
+	if state.Incomplete() {
+		return types.BlockSeal{}, false
+	}
+	return state.derivedFrom, true
+}
+
+// LatestDerived returns the last known value (the L2 block that was derived)
+func (db *DB) LatestDerived() (ref types.BlockSeal, ok bool) {
+	db.rwLock.Lock()
+	defer db.rwLock.Unlock()
+	state := db.inner.HeadState
+	if state.Incomplete() {
+		return types.BlockSeal{}, false
+	}
+	return state.derived, true
+}
+
+// LastDerivedAt returns the last L2 block derived from the given L1 block
+func (db *DB) LastDerivedAt(derivedFrom eth.BlockID) (types.BlockSeal, error) {
+	db.rwLock.Lock()
+	defer db.rwLock.Unlock()
+	iter, err := db.inner.NewIteratorFor(func(key Key) bool {
+		return key.DerivedFrom <= derivedFrom.Number
+	})
+	if err != nil {
+		return types.BlockSeal{}, err
+	}
+	if errors.Is(err, entrydb.ErrStop) {
+		err = nil
+	}
+	if err != nil {
+		return types.BlockSeal{}, err
+	}
+	state := iter.State()
+	if state.Incomplete() {
+		return types.BlockSeal{}, entrydb.ErrDataCorruption
+	}
+	if state.derivedFrom.ID() != derivedFrom { // did not reach derivedFrom yet
+		return types.BlockSeal{}, entrydb.ErrFuture
+	}
+	return state.derived, nil
+}
+
+// TODO do we want to expose an iterator interface?
+//type Iterator interface {
+//	TraverseConditional(fn func(*state) error) error
+//}
+//
+//func (db *DB) IteratorStartingFor() (Iterator, error) {
+//	return db.inner.NewIteratorFor()
+//}
+
+// DerivedFrom determines where an L2 block was derived from.
+func (db *DB) DerivedFrom(derived eth.BlockID) (types.BlockSeal, error) { + // search to the last point before the data + iter, err := db.inner.NewIteratorFor(func(key Key) bool { + return key.Derived < derived.Number + }) + if err != nil { + return types.BlockSeal{}, err + } + // go forward and read the data + err = iter.TraverseConditional(func(state *state) error { + v, ok := state.Derived() + if !ok { + return nil + } + if v.Number > derived.Number { + return entrydb.ErrStop + } + return nil + }) + if errors.Is(err, entrydb.ErrStop) { + err = nil + } + if err != nil { + return types.BlockSeal{}, err + } + state := iter.State() + if state.Incomplete() { + return types.BlockSeal{}, entrydb.ErrDataCorruption + } + if state.derived.ID() != derived { + return types.BlockSeal{}, entrydb.ErrConflict + } + return state.derivedFrom, nil +} diff --git a/op-supervisor/supervisor/backend/db/fromda/driver.go b/op-supervisor/supervisor/backend/db/fromda/driver.go new file mode 100644 index 0000000000000..58b7c6beb76c3 --- /dev/null +++ b/op-supervisor/supervisor/backend/db/fromda/driver.go @@ -0,0 +1,53 @@ +package fromda + +import ( + "errors" + + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +type driver struct { +} + +func (d driver) Less(a, b Key) bool { + return a.DerivedFrom < b.DerivedFrom || (a.DerivedFrom == b.DerivedFrom && a.Derived < b.Derived) +} + +func (d driver) Copy(src, dst *state) { + *dst = *src // shallow copy is enough + dst.ClearOut() // don't retain output (there shouldn't be any) +} + +func (d driver) NewState(nextIndex entrydb.EntryIdx) *state { + return &state{ + nextEntryIndex: nextIndex, + derivedFrom: types.BlockSeal{}, + derivedUntil: 0, + derivedSince: 0, + derived: types.BlockSeal{}, + need: FlagSearchCheckpoint, + out: nil, + } +} + +func (d driver) KeyFromCheckpoint(e Entry) (Key, error) { + if e.Type() != TypeSearchCheckpoint { + return Key{}, errors.New("expected search checkpoint") + } + p, err := newSearchCheckpointFromEntry(e) + if err != nil { + return Key{}, err + } + return Key{DerivedFrom: p.blockNum, Derived: p.derivedUntil + uint64(p.derivedSince)}, nil +} + +func (d driver) ValidEnd(e Entry) bool { + return e.Type() == TypeCanonicalHash +} + +func (d driver) SearchCheckpointFrequency() uint64 { + return searchCheckpointFrequency +} + +var _ entrydb.IndexDriver[EntryType, Key, *state] = (*driver)(nil) diff --git a/op-supervisor/supervisor/backend/db/fromda/entries.go b/op-supervisor/supervisor/backend/db/fromda/entries.go new file mode 100644 index 0000000000000..70ba5db2394b2 --- /dev/null +++ b/op-supervisor/supervisor/backend/db/fromda/entries.go @@ -0,0 +1,124 @@ +package fromda + +import ( + "encoding/binary" + "fmt" + + "github.com/ethereum/go-ethereum/common" + + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" +) + +// searchCheckpoint is both a checkpoint for searching, as well as a checkpoint for sealing blocks. 
+type searchCheckpoint struct { + // number of the L1 block that we derived from + blockNum uint64 + // timestamp of the L1 block that was derived from + timestamp uint64 + // number of L2 blocks that were derived after this checkpoint + derivedSince uint32 + // L2 block that we last derived until starting deriving from this L1 block + derivedUntil uint64 +} + +func newSearchCheckpoint(blockNum uint64, timestamp uint64, blocksSince uint32, derivedUntil uint64) searchCheckpoint { + return searchCheckpoint{ + blockNum: blockNum, + timestamp: timestamp, + derivedSince: blocksSince, + derivedUntil: derivedUntil, + } +} + +func newSearchCheckpointFromEntry(data Entry) (searchCheckpoint, error) { + if data.Type() != TypeSearchCheckpoint { + return searchCheckpoint{}, fmt.Errorf("%w: attempting to decode search checkpoint but was type %s", entrydb.ErrDataCorruption, data.Type()) + } + return searchCheckpoint{ + blockNum: binary.LittleEndian.Uint64(data[1:9]), + timestamp: binary.LittleEndian.Uint64(data[9:17]), + derivedSince: binary.LittleEndian.Uint32(data[17:21]), + derivedUntil: binary.LittleEndian.Uint64(data[21:29]), + }, nil +} + +// encode creates a checkpoint entry +// type 0: "search checkpoint" = 29 bytes +func (s searchCheckpoint) encode() Entry { + var data Entry + data[0] = uint8(TypeSearchCheckpoint) + binary.LittleEndian.PutUint64(data[1:9], s.blockNum) + binary.LittleEndian.PutUint64(data[9:17], s.timestamp) + binary.LittleEndian.PutUint32(data[17:21], s.derivedSince) + binary.LittleEndian.PutUint64(data[21:29], s.derivedUntil) + return data +} + +type canonicalHash struct { + hash common.Hash +} + +func newCanonicalHash(hash common.Hash) canonicalHash { + return canonicalHash{hash: hash} +} + +func newCanonicalHashFromEntry(data Entry) (canonicalHash, error) { + if data.Type() != TypeCanonicalHash { + return canonicalHash{}, fmt.Errorf("%w: attempting to decode canonical hash but was type %s", entrydb.ErrDataCorruption, data.Type()) + } + return newCanonicalHash(common.Hash(data[1:33])), nil +} + +func (c canonicalHash) encode() Entry { + var entry Entry + entry[0] = uint8(TypeCanonicalHash) + copy(entry[1:33], c.hash[:]) + return entry +} + +type derivedLink struct { + number uint64 + timestamp uint64 + // May contain additional flag value in the future +} + +func newDerivedLink(num uint64, timestamp uint64) derivedLink { + return derivedLink{number: num, timestamp: timestamp} +} + +func newDerivedLinkFromEntry(data Entry) (derivedLink, error) { + if data.Type() != TypeDerivedLink { + return derivedLink{}, fmt.Errorf("%w: attempting to decode derived link but was type %s", entrydb.ErrDataCorruption, data.Type()) + } + return newDerivedLink(binary.LittleEndian.Uint64(data[1:9]), binary.LittleEndian.Uint64(data[9:17])), nil +} + +func (d derivedLink) encode() Entry { + var entry Entry + entry[0] = uint8(TypeDerivedLink) + binary.LittleEndian.PutUint64(entry[1:9], d.number) + binary.LittleEndian.PutUint64(entry[9:17], d.timestamp) + return entry +} + +type derivedCheck struct { + hash common.Hash +} + +func newDerivedCheck(hash common.Hash) derivedCheck { + return derivedCheck{hash: hash} +} + +func newDerivedCheckFromEntry(data Entry) (derivedCheck, error) { + if data.Type() != TypeDerivedCheck { + return derivedCheck{}, fmt.Errorf("%w: attempting to decode derived check but was type %s", entrydb.ErrDataCorruption, data.Type()) + } + return newDerivedCheck(common.Hash(data[1:33])), nil +} + +func (d derivedCheck) encode() Entry { + var entry Entry + entry[0] = 
uint8(TypeDerivedCheck) + copy(entry[1:33], d.hash[:]) + return entry +} diff --git a/op-supervisor/supervisor/backend/db/fromda/entry.go b/op-supervisor/supervisor/backend/db/fromda/entry.go new file mode 100644 index 0000000000000..70caf7d536cc9 --- /dev/null +++ b/op-supervisor/supervisor/backend/db/fromda/entry.go @@ -0,0 +1,75 @@ +package fromda + +import ( + "fmt" + "strings" + + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" +) + +const searchCheckpointFrequency = 256 + +type EntryObj interface { + encode() Entry +} + +type Entry = entrydb.Entry[EntryType] + +type EntryTypeFlag uint8 + +const ( + FlagSearchCheckpoint EntryTypeFlag = 1 << TypeSearchCheckpoint + FlagCanonicalHash EntryTypeFlag = 1 << TypeCanonicalHash + FlagDerivedLink EntryTypeFlag = 1 << TypeDerivedLink + FlagDerivedCheck EntryTypeFlag = 1 << TypeDerivedCheck + FlagPadding EntryTypeFlag = 1 << TypePadding +) + +func (x EntryTypeFlag) String() string { + var out []string + for i := EntryTypeFlag(1); i != 0; i <<= 1 { // iterate to bitmask + if x.Any(i) { + out = append(out, i.String()) + } + } + return strings.Join(out, "|") +} + +func (x EntryTypeFlag) Any(v EntryTypeFlag) bool { + return x&v != 0 +} + +func (x *EntryTypeFlag) Add(v EntryTypeFlag) { + *x = *x | v +} + +func (x *EntryTypeFlag) Remove(v EntryTypeFlag) { + *x = *x &^ v +} + +type EntryType uint8 + +const ( + TypeSearchCheckpoint EntryType = iota + TypeCanonicalHash + TypeDerivedLink + TypeDerivedCheck + TypePadding +) + +func (x EntryType) String() string { + switch x { + case TypeSearchCheckpoint: + return "searchCheckpoint" + case TypeCanonicalHash: + return "canonicalHash" + case TypeDerivedLink: + return "derivedLink" + case TypeDerivedCheck: + return "derivedCheck" + case TypePadding: + return "padding" + default: + return fmt.Sprintf("unknown-%d", uint8(x)) + } +} diff --git a/op-supervisor/supervisor/backend/db/fromda/key.go b/op-supervisor/supervisor/backend/db/fromda/key.go new file mode 100644 index 0000000000000..ca902752a7acd --- /dev/null +++ b/op-supervisor/supervisor/backend/db/fromda/key.go @@ -0,0 +1,14 @@ +package fromda + +import ( + "fmt" +) + +type Key struct { + DerivedFrom uint64 + Derived uint64 +} + +func (k Key) String() string { + return fmt.Sprintf("derivedFrom: %d, derived: %d", k.DerivedFrom, k.Derived) +} diff --git a/op-supervisor/supervisor/backend/db/fromda/state.go b/op-supervisor/supervisor/backend/db/fromda/state.go new file mode 100644 index 0000000000000..22b419b7ef7a7 --- /dev/null +++ b/op-supervisor/supervisor/backend/db/fromda/state.go @@ -0,0 +1,286 @@ +package fromda + +import ( + "fmt" + "io" + "slices" + + "github.com/ethereum/go-ethereum/common" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +type state struct { + // next entry index, including the contents of `out` + nextEntryIndex entrydb.EntryIdx + + derivedFrom types.BlockSeal + derivedUntil uint64 // L2 block that we last derived until starting deriving from this L1 block + derivedSince uint32 // amount of blocks derived from derivedFrom thus far + + derived types.BlockSeal // produced using L1 data up to and including that of derivedFrom + + need EntryTypeFlag + + // buffer of entries not yet in the DB. + // This is generated as objects are applied. + // E.g. 
diff --git a/op-supervisor/supervisor/backend/db/fromda/state.go b/op-supervisor/supervisor/backend/db/fromda/state.go
new file mode 100644
index 0000000000000..22b419b7ef7a7
--- /dev/null
+++ b/op-supervisor/supervisor/backend/db/fromda/state.go
@@ -0,0 +1,286 @@
+package fromda
+
+import (
+	"fmt"
+	"io"
+	"slices"
+
+	"github.com/ethereum/go-ethereum/common"
+
+	"github.com/ethereum-optimism/optimism/op-service/eth"
+	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
+	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
+)
+
+type state struct {
+	// next entry index, including the contents of `out`
+	nextEntryIndex entrydb.EntryIdx
+
+	derivedFrom  types.BlockSeal
+	derivedUntil uint64 // number of the last L2 block derived before we started deriving from derivedFrom
+	derivedSince uint32 // number of L2 blocks derived from derivedFrom so far
+
+	derived types.BlockSeal // produced using L1 data up to and including that of derivedFrom
+
+	need EntryTypeFlag
+
+	// buffer of entries not yet in the DB.
+	// This is generated as objects are applied.
+	// E.g. you can build things on top of the state,
+	// before flushing the entries to a DB.
+	// However, no entries can be read from the DB while objects are being applied.
+	out []Entry
+}
+
+var _ entrydb.IndexState[EntryType, Key] = (*state)(nil)
+
+func (l *state) Key() (k Key, ok bool) {
+	return Key{DerivedFrom: l.derivedFrom.Number, Derived: l.derived.Number}, l.need == 0
+}
+
+func (l *state) Incomplete() bool {
+	return l.need != 0
+}
+
+func (l *state) Out() []Entry {
+	return slices.Clone(l.out)
+}
+
+func (l *state) ClearOut() {
+	l.out = l.out[:0]
+}
+
+func (l *state) NextIndex() entrydb.EntryIdx {
+	return l.nextEntryIndex
+}
+
+func (l *state) DerivedFrom() (id types.BlockSeal, ok bool) {
+	return l.derivedFrom, l.need == 0
+}
+
+func (l *state) DerivedSince() (count uint32, ok bool) {
+	return l.derivedSince, l.need == 0
+}
+
+func (l *state) DerivedUntil() (derivedUntil uint64, ok bool) {
+	return l.derivedUntil, l.need == 0
+}
+
+func (l *state) Derived() (id types.BlockSeal, ok bool) {
+	return l.derived, l.need == 0
+}
+
+// ApplyEntry applies an entry on top of the current state.
+func (l *state) ApplyEntry(entry Entry) error {
+	// Wrap processEntry to add common useful error message info
+	err := l.processEntry(entry)
+	if err != nil {
+		return fmt.Errorf("failed to process type %s entry at idx %d (%x): %w", entry.Type().String(), l.nextEntryIndex, entry[:], err)
+	}
+	return nil
+}
+
+func (l *state) processEntry(entry Entry) error {
+	if len(l.out) != 0 {
+		panic("can only apply DB entries while the out-buffer is empty")
+	}
+	switch entry.Type() {
+	case TypeSearchCheckpoint:
+		v, err := newSearchCheckpointFromEntry(entry)
+		if err != nil {
+			return err
+		}
+		l.derivedFrom = types.BlockSeal{
+			Hash:      common.Hash{},
+			Number:    v.blockNum,
+			Timestamp: v.timestamp,
+		}
+		l.derivedSince = v.derivedSince
+		l.derivedUntil = v.derivedUntil // restore, so later checkpoints and sanity checks stay consistent
+		l.need.Remove(FlagSearchCheckpoint)
+		l.need.Add(FlagCanonicalHash)
+	case TypeCanonicalHash:
+		v, err := newCanonicalHashFromEntry(entry)
+		if err != nil {
+			return err
+		}
+		l.derivedFrom.Hash = v.hash
+		l.need.Remove(FlagCanonicalHash)
+	case TypeDerivedLink:
+		v, err := newDerivedLinkFromEntry(entry)
+		if err != nil {
+			return err
+		}
+		l.need.Remove(FlagDerivedLink)
+		l.need.Add(FlagDerivedCheck)
+		l.derived = types.BlockSeal{
+			Hash:      common.Hash{},
+			Number:    v.number,
+			Timestamp: v.timestamp,
+		}
+	case TypeDerivedCheck:
+		v, err := newDerivedCheckFromEntry(entry)
+		if err != nil {
+			return err
+		}
+		l.need.Remove(FlagDerivedCheck)
+		l.derived.Hash = v.hash
+		// we derived a new block!
+		l.derivedSince += 1
+	case TypePadding:
+		l.need.Remove(FlagPadding)
+	default:
+		return fmt.Errorf("unknown entry type: %s", entry.Type())
+	}
+	return nil
+}
+
+// appendEntry adds the entry to the output-buffer and increments the next entry-index.
+func (l *state) appendEntry(obj EntryObj) {
+	entry := obj.encode()
+	l.out = append(l.out, entry)
+	l.nextEntryIndex += 1
+}
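ApplyEntry is the read/replay half of this state machine: the DB replays entries in order to hydrate the state on startup. A hedged sketch of replaying the first two entries of a fresh DB (helper name and values are hypothetical; index bookkeeping by the surrounding iterator is elided):

	package fromda

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/common"
	)

	// replaySketch feeds a search checkpoint and its canonical hash into a
	// fresh state, as the DB iterator would when hydrating from disk.
	func replaySketch() error {
		var s state
		entries := []Entry{
			newSearchCheckpoint(100, 5000, 0, 0).encode(),
			newCanonicalHash(common.HexToHash("0xaa")).encode(),
		}
		for _, e := range entries {
			if err := s.ApplyEntry(e); err != nil {
				return err
			}
		}
		derivedFrom, ok := s.DerivedFrom()
		fmt.Println(derivedFrom.Number, ok) // 100 true
		return nil
	}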
+
+// infer advances the state in cases where complex entries imply multiple entries,
+// e.g. a SearchCheckpoint implies a CanonicalHash will follow.
+// This also handles inserting the searchCheckpoint at the set frequency, and padding entries.
+func (l *state) infer() error {
+	// We force-insert a checkpoint whenever we hit the known fixed interval.
+	if l.nextEntryIndex%searchCheckpointFrequency == 0 {
+		l.need.Add(FlagSearchCheckpoint)
+	}
+	if l.need.Any(FlagSearchCheckpoint) {
+		l.appendEntry(newSearchCheckpoint(l.derivedFrom.Number, l.derivedFrom.Timestamp, l.derivedSince, l.derivedUntil))
+		l.need.Add(FlagCanonicalHash) // always follow with a canonical hash
+		l.need.Remove(FlagSearchCheckpoint)
+		return nil
+	}
+	if l.need.Any(FlagCanonicalHash) {
+		l.appendEntry(newCanonicalHash(l.derivedFrom.Hash))
+		l.need.Remove(FlagCanonicalHash)
+		return nil
+	}
+	if l.need.Any(FlagPadding) {
+		l.appendEntry(paddingEntry{})
+		l.need.Remove(FlagPadding)
+		return nil
+	}
+	if l.need.Any(FlagDerivedLink) {
+		// Add padding if this link/check pair would otherwise overlap with the next checkpoint slot
+		if l.nextEntryIndex%searchCheckpointFrequency == searchCheckpointFrequency-1 {
+			l.need.Add(FlagPadding)
+			return nil
+		}
+		l.appendEntry(newDerivedLink(l.derived.Number, l.derived.Timestamp))
+		l.need.Remove(FlagDerivedLink)
+		l.need.Add(FlagDerivedCheck)
+		return nil
+	}
+	if l.need.Any(FlagDerivedCheck) {
+		l.appendEntry(newDerivedCheck(l.derived.Hash))
+		l.need.Remove(FlagDerivedCheck)
+		// we derived a new L2 block!
+		l.derivedSince += 1
+		return nil
+	}
+	return io.EOF
+}
+
+// inferFull advances the state until it cannot infer any more entries.
+func (l *state) inferFull() error {
+	for i := 0; i < 10; i++ {
+		err := l.infer()
+		if err == nil {
+			continue
+		}
+		if err == io.EOF { // wrapped io.EOF does not count.
+			return nil
+		} else {
+			return err
+		}
+	}
+	panic("hit sanity limit")
+}
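AddDerived below is the write half: each call either repeats known information, extends the L2 chain by one block, moves on to the next L1 block, or is rejected as conflicting/out-of-order. A usage sketch under those rules (helper name, hashes and numbers are illustrative only):

	package fromda

	import (
		"github.com/ethereum/go-ethereum/common"

		"github.com/ethereum-optimism/optimism/op-service/eth"
	)

	// addDerivedSketch derives two L2 blocks from one L1 block, then records
	// an empty L1 block by repeating the last derived L2 block.
	func addDerivedSketch() error {
		var s state
		l1A := eth.BlockRef{Hash: common.Hash{0xa1}, Number: 1, Time: 100}
		l1B := eth.BlockRef{Hash: common.Hash{0xb1}, Number: 2, Time: 112, ParentHash: l1A.Hash}
		l2a := eth.BlockRef{Hash: common.Hash{0x01}, Number: 1, Time: 100}
		l2b := eth.BlockRef{Hash: common.Hash{0x02}, Number: 2, Time: 102, ParentHash: l2a.Hash}

		steps := []struct{ from, derived eth.BlockRef }{
			{l1A, l2a}, // first L2 block, derived from L1 block A
			{l1A, l2b}, // same L1 block, next L2 block
			{l1B, l2b}, // empty L1 block B: the derived L2 block repeats
		}
		for _, step := range steps {
			if err := s.AddDerived(step.from, step.derived); err != nil {
				return err
			}
		}
		_ = s.Out() // buffered entries, ready to be flushed to the DB
		return nil
	}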
+
+// AddDerived adds an L1<>L2 block derivation link.
+// This may repeat the L1 block if there are multiple L2 blocks derived from it, or repeat the L2 block if the L1 block is empty.
+func (l *state) AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error {
+	// If we don't have any entries yet, allow any block to start things off
+	if l.nextEntryIndex != 0 {
+		// TODO insert starting point
+	}
+
+	if l.derived.ID() == derived.ID() && l.derivedFrom.ID() == derivedFrom.ID() {
+		// Repeat of the same information. No entries to be written.
+		// We can silently ignore it and not return an error, as that leaves the caller
+		// in a consistent state, after which it can insert the actual new derived-from information.
+		return nil
+	}
+
+	// Check the derived relation: the L2 chain has to be sequential without gaps. An L2 block may repeat if the L1 block is empty.
+	if l.derived.Number == derived.Number {
+		// Same block height? Then it must be the same block.
+		// I.e. we encountered an empty L1 block, and the same L2 block continues to be the last block that was derived from it.
+		if l.derived.Hash != derived.Hash {
+			// TODO
+		}
+	} else if l.derived.Number+1 == derived.Number {
+		if l.derived.Hash != derived.ParentHash {
+			return fmt.Errorf("derived block %s (parent %s) does not build on %s: %w",
+				derived, derived.ParentHash, l.derived, entrydb.ErrConflict)
+		}
+	} else if l.derived.Number+1 < derived.Number {
+		return fmt.Errorf("derived block %s (parent: %s) is too new, expected to build on top of %s: %w",
+			derived, derived.ParentHash, l.derived, entrydb.ErrOutOfOrder)
+	} else {
+		return fmt.Errorf("derived block %s is older than current derived block %s: %w",
+			derived, l.derived, entrydb.ErrOutOfOrder)
+	}
+
+	// Check the derived-from relation: multiple L2 blocks may be derived from the same L1 block. But everything in sequence.
+	if l.derivedFrom.Number == derivedFrom.Number {
+		// Same block height? Then it must be the same block.
+		if l.derivedFrom.Hash != derivedFrom.Hash {
+			return fmt.Errorf("cannot add block %s as derived from %s, expected to be derived from %s at this block height: %w",
+				derived, derivedFrom, l.derivedFrom, entrydb.ErrConflict)
+		}
+	} else if l.derivedFrom.Number+1 == derivedFrom.Number {
+		// parent hash check
+		if l.derivedFrom.Hash != derivedFrom.ParentHash {
+			return fmt.Errorf("cannot add block %s as derived from %s (parent %s) derived on top of %s: %w",
+				derived, derivedFrom, derivedFrom.ParentHash, l.derivedFrom, entrydb.ErrConflict)
+		}
+	} else if l.derivedFrom.Number+1 < derivedFrom.Number {
+		// adding a block that is derived from something too far into the future
+		return fmt.Errorf("cannot add block %s as derived from %s, still deriving from %s: %w",
+			derived, derivedFrom, l.derivedFrom, entrydb.ErrOutOfOrder)
+	} else {
+		// adding a block that is derived from something too old
+		return fmt.Errorf("cannot add block %s as derived from %s, deriving already at %s: %w",
+			derived, derivedFrom, l.derivedFrom, entrydb.ErrOutOfOrder)
+	}
+
+	if l.derivedFrom.ID() != derivedFrom.ID() {
+		// Sanity-check our state
+		if expected := l.derivedUntil + uint64(l.derivedSince); expected != l.derived.Number {
+			panic(fmt.Errorf("expected to have derived up to %d (%d until current L1 block, and %d since then), but have %d",
+				expected, l.derivedUntil, l.derivedSince, l.derived.Number))
+		}
+		l.need.Add(FlagSearchCheckpoint)
+		// Everything derived so far belongs to the previous L1 blocks;
+		// the count for the new derivedFrom starts at zero.
+		l.derivedUntil = l.derived.Number
+		l.derivedSince = 0
+
+		l.derivedFrom = types.BlockSeal{
+			Hash:      derivedFrom.Hash,
+			Number:    derivedFrom.Number,
+			Timestamp: derivedFrom.Time,
+		}
+	}
+
+	if l.derived.ID() != derived.ID() {
+		l.need.Add(FlagDerivedLink)
+		l.derived = types.BlockSeal{
+			Hash:      derived.Hash,
+			Number:    derived.Number,
+			Timestamp: derived.Time,
+		}
+	}
+
+	return l.inferFull()
+}
diff --git a/op-supervisor/supervisor/backend/db/heads/heads.go b/op-supervisor/supervisor/backend/db/heads/heads.go
deleted file mode 100644
index 93d02a84fa644..0000000000000
--- a/op-supervisor/supervisor/backend/db/heads/heads.go
+++ /dev/null
@@ -1,161 +0,0 @@
-package heads
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"os"
-	"sync"
-
-	"github.com/ethereum/go-ethereum/log"
-
-	"github.com/ethereum-optimism/optimism/op-service/ioutil"
-	"github.com/ethereum-optimism/optimism/op-service/jsonutil"
-
-	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
-)
-
-// HeadTracker records the current chain head pointers for a single chain.
-type HeadTracker struct { - rwLock sync.RWMutex - - path string - - current *Heads - - logger log.Logger -} - -func (t *HeadTracker) CrossUnsafe(id types.ChainID) HeadPointer { - return t.current.Get(id).CrossUnsafe -} - -func (t *HeadTracker) CrossSafe(id types.ChainID) HeadPointer { - return t.current.Get(id).CrossSafe -} - -func (t *HeadTracker) CrossFinalized(id types.ChainID) HeadPointer { - return t.current.Get(id).CrossFinalized -} - -func (t *HeadTracker) LocalUnsafe(id types.ChainID) HeadPointer { - return t.current.Get(id).Unsafe -} - -func (t *HeadTracker) LocalSafe(id types.ChainID) HeadPointer { - return t.current.Get(id).LocalSafe -} - -func (t *HeadTracker) LocalFinalized(id types.ChainID) HeadPointer { - return t.current.Get(id).LocalFinalized -} - -func (t *HeadTracker) UpdateCrossUnsafe(id types.ChainID, pointer HeadPointer) error { - return t.Apply(OperationFn(func(heads *Heads) error { - t.logger.Info("Cross-unsafe update", "pointer", pointer) - h := heads.Get(id) - h.CrossUnsafe = pointer - heads.Put(id, h) - return nil - })) -} - -func (t *HeadTracker) UpdateCrossSafe(id types.ChainID, pointer HeadPointer) error { - return t.Apply(OperationFn(func(heads *Heads) error { - t.logger.Info("Cross-safe update", "pointer", pointer) - h := heads.Get(id) - h.CrossSafe = pointer - heads.Put(id, h) - return nil - })) -} - -func (t *HeadTracker) UpdateCrossFinalized(id types.ChainID, pointer HeadPointer) error { - return t.Apply(OperationFn(func(heads *Heads) error { - t.logger.Info("Cross-finalized update", "pointer", pointer) - h := heads.Get(id) - h.CrossFinalized = pointer - heads.Put(id, h) - return nil - })) -} - -func (t *HeadTracker) UpdateLocalUnsafe(id types.ChainID, pointer HeadPointer) error { - return t.Apply(OperationFn(func(heads *Heads) error { - t.logger.Info("Local-unsafe update", "pointer", pointer) - h := heads.Get(id) - h.Unsafe = pointer - heads.Put(id, h) - return nil - })) -} - -func (t *HeadTracker) UpdateLocalSafe(id types.ChainID, pointer HeadPointer) error { - return t.Apply(OperationFn(func(heads *Heads) error { - t.logger.Info("Local-safe update", "pointer", pointer) - h := heads.Get(id) - h.LocalSafe = pointer - heads.Put(id, h) - return nil - })) -} - -func (t *HeadTracker) UpdateLocalFinalized(id types.ChainID, pointer HeadPointer) error { - return t.Apply(OperationFn(func(heads *Heads) error { - t.logger.Info("Local-finalized update", "pointer", pointer) - h := heads.Get(id) - h.LocalFinalized = pointer - heads.Put(id, h) - return nil - })) -} - -func NewHeadTracker(logger log.Logger, path string) (*HeadTracker, error) { - current := NewHeads() - if data, err := os.ReadFile(path); errors.Is(err, os.ErrNotExist) { - // No existing file, just use empty heads - } else if err != nil { - return nil, fmt.Errorf("failed to read existing heads from %v: %w", path, err) - } else { - if err := json.Unmarshal(data, current); err != nil { - return nil, fmt.Errorf("invalid existing heads file %v: %w", path, err) - } - } - return &HeadTracker{ - path: path, - current: current, - logger: logger, - }, nil -} - -func (t *HeadTracker) Apply(op Operation) error { - t.rwLock.Lock() - defer t.rwLock.Unlock() - // Store a copy of the heads prior to changing so we can roll back if needed. 
- modified := t.current.Copy() - if err := op.Apply(modified); err != nil { - return fmt.Errorf("operation failed: %w", err) - } - if err := t.write(modified); err != nil { - return fmt.Errorf("failed to store updated heads: %w", err) - } - t.current = modified - return nil -} - -func (t *HeadTracker) Current() *Heads { - t.rwLock.RLock() - defer t.rwLock.RUnlock() - return t.current.Copy() -} - -func (t *HeadTracker) write(heads *Heads) error { - if err := jsonutil.WriteJSON(heads, ioutil.ToAtomicFile(t.path, 0o644)); err != nil { - return fmt.Errorf("failed to write new heads: %w", err) - } - return nil -} - -func (t *HeadTracker) Close() error { - return nil -} diff --git a/op-supervisor/supervisor/backend/db/heads/heads_test.go b/op-supervisor/supervisor/backend/db/heads/heads_test.go deleted file mode 100644 index 9b8fb4bd45726..0000000000000 --- a/op-supervisor/supervisor/backend/db/heads/heads_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package heads - -/* -import ( - "errors" - "os" - "path/filepath" - "testing" - - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" - "github.com/stretchr/testify/require" -) - -func TestHeads_SaveAndReload(t *testing.T) { - dir := t.TempDir() - path := filepath.Join(dir, "heads.json") - chainA := types.ChainIDFromUInt64(3) - chainAHeads := ChainHeads{ - Unsafe: 1, - CrossUnsafe: 2, - LocalSafe: 3, - CrossSafe: 4, - LocalFinalized: 5, - CrossFinalized: 6, - } - chainB := types.ChainIDFromUInt64(5) - chainBHeads := ChainHeads{ - Unsafe: 11, - CrossUnsafe: 12, - LocalSafe: 13, - CrossSafe: 14, - LocalFinalized: 15, - CrossFinalized: 16, - } - - orig, err := NewHeadTracker(path) - require.NoError(t, err) - err = orig.Apply(OperationFn(func(heads *Heads) error { - heads.Put(chainA, chainAHeads) - heads.Put(chainB, chainBHeads) - return nil - })) - require.NoError(t, err) - require.Equal(t, orig.Current().Get(chainA), chainAHeads) - require.Equal(t, orig.Current().Get(chainB), chainBHeads) - - loaded, err := NewHeadTracker(path) - require.NoError(t, err) - require.EqualValues(t, loaded.Current(), orig.Current()) -} - -func TestHeads_NoChangesMadeIfOperationFails(t *testing.T) { - dir := t.TempDir() - path := filepath.Join(dir, "heads.json") - chainA := types.ChainIDFromUInt64(3) - chainAHeads := ChainHeads{ - Unsafe: 1, - CrossUnsafe: 2, - LocalSafe: 3, - CrossSafe: 4, - LocalFinalized: 5, - CrossFinalized: 6, - } - - orig, err := NewHeadTracker(path) - require.NoError(t, err) - boom := errors.New("boom") - err = orig.Apply(OperationFn(func(heads *Heads) error { - heads.Put(chainA, chainAHeads) - return boom - })) - require.ErrorIs(t, err, boom) - require.Equal(t, ChainHeads{}, orig.Current().Get(chainA)) - - // Should be able to load from disk too - loaded, err := NewHeadTracker(path) - require.NoError(t, err) - require.EqualValues(t, loaded.Current(), orig.Current()) -} - -func TestHeads_NoChangesMadeIfWriteFails(t *testing.T) { - dir := t.TempDir() - path := filepath.Join(dir, "invalid/heads.json") - chainA := types.ChainIDFromUInt64(3) - chainAHeads := ChainHeads{ - Unsafe: 1, - CrossUnsafe: 2, - LocalSafe: 3, - CrossSafe: 4, - LocalFinalized: 5, - CrossFinalized: 6, - } - - orig, err := NewHeadTracker(path) - require.NoError(t, err) - err = orig.Apply(OperationFn(func(heads *Heads) error { - heads.Put(chainA, chainAHeads) - return nil - })) - require.ErrorIs(t, err, os.ErrNotExist) - require.Equal(t, ChainHeads{}, orig.Current().Get(chainA)) -} -*/ diff --git a/op-supervisor/supervisor/backend/db/heads/types.go 
b/op-supervisor/supervisor/backend/db/heads/types.go deleted file mode 100644 index 7db0bff2d1062..0000000000000 --- a/op-supervisor/supervisor/backend/db/heads/types.go +++ /dev/null @@ -1,127 +0,0 @@ -package heads - -import ( - "encoding/json" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" -) - -type HeadPointer struct { - // LastSealedBlockHash is the last fully-processed block - LastSealedBlockHash common.Hash - LastSealedBlockNum uint64 - LastSealedTimestamp uint64 - - // Number of logs that have been verified since the LastSealedBlock. - // These logs are contained in the block that builds on top of the LastSealedBlock. - LogsSince uint32 -} - -// WithinRange checks if the given log, in the given block, -// is within range (i.e. before or equal to the head-pointer). -// This does not guarantee that the log exists. -func (ptr *HeadPointer) WithinRange(blockNum uint64, logIdx uint32) bool { - if ptr.LastSealedBlockHash == (common.Hash{}) { - return false // no block yet - } - return blockNum <= ptr.LastSealedBlockNum || - (blockNum+1 == ptr.LastSealedBlockNum && logIdx < ptr.LogsSince) -} - -func (ptr *HeadPointer) IsSealed(blockNum uint64) bool { - if ptr.LastSealedBlockHash == (common.Hash{}) { - return false // no block yet - } - return blockNum <= ptr.LastSealedBlockNum -} - -// ChainHeads provides the serialization format for the current chain heads. -type ChainHeads struct { - Unsafe HeadPointer `json:"localUnsafe"` - CrossUnsafe HeadPointer `json:"crossUnsafe"` - LocalSafe HeadPointer `json:"localSafe"` - CrossSafe HeadPointer `json:"crossSafe"` - LocalFinalized HeadPointer `json:"localFinalized"` - CrossFinalized HeadPointer `json:"crossFinalized"` -} - -type Heads struct { - Chains map[types.ChainID]ChainHeads -} - -func NewHeads() *Heads { - return &Heads{Chains: make(map[types.ChainID]ChainHeads)} -} - -func (h *Heads) Get(id types.ChainID) ChainHeads { - chain, ok := h.Chains[id] - if !ok { - return ChainHeads{} - } - // init to genesis - if chain.LocalFinalized == (HeadPointer{}) && chain.Unsafe.LastSealedBlockNum == 0 { - chain.LocalFinalized = chain.Unsafe - } - // Make sure the data is consistent - if chain.LocalSafe == (HeadPointer{}) { - chain.LocalSafe = chain.LocalFinalized - } - if chain.Unsafe == (HeadPointer{}) { - chain.Unsafe = chain.LocalSafe - } - if chain.CrossFinalized == (HeadPointer{}) && chain.LocalFinalized.LastSealedBlockNum == 0 { - chain.CrossFinalized = chain.LocalFinalized - } - if chain.CrossSafe == (HeadPointer{}) { - chain.CrossSafe = chain.CrossFinalized - } - if chain.CrossUnsafe == (HeadPointer{}) { - chain.CrossUnsafe = chain.CrossSafe - } - return chain -} - -func (h *Heads) Put(id types.ChainID, head ChainHeads) { - h.Chains[id] = head -} - -func (h *Heads) Copy() *Heads { - c := &Heads{Chains: make(map[types.ChainID]ChainHeads)} - for id, heads := range h.Chains { - c.Chains[id] = heads - } - return c -} - -func (h *Heads) MarshalJSON() ([]byte, error) { - data := make(map[hexutil.U256]ChainHeads) - for id, heads := range h.Chains { - data[hexutil.U256(id)] = heads - } - return json.Marshal(data) -} - -func (h *Heads) UnmarshalJSON(data []byte) error { - hexData := make(map[hexutil.U256]ChainHeads) - if err := json.Unmarshal(data, &hexData); err != nil { - return err - } - h.Chains = make(map[types.ChainID]ChainHeads) - for id, heads := range hexData { - h.Put(types.ChainID(id), heads) - } - return nil -} - -type 
Operation interface { - Apply(head *Heads) error -} - -type OperationFn func(heads *Heads) error - -func (f OperationFn) Apply(heads *Heads) error { - return f(heads) -} diff --git a/op-supervisor/supervisor/backend/db/heads/types_test.go b/op-supervisor/supervisor/backend/db/heads/types_test.go deleted file mode 100644 index 20bb057954166..0000000000000 --- a/op-supervisor/supervisor/backend/db/heads/types_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package heads - -import ( - "encoding/json" - "fmt" - "math/rand" // nosemgrep - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ethereum/go-ethereum/common" - - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" -) - -func TestHeads(t *testing.T) { - rng := rand.New(rand.NewSource(1234)) - randHeadPtr := func() HeadPointer { - var h common.Hash - rng.Read(h[:]) - return HeadPointer{ - LastSealedBlockHash: h, - LastSealedBlockNum: rng.Uint64(), - LogsSince: rng.Uint32(), - } - } - t.Run("RoundTripViaJson", func(t *testing.T) { - heads := NewHeads() - heads.Put(types.ChainIDFromUInt64(3), ChainHeads{ - Unsafe: randHeadPtr(), - CrossUnsafe: randHeadPtr(), - LocalSafe: randHeadPtr(), - CrossSafe: randHeadPtr(), - LocalFinalized: randHeadPtr(), - CrossFinalized: randHeadPtr(), - }) - heads.Put(types.ChainIDFromUInt64(9), ChainHeads{ - Unsafe: randHeadPtr(), - CrossUnsafe: randHeadPtr(), - LocalSafe: randHeadPtr(), - CrossSafe: randHeadPtr(), - LocalFinalized: randHeadPtr(), - CrossFinalized: randHeadPtr(), - }) - heads.Put(types.ChainIDFromUInt64(4892497242424), ChainHeads{ - Unsafe: randHeadPtr(), - CrossUnsafe: randHeadPtr(), - LocalSafe: randHeadPtr(), - CrossSafe: randHeadPtr(), - LocalFinalized: randHeadPtr(), - CrossFinalized: randHeadPtr(), - }) - - j, err := json.Marshal(heads) - require.NoError(t, err) - - fmt.Println(string(j)) - var result Heads - err = json.Unmarshal(j, &result) - require.NoError(t, err) - require.Equal(t, heads.Chains, result.Chains) - }) - - t.Run("Copy", func(t *testing.T) { - chainA := types.ChainIDFromUInt64(3) - chainB := types.ChainIDFromUInt64(4) - chainAOrigHeads := ChainHeads{ - Unsafe: randHeadPtr(), - } - chainAModifiedHeads1 := ChainHeads{ - Unsafe: randHeadPtr(), - } - chainAModifiedHeads2 := ChainHeads{ - Unsafe: randHeadPtr(), - } - chainBModifiedHeads := ChainHeads{ - Unsafe: randHeadPtr(), - } - - heads := NewHeads() - heads.Put(chainA, chainAOrigHeads) - - otherHeads := heads.Copy() - otherHeads.Put(chainA, chainAModifiedHeads1) - otherHeads.Put(chainB, chainBModifiedHeads) - - require.Equal(t, heads.Get(chainA), chainAOrigHeads) - require.Equal(t, heads.Get(chainB), ChainHeads{}) - - heads.Put(chainA, chainAModifiedHeads2) - require.Equal(t, heads.Get(chainA), chainAModifiedHeads2) - - require.Equal(t, otherHeads.Get(chainA), chainAModifiedHeads1) - require.Equal(t, otherHeads.Get(chainB), chainBModifiedHeads) - }) -} diff --git a/op-supervisor/supervisor/backend/db/logs/db.go b/op-supervisor/supervisor/backend/db/logs/db.go index 10863c052645e..9fcfc7854cf77 100644 --- a/op-supervisor/supervisor/backend/db/logs/db.go +++ b/op-supervisor/supervisor/backend/db/logs/db.go @@ -20,21 +20,6 @@ const ( eventFlagHasExecutingMessage = byte(1) ) -var ( - // ErrLogOutOfOrder happens when you try to add a log to the DB, - // but it does not actually fit onto the latest data (by being too old or new). 
- ErrLogOutOfOrder = errors.New("log out of order") - // ErrDataCorruption happens when the underlying DB has some I/O issue - ErrDataCorruption = errors.New("data corruption") - // ErrSkipped happens when we try to retrieve data that is not available (pruned) - // It may also happen if we erroneously skip data, that was not considered a conflict, if the DB is corrupted. - ErrSkipped = errors.New("skipped data") - // ErrFuture happens when data is just not yet available - ErrFuture = errors.New("future data") - // ErrConflict happens when we know for sure that there is different canonical data - ErrConflict = errors.New("conflicting data") -) - type Metrics interface { RecordDBEntryCount(count int64) RecordDBSearchEntriesRead(count int64) @@ -43,8 +28,8 @@ type Metrics interface { type EntryStore interface { Size() int64 LastEntryIdx() entrydb.EntryIdx - Read(idx entrydb.EntryIdx) (entrydb.Entry, error) - Append(entries ...entrydb.Entry) error + Read(idx entrydb.EntryIdx) (Entry, error) + Append(entries ...Entry) error Truncate(idx entrydb.EntryIdx) error Close() error } @@ -66,7 +51,7 @@ type DB struct { } func NewFromFile(logger log.Logger, m Metrics, path string, trimToLastSealed bool) (*DB, error) { - store, err := entrydb.NewEntryDB(logger, path) + store, err := entrydb.NewEntryDB[EntryType](logger, path) if err != nil { return nil, fmt.Errorf("failed to open DB: %w", err) } @@ -117,7 +102,7 @@ func (db *DB) init(trimToLastSealed bool) error { // and then apply any remaining changes on top, to hydrate the state. lastCheckpoint := (db.lastEntryIdx() / searchCheckpointFrequency) * searchCheckpointFrequency i := db.newIterator(lastCheckpoint) - i.current.need.Add(entrydb.FlagCanonicalHash) + i.current.need.Add(FlagCanonicalHash) if err := i.End(); err != nil { return fmt.Errorf("failed to init from remaining trailing data: %w", err) } @@ -132,7 +117,7 @@ func (db *DB) trimToLastSealed() error { if err != nil { return fmt.Errorf("failed to read %v to check for trailing entries: %w", i, err) } - if entry.Type() == entrydb.TypeCanonicalHash { + if entry.Type() == TypeCanonicalHash { // only an executing hash, indicating a sealed block, is a valid point for restart break } @@ -163,8 +148,8 @@ func (db *DB) FindSealedBlock(block eth.BlockID) (nextEntry entrydb.EntryIdx, er db.rwLock.RLock() defer db.rwLock.RUnlock() iter, err := db.newIteratorAt(block.Number, 0) - if errors.Is(err, ErrFuture) { - return 0, fmt.Errorf("block %d is not known yet: %w", block.Number, ErrFuture) + if errors.Is(err, entrydb.ErrFuture) { + return 0, fmt.Errorf("block %d is not known yet: %w", block.Number, entrydb.ErrFuture) } else if err != nil { return 0, fmt.Errorf("failed to find sealed block %d: %w", block.Number, err) } @@ -173,7 +158,7 @@ func (db *DB) FindSealedBlock(block eth.BlockID) (nextEntry entrydb.EntryIdx, er panic("expected block") } if block.Hash != h { - return 0, fmt.Errorf("queried %s but got %s at number %d: %w", block.Hash, h, block.Number, ErrConflict) + return 0, fmt.Errorf("queried %s but got %s at number %d: %w", block.Hash, h, block.Number, entrydb.ErrConflict) } return iter.NextIndex(), nil } @@ -225,12 +210,12 @@ func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (ent func (db *DB) findLogInfo(blockNum uint64, logIdx uint32) (common.Hash, Iterator, error) { if blockNum == 0 { - return common.Hash{}, nil, ErrConflict // no logs in block 0 + return common.Hash{}, nil, entrydb.ErrConflict // no logs in block 0 } // blockNum-1, such that we find a log that came 
after the parent num-1 was sealed. // logIdx, such that all entries before logIdx can be skipped, but logIdx itself is still readable. iter, err := db.newIteratorAt(blockNum-1, logIdx) - if errors.Is(err, ErrFuture) { + if errors.Is(err, entrydb.ErrFuture) { db.log.Trace("Could not find log yet", "blockNum", blockNum, "logIdx", logIdx) return common.Hash{}, nil, err } else if err != nil { @@ -245,7 +230,7 @@ func (db *DB) findLogInfo(blockNum uint64, logIdx uint32) (common.Hash, Iterator } else if x < blockNum-1 { panic(fmt.Errorf("bug in newIteratorAt, expected to have found parent block %d but got %d", blockNum-1, x)) } else if x > blockNum-1 { - return common.Hash{}, nil, fmt.Errorf("log does not exist, found next block already: %w", ErrConflict) + return common.Hash{}, nil, fmt.Errorf("log does not exist, found next block already: %w", entrydb.ErrConflict) } logHash, x, ok := iter.InitMessage() if !ok { @@ -266,7 +251,7 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error) searchCheckpointIndex, err := db.searchCheckpoint(blockNum, logIndex) if errors.Is(err, io.EOF) { // Did not find a checkpoint to start reading from so the log cannot be present. - return nil, ErrFuture + return nil, entrydb.ErrFuture } else if err != nil { return nil, err } @@ -276,7 +261,7 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error) if err != nil { return nil, err } - iter.current.need.Add(entrydb.FlagCanonicalHash) + iter.current.need.Add(FlagCanonicalHash) defer func() { db.m.RecordDBSearchEntriesRead(iter.entriesRead) }() @@ -285,9 +270,9 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error) if _, n, _ := iter.SealedBlock(); n == blockNum { // we may already have it exactly break } - if err := iter.NextBlock(); errors.Is(err, ErrFuture) { + if err := iter.NextBlock(); errors.Is(err, entrydb.ErrFuture) { db.log.Trace("ran out of data, could not find block", "nextIndex", iter.NextIndex(), "target", blockNum) - return nil, ErrFuture + return nil, entrydb.ErrFuture } else if err != nil { db.log.Error("failed to read next block", "nextIndex", iter.NextIndex(), "target", blockNum, "err", err) return nil, err @@ -301,7 +286,7 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error) continue } if num != blockNum { // block does not contain - return nil, fmt.Errorf("looking for %d, but already at %d: %w", blockNum, num, ErrConflict) + return nil, fmt.Errorf("looking for %d, but already at %d: %w", blockNum, num, entrydb.ErrConflict) } break } @@ -310,7 +295,7 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error) // so two logs before quiting (and not 3 to then quit after). for iter.current.logsSince < logIndex { if err := iter.NextInitMsg(); err == io.EOF { - return nil, ErrFuture + return nil, entrydb.ErrFuture } else if err != nil { return nil, err } @@ -320,7 +305,7 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error) } if num > blockNum { // we overshot, the block did not contain as many seen log events as requested - return nil, ErrConflict + return nil, entrydb.ErrConflict } _, idx, ok := iter.InitMessage() if !ok { @@ -354,7 +339,7 @@ func (db *DB) newIterator(index entrydb.EntryIdx) *iterator { // Returns the index of the searchCheckpoint to begin reading from or an error. 
func (db *DB) searchCheckpoint(sealedBlockNum uint64, logsSince uint32) (entrydb.EntryIdx, error) { if db.lastEntryContext.nextEntryIndex == 0 { - return 0, ErrFuture // empty DB, everything is in the future + return 0, entrydb.ErrFuture // empty DB, everything is in the future } n := (db.lastEntryIdx() / searchCheckpointFrequency) + 1 // Define: x is the array of known checkpoints @@ -391,7 +376,7 @@ func (db *DB) searchCheckpoint(sealedBlockNum uint64, logsSince uint32) (entrydb if checkpoint.blockNum > sealedBlockNum || (checkpoint.blockNum == sealedBlockNum && checkpoint.logsSince > logsSince) { return 0, fmt.Errorf("missing data, earliest search checkpoint is %d with %d logs, cannot find something before or at %d with %d logs: %w", - checkpoint.blockNum, checkpoint.logsSince, sealedBlockNum, logsSince, ErrSkipped) + checkpoint.blockNum, checkpoint.logsSince, sealedBlockNum, logsSince, entrydb.ErrSkipped) } return result, nil } diff --git a/op-supervisor/supervisor/backend/db/logs/db_invariants_test.go b/op-supervisor/supervisor/backend/db/logs/db_invariants_test.go index 04c004f3d0966..fdcc285dafd78 100644 --- a/op-supervisor/supervisor/backend/db/logs/db_invariants_test.go +++ b/op-supervisor/supervisor/backend/db/logs/db_invariants_test.go @@ -7,12 +7,13 @@ import ( "os" "testing" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" ) type statInvariant func(stat os.FileInfo, m *stubMetrics) error -type entryInvariant func(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error +type entryInvariant func(entryIdx int, entry Entry, entries []Entry, m *stubMetrics) error // checkDBInvariants reads the database log directly and asserts a set of invariants on the data. 
func checkDBInvariants(t *testing.T, dbPath string, m *stubMetrics) { @@ -30,7 +31,7 @@ func checkDBInvariants(t *testing.T, dbPath string, m *stubMetrics) { // Read all entries as binary blobs file, err := os.OpenFile(dbPath, os.O_RDONLY, 0o644) require.NoError(t, err) - entries := make([]entrydb.Entry, stat.Size()/entrydb.EntrySize) + entries := make([]Entry, stat.Size()/entrydb.EntrySize) for i := range entries { n, err := io.ReadFull(file, entries[i][:]) require.NoErrorf(t, err, "failed to read entry %v", i) @@ -56,7 +57,7 @@ func checkDBInvariants(t *testing.T, dbPath string, m *stubMetrics) { } } -func fmtEntries(entries []entrydb.Entry) string { +func fmtEntries(entries []Entry) string { out := "" for i, entry := range entries { out += fmt.Sprintf("%v: %x\n", i, entry) @@ -80,44 +81,44 @@ func invariantFileSizeMatchesEntryCountMetric(stat os.FileInfo, m *stubMetrics) return nil } -func invariantSearchCheckpointAtEverySearchCheckpointFrequency(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error { - if entryIdx%searchCheckpointFrequency == 0 && entry.Type() != entrydb.TypeSearchCheckpoint { +func invariantSearchCheckpointAtEverySearchCheckpointFrequency(entryIdx int, entry Entry, entries []Entry, m *stubMetrics) error { + if entryIdx%searchCheckpointFrequency == 0 && entry.Type() != TypeSearchCheckpoint { return fmt.Errorf("should have search checkpoints every %v entries but entry %v was %x", searchCheckpointFrequency, entryIdx, entry) } return nil } -func invariantCanonicalHashOrCheckpointAfterEverySearchCheckpoint(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error { - if entry.Type() != entrydb.TypeSearchCheckpoint { +func invariantCanonicalHashOrCheckpointAfterEverySearchCheckpoint(entryIdx int, entry Entry, entries []Entry, m *stubMetrics) error { + if entry.Type() != TypeSearchCheckpoint { return nil } if entryIdx+1 >= len(entries) { return fmt.Errorf("expected canonical hash or checkpoint after search checkpoint at entry %v but no further entries found", entryIdx) } nextEntry := entries[entryIdx+1] - if nextEntry.Type() != entrydb.TypeCanonicalHash && nextEntry.Type() != entrydb.TypeSearchCheckpoint { + if nextEntry.Type() != TypeCanonicalHash && nextEntry.Type() != TypeSearchCheckpoint { return fmt.Errorf("expected canonical hash or checkpoint after search checkpoint at entry %v but got %x", entryIdx, nextEntry) } return nil } // invariantSearchCheckpointBeforeEveryCanonicalHash ensures we don't have extra canonical-hash entries -func invariantSearchCheckpointBeforeEveryCanonicalHash(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error { - if entry.Type() != entrydb.TypeCanonicalHash { +func invariantSearchCheckpointBeforeEveryCanonicalHash(entryIdx int, entry Entry, entries []Entry, m *stubMetrics) error { + if entry.Type() != TypeCanonicalHash { return nil } if entryIdx == 0 { return fmt.Errorf("expected search checkpoint before canonical hash at entry %v but no previous entries present", entryIdx) } prevEntry := entries[entryIdx-1] - if prevEntry.Type() != entrydb.TypeSearchCheckpoint { + if prevEntry.Type() != TypeSearchCheckpoint { return fmt.Errorf("expected search checkpoint before canonical hash at entry %v but got %x", entryIdx, prevEntry) } return nil } -func invariantExecLinkAfterInitEventWithFlagSet(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error { - if entry.Type() != entrydb.TypeInitiatingEvent { +func 
invariantExecLinkAfterInitEventWithFlagSet(entryIdx int, entry Entry, entries []Entry, m *stubMetrics) error { + if entry.Type() != TypeInitiatingEvent { return nil } hasExecMessage := entry[1]&eventFlagHasExecutingMessage != 0 @@ -131,14 +132,14 @@ func invariantExecLinkAfterInitEventWithFlagSet(entryIdx int, entry entrydb.Entr if len(entries) <= linkIdx { return fmt.Errorf("expected executing link after initiating event with exec msg flag set at entry %v but there were no more events", entryIdx) } - if entries[linkIdx].Type() != entrydb.TypeExecutingLink { + if entries[linkIdx].Type() != TypeExecutingLink { return fmt.Errorf("expected executing link at idx %v after initiating event with exec msg flag set at entry %v but got type %v", linkIdx, entryIdx, entries[linkIdx][0]) } return nil } -func invariantExecLinkOnlyAfterInitiatingEventWithFlagSet(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error { - if entry.Type() != entrydb.TypeExecutingLink { +func invariantExecLinkOnlyAfterInitiatingEventWithFlagSet(entryIdx int, entry Entry, entries []Entry, m *stubMetrics) error { + if entry.Type() != TypeExecutingLink { return nil } if entryIdx == 0 { @@ -152,7 +153,7 @@ func invariantExecLinkOnlyAfterInitiatingEventWithFlagSet(entryIdx int, entry en return fmt.Errorf("found executing link without a preceding initiating event at entry %v", entryIdx) } initEntry := entries[initIdx] - if initEntry.Type() != entrydb.TypeInitiatingEvent { + if initEntry.Type() != TypeInitiatingEvent { return fmt.Errorf("expected initiating event at entry %v prior to executing link at %v but got %x", initIdx, entryIdx, initEntry[0]) } flags := initEntry[1] @@ -162,8 +163,8 @@ func invariantExecLinkOnlyAfterInitiatingEventWithFlagSet(entryIdx int, entry en return nil } -func invariantExecCheckAfterExecLink(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error { - if entry.Type() != entrydb.TypeExecutingLink { +func invariantExecCheckAfterExecLink(entryIdx int, entry Entry, entries []Entry, m *stubMetrics) error { + if entry.Type() != TypeExecutingLink { return nil } checkIdx := entryIdx + 1 @@ -174,14 +175,14 @@ func invariantExecCheckAfterExecLink(entryIdx int, entry entrydb.Entry, entries return fmt.Errorf("expected executing link at %v to be followed by executing check at %v but ran out of entries", entryIdx, checkIdx) } checkEntry := entries[checkIdx] - if checkEntry.Type() != entrydb.TypeExecutingCheck { + if checkEntry.Type() != TypeExecutingCheck { return fmt.Errorf("expected executing link at %v to be followed by executing check at %v but got type %v", entryIdx, checkIdx, checkEntry[0]) } return nil } -func invariantExecCheckOnlyAfterExecLink(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error { - if entry.Type() != entrydb.TypeExecutingCheck { +func invariantExecCheckOnlyAfterExecLink(entryIdx int, entry Entry, entries []Entry, m *stubMetrics) error { + if entry.Type() != TypeExecutingCheck { return nil } if entryIdx == 0 { @@ -195,7 +196,7 @@ func invariantExecCheckOnlyAfterExecLink(entryIdx int, entry entrydb.Entry, entr return fmt.Errorf("found executing link without a preceding initiating event at entry %v", entryIdx) } linkEntry := entries[linkIdx] - if linkEntry.Type() != entrydb.TypeExecutingLink { + if linkEntry.Type() != TypeExecutingLink { return fmt.Errorf("expected executing link at entry %v prior to executing check at %v but got %x", linkIdx, entryIdx, linkEntry[0]) } return nil diff --git 
a/op-supervisor/supervisor/backend/db/logs/db_test.go b/op-supervisor/supervisor/backend/db/logs/db_test.go index 31067b05808d5..46bec507fab40 100644 --- a/op-supervisor/supervisor/backend/db/logs/db_test.go +++ b/op-supervisor/supervisor/backend/db/logs/db_test.go @@ -90,7 +90,7 @@ func TestLatestSealedBlockNum(t *testing.T) { require.False(t, ok, "empty db expected") require.Zero(t, n) idx, err := db.searchCheckpoint(0, 0) - require.ErrorIs(t, err, ErrFuture, "no checkpoint in empty db") + require.ErrorIs(t, err, entrydb.ErrFuture, "no checkpoint in empty db") require.Zero(t, idx) }) }) @@ -123,7 +123,7 @@ func TestLatestSealedBlockNum(t *testing.T) { require.NoError(t, err) require.Zero(t, idx, "anchor block as checkpoint 0") _, err = db.searchCheckpoint(0, 0) - require.ErrorIs(t, err, ErrSkipped, "no checkpoint before genesis") + require.ErrorIs(t, err, entrydb.ErrSkipped, "no checkpoint before genesis") }) }) t.Run("Block 1 case", func(t *testing.T) { @@ -175,7 +175,7 @@ func TestAddLog(t *testing.T) { func(t *testing.T, db *DB, m *stubMetrics) { genesis := eth.BlockID{Hash: createHash(15), Number: 0} err := db.AddLog(createHash(1), genesis, 0, nil) - require.ErrorIs(t, err, ErrLogOutOfOrder) + require.ErrorIs(t, err, entrydb.ErrOutOfOrder) }) }) @@ -265,7 +265,7 @@ func TestAddLog(t *testing.T) { func(t *testing.T, db *DB, m *stubMetrics) { bl14 := eth.BlockID{Hash: createHash(14), Number: 14} err := db.SealBlock(createHash(13), bl14, 5000) - require.ErrorIs(t, err, ErrConflict) + require.ErrorIs(t, err, entrydb.ErrConflict) }) }) @@ -282,7 +282,7 @@ func TestAddLog(t *testing.T) { func(t *testing.T, db *DB, m *stubMetrics) { onto := eth.BlockID{Hash: createHash(14), Number: 14} err := db.AddLog(createHash(1), onto, 0, nil) - require.ErrorIs(t, err, ErrLogOutOfOrder, "cannot build logs on 14 when 15 is already sealed") + require.ErrorIs(t, err, entrydb.ErrOutOfOrder, "cannot build logs on 14 when 15 is already sealed") }) }) @@ -298,7 +298,7 @@ func TestAddLog(t *testing.T) { func(t *testing.T, db *DB, m *stubMetrics) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} err := db.AddLog(createHash(1), bl15, 0, nil) - require.ErrorIs(t, err, ErrLogOutOfOrder, "already at log index 2") + require.ErrorIs(t, err, entrydb.ErrOutOfOrder, "already at log index 2") }) }) @@ -313,7 +313,7 @@ func TestAddLog(t *testing.T) { }, func(t *testing.T, db *DB, m *stubMetrics) { err := db.AddLog(createHash(1), eth.BlockID{Hash: createHash(16), Number: 16}, 0, nil) - require.ErrorIs(t, err, ErrLogOutOfOrder) + require.ErrorIs(t, err, entrydb.ErrOutOfOrder) }) }) @@ -329,7 +329,7 @@ func TestAddLog(t *testing.T) { func(t *testing.T, db *DB, m *stubMetrics) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} err := db.AddLog(createHash(1), bl15, 1, nil) - require.ErrorIs(t, err, ErrLogOutOfOrder, "already at log index 2") + require.ErrorIs(t, err, entrydb.ErrOutOfOrder, "already at log index 2") }) }) @@ -345,7 +345,7 @@ func TestAddLog(t *testing.T) { func(t *testing.T, db *DB, m *stubMetrics) { bl15 := eth.BlockID{Hash: createHash(16), Number: 16} err := db.AddLog(createHash(1), bl15, 2, nil) - require.ErrorIs(t, err, ErrLogOutOfOrder) + require.ErrorIs(t, err, entrydb.ErrOutOfOrder) }) }) @@ -360,7 +360,7 @@ func TestAddLog(t *testing.T) { func(t *testing.T, db *DB, m *stubMetrics) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} err := db.AddLog(createHash(1), bl15, 2, nil) - require.ErrorIs(t, err, ErrLogOutOfOrder) + require.ErrorIs(t, err, entrydb.ErrOutOfOrder) }) }) @@ -373,7 
+373,7 @@ func TestAddLog(t *testing.T) { func(t *testing.T, db *DB, m *stubMetrics) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} err := db.AddLog(createHash(1), bl15, 5, nil) - require.ErrorIs(t, err, ErrLogOutOfOrder) + require.ErrorIs(t, err, entrydb.ErrOutOfOrder) }) }) @@ -394,7 +394,7 @@ func TestAddLog(t *testing.T) { err = db.SealBlock(bl15.Hash, bl16, 5001) require.NoError(t, err) err = db.AddLog(createHash(1), bl16, 1, nil) - require.ErrorIs(t, err, ErrLogOutOfOrder) + require.ErrorIs(t, err, entrydb.ErrOutOfOrder) }) }) @@ -700,7 +700,7 @@ func TestGetBlockInfo(t *testing.T) { func(t *testing.T, db *DB, m *stubMetrics) { bl10 := eth.BlockID{Hash: createHash(10), Number: 10} _, err := db.FindSealedBlock(bl10) - require.ErrorIs(t, err, ErrFuture) + require.ErrorIs(t, err, entrydb.ErrFuture) }) }) @@ -716,7 +716,7 @@ func TestGetBlockInfo(t *testing.T) { // if the DB starts at 11, then shouldn't find 10 bl10 := eth.BlockID{Hash: createHash(10), Number: 10} _, err := db.FindSealedBlock(bl10) - require.ErrorIs(t, err, ErrSkipped) + require.ErrorIs(t, err, entrydb.ErrSkipped) }) }) @@ -755,7 +755,7 @@ func requireConflicts(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logH m, ok := db.m.(*stubMetrics) require.True(t, ok, "Did not get the expected metrics type") _, err := db.Contains(blockNum, logIdx, logHash) - require.ErrorIs(t, err, ErrConflict, "canonical chain must not include this log") + require.ErrorIs(t, err, entrydb.ErrConflict, "canonical chain must not include this log") require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency*2), "Should not need to read more than between two checkpoints") } @@ -763,7 +763,7 @@ func requireFuture(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash m, ok := db.m.(*stubMetrics) require.True(t, ok, "Did not get the expected metrics type") _, err := db.Contains(blockNum, logIdx, logHash) - require.ErrorIs(t, err, ErrFuture, "canonical chain does not yet include this log") + require.ErrorIs(t, err, entrydb.ErrFuture, "canonical chain does not yet include this log") require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency*2), "Should not need to read more than between two checkpoints") } @@ -791,7 +791,7 @@ func TestRecoverOnCreate(t *testing.T) { return db, m, err } - storeWithEvents := func(evts ...entrydb.Entry) *stubEntryStore { + storeWithEvents := func(evts ...Entry) *stubEntryStore { store := &stubEntryStore{} store.entries = append(store.entries, evts...) 
return store @@ -924,9 +924,9 @@ func TestRewind(t *testing.T) { t.Run("WhenEmpty", func(t *testing.T) { runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) {}, func(t *testing.T, db *DB, m *stubMetrics) { - require.ErrorIs(t, db.Rewind(100), ErrFuture) + require.ErrorIs(t, db.Rewind(100), entrydb.ErrFuture) // Genesis is a block to, not present in an empty DB - require.ErrorIs(t, db.Rewind(0), ErrFuture) + require.ErrorIs(t, db.Rewind(0), entrydb.ErrFuture) }) }) @@ -944,7 +944,7 @@ func TestRewind(t *testing.T) { require.NoError(t, db.SealBlock(bl51.Hash, bl52, 504)) require.NoError(t, db.AddLog(createHash(4), bl52, 0, nil)) // cannot rewind to a block that is not sealed yet - require.ErrorIs(t, db.Rewind(53), ErrFuture) + require.ErrorIs(t, db.Rewind(53), entrydb.ErrFuture) }, func(t *testing.T, db *DB, m *stubMetrics) { requireContains(t, db, 51, 0, createHash(1)) @@ -963,7 +963,7 @@ func TestRewind(t *testing.T) { require.NoError(t, db.AddLog(createHash(1), bl50, 0, nil)) require.NoError(t, db.AddLog(createHash(2), bl50, 1, nil)) // cannot go back to an unknown block - require.ErrorIs(t, db.Rewind(25), ErrSkipped) + require.ErrorIs(t, db.Rewind(25), entrydb.ErrSkipped) }, func(t *testing.T, db *DB, m *stubMetrics) { requireContains(t, db, 51, 0, createHash(1)) @@ -1088,12 +1088,12 @@ func TestRewind(t *testing.T) { bl29 := eth.BlockID{Hash: createHash(29), Number: 29} // 29 was deleted err := db.AddLog(createHash(2), bl29, 1, nil) - require.ErrorIs(t, err, ErrLogOutOfOrder, "Cannot add log on removed block") + require.ErrorIs(t, err, entrydb.ErrOutOfOrder, "Cannot add log on removed block") // 15 is older, we have up to 16 bl15 := eth.BlockID{Hash: createHash(15), Number: 15} // try to add a third log to 15 err = db.AddLog(createHash(10), bl15, 2, nil) - require.ErrorIs(t, err, ErrLogOutOfOrder) + require.ErrorIs(t, err, entrydb.ErrOutOfOrder) bl16 := eth.BlockID{Hash: createHash(16), Number: 16} // try to add a log to 17, on top of 16 err = db.AddLog(createHash(42), bl16, 0, nil) @@ -1119,7 +1119,7 @@ func (s *stubMetrics) RecordDBSearchEntriesRead(count int64) { var _ Metrics = (*stubMetrics)(nil) type stubEntryStore struct { - entries []entrydb.Entry + entries []Entry } func (s *stubEntryStore) Size() int64 { @@ -1130,14 +1130,14 @@ func (s *stubEntryStore) LastEntryIdx() entrydb.EntryIdx { return entrydb.EntryIdx(s.Size() - 1) } -func (s *stubEntryStore) Read(idx entrydb.EntryIdx) (entrydb.Entry, error) { +func (s *stubEntryStore) Read(idx entrydb.EntryIdx) (Entry, error) { if idx < entrydb.EntryIdx(len(s.entries)) { return s.entries[idx], nil } - return entrydb.Entry{}, io.EOF + return Entry{}, io.EOF } -func (s *stubEntryStore) Append(entries ...entrydb.Entry) error { +func (s *stubEntryStore) Append(entries ...Entry) error { s.entries = append(s.entries, entries...) 
return nil } diff --git a/op-supervisor/supervisor/backend/db/logs/entries.go b/op-supervisor/supervisor/backend/db/logs/entries.go index 431adc99f465d..401f067bbe123 100644 --- a/op-supervisor/supervisor/backend/db/logs/entries.go +++ b/op-supervisor/supervisor/backend/db/logs/entries.go @@ -3,10 +3,10 @@ package logs import ( "encoding/binary" "fmt" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -27,9 +27,9 @@ func newSearchCheckpoint(blockNum uint64, logsSince uint32, timestamp uint64) se } } -func newSearchCheckpointFromEntry(data entrydb.Entry) (searchCheckpoint, error) { - if data.Type() != entrydb.TypeSearchCheckpoint { - return searchCheckpoint{}, fmt.Errorf("%w: attempting to decode search checkpoint but was type %s", ErrDataCorruption, data.Type()) +func newSearchCheckpointFromEntry(data Entry) (searchCheckpoint, error) { + if data.Type() != TypeSearchCheckpoint { + return searchCheckpoint{}, fmt.Errorf("%w: attempting to decode search checkpoint but was type %s", entrydb.ErrDataCorruption, data.Type()) } return searchCheckpoint{ blockNum: binary.LittleEndian.Uint64(data[1:9]), @@ -40,9 +40,9 @@ func newSearchCheckpointFromEntry(data entrydb.Entry) (searchCheckpoint, error) // encode creates a checkpoint entry // type 0: "search checkpoint" = 21 bytes -func (s searchCheckpoint) encode() entrydb.Entry { - var data entrydb.Entry - data[0] = uint8(entrydb.TypeSearchCheckpoint) +func (s searchCheckpoint) encode() Entry { + var data Entry + data[0] = uint8(TypeSearchCheckpoint) binary.LittleEndian.PutUint64(data[1:9], s.blockNum) binary.LittleEndian.PutUint32(data[9:13], s.logsSince) binary.LittleEndian.PutUint64(data[13:21], s.timestamp) @@ -57,16 +57,16 @@ func newCanonicalHash(hash common.Hash) canonicalHash { return canonicalHash{hash: hash} } -func newCanonicalHashFromEntry(data entrydb.Entry) (canonicalHash, error) { - if data.Type() != entrydb.TypeCanonicalHash { - return canonicalHash{}, fmt.Errorf("%w: attempting to decode canonical hash but was type %s", ErrDataCorruption, data.Type()) +func newCanonicalHashFromEntry(data Entry) (canonicalHash, error) { + if data.Type() != TypeCanonicalHash { + return canonicalHash{}, fmt.Errorf("%w: attempting to decode canonical hash but was type %s", entrydb.ErrDataCorruption, data.Type()) } return newCanonicalHash(common.Hash(data[1:33])), nil } -func (c canonicalHash) encode() entrydb.Entry { - var entry entrydb.Entry - entry[0] = uint8(entrydb.TypeCanonicalHash) +func (c canonicalHash) encode() Entry { + var entry Entry + entry[0] = uint8(TypeCanonicalHash) copy(entry[1:33], c.hash[:]) return entry } @@ -76,9 +76,9 @@ type initiatingEvent struct { logHash common.Hash } -func newInitiatingEventFromEntry(data entrydb.Entry) (initiatingEvent, error) { - if data.Type() != entrydb.TypeInitiatingEvent { - return initiatingEvent{}, fmt.Errorf("%w: attempting to decode initiating event but was type %s", ErrDataCorruption, data.Type()) +func newInitiatingEventFromEntry(data Entry) (initiatingEvent, error) { + if data.Type() != TypeInitiatingEvent { + return initiatingEvent{}, fmt.Errorf("%w: attempting to decode initiating event but was type %s", entrydb.ErrDataCorruption, data.Type()) } flags := data[1] return initiatingEvent{ @@ -96,9 +96,9 @@ func newInitiatingEvent(logHash common.Hash, hasExecMsg bool) 
initiatingEvent { // encode creates an initiating event entry // type 2: "initiating event" = 22 bytes -func (i initiatingEvent) encode() entrydb.Entry { - var data entrydb.Entry - data[0] = uint8(entrydb.TypeInitiatingEvent) +func (i initiatingEvent) encode() Entry { + var data Entry + data[0] = uint8(TypeInitiatingEvent) flags := byte(0) if i.hasExecMsg { flags = flags | eventFlagHasExecutingMessage @@ -127,9 +127,9 @@ func newExecutingLink(msg types.ExecutingMessage) (executingLink, error) { }, nil } -func newExecutingLinkFromEntry(data entrydb.Entry) (executingLink, error) { - if data.Type() != entrydb.TypeExecutingLink { - return executingLink{}, fmt.Errorf("%w: attempting to decode executing link but was type %s", ErrDataCorruption, data.Type()) +func newExecutingLinkFromEntry(data Entry) (executingLink, error) { + if data.Type() != TypeExecutingLink { + return executingLink{}, fmt.Errorf("%w: attempting to decode executing link but was type %s", entrydb.ErrDataCorruption, data.Type()) } timestamp := binary.LittleEndian.Uint64(data[16:24]) return executingLink{ @@ -142,9 +142,9 @@ func newExecutingLinkFromEntry(data entrydb.Entry) (executingLink, error) { // encode creates an executing link entry // type 3: "executing link" = 24 bytes -func (e executingLink) encode() entrydb.Entry { - var entry entrydb.Entry - entry[0] = uint8(entrydb.TypeExecutingLink) +func (e executingLink) encode() Entry { + var entry Entry + entry[0] = uint8(TypeExecutingLink) binary.LittleEndian.PutUint32(entry[1:5], e.chain) binary.LittleEndian.PutUint64(entry[5:13], e.blockNum) @@ -164,18 +164,18 @@ func newExecutingCheck(hash common.Hash) executingCheck { return executingCheck{hash: hash} } -func newExecutingCheckFromEntry(data entrydb.Entry) (executingCheck, error) { - if data.Type() != entrydb.TypeExecutingCheck { - return executingCheck{}, fmt.Errorf("%w: attempting to decode executing check but was type %s", ErrDataCorruption, data.Type()) +func newExecutingCheckFromEntry(data Entry) (executingCheck, error) { + if data.Type() != TypeExecutingCheck { + return executingCheck{}, fmt.Errorf("%w: attempting to decode executing check but was type %s", entrydb.ErrDataCorruption, data.Type()) } return newExecutingCheck(common.Hash(data[1:33])), nil } // encode creates an executing check entry // type 4: "executing check" = 33 bytes -func (e executingCheck) encode() entrydb.Entry { - var entry entrydb.Entry - entry[0] = uint8(entrydb.TypeExecutingCheck) +func (e executingCheck) encode() Entry { + var entry Entry + entry[0] = uint8(TypeExecutingCheck) copy(entry[1:33], e.hash[:]) return entry } @@ -184,8 +184,8 @@ type paddingEntry struct{} // encoding of the padding entry // type 5: "padding" = 34 bytes -func (e paddingEntry) encode() entrydb.Entry { - var entry entrydb.Entry - entry[0] = uint8(entrydb.TypePadding) +func (e paddingEntry) encode() Entry { + var entry Entry + entry[0] = uint8(TypePadding) return entry } diff --git a/op-supervisor/supervisor/backend/db/logs/entry.go b/op-supervisor/supervisor/backend/db/logs/entry.go new file mode 100644 index 0000000000000..a75c1e3ea1ef4 --- /dev/null +++ b/op-supervisor/supervisor/backend/db/logs/entry.go @@ -0,0 +1,79 @@ +package logs + +import ( + "fmt" + "strings" + + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" +) + +type EntryObj interface { + encode() Entry +} + +type Entry = entrydb.Entry[EntryType] + +type EntryTypeFlag uint8 + +const ( + FlagSearchCheckpoint EntryTypeFlag = 1 << TypeSearchCheckpoint + 
FlagCanonicalHash EntryTypeFlag = 1 << TypeCanonicalHash + FlagInitiatingEvent EntryTypeFlag = 1 << TypeInitiatingEvent + FlagExecutingLink EntryTypeFlag = 1 << TypeExecutingLink + FlagExecutingCheck EntryTypeFlag = 1 << TypeExecutingCheck + FlagPadding EntryTypeFlag = 1 << TypePadding + // for additional padding + FlagPadding2 EntryTypeFlag = FlagPadding << 1 +) + +func (x EntryTypeFlag) String() string { + var out []string + for i := EntryTypeFlag(1); i != 0; i <<= 1 { // iterate to bitmask + if x.Any(i) { + out = append(out, i.String()) + } + } + return strings.Join(out, "|") +} + +func (x EntryTypeFlag) Any(v EntryTypeFlag) bool { + return x&v != 0 +} + +func (x *EntryTypeFlag) Add(v EntryTypeFlag) { + *x = *x | v +} + +func (x *EntryTypeFlag) Remove(v EntryTypeFlag) { + *x = *x &^ v +} + +type EntryType uint8 + +const ( + TypeSearchCheckpoint EntryType = iota + TypeCanonicalHash + TypeInitiatingEvent + TypeExecutingLink + TypeExecutingCheck + TypePadding +) + +func (x EntryType) String() string { + switch x { + case TypeSearchCheckpoint: + return "searchCheckpoint" + case TypeCanonicalHash: + return "canonicalHash" + case TypeInitiatingEvent: + return "initiatingEvent" + case TypeExecutingLink: + return "executingLink" + case TypeExecutingCheck: + return "executingCheck" + case TypePadding: + return "padding" + default: + return fmt.Sprintf("unknown-%d", uint8(x)) + } +} diff --git a/op-supervisor/supervisor/backend/db/logs/iterator.go b/op-supervisor/supervisor/backend/db/logs/iterator.go index f9e65c41e890f..e6d9c11f287e4 100644 --- a/op-supervisor/supervisor/backend/db/logs/iterator.go +++ b/op-supervisor/supervisor/backend/db/logs/iterator.go @@ -8,13 +8,11 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) type IteratorState interface { NextIndex() entrydb.EntryIdx - HeadPointer() (heads.HeadPointer, error) SealedBlock() (hash common.Hash, num uint64, ok bool) InitMessage() (hash common.Hash, logIndex uint32, ok bool) ExecMessage() *types.ExecutingMessage @@ -42,7 +40,7 @@ type traverseConditionalFn func(state IteratorState) error func (i *iterator) End() error { for { _, err := i.next() - if errors.Is(err, ErrFuture) { + if errors.Is(err, entrydb.ErrFuture) { return nil } else if err != nil { return err @@ -59,7 +57,7 @@ func (i *iterator) NextInitMsg() error { if err != nil { return err } - if typ == entrydb.TypeInitiatingEvent { + if typ == TypeInitiatingEvent { seenLog = true } if !i.current.hasCompleteBlock() { @@ -98,7 +96,7 @@ func (i *iterator) NextBlock() error { if err != nil { return err } - if typ == entrydb.TypeSearchCheckpoint { + if typ == TypeSearchCheckpoint { seenBlock = true } if !i.current.hasCompleteBlock() { @@ -130,12 +128,12 @@ func (i *iterator) TraverseConditional(fn traverseConditionalFn) error { } // Read and apply the next entry. 
diff --git a/op-supervisor/supervisor/backend/db/logs/iterator.go b/op-supervisor/supervisor/backend/db/logs/iterator.go
index f9e65c41e890f..e6d9c11f287e4 100644
--- a/op-supervisor/supervisor/backend/db/logs/iterator.go
+++ b/op-supervisor/supervisor/backend/db/logs/iterator.go
@@ -8,13 +8,11 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 
 	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
-	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads"
 	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
 )
 
 type IteratorState interface {
 	NextIndex() entrydb.EntryIdx
-	HeadPointer() (heads.HeadPointer, error)
 	SealedBlock() (hash common.Hash, num uint64, ok bool)
 	InitMessage() (hash common.Hash, logIndex uint32, ok bool)
 	ExecMessage() *types.ExecutingMessage
@@ -42,7 +40,7 @@ type traverseConditionalFn func(state IteratorState) error
 func (i *iterator) End() error {
 	for {
 		_, err := i.next()
-		if errors.Is(err, ErrFuture) {
+		if errors.Is(err, entrydb.ErrFuture) {
 			return nil
 		} else if err != nil {
 			return err
@@ -59,7 +57,7 @@ func (i *iterator) NextInitMsg() error {
 		if err != nil {
 			return err
 		}
-		if typ == entrydb.TypeInitiatingEvent {
+		if typ == TypeInitiatingEvent {
 			seenLog = true
 		}
 		if !i.current.hasCompleteBlock() {
@@ -98,7 +96,7 @@ func (i *iterator) NextBlock() error {
 		if err != nil {
 			return err
 		}
-		if typ == entrydb.TypeSearchCheckpoint {
+		if typ == TypeSearchCheckpoint {
 			seenBlock = true
 		}
 		if !i.current.hasCompleteBlock() {
@@ -130,12 +128,12 @@ func (i *iterator) TraverseConditional(fn traverseConditionalFn) error {
 }
 
 // Read and apply the next entry.
-func (i *iterator) next() (entrydb.EntryType, error) {
+func (i *iterator) next() (EntryType, error) {
 	index := i.current.nextEntryIndex
 	entry, err := i.db.store.Read(index)
 	if err != nil {
 		if errors.Is(err, io.EOF) {
-			return 0, ErrFuture
+			return 0, entrydb.ErrFuture
 		}
 		return 0, fmt.Errorf("failed to read entry %d: %w", index, err)
 	}
@@ -166,7 +164,3 @@ func (i *iterator) InitMessage() (hash common.Hash, logIndex uint32, ok bool) {
 func (i *iterator) ExecMessage() *types.ExecutingMessage {
 	return i.current.ExecMessage()
 }
-
-func (i *iterator) HeadPointer() (heads.HeadPointer, error) {
-	return i.current.HeadPointer()
-}
diff --git a/op-supervisor/supervisor/backend/db/logs/state.go b/op-supervisor/supervisor/backend/db/logs/state.go
index df63f96e35997..bf8b2561054f1 100644
--- a/op-supervisor/supervisor/backend/db/logs/state.go
+++ b/op-supervisor/supervisor/backend/db/logs/state.go
@@ -9,7 +9,6 @@ import (
 	"github.com/ethereum-optimism/optimism/op-service/eth"
 	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
-	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads"
 	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
 )
 
@@ -73,18 +72,14 @@ type logContext struct {
 	// then we know an executing message is still coming.
 	execMsg *types.ExecutingMessage
 
-	need entrydb.EntryTypeFlag
+	need EntryTypeFlag
 
 	// buffer of entries not yet in the DB.
 	// This is generated as objects are applied.
 	// E.g. you can build multiple hypothetical blocks with log events on top of the state,
 	// before flushing the entries to a DB.
 	// However, no entries can be read from the DB while objects are being applied.
-	out []entrydb.Entry
-}
-
-type EntryObj interface {
-	encode() entrydb.Entry
+	out []Entry
 }
 
 func (l *logContext) NextIndex() entrydb.EntryIdx {
@@ -100,11 +95,11 @@ func (l *logContext) SealedBlock() (hash common.Hash, num uint64, ok bool) {
 }
 
 func (l *logContext) hasCompleteBlock() bool {
-	return !l.need.Any(entrydb.FlagCanonicalHash)
+	return !l.need.Any(FlagCanonicalHash)
 }
 
 func (l *logContext) hasIncompleteLog() bool {
-	return l.need.Any(entrydb.FlagInitiatingEvent | entrydb.FlagExecutingLink | entrydb.FlagExecutingCheck)
+	return l.need.Any(FlagInitiatingEvent | FlagExecutingLink | FlagExecutingCheck)
 }
 
 func (l *logContext) hasReadableLog() bool {
@@ -127,20 +122,8 @@ func (l *logContext) ExecMessage() *types.ExecutingMessage {
 	return nil
 }
 
-func (l *logContext) HeadPointer() (heads.HeadPointer, error) {
-	if l.need != 0 {
-		return heads.HeadPointer{}, errors.New("cannot provide head pointer while state is incomplete")
-	}
-	return heads.HeadPointer{
-		LastSealedBlockHash: l.blockHash,
-		LastSealedBlockNum:  l.blockNum,
-		LastSealedTimestamp: l.timestamp,
-		LogsSince:           l.logsSince,
-	}, nil
-}
-
 // ApplyEntry applies an entry on top of the current state.
-func (l *logContext) ApplyEntry(entry entrydb.Entry) error {
+func (l *logContext) ApplyEntry(entry Entry) error {
 	// Wrap processEntry to add common useful error message info
 	err := l.processEntry(entry)
 	if err != nil {
@@ -152,12 +135,12 @@ func (l *logContext) ApplyEntry(entry entrydb.Entry) error {
 
 // processEntry decodes and applies an entry to the state.
 // Entries may not be applied if we are in the process of generating entries from objects.
 // These outputs need to be flushed before inputs can be accepted.
-func (l *logContext) processEntry(entry entrydb.Entry) error { +func (l *logContext) processEntry(entry Entry) error { if len(l.out) != 0 { panic("can only apply without appending if the state is still empty") } switch entry.Type() { - case entrydb.TypeSearchCheckpoint: + case TypeSearchCheckpoint: current, err := newSearchCheckpointFromEntry(entry) if err != nil { return err @@ -166,14 +149,14 @@ func (l *logContext) processEntry(entry entrydb.Entry) error { l.blockHash = common.Hash{} l.logsSince = current.logsSince // TODO this is bumping the logsSince? l.timestamp = current.timestamp - l.need.Add(entrydb.FlagCanonicalHash) + l.need.Add(FlagCanonicalHash) // Log data after the block we are sealing remains to be seen if l.logsSince == 0 { l.logHash = common.Hash{} l.execMsg = nil } - case entrydb.TypeCanonicalHash: - if !l.need.Any(entrydb.FlagCanonicalHash) { + case TypeCanonicalHash: + if !l.need.Any(FlagCanonicalHash) { return errors.New("not ready for canonical hash entry, already sealed the last block") } canonHash, err := newCanonicalHashFromEntry(entry) @@ -181,8 +164,8 @@ func (l *logContext) processEntry(entry entrydb.Entry) error { return err } l.blockHash = canonHash.hash - l.need.Remove(entrydb.FlagCanonicalHash) - case entrydb.TypeInitiatingEvent: + l.need.Remove(FlagCanonicalHash) + case TypeInitiatingEvent: if !l.hasCompleteBlock() { return errors.New("did not complete block seal, cannot add log") } @@ -196,13 +179,13 @@ func (l *logContext) processEntry(entry entrydb.Entry) error { l.execMsg = nil // clear the old state l.logHash = evt.logHash if evt.hasExecMsg { - l.need.Add(entrydb.FlagExecutingLink | entrydb.FlagExecutingCheck) + l.need.Add(FlagExecutingLink | FlagExecutingCheck) } else { l.logsSince += 1 } - l.need.Remove(entrydb.FlagInitiatingEvent) - case entrydb.TypeExecutingLink: - if !l.need.Any(entrydb.FlagExecutingLink) { + l.need.Remove(FlagInitiatingEvent) + case TypeExecutingLink: + if !l.need.Any(FlagExecutingLink) { return errors.New("unexpected executing-link") } link, err := newExecutingLinkFromEntry(entry) @@ -216,13 +199,13 @@ func (l *logContext) processEntry(entry entrydb.Entry) error { Timestamp: link.timestamp, Hash: common.Hash{}, // not known yet } - l.need.Remove(entrydb.FlagExecutingLink) - l.need.Add(entrydb.FlagExecutingCheck) - case entrydb.TypeExecutingCheck: - if l.need.Any(entrydb.FlagExecutingLink) { + l.need.Remove(FlagExecutingLink) + l.need.Add(FlagExecutingCheck) + case TypeExecutingCheck: + if l.need.Any(FlagExecutingLink) { return errors.New("need executing link to be applied before the check part") } - if !l.need.Any(entrydb.FlagExecutingCheck) { + if !l.need.Any(FlagExecutingCheck) { return errors.New("unexpected executing check") } link, err := newExecutingCheckFromEntry(entry) @@ -230,13 +213,13 @@ func (l *logContext) processEntry(entry entrydb.Entry) error { return err } l.execMsg.Hash = link.hash - l.need.Remove(entrydb.FlagExecutingCheck) + l.need.Remove(FlagExecutingCheck) l.logsSince += 1 - case entrydb.TypePadding: - if l.need.Any(entrydb.FlagPadding) { - l.need.Remove(entrydb.FlagPadding) + case TypePadding: + if l.need.Any(FlagPadding) { + l.need.Remove(FlagPadding) } else { - l.need.Remove(entrydb.FlagPadding2) + l.need.Remove(FlagPadding2) } default: return fmt.Errorf("unknown entry type: %s", entry.Type()) @@ -253,77 +236,75 @@ func (l *logContext) appendEntry(obj EntryObj) { l.nextEntryIndex += 1 } -// infer advances the logContext in cases where multiple entries are to be appended implicitly -// depending on the 
last type of entry, a new entry is appended,
-// or when the searchCheckpoint should be inserted.
-// This can be done repeatedly until there is no more implied data to extend.
+// infer advances the logContext in cases where complex entries contain multiple implied entries
+// e.g. a SearchCheckpoint implies a CanonicalHash will follow.
+// This also handles inserting the searchCheckpoint at the set frequency, and padding entries.
 func (l *logContext) infer() error {
 	// We force-insert a checkpoint whenever we hit the known fixed interval.
 	if l.nextEntryIndex%searchCheckpointFrequency == 0 {
-		l.need.Add(entrydb.FlagSearchCheckpoint)
+		l.need.Add(FlagSearchCheckpoint)
 	}
-	if l.need.Any(entrydb.FlagSearchCheckpoint) {
+	if l.need.Any(FlagSearchCheckpoint) {
 		l.appendEntry(newSearchCheckpoint(l.blockNum, l.logsSince, l.timestamp))
-		l.need.Add(entrydb.FlagCanonicalHash) // always follow with a canonical hash
-		l.need.Remove(entrydb.FlagSearchCheckpoint)
+		l.need.Add(FlagCanonicalHash) // always follow with a canonical hash
+		l.need.Remove(FlagSearchCheckpoint)
 		return nil
 	}
-	if l.need.Any(entrydb.FlagCanonicalHash) {
+	if l.need.Any(FlagCanonicalHash) {
 		l.appendEntry(newCanonicalHash(l.blockHash))
-		l.need.Remove(entrydb.FlagCanonicalHash)
+		l.need.Remove(FlagCanonicalHash)
 		return nil
 	}
-	if l.need.Any(entrydb.FlagPadding) {
+	if l.need.Any(FlagPadding) {
 		l.appendEntry(paddingEntry{})
-		l.need.Remove(entrydb.FlagPadding)
+		l.need.Remove(FlagPadding)
 		return nil
 	}
-	if l.need.Any(entrydb.FlagPadding2) {
+	if l.need.Any(FlagPadding2) {
 		l.appendEntry(paddingEntry{})
-		l.need.Remove(entrydb.FlagPadding2)
+		l.need.Remove(FlagPadding2)
 		return nil
 	}
-	if l.need.Any(entrydb.FlagInitiatingEvent) {
+	if l.need.Any(FlagInitiatingEvent) {
 		// If we are running out of space for log-event data,
 		// write some padding entries, to pass the checkpoint.
 		if l.execMsg != nil { // takes 3 total. Need to avoid the checkpoint.
 			switch l.nextEntryIndex % searchCheckpointFrequency {
 			case searchCheckpointFrequency - 1:
-				l.need.Add(entrydb.FlagPadding)
+				l.need.Add(FlagPadding)
 				return nil
 			case searchCheckpointFrequency - 2:
-				l.need.Add(entrydb.FlagPadding | entrydb.FlagPadding2)
+				l.need.Add(FlagPadding | FlagPadding2)
 				return nil
 			}
 		}
 		evt := newInitiatingEvent(l.logHash, l.execMsg != nil)
 		l.appendEntry(evt)
-		l.need.Remove(entrydb.FlagInitiatingEvent)
+		l.need.Remove(FlagInitiatingEvent)
 		if l.execMsg == nil {
 			l.logsSince += 1
 		}
 		return nil
 	}
-	if l.need.Any(entrydb.FlagExecutingLink) {
+	if l.need.Any(FlagExecutingLink) {
 		link, err := newExecutingLink(*l.execMsg)
 		if err != nil {
 			return fmt.Errorf("failed to create executing link: %w", err)
 		}
 		l.appendEntry(link)
-		l.need.Remove(entrydb.FlagExecutingLink)
+		l.need.Remove(FlagExecutingLink)
 		return nil
 	}
-	if l.need.Any(entrydb.FlagExecutingCheck) {
+	if l.need.Any(FlagExecutingCheck) {
 		l.appendEntry(newExecutingCheck(l.execMsg.Hash))
-		l.need.Remove(entrydb.FlagExecutingCheck)
+		l.need.Remove(FlagExecutingCheck)
 		l.logsSince += 1
 		return nil
 	}
 	return io.EOF
 }
 
-// inferFull advances the queued entries held by the log context repeatedly
-// until no more implied entries can be added
+// inferFull advances the logContext until it cannot infer any more entries.
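+// Illustrative sequence (a sketch, assuming a freshly sealed block):
+//
+//	l.need.Add(FlagSearchCheckpoint)
+//	_ = l.infer()    // appends the searchCheckpoint entry, now needs FlagCanonicalHash
+//	_ = l.infer()    // appends the canonicalHash entry
+//	err := l.infer() // io.EOF: nothing more is implied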
func (l *logContext) inferFull() error {
	for i := 0; i < 10; i++ {
		err := l.infer()
@@ -364,13 +345,13 @@ func (l *logContext) SealBlock(parent common.Hash, upd eth.BlockID, timestamp ui
			return err
		}
		if l.blockHash != parent {
-			return fmt.Errorf("%w: cannot apply block %s (parent %s) on top of %s", ErrConflict, upd, parent, l.blockHash)
+			return fmt.Errorf("%w: cannot apply block %s (parent %s) on top of %s", entrydb.ErrConflict, upd, parent, l.blockHash)
		}
		if l.blockHash != (common.Hash{}) && l.blockNum+1 != upd.Number {
-			return fmt.Errorf("%w: cannot apply block %d on top of %d", ErrConflict, upd.Number, l.blockNum)
+			return fmt.Errorf("%w: cannot apply block %d on top of %d", entrydb.ErrConflict, upd.Number, l.blockNum)
		}
		if l.timestamp > timestamp {
-			return fmt.Errorf("%w: block timestamp %d must be equal or larger than current timestamp %d", ErrConflict, timestamp, l.timestamp)
+			return fmt.Errorf("%w: block timestamp %d must be equal to or larger than the current timestamp %d", entrydb.ErrConflict, timestamp, l.timestamp)
		}
	}
	l.blockHash = upd.Hash
@@ -379,7 +360,7 @@ func (l *logContext) SealBlock(parent common.Hash, upd eth.BlockID, timestamp ui
	l.logsSince = 0
	l.execMsg = nil
	l.logHash = common.Hash{}
-	l.need.Add(entrydb.FlagSearchCheckpoint)
+	l.need.Add(FlagSearchCheckpoint)
	return l.inferFull() // apply to the state as much as possible
}
@@ -387,34 +368,34 @@ func (l *logContext) SealBlock(parent common.Hash, upd eth.BlockID, timestamp ui
// ApplyLog applies a log on top of the state.
// The parent-block that the log comes after must be sealed with SealBlock first.
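+// Illustrative call order (a sketch; block numbers and hashes are hypothetical):
+//
+//	_ = l.SealBlock(parent99, block100, ts)     // block 100 becomes the sealed tip
+//	_ = l.ApplyLog(block100, 0, logHash0, nil)  // first log of block 101
+//	_ = l.ApplyLog(block100, 1, logHash1, exec) // an executing message implies 3 entries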
func (l *logContext) ApplyLog(parentBlock eth.BlockID, logIdx uint32, logHash common.Hash, execMsg *types.ExecutingMessage) error {
	if parentBlock == (eth.BlockID{}) {
-		return fmt.Errorf("genesis does not have logs: %w", ErrLogOutOfOrder)
+		return fmt.Errorf("genesis does not have logs: %w", entrydb.ErrOutOfOrder)
	}
	if err := l.inferFull(); err != nil { // ensure we can start applying
		return err
	}
	if !l.hasCompleteBlock() {
		if l.blockNum == 0 {
-			return fmt.Errorf("%w: should not have logs in block 0", ErrLogOutOfOrder)
+			return fmt.Errorf("%w: should not have logs in block 0", entrydb.ErrOutOfOrder)
		} else {
			return errors.New("cannot append log before last known block is sealed")
		}
	}
	// check parent block
	if l.blockHash != parentBlock.Hash {
-		return fmt.Errorf("%w: log builds on top of block %s, but have block %s", ErrLogOutOfOrder, parentBlock, l.blockHash)
+		return fmt.Errorf("%w: log builds on top of block %s, but have block %s", entrydb.ErrOutOfOrder, parentBlock, l.blockHash)
	}
	if l.blockNum != parentBlock.Number {
-		return fmt.Errorf("%w: log builds on top of block %d, but have block %d", ErrLogOutOfOrder, parentBlock.Number, l.blockNum)
+		return fmt.Errorf("%w: log builds on top of block %d, but have block %d", entrydb.ErrOutOfOrder, parentBlock.Number, l.blockNum)
	}
	// check if log fits on top. The length so far == the index of the next log.
	if logIdx != l.logsSince {
-		return fmt.Errorf("%w: expected event index %d, cannot append %d", ErrLogOutOfOrder, l.logsSince, logIdx)
+		return fmt.Errorf("%w: expected event index %d, cannot append %d", entrydb.ErrOutOfOrder, l.logsSince, logIdx)
	}
	l.logHash = logHash
	l.execMsg = execMsg
-	l.need.Add(entrydb.FlagInitiatingEvent)
+	l.need.Add(FlagInitiatingEvent)
	if execMsg != nil {
-		l.need.Add(entrydb.FlagExecutingLink | entrydb.FlagExecutingCheck)
+		l.need.Add(FlagExecutingLink | FlagExecutingCheck)
	}
	return l.inferFull() // apply to the state as much as possible
}
diff --git a/op-supervisor/supervisor/backend/db/query.go b/op-supervisor/supervisor/backend/db/query.go
new file mode 100644
index 0000000000000..1c02f18511dc5
--- /dev/null
+++ b/op-supervisor/supervisor/backend/db/query.go
@@ -0,0 +1,179 @@
+package db
+
+import (
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/common"
+
+	"github.com/ethereum-optimism/optimism/op-service/eth"
+	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
+	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs"
+	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
+)
+
+func (db *ChainsDB) FindSealedBlock(chain types.ChainID, block eth.BlockID) (nextEntry entrydb.EntryIdx, err error) {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	logDB, ok := db.logDBs[chain]
+	if !ok {
+		return 0, fmt.Errorf("%w: %v", ErrUnknownChain, chain)
+	}
+	return logDB.FindSealedBlock(block)
+}
+
+// LatestBlockNum returns the latest fully-sealed block number that has been recorded to the logs db
+// for the given chain. It does not contain safety guarantees.
+// The block number might not be available (empty database, or non-existent chain).
+func (db *ChainsDB) LatestBlockNum(chain types.ChainID) (num uint64, ok bool) {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	logDB, knownChain := db.logDBs[chain]
+	if !knownChain {
+		return 0, false
+	}
+	return logDB.LatestSealedBlockNum()
+}
+
+func (db *ChainsDB) LocalUnsafe(chainID types.ChainID) (types.HeadPointer, error) {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	eventsDB, ok := db.logDBs[chainID]
+	if !ok {
+		return types.HeadPointer{}, ErrUnknownChain
+	}
+	_ = eventsDB // TODO get tip of events DB
+	return types.HeadPointer{}, nil
+}
+
+func (db *ChainsDB) CrossUnsafe(chainID types.ChainID) (types.HeadPointer, error) {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	result, ok := db.crossUnsafe[chainID]
+	if !ok {
+		return types.HeadPointer{}, ErrUnknownChain
+	}
+	return result, nil
+}
+
+func (db *ChainsDB) LocalSafe(chainID types.ChainID) (derivedFrom eth.BlockID, derived eth.BlockID, err error) {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	localDB, ok := db.localDBs[chainID]
+	if !ok {
+		return eth.BlockID{}, eth.BlockID{}, ErrUnknownChain
+	}
+	_ = localDB // TODO get tip of local DB
+	return eth.BlockID{}, eth.BlockID{}, nil
+}
+
+func (db *ChainsDB) CrossSafe(chainID types.ChainID) (derivedFrom eth.BlockID, derived eth.BlockID, err error) {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	crossDB, ok := db.crossDBs[chainID]
+	if !ok {
+		return eth.BlockID{}, eth.BlockID{}, ErrUnknownChain
+	}
+	_ = crossDB // TODO get tip of cross DB
+	return eth.BlockID{}, eth.BlockID{}, nil
+}
+
+func (db *ChainsDB) Finalized(chainID types.ChainID) (eth.BlockID, error) {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	finalizedL1 := db.finalizedL1
+	derived, err := db.LastDerivedFrom(chainID, finalizedL1.ID())
+	if err != nil {
+		return eth.BlockID{}, fmt.Errorf("could not find what was last derived from the finalized L1 block %s: %w", finalizedL1, err)
+	}
+	return derived, nil
+}
+
+func (db *ChainsDB) LastDerivedFrom(chainID types.ChainID, derivedFrom eth.BlockID) (derived eth.BlockID, err error) {
+	crossDB, ok := db.crossDBs[chainID]
+	if !ok {
+		return eth.BlockID{}, ErrUnknownChain
+	}
+	// TODO: resolve the last derived block via crossDB.LastDerived()
+	_ = crossDB
+	return eth.BlockID{}, nil
+}
+
+func (db *ChainsDB) DerivedFrom(chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockID, err error) {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	localDB, ok := db.localDBs[chainID]
+	if !ok {
+		return eth.BlockID{}, ErrUnknownChain
+	}
+	// TODO: resolve via localDB.DerivedFrom()
+	_ = localDB
+	return eth.BlockID{}, nil
+}
+
+// Check calls the underlying logDB to determine if the given log entry exists at the given location.
+// If the block-seal of the block that includes the log is known, it is returned; it is fully zeroed if the block is still in progress.
+func (db *ChainsDB) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (includedIn eth.BlockID, err error) {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	logDB, ok := db.logDBs[chain]
+	if !ok {
+		return eth.BlockID{}, fmt.Errorf("%w: %v", ErrUnknownChain, chain)
+	}
+	_, err = logDB.Contains(blockNum, logIdx, logHash)
+	if err != nil {
+		return eth.BlockID{}, err
+	}
+	// TODO fix this for cross-safe to work
+	return eth.BlockID{}, nil
+}
+
+// Safest returns the strongest safety level that can be guaranteed for the given log entry.
+// It assumes the log entry has already been checked and is valid; this function only checks safety levels.
+// Cross-safety levels are all considered to be more safe than any form of local-safety.
+func (db *ChainsDB) Safest(chainID types.ChainID, blockNum uint64, index uint32) (safest types.SafetyLevel, err error) {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	if finalized, err := db.Finalized(chainID); err == nil {
+		if finalized.Number >= blockNum {
+			return types.Finalized, nil
+		}
+	}
+	_, crossSafe, err := db.CrossSafe(chainID)
+	if err != nil {
+		return types.Invalid, err
+	}
+	if crossSafe.Number >= blockNum {
+		return types.CrossSafe, nil
+	}
+	crossUnsafe, err := db.CrossUnsafe(chainID)
+	if err != nil {
+		return types.Invalid, err
+	}
+	if crossUnsafe.WithinRange(blockNum, index) {
+		return types.CrossUnsafe, nil
+	}
+	_, localSafe, err := db.LocalSafe(chainID)
+	if err != nil {
+		return types.Invalid, err
+	}
+	if localSafe.Number >= blockNum {
+		return types.LocalSafe, nil
+	}
+	return types.LocalUnsafe, nil
+}
+
+func (db *ChainsDB) IteratorStartingAt(chain types.ChainID, sealedNum uint64, logIndex uint32) (logs.Iterator, error) {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	logDB, ok := db.logDBs[chain]
+	if !ok {
+		return nil, fmt.Errorf("%w: %v", ErrUnknownChain, chain)
+	}
+	return logDB.IteratorStartingAt(sealedNum, logIndex)
+}
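+
+// Illustrative reading of the Safest cascade above (chainID, block 42 and
+// log index 3 are hypothetical):
+//
+//	level, _ := db.Safest(chainID, 42, 3)
+//	// types.Finalized    if block 42 is at or below the finalized height,
+//	// types.CrossSafe    else if within the cross-safe range,
+//	// types.CrossUnsafe  else if within the cross-unsafe range,
+//	// types.LocalSafe    else if within the local-safe range,
+//	// types.LocalUnsafe  otherwise.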
diff --git a/op-supervisor/supervisor/backend/db/update.go b/op-supervisor/supervisor/backend/db/update.go
new file mode 100644
index 0000000000000..740127ef0d9e8
--- /dev/null
+++ b/op-supervisor/supervisor/backend/db/update.go
@@ -0,0 +1,96 @@
+package db
+
+import (
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/common"
+
+	"github.com/ethereum-optimism/optimism/op-service/eth"
+	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
+)
+
+func (db *ChainsDB) AddLog(
+	chain types.ChainID,
+	logHash common.Hash,
+	parentBlock eth.BlockID,
+	logIdx uint32,
+	execMsg *types.ExecutingMessage) error {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	logDB, ok := db.logDBs[chain]
+	if !ok {
+		return fmt.Errorf("cannot AddLog: %w: %v", ErrUnknownChain, chain)
+	}
+	return logDB.AddLog(logHash, parentBlock, logIdx, execMsg)
+}
+
+func (db *ChainsDB) SealBlock(chain types.ChainID, block eth.BlockRef) error {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	logDB, ok := db.logDBs[chain]
+	if !ok {
+		return fmt.Errorf("cannot SealBlock: %w: %v", ErrUnknownChain, chain)
+	}
+	err := logDB.SealBlock(block.ParentHash, block.ID(), block.Time)
+	if err != nil {
+		return fmt.Errorf("failed to seal block %v: %w", block, err)
+	}
+	return nil
+}
+
+func (db *ChainsDB) Rewind(chain types.ChainID, headBlockNum uint64) error {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	logDB, ok := db.logDBs[chain]
+	if !ok {
+		return fmt.Errorf("cannot Rewind: %w: %s", ErrUnknownChain, chain)
+	}
+	return logDB.Rewind(headBlockNum)
+}
+
+func (db *ChainsDB) UpdateLocalSafe(chain types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	localDB, ok := db.localDBs[chain]
+	if !ok {
+		return fmt.Errorf("cannot UpdateLocalSafe: %w: %v", ErrUnknownChain, chain)
+	}
+	return localDB.AddDerived(derivedFrom, lastDerived)
+}
+
+func (db *ChainsDB) UpdateCrossUnsafe(chain types.ChainID, crossUnsafe types.HeadPointer) error {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	if _, ok := db.crossUnsafe[chain]; !ok {
+		return fmt.Errorf("cannot UpdateCrossUnsafe: %w: %s", ErrUnknownChain, chain)
+	}
+	db.crossUnsafe[chain] = crossUnsafe
+	return nil
+}
+
+func (db *ChainsDB) UpdateCrossSafe(chain types.ChainID, l1View eth.BlockRef, lastCrossDerived eth.BlockRef) error {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	crossDB, ok := db.crossDBs[chain]
+	if !ok {
+		return fmt.Errorf("cannot UpdateCrossSafe: %w: %s", ErrUnknownChain, chain)
+	}
+	return crossDB.AddDerived(l1View, lastCrossDerived)
+}
+
+func (db *ChainsDB) UpdateFinalizedL1(finalized eth.BlockRef) error {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+
+	if db.finalizedL1.Number > finalized.Number {
+		return fmt.Errorf("cannot rewind finalized L1 head from %s to %s", db.finalizedL1, finalized)
+	}
+	db.finalizedL1 = finalized
+	return nil
+}
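+
+// Illustrative write sequence these helpers are intended for (a sketch;
+// logsOf, logHash and execMsgOf are hypothetical helpers, block N+1 builds on sealed block N):
+//
+//	for i, l := range logsOf(blockN1) {
+//		_ = db.AddLog(chainID, logHash(l), blockN.ID(), uint32(i), execMsgOf(l))
+//	}
+//	_ = db.SealBlock(chainID, blockN1) // seal N+1 on top of its parent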
diff --git a/op-supervisor/supervisor/backend/mock.go b/op-supervisor/supervisor/backend/mock.go
index 99ec630ddc362..0348711ad139e 100644
--- a/op-supervisor/supervisor/backend/mock.go
+++ b/op-supervisor/supervisor/backend/mock.go
@@ -6,12 +6,10 @@ import (
 	"io"
 	"sync/atomic"
 
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/common"
 
 	"github.com/ethereum-optimism/optimism/op-service/eth"
 	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/frontend"
 	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
 )
 
 type MockBackend struct {
@@ -52,14 +50,31 @@ func (m *MockBackend) CheckMessages(messages []types.Message, minSafety types.Sa
 	return nil
 }
 
-func (m *MockBackend) CheckBlock(chainID *hexutil.U256, blockHash common.Hash, blockNumber hexutil.Uint64) (types.SafetyLevel, error) {
-	return types.CrossUnsafe, nil
-}
-
 func (m *MockBackend) DerivedFrom(ctx context.Context, t types.ChainID, parentHash common.Hash, n uint64) (eth.BlockRef, error) {
 	return eth.BlockRef{}, nil
 }
 
+func (m *MockBackend) UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error) {
+	return types.ReferenceView{}, nil
+}
+
+func (m *MockBackend) SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error) {
+	return types.ReferenceView{}, nil
+}
+
+func (m *MockBackend) Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) {
+	return eth.BlockID{}, nil
+}
+
+func (m *MockBackend) UpdateLocalUnsafe(chainID types.ChainID, head eth.BlockRef) {
+}
+
+func (m *MockBackend) UpdateLocalSafe(chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) {
+}
+
+func (m *MockBackend) UpdateFinalizedL1(chainID types.ChainID, finalized eth.BlockRef) {
+}
+
 func (m *MockBackend) Close() error {
 	return nil
 }
diff --git a/op-supervisor/supervisor/backend/source/chain_processor.go b/op-supervisor/supervisor/backend/processors/chain_processor.go
similarity index 98%
rename from op-supervisor/supervisor/backend/source/chain_processor.go
rename to op-supervisor/supervisor/backend/processors/chain_processor.go
index 9c63950a1629b..b60abc040aabb 100644
--- a/op-supervisor/supervisor/backend/source/chain_processor.go
+++ b/op-supervisor/supervisor/backend/processors/chain_processor.go
@@ -1,4 +1,4 @@
-package source
+package processors
 
 import (
 	"context"
@@ -166,7 +166,7 @@ func (s *ChainProcessor) update(nextNum uint64) error {
 	return nil
 }
 
-func (s *ChainProcessor) OnNewHead(ctx context.Context, head eth.BlockRef) error {
+func (s *ChainProcessor) OnNewHead(head eth.BlockRef) error {
 	// update the latest target
 	s.lastHead.Store(head.Number)
 	// signal that we have something to process
diff --git a/op-supervisor/supervisor/backend/processors/client.go b/op-supervisor/supervisor/backend/processors/client.go
new file mode 100644
index 0000000000000..253308a514a3b
--- /dev/null
+++ b/op-supervisor/supervisor/backend/processors/client.go
@@ -0,0 +1,28 @@
+package processors
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/ethereum/go-ethereum/log"
+
+	"github.com/ethereum-optimism/optimism/op-service/client"
+	"github.com/ethereum-optimism/optimism/op-service/sources"
+	"github.com/ethereum-optimism/optimism/op-service/sources/caching"
+)
+
+// NewEthClient creates an Eth RPC client for event-log fetching.
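+// A minimal usage sketch (the endpoint, poll rate and provider kind are illustrative):
+//
+//	cl, err := NewEthClient(ctx, logger, metrics, "ws://localhost:9545", rpcClient,
+//		2*time.Second, false, sources.RPCKindStandard)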
+func NewEthClient(ctx context.Context, logger log.Logger, m caching.Metrics, rpc string, rpcClient client.RPC, + pollRate time.Duration, trustRPC bool, kind sources.RPCProviderKind) (*sources.L1Client, error) { + c, err := client.NewRPCWithClient(ctx, logger, rpc, rpcClient, pollRate) + if err != nil { + return nil, fmt.Errorf("failed to create new RPC client: %w", err) + } + + l1Client, err := sources.NewL1Client(c, logger, m, sources.L1ClientSimpleConfig(trustRPC, kind, 100)) + if err != nil { + return nil, fmt.Errorf("failed to connect client: %w", err) + } + return l1Client, nil +} diff --git a/op-supervisor/supervisor/backend/source/contracts/l2inbox.go b/op-supervisor/supervisor/backend/processors/contracts/l2inbox.go similarity index 100% rename from op-supervisor/supervisor/backend/source/contracts/l2inbox.go rename to op-supervisor/supervisor/backend/processors/contracts/l2inbox.go diff --git a/op-supervisor/supervisor/backend/source/contracts/l2inbox_test.go b/op-supervisor/supervisor/backend/processors/contracts/l2inbox_test.go similarity index 100% rename from op-supervisor/supervisor/backend/source/contracts/l2inbox_test.go rename to op-supervisor/supervisor/backend/processors/contracts/l2inbox_test.go diff --git a/op-supervisor/supervisor/backend/processors/crosssafe_verifier.go b/op-supervisor/supervisor/backend/processors/crosssafe_verifier.go new file mode 100644 index 0000000000000..2a2e69f95a387 --- /dev/null +++ b/op-supervisor/supervisor/backend/processors/crosssafe_verifier.go @@ -0,0 +1,129 @@ +package processors + +import ( + "context" + "errors" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +const ( + // The data may have changed, and we may have missed a poke, so re-attempt regularly. 
+	pollCrossSafeUpdateDuration = time.Second * 4
+	// Make sure to flush cross-safe updates to the DB regularly when there are large spans of data
+	maxCrossSafeUpdateDuration = time.Second * 4
+)
+
+type CrossSafeDBDeps interface {
+	LocalUnsafe() types.HeadPointer
+	CrossSafe(chainId types.ChainID) types.HeadPointer
+	CrossUnsafe(chainID types.ChainID) types.HeadPointer
+
+	Finalized(chainID types.ChainID) (eth.BlockID, error)
+
+	CrossDerivedFrom(chainID types.ChainID, derived eth.BlockID, logIndex uint32) (derivedFrom eth.BlockID, err error)
+	LocalDerivedFrom(chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockID, err error)
+
+	LogsIteratorAt(chainID types.ChainID, at types.HeadPointer) (logs.Iterator, error)
+	Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (includedIn eth.BlockID, err error)
+	UpdateCrossSafe(chain types.ChainID, l1View eth.BlockID, crossSafe types.HeadPointer) error
+}
+
+// CrossSafeVerifier iterates the local-safe data of a chain, and promotes blocks to cross-safe once dependencies are cross-safe
+type CrossSafeVerifier struct {
+	log log.Logger
+
+	chain types.ChainID
+
+	deps CrossSafeDBDeps
+
+	// scope of cross-safe work to resolve; TODO: not initialized yet, the worker requires this to be wired up
+	scope *Scope
+
+	// channel with capacity of 1, full if there is work to do
+	poke chan struct{}
+
+	// channel with capacity of 1, to signal work complete if running in synchronous mode
+	out chan struct{}
+
+	// lifetime management of the verifier
+	ctx    context.Context
+	cancel context.CancelFunc
+	wg     sync.WaitGroup
+}
+
+func NewCrossSafeVerifier(log log.Logger, chain types.ChainID, deps CrossSafeDBDeps) *CrossSafeVerifier {
+	ctx, cancel := context.WithCancel(context.Background())
+	out := &CrossSafeVerifier{
+		log:    log,
+		chain:  chain,
+		deps:   deps,
+		poke:   make(chan struct{}, 1),
+		out:    make(chan struct{}, 1),
+		ctx:    ctx,
+		cancel: cancel,
+	}
+	out.wg.Add(1)
+	go out.worker()
+	return out
+}
+
+func (s *CrossSafeVerifier) worker() {
+	defer s.wg.Done()
+
+	delay := time.NewTicker(pollCrossSafeUpdateDuration)
+	for {
+		if s.ctx.Err() != nil { // check if we are closing down
+			return
+		}
+
+		ctx, cancel := context.WithTimeout(s.ctx, maxCrossSafeUpdateDuration)
+		err := s.scope.Process(ctx)
+		cancel()
+		if err != nil {
+			if errors.Is(err, ctx.Err()) {
+				s.log.Debug("Processed some, but not all data", "err", err)
+			} else {
+				s.log.Error("Failed to process new block", "err", err)
+			}
+			// idle until the next update trigger, or until shutdown
+		} else {
+			s.log.Debug("Continuing cross-safe-processing")
+			continue
+		}
+
+		// await next time we process, or detect shutdown
+		select {
+		case <-s.ctx.Done():
+			delay.Stop()
+			return
+		case <-s.poke:
+			s.log.Debug("Continuing cross-safe verification after hint of new data")
+			continue
+		case <-delay.C:
+			s.log.Debug("Checking for cross-safe updates")
+			continue
+		}
+	}
+}
+
+func (s *CrossSafeVerifier) OnNewData() error {
+	// signal that we have something to process
+	select {
+	case s.poke <- struct{}{}:
+	default:
+		// already requested an update
+	}
+	return nil
+}
+
+func (s *CrossSafeVerifier) Close() {
+	s.cancel()
+	s.wg.Wait()
+}
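+
+// Illustrative lifecycle (a sketch; assumes the backend wires up deps, and the
+// scope, before relying on the worker):
+//
+//	v := NewCrossSafeVerifier(logger, chainID, deps)
+//	defer v.Close()   // stops the worker and waits for it to exit
+//	_ = v.OnNewData() // poke after new local-safe data was written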
"github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +const ( + // The data may have changed, and we may have missed a poke, so re-attempt regularly. + pollCrossUnsafeUpdateDuration = time.Second * 4 + // Make sure to flush cross-unsafe updates to the DB regularly when there are large spans of data + maxCrossUnsafeUpdateDuration = time.Second * 4 +) + +type CrossUnsafeDBDeps interface { + LocalUnsafe() types.HeadPointer + CrossSafe(chainId types.ChainID) types.HeadPointer + CrossUnsafe(chainID types.ChainID) types.HeadPointer + LogsIteratorAt(chainID types.ChainID, at types.HeadPointer) (logs.Iterator, error) + Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error + UpdateCrossUnsafe(chain types.ChainID, crossUnsafe types.HeadPointer) error +} + +// CrossUnsafeVerifier iterates the local-safe data of a chain, and promotes blocks to cross-safe once dependencies are cross-safe +type CrossUnsafeVerifier struct { + log log.Logger + + chain types.ChainID + + deps CrossUnsafeDBDeps + + // current cross-unsafe logs-DB iterator + iter logs.Iterator + + // channel with capacity of 1, full if there is work to do + poke chan struct{} + + // channel with capacity of 1, to signal work complete if running in synchroneous mode + out chan struct{} + + // lifetime management of the chain processor + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup +} + +func NewCrossUnsafeVerifier(log log.Logger, chain types.ChainID, deps CrossUnsafeDBDeps) *CrossUnsafeVerifier { + ctx, cancel := context.WithCancel(context.Background()) + out := &CrossUnsafeVerifier{ + log: log, + chain: chain, + deps: deps, + poke: make(chan struct{}, 1), + out: make(chan struct{}, 1), + ctx: ctx, + cancel: cancel, + } + out.wg.Add(1) + go out.worker() + return out +} + +func (s *CrossUnsafeVerifier) worker() { + defer s.wg.Done() + + delay := time.NewTicker(pollCrossUnsafeUpdateDuration) + for { + if s.ctx.Err() != nil { // check if we are closing down + return + } + + if err := s.update(); err != nil { + s.log.Error("Failed to process new block", "err", err) + // idle until next update trigger + } else { + s.log.Debug("Continuing cross-unsafe-processing") + continue + } + + // await next time we process, or detect shutdown + select { + case <-s.ctx.Done(): + delay.Stop() + return + case <-s.poke: + s.log.Debug("Continuing cross-unsafe verification after hint of new data") + continue + case <-delay.C: + s.log.Debug("Checking for cross-unsafe updates") + continue + } + } +} + +func (s *CrossUnsafeVerifier) update() error { + ctx, cancel := context.WithTimeout(s.ctx, maxCrossUnsafeUpdateDuration) + defer cancel() + + // TODO init iterator if needed + + iter, err := s.deps.LogsIteratorAt() + + err := s.iter.TraverseConditional(func(state logs.IteratorState) error { + // we can stop early, to make some progress, and not indefinitely iterate, when there is a lot of unsafe data. + if ctx.Err() != nil { + return ctx.Err() + } + + hash, num, ok := state.SealedBlock() + if !ok { + return entrydb.ErrFuture // maybe a more specific error for no-genesis case? + } + // TODO(#11693): reorg check in the future. To make sure that what we traverse is still canonical. 
+		_, _ = hash, num
+
+		_, _, ok = state.InitMessage()
+		if !ok {
+			return nil // no readable message, just an empty block
+		}
+
+		// check if it is an executing message. If so, check the dependency.
+		if execMsg := state.ExecMessage(); execMsg != nil {
+			chainID := types.ChainIDFromUInt64(uint64(execMsg.Chain))
+			if err := s.deps.Check(chainID, execMsg.BlockNum, execMsg.LogIdx, execMsg.Hash); err != nil {
+				return fmt.Errorf("failed to check %s: %w", execMsg, err)
+			}
+		}
+		return nil
+	})
+	if err == nil {
+		panic("expected reader to complete with an exit-error")
+	}
+
+	crossUnsafe, ptrErr := s.iter.HeadPointer()
+	if ptrErr != nil {
+		return fmt.Errorf("failed to get head pointer: %w", ptrErr)
+	}
+
+	// register the new cross-unsafe head, verified up to the current point
+	if uErr := s.deps.UpdateCrossUnsafe(s.chain, crossUnsafe); uErr != nil {
+		return fmt.Errorf("failed to write cross-unsafe update: %w", uErr)
+	}
+
+	// Running out of data (ErrFuture), or out of time, is the normal way to stop iterating; we can continue later.
+	if errors.Is(err, entrydb.ErrFuture) || errors.Is(err, ctx.Err()) {
+		return nil
+	}
+	return err
+}
+
+func (s *CrossUnsafeVerifier) OnNewData() error {
+	// signal that we have something to process
+	select {
+	case s.poke <- struct{}{}:
+	default:
+		// already requested an update
+	}
+	return nil
+}
+
+func (s *CrossUnsafeVerifier) Close() {
+	s.cancel()
+	s.wg.Wait()
+}
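+
+// Illustrative poke coalescing (a sketch; writeLogs is a hypothetical write path
+// that feeds the events DB):
+//
+//	v := NewCrossUnsafeVerifier(logger, chainID, deps)
+//	for _, blk := range newBlocks {
+//		writeLogs(blk)
+//		_ = v.OnNewData() // never blocks; at most one pending signal is kept
+//	}
+//	v.Close()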
diff --git a/op-supervisor/supervisor/backend/source/log_processor.go b/op-supervisor/supervisor/backend/processors/log_processor.go
similarity index 96%
rename from op-supervisor/supervisor/backend/source/log_processor.go
rename to op-supervisor/supervisor/backend/processors/log_processor.go
index d7f7e1fbeae0b..01846f32795c5 100644
--- a/op-supervisor/supervisor/backend/source/log_processor.go
+++ b/op-supervisor/supervisor/backend/processors/log_processor.go
@@ -1,4 +1,4 @@
-package source
+package processors
 
 import (
 	"context"
@@ -10,7 +10,7 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 
 	"github.com/ethereum-optimism/optimism/op-service/eth"
-	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/source/contracts"
+	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/processors/contracts"
 	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
 )
 
@@ -34,7 +34,7 @@ type logProcessor struct {
 	eventDecoder EventDecoder
 }
 
-func newLogProcessor(chain types.ChainID, logStore LogStorage) *logProcessor {
+func NewLogProcessor(chain types.ChainID, logStore LogStorage) LogProcessor {
 	return &logProcessor{
 		chain:    chain,
 		logStore: logStore,
diff --git a/op-supervisor/supervisor/backend/source/log_processor_test.go b/op-supervisor/supervisor/backend/processors/log_processor_test.go
similarity index 96%
rename from op-supervisor/supervisor/backend/source/log_processor_test.go
rename to op-supervisor/supervisor/backend/processors/log_processor_test.go
index 2e1322f55aed1..de28b1f6f5082 100644
--- a/op-supervisor/supervisor/backend/source/log_processor_test.go
+++ b/op-supervisor/supervisor/backend/processors/log_processor_test.go
@@ -1,4 +1,4 @@
-package source
+package processors
 
 import (
 	"context"
@@ -25,7 +25,7 @@ func TestLogProcessor(t *testing.T) {
 	}
 	t.Run("NoOutputWhenLogsAreEmpty", func(t *testing.T) {
 		store := &stubLogStorage{}
-		processor := newLogProcessor(logProcessorChainID, store)
+		processor := NewLogProcessor(logProcessorChainID, store)
 
 		err := processor.ProcessLogs(ctx, block1, ethTypes.Receipts{})
 		require.NoError(t, err)
@@ -59,7 +59,7 @@ func TestLogProcessor(t *testing.T) {
 		},
 	}
 	store := &stubLogStorage{}
-	processor := newLogProcessor(logProcessorChainID, store)
+	processor := NewLogProcessor(logProcessorChainID, store)
 
 	err := processor.ProcessLogs(ctx, block1, rcpts)
 	require.NoError(t, err)
@@ -115,7 +115,7 @@ func TestLogProcessor(t *testing.T) {
 		Hash: common.Hash{0xaa},
 	}
 	store := &stubLogStorage{}
-	processor := newLogProcessor(types.ChainID{4}, store)
+	processor := NewLogProcessor(types.ChainID{4}, store).(*logProcessor)
 	processor.eventDecoder = EventDecoderFn(func(l *ethTypes.Log) (types.ExecutingMessage, error) {
 		require.Equal(t, rcpts[0].Logs[0], l)
 		return execMsg, nil
diff --git a/op-supervisor/supervisor/backend/processors/scope.go b/op-supervisor/supervisor/backend/processors/scope.go
new file mode 100644
index 0000000000000..dd575291638d0
--- /dev/null
+++ b/op-supervisor/supervisor/backend/processors/scope.go
@@ -0,0 +1,235 @@
+package processors
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/common"
+
+	"github.com/ethereum-optimism/optimism/op-service/eth"
+	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
+	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs"
+	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
+)
+
+type EndStatus uint8
+
+const (
+	// OpenEnd is an end that we can make progress from.
+	OpenEnd EndStatus = iota
+
+	// BlockedEnd is a piece of work that needs to be unblocked by something else.
+	BlockedEnd
+
+	// ExhaustedEnd is a previously open end where we ran out of L1 data.
+	ExhaustedEnd
+
+	// InvalidEnd is an open end that is blocked on something that is not there.
+	InvalidEnd
+)
+
+type Deps interface {
+	Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (includedIn eth.BlockID, err error)
+
+	LocalDerivedFrom(chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockID, err error)
+}
+
+type End struct {
+	chainID types.ChainID
+
+	// Cached iterator, so we can proceed to efficiently get the next data when we need to.
+	iter logs.Iterator
+
+	// Parent block that the logs are building on top of.
+	parent types.BlockSeal
+	// Verified logs thus far. Index of the log yet to be verified (if any).
+	logsSince uint32
+	// Block that contains the logs
+	current types.BlockSeal
+	// Number of logs in current.
+	logsTotal uint32
+
+	// What current was locally-derived from.
+	localDerivedFrom types.BlockSeal
+
+	// Non-nil if the current log is trying to execute anything.
+	// Set to nil when the executing message has been verified.
+	Executing *types.ExecutingMessage
+
+	Status EndStatus
+}
+
+func (e *End) TryNext() error {
+	if e.Executing != nil {
+		panic("cannot continue before resolving execution link")
+	}
+	// TODO: read from iterator / DB, and update the End state
+
+	return nil
+}
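+
+// Illustrative status transitions of an End (inferred from Process and
+// ProcessOpenEnd below; a sketch, not an exhaustive state machine):
+//
+//	OpenEnd      -> BlockedEnd   // waiting on another chain's End
+//	OpenEnd      -> ExhaustedEnd // ran past the current L1Bound
+//	OpenEnd      -> InvalidEnd   // a dependency conflicts; an L2 reorg is needed
+//	ExhaustedEnd -> OpenEnd      // once the L1 bound is incremented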
+type Scope struct {
+	// Point in L1 that may be consumed up to.
+	// The L1 view is bounded, so we can process accurate cross-safe increments.
+	L1Bound types.BlockSeal
+
+	// The ends, one per chain, that we want to synchronously resolve dependencies of.
+	Ends []*End
+
+	// Chains we are tracking an End of
+	InvolvedChains map[types.ChainID]struct{}
+
+	deps Deps
+}
+
+func (s *Scope) Process(ctx context.Context) error {
+	if len(s.Ends) == 0 { // nothing to process
+		return nil
+	}
+	// We keep revisiting the set of ends, until there's no more change.
+	for {
+		anyChange := false
+		for _, end := range s.Ends {
+			// Only open ends may be processed.
+			// Other ends are done, or need to be unblocked by others.
+			if end.Status != OpenEnd {
+				continue
+			}
+			if err := s.ProcessOpenEnd(end); err != nil {
+				if errors.Is(err, entrydb.ErrFuture) {
+					// Insufficient data to proceed with this end. Continue with the next end.
+					continue
+				} else {
+					return fmt.Errorf("failed to process chain %s: %w", end.chainID, err)
+				}
+			} else {
+				anyChange = true
+			}
+		}
+		if !anyChange {
+			break
+		}
+	}
+	var openEnds, blockedEnds, exhaustedEnds, invalidEnds int
+	for _, end := range s.Ends {
+		switch end.Status {
+		case OpenEnd:
+			openEnds += 1
+		case BlockedEnd:
+			blockedEnds += 1
+		case ExhaustedEnd:
+			exhaustedEnds += 1
+		case InvalidEnd:
+			invalidEnds += 1
+		}
+	}
+	// If any ends are still open: we stopped early.
+	if openEnds > 0 {
+		// if we use a context for processing, we can hit this
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+		return errors.New("incomplete data, unable to proceed to next scope")
+	}
+	if invalidEnds > 0 {
+		return errors.New("found invalid end, blocked")
+	}
+	// If all ends are blocked: we found a cycle that cannot be resolved with additional L1 data,
+	// we'll need to reorg an L2 chain.
+	if blockedEnds == len(s.Ends) {
+		return errors.New("every end is blocked")
+	}
+	if exhaustedEnds == 0 {
+		return errors.New("expected to have exhausted L1 view")
+	}
+
+	// TODO increment L1 bound
+
+	// TODO Everything that was exhausted for L1 data should be marked as Open again
+	return nil
+}
+
+func (s *Scope) AddChain(chainID types.ChainID) {
+	end := &End{
+		chainID:          chainID,
+		iter:             nil,
+		parent:           types.BlockSeal{},
+		logsSince:        0,
+		current:          types.BlockSeal{},
+		logsTotal:        0,
+		localDerivedFrom: types.BlockSeal{},
+		Executing:        nil,
+		Status:           OpenEnd,
+	}
+	// TODO
+	s.Ends = append(s.Ends, end)
+	s.InvolvedChains[chainID] = struct{}{}
+}
+
+func (s *Scope) ProcessOpenEnd(end *End) error {
+	// If not derived within the L1Bound: put it in Exhausted.
+	// This is L2 data that we cannot yet touch, it's outside of view.
+	if end.localDerivedFrom.Number > s.L1Bound.Number {
+		end.Status = ExhaustedEnd
+		return nil
+	}
+
+	// If we run into the end of a block:
+	// -> mark it as cross-safe derived-from current L1 view
+	if end.logsSince == end.logsTotal {
+		// TODO: write cross-safe update.
+		// But subtle bug: if transitive block dependency is not cross-safe up to and including the seal,
+		// then it might never become cross-safe as a whole, and thus invalidate this cross-safe update.
+		return nil
+	}
+
+	// If we run into an executing message:
+	if end.Executing != nil {
+		execChID := types.ChainIDFromUInt64(uint64(end.Executing.Chain))
+
+		// Check that the message exists
+		includedIn, err := s.deps.Check(execChID, end.Executing.BlockNum, end.Executing.LogIdx, end.Executing.Hash)
+		if err != nil {
+			if errors.Is(err, entrydb.ErrConflict) {
+				// The message conflicts with the initiating side; this end cannot become cross-safe.
+				end.Status = InvalidEnd
+			}
+			return err
+		}
+		// Check if within L1 view: checking it is locally derived within L1 view should be enough,
+		// since within this Scope it would not be counted as cross-safe in L2 if it wasn't transitively within L1 view.
+ localDerivedFrom, err := s.deps.LocalDerivedFrom(execChID, includedIn) + if err != nil { + return err + } + if localDerivedFrom.Number > s.L1Bound.Number { + end.Status = ExhaustedEnd + return nil + } + + // Check that we are tracking the end of the requested chain + if _, ok := s.InvolvedChains[execChID]; !ok { + s.AddChain(execChID) + } + + // Check if the message is within L2 view + for _, other := range s.Ends { + if other.chainID != execChID { + continue + } + // By checking the logsSince, we can resolve intra-block messaging. + if end.Executing.BlockNum < other.current.Number || + (end.Executing.BlockNum == other.current.Number && end.Executing.LogIdx <= other.logsSince) { + // covered within tentative cross-safe range! + end.Executing = nil + return nil + } + } + + end.Status = BlockedEnd + return nil + } + + // Try to traverse on the open end + return end.TryNext() +} diff --git a/op-supervisor/supervisor/backend/safety/safety.go b/op-supervisor/supervisor/backend/safety/safety.go deleted file mode 100644 index 326c72755e35c..0000000000000 --- a/op-supervisor/supervisor/backend/safety/safety.go +++ /dev/null @@ -1,270 +0,0 @@ -package safety - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" -) - -type SafetyIndex interface { - // Updaters for the latest local safety status of each chain - UpdateLocalUnsafe(chainID types.ChainID, ref eth.BlockRef) error - UpdateLocalSafe(chainID types.ChainID, at eth.BlockRef, ref eth.BlockRef) error - UpdateFinalizeL1(ref eth.BlockRef) error - - // Getters for the latest safety status of each chain - UnsafeL2(chainID types.ChainID) (heads.HeadPointer, error) - CrossUnsafeL2(chainID types.ChainID) (heads.HeadPointer, error) - LocalSafeL2(chainID types.ChainID) (heads.HeadPointer, error) - CrossSafeL2(chainID types.ChainID) (heads.HeadPointer, error) - // We only finalize on full L2 block boundaries, hence not a heads.HeadPointer return. - FinalizedL2(chainId types.ChainID) (eth.BlockID, error) -} - -type ChainsDBClient interface { - IteratorStartingAt(chainID types.ChainID, sealedNum uint64, logIndex uint32) (logs.Iterator, error) - Check(chainID types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (h common.Hash, err error) -} - -type safetyIndex struct { - log log.Logger - - chains ChainsDBClient - - unsafe map[types.ChainID]*View - safe map[types.ChainID]*View - finalized map[types.ChainID]eth.BlockID - - // remember what each non-finalized L2 block is derived from - derivedFrom map[types.ChainID]map[common.Hash]eth.BlockRef - - // the last received L1 finality signal. - finalizedL1 eth.BlockRef -} - -func NewSafetyIndex(log log.Logger, chains ChainsDBClient) *safetyIndex { - return &safetyIndex{ - log: log, - chains: chains, - unsafe: make(map[types.ChainID]*View), - safe: make(map[types.ChainID]*View), - finalized: make(map[types.ChainID]eth.BlockID), - derivedFrom: make(map[types.ChainID]map[common.Hash]eth.BlockRef), - } -} - -// UpdateLocalUnsafe updates the local-unsafe view for the given chain, and advances the cross-unsafe status. 
-func (r *safetyIndex) UpdateLocalUnsafe(chainID types.ChainID, ref eth.BlockRef) error { - view, ok := r.safe[chainID] - if !ok { - iter, err := r.chains.IteratorStartingAt(chainID, ref.Number, 0) - if err != nil { - return fmt.Errorf("failed to open iterator for chain %s block %d", chainID, ref.Number) - } - view = &View{ - chainID: chainID, - iter: iter, - localView: heads.HeadPointer{ - LastSealedBlockHash: ref.Hash, - LastSealedBlockNum: ref.Number, - LastSealedTimestamp: ref.Time, - LogsSince: 0, - }, - localDerivedFrom: eth.BlockRef{}, - validWithinView: r.ValidWithinUnsafeView, - } - r.unsafe[chainID] = view - } else if err := view.UpdateLocal(eth.BlockRef{}, ref); err != nil { - return fmt.Errorf("failed to update local-unsafe: %w", err) - } - local, _ := r.unsafe[chainID].Local() - r.log.Debug("Updated local unsafe head", "chainID", chainID, "local", local) - r.advanceCrossUnsafe() - return nil -} - -// advanceCrossUnsafe calls Process on all cross-unsafe views. -func (r *safetyIndex) advanceCrossUnsafe() { - for chainID, view := range r.unsafe { - if err := view.Process(); err != nil { - r.log.Error("Failed to update cross-unsafe view", "chain", chainID, "err", err) - } - cross, _ := r.unsafe[chainID].Cross() - r.log.Debug("Updated cross unsafe head", "chainID", chainID, "cross", cross) - } -} - -// UpdateLocalSafe updates the local-safe view for the given chain, and advances the cross-safe status. -func (r *safetyIndex) UpdateLocalSafe( - chainID types.ChainID, at eth.BlockRef, ref eth.BlockRef) error { - view, ok := r.safe[chainID] - if !ok { - iter, err := r.chains.IteratorStartingAt(chainID, ref.Number, 0) - if err != nil { - return fmt.Errorf("failed to open iterator for chain %s block %d", chainID, ref.Number) - } - view = &View{ - chainID: chainID, - iter: iter, - localView: heads.HeadPointer{ - LastSealedBlockHash: ref.Hash, - LastSealedBlockNum: ref.Number, - LastSealedTimestamp: ref.Time, - LogsSince: 0, - }, - localDerivedFrom: at, - validWithinView: r.ValidWithinSafeView, - } - r.safe[chainID] = view - } else if err := view.UpdateLocal(at, ref); err != nil { - return fmt.Errorf("failed to update local-safe: %w", err) - } - - // register what this L2 block is derived from - m, ok := r.derivedFrom[chainID] - if !ok { - m = make(map[common.Hash]eth.BlockRef) - r.derivedFrom[chainID] = m - } - m[ref.Hash] = at - local, _ := r.safe[chainID].Local() - r.log.Debug("Updated local safe head", "chainID", chainID, "local", local) - r.advanceCrossSafe() - return nil -} - -// advanceCrossSafe calls Process on all cross-safe views, and advances the finalized safety status. -func (r *safetyIndex) advanceCrossSafe() { - for chainID, view := range r.safe { - if err := view.Process(); err != nil { - r.log.Error("Failed to update cross-safe view", "chain", chainID, "err", err) - } - cross, _ := r.safe[chainID].Cross() - r.log.Debug("Updated local safe head", "chainID", chainID, "cross", cross) - } - r.advanceFinalized() -} - -// UpdateFinalizeL1 updates the finalized L1 block, and advances the finalized safety status. -func (r *safetyIndex) UpdateFinalizeL1(ref eth.BlockRef) error { - if ref.Number <= r.finalizedL1.Number { - return fmt.Errorf("ignoring old L1 finality signal of %s, already have %s", ref, r.finalizedL1) - } - r.finalizedL1 = ref - r.log.Debug("Updated L1 finalized head", "L1finalized", ref) - r.advanceFinalized() - return nil -} - -// advanceFinalized should be called whenever the finalized L1 block, or the cross-safe history, changes. 
-// This then promotes the irreversible cross-safe L2 blocks to a finalized safety status. -func (r *safetyIndex) advanceFinalized() { - // Whatever was considered cross-safe at the finalized block-height can - // now be considered finalized, since the inputs have become irreversible. - for chainID, view := range r.safe { - crossSafe, err := view.Cross() - if err != nil { - r.log.Info("Failed to get cross-safe data, cannot finalize", "chain", chainID, "err", err) - continue - } - // TODO(#12184): we need to consider older cross-safe data, - // if we want to finalize something at all on longer lagging finality signal. - // Could consider just iterating over all derivedFrom contents? - l1Dep := r.derivedFrom[chainID][crossSafe.LastSealedBlockHash] - if l1Dep.Number < r.finalizedL1.Number { - r.finalized[chainID] = eth.BlockID{Hash: crossSafe.LastSealedBlockHash, Number: crossSafe.LastSealedBlockNum} - finalized := r.finalized[chainID] - r.log.Debug("Updated finalized head", "chainID", chainID, "finalized", finalized) - } - } -} - -// UnsafeL2 returns the latest unsafe L2 block of the given chain. -func (r *safetyIndex) UnsafeL2(chainID types.ChainID) (heads.HeadPointer, error) { - view, ok := r.unsafe[chainID] - if !ok { - return heads.HeadPointer{}, fmt.Errorf("no unsafe data for chain %s", chainID) - } - return view.Local() -} - -// CrossUnsafeL2 returns the latest cross-unsafe L2 block of the given chain. -func (r *safetyIndex) CrossUnsafeL2(chainID types.ChainID) (heads.HeadPointer, error) { - view, ok := r.unsafe[chainID] - if !ok { - return heads.HeadPointer{}, fmt.Errorf("no cross-unsafe data for chain %s", chainID) - } - return view.Cross() -} - -// LocalSafeL2 returns the latest local-safe L2 block of the given chain. -func (r *safetyIndex) LocalSafeL2(chainID types.ChainID) (heads.HeadPointer, error) { - view, ok := r.safe[chainID] - if !ok { - return heads.HeadPointer{}, fmt.Errorf("no local-safe data for chain %s", chainID) - } - return view.Local() -} - -// CrossSafeL2 returns the latest cross-safe L2 block of the given chain. -func (r *safetyIndex) CrossSafeL2(chainID types.ChainID) (heads.HeadPointer, error) { - view, ok := r.safe[chainID] - if !ok { - return heads.HeadPointer{}, fmt.Errorf("no cross-safe data for chain %s", chainID) - } - return view.Cross() -} - -// FinalizedL2 returns the latest finalized L2 block of the given chain. -func (r *safetyIndex) FinalizedL2(chainId types.ChainID) (eth.BlockID, error) { - finalized, ok := r.finalized[chainId] - if !ok { - return eth.BlockID{}, fmt.Errorf("not seen finalized data of chain %s at finalized L1 block %s", chainId, r.finalizedL1) - } - return finalized, nil -} - -// ValidWithinUnsafeView checks if the given executing message is in the database. -// unsafe view is meant to represent all of the database, and so no boundary checks are needed. -func (r *safetyIndex) ValidWithinUnsafeView(_ uint64, execMsg *types.ExecutingMessage) error { - execChainID := types.ChainIDFromUInt64(uint64(execMsg.Chain)) - _, err := r.chains.Check(execChainID, execMsg.BlockNum, execMsg.LogIdx, execMsg.Hash) - return err -} - -// ValidWithinSafeView checks if the given executing message is within the database, -// and within the L1 view of the caller. -func (r *safetyIndex) ValidWithinSafeView(l1View uint64, execMsg *types.ExecutingMessage) error { - execChainID := types.ChainIDFromUInt64(uint64(execMsg.Chain)) - - // Check that the initiating message, which was pulled in by the executing message, - // does indeed exist. 
And in which L2 block it exists (if any). - l2BlockHash, err := r.chains.Check(execChainID, execMsg.BlockNum, execMsg.LogIdx, execMsg.Hash) - if err != nil { - return err - } - // if the executing message falls within the execFinalized range, then nothing to check - execFinalized, ok := r.finalized[execChainID] - if ok && execFinalized.Number > execMsg.BlockNum { - return nil - } - // check if the L1 block of the executing message is known - execL1Block, ok := r.derivedFrom[execChainID][l2BlockHash] - if !ok { - return logs.ErrFuture // TODO(#12185) need to distinguish between same-data future, and new-data future - } - // check if the L1 block is within the view - if execL1Block.Number > l1View { - return fmt.Errorf("exec message depends on L2 block %s:%d, derived from L1 block %s, not within view yet: %w", - l2BlockHash, execMsg.BlockNum, execL1Block, logs.ErrFuture) - } - return nil -} - -var _ SafetyIndex = (*safetyIndex)(nil) diff --git a/op-supervisor/supervisor/backend/safety/views.go b/op-supervisor/supervisor/backend/safety/views.go deleted file mode 100644 index e1c704fa260f3..0000000000000 --- a/op-supervisor/supervisor/backend/safety/views.go +++ /dev/null @@ -1,91 +0,0 @@ -package safety - -import ( - "errors" - - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" -) - -type View struct { - chainID types.ChainID - - iter logs.Iterator - - localView heads.HeadPointer - localDerivedFrom eth.BlockRef - - validWithinView func(l1View uint64, execMsg *types.ExecutingMessage) error -} - -func (vi *View) Cross() (heads.HeadPointer, error) { - return vi.iter.HeadPointer() -} - -func (vi *View) Local() (heads.HeadPointer, error) { - if vi.localView == (heads.HeadPointer{}) { - return heads.HeadPointer{}, logs.ErrFuture - } - return vi.localView, nil -} - -func (vi *View) UpdateLocal(at eth.BlockRef, ref eth.BlockRef) error { - vi.localView = heads.HeadPointer{ - LastSealedBlockHash: ref.Hash, - LastSealedBlockNum: ref.Number, - //LastSealedTimestamp: ref.Time, - LogsSince: 0, - } - vi.localDerivedFrom = at - - // TODO(#11693): reorg check against existing DB - // TODO(#12186): localView may be larger than what DB contents we have - return nil -} - -func (vi *View) Process() error { - err := vi.iter.TraverseConditional(func(state logs.IteratorState) error { - hash, num, ok := state.SealedBlock() - if !ok { - return logs.ErrFuture // maybe a more specific error for no-genesis case? - } - // TODO(#11693): reorg check in the future. To make sure that what we traverse is still canonical. - _ = hash - // check if L2 block is within view - if !vi.localView.WithinRange(num, 0) { - return logs.ErrFuture - } - _, initLogIndex, ok := state.InitMessage() - if !ok { - return nil // no readable message, just an empty block - } - // check if the message is within view - if !vi.localView.WithinRange(num, initLogIndex) { - return logs.ErrFuture - } - // check if it is an executing message. If so, check the dependency - if execMsg := state.ExecMessage(); execMsg != nil { - // Check if executing message is within cross L2 view, - // relative to the L1 view of current message. - // And check if the message is valid to execute at all - // (i.e. if it exists on the initiating side). 
- // TODO(#12187): it's inaccurate to check with the view of the local-unsafe - // it should be limited to the L1 view at the time of the inclusion of execution of the message. - err := vi.validWithinView(vi.localDerivedFrom.Number, execMsg) - if err != nil { - return err - } - } - return nil - }) - if err == nil { - panic("expected reader to complete with an exit-error") - } - if errors.Is(err, logs.ErrFuture) { - // register the new cross-safe block as cross-safe up to the current L1 view - return nil - } - return err -} diff --git a/op-supervisor/supervisor/backend/source/chain.go b/op-supervisor/supervisor/backend/source/chain.go deleted file mode 100644 index 383a5fb74de87..0000000000000 --- a/op-supervisor/supervisor/backend/source/chain.go +++ /dev/null @@ -1,84 +0,0 @@ -package source - -import ( - "context" - "fmt" - "time" - - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/sources" - "github.com/ethereum-optimism/optimism/op-service/sources/caching" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" -) - -// TODO(optimism#11032) Make these configurable and a sensible default -const epochPollInterval = 3 * time.Second -const pollInterval = 2 * time.Second -const trustRpc = false -const rpcKind = sources.RPCKindStandard - -type Metrics interface { - caching.Metrics -} - -type Storage interface { - ChainsDBClientForLogProcessor - DatabaseRewinder - LatestBlockNum(chainID types.ChainID) (num uint64, ok bool) -} - -// ChainMonitor monitors a source L2 chain, retrieving the data required to populate the database and perform -// interop consolidation. It detects and notifies when reorgs occur. -type ChainMonitor struct { - log log.Logger - headMonitor *HeadMonitor - chainProcessor *ChainProcessor -} - -func NewChainMonitor(ctx context.Context, logger log.Logger, m Metrics, chainID types.ChainID, rpc string, client client.RPC, store Storage) (*ChainMonitor, error) { - logger = logger.New("chainID", chainID) - cl, err := newClient(ctx, logger, m, rpc, client, pollInterval, trustRpc, rpcKind) - if err != nil { - return nil, err - } - - // Create the log processor and fetcher - processLogs := newLogProcessor(chainID, store) - unsafeBlockProcessor := NewChainProcessor(logger, cl, chainID, processLogs, store) - - unsafeProcessors := []HeadProcessor{unsafeBlockProcessor} - - callback := newHeadUpdateProcessor(logger, unsafeProcessors, nil, nil) - headMonitor := NewHeadMonitor(logger, epochPollInterval, cl, callback) - - return &ChainMonitor{ - log: logger, - headMonitor: headMonitor, - chainProcessor: unsafeBlockProcessor, - }, nil -} - -func (c *ChainMonitor) Start() error { - c.log.Info("Started monitoring chain") - return c.headMonitor.Start() -} - -func (c *ChainMonitor) Stop() error { - c.chainProcessor.Close() - return c.headMonitor.Stop() -} - -func newClient(ctx context.Context, logger log.Logger, m caching.Metrics, rpc string, rpcClient client.RPC, pollRate time.Duration, trustRPC bool, kind sources.RPCProviderKind) (*sources.L1Client, error) { - c, err := client.NewRPCWithClient(ctx, logger, rpc, rpcClient, pollRate) - if err != nil { - return nil, fmt.Errorf("failed to create new RPC client: %w", err) - } - - l1Client, err := sources.NewL1Client(c, logger, m, sources.L1ClientSimpleConfig(trustRPC, kind, 100)) - if err != nil { - return nil, fmt.Errorf("failed to connect client: %w", err) - } - return l1Client, nil -} diff --git 
a/op-supervisor/supervisor/backend/source/chain_processor_test.go b/op-supervisor/supervisor/backend/source/chain_processor_test.go deleted file mode 100644 index af48d5ecdd30b..0000000000000 --- a/op-supervisor/supervisor/backend/source/chain_processor_test.go +++ /dev/null @@ -1,189 +0,0 @@ -package source - -/* TODO -import ( - "context" - "errors" - "fmt" - "testing" - - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/testlog" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - "github.com/stretchr/testify/require" -) - -var processorChainID = types.ChainIDFromUInt64(4) - -func TestUnsafeBlocksStage(t *testing.T) { - t.Run("IgnoreEventsAtOrPriorToStartingHead", func(t *testing.T) { - ctx := context.Background() - logger := testlog.Logger(t, log.LvlInfo) - client := &stubBlockByNumberSource{} - processor := &stubBlockProcessor{} - stage := NewChainProcessor(logger, client, processorChainID, processor, &stubRewinder{}) - stage.OnNewHead(ctx, eth.L1BlockRef{Number: 100}) - stage.OnNewHead(ctx, eth.L1BlockRef{Number: 99}) - - require.Empty(t, processor.processed) - require.Zero(t, client.calls) - }) - - t.Run("OutputNewHeadsWithNoMissedBlocks", func(t *testing.T) { - ctx := context.Background() - logger := testlog.Logger(t, log.LvlInfo) - client := &stubBlockByNumberSource{} - block0 := eth.L1BlockRef{Number: 100} - block1 := eth.L1BlockRef{Number: 101} - block2 := eth.L1BlockRef{Number: 102} - block3 := eth.L1BlockRef{Number: 103} - processor := &stubBlockProcessor{} - stage := NewChainProcessor(logger, client, processorChainID, block0, processor, &stubRewinder{}) - stage.OnNewHead(ctx, block1) - require.Equal(t, []eth.L1BlockRef{block1}, processor.processed) - stage.OnNewHead(ctx, block2) - require.Equal(t, []eth.L1BlockRef{block1, block2}, processor.processed) - stage.OnNewHead(ctx, block3) - require.Equal(t, []eth.L1BlockRef{block1, block2, block3}, processor.processed) - - require.Zero(t, client.calls, "should not need to request block info") - }) - - t.Run("IgnoreEventsAtOrPriorToPreviousHead", func(t *testing.T) { - ctx := context.Background() - logger := testlog.Logger(t, log.LvlInfo) - client := &stubBlockByNumberSource{} - block0 := eth.L1BlockRef{Number: 100} - block1 := eth.L1BlockRef{Number: 101} - processor := &stubBlockProcessor{} - stage := NewChainProcessor(logger, client, processorChainID, block0, processor, &stubRewinder{}) - stage.OnNewHead(ctx, block1) - require.NotEmpty(t, processor.processed) - require.Equal(t, []eth.L1BlockRef{block1}, processor.processed) - - stage.OnNewHead(ctx, block0) - stage.OnNewHead(ctx, block1) - require.Equal(t, []eth.L1BlockRef{block1}, processor.processed) - - require.Zero(t, client.calls, "should not need to request block info") - }) - - t.Run("OutputSkippedBlocks", func(t *testing.T) { - ctx := context.Background() - logger := testlog.Logger(t, log.LvlInfo) - client := &stubBlockByNumberSource{} - block0 := eth.L1BlockRef{Number: 100} - block3 := eth.L1BlockRef{Number: 103} - processor := &stubBlockProcessor{} - stage := NewChainProcessor(logger, client, processorChainID, block0, processor, &stubRewinder{}) - - stage.OnNewHead(ctx, block3) - require.Equal(t, []eth.L1BlockRef{makeBlockRef(101), makeBlockRef(102), block3}, processor.processed) - - require.Equal(t, 2, client.calls, "should only request the two missing blocks") - }) - - t.Run("DoNotUpdateLastBlockOnFetchError", 
func(t *testing.T) { - ctx := context.Background() - logger := testlog.Logger(t, log.LvlInfo) - client := &stubBlockByNumberSource{err: errors.New("boom")} - block0 := eth.L1BlockRef{Number: 100} - block3 := eth.L1BlockRef{Number: 103} - processor := &stubBlockProcessor{} - rewinder := &stubRewinder{} - stage := NewChainProcessor(logger, client, processorChainID, block0, processor, rewinder) - - stage.OnNewHead(ctx, block3) - require.Empty(t, processor.processed, "should not update any blocks because backfill failed") - - client.err = nil - stage.OnNewHead(ctx, block3) - require.Equal(t, []eth.L1BlockRef{makeBlockRef(101), makeBlockRef(102), block3}, processor.processed) - require.False(t, rewinder.rewindCalled, "should not rewind because no logs could have been written") - }) - - t.Run("DoNotUpdateLastBlockOnProcessorError", func(t *testing.T) { - ctx := context.Background() - logger := testlog.Logger(t, log.LvlInfo) - client := &stubBlockByNumberSource{} - block0 := eth.L1BlockRef{Number: 100} - block3 := eth.L1BlockRef{Number: 103} - processor := &stubBlockProcessor{err: errors.New("boom")} - rewinder := &stubRewinder{} - stage := NewChainProcessor(logger, client, processorChainID, block0, processor, rewinder) - - stage.OnNewHead(ctx, block3) - require.Equal(t, []eth.L1BlockRef{makeBlockRef(101)}, processor.processed, "Attempted to process block 101") - require.Equal(t, block0.Number, rewinder.rewoundTo, "should rewind to block before error") - - processor.err = nil - stage.OnNewHead(ctx, block3) - // Attempts to process block 101 again, then carries on - require.Equal(t, []eth.L1BlockRef{makeBlockRef(101), makeBlockRef(101), makeBlockRef(102), block3}, processor.processed) - }) - - t.Run("RewindWhenNewHeadProcessingFails", func(t *testing.T) { - ctx := context.Background() - logger := testlog.Logger(t, log.LvlInfo) - client := &stubBlockByNumberSource{} - block0 := eth.L1BlockRef{Number: 100} - block1 := eth.L1BlockRef{Number: 101} - processor := &stubBlockProcessor{err: errors.New("boom")} - rewinder := &stubRewinder{} - stage := NewChainProcessor(logger, client, processorChainID, block0, processor, rewinder) - - // No skipped blocks - stage.OnNewHead(ctx, block1) - require.Equal(t, []eth.L1BlockRef{block1}, processor.processed, "Attempted to process block 101") - require.Equal(t, block0.Number, rewinder.rewoundTo, "should rewind to block before error") - }) -} - -type stubBlockByNumberSource struct { - calls int - err error -} - -func (s *stubBlockByNumberSource) L1BlockRefByNumber(_ context.Context, number uint64) (eth.L1BlockRef, error) { - s.calls++ - if s.err != nil { - return eth.L1BlockRef{}, s.err - } - return makeBlockRef(number), nil -} - -type stubBlockProcessor struct { - processed []eth.L1BlockRef - err error -} - -func (s *stubBlockProcessor) ProcessBlock(_ context.Context, block eth.L1BlockRef) error { - s.processed = append(s.processed, block) - return s.err -} - -func makeBlockRef(number uint64) eth.L1BlockRef { - return eth.L1BlockRef{ - Number: number, - Hash: common.Hash{byte(number)}, - ParentHash: common.Hash{byte(number - 1)}, - Time: number * 1000, - } -} - -type stubRewinder struct { - rewoundTo uint64 - rewindCalled bool -} - -func (s *stubRewinder) Rewind(chainID types.ChainID, headBlockNum uint64) error { - if chainID != processorChainID { - return fmt.Errorf("chainID mismatch, expected %v but was %v", processorChainID, chainID) - } - s.rewoundTo = headBlockNum - s.rewindCalled = true - return nil -} -*/ diff --git 
a/op-supervisor/supervisor/backend/source/head_monitor.go b/op-supervisor/supervisor/backend/source/head_monitor.go deleted file mode 100644 index f5c8896693fd5..0000000000000 --- a/op-supervisor/supervisor/backend/source/head_monitor.go +++ /dev/null @@ -1,97 +0,0 @@ -package source - -import ( - "context" - "errors" - "sync/atomic" - "time" - - "github.com/ethereum-optimism/optimism/op-service/eth" - ethereum "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" -) - -type HeadMonitorClient interface { - eth.NewHeadSource - eth.L1BlockRefsSource -} - -type HeadChangeCallback interface { - OnNewUnsafeHead(ctx context.Context, block eth.L1BlockRef) - OnNewSafeHead(ctx context.Context, block eth.L1BlockRef) - OnNewFinalizedHead(ctx context.Context, block eth.L1BlockRef) -} - -// HeadMonitor monitors an L2 chain and sends notifications when the unsafe, safe or finalized head changes. -// Head updates may be coalesced, allowing the head block to skip forward multiple blocks. -// Reorgs are not identified. -type HeadMonitor struct { - log log.Logger - epochPollInterval time.Duration - rpc HeadMonitorClient - callback HeadChangeCallback - - started atomic.Bool - headsSub event.Subscription - safeSub ethereum.Subscription - finalizedSub ethereum.Subscription -} - -func NewHeadMonitor(logger log.Logger, epochPollInterval time.Duration, rpc HeadMonitorClient, callback HeadChangeCallback) *HeadMonitor { - return &HeadMonitor{ - log: logger, - epochPollInterval: epochPollInterval, - rpc: rpc, - callback: callback, - } -} - -func (h *HeadMonitor) Start() error { - if !h.started.CompareAndSwap(false, true) { - return errors.New("already started") - } - - // Keep subscribed to the unsafe head, which changes frequently. - h.headsSub = event.ResubscribeErr(time.Second*10, func(ctx context.Context, err error) (event.Subscription, error) { - if err != nil { - h.log.Warn("Resubscribing after failed heads subscription", "err", err) - } - return eth.WatchHeadChanges(ctx, h.rpc, h.callback.OnNewUnsafeHead) - }) - go func() { - err, ok := <-h.headsSub.Err() - if !ok { - return - } - h.log.Error("Heads subscription error", "err", err) - }() - - // Poll for the safe block and finalized block, which only change once per epoch at most and may be delayed. 
- h.safeSub = eth.PollBlockChanges(h.log, h.rpc, h.callback.OnNewSafeHead, eth.Safe, - h.epochPollInterval, time.Second*10) - h.finalizedSub = eth.PollBlockChanges(h.log, h.rpc, h.callback.OnNewFinalizedHead, eth.Finalized, - h.epochPollInterval, time.Second*10) - h.log.Info("Chain head monitoring started") - return nil -} - -func (h *HeadMonitor) Stop() error { - if !h.started.CompareAndSwap(true, false) { - return errors.New("already stopped") - } - - // stop heads feed - if h.headsSub != nil { - h.headsSub.Unsubscribe() - } - // stop polling for safe-head changes - if h.safeSub != nil { - h.safeSub.Unsubscribe() - } - // stop polling for finalized-head changes - if h.finalizedSub != nil { - h.finalizedSub.Unsubscribe() - } - return nil -} diff --git a/op-supervisor/supervisor/backend/source/head_monitor_test.go b/op-supervisor/supervisor/backend/source/head_monitor_test.go deleted file mode 100644 index d13dff48d851c..0000000000000 --- a/op-supervisor/supervisor/backend/source/head_monitor_test.go +++ /dev/null @@ -1,243 +0,0 @@ -package source - -import ( - "context" - "errors" - "fmt" - "math/rand" - "sync" - "testing" - "time" - - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/testlog" - "github.com/ethereum-optimism/optimism/op-service/testutils" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" - "github.com/stretchr/testify/require" -) - -const waitDuration = 10 * time.Second -const checkInterval = 10 * time.Millisecond - -func TestUnsafeHeadUpdates(t *testing.T) { - rng := rand.New(rand.NewSource(0x1337)) - header1 := testutils.RandomHeader(rng) - header2 := testutils.RandomHeader(rng) - - t.Run("NotifyOfNewHeads", func(t *testing.T) { - rpc, callback := startHeadMonitor(t) - - rpc.NewUnsafeHead(t, header1) - callback.RequireUnsafeHeaders(t, header1) - - rpc.NewUnsafeHead(t, header2) - callback.RequireUnsafeHeaders(t, header1, header2) - }) - - t.Run("ResubscribeOnError", func(t *testing.T) { - rpc, callback := startHeadMonitor(t) - - rpc.SubscriptionError(t) - - rpc.NewUnsafeHead(t, header1) - callback.RequireUnsafeHeaders(t, header1) - }) -} - -func TestSafeHeadUpdates(t *testing.T) { - rpc, callback := startHeadMonitor(t) - - head1 := eth.L1BlockRef{ - Hash: common.Hash{0xaa}, - Number: 1, - } - head2 := eth.L1BlockRef{ - Hash: common.Hash{0xbb}, - Number: 2, - } - - rpc.SetSafeHead(head1) - callback.RequireSafeHeaders(t, head1) - rpc.SetSafeHead(head2) - callback.RequireSafeHeaders(t, head1, head2) -} - -func TestFinalizedHeadUpdates(t *testing.T) { - rpc, callback := startHeadMonitor(t) - - head1 := eth.L1BlockRef{ - Hash: common.Hash{0xaa}, - Number: 1, - } - head2 := eth.L1BlockRef{ - Hash: common.Hash{0xbb}, - Number: 2, - } - - rpc.SetFinalizedHead(head1) - callback.RequireFinalizedHeaders(t, head1) - rpc.SetFinalizedHead(head2) - callback.RequireFinalizedHeaders(t, head1, head2) -} - -func startHeadMonitor(t *testing.T) (*stubRPC, *stubCallback) { - logger := testlog.Logger(t, log.LvlInfo) - rpc := &stubRPC{} - callback := &stubCallback{} - monitor := NewHeadMonitor(logger, 50*time.Millisecond, rpc, callback) - require.NoError(t, monitor.Start()) - t.Cleanup(func() { - require.NoError(t, monitor.Stop()) - }) - return rpc, callback -} - -type stubCallback struct { - sync.Mutex - unsafe []eth.L1BlockRef - safe []eth.L1BlockRef - finalized []eth.L1BlockRef -} - -func (s *stubCallback) 
RequireUnsafeHeaders(t *testing.T, heads ...*types.Header) { - expected := make([]eth.L1BlockRef, len(heads)) - for i, head := range heads { - expected[i] = eth.InfoToL1BlockRef(eth.HeaderBlockInfo(head)) - } - s.requireHeaders(t, func(s *stubCallback) []eth.L1BlockRef { return s.unsafe }, expected) -} - -func (s *stubCallback) RequireSafeHeaders(t *testing.T, expected ...eth.L1BlockRef) { - s.requireHeaders(t, func(s *stubCallback) []eth.L1BlockRef { return s.safe }, expected) -} - -func (s *stubCallback) RequireFinalizedHeaders(t *testing.T, expected ...eth.L1BlockRef) { - s.requireHeaders(t, func(s *stubCallback) []eth.L1BlockRef { return s.finalized }, expected) -} - -func (s *stubCallback) requireHeaders(t *testing.T, getter func(*stubCallback) []eth.L1BlockRef, expected []eth.L1BlockRef) { - require.Eventually(t, func() bool { - s.Lock() - defer s.Unlock() - return len(getter(s)) >= len(expected) - }, waitDuration, checkInterval) - s.Lock() - defer s.Unlock() - require.Equal(t, expected, getter(s)) -} - -func (s *stubCallback) OnNewUnsafeHead(ctx context.Context, block eth.L1BlockRef) { - s.Lock() - defer s.Unlock() - s.unsafe = append(s.unsafe, block) -} - -func (s *stubCallback) OnNewSafeHead(ctx context.Context, block eth.L1BlockRef) { - s.Lock() - defer s.Unlock() - s.safe = append(s.safe, block) -} - -func (s *stubCallback) OnNewFinalizedHead(ctx context.Context, block eth.L1BlockRef) { - s.Lock() - defer s.Unlock() - s.finalized = append(s.finalized, block) -} - -var _ HeadChangeCallback = (*stubCallback)(nil) - -type stubRPC struct { - sync.Mutex - sub *mockSubscription - - safeHead eth.L1BlockRef - finalizedHead eth.L1BlockRef -} - -func (s *stubRPC) SubscribeNewHead(_ context.Context, unsafeCh chan<- *types.Header) (ethereum.Subscription, error) { - s.Lock() - defer s.Unlock() - if s.sub != nil { - return nil, errors.New("already subscribed to unsafe heads") - } - errChan := make(chan error) - s.sub = &mockSubscription{errChan, unsafeCh, s} - return s.sub, nil -} - -func (s *stubRPC) SetSafeHead(head eth.L1BlockRef) { - s.Lock() - defer s.Unlock() - s.safeHead = head -} - -func (s *stubRPC) SetFinalizedHead(head eth.L1BlockRef) { - s.Lock() - defer s.Unlock() - s.finalizedHead = head -} - -func (s *stubRPC) L1BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L1BlockRef, error) { - s.Lock() - defer s.Unlock() - switch label { - case eth.Safe: - if s.safeHead == (eth.L1BlockRef{}) { - return eth.L1BlockRef{}, errors.New("no unsafe head") - } - return s.safeHead, nil - case eth.Finalized: - if s.finalizedHead == (eth.L1BlockRef{}) { - return eth.L1BlockRef{}, errors.New("no finalized head") - } - return s.finalizedHead, nil - default: - return eth.L1BlockRef{}, fmt.Errorf("unknown label: %v", label) - } -} - -func (s *stubRPC) NewUnsafeHead(t *testing.T, header *types.Header) { - s.WaitForSub(t) - s.Lock() - defer s.Unlock() - require.NotNil(t, s.sub, "Attempting to publish a header with no subscription") - s.sub.headers <- header -} - -func (s *stubRPC) SubscriptionError(t *testing.T) { - s.WaitForSub(t) - s.Lock() - defer s.Unlock() - s.sub.errChan <- errors.New("subscription error") - s.sub = nil -} - -func (s *stubRPC) WaitForSub(t *testing.T) { - require.Eventually(t, func() bool { - s.Lock() - defer s.Unlock() - return s.sub != nil - }, waitDuration, checkInterval, "Head monitor did not subscribe to unsafe head") -} - -var _ HeadMonitorClient = (*stubRPC)(nil) - -type mockSubscription struct { - errChan chan error - headers chan<- *types.Header - rpc 
*stubRPC -} - -func (m *mockSubscription) Unsubscribe() { - fmt.Println("Unsubscribed") - m.rpc.Lock() - defer m.rpc.Unlock() - m.rpc.sub = nil -} - -func (m *mockSubscription) Err() <-chan error { - return m.errChan -} diff --git a/op-supervisor/supervisor/backend/source/head_processor.go b/op-supervisor/supervisor/backend/source/head_processor.go deleted file mode 100644 index 6a0f867ac61aa..0000000000000 --- a/op-supervisor/supervisor/backend/source/head_processor.go +++ /dev/null @@ -1,76 +0,0 @@ -package source - -import ( - "context" - - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" -) - -type HeadProcessor interface { - OnNewHead(ctx context.Context, head eth.L1BlockRef) error -} - -type HeadProcessorFn func(ctx context.Context, head eth.L1BlockRef) error - -func (f HeadProcessorFn) OnNewHead(ctx context.Context, head eth.L1BlockRef) error { - return f(ctx, head) -} - -// headUpdateProcessor handles head update events and routes them to the appropriate handlers -type headUpdateProcessor struct { - log log.Logger - unsafeProcessors []HeadProcessor - safeProcessors []HeadProcessor - finalizedProcessors []HeadProcessor -} - -func newHeadUpdateProcessor(log log.Logger, unsafeProcessors []HeadProcessor, safeProcessors []HeadProcessor, finalizedProcessors []HeadProcessor) *headUpdateProcessor { - return &headUpdateProcessor{ - log: log, - unsafeProcessors: unsafeProcessors, - safeProcessors: safeProcessors, - finalizedProcessors: finalizedProcessors, - } -} - -func (n *headUpdateProcessor) OnNewUnsafeHead(ctx context.Context, block eth.L1BlockRef) { - n.log.Debug("New unsafe head", "block", block) - for _, processor := range n.unsafeProcessors { - if err := processor.OnNewHead(ctx, block); err != nil { - n.log.Error("unsafe-head processing failed", "err", err) - } - } -} - -func (n *headUpdateProcessor) OnNewSafeHead(ctx context.Context, block eth.L1BlockRef) { - n.log.Debug("New safe head", "block", block) - for _, processor := range n.safeProcessors { - if err := processor.OnNewHead(ctx, block); err != nil { - n.log.Error("safe-head processing failed", "err", err) - } - } -} - -func (n *headUpdateProcessor) OnNewFinalizedHead(ctx context.Context, block eth.L1BlockRef) { - n.log.Debug("New finalized head", "block", block) - for _, processor := range n.finalizedProcessors { - if err := processor.OnNewHead(ctx, block); err != nil { - n.log.Error("finalized-head processing failed", "err", err) - } - } -} - -// OnNewHead is a util function to turn a head-signal processor into head-pointer updater -func OnNewHead(id types.ChainID, apply func(id types.ChainID, v heads.HeadPointer) error) HeadProcessorFn { - return func(ctx context.Context, head eth.L1BlockRef) error { - return apply(id, heads.HeadPointer{ - LastSealedBlockHash: head.Hash, - LastSealedBlockNum: head.Number, - LogsSince: 0, - }) - } -} diff --git a/op-supervisor/supervisor/backend/source/head_processor_test.go b/op-supervisor/supervisor/backend/source/head_processor_test.go deleted file mode 100644 index f684667fa62b8..0000000000000 --- a/op-supervisor/supervisor/backend/source/head_processor_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package source - -import ( - "context" - "testing" - - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/testlog" - 
"github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - "github.com/stretchr/testify/require" -) - -func TestHeadUpdateProcessor(t *testing.T) { - t.Run("NotifyUnsafeHeadProcessors", func(t *testing.T) { - logger := testlog.Logger(t, log.LvlInfo) - processed := make([]eth.L1BlockRef, 3) - makeProcessor := func(idx int) HeadProcessor { - return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) error { - processed[idx] = head - return nil - }) - } - headUpdates := newHeadUpdateProcessor(logger, []HeadProcessor{makeProcessor(0), makeProcessor(1), makeProcessor(2)}, nil, nil) - block := eth.L1BlockRef{Number: 110, Hash: common.Hash{0xaa}} - headUpdates.OnNewUnsafeHead(context.Background(), block) - require.Equal(t, []eth.L1BlockRef{block, block, block}, processed) - }) - - t.Run("NotifySafeHeadProcessors", func(t *testing.T) { - logger := testlog.Logger(t, log.LvlInfo) - processed := make([]eth.L1BlockRef, 3) - makeProcessor := func(idx int) HeadProcessor { - return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) error { - processed[idx] = head - return nil - }) - } - headUpdates := newHeadUpdateProcessor(logger, nil, []HeadProcessor{makeProcessor(0), makeProcessor(1), makeProcessor(2)}, nil) - block := eth.L1BlockRef{Number: 110, Hash: common.Hash{0xaa}} - headUpdates.OnNewSafeHead(context.Background(), block) - require.Equal(t, []eth.L1BlockRef{block, block, block}, processed) - }) - - t.Run("NotifyFinalizedHeadProcessors", func(t *testing.T) { - logger := testlog.Logger(t, log.LvlInfo) - processed := make([]eth.L1BlockRef, 3) - makeProcessor := func(idx int) HeadProcessor { - return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) error { - processed[idx] = head - return nil - }) - } - headUpdates := newHeadUpdateProcessor(logger, nil, nil, []HeadProcessor{makeProcessor(0), makeProcessor(1), makeProcessor(2)}) - block := eth.L1BlockRef{Number: 110, Hash: common.Hash{0xaa}} - headUpdates.OnNewFinalizedHead(context.Background(), block) - require.Equal(t, []eth.L1BlockRef{block, block, block}, processed) - }) -} diff --git a/op-supervisor/supervisor/frontend/frontend.go b/op-supervisor/supervisor/frontend/frontend.go index b77b6b3edeebe..6b87b219e075a 100644 --- a/op-supervisor/supervisor/frontend/frontend.go +++ b/op-supervisor/supervisor/frontend/frontend.go @@ -3,11 +3,9 @@ package frontend import ( "context" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" + "github.com/ethereum/go-ethereum/common" ) type AdminBackend interface { @@ -19,19 +17,22 @@ type AdminBackend interface { type QueryBackend interface { CheckMessage(identifier types.Identifier, payloadHash common.Hash) (types.SafetyLevel, error) CheckMessages(messages []types.Message, minSafety types.SafetyLevel) error - CheckBlock(chainID *hexutil.U256, blockHash common.Hash, blockNumber hexutil.Uint64) (types.SafetyLevel, error) - DerivedFrom(ctx context.Context, chainID types.ChainID, blockHash common.Hash, blockNumber uint64) (eth.BlockRef, error) + DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockID, err error) + UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error) + SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error) + Finalized(ctx 
context.Context, chainID types.ChainID) (eth.BlockID, error) } type UpdatesBackend interface { - UpdateLocalUnsafe(chainID types.ChainID, head eth.BlockRef) - UpdateLocalSafe(chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) - UpdateFinalizedL1(chainID types.ChainID, finalized eth.BlockRef) + UpdateLocalUnsafe(chainID types.ChainID, head eth.BlockRef) error + UpdateLocalSafe(chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error + UpdateFinalizedL1(chainID types.ChainID, finalized eth.BlockRef) error } type Backend interface { AdminBackend QueryBackend + UpdatesBackend } type QueryFrontend struct { @@ -53,23 +54,19 @@ func (q *QueryFrontend) CheckMessages( } func (q *QueryFrontend) UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error) { - // TODO(#12358): attach to backend - return types.ReferenceView{}, nil + return q.Supervisor.UnsafeView(ctx, chainID, unsafe) } func (q *QueryFrontend) SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error) { - // TODO(#12358): attach to backend - return types.ReferenceView{}, nil + return q.Supervisor.SafeView(ctx, chainID, safe) } func (q *QueryFrontend) Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) { - // TODO(#12358): attach to backend - return eth.BlockID{}, nil + return q.Supervisor.Finalized(ctx, chainID) } -func (q *QueryFrontend) DerivedFrom(ctx context.Context, chainID types.ChainID, blockHash common.Hash, blockNumber uint64) (eth.BlockRef, error) { - // TODO(#12358): attach to backend - return eth.BlockRef{}, nil +func (q *QueryFrontend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockID, err error) { + return q.Supervisor.DerivedFrom(ctx, chainID, derived) } type AdminFrontend struct { @@ -95,14 +92,14 @@ type UpdatesFrontend struct { Supervisor UpdatesBackend } -func (u *UpdatesFrontend) UpdateLocalUnsafe(chainID types.ChainID, head eth.BlockRef) { - u.Supervisor.UpdateLocalUnsafe(chainID, head) +func (u *UpdatesFrontend) UpdateLocalUnsafe(chainID types.ChainID, head eth.BlockRef) error { + return u.Supervisor.UpdateLocalUnsafe(chainID, head) } -func (u *UpdatesFrontend) UpdateLocalSafe(chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) { - u.Supervisor.UpdateLocalSafe(chainID, derivedFrom, lastDerived) +func (u *UpdatesFrontend) UpdateLocalSafe(chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error { + return u.Supervisor.UpdateLocalSafe(chainID, derivedFrom, lastDerived) } -func (u *UpdatesFrontend) UpdateFinalizedL1(chainID types.ChainID, finalized eth.BlockRef) { - u.Supervisor.UpdateFinalizedL1(chainID, finalized) +func (u *UpdatesFrontend) UpdateFinalizedL1(chainID types.ChainID, finalized eth.BlockRef) error { + return u.Supervisor.UpdateFinalizedL1(chainID, finalized) } diff --git a/op-supervisor/supervisor/service.go b/op-supervisor/supervisor/service.go index 47fcb3e9ec23e..128fc557adce9 100644 --- a/op-supervisor/supervisor/service.go +++ b/op-supervisor/supervisor/service.go @@ -149,6 +149,11 @@ func (su *SupervisorService) initRPCServer(cfg *config.Config) error { Service: &frontend.QueryFrontend{Supervisor: su.backend}, Authenticated: false, }) + server.AddAPI(rpc.API{ + Namespace: "supervisor", + Service: &frontend.UpdatesFrontend{Supervisor: su.backend}, + Authenticated: false, + }) su.rpcServer = server return nil } diff --git 
a/op-supervisor/supervisor/types/types.go b/op-supervisor/supervisor/types/types.go
index 0224b9c29e93c..d7df3ac7ae36a 100644
--- a/op-supervisor/supervisor/types/types.go
+++ b/op-supervisor/supervisor/types/types.go
@@ -23,6 +23,11 @@ type ExecutingMessage struct {
 	Hash common.Hash
 }
 
+func (s *ExecutingMessage) String() string {
+	return fmt.Sprintf("ExecMsg(chain: %d, block: %d, log: %d, time: %d, logHash: %s)",
+		s.Chain, s.BlockNum, s.LogIdx, s.Timestamp, s.Hash)
+}
+
 type Message struct {
 	Identifier Identifier `json:"identifier"`
 	PayloadHash common.Hash `json:"payloadHash"`
@@ -171,3 +176,49 @@ type ReferenceView struct {
 func (v ReferenceView) String() string {
 	return fmt.Sprintf("View(local: %s, cross: %s)", v.Local, v.Cross)
 }
+
+type HeadPointer struct {
+	// The parent block that we build logs after
+	Pre BlockSeal
+
+	// Number of logs that have been verified since the Pre block.
+	// These logs are contained in the block that builds on top of the Pre block.
+	LogsSince uint32
+
+	// The block that contains the logs.
+	// May be zeroed if there is no known post-state seal yet (e.g. during block-building).
+	// Post.Number == Pre.Number+1
+	Post BlockSeal
+}
+
+// WithinRange checks if the given log, in the given block,
+// is within range (i.e. before or equal to the head-pointer).
+// This does not guarantee that the log exists.
+func (ptr *HeadPointer) WithinRange(blockNum uint64, logIdx uint32) bool {
+	if ptr.Pre.Hash == (common.Hash{}) {
+		return false // no block yet
+	}
+	return blockNum <= ptr.Pre.Number ||
+		(blockNum == ptr.Pre.Number+1 && logIdx < ptr.LogsSince)
+}
+
+func (ptr *HeadPointer) IsSealed(blockNum uint64) bool {
+	if ptr.Pre.Hash == (common.Hash{}) {
+		return false // no block yet
+	}
+	return blockNum <= ptr.Pre.Number
+}
+
+type BlockSeal struct {
+	Hash common.Hash
+	Number uint64
+	Timestamp uint64
+}
+
+func (s BlockSeal) String() string {
+	return fmt.Sprintf("BlockSeal(hash: %s, number: %d, time: %d)", s.Hash, s.Number, s.Timestamp)
+}
+
+func (s BlockSeal) ID() eth.BlockID {
+	return eth.BlockID{Hash: s.Hash, Number: s.Number}
+}
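
A note on the HeadPointer type added above: the sketch below illustrates the intended range semantics in a standalone, runnable form. Hash, BlockSeal, and HeadPointer here are simplified local stand-ins, not the op-supervisor types; the behavior mirrors the WithinRange rule in the hunk above.

package main

import "fmt"

// Simplified stand-ins for the supervisor types (illustrative only).
type Hash [32]byte

type BlockSeal struct {
	Hash      Hash
	Number    uint64
	Timestamp uint64
}

type HeadPointer struct {
	Pre       BlockSeal // last fully-sealed block
	LogsSince uint32    // verified logs in the block building on Pre
	Post      BlockSeal // block containing those logs (Pre.Number+1), may be zero
}

// WithinRange: a log (blockNum, logIdx) is covered if its block is fully
// sealed, or if it is one of the LogsSince verified logs of the
// in-progress block on top of Pre.
func (ptr *HeadPointer) WithinRange(blockNum uint64, logIdx uint32) bool {
	if ptr.Pre.Hash == (Hash{}) {
		return false // no block sealed yet
	}
	return blockNum <= ptr.Pre.Number ||
		(blockNum == ptr.Pre.Number+1 && logIdx < ptr.LogsSince)
}

func main() {
	ptr := HeadPointer{
		Pre:       BlockSeal{Hash: Hash{0xaa}, Number: 100},
		LogsSince: 3, // logs 0,1,2 of block 101 are verified
	}
	fmt.Println(ptr.WithinRange(100, 7)) // true: block 100 is fully sealed
	fmt.Println(ptr.WithinRange(101, 2)) // true: log 2 < LogsSince
	fmt.Println(ptr.WithinRange(101, 3)) // false: log 3 not yet verified
	fmt.Println(ptr.WithinRange(102, 0)) // false: beyond the head
}

The second clause is what lets the supervisor track per-log progress inside the block that is still being built on top of the last sealed block.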
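
The advanceFinalized logic removed earlier in this diff encodes the core finality rule: a cross-safe L2 block becomes finalized once the L1 block it was derived from is itself finalized. Below is a toy version of that promotion rule, with all types reduced to local stand-ins; derivedFrom is a hypothetical hash-to-L1-block map standing in for the safety index's per-chain derivedFrom data.

package main

import "fmt"

type BlockID struct {
	Hash   [32]byte
	Number uint64
}

// Promotion rule from the (removed) safety index: an L2 block may be
// considered finalized once its L1 dependency is below the finalized
// L1 height, since its inputs have become irreversible.
func promoteFinalized(crossSafe BlockID, derivedFrom map[[32]byte]BlockID, finalizedL1 BlockID) (BlockID, bool) {
	l1Dep, ok := derivedFrom[crossSafe.Hash]
	if !ok {
		return BlockID{}, false // derivation not known yet
	}
	if l1Dep.Number < finalizedL1.Number { // strict <, as in the removed code
		return crossSafe, true
	}
	return BlockID{}, false
}

func main() {
	l2 := BlockID{Hash: [32]byte{0xaa}, Number: 42}
	derivedFrom := map[[32]byte]BlockID{
		l2.Hash: {Hash: [32]byte{0x01}, Number: 500},
	}
	fin, ok := promoteFinalized(l2, derivedFrom, BlockID{Number: 600})
	fmt.Println(ok, fin.Number) // true 42: L1 dep 500 < finalized 600
	_, ok = promoteFinalized(l2, derivedFrom, BlockID{Number: 400})
	fmt.Println(ok) // false: L1 dep not finalized yet
}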
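
Similarly, the removed ValidWithinSafeView check boils down to two short-circuits: finalized data needs no view check, and anything derived from an L1 block above the caller's L1 view is still in the future. A minimal sketch of that decision, with invented parameter names; the real check also verifies that the initiating message exists in the log DB, which is elided here.

package main

import (
	"errors"
	"fmt"
)

var errFuture = errors.New("future data")

// Rule from the (removed) ValidWithinSafeView: an executing message is
// only valid within an L1 view if the L2 block holding the initiating
// message was derived from an L1 block at or below that view.
func validWithinSafeView(l1View, execL1BlockNum, finalizedNum, execBlockNum uint64) error {
	if execBlockNum < finalizedNum {
		return nil // already finalized: nothing to check
	}
	if execL1BlockNum > l1View {
		return fmt.Errorf("L1 block %d not within view %d: %w", execL1BlockNum, l1View, errFuture)
	}
	return nil
}

func main() {
	fmt.Println(validWithinSafeView(500, 450, 0, 10))  // <nil>: within view
	fmt.Println(validWithinSafeView(500, 550, 0, 10))  // future error
	fmt.Println(validWithinSafeView(500, 550, 20, 10)) // <nil>: already finalized
}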
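
Finally, the UpdatesBackend/UpdatesFrontend reshaping above changes the update methods from fire-and-forget to error-returning. A small sketch of why that matters to a caller, using hypothetical stub types; ChainID, BlockRef, and stubBackend are illustrative stand-ins, not the real op-service types.

package main

import "fmt"

// Stand-ins for types.ChainID and eth.BlockRef (illustrative only).
type ChainID uint64
type BlockRef struct{ Number uint64 }

// Mirrors the reshaped UpdatesBackend: updates now report errors.
type UpdatesBackend interface {
	UpdateLocalUnsafe(chainID ChainID, head BlockRef) error
}

// Forwarding frontend, as in the diff: the error reaches the RPC caller.
type UpdatesFrontend struct {
	Supervisor UpdatesBackend
}

func (u *UpdatesFrontend) UpdateLocalUnsafe(chainID ChainID, head BlockRef) error {
	return u.Supervisor.UpdateLocalUnsafe(chainID, head)
}

// stubBackend rejects updates for unknown chains; with the old
// void-returning signature this failure was invisible to the caller.
type stubBackend struct{ known ChainID }

func (s *stubBackend) UpdateLocalUnsafe(chainID ChainID, _ BlockRef) error {
	if chainID != s.known {
		return fmt.Errorf("unknown chain %d", chainID)
	}
	return nil
}

func main() {
	front := &UpdatesFrontend{Supervisor: &stubBackend{known: 10}}
	fmt.Println(front.UpdateLocalUnsafe(10, BlockRef{Number: 1})) // <nil>
	fmt.Println(front.UpdateLocalUnsafe(99, BlockRef{Number: 1})) // unknown chain 99
}

With the void-returning signatures, a rejected update could only be logged on the supervisor side; returning the error lets the RPC caller retry or surface the failure.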