diff --git a/op-supervisor/supervisor/backend/backend.go b/op-supervisor/supervisor/backend/backend.go index 57b17e591059..54b2f2eae20a 100644 --- a/op-supervisor/supervisor/backend/backend.go +++ b/op-supervisor/supervisor/backend/backend.go @@ -21,7 +21,6 @@ import ( "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/source" - backendTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/frontend" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -191,7 +190,7 @@ func (su *SupervisorBackend) CheckMessage(identifier types.Identifier, payloadHa chainID := identifier.ChainID blockNum := identifier.BlockNumber logIdx := identifier.LogIndex - i, err := su.db.Check(chainID, blockNum, uint32(logIdx), backendTypes.TruncateHash(payloadHash)) + i, err := su.db.Check(chainID, blockNum, uint32(logIdx), payloadHash) if errors.Is(err, logs.ErrFuture) { return types.Unsafe, nil } diff --git a/op-supervisor/supervisor/backend/db/db.go b/op-supervisor/supervisor/backend/db/db.go index ac8c5f506ca5..184be4df76c1 100644 --- a/op-supervisor/supervisor/backend/db/db.go +++ b/op-supervisor/supervisor/backend/db/db.go @@ -14,7 +14,6 @@ import ( "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" - backendTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -25,8 +24,8 @@ var ( type LogStorage interface { io.Closer - AddLog(logHash backendTypes.TruncatedHash, parentBlock eth.BlockID, - logIdx uint32, execMsg *backendTypes.ExecutingMessage) error + AddLog(logHash common.Hash, parentBlock eth.BlockID, + logIdx uint32, execMsg *types.ExecutingMessage) error SealBlock(parentHash common.Hash, block eth.BlockID, timestamp uint64) error @@ -45,7 +44,7 @@ type LogStorage interface { // returns ErrConflict if the log does not match the canonical chain. // returns ErrFuture if the log is out of reach. // returns nil if the log is known and matches the canonical chain. - Contains(blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) (nextIndex entrydb.EntryIdx, err error) + Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (nextIndex entrydb.EntryIdx, err error) } var _ LogStorage = (*logs.DB)(nil) @@ -125,7 +124,7 @@ func (db *ChainsDB) StartCrossHeadMaintenance(ctx context.Context) { } // Check calls the underlying logDB to determine if the given log entry is safe with respect to the checker's criteria. 
-func (db *ChainsDB) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) (entrydb.EntryIdx, error) { +func (db *ChainsDB) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (entrydb.EntryIdx, error) { logDB, ok := db.logDBs[chain] if !ok { return 0, fmt.Errorf("%w: %v", ErrUnknownChain, chain) @@ -267,7 +266,7 @@ func (db *ChainsDB) SealBlock(chain types.ChainID, parentHash common.Hash, block return logDB.SealBlock(parentHash, block, timestamp) } -func (db *ChainsDB) AddLog(chain types.ChainID, logHash backendTypes.TruncatedHash, parentBlock eth.BlockID, logIdx uint32, execMsg *backendTypes.ExecutingMessage) error { +func (db *ChainsDB) AddLog(chain types.ChainID, logHash common.Hash, parentBlock eth.BlockID, logIdx uint32, execMsg *types.ExecutingMessage) error { logDB, ok := db.logDBs[chain] if !ok { return fmt.Errorf("%w: %v", ErrUnknownChain, chain) diff --git a/op-supervisor/supervisor/backend/db/db_test.go b/op-supervisor/supervisor/backend/db/db_test.go index eb10afd7f641..e1da3c177b10 100644 --- a/op-supervisor/supervisor/backend/db/db_test.go +++ b/op-supervisor/supervisor/backend/db/db_test.go @@ -16,14 +16,13 @@ import ( "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" - backendTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) func TestChainsDB_AddLog(t *testing.T) { t.Run("UnknownChain", func(t *testing.T) { db := NewChainsDB(nil, &stubHeadStorage{}, testlog.Logger(t, log.LevelDebug)) - err := db.AddLog(types.ChainIDFromUInt64(2), backendTypes.TruncatedHash{}, eth.BlockID{}, 33, nil) + err := db.AddLog(types.ChainIDFromUInt64(2), common.Hash{}, eth.BlockID{}, 33, nil) require.ErrorIs(t, err, ErrUnknownChain) }) @@ -36,7 +35,7 @@ func TestChainsDB_AddLog(t *testing.T) { bl10 := eth.BlockID{Hash: common.Hash{0x10}, Number: 10} err := db.SealBlock(chainID, common.Hash{0x9}, bl10, 1234) require.NoError(t, err, err) - err = db.AddLog(chainID, backendTypes.TruncatedHash{}, bl10, 0, nil) + err = db.AddLog(chainID, common.Hash{}, bl10, 0, nil) require.NoError(t, err, err) require.Equal(t, 1, logDB.addLogCalls) require.Equal(t, 1, logDB.sealBlockCalls) @@ -195,13 +194,13 @@ func setupStubbedForUpdateHeads(chainID types.ChainID) (*stubLogDB, *stubChecker logDB := &stubLogDB{} // set up stubbed executing messages that the ChainsDB can pass to the checker - logDB.executingMessages = []*backendTypes.ExecutingMessage{} + logDB.executingMessages = []*types.ExecutingMessage{} for i := 0; i < numExecutingMessages; i++ { // executing messages are packed in groups of 3, with block numbers increasing by 1 - logDB.executingMessages = append(logDB.executingMessages, &backendTypes.ExecutingMessage{ + logDB.executingMessages = append(logDB.executingMessages, &types.ExecutingMessage{ BlockNum: uint64(100 + int(i/3)), LogIdx: uint32(i), - Hash: backendTypes.TruncatedHash{}, + Hash: common.Hash{}, }) } @@ -210,7 +209,7 @@ func setupStubbedForUpdateHeads(chainID types.ChainID) (*stubLogDB, *stubChecker logIndex := uint32(0) executedCount := 0 for i := entrydb.EntryIdx(0); i <= local; i++ { - var logHash backendTypes.TruncatedHash + var logHash common.Hash rng.Read(logHash[:]) execIndex := -1 @@ -266,7 +265,7 @@ func (s *stubChecker) 
CrossHeadForChain(chainID types.ChainID) entrydb.EntryIdx } // stubbed Check returns true for the first numSafe calls, and false thereafter -func (s *stubChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) bool { +func (s *stubChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool { if s.checkCalls >= s.numSafe { return false } @@ -305,7 +304,7 @@ type nextLogResponse struct { logIdx uint32 - evtHash backendTypes.TruncatedHash + evtHash common.Hash err error @@ -356,22 +355,22 @@ func (s *stubIterator) NextIndex() entrydb.EntryIdx { return s.index + 1 } -func (s *stubIterator) SealedBlock() (hash backendTypes.TruncatedHash, num uint64, ok bool) { +func (s *stubIterator) SealedBlock() (hash common.Hash, num uint64, ok bool) { panic("not yet supported") } -func (s *stubIterator) InitMessage() (hash backendTypes.TruncatedHash, logIndex uint32, ok bool) { +func (s *stubIterator) InitMessage() (hash common.Hash, logIndex uint32, ok bool) { if s.index < 0 { - return backendTypes.TruncatedHash{}, 0, false + return common.Hash{}, 0, false } if s.index >= entrydb.EntryIdx(len(s.db.nextLogs)) { - return backendTypes.TruncatedHash{}, 0, false + return common.Hash{}, 0, false } e := s.db.nextLogs[s.index] return e.evtHash, e.logIdx, true } -func (s *stubIterator) ExecMessage() *backendTypes.ExecutingMessage { +func (s *stubIterator) ExecMessage() *types.ExecutingMessage { if s.index < 0 { return nil } @@ -392,13 +391,13 @@ type stubLogDB struct { sealBlockCalls int headBlockNum uint64 - executingMessages []*backendTypes.ExecutingMessage + executingMessages []*types.ExecutingMessage nextLogs []nextLogResponse containsResponse containsResponse } -func (s *stubLogDB) AddLog(logHash backendTypes.TruncatedHash, parentBlock eth.BlockID, logIdx uint32, execMsg *backendTypes.ExecutingMessage) error { +func (s *stubLogDB) AddLog(logHash common.Hash, parentBlock eth.BlockID, logIdx uint32, execMsg *types.ExecutingMessage) error { s.addLogCalls++ return nil } @@ -432,7 +431,7 @@ type containsResponse struct { // stubbed Contains records the arguments passed to it // it returns the response set in the struct, or an empty response -func (s *stubLogDB) Contains(blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) (nextIndex entrydb.EntryIdx, err error) { +func (s *stubLogDB) Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (nextIndex entrydb.EntryIdx, err error) { return s.containsResponse.index, s.containsResponse.err } diff --git a/op-supervisor/supervisor/backend/db/entrydb/entry_db.go b/op-supervisor/supervisor/backend/db/entrydb/entry_db.go index 446051821ce3..a260d143ddb5 100644 --- a/op-supervisor/supervisor/backend/db/entrydb/entry_db.go +++ b/op-supervisor/supervisor/backend/db/entrydb/entry_db.go @@ -10,7 +10,7 @@ import ( ) const ( - EntrySize = 24 + EntrySize = 34 ) type EntryIdx int64 diff --git a/op-supervisor/supervisor/backend/db/logs/db.go b/op-supervisor/supervisor/backend/db/logs/db.go index 1a2d8c8adfea..61184318ece9 100644 --- a/op-supervisor/supervisor/backend/db/logs/db.go +++ b/op-supervisor/supervisor/backend/db/logs/db.go @@ -12,7 +12,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) const ( @@ -103,11 +103,11 @@ func 
(db *DB) init(trimToLastSealed bool) error { // and is then followed up with canonical-hash entry of genesis. db.lastEntryContext = logContext{ nextEntryIndex: 0, - blockHash: types.TruncatedHash{}, + blockHash: common.Hash{}, blockNum: 0, timestamp: 0, logsSince: 0, - logHash: types.TruncatedHash{}, + logHash: common.Hash{}, execMsg: nil, out: nil, } @@ -199,7 +199,7 @@ func (db *DB) FindSealedBlock(block eth.BlockID) (nextEntry entrydb.EntryIdx, er if !ok { panic("expected block") } - if types.TruncateHash(block.Hash) != h { + if block.Hash != h { return 0, fmt.Errorf("queried %s but got %s at number %d: %w", block.Hash, h, block.Number, ErrConflict) } return iter.NextIndex(), nil @@ -220,9 +220,9 @@ func (db *DB) LatestSealedBlockNum() (n uint64, ok bool) { return db.lastEntryContext.blockNum, true } -// Get returns the truncated hash of the log at the specified blockNum (of the sealed block) +// Get returns the hash of the log at the specified blockNum (of the sealed block) // and logIdx (of the log after the block), or an error if the log is not found. -func (db *DB) Get(blockNum uint64, logIdx uint32) (types.TruncatedHash, error) { +func (db *DB) Get(blockNum uint64, logIdx uint32) (common.Hash, error) { db.rwLock.RLock() defer db.rwLock.RUnlock() hash, _, err := db.findLogInfo(blockNum, logIdx) @@ -234,7 +234,7 @@ func (db *DB) Get(blockNum uint64, logIdx uint32) (types.TruncatedHash, error) { // If the log is determined to conflict with the canonical chain, then ErrConflict is returned. // logIdx is the index of the log in the array of all logs in the block. // This can be used to check the validity of cross-chain interop events. -func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash types.TruncatedHash) (entrydb.EntryIdx, error) { +func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (entrydb.EntryIdx, error) { db.rwLock.RLock() defer db.rwLock.RUnlock() db.log.Trace("Checking for log", "blockNum", blockNum, "logIdx", logIdx, "hash", logHash) @@ -251,29 +251,29 @@ func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash types.TruncatedHa return iter.NextIndex(), nil } -func (db *DB) findLogInfo(blockNum uint64, logIdx uint32) (types.TruncatedHash, Iterator, error) { +func (db *DB) findLogInfo(blockNum uint64, logIdx uint32) (common.Hash, Iterator, error) { if blockNum == 0 { - return types.TruncatedHash{}, nil, ErrConflict // no logs in block 0 + return common.Hash{}, nil, ErrConflict // no logs in block 0 } // blockNum-1, such that we find a log that came after the parent num-1 was sealed. // logIdx, such that all entries before logIdx can be skipped, but logIdx itself is still readable. 
iter, err := db.newIteratorAt(blockNum-1, logIdx) if errors.Is(err, ErrFuture) { db.log.Trace("Could not find log yet", "blockNum", blockNum, "logIdx", logIdx) - return types.TruncatedHash{}, nil, err + return common.Hash{}, nil, err } else if err != nil { db.log.Error("Failed searching for log", "blockNum", blockNum, "logIdx", logIdx) - return types.TruncatedHash{}, nil, err + return common.Hash{}, nil, err } if err := iter.NextInitMsg(); err != nil { - return types.TruncatedHash{}, nil, fmt.Errorf("failed to read initiating message %d, on top of block %d: %w", logIdx, blockNum, err) + return common.Hash{}, nil, fmt.Errorf("failed to read initiating message %d, on top of block %d: %w", logIdx, blockNum, err) } if _, x, ok := iter.SealedBlock(); !ok { panic("expected block") } else if x < blockNum-1 { panic(fmt.Errorf("bug in newIteratorAt, expected to have found parent block %d but got %d", blockNum-1, x)) } else if x > blockNum-1 { - return types.TruncatedHash{}, nil, fmt.Errorf("log does not exist, found next block already: %w", ErrConflict) + return common.Hash{}, nil, fmt.Errorf("log does not exist, found next block already: %w", ErrConflict) } logHash, x, ok := iter.InitMessage() if !ok { @@ -459,7 +459,7 @@ func (db *DB) SealBlock(parentHash common.Hash, block eth.BlockID, timestamp uin return db.flush() } -func (db *DB) AddLog(logHash types.TruncatedHash, parentBlock eth.BlockID, logIdx uint32, execMsg *types.ExecutingMessage) error { +func (db *DB) AddLog(logHash common.Hash, parentBlock eth.BlockID, logIdx uint32, execMsg *types.ExecutingMessage) error { db.rwLock.Lock() defer db.rwLock.Unlock() diff --git a/op-supervisor/supervisor/backend/db/logs/db_test.go b/op-supervisor/supervisor/backend/db/logs/db_test.go index d2dbced9f393..c89433c7b4fe 100644 --- a/op-supervisor/supervisor/backend/db/logs/db_test.go +++ b/op-supervisor/supervisor/backend/db/logs/db_test.go @@ -17,13 +17,9 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) -func createTruncatedHash(i int) types.TruncatedHash { - return types.TruncateHash(createHash(i)) -} - func createHash(i int) common.Hash { if i == -1 { // parent-hash of genesis is zero return common.Hash{} @@ -92,7 +88,7 @@ func TestAddLog(t *testing.T) { func(t *testing.T, db *DB, m *stubMetrics) {}, func(t *testing.T, db *DB, m *stubMetrics) { genesis := eth.BlockID{Hash: createHash(15), Number: 0} - err := db.AddLog(createTruncatedHash(1), genesis, 0, nil) + err := db.AddLog(createHash(1), genesis, 0, nil) require.ErrorIs(t, err, ErrLogOutOfOrder) }) }) @@ -102,7 +98,7 @@ func TestAddLog(t *testing.T) { func(t *testing.T, db *DB, m *stubMetrics) { genesis := eth.BlockID{Hash: createHash(15), Number: 15} require.NoError(t, db.SealBlock(common.Hash{}, genesis, 5000), "seal genesis") - err := db.AddLog(createTruncatedHash(1), genesis, 0, nil) + err := db.AddLog(createHash(1), genesis, 0, nil) require.NoError(t, err, "first log after genesis") require.NoError(t, db.SealBlock(genesis.Hash, eth.BlockID{Hash: createHash(16), Number: 16}, 5001)) }, @@ -121,11 +117,11 @@ func TestAddLog(t *testing.T) { } // Now apply 3 logs on top of that, contents for block 16 bl15 := eth.BlockID{Hash: createHash(15), Number: 15} - err := 
db.AddLog(createTruncatedHash(1), bl15, 0, nil) + err := db.AddLog(createHash(1), bl15, 0, nil) require.NoError(t, err) - err = db.AddLog(createTruncatedHash(2), bl15, 1, nil) + err = db.AddLog(createHash(2), bl15, 1, nil) require.NoError(t, err) - err = db.AddLog(createTruncatedHash(3), bl15, 2, nil) + err = db.AddLog(createHash(3), bl15, 2, nil) require.NoError(t, err) // Now seal block 16 bl16 := eth.BlockID{Hash: createHash(16), Number: 16} @@ -149,16 +145,16 @@ func TestAddLog(t *testing.T) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} err = db.SealBlock(createHash(14), bl15, 5001) require.NoError(t, err) - err = db.AddLog(createTruncatedHash(1), bl15, 0, nil) + err = db.AddLog(createHash(1), bl15, 0, nil) require.NoError(t, err) - err = db.AddLog(createTruncatedHash(2), bl15, 1, nil) + err = db.AddLog(createHash(2), bl15, 1, nil) require.NoError(t, err) bl16 := eth.BlockID{Hash: createHash(16), Number: 16} err = db.SealBlock(bl15.Hash, bl16, 5003) require.NoError(t, err) - err = db.AddLog(createTruncatedHash(3), bl16, 0, nil) + err = db.AddLog(createHash(3), bl16, 0, nil) require.NoError(t, err) - err = db.AddLog(createTruncatedHash(4), bl16, 1, nil) + err = db.AddLog(createHash(4), bl16, 1, nil) require.NoError(t, err) bl17 := eth.BlockID{Hash: createHash(17), Number: 17} err = db.SealBlock(bl16.Hash, bl17, 5003) @@ -199,7 +195,7 @@ func TestAddLog(t *testing.T) { }, func(t *testing.T, db *DB, m *stubMetrics) { onto := eth.BlockID{Hash: createHash(14), Number: 14} - err := db.AddLog(createTruncatedHash(1), onto, 0, nil) + err := db.AddLog(createHash(1), onto, 0, nil) require.ErrorIs(t, err, ErrLogOutOfOrder, "cannot build logs on 14 when 15 is already sealed") }) }) @@ -210,12 +206,12 @@ func TestAddLog(t *testing.T) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} err := db.lastEntryContext.forceBlock(bl15, 5000) require.NoError(t, err) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl15, 0, nil)) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl15, 1, nil)) + require.NoError(t, db.AddLog(createHash(1), bl15, 0, nil)) + require.NoError(t, db.AddLog(createHash(1), bl15, 1, nil)) }, func(t *testing.T, db *DB, m *stubMetrics) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} - err := db.AddLog(createTruncatedHash(1), bl15, 0, nil) + err := db.AddLog(createHash(1), bl15, 0, nil) require.ErrorIs(t, err, ErrLogOutOfOrder, "already at log index 2") }) }) @@ -226,11 +222,11 @@ func TestAddLog(t *testing.T) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} err := db.lastEntryContext.forceBlock(bl15, 5000) require.NoError(t, err) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl15, 0, nil)) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl15, 1, nil)) + require.NoError(t, db.AddLog(createHash(1), bl15, 0, nil)) + require.NoError(t, db.AddLog(createHash(1), bl15, 1, nil)) }, func(t *testing.T, db *DB, m *stubMetrics) { - err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(16), Number: 16}, 0, nil) + err := db.AddLog(createHash(1), eth.BlockID{Hash: createHash(16), Number: 16}, 0, nil) require.ErrorIs(t, err, ErrLogOutOfOrder) }) }) @@ -241,12 +237,12 @@ func TestAddLog(t *testing.T) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} err := db.lastEntryContext.forceBlock(bl15, 5000) require.NoError(t, err) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl15, 0, nil)) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl15, 1, nil)) + require.NoError(t, db.AddLog(createHash(1), bl15, 0, nil)) + 
require.NoError(t, db.AddLog(createHash(1), bl15, 1, nil)) }, func(t *testing.T, db *DB, m *stubMetrics) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} - err := db.AddLog(createTruncatedHash(1), bl15, 1, nil) + err := db.AddLog(createHash(1), bl15, 1, nil) require.ErrorIs(t, err, ErrLogOutOfOrder, "already at log index 2") }) }) @@ -257,12 +253,12 @@ func TestAddLog(t *testing.T) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} err := db.lastEntryContext.forceBlock(bl15, 5000) require.NoError(t, err) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl15, 0, nil)) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl15, 1, nil)) + require.NoError(t, db.AddLog(createHash(1), bl15, 0, nil)) + require.NoError(t, db.AddLog(createHash(1), bl15, 1, nil)) }, func(t *testing.T, db *DB, m *stubMetrics) { bl15 := eth.BlockID{Hash: createHash(16), Number: 16} - err := db.AddLog(createTruncatedHash(1), bl15, 2, nil) + err := db.AddLog(createHash(1), bl15, 2, nil) require.ErrorIs(t, err, ErrLogOutOfOrder) }) }) @@ -273,11 +269,11 @@ func TestAddLog(t *testing.T) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} err := db.lastEntryContext.forceBlock(bl15, 5000) require.NoError(t, err) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl15, 0, nil)) + require.NoError(t, db.AddLog(createHash(1), bl15, 0, nil)) }, func(t *testing.T, db *DB, m *stubMetrics) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} - err := db.AddLog(createTruncatedHash(1), bl15, 2, nil) + err := db.AddLog(createHash(1), bl15, 2, nil) require.ErrorIs(t, err, ErrLogOutOfOrder) }) }) @@ -290,7 +286,7 @@ func TestAddLog(t *testing.T) { }, func(t *testing.T, db *DB, m *stubMetrics) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} - err := db.AddLog(createTruncatedHash(1), bl15, 5, nil) + err := db.AddLog(createHash(1), bl15, 5, nil) require.ErrorIs(t, err, ErrLogOutOfOrder) }) }) @@ -301,17 +297,17 @@ func TestAddLog(t *testing.T) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} err := db.lastEntryContext.forceBlock(bl15, 5000) require.NoError(t, err) - err = db.AddLog(createTruncatedHash(1), bl15, 0, nil) + err = db.AddLog(createHash(1), bl15, 0, nil) require.NoError(t, err) }, func(t *testing.T, db *DB, m *stubMetrics) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} - err := db.AddLog(createTruncatedHash(1), bl15, 1, nil) + err := db.AddLog(createHash(1), bl15, 1, nil) require.NoError(t, err) bl16 := eth.BlockID{Hash: createHash(16), Number: 16} err = db.SealBlock(bl15.Hash, bl16, 5001) require.NoError(t, err) - err = db.AddLog(createTruncatedHash(1), bl16, 1, nil) + err = db.AddLog(createHash(1), bl16, 1, nil) require.ErrorIs(t, err, ErrLogOutOfOrder) }) }) @@ -340,7 +336,7 @@ func TestAddLog(t *testing.T) { require.Equal(t, expectedIndex, db.lastEntryContext.NextIndex()) { // create block 1 for i := 0; i < block1LogCount; i++ { - err := db.AddLog(createTruncatedHash(i), block0, uint32(i), nil) + err := db.AddLog(createHash(i), block0, uint32(i), nil) require.NoError(t, err) } err := db.SealBlock(block0.Hash, block1, 3001) // second seal-checkpoint @@ -352,7 +348,7 @@ func TestAddLog(t *testing.T) { { // create block 2 for i := 0; i < block2LogCount; i++ { // two of these imply a search checkpoint, the second and third search-checkpoint - err := db.AddLog(createTruncatedHash(i), block1, uint32(i), nil) + err := db.AddLog(createHash(i), block1, uint32(i), nil) require.NoError(t, err) } err := db.SealBlock(block1.Hash, block2, 3002) // third seal-checkpoint @@ -363,7 
+359,7 @@ func TestAddLog(t *testing.T) { require.Equal(t, expectedIndex, db.lastEntryContext.NextIndex(), "added logs, two search checkpoints, and a seal checkpoint") { // create block 3 for i := 0; i < block3LogCount; i++ { - err := db.AddLog(createTruncatedHash(i), block2, uint32(i), nil) + err := db.AddLog(createHash(i), block2, uint32(i), nil) require.NoError(t, err) } err := db.SealBlock(block2.Hash, block3, 3003) @@ -381,7 +377,7 @@ func TestAddLog(t *testing.T) { { // create block 4 for i := 0; i < block4LogCount; i++ { // includes a fourth search checkpoint - err := db.AddLog(createTruncatedHash(i), block3, uint32(i), nil) + err := db.AddLog(createHash(i), block3, uint32(i), nil) require.NoError(t, err) } err := db.SealBlock(block3.Hash, block4, 3003) // fourth seal checkpoint @@ -422,14 +418,14 @@ func TestAddDependentLog(t *testing.T) { BlockNum: 42894, LogIdx: 42, Timestamp: 8742482, - Hash: types.TruncateHash(createHash(8844)), + Hash: createHash(8844), } t.Run("FirstEntry", func(t *testing.T) { runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} require.NoError(t, db.lastEntryContext.forceBlock(bl15, 5000)) - err := db.AddLog(createTruncatedHash(1), bl15, 0, &execMsg) + err := db.AddLog(createHash(1), bl15, 0, &execMsg) require.NoError(t, err) }, func(t *testing.T, db *DB, m *stubMetrics) { @@ -443,13 +439,13 @@ func TestAddDependentLog(t *testing.T) { bl15 := eth.BlockID{Hash: createHash(15), Number: 15} require.NoError(t, db.lastEntryContext.forceBlock(bl15, 5000)) for i := uint32(0); m.entryCount < searchCheckpointFrequency-1; i++ { - require.NoError(t, db.AddLog(createTruncatedHash(9), bl15, i, nil)) + require.NoError(t, db.AddLog(createHash(9), bl15, i, nil)) } bl16 := eth.BlockID{Hash: createHash(16), Number: 16} require.NoError(t, db.SealBlock(bl15.Hash, bl16, 5001)) // added 3 entries: seal-checkpoint, then a search-checkpoint, then the canonical hash require.Equal(t, m.entryCount, int64(searchCheckpointFrequency+2)) - err := db.AddLog(createTruncatedHash(1), bl16, 0, &execMsg) + err := db.AddLog(createHash(1), bl16, 0, &execMsg) require.NoError(t, err) }, func(t *testing.T, db *DB, m *stubMetrics) { @@ -465,10 +461,10 @@ func TestAddDependentLog(t *testing.T) { require.NoError(t, db.lastEntryContext.forceBlock(bl15, 5000)) // we add 256 - 2 (start) - 2 (init msg, exec link) = 252 entries for i := uint32(0); i < 252; i++ { - require.NoError(t, db.AddLog(createTruncatedHash(9), bl15, i, nil)) + require.NoError(t, db.AddLog(createHash(9), bl15, i, nil)) } // add an executing message - err := db.AddLog(createTruncatedHash(1), bl15, 252, &execMsg) + err := db.AddLog(createHash(1), bl15, 252, &execMsg) require.NoError(t, err) // 0,1: start // 2..252+2: initiating logs without exec message @@ -495,10 +491,10 @@ func TestAddDependentLog(t *testing.T) { require.NoError(t, db.lastEntryContext.forceBlock(bl15, 5000)) // we add 256 - 2 (start) - 1 (init msg) = 253 entries for i := uint32(0); i < 253; i++ { - require.NoError(t, db.AddLog(createTruncatedHash(9), bl15, i, nil)) + require.NoError(t, db.AddLog(createHash(9), bl15, i, nil)) } // add an executing message - err := db.AddLog(createTruncatedHash(1), bl15, 253, &execMsg) + err := db.AddLog(createHash(1), bl15, 253, &execMsg) require.NoError(t, err) // 0,1: start // 2..253+2: initiating logs without exec message @@ -523,15 +519,15 @@ func TestContains(t *testing.T) { func(t *testing.T, db *DB, m *stubMetrics) { bl50 := eth.BlockID{Hash: createHash(50), Number: 50} 
require.NoError(t, db.lastEntryContext.forceBlock(bl50, 5000)) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl50, 0, nil)) - require.NoError(t, db.AddLog(createTruncatedHash(3), bl50, 1, nil)) - require.NoError(t, db.AddLog(createTruncatedHash(2), bl50, 2, nil)) + require.NoError(t, db.AddLog(createHash(1), bl50, 0, nil)) + require.NoError(t, db.AddLog(createHash(3), bl50, 1, nil)) + require.NoError(t, db.AddLog(createHash(2), bl50, 2, nil)) bl51 := eth.BlockID{Hash: createHash(51), Number: 51} require.NoError(t, db.SealBlock(bl50.Hash, bl51, 5001)) bl52 := eth.BlockID{Hash: createHash(52), Number: 52} require.NoError(t, db.SealBlock(bl51.Hash, bl52, 5001)) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl52, 0, nil)) - require.NoError(t, db.AddLog(createTruncatedHash(3), bl52, 1, nil)) + require.NoError(t, db.AddLog(createHash(1), bl52, 0, nil)) + require.NoError(t, db.AddLog(createHash(3), bl52, 1, nil)) }, func(t *testing.T, db *DB, m *stubMetrics) { // Should find added logs @@ -560,35 +556,35 @@ func TestExecutes(t *testing.T) { BlockNum: 22, LogIdx: 99, Timestamp: 948294, - Hash: createTruncatedHash(332299), + Hash: createHash(332299), } execMsg2 := types.ExecutingMessage{ Chain: 44, BlockNum: 55, LogIdx: 66, Timestamp: 77777, - Hash: createTruncatedHash(445566), + Hash: createHash(445566), } execMsg3 := types.ExecutingMessage{ Chain: 77, BlockNum: 88, LogIdx: 89, Timestamp: 6578567, - Hash: createTruncatedHash(778889), + Hash: createHash(778889), } runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) { bl50 := eth.BlockID{Hash: createHash(50), Number: 50} require.NoError(t, db.lastEntryContext.forceBlock(bl50, 500)) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl50, 0, nil)) - require.NoError(t, db.AddLog(createTruncatedHash(3), bl50, 1, &execMsg1)) - require.NoError(t, db.AddLog(createTruncatedHash(2), bl50, 2, nil)) + require.NoError(t, db.AddLog(createHash(1), bl50, 0, nil)) + require.NoError(t, db.AddLog(createHash(3), bl50, 1, &execMsg1)) + require.NoError(t, db.AddLog(createHash(2), bl50, 2, nil)) bl51 := eth.BlockID{Hash: createHash(51), Number: 51} require.NoError(t, db.SealBlock(bl50.Hash, bl51, 5001)) bl52 := eth.BlockID{Hash: createHash(52), Number: 52} require.NoError(t, db.SealBlock(bl51.Hash, bl52, 5001)) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl52, 0, &execMsg2)) - require.NoError(t, db.AddLog(createTruncatedHash(3), bl52, 1, &execMsg3)) + require.NoError(t, db.AddLog(createHash(1), bl52, 0, &execMsg2)) + require.NoError(t, db.AddLog(createHash(3), bl52, 1, &execMsg3)) }, func(t *testing.T, db *DB, m *stubMetrics) { // Should find added logs @@ -627,7 +623,7 @@ func TestGetBlockInfo(t *testing.T) { func(t *testing.T, db *DB, m *stubMetrics) { bl11 := eth.BlockID{Hash: createHash(11), Number: 11} require.NoError(t, db.lastEntryContext.forceBlock(bl11, 500)) - err := db.AddLog(createTruncatedHash(1), bl11, 0, nil) + err := db.AddLog(createHash(1), bl11, 0, nil) require.NoError(t, err) }, func(t *testing.T, db *DB, m *stubMetrics) { @@ -657,7 +653,7 @@ func requireContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHa require.LessOrEqual(t, len(execMsg), 1, "cannot have multiple executing messages for a single log") m, ok := db.m.(*stubMetrics) require.True(t, ok, "Did not get the expected metrics type") - _, err := db.Contains(blockNum, logIdx, types.TruncateHash(logHash)) + _, err := db.Contains(blockNum, logIdx, logHash) require.NoErrorf(t, err, "Error searching for log %v in block %v", logIdx, blockNum) 
require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency*2), "Should not need to read more than between two checkpoints") require.NotZero(t, m.entriesReadForSearch, "Must read at least some entries to find the log") @@ -672,7 +668,7 @@ func requireContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHa func requireConflicts(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash) { m, ok := db.m.(*stubMetrics) require.True(t, ok, "Did not get the expected metrics type") - _, err := db.Contains(blockNum, logIdx, types.TruncateHash(logHash)) + _, err := db.Contains(blockNum, logIdx, logHash) require.ErrorIs(t, err, ErrConflict, "canonical chain must not include this log") require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency*2), "Should not need to read more than between two checkpoints") } @@ -680,7 +676,7 @@ func requireConflicts(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logH func requireFuture(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash) { m, ok := db.m.(*stubMetrics) require.True(t, ok, "Did not get the expected metrics type") - _, err := db.Contains(blockNum, logIdx, types.TruncateHash(logHash)) + _, err := db.Contains(blockNum, logIdx, logHash) require.ErrorIs(t, err, ErrFuture, "canonical chain does not yet include this log") require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency*2), "Should not need to read more than between two checkpoints") } @@ -718,17 +714,17 @@ func TestRecoverOnCreate(t *testing.T) { store := storeWithEvents( // seal 0, 1, 2, 3 newSearchCheckpoint(0, 0, 100).encode(), - newCanonicalHash(createTruncatedHash(300)).encode(), + newCanonicalHash(createHash(300)).encode(), newSearchCheckpoint(1, 0, 101).encode(), - newCanonicalHash(createTruncatedHash(301)).encode(), + newCanonicalHash(createHash(301)).encode(), newSearchCheckpoint(2, 0, 102).encode(), - newCanonicalHash(createTruncatedHash(302)).encode(), + newCanonicalHash(createHash(302)).encode(), newSearchCheckpoint(3, 0, 103).encode(), - newCanonicalHash(createTruncatedHash(303)).encode(), + newCanonicalHash(createHash(303)).encode(), // open and seal 4 - newInitiatingEvent(createTruncatedHash(1), false).encode(), + newInitiatingEvent(createHash(1), false).encode(), newSearchCheckpoint(4, 0, 104).encode(), - newCanonicalHash(createTruncatedHash(304)).encode(), + newCanonicalHash(createHash(304)).encode(), ) db, m, err := createDb(t, store) require.NoError(t, err) @@ -742,22 +738,22 @@ func TestRecoverOnCreate(t *testing.T) { BlockNum: 10, LogIdx: 4, Timestamp: 1288, - Hash: createTruncatedHash(4), + Hash: createHash(4), } linkEvt, err := newExecutingLink(execMsg) require.NoError(t, err) store := storeWithEvents( newSearchCheckpoint(0, 0, 100).encode(), - newCanonicalHash(createTruncatedHash(300)).encode(), + newCanonicalHash(createHash(300)).encode(), newSearchCheckpoint(1, 0, 101).encode(), - newCanonicalHash(createTruncatedHash(301)).encode(), + newCanonicalHash(createHash(301)).encode(), newSearchCheckpoint(2, 0, 102).encode(), - newCanonicalHash(createTruncatedHash(302)).encode(), - newInitiatingEvent(createTruncatedHash(1111), true).encode(), + newCanonicalHash(createHash(302)).encode(), + newInitiatingEvent(createHash(1111), true).encode(), linkEvt.encode(), newExecutingCheck(execMsg.Hash).encode(), newSearchCheckpoint(3, 0, 103).encode(), - newCanonicalHash(createTruncatedHash(303)).encode(), + newCanonicalHash(createHash(303)).encode(), ) db, m, err := 
createDb(t, store) require.NoError(t, err) @@ -778,7 +774,7 @@ func TestRecoverOnCreate(t *testing.T) { // A completed seal is fine to have as last entry. store := storeWithEvents( newSearchCheckpoint(0, 0, 100).encode(), - newCanonicalHash(createTruncatedHash(344)).encode(), + newCanonicalHash(createHash(344)).encode(), ) _, m, err := createDb(t, store) require.NoError(t, err) @@ -790,10 +786,10 @@ func TestRecoverOnCreate(t *testing.T) { // without said executing message, is dropped. store := storeWithEvents( newSearchCheckpoint(0, 0, 100).encode(), - newCanonicalHash(createTruncatedHash(344)).encode(), + newCanonicalHash(createHash(344)).encode(), // both pruned because we go back to a seal - newInitiatingEvent(createTruncatedHash(0), false).encode(), - newInitiatingEvent(createTruncatedHash(1), true).encode(), + newInitiatingEvent(createHash(0), false).encode(), + newInitiatingEvent(createHash(1), true).encode(), ) _, m, err := createDb(t, store) require.NoError(t, err) @@ -805,11 +801,11 @@ func TestRecoverOnCreate(t *testing.T) { // without said executing message, is dropped. store := storeWithEvents( newSearchCheckpoint(0, 0, 100).encode(), - newCanonicalHash(createTruncatedHash(300)).encode(), + newCanonicalHash(createHash(300)).encode(), // pruned because we go back to a seal - newInitiatingEvent(createTruncatedHash(0), false).encode(), + newInitiatingEvent(createHash(0), false).encode(), newSearchCheckpoint(1, 0, 100).encode(), - newCanonicalHash(createTruncatedHash(301)).encode(), + newCanonicalHash(createHash(301)).encode(), ) _, m, err := createDb(t, store) require.NoError(t, err) @@ -822,14 +818,14 @@ func TestRecoverOnCreate(t *testing.T) { BlockNum: 10, LogIdx: 4, Timestamp: 1288, - Hash: createTruncatedHash(4), + Hash: createHash(4), } linkEvt, err := newExecutingLink(execMsg) require.NoError(t, err) store := storeWithEvents( newSearchCheckpoint(3, 0, 100).encode(), - newCanonicalHash(createTruncatedHash(344)).encode(), - newInitiatingEvent(createTruncatedHash(1), true).encode(), + newCanonicalHash(createHash(344)).encode(), + newInitiatingEvent(createHash(1), true).encode(), linkEvt.encode(), ) _, m, err := createDb(t, store) @@ -853,14 +849,14 @@ func TestRewind(t *testing.T) { func(t *testing.T, db *DB, m *stubMetrics) { bl50 := eth.BlockID{Hash: createHash(50), Number: 50} require.NoError(t, db.SealBlock(createHash(49), bl50, 500)) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl50, 0, nil)) - require.NoError(t, db.AddLog(createTruncatedHash(2), bl50, 1, nil)) + require.NoError(t, db.AddLog(createHash(1), bl50, 0, nil)) + require.NoError(t, db.AddLog(createHash(2), bl50, 1, nil)) bl51 := eth.BlockID{Hash: createHash(51), Number: 51} require.NoError(t, db.SealBlock(bl50.Hash, bl51, 502)) - require.NoError(t, db.AddLog(createTruncatedHash(3), bl51, 0, nil)) + require.NoError(t, db.AddLog(createHash(3), bl51, 0, nil)) bl52 := eth.BlockID{Hash: createHash(52), Number: 52} require.NoError(t, db.SealBlock(bl51.Hash, bl52, 504)) - require.NoError(t, db.AddLog(createTruncatedHash(4), bl52, 0, nil)) + require.NoError(t, db.AddLog(createHash(4), bl52, 0, nil)) // cannot rewind to a block that is not sealed yet require.ErrorIs(t, db.Rewind(53), ErrFuture) }, @@ -878,8 +874,8 @@ func TestRewind(t *testing.T) { func(t *testing.T, db *DB, m *stubMetrics) { bl50 := eth.BlockID{Hash: createHash(50), Number: 50} require.NoError(t, db.SealBlock(createHash(49), bl50, 500)) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl50, 0, nil)) - require.NoError(t, 
db.AddLog(createTruncatedHash(2), bl50, 1, nil)) + require.NoError(t, db.AddLog(createHash(1), bl50, 0, nil)) + require.NoError(t, db.AddLog(createHash(2), bl50, 1, nil)) // cannot go back to an unknown block require.ErrorIs(t, db.Rewind(25), ErrSkipped) }, @@ -894,12 +890,12 @@ func TestRewind(t *testing.T) { func(t *testing.T, db *DB, m *stubMetrics) { bl50 := eth.BlockID{Hash: createHash(50), Number: 50} require.NoError(t, db.SealBlock(createHash(49), bl50, 500)) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl50, 0, nil)) - require.NoError(t, db.AddLog(createTruncatedHash(2), bl50, 1, nil)) + require.NoError(t, db.AddLog(createHash(1), bl50, 0, nil)) + require.NoError(t, db.AddLog(createHash(2), bl50, 1, nil)) bl51 := eth.BlockID{Hash: createHash(51), Number: 51} require.NoError(t, db.SealBlock(bl50.Hash, bl51, 502)) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl51, 0, nil)) - require.NoError(t, db.AddLog(createTruncatedHash(2), bl51, 1, nil)) + require.NoError(t, db.AddLog(createHash(1), bl51, 0, nil)) + require.NoError(t, db.AddLog(createHash(2), bl51, 1, nil)) bl52 := eth.BlockID{Hash: createHash(52), Number: 52} require.NoError(t, db.SealBlock(bl51.Hash, bl52, 504)) require.NoError(t, db.Rewind(51)) @@ -918,7 +914,7 @@ func TestRewind(t *testing.T) { bl50 := eth.BlockID{Hash: createHash(50), Number: 50} require.NoError(t, db.SealBlock(createHash(49), bl50, 500)) for i := uint32(0); m.entryCount < searchCheckpointFrequency; i++ { - require.NoError(t, db.AddLog(createTruncatedHash(1), bl50, i, nil)) + require.NoError(t, db.AddLog(createHash(1), bl50, i, nil)) } // The checkpoint is added automatically, // it will be there as soon as it reaches 255 with log events. @@ -926,9 +922,9 @@ func TestRewind(t *testing.T) { require.EqualValues(t, searchCheckpointFrequency+2, m.entryCount) bl51 := eth.BlockID{Hash: createHash(51), Number: 51} require.NoError(t, db.SealBlock(bl50.Hash, bl51, 502)) - require.NoError(t, db.AddLog(createTruncatedHash(1), bl51, 0, nil)) + require.NoError(t, db.AddLog(createHash(1), bl51, 0, nil)) require.EqualValues(t, searchCheckpointFrequency+2+3, m.entryCount, "Should have inserted new checkpoint and extra log") - require.NoError(t, db.AddLog(createTruncatedHash(2), bl51, 1, nil)) + require.NoError(t, db.AddLog(createHash(2), bl51, 1, nil)) bl52 := eth.BlockID{Hash: createHash(52), Number: 52} require.NoError(t, db.SealBlock(bl51.Hash, bl52, 504)) require.NoError(t, db.Rewind(51)) @@ -950,8 +946,8 @@ func TestRewind(t *testing.T) { bl := eth.BlockID{Hash: createHash(int(i)), Number: uint64(i)} require.NoError(t, db.SealBlock(createHash(int(i)-1), bl, 500+uint64(i))) if i%2 == 0 { - require.NoError(t, db.AddLog(createTruncatedHash(1), bl, 0, nil)) - require.NoError(t, db.AddLog(createTruncatedHash(2), bl, 1, nil)) + require.NoError(t, db.AddLog(createHash(1), bl, 0, nil)) + require.NoError(t, db.AddLog(createHash(2), bl, 1, nil)) } } require.NoError(t, db.Rewind(15)) @@ -972,8 +968,8 @@ func TestRewind(t *testing.T) { bl := eth.BlockID{Hash: createHash(int(i)), Number: uint64(i)} require.NoError(t, db.SealBlock(createHash(int(i)-1), bl, 500+uint64(i))) if i%2 == 1 { - require.NoError(t, db.AddLog(createTruncatedHash(1), bl, 0, nil)) - require.NoError(t, db.AddLog(createTruncatedHash(2), bl, 1, nil)) + require.NoError(t, db.AddLog(createHash(1), bl, 0, nil)) + require.NoError(t, db.AddLog(createHash(2), bl, 1, nil)) } } // We ended at 30, and sealed it, nothing left to prune @@ -996,8 +992,8 @@ func TestRewind(t *testing.T) { bl := 
eth.BlockID{Hash: createHash(int(i)), Number: uint64(i)} require.NoError(t, db.SealBlock(createHash(int(i)-1), bl, 500+uint64(i))) if i%2 == 0 { - require.NoError(t, db.AddLog(createTruncatedHash(1), bl, 0, nil)) - require.NoError(t, db.AddLog(createTruncatedHash(2), bl, 1, nil)) + require.NoError(t, db.AddLog(createHash(1), bl, 0, nil)) + require.NoError(t, db.AddLog(createHash(2), bl, 1, nil)) } } require.NoError(t, db.Rewind(16)) @@ -1005,16 +1001,16 @@ func TestRewind(t *testing.T) { func(t *testing.T, db *DB, m *stubMetrics) { bl29 := eth.BlockID{Hash: createHash(29), Number: 29} // 29 was deleted - err := db.AddLog(createTruncatedHash(2), bl29, 1, nil) + err := db.AddLog(createHash(2), bl29, 1, nil) require.ErrorIs(t, err, ErrLogOutOfOrder, "Cannot add log on removed block") // 15 is older, we have up to 16 bl15 := eth.BlockID{Hash: createHash(15), Number: 15} // try to add a third log to 15 - err = db.AddLog(createTruncatedHash(10), bl15, 2, nil) + err = db.AddLog(createHash(10), bl15, 2, nil) require.ErrorIs(t, err, ErrLogOutOfOrder) bl16 := eth.BlockID{Hash: createHash(16), Number: 16} // try to add a log to 17, on top of 16 - err = db.AddLog(createTruncatedHash(42), bl16, 0, nil) + err = db.AddLog(createHash(42), bl16, 0, nil) require.NoError(t, err) requireContains(t, db, 17, 0, createHash(42)) }) diff --git a/op-supervisor/supervisor/backend/db/logs/entries.go b/op-supervisor/supervisor/backend/db/logs/entries.go index 5dbc1e3b4816..431adc99f465 100644 --- a/op-supervisor/supervisor/backend/db/logs/entries.go +++ b/op-supervisor/supervisor/backend/db/logs/entries.go @@ -4,8 +4,10 @@ import ( "encoding/binary" "fmt" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) // searchCheckpoint is both a checkpoint for searching, as well as a checkpoint for sealing blocks. 
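The entries.go hunks that follow widen the stored hashes: canonicalHash and executingCheck move from data[1:21] to data[1:33], and initiatingEvent moves from data[2:22] to data[2:34], which is what pushes EntrySize from 24 to 34 bytes. A minimal sketch of the resulting initiating-event layout, assuming the type and flag constants keep their existing values (the helper name below is hypothetical, not part of this patch):

package sketch

import "github.com/ethereum/go-ethereum/common"

// Illustrative only: the 34-byte initiating-event layout implied by the offsets
// in the hunks below — 1 type byte, 1 flags byte, then the full 32-byte hash,
// where the old 24-byte format stored a 20-byte truncated hash.
func encodeInitiatingEventSketch(logHash common.Hash, hasExecMsg bool) [34]byte {
	var entry [34]byte
	entry[0] = 2 // assumed value of entrydb.TypeInitiatingEvent
	if hasExecMsg {
		entry[1] |= 1 // assumed value of eventFlagHasExecutingMessage
	}
	copy(entry[2:34], logHash[:]) // the full hash now fits; no TruncateHash step needed
	return entry
}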
@@ -48,10 +50,10 @@ func (s searchCheckpoint) encode() entrydb.Entry { } type canonicalHash struct { - hash types.TruncatedHash + hash common.Hash } -func newCanonicalHash(hash types.TruncatedHash) canonicalHash { +func newCanonicalHash(hash common.Hash) canonicalHash { return canonicalHash{hash: hash} } @@ -59,21 +61,19 @@ func newCanonicalHashFromEntry(data entrydb.Entry) (canonicalHash, error) { if data.Type() != entrydb.TypeCanonicalHash { return canonicalHash{}, fmt.Errorf("%w: attempting to decode canonical hash but was type %s", ErrDataCorruption, data.Type()) } - var truncated types.TruncatedHash - copy(truncated[:], data[1:21]) - return newCanonicalHash(truncated), nil + return newCanonicalHash(common.Hash(data[1:33])), nil } func (c canonicalHash) encode() entrydb.Entry { var entry entrydb.Entry entry[0] = uint8(entrydb.TypeCanonicalHash) - copy(entry[1:21], c.hash[:]) + copy(entry[1:33], c.hash[:]) return entry } type initiatingEvent struct { hasExecMsg bool - logHash types.TruncatedHash + logHash common.Hash } func newInitiatingEventFromEntry(data entrydb.Entry) (initiatingEvent, error) { @@ -83,11 +83,11 @@ func newInitiatingEventFromEntry(data entrydb.Entry) (initiatingEvent, error) { flags := data[1] return initiatingEvent{ hasExecMsg: flags&eventFlagHasExecutingMessage != 0, - logHash: types.TruncatedHash(data[2:22]), + logHash: common.Hash(data[2:34]), }, nil } -func newInitiatingEvent(logHash types.TruncatedHash, hasExecMsg bool) initiatingEvent { +func newInitiatingEvent(logHash common.Hash, hasExecMsg bool) initiatingEvent { return initiatingEvent{ hasExecMsg: hasExecMsg, logHash: logHash, @@ -104,7 +104,7 @@ func (i initiatingEvent) encode() entrydb.Entry { flags = flags | eventFlagHasExecutingMessage } data[1] = flags - copy(data[2:22], i.logHash[:]) + copy(data[2:34], i.logHash[:]) return data } @@ -157,10 +157,10 @@ func (e executingLink) encode() entrydb.Entry { } type executingCheck struct { - hash types.TruncatedHash + hash common.Hash } -func newExecutingCheck(hash types.TruncatedHash) executingCheck { +func newExecutingCheck(hash common.Hash) executingCheck { return executingCheck{hash: hash} } @@ -168,24 +168,22 @@ func newExecutingCheckFromEntry(data entrydb.Entry) (executingCheck, error) { if data.Type() != entrydb.TypeExecutingCheck { return executingCheck{}, fmt.Errorf("%w: attempting to decode executing check but was type %s", ErrDataCorruption, data.Type()) } - var hash types.TruncatedHash - copy(hash[:], data[1:21]) - return newExecutingCheck(hash), nil + return newExecutingCheck(common.Hash(data[1:33])), nil } // encode creates an executing check entry -// type 4: "executing check" = 21 bytes +// type 4: "executing check" = 33 bytes func (e executingCheck) encode() entrydb.Entry { var entry entrydb.Entry entry[0] = uint8(entrydb.TypeExecutingCheck) - copy(entry[1:21], e.hash[:]) + copy(entry[1:33], e.hash[:]) return entry } type paddingEntry struct{} // encoding of the padding entry -// type 5: "padding" = 24 bytes +// type 5: "padding" = 34 bytes func (e paddingEntry) encode() entrydb.Entry { var entry entrydb.Entry entry[0] = uint8(entrydb.TypePadding) diff --git a/op-supervisor/supervisor/backend/db/logs/iterator.go b/op-supervisor/supervisor/backend/db/logs/iterator.go index 29b47245e71c..4b3bd1b65908 100644 --- a/op-supervisor/supervisor/backend/db/logs/iterator.go +++ b/op-supervisor/supervisor/backend/db/logs/iterator.go @@ -5,14 +5,16 @@ import ( "fmt" "io" + "github.com/ethereum/go-ethereum/common" + 
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) type IteratorState interface { NextIndex() entrydb.EntryIdx - SealedBlock() (hash types.TruncatedHash, num uint64, ok bool) - InitMessage() (hash types.TruncatedHash, logIndex uint32, ok bool) + SealedBlock() (hash common.Hash, num uint64, ok bool) + InitMessage() (hash common.Hash, logIndex uint32, ok bool) ExecMessage() *types.ExecutingMessage } @@ -127,12 +129,12 @@ func (i *iterator) NextIndex() entrydb.EntryIdx { // SealedBlock returns the sealed block that we are appending logs after, if any is available. // I.e. the block is the parent block of the block containing the logs that are currently appending to it. -func (i *iterator) SealedBlock() (hash types.TruncatedHash, num uint64, ok bool) { +func (i *iterator) SealedBlock() (hash common.Hash, num uint64, ok bool) { return i.current.SealedBlock() } // InitMessage returns the current initiating message, if any is available. -func (i *iterator) InitMessage() (hash types.TruncatedHash, logIndex uint32, ok bool) { +func (i *iterator) InitMessage() (hash common.Hash, logIndex uint32, ok bool) { return i.current.InitMessage() } diff --git a/op-supervisor/supervisor/backend/db/logs/state.go b/op-supervisor/supervisor/backend/db/logs/state.go index 083e07f97083..bb00762acc2e 100644 --- a/op-supervisor/supervisor/backend/db/logs/state.go +++ b/op-supervisor/supervisor/backend/db/logs/state.go @@ -9,7 +9,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) // logContext is a buffer on top of the DB, @@ -32,14 +32,14 @@ import ( // // Types ( = 1 byte): // type 0: "checkpoint" = 21 bytes -// type 1: "canonical hash" = 21 bytes -// type 2: "initiating event" = 22 bytes +// type 1: "canonical hash" = 33 bytes +// type 2: "initiating event" = 34 bytes // type 3: "executing link" = 24 bytes -// type 4: "executing check" = 21 bytes -// type 5: "padding" = 24 bytes +// type 4: "executing check" = 33 bytes +// type 5: "padding" = 34 bytes // other types: future compat. E.g. for linking to L1, registering block-headers as a kind of initiating-event, tracking safe-head progression, etc. // -// Right-pad each entry that is not 24 bytes. +// Right-pad each entry that is not 34 bytes. // // We insert a checkpoint for every search interval and block sealing event, // and these may overlap as the same thing. @@ -55,7 +55,7 @@ type logContext struct { // blockHash of the last sealed block. // A block is not considered sealed until we know its block hash. // While we process logs we keep the parent-block of said logs around as sealed block. - blockHash types.TruncatedHash + blockHash common.Hash // blockNum of the last sealed block blockNum uint64 // timestamp of the last sealed block @@ -65,7 +65,7 @@ type logContext struct { logsSince uint32 // payload-hash of the log-event that was last processed. (may not be fully processed, see doneLog) - logHash types.TruncatedHash + logHash common.Hash // executing message that might exist for the current log event. 
// Might be incomplete; if !logDone while we already processed the initiating event, @@ -91,9 +91,9 @@ func (l *logContext) NextIndex() entrydb.EntryIdx { } // SealedBlock returns the block that we are building on top of, and if it is sealed. -func (l *logContext) SealedBlock() (hash types.TruncatedHash, num uint64, ok bool) { +func (l *logContext) SealedBlock() (hash common.Hash, num uint64, ok bool) { if !l.hasCompleteBlock() { - return types.TruncatedHash{}, 0, false + return common.Hash{}, 0, false } return l.blockHash, l.blockNum, true } @@ -111,9 +111,9 @@ func (l *logContext) hasReadableLog() bool { } // InitMessage returns the current initiating message, if any is available. -func (l *logContext) InitMessage() (hash types.TruncatedHash, logIndex uint32, ok bool) { +func (l *logContext) InitMessage() (hash common.Hash, logIndex uint32, ok bool) { if !l.hasReadableLog() { - return types.TruncatedHash{}, 0, false + return common.Hash{}, 0, false } return l.logHash, l.logsSince - 1, true } @@ -150,13 +150,13 @@ func (l *logContext) processEntry(entry entrydb.Entry) error { return err } l.blockNum = current.blockNum - l.blockHash = types.TruncatedHash{} + l.blockHash = common.Hash{} l.logsSince = current.logsSince // TODO this is bumping the logsSince? l.timestamp = current.timestamp l.need.Add(entrydb.FlagCanonicalHash) // Log data after the block we are sealing remains to be seen if l.logsSince == 0 { - l.logHash = types.TruncatedHash{} + l.logHash = common.Hash{} l.execMsg = nil } case entrydb.TypeCanonicalHash: @@ -201,7 +201,7 @@ func (l *logContext) processEntry(entry entrydb.Entry) error { BlockNum: link.blockNum, LogIdx: link.logIdx, Timestamp: link.timestamp, - Hash: types.TruncatedHash{}, // not known yet + Hash: common.Hash{}, // not known yet } l.need.Remove(entrydb.FlagExecutingLink) l.need.Add(entrydb.FlagExecutingCheck) @@ -331,12 +331,12 @@ func (l *logContext) forceBlock(upd eth.BlockID, timestamp uint64) error { if l.nextEntryIndex != 0 { return errors.New("can only bootstrap on top of an empty state") } - l.blockHash = types.TruncateHash(upd.Hash) + l.blockHash = upd.Hash l.blockNum = upd.Number l.timestamp = timestamp l.logsSince = 0 l.execMsg = nil - l.logHash = types.TruncatedHash{} + l.logHash = common.Hash{} l.need = 0 l.out = nil return l.inferFull() // apply to the state as much as possible @@ -350,29 +350,29 @@ func (l *logContext) SealBlock(parent common.Hash, upd eth.BlockID, timestamp ui if err := l.inferFull(); err != nil { // ensure we can start applying return err } - if l.blockHash != types.TruncateHash(parent) { + if l.blockHash != parent { return fmt.Errorf("%w: cannot apply block %s (parent %s) on top of %s", ErrConflict, upd, parent, l.blockHash) } - if l.blockHash != (types.TruncatedHash{}) && l.blockNum+1 != upd.Number { + if l.blockHash != (common.Hash{}) && l.blockNum+1 != upd.Number { return fmt.Errorf("%w: cannot apply block %d on top of %d", ErrConflict, upd.Number, l.blockNum) } if l.timestamp > timestamp { return fmt.Errorf("%w: block timestamp %d must be equal or larger than current timestamp %d", ErrConflict, timestamp, l.timestamp) } } - l.blockHash = types.TruncateHash(upd.Hash) + l.blockHash = upd.Hash l.blockNum = upd.Number l.timestamp = timestamp l.logsSince = 0 l.execMsg = nil - l.logHash = types.TruncatedHash{} + l.logHash = common.Hash{} l.need.Add(entrydb.FlagSearchCheckpoint) return l.inferFull() // apply to the state as much as possible } // ApplyLog applies a log on top of the current state. 
// The parent-block that the log comes after must be applied with ApplyBlock first. -func (l *logContext) ApplyLog(parentBlock eth.BlockID, logIdx uint32, logHash types.TruncatedHash, execMsg *types.ExecutingMessage) error { +func (l *logContext) ApplyLog(parentBlock eth.BlockID, logIdx uint32, logHash common.Hash, execMsg *types.ExecutingMessage) error { if parentBlock == (eth.BlockID{}) { return fmt.Errorf("genesis does not have logs: %w", ErrLogOutOfOrder) } @@ -387,7 +387,7 @@ func (l *logContext) ApplyLog(parentBlock eth.BlockID, logIdx uint32, logHash ty } } // check parent block - if l.blockHash != types.TruncateHash(parentBlock.Hash) { + if l.blockHash != parentBlock.Hash { return fmt.Errorf("%w: log builds on top of block %s, but have block %s", ErrLogOutOfOrder, parentBlock, l.blockHash) } if l.blockNum != parentBlock.Number { diff --git a/op-supervisor/supervisor/backend/db/safety_checkers.go b/op-supervisor/supervisor/backend/db/safety_checkers.go index 3ed297a60c3e..916f26f6dead 100644 --- a/op-supervisor/supervisor/backend/db/safety_checkers.go +++ b/op-supervisor/supervisor/backend/db/safety_checkers.go @@ -3,10 +3,11 @@ package db import ( "errors" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" - backendTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -21,7 +22,7 @@ const ( type SafetyChecker interface { LocalHeadForChain(chainID types.ChainID) entrydb.EntryIdx CrossHeadForChain(chainID types.ChainID) entrydb.EntryIdx - Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) bool + Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool Update(chain types.ChainID, index entrydb.EntryIdx) heads.OperationFn Name() string SafetyLevel() types.SafetyLevel @@ -129,7 +130,7 @@ func check( chain types.ChainID, blockNum uint64, logIdx uint32, - logHash backendTypes.TruncatedHash) bool { + logHash common.Hash) bool { // for the Check to be valid, the log must: // exist at the blockNum and logIdx @@ -150,13 +151,13 @@ func check( // Check checks if the log entry is safe, provided a local head for the chain // it passes on the local head this checker is concerned with, along with its view of the database -func (c *unsafeChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) bool { +func (c *unsafeChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool { return check(c.chainsDB, c.LocalHeadForChain(chain), chain, blockNum, logIdx, logHash) } -func (c *safeChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) bool { +func (c *safeChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool { return check(c.chainsDB, c.LocalHeadForChain(chain), chain, blockNum, logIdx, logHash) } -func (c *finalizedChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) bool { +func (c *finalizedChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool { return check(c.chainsDB, c.LocalHeadForChain(chain), chain, blockNum, logIdx, logHash) 
diff --git a/op-supervisor/supervisor/backend/db/safety_checkers.go b/op-supervisor/supervisor/backend/db/safety_checkers.go
index 3ed297a60c3e..916f26f6dead 100644
--- a/op-supervisor/supervisor/backend/db/safety_checkers.go
+++ b/op-supervisor/supervisor/backend/db/safety_checkers.go
@@ -3,10 +3,11 @@ package db
 import (
 	"errors"
 
+	"github.com/ethereum/go-ethereum/common"
+
 	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
 	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads"
 	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs"
-	backendTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types"
 	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
 )
 
@@ -21,7 +22,7 @@ const (
 type SafetyChecker interface {
 	LocalHeadForChain(chainID types.ChainID) entrydb.EntryIdx
 	CrossHeadForChain(chainID types.ChainID) entrydb.EntryIdx
-	Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) bool
+	Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool
 	Update(chain types.ChainID, index entrydb.EntryIdx) heads.OperationFn
 	Name() string
 	SafetyLevel() types.SafetyLevel
@@ -129,7 +130,7 @@ func check(
 	chain types.ChainID,
 	blockNum uint64,
 	logIdx uint32,
-	logHash backendTypes.TruncatedHash) bool {
+	logHash common.Hash) bool {
 
 	// for the Check to be valid, the log must:
 	// exist at the blockNum and logIdx
@@ -150,13 +151,13 @@ func check(
 // Check checks if the log entry is safe, provided a local head for the chain
 // it passes on the local head this checker is concerned with, along with its view of the database
-func (c *unsafeChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) bool {
+func (c *unsafeChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool {
 	return check(c.chainsDB, c.LocalHeadForChain(chain), chain, blockNum, logIdx, logHash)
 }
 
-func (c *safeChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) bool {
+func (c *safeChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool {
 	return check(c.chainsDB, c.LocalHeadForChain(chain), chain, blockNum, logIdx, logHash)
 }
 
-func (c *finalizedChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) bool {
+func (c *finalizedChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool {
 	return check(c.chainsDB, c.LocalHeadForChain(chain), chain, blockNum, logIdx, logHash)
 }
diff --git a/op-supervisor/supervisor/backend/db/safety_checkers_test.go b/op-supervisor/supervisor/backend/db/safety_checkers_test.go
index 667cd8d46607..c8fb4e34a757 100644
--- a/op-supervisor/supervisor/backend/db/safety_checkers_test.go
+++ b/op-supervisor/supervisor/backend/db/safety_checkers_test.go
@@ -6,13 +6,13 @@ import (
 
 	"github.com/stretchr/testify/require"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
 
 	"github.com/ethereum-optimism/optimism/op-service/testlog"
 	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
 	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads"
 	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs"
-	backendTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types"
 	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
 )
 
@@ -105,7 +105,7 @@ func TestCheck(t *testing.T) {
 		chainID          types.ChainID
 		blockNum         uint64
 		logIdx           uint32
-		loghash          backendTypes.TruncatedHash
+		loghash          common.Hash
 		containsResponse containsResponse
 		expected         bool
 	}{
@@ -117,7 +117,7 @@ func TestCheck(t *testing.T) {
 			types.ChainIDFromUInt64(1),
 			1,
 			1,
-			backendTypes.TruncatedHash{1, 2, 3},
+			common.Hash{1, 2, 3},
 			containsResponse{entrydb.EntryIdx(6), nil},
 			true,
 		},
@@ -128,7 +128,7 @@ func TestCheck(t *testing.T) {
 			types.ChainIDFromUInt64(1),
 			1,
 			1,
-			backendTypes.TruncatedHash{1, 2, 3},
+			common.Hash{1, 2, 3},
 			containsResponse{entrydb.EntryIdx(3), nil},
 			true,
 		},
@@ -139,7 +139,7 @@ func TestCheck(t *testing.T) {
 			types.ChainIDFromUInt64(1),
 			1,
 			1,
-			backendTypes.TruncatedHash{1, 2, 3},
+			common.Hash{1, 2, 3},
 			containsResponse{entrydb.EntryIdx(1), nil},
 			true,
 		},
@@ -150,7 +150,7 @@ func TestCheck(t *testing.T) {
 			types.ChainIDFromUInt64(1),
 			1,
 			1,
-			backendTypes.TruncatedHash{1, 2, 3},
+			common.Hash{1, 2, 3},
 			containsResponse{entrydb.EntryIdx(1), logs.ErrConflict},
 			false,
 		},
@@ -161,7 +161,7 @@ func TestCheck(t *testing.T) {
 			types.ChainIDFromUInt64(1),
 			1,
 			1,
-			backendTypes.TruncatedHash{1, 2, 3},
+			common.Hash{1, 2, 3},
 			containsResponse{entrydb.EntryIdx(100), nil},
 			false,
 		},
@@ -172,7 +172,7 @@ func TestCheck(t *testing.T) {
 			types.ChainIDFromUInt64(1),
 			1,
 			1,
-			backendTypes.TruncatedHash{1, 2, 3},
+			common.Hash{1, 2, 3},
 			containsResponse{entrydb.EntryIdx(5), nil},
 			false,
 		},
@@ -183,7 +183,7 @@ func TestCheck(t *testing.T) {
 			types.ChainIDFromUInt64(1),
 			1,
 			1,
-			backendTypes.TruncatedHash{1, 2, 3},
+			common.Hash{1, 2, 3},
 			containsResponse{entrydb.EntryIdx(3), nil},
 			false,
 		},
@@ -194,7 +194,7 @@ func TestCheck(t *testing.T) {
 			types.ChainIDFromUInt64(1),
 			1,
 			1,
-			backendTypes.TruncatedHash{1, 2, 3},
+			common.Hash{1, 2, 3},
 			containsResponse{entrydb.EntryIdx(0), errors.New("error")},
 			false,
 		},
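With the safety checkers now comparing full common.Hash values, a caller's verification path keeps the same shape. Below is a hedged sketch of how a consumer might use the updated Check signature; the reduced checker interface and the verifyLog helper are illustrative only, not part of this PR.

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"

	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)

// checker is the subset of the SafetyChecker interface (updated above) that this sketch needs.
type checker interface {
	Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool
	SafetyLevel() types.SafetyLevel
}

// verifyLog reports whether the log identified by (blockNum, logIdx, logHash) meets the
// checker's safety level. The full 32-byte log hash is passed through unchanged; no
// 20-byte truncation happens on this path anymore.
func verifyLog(c checker, chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error {
	if !c.Check(chain, blockNum, logIdx, logHash) {
		return fmt.Errorf("log %d:%d (%s) does not satisfy %s on chain %v", blockNum, logIdx, logHash, c.SafetyLevel(), chain)
	}
	return nil
}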
"github.com/ethereum/go-ethereum/common" @@ -48,20 +47,20 @@ func NewCrossL2Inbox() *CrossL2Inbox { } } -func (i *CrossL2Inbox) DecodeExecutingMessageLog(l *ethTypes.Log) (backendTypes.ExecutingMessage, error) { +func (i *CrossL2Inbox) DecodeExecutingMessageLog(l *ethTypes.Log) (types.ExecutingMessage, error) { if l.Address != i.contract.Addr() { - return backendTypes.ExecutingMessage{}, fmt.Errorf("%w: log not from CrossL2Inbox", ErrEventNotFound) + return types.ExecutingMessage{}, fmt.Errorf("%w: log not from CrossL2Inbox", ErrEventNotFound) } // use DecodeEvent to check the name of the event // but the actual decoding is done manually to extract the contract identifier name, _, err := i.contract.DecodeEvent(l) if errors.Is(err, batching.ErrUnknownEvent) { - return backendTypes.ExecutingMessage{}, fmt.Errorf("%w: %v", ErrEventNotFound, err.Error()) + return types.ExecutingMessage{}, fmt.Errorf("%w: %v", ErrEventNotFound, err.Error()) } else if err != nil { - return backendTypes.ExecutingMessage{}, fmt.Errorf("failed to decode event: %w", err) + return types.ExecutingMessage{}, fmt.Errorf("failed to decode event: %w", err) } if name != eventExecutingMessage { - return backendTypes.ExecutingMessage{}, fmt.Errorf("%w: event %v not an ExecutingMessage event", ErrEventNotFound, name) + return types.ExecutingMessage{}, fmt.Errorf("%w: event %v not an ExecutingMessage event", ErrEventNotFound, name) } // the second topic is the hash of the payload (the first is the event ID) msgHash := l.Topics[1] @@ -69,14 +68,14 @@ func (i *CrossL2Inbox) DecodeExecutingMessageLog(l *ethTypes.Log) (backendTypes. identifierBytes := bytes.NewReader(l.Data[32:]) identifier, err := identifierFromBytes(identifierBytes) if err != nil { - return backendTypes.ExecutingMessage{}, fmt.Errorf("failed to read contract identifier: %w", err) + return types.ExecutingMessage{}, fmt.Errorf("failed to read contract identifier: %w", err) } chainID, err := types.ChainIDFromBig(identifier.ChainId).ToUInt32() if err != nil { - return backendTypes.ExecutingMessage{}, fmt.Errorf("failed to convert chain ID %v to uint32: %w", identifier.ChainId, err) + return types.ExecutingMessage{}, fmt.Errorf("failed to convert chain ID %v to uint32: %w", identifier.ChainId, err) } hash := payloadHashToLogHash(msgHash, identifier.Origin) - return backendTypes.ExecutingMessage{ + return types.ExecutingMessage{ Chain: chainID, Hash: hash, BlockNum: identifier.BlockNumber.Uint64(), @@ -126,9 +125,9 @@ func identifierFromBytes(identifierBytes io.Reader) (contractIdentifier, error) // to the log the referenced initiating message. // TODO: this function is duplicated between contracts and backend/source/log_processor.go // to avoid a circular dependency. It should be reorganized to avoid this duplication. -func payloadHashToLogHash(payloadHash common.Hash, addr common.Address) backendTypes.TruncatedHash { +func payloadHashToLogHash(payloadHash common.Hash, addr common.Address) common.Hash { msg := make([]byte, 0, 2*common.HashLength) msg = append(msg, addr.Bytes()...) msg = append(msg, payloadHash.Bytes()...) 
diff --git a/op-supervisor/supervisor/backend/source/contracts/l2inbox_test.go b/op-supervisor/supervisor/backend/source/contracts/l2inbox_test.go
index b343519a48fc..302b188e5cdf 100644
--- a/op-supervisor/supervisor/backend/source/contracts/l2inbox_test.go
+++ b/op-supervisor/supervisor/backend/source/contracts/l2inbox_test.go
@@ -7,7 +7,7 @@ import (
 
 	"github.com/ethereum-optimism/optimism/op-service/predeploys"
 	"github.com/ethereum-optimism/optimism/op-service/sources/batching"
-	backendTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types"
+	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
 	"github.com/ethereum-optimism/optimism/packages/contracts-bedrock/snapshots"
 	"github.com/ethereum/go-ethereum/common"
 	ethTypes "github.com/ethereum/go-ethereum/core/types"
@@ -19,7 +19,7 @@ func TestDecodeExecutingMessageEvent(t *testing.T) {
 	inbox := NewCrossL2Inbox()
 	payload := bytes.Repeat([]byte{0xaa, 0xbb}, 50)
 	payloadHash := crypto.Keccak256Hash(payload)
-	expected := backendTypes.ExecutingMessage{
+	expected := types.ExecutingMessage{
 		Chain:    42424,
 		BlockNum: 12345,
 		LogIdx:   98,
diff --git a/op-supervisor/supervisor/backend/source/log_processor.go b/op-supervisor/supervisor/backend/source/log_processor.go
index 1a23d149216a..1c20f8c4530a 100644
--- a/op-supervisor/supervisor/backend/source/log_processor.go
+++ b/op-supervisor/supervisor/backend/source/log_processor.go
@@ -11,26 +11,25 @@ import (
 
 	"github.com/ethereum-optimism/optimism/op-service/eth"
 	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/source/contracts"
-	backendTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types"
-	supTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
+	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
 )
 
 type LogStorage interface {
-	SealBlock(chain supTypes.ChainID, parentHash common.Hash, block eth.BlockID, timestamp uint64) error
-	AddLog(chain supTypes.ChainID, logHash backendTypes.TruncatedHash, parentBlock eth.BlockID, logIdx uint32, execMsg *backendTypes.ExecutingMessage) error
+	SealBlock(chain types.ChainID, parentHash common.Hash, block eth.BlockID, timestamp uint64) error
+	AddLog(chain types.ChainID, logHash common.Hash, parentBlock eth.BlockID, logIdx uint32, execMsg *types.ExecutingMessage) error
 }
 
 type EventDecoder interface {
-	DecodeExecutingMessageLog(log *ethTypes.Log) (backendTypes.ExecutingMessage, error)
+	DecodeExecutingMessageLog(log *ethTypes.Log) (types.ExecutingMessage, error)
 }
 
 type logProcessor struct {
-	chain        supTypes.ChainID
+	chain        types.ChainID
 	logStore     LogStorage
 	eventDecoder EventDecoder
 }
 
-func newLogProcessor(chain supTypes.ChainID, logStore LogStorage) *logProcessor {
+func newLogProcessor(chain types.ChainID, logStore LogStorage) *logProcessor {
 	return &logProcessor{
 		chain:        chain,
 		logStore:     logStore,
@@ -45,7 +44,7 @@ func (p *logProcessor) ProcessLogs(_ context.Context, block eth.L1BlockRef, rcpt
 	for _, l := range rcpt.Logs {
 		// log hash represents the hash of *this* log as a potentially initiating message
 		logHash := logToLogHash(l)
-		var execMsg *backendTypes.ExecutingMessage
+		var execMsg *types.ExecutingMessage
 		msg, err := p.eventDecoder.DecodeExecutingMessageLog(l)
 		if err != nil && !errors.Is(err, contracts.ErrEventNotFound) {
 			return fmt.Errorf("failed to decode executing message log: %w", err)
@@ -72,7 +71,7 @@ func (p *logProcessor) ProcessLogs(_ context.Context, block eth.L1BlockRef, rcpt
 // which is then hashed again. This is the hash that is stored in the log storage.
 // The address is hashed into the payload hash to save space in the log storage,
 // and because they represent paired data.
-func logToLogHash(l *ethTypes.Log) backendTypes.TruncatedHash {
+func logToLogHash(l *ethTypes.Log) common.Hash {
 	payloadHash := crypto.Keccak256(logToMessagePayload(l))
 	return payloadHashToLogHash(common.Hash(payloadHash), l.Address)
 }
@@ -94,9 +93,9 @@ func logToMessagePayload(l *ethTypes.Log) []byte {
 // which is then hashed. This is the hash that is stored in the log storage.
 // The logHash can then be used to traverse from the executing message
 // to the log the referenced initiating message.
-func payloadHashToLogHash(payloadHash common.Hash, addr common.Address) backendTypes.TruncatedHash {
+func payloadHashToLogHash(payloadHash common.Hash, addr common.Address) common.Hash {
 	msg := make([]byte, 0, 2*common.HashLength)
 	msg = append(msg, addr.Bytes()...)
 	msg = append(msg, payloadHash.Bytes()...)
-	return backendTypes.TruncateHash(crypto.Keccak256Hash(msg))
+	return crypto.Keccak256Hash(msg)
 }
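ProcessLogs above computes a full-width log hash for every receipt log and only attaches an ExecutingMessage when the log decodes as one. The following is a loose sketch of that wiring; the part of ProcessLogs that actually calls AddLog is not visible in this hunk, so the parentBlock and log-index arguments here are assumptions, and storeLog and hashFn are hypothetical names standing in for the unexported logToLogHash path.

package example

import (
	"errors"

	"github.com/ethereum/go-ethereum/common"
	ethTypes "github.com/ethereum/go-ethereum/core/types"

	"github.com/ethereum-optimism/optimism/op-service/eth"
	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/source"
	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/source/contracts"
	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)

// storeLog derives the log hash, attaches an ExecutingMessage only when the log
// decodes as one (ErrEventNotFound means "not an executing message"), and hands
// both to storage as a full common.Hash.
func storeLog(store source.LogStorage, decoder source.EventDecoder, hashFn func(*ethTypes.Log) common.Hash,
	chain types.ChainID, parentBlock eth.BlockID, l *ethTypes.Log) error {
	logHash := hashFn(l) // hash of *this* log as a potentially initiating message
	var execMsg *types.ExecutingMessage
	msg, err := decoder.DecodeExecutingMessageLog(l)
	if err == nil {
		execMsg = &msg // the log is itself an executing message referencing another log
	} else if !errors.Is(err, contracts.ErrEventNotFound) {
		return err
	}
	return store.AddLog(chain, logHash, parentBlock, uint32(l.Index), execMsg)
}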
diff --git a/op-supervisor/supervisor/backend/source/log_processor_test.go b/op-supervisor/supervisor/backend/source/log_processor_test.go
index 01d274aa57ee..bd7aa7abc3d1 100644
--- a/op-supervisor/supervisor/backend/source/log_processor_test.go
+++ b/op-supervisor/supervisor/backend/source/log_processor_test.go
@@ -7,14 +7,13 @@ import (
 
 	"github.com/ethereum-optimism/optimism/op-service/eth"
 	"github.com/ethereum-optimism/optimism/op-service/predeploys"
-	backendTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types"
-	supTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
+	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
 	"github.com/ethereum/go-ethereum/common"
 	ethTypes "github.com/ethereum/go-ethereum/core/types"
 	"github.com/stretchr/testify/require"
 )
 
-var logProcessorChainID = supTypes.ChainIDFromUInt64(4)
+var logProcessorChainID = types.ChainIDFromUInt64(4)
 
 func TestLogProcessor(t *testing.T) {
 	ctx := context.Background()
@@ -108,16 +107,16 @@ func TestLogProcessor(t *testing.T) {
 				},
 			},
 		}
-		execMsg := backendTypes.ExecutingMessage{
+		execMsg := types.ExecutingMessage{
 			Chain:     4,
 			BlockNum:  6,
 			LogIdx:    8,
 			Timestamp: 10,
-			Hash:      backendTypes.TruncatedHash{0xaa},
+			Hash:      common.Hash{0xaa},
 		}
 		store := &stubLogStorage{}
-		processor := newLogProcessor(supTypes.ChainID{4}, store)
-		processor.eventDecoder = EventDecoderFn(func(l *ethTypes.Log) (backendTypes.ExecutingMessage, error) {
+		processor := newLogProcessor(types.ChainID{4}, store)
+		processor.eventDecoder = EventDecoderFn(func(l *ethTypes.Log) (types.ExecutingMessage, error) {
 			require.Equal(t, rcpts[0].Logs[0], l)
 			return execMsg, nil
 		})
@@ -182,7 +181,7 @@ func TestToLogHash(t *testing.T) {
 	refHash := logToLogHash(mkLog())
 	// The log hash is stored in the database so test that it matches the actual value.
 	// If this changes, compatibility with existing databases may be affected
-	expectedRefHash := backendTypes.TruncateHash(common.HexToHash("0x4e1dc08fddeb273275f787762cdfe945cf47bb4e80a1fabbc7a825801e81b73f"))
+	expectedRefHash := common.HexToHash("0x4e1dc08fddeb273275f787762cdfe945cf47bb4e80a1fabbc7a825801e81b73f")
 	require.Equal(t, expectedRefHash, refHash, "reference hash changed, check that database compatibility is not broken")
 
 	// Check that the hash is changed when any data it should include changes
@@ -206,7 +205,7 @@ type stubLogStorage struct {
 	seals []storedSeal
 }
 
-func (s *stubLogStorage) SealBlock(chainID supTypes.ChainID, parentHash common.Hash, block eth.BlockID, timestamp uint64) error {
+func (s *stubLogStorage) SealBlock(chainID types.ChainID, parentHash common.Hash, block eth.BlockID, timestamp uint64) error {
 	if logProcessorChainID != chainID {
 		return fmt.Errorf("chain id mismatch, expected %v but got %v", logProcessorChainID, chainID)
 	}
@@ -218,7 +217,7 @@ func (s *stubLogStorage) SealBlock(chainID supTypes.ChainID, parentHash common.H
 	return nil
 }
 
-func (s *stubLogStorage) AddLog(chainID supTypes.ChainID, logHash backendTypes.TruncatedHash, parentBlock eth.BlockID, logIdx uint32, execMsg *backendTypes.ExecutingMessage) error {
+func (s *stubLogStorage) AddLog(chainID types.ChainID, logHash common.Hash, parentBlock eth.BlockID, logIdx uint32, execMsg *types.ExecutingMessage) error {
 	if logProcessorChainID != chainID {
 		return fmt.Errorf("chain id mismatch, expected %v but got %v", logProcessorChainID, chainID)
 	}
@@ -240,12 +239,12 @@ type storedSeal struct {
 type storedLog struct {
 	parent  eth.BlockID
 	logIdx  uint32
-	logHash backendTypes.TruncatedHash
-	execMsg *backendTypes.ExecutingMessage
+	logHash common.Hash
+	execMsg *types.ExecutingMessage
 }
 
-type EventDecoderFn func(*ethTypes.Log) (backendTypes.ExecutingMessage, error)
+type EventDecoderFn func(*ethTypes.Log) (types.ExecutingMessage, error)
 
-func (f EventDecoderFn) DecodeExecutingMessageLog(log *ethTypes.Log) (backendTypes.ExecutingMessage, error) {
+func (f EventDecoderFn) DecodeExecutingMessageLog(log *ethTypes.Log) (types.ExecutingMessage, error) {
 	return f(log)
 }
diff --git a/op-supervisor/supervisor/backend/types/types.go b/op-supervisor/supervisor/backend/types/types.go
deleted file mode 100644
index cf28120a34ee..000000000000
--- a/op-supervisor/supervisor/backend/types/types.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package types
-
-import (
-	"encoding/hex"
-
-	"github.com/ethereum/go-ethereum/common"
-)
-
-type TruncatedHash [20]byte
-
-func TruncateHash(hash common.Hash) TruncatedHash {
-	var truncated TruncatedHash
-	copy(truncated[:], hash[0:20])
-	return truncated
-}
-
-func (h TruncatedHash) String() string {
-	return hex.EncodeToString(h[:])
-}
-
-type ExecutingMessage struct {
-	Chain     uint32
-	BlockNum  uint64
-	LogIdx    uint32
-	Timestamp uint64
-	Hash      TruncatedHash
-}
diff --git a/op-supervisor/supervisor/types/types.go b/op-supervisor/supervisor/types/types.go
index 54b1a8a0c92c..b035e26abcef 100644
--- a/op-supervisor/supervisor/types/types.go
+++ b/op-supervisor/supervisor/types/types.go
@@ -13,6 +13,14 @@ import (
 	"github.com/ethereum/go-ethereum/common/hexutil"
 )
 
+type ExecutingMessage struct {
+	Chain     uint32 // same as ChainID for now, but will be indirect, i.e. translated to full ID, later
+	BlockNum  uint64
+	LogIdx    uint32
+	Timestamp uint64
+	Hash      common.Hash
+}
+
 type Message struct {
 	Identifier  Identifier  `json:"identifier"`
 	PayloadHash common.Hash `json:"payloadHash"`