diff --git a/.mockery.yaml b/.mockery.yaml index 2d68074111..31883ab545 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -58,3 +58,15 @@ packages: dir: ./block/internal/syncing pkgname: syncing filename: syncer_mock.go + github.com/evstack/ev-node/block/internal/common: + interfaces: + Broadcaster: + config: + dir: ./block/internal/common + pkgname: common + filename: broadcaster_mock.go + p2pHandler: + config: + dir: ./block/internal/syncing + pkgname: syncing + filename: syncer_mock.go diff --git a/block/components.go b/block/components.go index a157817703..d1c0084da2 100644 --- a/block/components.go +++ b/block/components.go @@ -5,10 +5,10 @@ import ( "errors" "fmt" - goheader "github.com/celestiaorg/go-header" "github.com/rs/zerolog" "github.com/evstack/ev-node/block/internal/cache" + "github.com/evstack/ev-node/block/internal/common" "github.com/evstack/ev-node/block/internal/executing" "github.com/evstack/ev-node/block/internal/reaping" "github.com/evstack/ev-node/block/internal/submitting" @@ -122,11 +122,6 @@ func (bc *Components) Stop() error { return errs } -// broadcaster interface for P2P broadcasting -type broadcaster[T any] interface { - WriteToStoreAndBroadcast(ctx context.Context, payload T) error -} - // NewSyncComponents creates components for a non-aggregator full node that can only sync blocks. // Non-aggregator full nodes can sync from P2P and DA but cannot produce blocks or submit to DA. // They have more sync capabilities than light nodes but no block production. No signer required. 
@@ -136,8 +131,8 @@ func NewSyncComponents( store store.Store, exec coreexecutor.Executor, da coreda.DA, - headerStore goheader.Store[*types.SignedHeader], - dataStore goheader.Store[*types.Data], + headerStore common.Broadcaster[*types.SignedHeader], + dataStore common.Broadcaster[*types.Data], logger zerolog.Logger, metrics *Metrics, blockOpts BlockOptions, @@ -199,8 +194,8 @@ func NewAggregatorComponents( sequencer coresequencer.Sequencer, da coreda.DA, signer signer.Signer, - headerBroadcaster broadcaster[*types.SignedHeader], - dataBroadcaster broadcaster[*types.Data], + headerBroadcaster common.Broadcaster[*types.SignedHeader], + dataBroadcaster common.Broadcaster[*types.Data], logger zerolog.Logger, metrics *Metrics, blockOpts BlockOptions, diff --git a/block/internal/common/broadcaster_mock.go b/block/internal/common/broadcaster_mock.go new file mode 100644 index 0000000000..2cd41fd81d --- /dev/null +++ b/block/internal/common/broadcaster_mock.go @@ -0,0 +1,142 @@ +// Code generated by mockery; DO NOT EDIT. +// github.com/vektra/mockery +// template: testify + +package common + +import ( + "context" + + "github.com/celestiaorg/go-header" + mock "github.com/stretchr/testify/mock" +) + +// NewMockBroadcaster creates a new instance of MockBroadcaster. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockBroadcaster[H header.Header[H]](t interface { + mock.TestingT + Cleanup(func()) +}) *MockBroadcaster[H] { + mock := &MockBroadcaster[H]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// MockBroadcaster is an autogenerated mock type for the Broadcaster type +type MockBroadcaster[H header.Header[H]] struct { + mock.Mock +} + +type MockBroadcaster_Expecter[H header.Header[H]] struct { + mock *mock.Mock +} + +func (_m *MockBroadcaster[H]) EXPECT() *MockBroadcaster_Expecter[H] { + return &MockBroadcaster_Expecter[H]{mock: &_m.Mock} +} + +// Store provides a mock function for the type MockBroadcaster +func (_mock *MockBroadcaster[H]) Store() header.Store[H] { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for Store") + } + + var r0 header.Store[H] + if returnFunc, ok := ret.Get(0).(func() header.Store[H]); ok { + r0 = returnFunc() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(header.Store[H]) + } + } + return r0 +} + +// MockBroadcaster_Store_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Store' +type MockBroadcaster_Store_Call[H header.Header[H]] struct { + *mock.Call +} + +// Store is a helper method to define mock.On call +func (_e *MockBroadcaster_Expecter[H]) Store() *MockBroadcaster_Store_Call[H] { + return &MockBroadcaster_Store_Call[H]{Call: _e.mock.On("Store")} +} + +func (_c *MockBroadcaster_Store_Call[H]) Run(run func()) *MockBroadcaster_Store_Call[H] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockBroadcaster_Store_Call[H]) Return(store header.Store[H]) *MockBroadcaster_Store_Call[H] { + _c.Call.Return(store) + return _c +} + +func (_c *MockBroadcaster_Store_Call[H]) RunAndReturn(run func() header.Store[H]) *MockBroadcaster_Store_Call[H] { + _c.Call.Return(run) + return _c +} + +// WriteToStoreAndBroadcast provides a mock function for the type MockBroadcaster +func 
(_mock *MockBroadcaster[H]) WriteToStoreAndBroadcast(ctx context.Context, payload H) error { + ret := _mock.Called(ctx, payload) + + if len(ret) == 0 { + panic("no return value specified for WriteToStoreAndBroadcast") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(context.Context, H) error); ok { + r0 = returnFunc(ctx, payload) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockBroadcaster_WriteToStoreAndBroadcast_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WriteToStoreAndBroadcast' +type MockBroadcaster_WriteToStoreAndBroadcast_Call[H header.Header[H]] struct { + *mock.Call +} + +// WriteToStoreAndBroadcast is a helper method to define mock.On call +// - ctx context.Context +// - payload H +func (_e *MockBroadcaster_Expecter[H]) WriteToStoreAndBroadcast(ctx interface{}, payload interface{}) *MockBroadcaster_WriteToStoreAndBroadcast_Call[H] { + return &MockBroadcaster_WriteToStoreAndBroadcast_Call[H]{Call: _e.mock.On("WriteToStoreAndBroadcast", ctx, payload)} +} + +func (_c *MockBroadcaster_WriteToStoreAndBroadcast_Call[H]) Run(run func(ctx context.Context, payload H)) *MockBroadcaster_WriteToStoreAndBroadcast_Call[H] { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 H + if args[1] != nil { + arg1 = args[1].(H) + } + run( + arg0, + arg1, + ) + }) + return _c +} + +func (_c *MockBroadcaster_WriteToStoreAndBroadcast_Call[H]) Return(err error) *MockBroadcaster_WriteToStoreAndBroadcast_Call[H] { + _c.Call.Return(err) + return _c +} + +func (_c *MockBroadcaster_WriteToStoreAndBroadcast_Call[H]) RunAndReturn(run func(ctx context.Context, payload H) error) *MockBroadcaster_WriteToStoreAndBroadcast_Call[H] { + _c.Call.Return(run) + return _c +} diff --git a/block/internal/common/event.go b/block/internal/common/event.go index 227b1ed391..69d0300f9f 100644 --- a/block/internal/common/event.go +++ 
b/block/internal/common/event.go @@ -2,10 +2,22 @@ package common import "github.com/evstack/ev-node/types" +// EventSource represents the origin of a block event +type EventSource string + +const ( + // SourceDA indicates the event came from the DA layer + SourceDA EventSource = "DA" + // SourceP2P indicates the event came from P2P network + SourceP2P EventSource = "P2P" +) + // DAHeightEvent represents a DA event for caching type DAHeightEvent struct { Header *types.SignedHeader Data *types.Data // DaHeight corresponds to the highest DA included height between the Header and Data. DaHeight uint64 + // Source indicates where this event originated from (DA or P2P) + Source EventSource } diff --git a/block/internal/common/expected_interfaces.go b/block/internal/common/expected_interfaces.go new file mode 100644 index 0000000000..eaeebda438 --- /dev/null +++ b/block/internal/common/expected_interfaces.go @@ -0,0 +1,13 @@ +package common + +import ( + "context" + + "github.com/celestiaorg/go-header" +) + +// Broadcaster writes payloads to the local go-header store and broadcasts them to the P2P network; Store exposes the underlying store. +type Broadcaster[H header.Header[H]] interface { + WriteToStoreAndBroadcast(ctx context.Context, payload H) error + Store() header.Store[H] +} diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index 17e393eeb7..00259b7050 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -24,11 +24,6 @@ import ( "github.com/evstack/ev-node/types" ) -// broadcaster interface for P2P broadcasting -type broadcaster[T any] interface { - WriteToStoreAndBroadcast(ctx context.Context, payload T) error -} - // Executor handles block production, transaction processing, and state management type Executor struct { // Core components @@ -42,8 +37,8 @@ type Executor struct { metrics *common.Metrics // Broadcasting - headerBroadcaster broadcaster[*types.SignedHeader] + dataBroadcaster common.Broadcaster[*types.Data] // Configuration config config.Config @@ -81,8 +76,8 @@ func NewExecutor( metrics *common.Metrics, config config.Config, genesis genesis.Genesis, - headerBroadcaster broadcaster[*types.SignedHeader], - dataBroadcaster broadcaster[*types.Data], + headerBroadcaster common.Broadcaster[*types.SignedHeader], + dataBroadcaster common.Broadcaster[*types.Data], logger zerolog.Logger, options common.BlockOptions, errorCh chan<- error, diff --git a/block/internal/executing/executor_lazy_test.go b/block/internal/executing/executor_lazy_test.go index 3609f10c55..b72f0a856b 100644 --- a/block/internal/executing/executor_lazy_test.go +++ b/block/internal/executing/executor_lazy_test.go @@ -47,8 +47,10 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := &mockBroadcaster[*types.SignedHeader]{} - db := &mockBroadcaster[*types.Data]{} + hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() + db := common.NewMockBroadcaster[*types.Data](t) + db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( memStore, @@ -155,8 +157,10 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := &mockBroadcaster[*types.SignedHeader]{} - db := &mockBroadcaster[*types.Data]{} + hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() + db := common.NewMockBroadcaster[*types.Data](t) + db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( memStore, diff --git a/block/internal/executing/executor_logic_test.go b/block/internal/executing/executor_logic_test.go index 
+ dataBroadcaster common.Broadcaster[*types.Data] // Configuration config config.Config @@ -81,8 +76,8 @@ func NewExecutor( metrics *common.Metrics, config config.Config, genesis genesis.Genesis, - headerBroadcaster broadcaster[*types.SignedHeader], - dataBroadcaster broadcaster[*types.Data], + headerBroadcaster common.Broadcaster[*types.SignedHeader], + dataBroadcaster common.Broadcaster[*types.Data], logger zerolog.Logger, options common.BlockOptions, errorCh chan<- error, diff --git a/block/internal/executing/executor_lazy_test.go b/block/internal/executing/executor_lazy_test.go index 3609f10c55..b72f0a856b 100644 --- a/block/internal/executing/executor_lazy_test.go +++ b/block/internal/executing/executor_lazy_test.go @@ -47,8 +47,10 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := &mockBroadcaster[*types.SignedHeader]{} - db := &mockBroadcaster[*types.Data]{} + hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() + db := common.NewMockBroadcaster[*types.Data](t) + db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( memStore, @@ -155,8 +157,10 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := &mockBroadcaster[*types.SignedHeader]{} - db := &mockBroadcaster[*types.Data]{} + hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() + db := common.NewMockBroadcaster[*types.Data](t) + db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( memStore, diff --git a/block/internal/executing/executor_logic_test.go b/block/internal/executing/executor_logic_test.go index 
2ddd80996c..da96f59276 100644 --- a/block/internal/executing/executor_logic_test.go +++ b/block/internal/executing/executor_logic_test.go @@ -67,9 +67,11 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - // Broadcasters are required by produceBlock; use simple mocks - hb := &mockBroadcaster[*types.SignedHeader]{} - db := &mockBroadcaster[*types.Data]{} + // Broadcasters are required by produceBlock; use generated mocks + hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() + db := common.NewMockBroadcaster[*types.Data](t) + db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( memStore, @@ -126,8 +128,7 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { assert.EqualValues(t, common.DataHashForEmptyTxs, sh.DataHash) // Broadcasters should have been called with the produced header and data - assert.True(t, hb.called) - assert.True(t, db.called) + // The testify mock framework tracks calls automatically } func TestPendingLimit_SkipsProduction(t *testing.T) { @@ -154,8 +155,10 @@ func TestPendingLimit_SkipsProduction(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := &mockBroadcaster[*types.SignedHeader]{} - db := &mockBroadcaster[*types.Data]{} + hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() + db := common.NewMockBroadcaster[*types.Data](t) + db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( memStore, diff --git a/block/internal/executing/executor_restart_test.go b/block/internal/executing/executor_restart_test.go index f4b300df4b..88b3618e6c 100644 --- 
a/block/internal/executing/executor_restart_test.go +++ b/block/internal/executing/executor_restart_test.go @@ -47,8 +47,10 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { // Create first executor instance mockExec1 := testmocks.NewMockExecutor(t) mockSeq1 := testmocks.NewMockSequencer(t) - hb1 := &mockBroadcaster[*types.SignedHeader]{} - db1 := &mockBroadcaster[*types.Data]{} + hb1 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() + db1 := common.NewMockBroadcaster[*types.Data](t) + db1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec1, err := NewExecutor( memStore, @@ -165,8 +167,10 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { // Create second executor instance (restart scenario) mockExec2 := testmocks.NewMockExecutor(t) mockSeq2 := testmocks.NewMockSequencer(t) - hb2 := &mockBroadcaster[*types.SignedHeader]{} - db2 := &mockBroadcaster[*types.Data]{} + hb2 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() + db2 := common.NewMockBroadcaster[*types.Data](t) + db2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec2, err := NewExecutor( memStore, // same store @@ -225,8 +229,7 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { assert.Equal(t, pendingData.DACommitment(), finalHeader.DataHash) // Verify broadcasters were called with the pending block data - assert.True(t, hb2.called, "header broadcaster should be called") - assert.True(t, db2.called, "data broadcaster should be called") + // The testify mock framework tracks calls automatically // Verify the executor state was updated correctly finalState := exec2.getLastState() @@ -262,8 +265,10 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { // Create first executor and produce one block mockExec1 := 
testmocks.NewMockExecutor(t) mockSeq1 := testmocks.NewMockSequencer(t) - hb1 := &mockBroadcaster[*types.SignedHeader]{} - db1 := &mockBroadcaster[*types.Data]{} + hb1 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() + db1 := common.NewMockBroadcaster[*types.Data](t) + db1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec1, err := NewExecutor( memStore, @@ -312,8 +317,10 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { // Create second executor (restart) mockExec2 := testmocks.NewMockExecutor(t) mockSeq2 := testmocks.NewMockSequencer(t) - hb2 := &mockBroadcaster[*types.SignedHeader]{} - db2 := &mockBroadcaster[*types.Data]{} + hb2 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() + db2 := common.NewMockBroadcaster[*types.Data](t) + db2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec2, err := NewExecutor( memStore, diff --git a/block/internal/executing/executor_test.go b/block/internal/executing/executor_test.go index b5f9e2f47a..e310c6d40d 100644 --- a/block/internal/executing/executor_test.go +++ b/block/internal/executing/executor_test.go @@ -19,18 +19,6 @@ import ( "github.com/evstack/ev-node/types" ) -// mockBroadcaster for testing -type mockBroadcaster[T any] struct { - called bool - payload T -} - -func (m *mockBroadcaster[T]) WriteToStoreAndBroadcast(ctx context.Context, payload T) error { - m.called = true - m.payload = payload - return nil -} - func TestExecutor_BroadcasterIntegration(t *testing.T) { // Create in-memory store ds := sync.MutexWrap(datastore.NewMapDatastore()) @@ -52,8 +40,8 @@ func TestExecutor_BroadcasterIntegration(t *testing.T) { } // Create mock broadcasters - headerBroadcaster := &mockBroadcaster[*types.SignedHeader]{} - dataBroadcaster := 
&mockBroadcaster[*types.Data]{} + headerBroadcaster := common.NewMockBroadcaster[*types.SignedHeader](t) + dataBroadcaster := common.NewMockBroadcaster[*types.Data](t) // Create executor with broadcasters executor, err := NewExecutor( @@ -137,9 +125,9 @@ func TestExecutor_BroadcastFlow(t *testing.T) { // This test demonstrates how the broadcast flow works // when an Executor produces a block - // Create mock broadcasters that track calls - headerBroadcaster := &mockBroadcaster[*types.SignedHeader]{} - dataBroadcaster := &mockBroadcaster[*types.Data]{} + // Create mock broadcasters + headerBroadcaster := common.NewMockBroadcaster[*types.SignedHeader](t) + dataBroadcaster := common.NewMockBroadcaster[*types.Data](t) // Create sample data that would be broadcast sampleHeader := &types.SignedHeader{ @@ -164,16 +152,16 @@ func TestExecutor_BroadcastFlow(t *testing.T) { // Test broadcast calls ctx := context.Background() + // Set up expectations + headerBroadcaster.EXPECT().WriteToStoreAndBroadcast(ctx, sampleHeader).Return(nil).Once() + dataBroadcaster.EXPECT().WriteToStoreAndBroadcast(ctx, sampleData).Return(nil).Once() + // Simulate what happens in produceBlock() after block creation err := headerBroadcaster.WriteToStoreAndBroadcast(ctx, sampleHeader) require.NoError(t, err) - assert.True(t, headerBroadcaster.called, "header broadcaster should be called") err = dataBroadcaster.WriteToStoreAndBroadcast(ctx, sampleData) require.NoError(t, err) - assert.True(t, dataBroadcaster.called, "data broadcaster should be called") - // Verify the correct data was passed to broadcasters - assert.Equal(t, sampleHeader, headerBroadcaster.payload) - assert.Equal(t, sampleData, dataBroadcaster.payload) + // Verify expectations were met (automatically checked by testify mock on cleanup) } diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index 40248019a4..dcb7f2047b 100644 --- a/block/internal/syncing/da_retriever.go +++ 
b/block/internal/syncing/da_retriever.go @@ -207,6 +207,7 @@ func (r *DARetriever) processBlobs(ctx context.Context, blobs [][]byte, daHeight Header: header, Data: data, DaHeight: daHeight, + Source: common.SourceDA, } events = append(events, event) diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index cfd72dc40e..eedbc97bb4 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -3,7 +3,9 @@ package syncing import ( "bytes" "context" + "errors" "fmt" + "time" goheader "github.com/celestiaorg/go-header" "github.com/rs/zerolog" @@ -53,8 +55,18 @@ func (h *P2PHandler) ProcessHeaderRange(ctx context.Context, startHeight, endHei default: } - header, err := h.headerStore.GetByHeight(ctx, height) + // Create a timeout context for each GetByHeight call to prevent blocking + timeoutCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond) + header, err := h.headerStore.GetByHeight(timeoutCtx, height) + cancel() + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + h.logger.Debug().Uint64("height", height).Msg("timeout waiting for header from store, will retry later") + // Don't continue processing further heights if we timeout on one + // This prevents blocking on sequential heights + return + } h.logger.Debug().Uint64("height", height).Err(err).Msg("failed to get header from store") continue } @@ -65,20 +77,23 @@ func (h *P2PHandler) ProcessHeaderRange(ctx context.Context, startHeight, endHei continue } - // Get corresponding data + // Get corresponding data (empty data are still broadcasted by peers) var data *types.Data - if bytes.Equal(header.DataHash, common.DataHashForEmptyTxs) { - // Create empty data for headers with empty data hash - data = createEmptyDataForHeader(ctx, header) - } else { - // Try to get data from data store - retrievedData, err := h.dataStore.GetByHeight(ctx, height) - if err != nil { - h.logger.Debug().Uint64("height", 
height).Err(err).Msg("could not retrieve data for header from data store") + timeoutCtx, cancel = context.WithTimeout(ctx, 500*time.Millisecond) + retrievedData, err := h.dataStore.GetByHeight(timeoutCtx, height) + cancel() + + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + h.logger.Debug().Uint64("height", height).Msg("timeout waiting for data from store, will retry later") + // Don't continue processing if data is not available + // Skip this height; it will be retried on a later sync pass continue } - data = retrievedData + h.logger.Debug().Uint64("height", height).Err(err).Msg("could not retrieve data for header from data store") + continue } + data = retrievedData // further header validation (signature) is done in validateBlock. // we need to be sure that the previous block n-1 was executed before validating block n @@ -88,6 +103,7 @@ func (h *P2PHandler) ProcessHeaderRange(ctx context.Context, startHeight, endHei Header: header, Data: data, DaHeight: 0, // P2P events don't have DA height context + Source: common.SourceP2P, } select { @@ -113,15 +129,33 @@ func (h *P2PHandler) ProcessDataRange(ctx context.Context, startHeight, endHeigh default: } - data, err := h.dataStore.GetByHeight(ctx, height) + // Create a timeout context for each GetByHeight call to prevent blocking + timeoutCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond) + data, err := h.dataStore.GetByHeight(timeoutCtx, height) + cancel() + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + h.logger.Debug().Uint64("height", height).Msg("timeout waiting for data from store, will retry later") + // Don't continue processing further heights if we timeout on one + // This prevents blocking on sequential heights + return + } h.logger.Debug().Uint64("height", height).Err(err).Msg("failed to get data from store") continue } - // Get corresponding header - header, err := h.headerStore.GetByHeight(ctx, height) + // Get corresponding header with timeout + timeoutCtx, 
cancel = context.WithTimeout(ctx, 500*time.Millisecond) + header, err := h.headerStore.GetByHeight(timeoutCtx, height) + cancel() + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + h.logger.Debug().Uint64("height", height).Msg("timeout waiting for header from store, will retry later") + // Don't continue processing if header is not available + continue + } h.logger.Debug().Uint64("height", height).Err(err).Msg("could not retrieve header for data from header store") continue } @@ -140,6 +174,7 @@ func (h *P2PHandler) ProcessDataRange(ctx context.Context, startHeight, endHeigh Header: header, Data: data, DaHeight: 0, // P2P events don't have DA height context + Source: common.SourceP2P, } select { diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index 090ef4a7ee..c6d9ef6406 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -222,22 +222,6 @@ func TestP2PHandler_ProposerMismatch_Rejected(t *testing.T) { require.Len(t, events, 0) } -func TestP2PHandler_CreateEmptyDataForHeader(t *testing.T) { - p2pData := setupP2P(t) - ctx := context.Background() - - // Prepare a header at height 2 (previous height exists but will return error) - signedHeader := p2pMakeSignedHeader(t, p2pData.Genesis.ChainID, 2, p2pData.ProposerAddr, p2pData.ProposerPub, p2pData.Signer) - signedHeader.DataHash = common.DataHashForEmptyTxs - - emptyData := createEmptyDataForHeader(ctx, signedHeader) - require.NotNil(t, emptyData, "handler should synthesize empty data even when previous data is unavailable") - require.Equal(t, p2pData.Genesis.ChainID, emptyData.ChainID(), "synthesized data should carry header chain ID") - require.Equal(t, uint64(2), emptyData.Height(), "synthesized data should carry header height") - require.Equal(t, signedHeader.BaseHeader.Time, emptyData.Metadata.Time, "synthesized data should carry header time") - require.Equal(t, (types.Hash)(nil), 
emptyData.LastDataHash) -} - func TestP2PHandler_ProcessHeaderRange_MultipleHeightsHappyPath(t *testing.T) { p2pData := setupP2P(t) ctx := context.Background() diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index d1dc7253a5..0a9443c319 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -9,8 +9,8 @@ import ( "sync/atomic" "time" - goheader "github.com/celestiaorg/go-header" "github.com/rs/zerolog" + "golang.org/x/sync/errgroup" "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" @@ -54,8 +54,8 @@ type Syncer struct { daHeight *atomic.Uint64 // P2P stores - headerStore goheader.Store[*types.SignedHeader] - dataStore goheader.Store[*types.Data] + headerStore common.Broadcaster[*types.SignedHeader] + dataStore common.Broadcaster[*types.Data] // Channels for coordination heightInCh chan common.DAHeightEvent @@ -83,8 +83,8 @@ func NewSyncer( metrics *common.Metrics, config config.Config, genesis genesis.Genesis, - headerStore goheader.Store[*types.SignedHeader], - dataStore goheader.Store[*types.Data], + headerStore common.Broadcaster[*types.SignedHeader], + dataStore common.Broadcaster[*types.Data], logger zerolog.Logger, options common.BlockOptions, errorCh chan<- error, @@ -119,7 +119,7 @@ func (s *Syncer) Start(ctx context.Context) error { // Initialize handlers s.daRetriever = NewDARetriever(s.da, s.cache, s.config, s.genesis, s.logger) - s.p2pHandler = NewP2PHandler(s.headerStore, s.dataStore, s.cache, s.genesis, s.logger) + s.p2pHandler = NewP2PHandler(s.headerStore.Store(), s.dataStore.Store(), s.cache, s.genesis, s.logger) // Start main processing loop s.wg.Add(1) @@ -323,13 +323,13 @@ func (s *Syncer) tryFetchFromP2P() { } // Process headers - newHeaderHeight := s.headerStore.Height() + newHeaderHeight := s.headerStore.Store().Height() if newHeaderHeight > currentHeight { s.p2pHandler.ProcessHeaderRange(s.ctx, currentHeight+1, newHeaderHeight, 
s.heightInCh) } // Process data (if not already processed by headers) - newDataHeight := s.dataStore.Height() + newDataHeight := s.dataStore.Store().Height() if newDataHeight != newHeaderHeight && newDataHeight > currentHeight { s.p2pHandler.ProcessDataRange(s.ctx, currentHeight+1, newDataHeight, s.heightInCh) } @@ -365,8 +365,9 @@ func (s *Syncer) processHeightEvent(event *common.DAHeightEvent) { return } - // LastDataHash must be gotten from store when the data hash is empty. - if bytes.Equal(event.Header.DataHash, common.DataHashForEmptyTxs) && currentHeight > 0 { + // The last data must be fetched from the store when the event comes from DA and the data hash is empty. + // When the event comes from P2P, the sequencer and then all the full nodes already include the data. + if event.Source == common.SourceDA && bytes.Equal(event.Header.DataHash, common.DataHashForEmptyTxs) && currentHeight > 0 { _, lastData, err := s.store.GetBlockData(s.ctx, currentHeight) if err != nil { s.logger.Error().Err(err).Msg("failed to get last data") @@ -384,6 +385,16 @@ func (s *Syncer) processHeightEvent(event *common.DAHeightEvent) { } return } + + // only save to p2p stores if the event came from DA + if event.Source == common.SourceDA { + g, ctx := errgroup.WithContext(s.ctx) + g.Go(func() error { return s.headerStore.WriteToStoreAndBroadcast(ctx, event.Header) }) + g.Go(func() error { return s.dataStore.WriteToStoreAndBroadcast(ctx, event.Data) }) + if err := g.Wait(); err != nil { + s.logger.Error().Err(err).Msg("failed to append event header and/or data to p2p store") + } + } } // errInvalidBlock is returned when a block is failing validation @@ -578,6 +589,7 @@ func (s *Syncer) processPendingEvents() { Header: event.Header, Data: event.Data, DaHeight: event.DaHeight, + Source: event.Source, } select { diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index 2dc2bd804f..7a9e80dbbd 100644 --- a/block/internal/syncing/syncer_backoff_test.go 
+++ b/block/internal/syncing/syncer_backoff_test.go @@ -20,7 +20,7 @@ import ( "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/store" - mocks "github.com/evstack/ev-node/test/mocks/external" + extmocks "github.com/evstack/ev-node/test/mocks/external" "github.com/evstack/ev-node/types" ) @@ -74,12 +74,19 @@ func TestSyncer_BackoffOnDAError(t *testing.T) { syncer.daRetriever = daRetriever syncer.p2pHandler = p2pHandler - headerStore := mocks.NewMockStore[*types.SignedHeader](t) - headerStore.On("Height").Return(uint64(0)).Maybe() + // Create mock stores for P2P + mockHeaderStore := extmocks.NewMockStore[*types.SignedHeader](t) + mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() + + mockDataStore := extmocks.NewMockStore[*types.Data](t) + mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() + + headerStore := common.NewMockBroadcaster[*types.SignedHeader](t) + headerStore.EXPECT().Store().Return(mockHeaderStore).Maybe() syncer.headerStore = headerStore - dataStore := mocks.NewMockStore[*types.Data](t) - dataStore.On("Height").Return(uint64(0)).Maybe() + dataStore := common.NewMockBroadcaster[*types.Data](t) + dataStore.EXPECT().Store().Return(mockDataStore).Maybe() syncer.dataStore = dataStore var callTimes []time.Time @@ -140,7 +147,7 @@ func TestSyncer_BackoffOnDAError(t *testing.T) { assert.GreaterOrEqual(t, callCount, 2, "should continue without significant delay") if len(callTimes) >= 2 { timeBetweenCalls := callTimes[1].Sub(callTimes[0]) - assert.Less(t, timeBetweenCalls, 100*time.Millisecond, + assert.Less(t, timeBetweenCalls, 120*time.Millisecond, "should not have backoff delay for ErrBlobNotFound") } } @@ -165,12 +172,19 @@ func TestSyncer_BackoffResetOnSuccess(t *testing.T) { syncer.daRetriever = daRetriever syncer.p2pHandler = p2pHandler - headerStore := mocks.NewMockStore[*types.SignedHeader](t) - headerStore.On("Height").Return(uint64(0)).Maybe() + // Create mock stores for 
P2P + mockHeaderStore := extmocks.NewMockStore[*types.SignedHeader](t) + mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() + + mockDataStore := extmocks.NewMockStore[*types.Data](t) + mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() + + headerStore := common.NewMockBroadcaster[*types.SignedHeader](t) + headerStore.EXPECT().Store().Return(mockHeaderStore).Maybe() syncer.headerStore = headerStore - dataStore := mocks.NewMockStore[*types.Data](t) - dataStore.On("Height").Return(uint64(0)).Maybe() + dataStore := common.NewMockBroadcaster[*types.Data](t) + dataStore.EXPECT().Store().Return(mockDataStore).Maybe() syncer.dataStore = dataStore var callTimes []time.Time @@ -251,12 +265,19 @@ func TestSyncer_BackoffBehaviorIntegration(t *testing.T) { syncer.daRetriever = daRetriever syncer.p2pHandler = p2pHandler - headerStore := mocks.NewMockStore[*types.SignedHeader](t) - headerStore.On("Height").Return(uint64(0)).Maybe() + // Create mock stores for P2P + mockHeaderStore := extmocks.NewMockStore[*types.SignedHeader](t) + mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() + + mockDataStore := extmocks.NewMockStore[*types.Data](t) + mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() + + headerStore := common.NewMockBroadcaster[*types.SignedHeader](t) + headerStore.EXPECT().Store().Return(mockHeaderStore).Maybe() syncer.headerStore = headerStore - dataStore := mocks.NewMockStore[*types.Data](t) - dataStore.On("Height").Return(uint64(0)).Maybe() + dataStore := common.NewMockBroadcaster[*types.Data](t) + dataStore.EXPECT().Store().Return(mockDataStore).Maybe() syncer.dataStore = dataStore var callTimes []time.Time @@ -335,8 +356,8 @@ func setupTestSyncer(t *testing.T, daBlockTime time.Duration) *Syncer { common.NopMetrics(), cfg, gen, - nil, - nil, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), diff --git 
a/block/internal/syncing/syncer_benchmark_test.go b/block/internal/syncing/syncer_benchmark_test.go index 28cf2af22d..8c9cfea362 100644 --- a/block/internal/syncing/syncer_benchmark_test.go +++ b/block/internal/syncing/syncer_benchmark_test.go @@ -12,7 +12,6 @@ import ( "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/store" testmocks "github.com/evstack/ev-node/test/mocks" - mocks "github.com/evstack/ev-node/test/mocks/external" "github.com/evstack/ev-node/types" "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" @@ -150,11 +149,9 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay // Attach mocks s.daRetriever = daR s.p2pHandler = newMockp2pHandler(b) // not used directly in this benchmark path - headerP2PStore := mocks.NewMockStore[*types.SignedHeader](b) - headerP2PStore.On("Height").Return(uint64(0)).Maybe() + headerP2PStore := common.NewMockBroadcaster[*types.SignedHeader](b) s.headerStore = headerP2PStore - dataP2PStore := mocks.NewMockStore[*types.Data](b) - dataP2PStore.On("Height").Return(uint64(0)).Maybe() + dataP2PStore := common.NewMockBroadcaster[*types.Data](b) s.dataStore = dataP2PStore return &benchFixture{s: s, st: st, cm: cm, cancel: cancel} } diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 3e4fe3af62..65ecfb674c 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -13,7 +13,6 @@ import ( signerpkg "github.com/evstack/ev-node/pkg/signer" "github.com/evstack/ev-node/pkg/signer/noop" testmocks "github.com/evstack/ev-node/test/mocks" - mocks "github.com/evstack/ev-node/test/mocks/external" "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" "github.com/libp2p/go-libp2p/core/crypto" @@ -26,6 +25,7 @@ import ( "github.com/evstack/ev-node/block/internal/common" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/store" + extmocks 
"github.com/evstack/ev-node/test/mocks/external" "github.com/evstack/ev-node/types" ) @@ -108,8 +108,8 @@ func TestSyncer_validateBlock_DataHashMismatch(t *testing.T) { common.NopMetrics(), cfg, gen, - &mocks.MockStore[*types.SignedHeader]{}, - &mocks.MockStore[*types.Data]{}, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -156,8 +156,8 @@ func TestProcessHeightEvent_SyncsAndUpdatesState(t *testing.T) { common.NopMetrics(), cfg, gen, - &mocks.MockStore[*types.SignedHeader]{}, - &mocks.MockStore[*types.Data]{}, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -206,8 +206,8 @@ func TestSequentialBlockSync(t *testing.T) { common.NopMetrics(), cfg, gen, - &mocks.MockStore[*types.SignedHeader]{}, - &mocks.MockStore[*types.Data]{}, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -327,10 +327,19 @@ func TestSyncLoopPersistState(t *testing.T) { gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, DAStartHeight: myDAHeightOffset} dummyExec := execution.NewDummyExecutor() - mockP2PHeaderStore := &mocks.MockStore[*types.SignedHeader]{} - mockP2PDataStore := &mocks.MockStore[*types.Data]{} - mockP2PHeaderStore.On("Height", mock.Anything).Return(uint64(1), nil).Maybe() - mockP2PDataStore.On("Height", mock.Anything).Return(uint64(1), nil).Maybe() + + // Create mock stores for P2P + mockHeaderStore := extmocks.NewMockStore[*types.SignedHeader](t) + mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() + + mockDataStore := extmocks.NewMockStore[*types.Data](t) + mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() + + mockP2PHeaderStore := 
common.NewMockBroadcaster[*types.SignedHeader](t) + mockP2PHeaderStore.EXPECT().Store().Return(mockHeaderStore).Maybe() + + mockP2PDataStore := common.NewMockBroadcaster[*types.Data](t) + mockP2PDataStore.EXPECT().Store().Return(mockDataStore).Maybe() syncerInst1 := NewSyncer( st, diff --git a/node/full.go b/node/full.go index caa769c643..ce1369e49c 100644 --- a/node/full.go +++ b/node/full.go @@ -120,8 +120,8 @@ func newFullNode( rktStore, exec, da, - headerSyncService.Store(), - dataSyncService.Store(), + headerSyncService, + dataSyncService, logger, blockMetrics, nodeOpts.BlockOptions, diff --git a/pkg/sync/sync_service.go b/pkg/sync/sync_service.go index 6af6e40ab8..ccaa804535 100644 --- a/pkg/sync/sync_service.go +++ b/pkg/sync/sync_service.go @@ -117,7 +117,7 @@ func newSyncService[H header.Header[H]]( } // Store returns the store of the SyncService -func (syncService *SyncService[H]) Store() *goheaderstore.Store[H] { +func (syncService *SyncService[H]) Store() header.Store[H] { return syncService.store } @@ -420,6 +420,7 @@ func newSyncer[H header.Header[H]]( opts = append(opts, goheadersync.WithMetrics(), goheadersync.WithPruningWindow(ninetyNineYears), + goheadersync.WithTrustingPeriod(ninetyNineYears), ) return goheadersync.NewSyncer(ex, store, sub, opts...) }