diff --git a/.mockery.yaml b/.mockery.yaml index f1d971730b..11090f7153 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -49,6 +49,11 @@ packages: dir: ./block/internal/syncing pkgname: syncing filename: syncer_mock.go + HeightStore: + config: + dir: ./block/internal/syncing + pkgname: syncing + filename: height_store_mock.go github.com/evstack/ev-node/block/internal/common: interfaces: Broadcaster: diff --git a/apps/evm/cmd/rollback.go b/apps/evm/cmd/rollback.go index f28ebb8bd5..6d8a54a66b 100644 --- a/apps/evm/cmd/rollback.go +++ b/apps/evm/cmd/rollback.go @@ -5,12 +5,12 @@ import ( "errors" "fmt" + "github.com/evstack/ev-node/types" "github.com/spf13/cobra" goheaderstore "github.com/celestiaorg/go-header/store" rollcmd "github.com/evstack/ev-node/pkg/cmd" "github.com/evstack/ev-node/pkg/store" - "github.com/evstack/ev-node/types" ) // NewRollbackCmd creates a command to rollback ev-node state by one height. @@ -64,7 +64,7 @@ func NewRollbackCmd() *cobra.Command { } // rollback ev-node goheader state - headerStore, err := goheaderstore.NewStore[*types.SignedHeader]( + headerStore, err := goheaderstore.NewStore[*types.P2PSignedHeader]( evolveDB, goheaderstore.WithStorePrefix("headerSync"), goheaderstore.WithMetrics(), @@ -73,7 +73,7 @@ func NewRollbackCmd() *cobra.Command { return err } - dataStore, err := goheaderstore.NewStore[*types.Data]( + dataStore, err := goheaderstore.NewStore[*types.P2PData]( evolveDB, goheaderstore.WithStorePrefix("dataSync"), goheaderstore.WithMetrics(), diff --git a/apps/testapp/cmd/rollback.go b/apps/testapp/cmd/rollback.go index dfea32176f..f7805afbf8 100644 --- a/apps/testapp/cmd/rollback.go +++ b/apps/testapp/cmd/rollback.go @@ -70,7 +70,7 @@ func NewRollbackCmd() *cobra.Command { } // rollback ev-node goheader state - headerStore, err := goheaderstore.NewStore[*types.SignedHeader]( + headerStore, err := goheaderstore.NewStore[*types.P2PSignedHeader]( evolveDB, goheaderstore.WithStorePrefix("headerSync"), 
goheaderstore.WithMetrics(), @@ -79,7 +79,7 @@ func NewRollbackCmd() *cobra.Command { return err } - dataStore, err := goheaderstore.NewStore[*types.Data]( + dataStore, err := goheaderstore.NewStore[*types.P2PData]( evolveDB, goheaderstore.WithStorePrefix("dataSync"), goheaderstore.WithMetrics(), diff --git a/block/components.go b/block/components.go index 5602e5a8e6..dbb37f57bd 100644 --- a/block/components.go +++ b/block/components.go @@ -5,10 +5,10 @@ import ( "errors" "fmt" + "github.com/evstack/ev-node/pkg/sync" "github.com/rs/zerolog" "github.com/evstack/ev-node/block/internal/cache" - "github.com/evstack/ev-node/block/internal/common" da "github.com/evstack/ev-node/block/internal/da" "github.com/evstack/ev-node/block/internal/executing" "github.com/evstack/ev-node/block/internal/reaping" @@ -20,7 +20,6 @@ import ( "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/signer" "github.com/evstack/ev-node/pkg/store" - "github.com/evstack/ev-node/types" ) // Components represents the block-related components @@ -121,8 +120,8 @@ func NewSyncComponents( store store.Store, exec coreexecutor.Executor, daClient da.Client, - headerStore common.Broadcaster[*types.SignedHeader], - dataStore common.Broadcaster[*types.Data], + headerStore *sync.HeaderSyncService, + dataStore *sync.DataSyncService, logger zerolog.Logger, metrics *Metrics, blockOpts BlockOptions, @@ -152,7 +151,7 @@ func NewSyncComponents( ) // Create submitter for sync nodes (no signer, only DA inclusion processing) - daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger) + daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger, headerStore, dataStore) submitter := submitting.NewSubmitter( store, exec, @@ -186,8 +185,8 @@ func NewAggregatorComponents( sequencer coresequencer.Sequencer, daClient da.Client, signer signer.Signer, - headerBroadcaster common.Broadcaster[*types.SignedHeader], - dataBroadcaster 
common.Broadcaster[*types.Data], + headerBroadcaster *sync.HeaderSyncService, + dataBroadcaster *sync.DataSyncService, logger zerolog.Logger, metrics *Metrics, blockOpts BlockOptions, @@ -242,7 +241,7 @@ func NewAggregatorComponents( }, nil } - daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger) + daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger, headerBroadcaster, dataBroadcaster) submitter := submitting.NewSubmitter( store, exec, diff --git a/block/internal/common/broadcaster_mock.go b/block/internal/common/broadcaster_mock.go index 2983478078..39d748f5cb 100644 --- a/block/internal/common/broadcaster_mock.go +++ b/block/internal/common/broadcaster_mock.go @@ -8,6 +8,7 @@ import ( "context" "github.com/celestiaorg/go-header" + "github.com/evstack/ev-node/types" "github.com/libp2p/go-libp2p-pubsub" mock "github.com/stretchr/testify/mock" ) @@ -39,48 +40,152 @@ func (_m *MockBroadcaster[H]) EXPECT() *MockBroadcaster_Expecter[H] { return &MockBroadcaster_Expecter[H]{mock: &_m.Mock} } -// Store provides a mock function for the type MockBroadcaster -func (_mock *MockBroadcaster[H]) Store() header.Store[H] { - ret := _mock.Called() +// AppendDAHint provides a mock function for the type MockBroadcaster +func (_mock *MockBroadcaster[H]) AppendDAHint(ctx context.Context, daHeight uint64, hashes ...types.Hash) error { + // types.Hash + _va := make([]interface{}, len(hashes)) + for _i := range hashes { + _va[_i] = hashes[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, daHeight) + _ca = append(_ca, _va...) + ret := _mock.Called(_ca...) 
if len(ret) == 0 { - panic("no return value specified for Store") + panic("no return value specified for AppendDAHint") } - var r0 header.Store[H] - if returnFunc, ok := ret.Get(0).(func() header.Store[H]); ok { - r0 = returnFunc() + var r0 error + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64, ...types.Hash) error); ok { + r0 = returnFunc(ctx, daHeight, hashes...) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockBroadcaster_AppendDAHint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AppendDAHint' +type MockBroadcaster_AppendDAHint_Call[H header.Header[H]] struct { + *mock.Call +} + +// AppendDAHint is a helper method to define mock.On call +// - ctx context.Context +// - daHeight uint64 +// - hashes ...types.Hash +func (_e *MockBroadcaster_Expecter[H]) AppendDAHint(ctx interface{}, daHeight interface{}, hashes ...interface{}) *MockBroadcaster_AppendDAHint_Call[H] { + return &MockBroadcaster_AppendDAHint_Call[H]{Call: _e.mock.On("AppendDAHint", + append([]interface{}{ctx, daHeight}, hashes...)...)} +} + +func (_c *MockBroadcaster_AppendDAHint_Call[H]) Run(run func(ctx context.Context, daHeight uint64, hashes ...types.Hash)) *MockBroadcaster_AppendDAHint_Call[H] { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 uint64 + if args[1] != nil { + arg1 = args[1].(uint64) + } + var arg2 []types.Hash + variadicArgs := make([]types.Hash, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(types.Hash) + } + } + arg2 = variadicArgs + run( + arg0, + arg1, + arg2..., + ) + }) + return _c +} + +func (_c *MockBroadcaster_AppendDAHint_Call[H]) Return(err error) *MockBroadcaster_AppendDAHint_Call[H] { + _c.Call.Return(err) + return _c +} + +func (_c *MockBroadcaster_AppendDAHint_Call[H]) RunAndReturn(run func(ctx context.Context, daHeight uint64, hashes ...types.Hash) error) 
*MockBroadcaster_AppendDAHint_Call[H] { + _c.Call.Return(run) + return _c +} + +// GetByHeight provides a mock function for the type MockBroadcaster +func (_mock *MockBroadcaster[H]) GetByHeight(ctx context.Context, height uint64) (H, uint64, error) { + ret := _mock.Called(ctx, height) + + if len(ret) == 0 { + panic("no return value specified for GetByHeight") + } + + var r0 H + var r1 uint64 + var r2 error + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) (H, uint64, error)); ok { + return returnFunc(ctx, height) + } + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) H); ok { + r0 = returnFunc(ctx, height) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(header.Store[H]) + r0 = ret.Get(0).(H) } } - return r0 + if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) uint64); ok { + r1 = returnFunc(ctx, height) + } else { + r1 = ret.Get(1).(uint64) + } + if returnFunc, ok := ret.Get(2).(func(context.Context, uint64) error); ok { + r2 = returnFunc(ctx, height) + } else { + r2 = ret.Error(2) + } + return r0, r1, r2 } -// MockBroadcaster_Store_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Store' -type MockBroadcaster_Store_Call[H header.Header[H]] struct { +// MockBroadcaster_GetByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetByHeight' +type MockBroadcaster_GetByHeight_Call[H header.Header[H]] struct { *mock.Call } -// Store is a helper method to define mock.On call -func (_e *MockBroadcaster_Expecter[H]) Store() *MockBroadcaster_Store_Call[H] { - return &MockBroadcaster_Store_Call[H]{Call: _e.mock.On("Store")} +// GetByHeight is a helper method to define mock.On call +// - ctx context.Context +// - height uint64 +func (_e *MockBroadcaster_Expecter[H]) GetByHeight(ctx interface{}, height interface{}) *MockBroadcaster_GetByHeight_Call[H] { + return &MockBroadcaster_GetByHeight_Call[H]{Call: _e.mock.On("GetByHeight", ctx, height)} } 
-func (_c *MockBroadcaster_Store_Call[H]) Run(run func()) *MockBroadcaster_Store_Call[H] { +func (_c *MockBroadcaster_GetByHeight_Call[H]) Run(run func(ctx context.Context, height uint64)) *MockBroadcaster_GetByHeight_Call[H] { _c.Call.Run(func(args mock.Arguments) { - run() + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 uint64 + if args[1] != nil { + arg1 = args[1].(uint64) + } + run( + arg0, + arg1, + ) }) return _c } -func (_c *MockBroadcaster_Store_Call[H]) Return(store header.Store[H]) *MockBroadcaster_Store_Call[H] { - _c.Call.Return(store) +func (_c *MockBroadcaster_GetByHeight_Call[H]) Return(v H, v1 uint64, err error) *MockBroadcaster_GetByHeight_Call[H] { + _c.Call.Return(v, v1, err) return _c } -func (_c *MockBroadcaster_Store_Call[H]) RunAndReturn(run func() header.Store[H]) *MockBroadcaster_Store_Call[H] { +func (_c *MockBroadcaster_GetByHeight_Call[H]) RunAndReturn(run func(ctx context.Context, height uint64) (H, uint64, error)) *MockBroadcaster_GetByHeight_Call[H] { _c.Call.Return(run) return _c } diff --git a/block/internal/common/event.go b/block/internal/common/event.go index 69d0300f9f..f02a181de8 100644 --- a/block/internal/common/event.go +++ b/block/internal/common/event.go @@ -20,4 +20,7 @@ type DAHeightEvent struct { DaHeight uint64 // Source indicates where this event originated from (DA or P2P) Source EventSource + + // Optional DA height hints from P2P. 
first is the DA height hint for the header, second is the DA height hint for the data + DaHeightHints [2]uint64 } diff --git a/block/internal/common/expected_interfaces.go b/block/internal/common/expected_interfaces.go index 8f36af6240..64464538a3 100644 --- a/block/internal/common/expected_interfaces.go +++ b/block/internal/common/expected_interfaces.go @@ -3,13 +3,20 @@ package common import ( "context" + "github.com/evstack/ev-node/types" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/celestiaorg/go-header" ) -// broadcaster interface for P2P broadcasting +type ( + HeaderP2PBroadcaster = Broadcaster[*types.P2PSignedHeader] + DataP2PBroadcaster = Broadcaster[*types.P2PData] +) + +// Broadcaster interface for P2P broadcasting type Broadcaster[H header.Header[H]] interface { WriteToStoreAndBroadcast(ctx context.Context, payload H, opts ...pubsub.PubOpt) error - Store() header.Store[H] + AppendDAHint(ctx context.Context, daHeight uint64, hashes ...types.Hash) error + GetByHeight(ctx context.Context, height uint64) (H, uint64, error) } diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index 845e18727e..a24d354b5e 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -38,8 +38,8 @@ type Executor struct { metrics *common.Metrics // Broadcasting - headerBroadcaster common.Broadcaster[*types.SignedHeader] - dataBroadcaster common.Broadcaster[*types.Data] + headerBroadcaster common.HeaderP2PBroadcaster + dataBroadcaster common.DataP2PBroadcaster // Configuration config config.Config @@ -79,8 +79,8 @@ func NewExecutor( metrics *common.Metrics, config config.Config, genesis genesis.Genesis, - headerBroadcaster common.Broadcaster[*types.SignedHeader], - dataBroadcaster common.Broadcaster[*types.Data], + headerBroadcaster common.HeaderP2PBroadcaster, + dataBroadcaster common.DataP2PBroadcaster, logger zerolog.Logger, options common.BlockOptions, errorCh chan<- error, @@ -432,8 +432,12 @@ 
func (e *Executor) produceBlock() error { // broadcast header and data to P2P network g, ctx := errgroup.WithContext(e.ctx) - g.Go(func() error { return e.headerBroadcaster.WriteToStoreAndBroadcast(ctx, header) }) - g.Go(func() error { return e.dataBroadcaster.WriteToStoreAndBroadcast(ctx, data) }) + g.Go(func() error { + return e.headerBroadcaster.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{Message: header}) + }) + g.Go(func() error { + return e.dataBroadcaster.WriteToStoreAndBroadcast(ctx, &types.P2PData{Message: data}) + }) if err := g.Wait(); err != nil { e.logger.Error().Err(err).Msg("failed to broadcast header and/data") // don't fail block production on broadcast error diff --git a/block/internal/executing/executor_lazy_test.go b/block/internal/executing/executor_lazy_test.go index a11cf6a1c2..0821454d3f 100644 --- a/block/internal/executing/executor_lazy_test.go +++ b/block/internal/executing/executor_lazy_test.go @@ -47,9 +47,9 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db := common.NewMockBroadcaster[*types.Data](t) + db := common.NewMockBroadcaster[*types.P2PData](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( @@ -162,9 +162,9 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db := common.NewMockBroadcaster[*types.Data](t) + db := common.NewMockBroadcaster[*types.P2PData](t) 
db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( diff --git a/block/internal/executing/executor_logic_test.go b/block/internal/executing/executor_logic_test.go index 6029186e86..2f9b29721a 100644 --- a/block/internal/executing/executor_logic_test.go +++ b/block/internal/executing/executor_logic_test.go @@ -69,9 +69,9 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { mockSeq := testmocks.NewMockSequencer(t) // Broadcasters are required by produceBlock; use generated mocks - hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db := common.NewMockBroadcaster[*types.Data](t) + db := common.NewMockBroadcaster[*types.P2PData](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( @@ -159,9 +159,9 @@ func TestPendingLimit_SkipsProduction(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db := common.NewMockBroadcaster[*types.Data](t) + db := common.NewMockBroadcaster[*types.P2PData](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( diff --git a/block/internal/executing/executor_restart_test.go b/block/internal/executing/executor_restart_test.go index 14daccddcc..8b68a1c651 100644 --- a/block/internal/executing/executor_restart_test.go +++ b/block/internal/executing/executor_restart_test.go @@ -47,9 +47,9 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { // Create first executor instance mockExec1 := testmocks.NewMockExecutor(t) mockSeq1 := 
testmocks.NewMockSequencer(t) - hb1 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb1 := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db1 := common.NewMockBroadcaster[*types.Data](t) + db1 := common.NewMockBroadcaster[*types.P2PData](t) db1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec1, err := NewExecutor( @@ -169,9 +169,9 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { // Create second executor instance (restart scenario) mockExec2 := testmocks.NewMockExecutor(t) mockSeq2 := testmocks.NewMockSequencer(t) - hb2 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb2 := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db2 := common.NewMockBroadcaster[*types.Data](t) + db2 := common.NewMockBroadcaster[*types.P2PData](t) db2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec2, err := NewExecutor( @@ -270,9 +270,9 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { // Create first executor and produce one block mockExec1 := testmocks.NewMockExecutor(t) mockSeq1 := testmocks.NewMockSequencer(t) - hb1 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb1 := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db1 := common.NewMockBroadcaster[*types.Data](t) + db1 := common.NewMockBroadcaster[*types.P2PData](t) db1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec1, err := NewExecutor( @@ -325,9 +325,9 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { // Create second executor (restart) mockExec2 := testmocks.NewMockExecutor(t) mockSeq2 := testmocks.NewMockSequencer(t) - hb2 := common.NewMockBroadcaster[*types.SignedHeader](t) + 
hb2 := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db2 := common.NewMockBroadcaster[*types.Data](t) + db2 := common.NewMockBroadcaster[*types.P2PData](t) db2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec2, err := NewExecutor( diff --git a/block/internal/executing/executor_test.go b/block/internal/executing/executor_test.go index e310c6d40d..7e62d09170 100644 --- a/block/internal/executing/executor_test.go +++ b/block/internal/executing/executor_test.go @@ -1,7 +1,6 @@ package executing import ( - "context" "testing" "time" @@ -40,8 +39,8 @@ func TestExecutor_BroadcasterIntegration(t *testing.T) { } // Create mock broadcasters - headerBroadcaster := common.NewMockBroadcaster[*types.SignedHeader](t) - dataBroadcaster := common.NewMockBroadcaster[*types.Data](t) + headerBroadcaster := common.NewMockBroadcaster[*types.P2PSignedHeader](t) + dataBroadcaster := common.NewMockBroadcaster[*types.P2PData](t) // Create executor with broadcasters executor, err := NewExecutor( @@ -120,48 +119,3 @@ func TestExecutor_NilBroadcasters(t *testing.T) { assert.Equal(t, cacheManager, executor.cache) assert.Equal(t, gen, executor.genesis) } - -func TestExecutor_BroadcastFlow(t *testing.T) { - // This test demonstrates how the broadcast flow works - // when an Executor produces a block - - // Create mock broadcasters - headerBroadcaster := common.NewMockBroadcaster[*types.SignedHeader](t) - dataBroadcaster := common.NewMockBroadcaster[*types.Data](t) - - // Create sample data that would be broadcast - sampleHeader := &types.SignedHeader{ - Header: types.Header{ - BaseHeader: types.BaseHeader{ - ChainID: "test-chain", - Height: 1, - Time: uint64(time.Now().UnixNano()), - }, - }, - } - - sampleData := &types.Data{ - Metadata: &types.Metadata{ - ChainID: "test-chain", - Height: 1, - Time: uint64(time.Now().UnixNano()), - }, - Txs: []types.Tx{}, - } - 
- // Test broadcast calls - ctx := context.Background() - - // Set up expectations - headerBroadcaster.EXPECT().WriteToStoreAndBroadcast(ctx, sampleHeader).Return(nil).Once() - dataBroadcaster.EXPECT().WriteToStoreAndBroadcast(ctx, sampleData).Return(nil).Once() - - // Simulate what happens in produceBlock() after block creation - err := headerBroadcaster.WriteToStoreAndBroadcast(ctx, sampleHeader) - require.NoError(t, err) - - err = dataBroadcaster.WriteToStoreAndBroadcast(ctx, sampleData) - require.NoError(t, err) - - // Verify expectations were met (automatically checked by testify mock on cleanup) -} diff --git a/block/internal/submitting/da_submitter.go b/block/internal/submitting/da_submitter.go index 020f4b02de..a40841d0de 100644 --- a/block/internal/submitting/da_submitter.go +++ b/block/internal/submitting/da_submitter.go @@ -90,14 +90,20 @@ func clamp(v, min, max time.Duration) time.Duration { return v } +type DAHintAppender interface { + AppendDAHint(ctx context.Context, daHeight uint64, hash ...types.Hash) error +} + // DASubmitter handles DA submission operations type DASubmitter struct { - client da.Client - config config.Config - genesis genesis.Genesis - options common.BlockOptions - logger zerolog.Logger - metrics *common.Metrics + client da.Client + config config.Config + genesis genesis.Genesis + options common.BlockOptions + logger zerolog.Logger + metrics *common.Metrics + headerDAHintAppender DAHintAppender + dataDAHintAppender DAHintAppender // address selector for multi-account support addressSelector pkgda.AddressSelector @@ -111,6 +117,8 @@ func NewDASubmitter( options common.BlockOptions, metrics *common.Metrics, logger zerolog.Logger, + headerDAHintAppender DAHintAppender, + dataDAHintAppender DAHintAppender, ) *DASubmitter { daSubmitterLogger := logger.With().Str("component", "da_submitter").Logger() @@ -136,13 +144,15 @@ func NewDASubmitter( } return &DASubmitter{ - client: client, - config: config, - genesis: genesis, - options: 
options, - metrics: metrics, - logger: daSubmitterLogger, - addressSelector: addressSelector, + client: client, + config: config, + genesis: genesis, + options: options, + metrics: metrics, + logger: daSubmitterLogger, + addressSelector: addressSelector, + headerDAHintAppender: headerDAHintAppender, + dataDAHintAppender: dataDAHintAppender, } } @@ -182,8 +192,15 @@ func (s *DASubmitter) SubmitHeaders(ctx context.Context, cache cache.Manager) er return proto.Marshal(headerPb) }, func(submitted []*types.SignedHeader, res *datypes.ResultSubmit) { - for _, header := range submitted { - cache.SetHeaderDAIncluded(header.Hash().String(), res.Height, header.Height()) + hashes := make([]types.Hash, len(submitted)) + for i, header := range submitted { + headerHash := header.Hash() + cache.SetHeaderDAIncluded(headerHash.String(), res.Height, header.Height()) + hashes[i] = headerHash + } + if err := s.headerDAHintAppender.AppendDAHint(ctx, res.Height, hashes...); err != nil { + s.logger.Error().Err(err).Msg("failed to append da height hint in header p2p store") + // ignoring error here, since we don't want to block the block submission } if l := len(submitted); l > 0 { lastHeight := submitted[l-1].Height() @@ -225,8 +242,14 @@ func (s *DASubmitter) SubmitData(ctx context.Context, cache cache.Manager, signe return signedData.MarshalBinary() }, func(submitted []*types.SignedData, res *datypes.ResultSubmit) { - for _, sd := range submitted { + hashes := make([]types.Hash, len(submitted)) + for i, sd := range submitted { cache.SetDataDAIncluded(sd.Data.DACommitment().String(), res.Height, sd.Height()) + hashes[i] = sd.Hash() + } + if err := s.dataDAHintAppender.AppendDAHint(ctx, res.Height, hashes...); err != nil { + s.logger.Error().Err(err).Msg("failed to append da height hint in data p2p store") + // ignoring error here, since we don't want to block the block submission } if l := len(submitted); l > 0 { lastHeight := submitted[l-1].Height() diff --git 
a/block/internal/submitting/da_submitter_integration_test.go b/block/internal/submitting/da_submitter_integration_test.go index 8f03ff7c7f..3d985bbfad 100644 --- a/block/internal/submitting/da_submitter_integration_test.go +++ b/block/internal/submitting/da_submitter_integration_test.go @@ -96,7 +96,7 @@ func TestDASubmitter_SubmitHeadersAndData_MarksInclusionAndUpdatesLastSubmitted( Return(func(_ context.Context, blobs [][]byte, _ float64, _ []byte, _ []byte) datypes.ResultSubmit { return datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, SubmittedCount: uint64(len(blobs)), Height: 1}} }).Twice() - daSubmitter := NewDASubmitter(client, cfg, gen, common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop()) + daSubmitter := NewDASubmitter(client, cfg, gen, common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop(), noopDAHintAppender{}, noopDAHintAppender{}) // Submit headers and data require.NoError(t, daSubmitter.SubmitHeaders(context.Background(), cm)) @@ -113,3 +113,9 @@ func TestDASubmitter_SubmitHeadersAndData_MarksInclusionAndUpdatesLastSubmitted( assert.True(t, ok) } + +type noopDAHintAppender struct{} + +func (n noopDAHintAppender) AppendDAHint(ctx context.Context, daHeight uint64, hash ...types.Hash) error { + return nil +} diff --git a/block/internal/submitting/da_submitter_mocks_test.go b/block/internal/submitting/da_submitter_mocks_test.go index 19aea18b0d..5f64a5e423 100644 --- a/block/internal/submitting/da_submitter_mocks_test.go +++ b/block/internal/submitting/da_submitter_mocks_test.go @@ -35,7 +35,7 @@ func newTestSubmitter(t *testing.T, mockClient *mocks.MockClient, override func( mockClient.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() mockClient.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() mockClient.On("HasForcedInclusionNamespace").Return(false).Maybe() - return NewDASubmitter(mockClient, cfg, genesis.Genesis{} /*options=*/, common.BlockOptions{}, 
common.NopMetrics(), zerolog.Nop()) + return NewDASubmitter(mockClient, cfg, genesis.Genesis{} /*options=*/, common.BlockOptions{}, common.NopMetrics(), zerolog.Nop(), nil, nil) } // marshal helper for simple items diff --git a/block/internal/submitting/da_submitter_test.go b/block/internal/submitting/da_submitter_test.go index a34589c222..a515cbb920 100644 --- a/block/internal/submitting/da_submitter_test.go +++ b/block/internal/submitting/da_submitter_test.go @@ -71,6 +71,8 @@ func setupDASubmitterTest(t *testing.T) (*DASubmitter, store.Store, cache.Manage common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop(), + noopDAHintAppender{}, + noopDAHintAppender{}, ) return daSubmitter, st, cm, mockDA, gen @@ -118,6 +120,8 @@ func TestNewDASubmitterSetsVisualizerWhenEnabled(t *testing.T) { common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop(), + noopDAHintAppender{}, + noopDAHintAppender{}, ) require.NotNil(t, server.GetDAVisualizationServer()) diff --git a/block/internal/submitting/submitter_test.go b/block/internal/submitting/submitter_test.go index 269aedd0e0..fe4afca654 100644 --- a/block/internal/submitting/submitter_test.go +++ b/block/internal/submitting/submitter_test.go @@ -167,7 +167,7 @@ func TestSubmitter_setSequencerHeightToDAHeight(t *testing.T) { daClient.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() daClient.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() daClient.On("HasForcedInclusionNamespace").Return(false).Maybe() - daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop(), nil, nil) s := NewSubmitter(mockStore, nil, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, nil, zerolog.Nop(), nil) s.ctx = ctx @@ -251,7 +251,7 @@ func TestSubmitter_processDAInclusionLoop_advances(t *testing.T) { 
daClient.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() daClient.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() daClient.On("HasForcedInclusionNamespace").Return(false).Maybe() - daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop(), nil ,nil) s := NewSubmitter(st, exec, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, nil, zerolog.Nop(), nil) // prepare two consecutive blocks in store with DA included in cache @@ -441,7 +441,7 @@ func TestSubmitter_CacheClearedOnHeightInclusion(t *testing.T) { daClient.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() daClient.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() daClient.On("HasForcedInclusionNamespace").Return(false).Maybe() - daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop(), nil, nil) s := NewSubmitter(st, exec, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, nil, zerolog.Nop(), nil) // Create test blocks diff --git a/block/internal/syncing/async_da_retriever.go b/block/internal/syncing/async_da_retriever.go new file mode 100644 index 0000000000..7c79a37512 --- /dev/null +++ b/block/internal/syncing/async_da_retriever.go @@ -0,0 +1,111 @@ +package syncing + +import ( + "context" + "sync" + + "github.com/evstack/ev-node/block/internal/common" + "github.com/rs/zerolog" +) + +// AsyncDARetriever handles concurrent DA retrieval operations. 
+type AsyncDARetriever struct { + retriever DARetriever + resultCh chan<- common.DAHeightEvent + workCh chan uint64 + inFlight map[uint64]struct{} + mu sync.Mutex + logger zerolog.Logger + wg sync.WaitGroup + ctx context.Context + cancel context.CancelFunc +} + +// NewAsyncDARetriever creates a new AsyncDARetriever. +func NewAsyncDARetriever( + retriever DARetriever, + resultCh chan<- common.DAHeightEvent, + logger zerolog.Logger, +) *AsyncDARetriever { + return &AsyncDARetriever{ + retriever: retriever, + resultCh: resultCh, + workCh: make(chan uint64, 100), // Buffer size 100 + inFlight: make(map[uint64]struct{}), + logger: logger.With().Str("component", "async_da_retriever").Logger(), + } +} + +// Start starts the worker pool. +func (r *AsyncDARetriever) Start(ctx context.Context) { + r.ctx, r.cancel = context.WithCancel(ctx) + // Start 5 workers + for i := 0; i < 5; i++ { + r.wg.Add(1) + go r.worker() + } + r.logger.Info().Msg("AsyncDARetriever started") +} + +// Stop stops the worker pool. +func (r *AsyncDARetriever) Stop() { + if r.cancel != nil { + r.cancel() + } + r.wg.Wait() + r.logger.Info().Msg("AsyncDARetriever stopped") +} + +// RequestRetrieval requests a DA retrieval for the given height. +// It is non-blocking and idempotent. 
+func (r *AsyncDARetriever) RequestRetrieval(height uint64) { + r.mu.Lock() + defer r.mu.Unlock() + + if _, exists := r.inFlight[height]; exists { + return + } + + select { + case r.workCh <- height: + r.inFlight[height] = struct{}{} + r.logger.Debug().Uint64("height", height).Msg("queued DA retrieval request") + default: + r.logger.Debug().Uint64("height", height).Msg("DA retrieval worker pool full, dropping request") + } +} + +func (r *AsyncDARetriever) worker() { + defer r.wg.Done() + + for { + select { + case <-r.ctx.Done(): + return + case height := <-r.workCh: + r.processRetrieval(height) + } + } +} + +func (r *AsyncDARetriever) processRetrieval(height uint64) { + defer func() { + r.mu.Lock() + delete(r.inFlight, height) + r.mu.Unlock() + }() + + events, err := r.retriever.RetrieveFromDA(r.ctx, height) + if err != nil { + r.logger.Debug().Err(err).Uint64("height", height).Msg("async DA retrieval failed") + return + } + + for _, event := range events { + select { + case r.resultCh <- event: + case <-r.ctx.Done(): + return + } + } +} diff --git a/block/internal/syncing/async_da_retriever_test.go b/block/internal/syncing/async_da_retriever_test.go new file mode 100644 index 0000000000..dfaecc922e --- /dev/null +++ b/block/internal/syncing/async_da_retriever_test.go @@ -0,0 +1,141 @@ +package syncing + +import ( + "context" + "testing" + "time" + + "github.com/evstack/ev-node/block/internal/common" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestAsyncDARetriever_RequestRetrieval(t *testing.T) { + logger := zerolog.Nop() + mockRetriever := NewMockDARetriever(t) + resultCh := make(chan common.DAHeightEvent, 10) + + asyncRetriever := NewAsyncDARetriever(mockRetriever, resultCh, logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + asyncRetriever.Start(ctx) + defer asyncRetriever.Stop() + + // 1. 
Test successful retrieval + height1 := uint64(100) + mockRetriever.EXPECT().RetrieveFromDA(mock.Anything, height1).Return([]common.DAHeightEvent{{DaHeight: height1}}, nil).Once() + + asyncRetriever.RequestRetrieval(height1) + + select { + case event := <-resultCh: + assert.Equal(t, height1, event.DaHeight) + case <-time.After(1 * time.Second): + t.Fatal("timeout waiting for result") + } + + // 2. Test deduplication (idempotency) + // We'll block the retriever to simulate a slow request, then send multiple requests for the same height + height2 := uint64(200) + + // Create a channel to signal when the mock is called + calledCh := make(chan struct{}) + // Create a channel to unblock the mock + unblockCh := make(chan struct{}) + + mockRetriever.EXPECT().RetrieveFromDA(mock.Anything, height2).RunAndReturn(func(ctx context.Context, h uint64) ([]common.DAHeightEvent, error) { + close(calledCh) + <-unblockCh + return []common.DAHeightEvent{{DaHeight: h}}, nil + }).Once() // Should be called only once despite multiple requests + + // Send first request + asyncRetriever.RequestRetrieval(height2) + + // Wait for the worker to pick it up + select { + case <-calledCh: + case <-time.After(1 * time.Second): + t.Fatal("timeout waiting for retriever call") + } + + // Send duplicate requests while the first one is still in flight + asyncRetriever.RequestRetrieval(height2) + asyncRetriever.RequestRetrieval(height2) + + // Unblock the worker + close(unblockCh) + + // We should receive exactly one result + select { + case event := <-resultCh: + assert.Equal(t, height2, event.DaHeight) + case <-time.After(1 * time.Second): + t.Fatal("timeout waiting for result") + } + + // Ensure no more results come through + select { + case <-resultCh: + t.Fatal("received duplicate result") + default: + } +} + +func TestAsyncDARetriever_WorkerPoolLimit(t *testing.T) { + logger := zerolog.Nop() + mockRetriever := NewMockDARetriever(t) + resultCh := make(chan common.DAHeightEvent, 100) + + 
asyncRetriever := NewAsyncDARetriever(mockRetriever, resultCh, logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + asyncRetriever.Start(ctx) + defer asyncRetriever.Stop() + + // We have 5 workers. We'll block them all. + unblockCh := make(chan struct{}) + + // Expect 5 calls that block + for i := 0; i < 5; i++ { + h := uint64(1000 + i) + mockRetriever.EXPECT().RetrieveFromDA(mock.Anything, h).RunAndReturn(func(ctx context.Context, h uint64) ([]common.DAHeightEvent, error) { + <-unblockCh + return []common.DAHeightEvent{{DaHeight: h}}, nil + }).Once() + asyncRetriever.RequestRetrieval(h) + } + + // Give workers time to pick up tasks + time.Sleep(100 * time.Millisecond) + + // Now send a 6th request. It should be queued but not processed yet. + height6 := uint64(1005) + processed6 := make(chan struct{}) + mockRetriever.EXPECT().RetrieveFromDA(mock.Anything, height6).RunAndReturn(func(ctx context.Context, h uint64) ([]common.DAHeightEvent, error) { + close(processed6) + return []common.DAHeightEvent{{DaHeight: h}}, nil + }).Once() + + asyncRetriever.RequestRetrieval(height6) + + // Ensure 6th request is NOT processed yet + select { + case <-processed6: + t.Fatal("6th request processed too early") + default: + } + + // Unblock workers + close(unblockCh) + + // Now 6th request should be processed + select { + case <-processed6: + case <-time.After(1 * time.Second): + t.Fatal("timeout waiting for 6th request") + } +} diff --git a/block/internal/syncing/height_store_mock.go b/block/internal/syncing/height_store_mock.go new file mode 100644 index 0000000000..b4857accfa --- /dev/null +++ b/block/internal/syncing/height_store_mock.go @@ -0,0 +1,113 @@ +// Code generated by mockery; DO NOT EDIT. +// github.com/vektra/mockery +// template: testify + +package syncing + +import ( + "context" + + "github.com/celestiaorg/go-header" + mock "github.com/stretchr/testify/mock" +) + +// NewMockHeightStore creates a new instance of MockHeightStore. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockHeightStore[H header.Header[H]](t interface { + mock.TestingT + Cleanup(func()) +}) *MockHeightStore[H] { + mock := &MockHeightStore[H]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// MockHeightStore is an autogenerated mock type for the HeightStore type +type MockHeightStore[H header.Header[H]] struct { + mock.Mock +} + +type MockHeightStore_Expecter[H header.Header[H]] struct { + mock *mock.Mock +} + +func (_m *MockHeightStore[H]) EXPECT() *MockHeightStore_Expecter[H] { + return &MockHeightStore_Expecter[H]{mock: &_m.Mock} +} + +// GetByHeight provides a mock function for the type MockHeightStore +func (_mock *MockHeightStore[H]) GetByHeight(ctx context.Context, height uint64) (H, uint64, error) { + ret := _mock.Called(ctx, height) + + if len(ret) == 0 { + panic("no return value specified for GetByHeight") + } + + var r0 H + var r1 uint64 + var r2 error + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) (H, uint64, error)); ok { + return returnFunc(ctx, height) + } + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) H); ok { + r0 = returnFunc(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(H) + } + } + if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) uint64); ok { + r1 = returnFunc(ctx, height) + } else { + r1 = ret.Get(1).(uint64) + } + if returnFunc, ok := ret.Get(2).(func(context.Context, uint64) error); ok { + r2 = returnFunc(ctx, height) + } else { + r2 = ret.Error(2) + } + return r0, r1, r2 +} + +// MockHeightStore_GetByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetByHeight' +type MockHeightStore_GetByHeight_Call[H header.Header[H]] struct { + *mock.Call +} + +// GetByHeight is a helper method to define mock.On call 
+// - ctx context.Context +// - height uint64 +func (_e *MockHeightStore_Expecter[H]) GetByHeight(ctx interface{}, height interface{}) *MockHeightStore_GetByHeight_Call[H] { + return &MockHeightStore_GetByHeight_Call[H]{Call: _e.mock.On("GetByHeight", ctx, height)} +} + +func (_c *MockHeightStore_GetByHeight_Call[H]) Run(run func(ctx context.Context, height uint64)) *MockHeightStore_GetByHeight_Call[H] { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 uint64 + if args[1] != nil { + arg1 = args[1].(uint64) + } + run( + arg0, + arg1, + ) + }) + return _c +} + +func (_c *MockHeightStore_GetByHeight_Call[H]) Return(v H, v1 uint64, err error) *MockHeightStore_GetByHeight_Call[H] { + _c.Call.Return(v, v1, err) + return _c +} + +func (_c *MockHeightStore_GetByHeight_Call[H]) RunAndReturn(run func(ctx context.Context, height uint64) (H, uint64, error)) *MockHeightStore_GetByHeight_Call[H] { + _c.Call.Return(run) + return _c +} diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index d8c10bc4c3..86fc95dee1 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -6,13 +6,13 @@ import ( "fmt" "sync/atomic" - goheader "github.com/celestiaorg/go-header" + "github.com/celestiaorg/go-header" + "github.com/evstack/ev-node/types" "github.com/rs/zerolog" "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" "github.com/evstack/ev-node/pkg/genesis" - "github.com/evstack/ev-node/types" ) type p2pHandler interface { @@ -20,6 +20,11 @@ type p2pHandler interface { SetProcessedHeight(height uint64) } +// HeightStore is a subset of goheader.Store +type HeightStore[H header.Header[H]] interface { + GetByHeight(ctx context.Context, height uint64) (H, uint64, error) +} + // P2PHandler coordinates block retrieval from P2P stores for the syncer. 
// It waits for both header and data to be available at a given height, // validates their consistency, and emits events to the syncer for processing. @@ -27,8 +32,8 @@ type p2pHandler interface { // The handler maintains a processedHeight to track the highest block that has been // successfully validated and sent to the syncer, preventing duplicate processing. type P2PHandler struct { - headerStore goheader.Store[*types.SignedHeader] - dataStore goheader.Store[*types.Data] + headerStore HeightStore[*types.P2PSignedHeader] + dataStore HeightStore[*types.P2PData] cache cache.CacheManager genesis genesis.Genesis logger zerolog.Logger @@ -38,8 +43,8 @@ type P2PHandler struct { // NewP2PHandler creates a new P2P handler. func NewP2PHandler( - headerStore goheader.Store[*types.SignedHeader], - dataStore goheader.Store[*types.Data], + headerStore HeightStore[*types.P2PSignedHeader], + dataStore HeightStore[*types.P2PData], cache cache.CacheManager, genesis genesis.Genesis, logger zerolog.Logger, @@ -74,26 +79,27 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC return nil } - header, err := h.headerStore.GetByHeight(ctx, height) + p2pHeader, headerDAHint, err := h.headerStore.GetByHeight(ctx, height) if err != nil { if ctx.Err() == nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("header unavailable in store") } return err } + header := p2pHeader.Message if err := h.assertExpectedProposer(header.ProposerAddress); err != nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("invalid header from P2P") return err } - data, err := h.dataStore.GetByHeight(ctx, height) + p2pData, dataDAHint, err := h.dataStore.GetByHeight(ctx, height) if err != nil { if ctx.Err() == nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("data unavailable in store") } return err } - + data := p2pData.Message dataCommitment := data.DACommitment() if !bytes.Equal(header.DataHash[:], dataCommitment[:]) { err := fmt.Errorf("data hash 
mismatch: header %x, data %x", header.DataHash, dataCommitment) @@ -104,10 +110,10 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC // further header validation (signature) is done in validateBlock. // we need to be sure that the previous block n-1 was executed before validating block n event := common.DAHeightEvent{ - Header: header, - Data: data, - DaHeight: 0, - Source: common.SourceP2P, + Header: header, + Data: data, + Source: common.SourceP2P, + DaHeightHints: [2]uint64{headerDAHint, dataDAHint}, } select { diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index dfab41faae..0e0604944b 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -18,7 +18,6 @@ import ( "github.com/evstack/ev-node/pkg/genesis" signerpkg "github.com/evstack/ev-node/pkg/signer" "github.com/evstack/ev-node/pkg/signer/noop" - extmocks "github.com/evstack/ev-node/test/mocks/external" "github.com/evstack/ev-node/types" ) @@ -37,7 +36,7 @@ func buildTestSigner(t *testing.T) ([]byte, crypto.PubKey, signerpkg.Signer) { } // p2pMakeSignedHeader creates a minimally valid SignedHeader for P2P tests. -func p2pMakeSignedHeader(t *testing.T, chainID string, height uint64, proposer []byte, pub crypto.PubKey, signer signerpkg.Signer) *types.SignedHeader { +func p2pMakeSignedHeader(t *testing.T, chainID string, height uint64, proposer []byte, pub crypto.PubKey, signer signerpkg.Signer) *types.P2PSignedHeader { t.Helper() hdr := &types.SignedHeader{ Header: types.Header{ @@ -51,14 +50,14 @@ func p2pMakeSignedHeader(t *testing.T, chainID string, height uint64, proposer [ sig, err := signer.Sign(bz) require.NoError(t, err, "failed to sign header bytes") hdr.Signature = sig - return hdr + return &types.P2PSignedHeader{Message: hdr} } // P2PTestData aggregates dependencies used by P2P handler tests. 
type P2PTestData struct { Handler *P2PHandler - HeaderStore *extmocks.MockStore[*types.SignedHeader] - DataStore *extmocks.MockStore[*types.Data] + HeaderStore *MockHeightStore[*types.P2PSignedHeader] + DataStore *MockHeightStore[*types.P2PData] Cache cache.CacheManager Genesis genesis.Genesis ProposerAddr []byte @@ -73,8 +72,8 @@ func setupP2P(t *testing.T) *P2PTestData { gen := genesis.Genesis{ChainID: "p2p-test", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: proposerAddr} - headerStoreMock := extmocks.NewMockStore[*types.SignedHeader](t) - dataStoreMock := extmocks.NewMockStore[*types.Data](t) + headerStoreMock := NewMockHeightStore[*types.P2PSignedHeader](t) + dataStoreMock := NewMockHeightStore[*types.P2PData](t) cfg := config.Config{ RootDir: t.TempDir(), @@ -129,16 +128,16 @@ func TestP2PHandler_ProcessHeight_EmitsEventWhenHeaderAndDataPresent(t *testing. require.Equal(t, string(p.Genesis.ProposerAddress), string(p.ProposerAddr)) header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 5, p.ProposerAddr, p.ProposerPub, p.Signer) - data := makeData(p.Genesis.ChainID, 5, 1) - header.DataHash = data.DACommitment() - bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) + data := &types.P2PData{Message: makeData(p.Genesis.ChainID, 5, 1)} + header.Message.DataHash = data.Message.DACommitment() + bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Message.Header) require.NoError(t, err) sig, err := p.Signer.Sign(bz) require.NoError(t, err) - header.Signature = sig + header.Message.Signature = sig - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(5)).Return(header, nil).Once() - p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(5)).Return(data, nil).Once() + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(5)).Return(header, 0, nil).Once() + p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(5)).Return(data, 0, nil).Once() ch := make(chan common.DAHeightEvent, 1) err = 
p.Handler.ProcessHeight(ctx, 5, ch) @@ -155,16 +154,16 @@ func TestP2PHandler_ProcessHeight_SkipsWhenDataMissing(t *testing.T) { ctx := context.Background() header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 7, p.ProposerAddr, p.ProposerPub, p.Signer) - data := makeData(p.Genesis.ChainID, 7, 1) - header.DataHash = data.DACommitment() - bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) + data := &types.P2PData{Message: makeData(p.Genesis.ChainID, 7, 1)} + header.Message.DataHash = data.Message.DACommitment() + bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Message.Header) require.NoError(t, err) sig, err := p.Signer.Sign(bz) require.NoError(t, err) - header.Signature = sig + header.Message.Signature = sig - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(7)).Return(header, nil).Once() - p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(7)).Return(nil, errors.New("missing")).Once() + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(7)).Return(header, 0, nil).Once() + p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(7)).Return(nil, 0, errors.New("missing")).Once() ch := make(chan common.DAHeightEvent, 1) err = p.Handler.ProcessHeight(ctx, 7, ch) @@ -177,7 +176,7 @@ func TestP2PHandler_ProcessHeight_SkipsWhenHeaderMissing(t *testing.T) { p := setupP2P(t) ctx := context.Background() - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(9)).Return(nil, errors.New("missing")).Once() + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(9)).Return(nil, 0, errors.New("missing")).Once() ch := make(chan common.DAHeightEvent, 1) err := p.Handler.ProcessHeight(ctx, 9, ch) @@ -196,9 +195,9 @@ func TestP2PHandler_ProcessHeight_SkipsOnProposerMismatch(t *testing.T) { require.NotEqual(t, string(p.Genesis.ProposerAddress), string(badAddr)) header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 11, badAddr, pub, signer) - header.DataHash = common.DataHashForEmptyTxs + header.Message.DataHash = 
common.DataHashForEmptyTxs - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(11)).Return(header, nil).Once() + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(11)).Return(header, 0, nil).Once() ch := make(chan common.DAHeightEvent, 1) err = p.Handler.ProcessHeight(ctx, 11, ch) @@ -225,16 +224,16 @@ func TestP2PHandler_ProcessedHeightSkipsPreviouslyHandledBlocks(t *testing.T) { // Height 6 should be fetched normally. header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 6, p.ProposerAddr, p.ProposerPub, p.Signer) - data := makeData(p.Genesis.ChainID, 6, 1) - header.DataHash = data.DACommitment() - bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) + data := &types.P2PData{Message: makeData(p.Genesis.ChainID, 6, 1)} + header.Message.DataHash = data.Message.DACommitment() + bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Message.Header) require.NoError(t, err) sig, err := p.Signer.Sign(bz) require.NoError(t, err) - header.Signature = sig + header.Message.Signature = sig - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(6)).Return(header, nil).Once() - p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(6)).Return(data, nil).Once() + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(6)).Return(header, 0, nil).Once() + p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(6)).Return(data, 0, nil).Once() require.NoError(t, p.Handler.ProcessHeight(ctx, 6, ch)) @@ -248,16 +247,16 @@ func TestP2PHandler_SetProcessedHeightPreventsDuplicates(t *testing.T) { ctx := context.Background() header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 8, p.ProposerAddr, p.ProposerPub, p.Signer) - data := makeData(p.Genesis.ChainID, 8, 0) - header.DataHash = data.DACommitment() - bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) + data := &types.P2PData{Message: makeData(p.Genesis.ChainID, 8, 0)} + header.Message.DataHash = data.Message.DACommitment() + bz, err := 
types.DefaultAggregatorNodeSignatureBytesProvider(&header.Message.Header) require.NoError(t, err) sig, err := p.Signer.Sign(bz) require.NoError(t, err) - header.Signature = sig + header.Message.Signature = sig - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(8)).Return(header, nil).Once() - p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(8)).Return(data, nil).Once() + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(8)).Return(header, 0, nil).Once() + p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(8)).Return(data, 0, nil).Once() ch := make(chan common.DAHeightEvent, 1) require.NoError(t, p.Handler.ProcessHeight(ctx, 8, ch)) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 266bc55e40..bd391b8f49 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -93,8 +93,8 @@ type Syncer struct { daRetrieverHeight *atomic.Uint64 // P2P stores - headerStore common.Broadcaster[*types.SignedHeader] - dataStore common.Broadcaster[*types.Data] + headerStore common.HeaderP2PBroadcaster + dataStore common.DataP2PBroadcaster // Channels for coordination heightInCh chan common.DAHeightEvent @@ -118,6 +118,9 @@ type Syncer struct { // P2P wait coordination p2pWaitState atomic.Value // stores p2pWaitState + + // Async DA retriever + asyncDARetriever *AsyncDARetriever } // pendingForcedInclusionTx represents a forced inclusion transaction that hasn't been included yet @@ -137,8 +140,8 @@ func NewSyncer( metrics *common.Metrics, config config.Config, genesis genesis.Genesis, - headerStore common.Broadcaster[*types.SignedHeader], - dataStore common.Broadcaster[*types.Data], + headerStore common.HeaderP2PBroadcaster, + dataStore common.DataP2PBroadcaster, logger zerolog.Logger, options common.BlockOptions, errorCh chan<- error, @@ -187,8 +190,10 @@ func (s *Syncer) Start(ctx context.Context) error { // Initialize handlers s.daRetriever = NewDARetriever(s.daClient, s.cache, s.genesis, 
s.logger) + s.asyncDARetriever = NewAsyncDARetriever(s.daRetriever, s.heightInCh, s.logger) + s.asyncDARetriever.Start(s.ctx) s.fiRetriever = da.NewForcedInclusionRetriever(s.daClient, s.logger, s.genesis.DAStartHeight, s.genesis.DAEpochForcedInclusion) - s.p2pHandler = NewP2PHandler(s.headerStore.Store(), s.dataStore.Store(), s.cache, s.genesis, s.logger) + s.p2pHandler = NewP2PHandler(s.headerStore, s.dataStore, s.cache, s.genesis, s.logger) if currentHeight, err := s.store.Height(s.ctx); err != nil { s.logger.Error().Err(err).Msg("failed to set initial processed height for p2p handler") } else { @@ -500,6 +505,48 @@ func (s *Syncer) processHeightEvent(event *common.DAHeightEvent) { return } + // If this is a P2P event with a DA height hint, trigger targeted DA retrieval + // This allows us to fetch the block directly from the specified DA height instead of sequential scanning + if event.Source == common.SourceP2P { + var daHeightHints []uint64 + switch { + case event.DaHeightHints == [2]uint64{0, 0}: + // empty, nothing to do + case event.DaHeightHints[0] == 0: + // check only data + if _, exists := s.cache.GetDataDAIncluded(event.Data.Hash().String()); !exists { + daHeightHints = []uint64{event.DaHeightHints[1]} + } + case event.DaHeightHints[1] == 0: + // check only header + if _, exists := s.cache.GetHeaderDAIncluded(event.Header.Hash().String()); !exists { + daHeightHints = []uint64{event.DaHeightHints[0]} + } + default: + // check both + if _, exists := s.cache.GetHeaderDAIncluded(event.Header.Hash().String()); !exists { + daHeightHints = []uint64{event.DaHeightHints[0]} + } + if _, exists := s.cache.GetDataDAIncluded(event.Data.Hash().String()); !exists { + daHeightHints = append(daHeightHints, event.DaHeightHints[1]) + } + if len(daHeightHints) == 2 && daHeightHints[0] == daHeightHints[1] { + daHeightHints = daHeightHints[0:1] + } + } + if len(daHeightHints) > 0 { + for _, daHeightHint := range daHeightHints { + s.logger.Debug(). 
+ Uint64("height", height). + Uint64("da_height_hint", daHeightHint). + Msg("P2P event with DA height hint, triggering targeted DA retrieval") + + // Trigger targeted DA retrieval in background via worker pool + s.asyncDARetriever.RequestRetrieval(daHeightHint) + } + } + } + // Last data must be got from store if the event comes from DA and the data hash is empty. // When if the event comes from P2P, the sequencer and then all the full nodes contains the data. if event.Source == common.SourceDA && bytes.Equal(event.Header.DataHash, common.DataHashForEmptyTxs) && currentHeight > 0 { @@ -539,12 +586,12 @@ func (s *Syncer) processHeightEvent(event *common.DAHeightEvent) { g.Go(func() error { // broadcast header locally only — prevents spamming the p2p network with old height notifications, // allowing the syncer to update its target and fill missing blocks - return s.headerStore.WriteToStoreAndBroadcast(ctx, event.Header, pubsub.WithLocalPublication(true)) + return s.headerStore.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{Message: event.Header}, pubsub.WithLocalPublication(true)) }) g.Go(func() error { // broadcast data locally only — prevents spamming the p2p network with old height notifications, // allowing the syncer to update its target and fill missing blocks - return s.dataStore.WriteToStoreAndBroadcast(ctx, event.Data, pubsub.WithLocalPublication(true)) + return s.dataStore.WriteToStoreAndBroadcast(ctx, &types.P2PData{Message: event.Data}, pubsub.WithLocalPublication(true)) }) if err := g.Wait(); err != nil { s.logger.Error().Err(err).Msg("failed to append event header and/or data to p2p store") diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index 6df2e577a0..9adef95f8f 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -83,14 +83,6 @@ func TestSyncer_BackoffOnDAError(t *testing.T) { mockDataStore := 
extmocks.NewMockStore[*types.Data](t) mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() - headerStore := common.NewMockBroadcaster[*types.SignedHeader](t) - headerStore.EXPECT().Store().Return(mockHeaderStore).Maybe() - syncer.headerStore = headerStore - - dataStore := common.NewMockBroadcaster[*types.Data](t) - dataStore.EXPECT().Store().Return(mockDataStore).Maybe() - syncer.dataStore = dataStore - var callTimes []time.Time callCount := 0 @@ -179,14 +171,6 @@ func TestSyncer_BackoffResetOnSuccess(t *testing.T) { mockDataStore := extmocks.NewMockStore[*types.Data](t) mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() - headerStore := common.NewMockBroadcaster[*types.SignedHeader](t) - headerStore.EXPECT().Store().Return(mockHeaderStore).Maybe() - syncer.headerStore = headerStore - - dataStore := common.NewMockBroadcaster[*types.Data](t) - dataStore.EXPECT().Store().Return(mockDataStore).Maybe() - syncer.dataStore = dataStore - var callTimes []time.Time // First call - error (should trigger backoff) @@ -269,14 +253,6 @@ func TestSyncer_BackoffBehaviorIntegration(t *testing.T) { mockDataStore := extmocks.NewMockStore[*types.Data](t) mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() - headerStore := common.NewMockBroadcaster[*types.SignedHeader](t) - headerStore.EXPECT().Store().Return(mockHeaderStore).Maybe() - syncer.headerStore = headerStore - - dataStore := common.NewMockBroadcaster[*types.Data](t) - dataStore.EXPECT().Store().Return(mockDataStore).Maybe() - syncer.dataStore = dataStore - var callTimes []time.Time p2pHandler.On("SetProcessedHeight", mock.Anything).Return().Maybe() @@ -350,8 +326,8 @@ func setupTestSyncer(t *testing.T, daBlockTime time.Duration) *Syncer { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), 
common.DefaultBlockOptions(), make(chan error, 1), diff --git a/block/internal/syncing/syncer_benchmark_test.go b/block/internal/syncing/syncer_benchmark_test.go index e2b6f6e51f..a1f0a00314 100644 --- a/block/internal/syncing/syncer_benchmark_test.go +++ b/block/internal/syncing/syncer_benchmark_test.go @@ -153,9 +153,9 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay mockP2P := newMockp2pHandler(b) // not used directly in this benchmark path mockP2P.On("SetProcessedHeight", mock.Anything).Return().Maybe() s.p2pHandler = mockP2P - headerP2PStore := common.NewMockBroadcaster[*types.SignedHeader](b) + headerP2PStore := common.NewMockBroadcaster[*types.P2PSignedHeader](b) s.headerStore = headerP2PStore - dataP2PStore := common.NewMockBroadcaster[*types.Data](b) + dataP2PStore := common.NewMockBroadcaster[*types.P2PData](b) s.dataStore = dataP2PStore return &benchFixture{s: s, st: st, cm: cm, cancel: cancel} } diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index 8b0622a42d..0fe087efcd 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -380,8 +380,8 @@ func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -453,8 +453,8 @@ func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), 
make(chan error, 1), @@ -556,8 +556,8 @@ func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -663,8 +663,8 @@ func TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -729,8 +729,8 @@ func TestVerifyForcedInclusionTxs_NamespaceNotConfigured(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -794,8 +794,8 @@ func TestVerifyForcedInclusionTxs_DeferralWithinEpoch(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -917,8 +917,8 @@ func TestVerifyForcedInclusionTxs_MaliciousAfterEpochEnd(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -1006,8 +1006,8 @@ func 
TestVerifyForcedInclusionTxs_SmoothingExceedsEpoch(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 21b012cf21..f8231bf4bc 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -123,8 +123,8 @@ func TestSyncer_validateBlock_DataHashMismatch(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -174,8 +174,8 @@ func TestProcessHeightEvent_SyncsAndUpdatesState(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), errChan, @@ -228,8 +228,8 @@ func TestSequentialBlockSync(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), errChan, @@ -346,11 +346,8 @@ func TestSyncLoopPersistState(t *testing.T) { mockDataStore := extmocks.NewMockStore[*types.Data](t) mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() - mockP2PHeaderStore := common.NewMockBroadcaster[*types.SignedHeader](t) - 
mockP2PHeaderStore.EXPECT().Store().Return(mockHeaderStore).Maybe() - - mockP2PDataStore := common.NewMockBroadcaster[*types.Data](t) - mockP2PDataStore.EXPECT().Store().Return(mockDataStore).Maybe() + mockP2PHeaderStore := common.NewMockBroadcaster[*types.P2PSignedHeader](t) + mockP2PDataStore := common.NewMockBroadcaster[*types.P2PData](t) errorCh := make(chan error, 1) syncerInst1 := NewSyncer( @@ -700,3 +697,79 @@ func TestSyncer_getHighestStoredDAHeight(t *testing.T) { highestDA = syncer.getHighestStoredDAHeight() assert.Equal(t, uint64(200), highestDA, "should return highest DA height from most recent included height") } + +func TestProcessHeightEvent_TriggersAsyncDARetrieval(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, _, _ := buildSyncTestSigner(t) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain").Return([]byte("app0"), uint64(1024), nil).Once() + + s := NewSyncer( + st, + mockExec, + nil, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock AsyncDARetriever + mockRetriever := NewMockDARetriever(t) + asyncRetriever := NewAsyncDARetriever(mockRetriever, s.heightInCh, zerolog.Nop()) + // We don't start the async retriever to avoid race conditions in test, + // we just want to verify RequestRetrieval queues the request. + // However, RequestRetrieval writes to a channel, so we need a consumer or a buffered channel. 
+ // The workCh is buffered (100), so we are good. + s.asyncDARetriever = asyncRetriever + + // Create event with DA height hint + evt := common.DAHeightEvent{ + Header: &types.SignedHeader{Header: types.Header{BaseHeader: types.BaseHeader{ChainID: "c", Height: 2}}}, + Data: &types.Data{Metadata: &types.Metadata{ChainID: "c", Height: 2}}, + Source: common.SourceP2P, + DaHeightHints: [2]uint64{100, 100}, + } + + // Current height is 0 (from init), event height is 2. + // processHeightEvent checks: + // 1. height <= currentHeight (2 <= 0 -> false) + // 2. height != currentHeight+1 (2 != 1 -> true) -> stores as pending event + + // We need to simulate height 1 being processed first so height 2 is "next" + // OR we can just test that it DOES NOT trigger DA retrieval if it's pending. + // Wait, the logic for DA retrieval is BEFORE the "next block" check? + // Let's check syncer.go... + // Yes, "If this is a P2P event with a DA height hint, trigger targeted DA retrieval" block is AFTER "If this is not the next block in sequence... return" + + // So we need to be at height 1 to process height 2. + // Let's set the store height to 1. + batch, err := st.NewBatch(context.Background()) + require.NoError(t, err) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + s.processHeightEvent(&evt) + + // Verify that the request was queued in the async retriever + select { + case h := <-asyncRetriever.workCh: + assert.Equal(t, uint64(100), h) + default: + t.Fatal("expected DA retrieval request to be queued") + } +} diff --git a/docs/guides/migrating-to-ev-abci.md b/docs/guides/migrating-to-ev-abci.md index f49ba6df6f..eb6abcd9e0 100644 --- a/docs/guides/migrating-to-ev-abci.md +++ b/docs/guides/migrating-to-ev-abci.md @@ -41,9 +41,9 @@ import ( ) ``` -2. Add the migration manager keeper to your app struct -3. Register the module in your module manager -4. Configure the migration manager in your app initialization +1. 
Add the migration manager keeper to your app struct +2. Register the module in your module manager +3. Configure the migration manager in your app initialization ### Step 2: Replace Staking Module with Wrapper diff --git a/pkg/rpc/client/client_test.go b/pkg/rpc/client/client_test.go index 4b2b82e1b3..3841156635 100644 --- a/pkg/rpc/client/client_test.go +++ b/pkg/rpc/client/client_test.go @@ -28,10 +28,11 @@ import ( func setupTestServer( t *testing.T, mockStore *mocks.MockStore, - headerStore goheader.Store[*types.SignedHeader], - dataStore goheader.Store[*types.Data], + headerStore goheader.Store[*types.P2PSignedHeader], + dataStore goheader.Store[*types.P2PData], mockP2P *mocks.MockP2PRPC, ) (*httptest.Server, *Client) { + t.Helper() mux := http.NewServeMux() logger := zerolog.Nop() @@ -105,8 +106,8 @@ func TestClientGetMetadata(t *testing.T) { func TestClientGetP2PStoreInfo(t *testing.T) { mockStore := mocks.NewMockStore(t) mockP2P := mocks.NewMockP2PRPC(t) - headerStore := headerstoremocks.NewMockStore[*types.SignedHeader](t) - dataStore := headerstoremocks.NewMockStore[*types.Data](t) + headerStore := headerstoremocks.NewMockStore[*types.P2PSignedHeader](t) + dataStore := headerstoremocks.NewMockStore[*types.P2PData](t) now := time.Now().UTC() @@ -250,27 +251,31 @@ func TestClientGetNamespace(t *testing.T) { require.NotEmpty(t, namespaceResp.DataNamespace) } -func testSignedHeader(height uint64, ts time.Time) *types.SignedHeader { - return &types.SignedHeader{ - Header: types.Header{ - BaseHeader: types.BaseHeader{ - Height: height, - Time: uint64(ts.UnixNano()), - ChainID: "test-chain", +func testSignedHeader(height uint64, ts time.Time) *types.P2PSignedHeader { + return &types.P2PSignedHeader{ + Message: &types.SignedHeader{ + Header: types.Header{ + BaseHeader: types.BaseHeader{ + Height: height, + Time: uint64(ts.UnixNano()), + ChainID: "test-chain", + }, + ProposerAddress: []byte{0x01}, + DataHash: []byte{0x02}, + AppHash: []byte{0x03}, }, - 
ProposerAddress: []byte{0x01}, - DataHash: []byte{0x02}, - AppHash: []byte{0x03}, }, } } -func testData(height uint64, ts time.Time) *types.Data { - return &types.Data{ - Metadata: &types.Metadata{ - ChainID: "test-chain", - Height: height, - Time: uint64(ts.UnixNano()), +func testData(height uint64, ts time.Time) *types.P2PData { + return &types.P2PData{ + Message: &types.Data{ + Metadata: &types.Metadata{ + ChainID: "test-chain", + Height: height, + Time: uint64(ts.UnixNano()), + }, }, } } diff --git a/pkg/rpc/server/server.go b/pkg/rpc/server/server.go index 817ca1f377..dc21724ec9 100644 --- a/pkg/rpc/server/server.go +++ b/pkg/rpc/server/server.go @@ -34,16 +34,16 @@ var _ rpc.StoreServiceHandler = (*StoreServer)(nil) // StoreServer implements the StoreService defined in the proto file type StoreServer struct { store store.Store - headerStore goheader.Store[*types.SignedHeader] - dataStore goheader.Store[*types.Data] + headerStore goheader.Store[*types.P2PSignedHeader] + dataStore goheader.Store[*types.P2PData] logger zerolog.Logger } // NewStoreServer creates a new StoreServer instance func NewStoreServer( store store.Store, - headerStore goheader.Store[*types.SignedHeader], - dataStore goheader.Store[*types.Data], + headerStore goheader.Store[*types.P2PSignedHeader], + dataStore goheader.Store[*types.P2PData], logger zerolog.Logger, ) *StoreServer { return &StoreServer{ @@ -370,8 +370,8 @@ func (p *P2PServer) GetNetInfo( // NewServiceHandler creates a new HTTP handler for Store, P2P and Config services func NewServiceHandler( store store.Store, - headerStore goheader.Store[*types.SignedHeader], - dataStore goheader.Store[*types.Data], + headerStore goheader.Store[*types.P2PSignedHeader], + dataStore goheader.Store[*types.P2PData], peerManager p2p.P2PRPC, proposerAddress []byte, logger zerolog.Logger, diff --git a/pkg/rpc/server/server_test.go b/pkg/rpc/server/server_test.go index 32e9b0ebec..42a9812479 100644 --- a/pkg/rpc/server/server_test.go +++ 
b/pkg/rpc/server/server_test.go @@ -325,8 +325,8 @@ func TestGetGenesisDaHeight_InvalidLength(t *testing.T) { func TestGetP2PStoreInfo(t *testing.T) { t.Run("returns snapshots for configured stores", func(t *testing.T) { mockStore := mocks.NewMockStore(t) - headerStore := headerstoremocks.NewMockStore[*types.SignedHeader](t) - dataStore := headerstoremocks.NewMockStore[*types.Data](t) + headerStore := headerstoremocks.NewMockStore[*types.P2PSignedHeader](t) + dataStore := headerstoremocks.NewMockStore[*types.P2PData](t) logger := zerolog.Nop() server := NewStoreServer(mockStore, headerStore, dataStore, logger) @@ -354,10 +354,10 @@ func TestGetP2PStoreInfo(t *testing.T) { t.Run("returns error when a store edge fails", func(t *testing.T) { mockStore := mocks.NewMockStore(t) - headerStore := headerstoremocks.NewMockStore[*types.SignedHeader](t) + headerStore := headerstoremocks.NewMockStore[*types.P2PSignedHeader](t) logger := zerolog.Nop() headerStore.On("Height").Return(uint64(0)) - headerStore.On("Head", mock.Anything).Return((*types.SignedHeader)(nil), fmt.Errorf("boom")) + headerStore.On("Head", mock.Anything).Return((*types.P2PSignedHeader)(nil), fmt.Errorf("boom")) server := NewStoreServer(mockStore, headerStore, nil, logger) resp, err := server.GetP2PStoreInfo(context.Background(), connect.NewRequest(&emptypb.Empty{})) @@ -627,27 +627,31 @@ func TestHealthReadyEndpoint(t *testing.T) { }) } -func makeTestSignedHeader(height uint64, ts time.Time) *types.SignedHeader { - return &types.SignedHeader{ - Header: types.Header{ - BaseHeader: types.BaseHeader{ - Height: height, - Time: uint64(ts.UnixNano()), - ChainID: "test-chain", +func makeTestSignedHeader(height uint64, ts time.Time) *types.P2PSignedHeader { + return &types.P2PSignedHeader{ + Message: &types.SignedHeader{ + Header: types.Header{ + BaseHeader: types.BaseHeader{ + Height: height, + Time: uint64(ts.UnixNano()), + ChainID: "test-chain", + }, + ProposerAddress: []byte{0x01}, + DataHash: []byte{0x02}, + 
AppHash: []byte{0x03}, }, - ProposerAddress: []byte{0x01}, - DataHash: []byte{0x02}, - AppHash: []byte{0x03}, }, } } -func makeTestData(height uint64, ts time.Time) *types.Data { - return &types.Data{ - Metadata: &types.Metadata{ - ChainID: "test-chain", - Height: height, - Time: uint64(ts.UnixNano()), +func makeTestData(height uint64, ts time.Time) *types.P2PData { + return &types.P2PData{ + Message: &types.Data{ + Metadata: &types.Metadata{ + ChainID: "test-chain", + Height: height, + Time: uint64(ts.UnixNano()), + }, }, } } diff --git a/pkg/sync/sync_service.go b/pkg/sync/sync_service.go index 9b018460ed..c59ab2048a 100644 --- a/pkg/sync/sync_service.go +++ b/pkg/sync/sync_service.go @@ -36,10 +36,22 @@ const ( // TODO: when we add pruning we can remove this const ninetyNineYears = 99 * 365 * 24 * time.Hour +type EntityWithDAHint[H any] interface { + header.Header[H] + SetDAHint(daHeight uint64) + DAHint() uint64 +} + +// HeaderSyncService is the P2P Sync Service for headers. +type HeaderSyncService = SyncService[*types.P2PSignedHeader] + +// DataSyncService is the P2P Sync Service for blocks. +type DataSyncService = SyncService[*types.P2PData] + // SyncService is the P2P Sync Service for blocks and headers. // // Uses the go-header library for handling all P2P logic. 
-type SyncService[H header.Header[H]] struct { +type SyncService[V EntityWithDAHint[V]] struct { conf config.Config logger zerolog.Logger syncType syncType @@ -48,22 +60,16 @@ type SyncService[H header.Header[H]] struct { p2p *p2p.Client - ex *goheaderp2p.Exchange[H] - sub *goheaderp2p.Subscriber[H] - p2pServer *goheaderp2p.ExchangeServer[H] - store *goheaderstore.Store[H] - syncer *goheadersync.Syncer[H] + ex *goheaderp2p.Exchange[V] + sub *goheaderp2p.Subscriber[V] + p2pServer *goheaderp2p.ExchangeServer[V] + store *goheaderstore.Store[V] + syncer *goheadersync.Syncer[V] syncerStatus *SyncerStatus - topicSubscription header.Subscription[H] + topicSubscription header.Subscription[V] storeInitialized atomic.Bool } -// DataSyncService is the P2P Sync Service for blocks. -type DataSyncService = SyncService[*types.Data] - -// HeaderSyncService is the P2P Sync Service for headers. -type HeaderSyncService = SyncService[*types.SignedHeader] - // NewDataSyncService returns a new DataSyncService. func NewDataSyncService( store ds.Batching, @@ -72,7 +78,7 @@ func NewDataSyncService( p2p *p2p.Client, logger zerolog.Logger, ) (*DataSyncService, error) { - return newSyncService[*types.Data](store, dataSync, conf, genesis, p2p, logger) + return newSyncService[*types.P2PData](store, dataSync, conf, genesis, p2p, logger) } // NewHeaderSyncService returns a new HeaderSyncService. 
@@ -83,22 +89,22 @@ func NewHeaderSyncService( p2p *p2p.Client, logger zerolog.Logger, ) (*HeaderSyncService, error) { - return newSyncService[*types.SignedHeader](store, headerSync, conf, genesis, p2p, logger) + return newSyncService[*types.P2PSignedHeader](store, headerSync, conf, genesis, p2p, logger) } -func newSyncService[H header.Header[H]]( +func newSyncService[V EntityWithDAHint[V]]( store ds.Batching, syncType syncType, conf config.Config, genesis genesis.Genesis, p2p *p2p.Client, logger zerolog.Logger, -) (*SyncService[H], error) { +) (*SyncService[V], error) { if p2p == nil { return nil, errors.New("p2p client cannot be nil") } - ss, err := goheaderstore.NewStore[H]( + ss, err := goheaderstore.NewStore[V]( store, goheaderstore.WithStorePrefix(string(syncType)), goheaderstore.WithMetrics(), @@ -107,7 +113,7 @@ func newSyncService[H header.Header[H]]( return nil, fmt.Errorf("failed to initialize the %s store: %w", syncType, err) } - svc := &SyncService[H]{ + svc := &SyncService[V]{ conf: conf, genesis: genesis, p2p: p2p, @@ -121,21 +127,22 @@ func newSyncService[H header.Header[H]]( } // Store returns the store of the SyncService -func (syncService *SyncService[H]) Store() header.Store[H] { +func (syncService *SyncService[V]) Store() header.Store[V] { return syncService.store } // WriteToStoreAndBroadcast initializes store if needed and broadcasts provided header or block. // Note: Only returns an error in case store can't be initialized. Logs error if there's one while broadcasting. 
-func (syncService *SyncService[H]) WriteToStoreAndBroadcast(ctx context.Context, headerOrData H, opts ...pubsub.PubOpt) error { +func (syncService *SyncService[V]) WriteToStoreAndBroadcast(ctx context.Context, payload V, opts ...pubsub.PubOpt) error { if syncService.genesis.InitialHeight == 0 { return fmt.Errorf("invalid initial height; cannot be zero") } - if headerOrData.IsZero() { + if payload.IsZero() { return fmt.Errorf("empty header/data cannot write to store or broadcast") } + headerOrData := payload storeInitialized := false if syncService.storeInitialized.CompareAndSwap(false, true) { var err error @@ -174,8 +181,33 @@ func (syncService *SyncService[H]) WriteToStoreAndBroadcast(ctx context.Context, return nil } +func (s *SyncService[V]) AppendDAHint(ctx context.Context, daHeight uint64, hashes ...types.Hash) error { + entries := make([]V, 0, len(hashes)) + for _, h := range hashes { + v, err := s.store.Get(ctx, h) + if err != nil { + if errors.Is(err, header.ErrNotFound) { + continue + } + return err + } + v.SetDAHint(daHeight) + entries = append(entries, v) + } + return s.store.Append(ctx, entries...) +} + +func (s *SyncService[V]) GetByHeight(ctx context.Context, height uint64) (V, uint64, error) { + c, err := s.store.GetByHeight(ctx, height) + if err != nil { + var zero V + return zero, 0, err + } + return c, c.DAHint(), nil +} + // Start is a part of Service interface. -func (syncService *SyncService[H]) Start(ctx context.Context) error { +func (syncService *SyncService[V]) Start(ctx context.Context) error { // setup P2P infrastructure, but don't start Subscriber yet. peerIDs, err := syncService.setupP2PInfrastructure(ctx) if err != nil { @@ -183,7 +215,7 @@ func (syncService *SyncService[H]) Start(ctx context.Context) error { } // create syncer, must be before initFromP2PWithRetry which calls startSyncer. 
- if syncService.syncer, err = newSyncer( + if syncService.syncer, err = newSyncer[V]( syncService.ex, syncService.store, syncService.sub, @@ -226,7 +258,7 @@ func (syncService *SyncService[H]) startSyncer(ctx context.Context) error { // initStore initializes the store with the given initial header. // it is a no-op if the store is already initialized. // Returns true when the store was initialized by this call. -func (syncService *SyncService[H]) initStore(ctx context.Context, initial H) (bool, error) { +func (syncService *SyncService[V]) initStore(ctx context.Context, initial V) (bool, error) { if initial.IsZero() { return false, errors.New("failed to initialize the store") } @@ -250,7 +282,7 @@ func (syncService *SyncService[H]) initStore(ctx context.Context, initial H) (bo // setupP2PInfrastructure sets up the P2P infrastructure (Exchange, ExchangeServer, Store) // but does not start the Subscriber. Returns peer IDs for later use. -func (syncService *SyncService[H]) setupP2PInfrastructure(ctx context.Context) ([]peer.ID, error) { +func (syncService *SyncService[V]) setupP2PInfrastructure(ctx context.Context) ([]peer.ID, error) { ps := syncService.p2p.PubSub() _, _, chainID, err := syncService.p2p.Info() @@ -260,7 +292,7 @@ func (syncService *SyncService[H]) setupP2PInfrastructure(ctx context.Context) ( networkID := syncService.getNetworkID(chainID) // Create subscriber but DON'T start it yet - syncService.sub, err = goheaderp2p.NewSubscriber[H]( + syncService.sub, err = goheaderp2p.NewSubscriber[V]( ps, pubsub.DefaultMsgIdFn, goheaderp2p.WithSubscriberNetworkID(networkID), @@ -283,7 +315,7 @@ func (syncService *SyncService[H]) setupP2PInfrastructure(ctx context.Context) ( peerIDs := syncService.getPeerIDs() - if syncService.ex, err = newP2PExchange[H](syncService.p2p.Host(), peerIDs, networkID, syncService.genesis.ChainID, syncService.p2p.ConnectionGater()); err != nil { + if syncService.ex, err = newP2PExchange[V](syncService.p2p.Host(), peerIDs, networkID, 
syncService.genesis.ChainID, syncService.p2p.ConnectionGater()); err != nil { return nil, fmt.Errorf("error while creating exchange: %w", err) } if err := syncService.ex.Start(ctx); err != nil { @@ -311,14 +343,14 @@ func (syncService *SyncService[H]) startSubscriber(ctx context.Context) error { // It inspects the local store to determine the first height to request: // - when the store already contains items, it reuses the latest height as the starting point; // - otherwise, it falls back to the configured genesis height. -func (syncService *SyncService[H]) initFromP2PWithRetry(ctx context.Context, peerIDs []peer.ID) error { +func (syncService *SyncService[V]) initFromP2PWithRetry(ctx context.Context, peerIDs []peer.ID) error { if len(peerIDs) == 0 { return nil } tryInit := func(ctx context.Context) (bool, error) { var ( - trusted H + trusted V err error heightToQuery uint64 ) @@ -382,7 +414,7 @@ func (syncService *SyncService[H]) initFromP2PWithRetry(ctx context.Context, pee // Stop is a part of Service interface. // // `store` is closed last because it's used by other services. -func (syncService *SyncService[H]) Stop(ctx context.Context) error { +func (syncService *SyncService[V]) Stop(ctx context.Context) error { // unsubscribe from topic first so that sub.Stop() does not fail syncService.topicSubscription.Cancel() err := errors.Join( @@ -438,7 +470,7 @@ func newSyncer[H header.Header[H]]( goheadersync.WithPruningWindow(ninetyNineYears), goheadersync.WithTrustingPeriod(ninetyNineYears), ) - return goheadersync.NewSyncer(ex, store, sub, opts...) + return goheadersync.NewSyncer[H](ex, store, sub, opts...) 
} func (syncService *SyncService[H]) getNetworkID(network string) string { diff --git a/pkg/sync/sync_service_test.go b/pkg/sync/sync_service_test.go index 93603752a7..d8d5cd57ee 100644 --- a/pkg/sync/sync_service_test.go +++ b/pkg/sync/sync_service_test.go @@ -73,12 +73,12 @@ func TestHeaderSyncServiceRestart(t *testing.T) { signedHeader, err := types.GetRandomSignedHeaderCustom(&headerConfig, genesisDoc.ChainID) require.NoError(t, err) require.NoError(t, signedHeader.Validate()) - require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, signedHeader)) + require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{Message: signedHeader})) for i := genesisDoc.InitialHeight + 1; i < 2; i++ { signedHeader = nextHeader(t, signedHeader, genesisDoc.ChainID, noopSigner) t.Logf("signed header: %d", i) - require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, signedHeader)) + require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{Message: signedHeader})) } // then stop and restart service @@ -109,7 +109,7 @@ func TestHeaderSyncServiceRestart(t *testing.T) { for i := signedHeader.Height() + 1; i < 2; i++ { signedHeader = nextHeader(t, signedHeader, genesisDoc.ChainID, noopSigner) t.Logf("signed header: %d", i) - require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, signedHeader)) + require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{Message: signedHeader})) } cancel() } @@ -164,7 +164,182 @@ func TestHeaderSyncServiceInitFromHigherHeight(t *testing.T) { require.NoError(t, err) require.NoError(t, signedHeader.Validate()) - require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, signedHeader)) + require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{Message: signedHeader})) +} + +func TestDAHintStorageHeader(t *testing.T) { + mainKV := sync.MutexWrap(datastore.NewMapDatastore()) + pk, _, err := crypto.GenerateEd25519Key(cryptoRand.Reader) + require.NoError(t, err) + noopSigner, err := noop.NewNoopSigner(pk) 
+ require.NoError(t, err) + rnd := rand.New(rand.NewSource(1)) // nolint:gosec // test code only + mn := mocknet.New() + + chainId := "test-chain-id" + + proposerAddr := []byte("test") + genesisDoc := genesispkg.Genesis{ + ChainID: chainId, + StartTime: time.Now(), + InitialHeight: 1, + ProposerAddress: proposerAddr, + } + conf := config.DefaultConfig() + conf.RootDir = t.TempDir() + nodeKey, err := key.LoadOrGenNodeKey(filepath.Dir(conf.ConfigPath())) + require.NoError(t, err) + logger := zerolog.Nop() + priv := nodeKey.PrivKey + p2pHost, err := mn.AddPeer(priv, nil) + require.NoError(t, err) + + p2pClient, err := p2p.NewClientWithHost(conf.P2P, nodeKey.PrivKey, mainKV, chainId, logger, p2p.NopMetrics(), p2pHost) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + require.NoError(t, p2pClient.Start(ctx)) + + headerSvc, err := NewHeaderSyncService(mainKV, conf, genesisDoc, p2pClient, logger) + require.NoError(t, err) + require.NoError(t, headerSvc.Start(ctx)) + + headerConfig := types.HeaderConfig{ + Height: genesisDoc.InitialHeight, + DataHash: bytesN(rnd, 32), + AppHash: bytesN(rnd, 32), + Signer: noopSigner, + } + signedHeader, err := types.GetRandomSignedHeaderCustom(&headerConfig, genesisDoc.ChainID) + require.NoError(t, err) + require.NoError(t, signedHeader.Validate()) + + require.NoError(t, headerSvc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{Message: signedHeader})) + + daHeight := uint64(100) + require.NoError(t, headerSvc.AppendDAHint(ctx, daHeight, signedHeader.Hash())) + + h, hint, err := headerSvc.GetByHeight(ctx, signedHeader.Height()) + require.NoError(t, err) + require.Equal(t, signedHeader.Hash(), h.Hash()) + require.Equal(t, daHeight, hint) + + _ = p2pClient.Close() + _ = headerSvc.Stop(ctx) + cancel() + + // Restart + h2, err := mn.AddPeer(priv, nil) + require.NoError(t, err) + p2pClient, err = p2p.NewClientWithHost(conf.P2P, nodeKey.PrivKey, mainKV, chainId, logger, p2p.NopMetrics(), h2) + 
require.NoError(t, err) + + ctx, cancel = context.WithCancel(t.Context()) + defer cancel() + require.NoError(t, p2pClient.Start(ctx)) + t.Cleanup(func() { _ = p2pClient.Close() }) + + headerSvc, err = NewHeaderSyncService(mainKV, conf, genesisDoc, p2pClient, logger) + require.NoError(t, err) + require.NoError(t, headerSvc.Start(ctx)) + t.Cleanup(func() { _ = headerSvc.Stop(context.Background()) }) + + h, hint, err = headerSvc.GetByHeight(ctx, signedHeader.Height()) + require.NoError(t, err) + require.Equal(t, signedHeader.Hash(), h.Hash()) + require.Equal(t, daHeight, hint) +} + +func TestDAHintStorageData(t *testing.T) { + mainKV := sync.MutexWrap(datastore.NewMapDatastore()) + pk, _, err := crypto.GenerateEd25519Key(cryptoRand.Reader) + require.NoError(t, err) + noopSigner, err := noop.NewNoopSigner(pk) + require.NoError(t, err) + rnd := rand.New(rand.NewSource(1)) // nolint:gosec // test code only + mn := mocknet.New() + + chainId := "test-chain-id" + + proposerAddr := []byte("test") + genesisDoc := genesispkg.Genesis{ + ChainID: chainId, + StartTime: time.Now(), + InitialHeight: 1, + ProposerAddress: proposerAddr, + } + conf := config.DefaultConfig() + conf.RootDir = t.TempDir() + nodeKey, err := key.LoadOrGenNodeKey(filepath.Dir(conf.ConfigPath())) + require.NoError(t, err) + logger := zerolog.Nop() + priv := nodeKey.PrivKey + p2pHost, err := mn.AddPeer(priv, nil) + require.NoError(t, err) + + p2pClient, err := p2p.NewClientWithHost(conf.P2P, nodeKey.PrivKey, mainKV, chainId, logger, p2p.NopMetrics(), p2pHost) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + require.NoError(t, p2pClient.Start(ctx)) + + dataSvc, err := NewDataSyncService(mainKV, conf, genesisDoc, p2pClient, logger) + require.NoError(t, err) + require.NoError(t, dataSvc.Start(ctx)) + + // Need a valid header height for data metadata + headerConfig := types.HeaderConfig{ + Height: genesisDoc.InitialHeight, + DataHash: bytesN(rnd, 32), + AppHash: 
bytesN(rnd, 32), + Signer: noopSigner, + } + signedHeader, err := types.GetRandomSignedHeaderCustom(&headerConfig, genesisDoc.ChainID) + require.NoError(t, err) + + data := types.Data{ + Txs: types.Txs{[]byte("tx1")}, + Metadata: &types.Metadata{ + Height: signedHeader.Height(), + }, + } + + require.NoError(t, dataSvc.WriteToStoreAndBroadcast(ctx, &types.P2PData{Message: &data})) + + daHeight := uint64(100) + require.NoError(t, dataSvc.AppendDAHint(ctx, daHeight, data.Hash())) + + d, hint, err := dataSvc.GetByHeight(ctx, signedHeader.Height()) + require.NoError(t, err) + require.Equal(t, data.Hash(), d.Hash()) + require.Equal(t, daHeight, hint) + + _ = p2pClient.Close() + _ = dataSvc.Stop(ctx) + cancel() + + // Restart + h2, err := mn.AddPeer(priv, nil) + require.NoError(t, err) + p2pClient, err = p2p.NewClientWithHost(conf.P2P, nodeKey.PrivKey, mainKV, chainId, logger, p2p.NopMetrics(), h2) + require.NoError(t, err) + + ctx, cancel = context.WithCancel(t.Context()) + defer cancel() + require.NoError(t, p2pClient.Start(ctx)) + t.Cleanup(func() { _ = p2pClient.Close() }) + + dataSvc, err = NewDataSyncService(mainKV, conf, genesisDoc, p2pClient, logger) + require.NoError(t, err) + require.NoError(t, dataSvc.Start(ctx)) + t.Cleanup(func() { _ = dataSvc.Stop(context.Background()) }) + + d, hint, err = dataSvc.GetByHeight(ctx, signedHeader.Height()) + require.NoError(t, err) + require.Equal(t, data.Hash(), d.Hash()) + require.Equal(t, daHeight, hint) } func nextHeader(t *testing.T, previousHeader *types.SignedHeader, chainID string, noopSigner signer.Signer) *types.SignedHeader { @@ -178,8 +353,7 @@ func nextHeader(t *testing.T, previousHeader *types.SignedHeader, chainID string require.NoError(t, err) newSignedHeader.Signature = signature require.NoError(t, newSignedHeader.Validate()) - previousHeader = newSignedHeader - return previousHeader + return newSignedHeader } func bytesN(r *rand.Rand, n int) []byte { diff --git a/proto/evnode/v1/evnode.proto 
b/proto/evnode/v1/evnode.proto index 1cb3e23ea1..8bd7d13a25 100644 --- a/proto/evnode/v1/evnode.proto +++ b/proto/evnode/v1/evnode.proto @@ -95,3 +95,18 @@ message Vote { // Validator address bytes validator_address = 5; } + +// P2PSignedHeader +message P2PSignedHeader { + Header header = 1; + bytes signature = 2; + Signer signer = 3; + optional uint64 da_height_hint = 4; +} + +// P2PData +message P2PData { + Metadata metadata = 1; + repeated bytes txs = 2; + optional uint64 da_height_hint = 3; +} diff --git a/types/p2p_envelope.go b/types/p2p_envelope.go new file mode 100644 index 0000000000..eb76e9f896 --- /dev/null +++ b/types/p2p_envelope.go @@ -0,0 +1,153 @@ +package types + +import ( + "fmt" + "time" + + "github.com/celestiaorg/go-header" + "google.golang.org/protobuf/proto" + + pb "github.com/evstack/ev-node/types/pb/evnode/v1" +) + +type ( + P2PSignedHeader = P2PEnvelope[*SignedHeader] + P2PData = P2PEnvelope[*Data] +) + +var ( + _ header.Header[*P2PData] = &P2PData{} + _ header.Header[*P2PSignedHeader] = &P2PSignedHeader{} +) + +// P2PEnvelope is a generic envelope for P2P messages that includes a DA height hint. +type P2PEnvelope[H header.Header[H]] struct { + Message H + DAHeightHint uint64 +} + +// New creates a new P2PEnvelope. +func (e *P2PEnvelope[H]) New() *P2PEnvelope[H] { + var empty H + return &P2PEnvelope[H]{Message: empty.New()} +} + +// IsZero checks if the envelope or its message is zero. +func (e *P2PEnvelope[H]) IsZero() bool { + return e == nil || e.Message.IsZero() +} + +// SetDAHint sets the DA height hint. +func (e *P2PEnvelope[H]) SetDAHint(daHeight uint64) { + e.DAHeightHint = daHeight +} + +// DAHint returns the DA height hint. +func (e *P2PEnvelope[H]) DAHint() uint64 { + return e.DAHeightHint +} + +// Verify verifies the envelope message against an untrusted envelope. +func (e *P2PEnvelope[H]) Verify(untrst *P2PEnvelope[H]) error { + return e.Message.Verify(untrst.Message) +} + +// ChainID returns the ChainID of the message. 
+func (e *P2PEnvelope[H]) ChainID() string { + return e.Message.ChainID() +} + +// Height returns the Height of the message. +func (e *P2PEnvelope[H]) Height() uint64 { + return e.Message.Height() +} + +// LastHeader returns the LastHeader hash of the message. +func (e *P2PEnvelope[H]) LastHeader() Hash { + return e.Message.LastHeader() +} + +// Time returns the Time of the message. +func (e *P2PEnvelope[H]) Time() time.Time { + return e.Message.Time() +} + +// Hash returns the hash of the message. +func (e *P2PEnvelope[H]) Hash() Hash { + return e.Message.Hash() +} + +// Validate performs basic validation on the message. +func (e *P2PEnvelope[H]) Validate() error { + return e.Message.Validate() +} + +// MarshalBinary marshals the envelope to binary. +func (e *P2PEnvelope[H]) MarshalBinary() ([]byte, error) { + var mirrorPb proto.Message + + switch msg := any(e.Message).(type) { + case *Data: + pData := msg.ToProto() + mirrorPb = &pb.P2PData{ + Metadata: pData.Metadata, + Txs: pData.Txs, + DaHeightHint: &e.DAHeightHint, + } + case *SignedHeader: + psh, err := msg.ToProto() + if err != nil { + return nil, err + } + mirrorPb = &pb.P2PSignedHeader{ + Header: psh.Header, + Signature: psh.Signature, + Signer: psh.Signer, + DaHeightHint: &e.DAHeightHint, + } + default: + return nil, fmt.Errorf("unsupported type for toProto: %T", msg) + } + return proto.Marshal(mirrorPb) +} + +// UnmarshalBinary unmarshals the envelope from binary. 
+func (e *P2PEnvelope[H]) UnmarshalBinary(data []byte) error { + switch target := any(e.Message).(type) { + case *Data: + var pData pb.P2PData + if err := proto.Unmarshal(data, &pData); err != nil { + return err + } + mirrorData := &pb.Data{ + Metadata: pData.Metadata, + Txs: pData.Txs, + } + if err := target.FromProto(mirrorData); err != nil { + return err + } + if pData.DaHeightHint != nil { + e.DAHeightHint = *pData.DaHeightHint + } + return nil + case *SignedHeader: + var pHeader pb.P2PSignedHeader + if err := proto.Unmarshal(data, &pHeader); err != nil { + return err + } + psh := &pb.SignedHeader{ + Header: pHeader.Header, + Signature: pHeader.Signature, + Signer: pHeader.Signer, + } + if err := target.FromProto(psh); err != nil { + return err + } + if pHeader.DaHeightHint != nil { + e.DAHeightHint = *pHeader.DaHeightHint + } + return nil + default: + return fmt.Errorf("unsupported type for UnmarshalBinary: %T", target) + } +} diff --git a/types/p2p_envelope_test.go b/types/p2p_envelope_test.go new file mode 100644 index 0000000000..3dc2127fed --- /dev/null +++ b/types/p2p_envelope_test.go @@ -0,0 +1,153 @@ +package types + +import ( + "bytes" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestP2PEnvelope_MarshalUnmarshal(t *testing.T) { + // Create a P2PData envelope + data := &Data{ + Metadata: &Metadata{ + ChainID: "test-chain", + Height: 10, + Time: uint64(time.Now().UnixNano()), + LastDataHash: bytes.Repeat([]byte{0x1}, 32), + }, + Txs: Txs{[]byte{0x1}, []byte{0x2}}, + } + envelope := &P2PData{ + Message: data, + DAHeightHint: 100, + } + + // Marshaling + bytes, err := envelope.MarshalBinary() + require.NoError(t, err) + assert.NotEmpty(t, bytes) + + // Unmarshaling + newEnvelope := (&P2PData{}).New() + err = newEnvelope.UnmarshalBinary(bytes) + require.NoError(t, err) + assert.Equal(t, envelope.DAHeightHint, newEnvelope.DAHeightHint) + 
assert.Equal(t, envelope.Message.Height(), newEnvelope.Message.Height()) + assert.Equal(t, envelope.Message.ChainID(), newEnvelope.Message.ChainID()) + assert.Equal(t, envelope.Message.LastDataHash, newEnvelope.Message.LastDataHash) + assert.Equal(t, envelope.Message.Txs, newEnvelope.Message.Txs) +} + +func TestP2PSignedHeader_MarshalUnmarshal(t *testing.T) { + _, pubKey, err := crypto.GenerateEd25519Key(nil) + require.NoError(t, err) + + header := &SignedHeader{ + Header: Header{ + BaseHeader: BaseHeader{ + ChainID: "test-chain", + Height: 5, + Time: uint64(time.Now().UnixNano()), + }, + Version: Version{ + Block: 1, + App: 2, + }, + LastHeaderHash: GetRandomBytes(32), + DataHash: GetRandomBytes(32), + AppHash: GetRandomBytes(32), + ProposerAddress: GetRandomBytes(32), + ValidatorHash: GetRandomBytes(32), + }, + Signature: GetRandomBytes(64), + Signer: Signer{ + PubKey: pubKey, + Address: GetRandomBytes(20), + }, + } + + envelope := &P2PSignedHeader{ + Message: header, + DAHeightHint: 200, + } + + // Marshaling + bz, err := envelope.MarshalBinary() + require.NoError(t, err) + assert.NotEmpty(t, bz) + + // Unmarshaling + newEnvelope := (&P2PSignedHeader{}).New() + err = newEnvelope.UnmarshalBinary(bz) + require.NoError(t, err) + assert.Equal(t, envelope.DAHeightHint, newEnvelope.DAHeightHint) + assert.Equal(t, envelope.Message.Signer, newEnvelope.Message.Signer) + assert.Equal(t, envelope, newEnvelope) +} + +func TestSignedHeaderBinaryCompatibility(t *testing.T) { + signedHeader, _, err := GetRandomSignedHeader("chain-id") + require.NoError(t, err) + bytes, err := signedHeader.MarshalBinary() + require.NoError(t, err) + + p2pHeader := (&P2PSignedHeader{}).New() + err = p2pHeader.UnmarshalBinary(bytes) + require.NoError(t, err) + + assert.Equal(t, signedHeader.Header, p2pHeader.Message.Header) + assert.Equal(t, signedHeader.Signature, p2pHeader.Message.Signature) + assert.Equal(t, signedHeader.Signer, p2pHeader.Message.Signer) + assert.Zero(t, 
p2pHeader.DAHeightHint) + + p2pHeader.DAHeightHint = 100 + p2pBytes, err := p2pHeader.MarshalBinary() + require.NoError(t, err) + + var decodedSignedHeader SignedHeader + err = decodedSignedHeader.UnmarshalBinary(p2pBytes) + require.NoError(t, err) + assert.Equal(t, signedHeader.Header, decodedSignedHeader.Header) + assert.Equal(t, signedHeader.Signature, decodedSignedHeader.Signature) + assert.Equal(t, signedHeader.Signer, decodedSignedHeader.Signer) +} + +func TestDataBinaryCompatibility(t *testing.T) { + data := &Data{ + Metadata: &Metadata{ + ChainID: "chain-id", + Height: 10, + Time: uint64(time.Now().UnixNano()), + LastDataHash: []byte("last-hash"), + }, + Txs: Txs{ + []byte("tx1"), + []byte("tx2"), + }, + } + bytes, err := data.MarshalBinary() + require.NoError(t, err) + + p2pData := (&P2PData{}).New() + err = p2pData.UnmarshalBinary(bytes) + require.NoError(t, err) + + assert.Equal(t, data.Metadata, p2pData.Message.Metadata) + assert.Equal(t, data.Txs, p2pData.Message.Txs) + assert.Zero(t, p2pData.DAHeightHint) + + p2pData.DAHeightHint = 200 + + p2pBytes, err := p2pData.MarshalBinary() + require.NoError(t, err) + + var decodedData Data + err = decodedData.UnmarshalBinary(p2pBytes) + require.NoError(t, err) + assert.Equal(t, data.Metadata, decodedData.Metadata) + assert.Equal(t, data.Txs, decodedData.Txs) +} diff --git a/types/pb/evnode/v1/evnode.pb.go b/types/pb/evnode/v1/evnode.pb.go index 86d9c4c572..344df7f3e0 100644 --- a/types/pb/evnode/v1/evnode.pb.go +++ b/types/pb/evnode/v1/evnode.pb.go @@ -585,6 +585,134 @@ func (x *Vote) GetValidatorAddress() []byte { return nil } +type P2PSignedHeader struct { + state protoimpl.MessageState `protogen:"open.v1"` + Header *Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + Signer *Signer `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` + DaHeightHint *uint64 
`protobuf:"varint,4,opt,name=da_height_hint,json=daHeightHint,proto3,oneof" json:"da_height_hint,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *P2PSignedHeader) Reset() { + *x = P2PSignedHeader{} + mi := &file_evnode_v1_evnode_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *P2PSignedHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*P2PSignedHeader) ProtoMessage() {} + +func (x *P2PSignedHeader) ProtoReflect() protoreflect.Message { + mi := &file_evnode_v1_evnode_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use P2PSignedHeader.ProtoReflect.Descriptor instead. +func (*P2PSignedHeader) Descriptor() ([]byte, []int) { + return file_evnode_v1_evnode_proto_rawDescGZIP(), []int{8} +} + +func (x *P2PSignedHeader) GetHeader() *Header { + if x != nil { + return x.Header + } + return nil +} + +func (x *P2PSignedHeader) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +func (x *P2PSignedHeader) GetSigner() *Signer { + if x != nil { + return x.Signer + } + return nil +} + +func (x *P2PSignedHeader) GetDaHeightHint() uint64 { + if x != nil && x.DaHeightHint != nil { + return *x.DaHeightHint + } + return 0 +} + +type P2PData struct { + state protoimpl.MessageState `protogen:"open.v1"` + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + Txs [][]byte `protobuf:"bytes,2,rep,name=txs,proto3" json:"txs,omitempty"` + DaHeightHint *uint64 `protobuf:"varint,3,opt,name=da_height_hint,json=daHeightHint,proto3,oneof" json:"da_height_hint,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *P2PData) Reset() { + *x = P2PData{} + mi := 
&file_evnode_v1_evnode_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *P2PData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*P2PData) ProtoMessage() {} + +func (x *P2PData) ProtoReflect() protoreflect.Message { + mi := &file_evnode_v1_evnode_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use P2PData.ProtoReflect.Descriptor instead. +func (*P2PData) Descriptor() ([]byte, []int) { + return file_evnode_v1_evnode_proto_rawDescGZIP(), []int{9} +} + +func (x *P2PData) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *P2PData) GetTxs() [][]byte { + if x != nil { + return x.Txs + } + return nil +} + +func (x *P2PData) GetDaHeightHint() uint64 { + if x != nil && x.DaHeightHint != nil { + return *x.DaHeightHint + } + return 0 +} + var File_evnode_v1_evnode_proto protoreflect.FileDescriptor const file_evnode_v1_evnode_proto_rawDesc = "" + @@ -630,7 +758,18 @@ const file_evnode_v1_evnode_proto_rawDesc = "" + "\x06height\x18\x02 \x01(\x04R\x06height\x128\n" + "\ttimestamp\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12\"\n" + "\rblock_id_hash\x18\x04 \x01(\fR\vblockIdHash\x12+\n" + - "\x11validator_address\x18\x05 \x01(\fR\x10validatorAddressB/Z-github.com/evstack/ev-node/types/pb/evnode/v1b\x06proto3" + "\x11validator_address\x18\x05 \x01(\fR\x10validatorAddress\"\xc3\x01\n" + + "\x0fP2PSignedHeader\x12)\n" + + "\x06header\x18\x01 \x01(\v2\x11.evnode.v1.HeaderR\x06header\x12\x1c\n" + + "\tsignature\x18\x02 \x01(\fR\tsignature\x12)\n" + + "\x06signer\x18\x03 \x01(\v2\x11.evnode.v1.SignerR\x06signer\x12)\n" + + "\x0eda_height_hint\x18\x04 \x01(\x04H\x00R\fdaHeightHint\x88\x01\x01B\x11\n" + + "\x0f_da_height_hint\"\x8a\x01\n" + + "\aP2PData\x12/\n" + + 
"\bmetadata\x18\x01 \x01(\v2\x13.evnode.v1.MetadataR\bmetadata\x12\x10\n" + + "\x03txs\x18\x02 \x03(\fR\x03txs\x12)\n" + + "\x0eda_height_hint\x18\x03 \x01(\x04H\x00R\fdaHeightHint\x88\x01\x01B\x11\n" + + "\x0f_da_height_hintB/Z-github.com/evstack/ev-node/types/pb/evnode/v1b\x06proto3" var ( file_evnode_v1_evnode_proto_rawDescOnce sync.Once @@ -644,7 +783,7 @@ func file_evnode_v1_evnode_proto_rawDescGZIP() []byte { return file_evnode_v1_evnode_proto_rawDescData } -var file_evnode_v1_evnode_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_evnode_v1_evnode_proto_msgTypes = make([]protoimpl.MessageInfo, 10) var file_evnode_v1_evnode_proto_goTypes = []any{ (*Version)(nil), // 0: evnode.v1.Version (*Header)(nil), // 1: evnode.v1.Header @@ -654,21 +793,26 @@ var file_evnode_v1_evnode_proto_goTypes = []any{ (*Data)(nil), // 5: evnode.v1.Data (*SignedData)(nil), // 6: evnode.v1.SignedData (*Vote)(nil), // 7: evnode.v1.Vote - (*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp + (*P2PSignedHeader)(nil), // 8: evnode.v1.P2PSignedHeader + (*P2PData)(nil), // 9: evnode.v1.P2PData + (*timestamppb.Timestamp)(nil), // 10: google.protobuf.Timestamp } var file_evnode_v1_evnode_proto_depIdxs = []int32{ - 0, // 0: evnode.v1.Header.version:type_name -> evnode.v1.Version - 1, // 1: evnode.v1.SignedHeader.header:type_name -> evnode.v1.Header - 3, // 2: evnode.v1.SignedHeader.signer:type_name -> evnode.v1.Signer - 4, // 3: evnode.v1.Data.metadata:type_name -> evnode.v1.Metadata - 5, // 4: evnode.v1.SignedData.data:type_name -> evnode.v1.Data - 3, // 5: evnode.v1.SignedData.signer:type_name -> evnode.v1.Signer - 8, // 6: evnode.v1.Vote.timestamp:type_name -> google.protobuf.Timestamp - 7, // [7:7] is the sub-list for method output_type - 7, // [7:7] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name + 0, // 0: 
evnode.v1.Header.version:type_name -> evnode.v1.Version + 1, // 1: evnode.v1.SignedHeader.header:type_name -> evnode.v1.Header + 3, // 2: evnode.v1.SignedHeader.signer:type_name -> evnode.v1.Signer + 4, // 3: evnode.v1.Data.metadata:type_name -> evnode.v1.Metadata + 5, // 4: evnode.v1.SignedData.data:type_name -> evnode.v1.Data + 3, // 5: evnode.v1.SignedData.signer:type_name -> evnode.v1.Signer + 10, // 6: evnode.v1.Vote.timestamp:type_name -> google.protobuf.Timestamp + 1, // 7: evnode.v1.P2PSignedHeader.header:type_name -> evnode.v1.Header + 3, // 8: evnode.v1.P2PSignedHeader.signer:type_name -> evnode.v1.Signer + 4, // 9: evnode.v1.P2PData.metadata:type_name -> evnode.v1.Metadata + 10, // [10:10] is the sub-list for method output_type + 10, // [10:10] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name } func init() { file_evnode_v1_evnode_proto_init() } @@ -676,13 +820,15 @@ func file_evnode_v1_evnode_proto_init() { if File_evnode_v1_evnode_proto != nil { return } + file_evnode_v1_evnode_proto_msgTypes[8].OneofWrappers = []any{} + file_evnode_v1_evnode_proto_msgTypes[9].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_evnode_v1_evnode_proto_rawDesc), len(file_evnode_v1_evnode_proto_rawDesc)), NumEnums: 0, - NumMessages: 8, + NumMessages: 10, NumExtensions: 0, NumServices: 0, },