From 0b04da359afbe32a8640cb600c0f05a09277e387 Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Wed, 26 Nov 2025 16:13:23 +0100 Subject: [PATCH 01/13] wip --- apps/evm/cmd/rollback.go | 2 +- apps/testapp/cmd/rollback.go | 2 +- block/components.go | 8 +-- block/internal/common/broadcaster_mock.go | 57 +++++++++++++++++++ block/internal/common/event.go | 3 + block/internal/common/expected_interfaces.go | 1 + block/internal/executing/executor.go | 6 +- .../internal/executing/executor_lazy_test.go | 4 +- .../internal/executing/executor_logic_test.go | 4 +- .../executing/executor_restart_test.go | 8 +-- block/internal/executing/executor_test.go | 48 +--------------- block/internal/submitting/da_submitter.go | 12 ++++ .../da_submitter_integration_test.go | 8 ++- .../submitting/da_submitter_mocks_test.go | 2 +- .../internal/submitting/da_submitter_test.go | 2 + block/internal/submitting/submitter_test.go | 6 +- block/internal/syncing/p2p_handler.go | 12 ++-- block/internal/syncing/p2p_handler_test.go | 20 ++++--- block/internal/syncing/syncer.go | 42 +++++++++++++- block/internal/syncing/syncer_backoff_test.go | 14 ++--- .../internal/syncing/syncer_benchmark_test.go | 2 +- block/internal/syncing/syncer_test.go | 10 ++-- pkg/rpc/client/client_test.go | 9 +-- pkg/rpc/server/server.go | 6 +- pkg/rpc/server/server_test.go | 11 ++-- pkg/sync/sync_service.go | 8 ++- pkg/sync/sync_service_test.go | 5 +- types/signed_header.go | 42 ++++++++++++++ types/utils.go | 8 +-- 29 files changed, 242 insertions(+), 120 deletions(-) diff --git a/apps/evm/cmd/rollback.go b/apps/evm/cmd/rollback.go index 5859b55b27..4a75f9a726 100644 --- a/apps/evm/cmd/rollback.go +++ b/apps/evm/cmd/rollback.go @@ -70,7 +70,7 @@ func NewRollbackCmd() *cobra.Command { } // rollback ev-node goheader state - headerStore, err := goheaderstore.NewStore[*types.SignedHeader]( + headerStore, err := goheaderstore.NewStore[*types.SignedHeaderWithDAHint]( evolveDB, goheaderstore.WithStorePrefix("headerSync"), goheaderstore.WithMetrics(), diff --git a/apps/testapp/cmd/rollback.go b/apps/testapp/cmd/rollback.go index 6d79a1ef13..761ec207d5 100644 --- a/apps/testapp/cmd/rollback.go +++ b/apps/testapp/cmd/rollback.go @@ -76,7 +76,7 @@ func NewRollbackCmd() *cobra.Command { } // rollback ev-node goheader state - headerStore, err := goheaderstore.NewStore[*types.SignedHeader]( + headerStore, err := goheaderstore.NewStore[*types.SignedHeaderWithDAHint]( evolveDB, goheaderstore.WithStorePrefix("headerSync"), goheaderstore.WithMetrics(), diff --git a/block/components.go b/block/components.go index 546cda62c3..43fd950f47 100644 --- a/block/components.go +++ b/block/components.go @@ -132,7 +132,7 @@ func NewSyncComponents( store store.Store, exec coreexecutor.Executor, da coreda.DA, - headerStore common.Broadcaster[*types.SignedHeader], + headerStore common.Broadcaster[*types.SignedHeaderWithDAHint], dataStore common.Broadcaster[*types.Data], logger zerolog.Logger, metrics *Metrics, @@ -165,7 +165,7 @@ func NewSyncComponents( ) // Create submitter for sync nodes (no signer, only DA inclusion processing) - daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger) + daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger, nil) // todo (Alex): use a noop submitter := submitting.NewSubmitter( store, exec, @@ -198,7 +198,7 @@ func NewAggregatorComponents( sequencer coresequencer.Sequencer, da coreda.DA, signer signer.Signer, - headerBroadcaster common.Broadcaster[*types.SignedHeader], 
+ headerBroadcaster common.Broadcaster[*types.SignedHeaderWithDAHint], dataBroadcaster common.Broadcaster[*types.Data], logger zerolog.Logger, metrics *Metrics, @@ -247,7 +247,7 @@ func NewAggregatorComponents( // Create DA client and submitter for aggregator nodes (with signer for submission) daClient := NewDAClient(da, config, logger) - daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger) + daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger, headerBroadcaster) submitter := submitting.NewSubmitter( store, exec, diff --git a/block/internal/common/broadcaster_mock.go b/block/internal/common/broadcaster_mock.go index 2983478078..7b40164328 100644 --- a/block/internal/common/broadcaster_mock.go +++ b/block/internal/common/broadcaster_mock.go @@ -160,3 +160,60 @@ func (_c *MockBroadcaster_WriteToStoreAndBroadcast_Call[H]) RunAndReturn(run fun _c.Call.Return(run) return _c } + +// XXX provides a mock function for the type MockBroadcaster +func (_mock *MockBroadcaster[H]) XXX(ctx context.Context, headerOrData H) error { + ret := _mock.Called(ctx, headerOrData) + + if len(ret) == 0 { + panic("no return value specified for XXX") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(context.Context, H) error); ok { + r0 = returnFunc(ctx, headerOrData) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockBroadcaster_XXX_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'XXX' +type MockBroadcaster_XXX_Call[H header.Header[H]] struct { + *mock.Call +} + +// XXX is a helper method to define mock.On call +// - ctx context.Context +// - headerOrData H +func (_e *MockBroadcaster_Expecter[H]) XXX(ctx interface{}, headerOrData interface{}) *MockBroadcaster_XXX_Call[H] { + return &MockBroadcaster_XXX_Call[H]{Call: _e.mock.On("XXX", ctx, headerOrData)} +} + +func (_c *MockBroadcaster_XXX_Call[H]) Run(run func(ctx context.Context, headerOrData H)) *MockBroadcaster_XXX_Call[H] { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 H + if args[1] != nil { + arg1 = args[1].(H) + } + run( + arg0, + arg1, + ) + }) + return _c +} + +func (_c *MockBroadcaster_XXX_Call[H]) Return(err error) *MockBroadcaster_XXX_Call[H] { + _c.Call.Return(err) + return _c +} + +func (_c *MockBroadcaster_XXX_Call[H]) RunAndReturn(run func(ctx context.Context, headerOrData H) error) *MockBroadcaster_XXX_Call[H] { + _c.Call.Return(run) + return _c +} diff --git a/block/internal/common/event.go b/block/internal/common/event.go index 69d0300f9f..5b016c246b 100644 --- a/block/internal/common/event.go +++ b/block/internal/common/event.go @@ -20,4 +20,7 @@ type DAHeightEvent struct { DaHeight uint64 // Source indicates where this event originated from (DA or P2P) Source EventSource + + // Optional DA height hint from P2P + DaHeightHint uint64 } diff --git a/block/internal/common/expected_interfaces.go b/block/internal/common/expected_interfaces.go index 8f36af6240..6015f19963 100644 --- a/block/internal/common/expected_interfaces.go +++ b/block/internal/common/expected_interfaces.go @@ -12,4 +12,5 @@ import ( type Broadcaster[H header.Header[H]] interface { WriteToStoreAndBroadcast(ctx context.Context, payload H, opts ...pubsub.PubOpt) error Store() header.Store[H] + XXX(ctx context.Context, headerOrData H) error } diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index 
be969b1a75..d09d01eab1 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -37,7 +37,7 @@ type Executor struct { metrics *common.Metrics // Broadcasting - headerBroadcaster common.Broadcaster[*types.SignedHeader] + headerBroadcaster common.Broadcaster[*types.SignedHeaderWithDAHint] dataBroadcaster common.Broadcaster[*types.Data] // Configuration @@ -76,7 +76,7 @@ func NewExecutor( metrics *common.Metrics, config config.Config, genesis genesis.Genesis, - headerBroadcaster common.Broadcaster[*types.SignedHeader], + headerBroadcaster common.Broadcaster[*types.SignedHeaderWithDAHint], dataBroadcaster common.Broadcaster[*types.Data], logger zerolog.Logger, options common.BlockOptions, @@ -420,7 +420,7 @@ func (e *Executor) produceBlock() error { // broadcast header and data to P2P network g, ctx := errgroup.WithContext(e.ctx) - g.Go(func() error { return e.headerBroadcaster.WriteToStoreAndBroadcast(ctx, header) }) + g.Go(func() error { return e.headerBroadcaster.WriteToStoreAndBroadcast(ctx, &types.SignedHeaderWithDAHint{SignedHeader: header}) }) g.Go(func() error { return e.dataBroadcaster.WriteToStoreAndBroadcast(ctx, data) }) if err := g.Wait(); err != nil { e.logger.Error().Err(err).Msg("failed to broadcast header and/data") diff --git a/block/internal/executing/executor_lazy_test.go b/block/internal/executing/executor_lazy_test.go index b72f0a856b..25b784b29d 100644 --- a/block/internal/executing/executor_lazy_test.go +++ b/block/internal/executing/executor_lazy_test.go @@ -47,7 +47,7 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() db := common.NewMockBroadcaster[*types.Data](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() @@ -157,7 +157,7 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() db := common.NewMockBroadcaster[*types.Data](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() diff --git a/block/internal/executing/executor_logic_test.go b/block/internal/executing/executor_logic_test.go index 9aa79d0c43..c615e458cc 100644 --- a/block/internal/executing/executor_logic_test.go +++ b/block/internal/executing/executor_logic_test.go @@ -69,7 +69,7 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { mockSeq := testmocks.NewMockSequencer(t) // Broadcasters are required by produceBlock; use generated mocks - hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() db := common.NewMockBroadcaster[*types.Data](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() @@ -156,7 +156,7 @@ func TestPendingLimit_SkipsProduction(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := common.NewMockBroadcaster[*types.SignedHeader](t) + 
hb := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() db := common.NewMockBroadcaster[*types.Data](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() diff --git a/block/internal/executing/executor_restart_test.go b/block/internal/executing/executor_restart_test.go index 3f0e8b500c..b9a13f68b8 100644 --- a/block/internal/executing/executor_restart_test.go +++ b/block/internal/executing/executor_restart_test.go @@ -47,7 +47,7 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { // Create first executor instance mockExec1 := testmocks.NewMockExecutor(t) mockSeq1 := testmocks.NewMockSequencer(t) - hb1 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb1 := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) hb1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() db1 := common.NewMockBroadcaster[*types.Data](t) db1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() @@ -166,7 +166,7 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { // Create second executor instance (restart scenario) mockExec2 := testmocks.NewMockExecutor(t) mockSeq2 := testmocks.NewMockSequencer(t) - hb2 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb2 := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) hb2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() db2 := common.NewMockBroadcaster[*types.Data](t) db2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() @@ -264,7 +264,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { // Create first executor and produce one block mockExec1 := testmocks.NewMockExecutor(t) mockSeq1 := testmocks.NewMockSequencer(t) - hb1 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb1 := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) hb1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() db1 := common.NewMockBroadcaster[*types.Data](t) db1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() @@ -316,7 +316,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { // Create second executor (restart) mockExec2 := testmocks.NewMockExecutor(t) mockSeq2 := testmocks.NewMockSequencer(t) - hb2 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb2 := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) hb2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() db2 := common.NewMockBroadcaster[*types.Data](t) db2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() diff --git a/block/internal/executing/executor_test.go b/block/internal/executing/executor_test.go index e310c6d40d..76a7d21748 100644 --- a/block/internal/executing/executor_test.go +++ b/block/internal/executing/executor_test.go @@ -1,7 +1,6 @@ package executing import ( - "context" "testing" "time" @@ -40,7 +39,7 @@ func TestExecutor_BroadcasterIntegration(t *testing.T) { } // Create mock broadcasters - headerBroadcaster := common.NewMockBroadcaster[*types.SignedHeader](t) + headerBroadcaster := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) dataBroadcaster := common.NewMockBroadcaster[*types.Data](t) // Create executor with broadcasters @@ -120,48 +119,3 @@ func TestExecutor_NilBroadcasters(t *testing.T) { assert.Equal(t, cacheManager, 
executor.cache) assert.Equal(t, gen, executor.genesis) } - -func TestExecutor_BroadcastFlow(t *testing.T) { - // This test demonstrates how the broadcast flow works - // when an Executor produces a block - - // Create mock broadcasters - headerBroadcaster := common.NewMockBroadcaster[*types.SignedHeader](t) - dataBroadcaster := common.NewMockBroadcaster[*types.Data](t) - - // Create sample data that would be broadcast - sampleHeader := &types.SignedHeader{ - Header: types.Header{ - BaseHeader: types.BaseHeader{ - ChainID: "test-chain", - Height: 1, - Time: uint64(time.Now().UnixNano()), - }, - }, - } - - sampleData := &types.Data{ - Metadata: &types.Metadata{ - ChainID: "test-chain", - Height: 1, - Time: uint64(time.Now().UnixNano()), - }, - Txs: []types.Tx{}, - } - - // Test broadcast calls - ctx := context.Background() - - // Set up expectations - headerBroadcaster.EXPECT().WriteToStoreAndBroadcast(ctx, sampleHeader).Return(nil).Once() - dataBroadcaster.EXPECT().WriteToStoreAndBroadcast(ctx, sampleData).Return(nil).Once() - - // Simulate what happens in produceBlock() after block creation - err := headerBroadcaster.WriteToStoreAndBroadcast(ctx, sampleHeader) - require.NoError(t, err) - - err = dataBroadcaster.WriteToStoreAndBroadcast(ctx, sampleData) - require.NoError(t, err) - - // Verify expectations were met (automatically checked by testify mock on cleanup) -} diff --git a/block/internal/submitting/da_submitter.go b/block/internal/submitting/da_submitter.go index 8cf741dcd9..5123429bf2 100644 --- a/block/internal/submitting/da_submitter.go +++ b/block/internal/submitting/da_submitter.go @@ -93,6 +93,10 @@ func clamp(v, min, max time.Duration) time.Duration { return v } +type xxxer interface { + XXX(ctx context.Context, header *types.SignedHeaderWithDAHint) error +} + // DASubmitter handles DA submission operations type DASubmitter struct { client da.Client @@ -101,6 +105,7 @@ type DASubmitter struct { options common.BlockOptions logger zerolog.Logger metrics *common.Metrics + xxxer xxxer // address selector for multi-account support addressSelector pkgda.AddressSelector @@ -114,6 +119,7 @@ func NewDASubmitter( options common.BlockOptions, metrics *common.Metrics, logger zerolog.Logger, + xxxer xxxer, ) *DASubmitter { daSubmitterLogger := logger.With().Str("component", "da_submitter").Logger() @@ -146,6 +152,7 @@ func NewDASubmitter( metrics: metrics, logger: daSubmitterLogger, addressSelector: addressSelector, + xxxer: xxxer, } } @@ -187,6 +194,11 @@ func (s *DASubmitter) SubmitHeaders(ctx context.Context, cache cache.Manager) er func(submitted []*types.SignedHeader, res *coreda.ResultSubmit) { for _, header := range submitted { cache.SetHeaderDAIncluded(header.Hash().String(), res.Height, header.Height()) + payload := &types.SignedHeaderWithDAHint{SignedHeader: header, DAHeightHint: res.Height} + if err := s.xxxer.XXX(ctx, payload); err != nil { + s.logger.Error().Err(err).Msg("failed to update header in p2p store") + // ignoring error here, since we don't want to block the block submission' + } } if l := len(submitted); l > 0 { lastHeight := submitted[l-1].Height() diff --git a/block/internal/submitting/da_submitter_integration_test.go b/block/internal/submitting/da_submitter_integration_test.go index 5b768e1a51..854b0950d2 100644 --- a/block/internal/submitting/da_submitter_integration_test.go +++ b/block/internal/submitting/da_submitter_integration_test.go @@ -93,7 +93,7 @@ func TestDASubmitter_SubmitHeadersAndData_MarksInclusionAndUpdatesLastSubmitted( Namespace: 
cfg.DA.Namespace, DataNamespace: cfg.DA.DataNamespace, }) - daSubmitter := NewDASubmitter(daClient, cfg, gen, common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop()) + daSubmitter := NewDASubmitter(daClient, cfg, gen, common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop(), noopXXXer{}) // Submit headers and data require.NoError(t, daSubmitter.SubmitHeaders(context.Background(), cm)) @@ -110,3 +110,9 @@ func TestDASubmitter_SubmitHeadersAndData_MarksInclusionAndUpdatesLastSubmitted( assert.True(t, ok) } + +type noopXXXer struct{} + +func (n noopXXXer) XXX(ctx context.Context, header *types.SignedHeaderWithDAHint) error { + return nil +} diff --git a/block/internal/submitting/da_submitter_mocks_test.go b/block/internal/submitting/da_submitter_mocks_test.go index b215b0cf2f..1716b59929 100644 --- a/block/internal/submitting/da_submitter_mocks_test.go +++ b/block/internal/submitting/da_submitter_mocks_test.go @@ -36,7 +36,7 @@ func newTestSubmitter(mockDA *mocks.MockDA, override func(*config.Config)) *DASu Namespace: cfg.DA.Namespace, DataNamespace: cfg.DA.DataNamespace, }) - return NewDASubmitter(daClient, cfg, genesis.Genesis{} /*options=*/, common.BlockOptions{}, common.NopMetrics(), zerolog.Nop()) + return NewDASubmitter(daClient, cfg, genesis.Genesis{} /*options=*/, common.BlockOptions{}, common.NopMetrics(), zerolog.Nop(), nil) } // marshal helper for simple items diff --git a/block/internal/submitting/da_submitter_test.go b/block/internal/submitting/da_submitter_test.go index 214ab98db4..f33aaab21f 100644 --- a/block/internal/submitting/da_submitter_test.go +++ b/block/internal/submitting/da_submitter_test.go @@ -65,6 +65,7 @@ func setupDASubmitterTest(t *testing.T) (*DASubmitter, store.Store, cache.Manage common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop(), + noopXXXer{}, ) return daSubmitter, st, cm, dummyDA, gen @@ -115,6 +116,7 @@ func TestNewDASubmitterSetsVisualizerWhenEnabled(t *testing.T) { common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop(), + nil, ) require.NotNil(t, server.GetDAVisualizationServer()) diff --git a/block/internal/submitting/submitter_test.go b/block/internal/submitting/submitter_test.go index c1df11bf51..f317e0bdec 100644 --- a/block/internal/submitting/submitter_test.go +++ b/block/internal/submitting/submitter_test.go @@ -168,7 +168,7 @@ func TestSubmitter_setSequencerHeightToDAHeight(t *testing.T) { Namespace: cfg.DA.Namespace, DataNamespace: cfg.DA.DataNamespace, }) - daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop(), nil) s := NewSubmitter(mockStore, nil, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, zerolog.Nop(), nil) s.ctx = ctx @@ -253,7 +253,7 @@ func TestSubmitter_processDAInclusionLoop_advances(t *testing.T) { Namespace: cfg.DA.Namespace, DataNamespace: cfg.DA.DataNamespace, }) - daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop(), nil) s := NewSubmitter(st, exec, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, zerolog.Nop(), nil) // prepare two consecutive blocks in store with DA included in cache @@ -444,7 +444,7 @@ func TestSubmitter_CacheClearedOnHeightInclusion(t *testing.T) { Namespace: cfg.DA.Namespace, DataNamespace: cfg.DA.DataNamespace, }) - daSub := NewDASubmitter(daClient, 
cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop(), nil) s := NewSubmitter(st, exec, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, zerolog.Nop(), nil) // Create test blocks diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index d8c10bc4c3..a5d4b53f26 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -27,7 +27,7 @@ type p2pHandler interface { // The handler maintains a processedHeight to track the highest block that has been // successfully validated and sent to the syncer, preventing duplicate processing. type P2PHandler struct { - headerStore goheader.Store[*types.SignedHeader] + headerStore goheader.Store[*types.SignedHeaderWithDAHint] dataStore goheader.Store[*types.Data] cache cache.CacheManager genesis genesis.Genesis @@ -38,7 +38,7 @@ type P2PHandler struct { // NewP2PHandler creates a new P2P handler. func NewP2PHandler( - headerStore goheader.Store[*types.SignedHeader], + headerStore goheader.Store[*types.SignedHeaderWithDAHint], dataStore goheader.Store[*types.Data], cache cache.CacheManager, genesis genesis.Genesis, @@ -104,10 +104,10 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC // further header validation (signature) is done in validateBlock. // we need to be sure that the previous block n-1 was executed before validating block n event := common.DAHeightEvent{ - Header: header, - Data: data, - DaHeight: 0, - Source: common.SourceP2P, + Header: header.SignedHeader, + Data: data, + Source: common.SourceP2P, + DaHeightHint: header.DAHeightHint, } select { diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index dfab41faae..aacd54d4cc 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -57,7 +57,7 @@ func p2pMakeSignedHeader(t *testing.T, chainID string, height uint64, proposer [ // P2PTestData aggregates dependencies used by P2P handler tests. type P2PTestData struct { Handler *P2PHandler - HeaderStore *extmocks.MockStore[*types.SignedHeader] + HeaderStore *extmocks.MockStore[*types.SignedHeaderWithDAHint] DataStore *extmocks.MockStore[*types.Data] Cache cache.CacheManager Genesis genesis.Genesis @@ -73,7 +73,7 @@ func setupP2P(t *testing.T) *P2PTestData { gen := genesis.Genesis{ChainID: "p2p-test", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: proposerAddr} - headerStoreMock := extmocks.NewMockStore[*types.SignedHeader](t) + headerStoreMock := extmocks.NewMockStore[*types.SignedHeaderWithDAHint](t) dataStoreMock := extmocks.NewMockStore[*types.Data](t) cfg := config.Config{ @@ -136,8 +136,8 @@ func TestP2PHandler_ProcessHeight_EmitsEventWhenHeaderAndDataPresent(t *testing. 
sig, err := p.Signer.Sign(bz) require.NoError(t, err) header.Signature = sig - - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(5)).Return(header, nil).Once() + payload := &types.SignedHeaderWithDAHint{SignedHeader: header} + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(5)).Return(payload, nil).Once() p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(5)).Return(data, nil).Once() ch := make(chan common.DAHeightEvent, 1) @@ -163,7 +163,8 @@ func TestP2PHandler_ProcessHeight_SkipsWhenDataMissing(t *testing.T) { require.NoError(t, err) header.Signature = sig - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(7)).Return(header, nil).Once() + payload := &types.SignedHeaderWithDAHint{SignedHeader: header} + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(7)).Return(payload, nil).Once() p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(7)).Return(nil, errors.New("missing")).Once() ch := make(chan common.DAHeightEvent, 1) @@ -198,7 +199,8 @@ func TestP2PHandler_ProcessHeight_SkipsOnProposerMismatch(t *testing.T) { header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 11, badAddr, pub, signer) header.DataHash = common.DataHashForEmptyTxs - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(11)).Return(header, nil).Once() + payload := &types.SignedHeaderWithDAHint{SignedHeader: header} + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(11)).Return(payload, nil).Once() ch := make(chan common.DAHeightEvent, 1) err = p.Handler.ProcessHeight(ctx, 11, ch) @@ -233,7 +235,8 @@ func TestP2PHandler_ProcessedHeightSkipsPreviouslyHandledBlocks(t *testing.T) { require.NoError(t, err) header.Signature = sig - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(6)).Return(header, nil).Once() + payload := &types.SignedHeaderWithDAHint{SignedHeader: header} + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(6)).Return(payload, nil).Once() p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(6)).Return(data, nil).Once() require.NoError(t, p.Handler.ProcessHeight(ctx, 6, ch)) @@ -256,7 +259,8 @@ func TestP2PHandler_SetProcessedHeightPreventsDuplicates(t *testing.T) { require.NoError(t, err) header.Signature = sig - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(8)).Return(header, nil).Once() + payload := &types.SignedHeaderWithDAHint{SignedHeader: header} + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(8)).Return(payload, nil).Once() p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(8)).Return(data, nil).Once() ch := make(chan common.DAHeightEvent, 1) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index ee69edea7f..a40e30fbd0 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -49,7 +49,7 @@ type Syncer struct { daRetrieverHeight *atomic.Uint64 // P2P stores - headerStore common.Broadcaster[*types.SignedHeader] + headerStore common.Broadcaster[*types.SignedHeaderWithDAHint] dataStore common.Broadcaster[*types.Data] // Channels for coordination @@ -81,7 +81,7 @@ func NewSyncer( metrics *common.Metrics, config config.Config, genesis genesis.Genesis, - headerStore common.Broadcaster[*types.SignedHeader], + headerStore common.Broadcaster[*types.SignedHeaderWithDAHint], dataStore common.Broadcaster[*types.Data], logger zerolog.Logger, options common.BlockOptions, @@ -432,6 +432,41 @@ func (s *Syncer) processHeightEvent(event *common.DAHeightEvent) { return } + // If this is a P2P event with a DA height hint, trigger targeted DA retrieval + // This allows 
us to fetch the block directly from the specified DA height instead of sequential scanning + if event.Source == common.SourceP2P && event.DaHeightHint != 0 { + if _, exists := s.cache.GetHeaderDAIncluded(event.Header.Hash().String()); !exists { + s.logger.Debug(). + Uint64("height", height). + Uint64("da_height_hint", event.DaHeightHint). + Msg("P2P event with DA height hint, triggering targeted DA retrieval") + + // Trigger targeted DA retrieval in background + go func() { + targetEvents, err := s.daRetriever.RetrieveFromDA(s.ctx, event.DaHeightHint) + if err != nil { + s.logger.Debug(). + Err(err). + Uint64("da_height", event.DaHeightHint). + Msg("targeted DA retrieval failed (hint may be incorrect or DA not yet available)") + // Not a critical error - the sequential DA worker will eventually find it + return + } + + // Process retrieved events from the targeted DA height + for _, daEvent := range targetEvents { + select { + case s.heightInCh <- daEvent: + case <-s.ctx.Done(): + return + default: + s.cache.SetPendingEvent(daEvent.Header.Height(), &daEvent) + } + } + }() + } + } + // Last data must be got from store if the event comes from DA and the data hash is empty. // When if the event comes from P2P, the sequencer and then all the full nodes contains the data. if event.Source == common.SourceDA && bytes.Equal(event.Header.DataHash, common.DataHashForEmptyTxs) && currentHeight > 0 { @@ -469,7 +504,8 @@ func (s *Syncer) processHeightEvent(event *common.DAHeightEvent) { g.Go(func() error { // broadcast header locally only — prevents spamming the p2p network with old height notifications, // allowing the syncer to update its target and fill missing blocks - return s.headerStore.WriteToStoreAndBroadcast(ctx, event.Header, pubsub.WithLocalPublication(true)) + payload := &types.SignedHeaderWithDAHint{SignedHeader: event.Header, DAHeightHint: event.DaHeightHint} + return s.headerStore.WriteToStoreAndBroadcast(ctx, payload, pubsub.WithLocalPublication(true)) }) g.Go(func() error { // broadcast data locally only — prevents spamming the p2p network with old height notifications, diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index 65f2586966..970dd0cc5c 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -77,13 +77,13 @@ func TestSyncer_BackoffOnDAError(t *testing.T) { p2pHandler.On("SetProcessedHeight", mock.Anything).Return().Maybe() // Create mock stores for P2P - mockHeaderStore := extmocks.NewMockStore[*types.SignedHeader](t) + mockHeaderStore := extmocks.NewMockStore[*types.SignedHeaderWithDAHint](t) mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() mockDataStore := extmocks.NewMockStore[*types.Data](t) mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() - headerStore := common.NewMockBroadcaster[*types.SignedHeader](t) + headerStore := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) headerStore.EXPECT().Store().Return(mockHeaderStore).Maybe() syncer.headerStore = headerStore @@ -173,13 +173,13 @@ func TestSyncer_BackoffResetOnSuccess(t *testing.T) { p2pHandler.On("SetProcessedHeight", mock.Anything).Return().Maybe() // Create mock stores for P2P - mockHeaderStore := extmocks.NewMockStore[*types.SignedHeader](t) + mockHeaderStore := extmocks.NewMockStore[*types.SignedHeaderWithDAHint](t) mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() mockDataStore := extmocks.NewMockStore[*types.Data](t) 
mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() - headerStore := common.NewMockBroadcaster[*types.SignedHeader](t) + headerStore := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) headerStore.EXPECT().Store().Return(mockHeaderStore).Maybe() syncer.headerStore = headerStore @@ -263,13 +263,13 @@ func TestSyncer_BackoffBehaviorIntegration(t *testing.T) { syncer.p2pHandler = p2pHandler // Create mock stores for P2P - mockHeaderStore := extmocks.NewMockStore[*types.SignedHeader](t) + mockHeaderStore := extmocks.NewMockStore[*types.SignedHeaderWithDAHint](t) mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() mockDataStore := extmocks.NewMockStore[*types.Data](t) mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() - headerStore := common.NewMockBroadcaster[*types.SignedHeader](t) + headerStore := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) headerStore.EXPECT().Store().Return(mockHeaderStore).Maybe() syncer.headerStore = headerStore @@ -350,7 +350,7 @@ func setupTestSyncer(t *testing.T, daBlockTime time.Duration) *Syncer { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t), common.NewMockBroadcaster[*types.Data](t), zerolog.Nop(), common.DefaultBlockOptions(), diff --git a/block/internal/syncing/syncer_benchmark_test.go b/block/internal/syncing/syncer_benchmark_test.go index e2b6f6e51f..29f8e86854 100644 --- a/block/internal/syncing/syncer_benchmark_test.go +++ b/block/internal/syncing/syncer_benchmark_test.go @@ -153,7 +153,7 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay mockP2P := newMockp2pHandler(b) // not used directly in this benchmark path mockP2P.On("SetProcessedHeight", mock.Anything).Return().Maybe() s.p2pHandler = mockP2P - headerP2PStore := common.NewMockBroadcaster[*types.SignedHeader](b) + headerP2PStore := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](b) s.headerStore = headerP2PStore dataP2PStore := common.NewMockBroadcaster[*types.Data](b) s.dataStore = dataP2PStore diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 5c16da4435..3fa92e4d26 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -123,7 +123,7 @@ func TestSyncer_validateBlock_DataHashMismatch(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t), common.NewMockBroadcaster[*types.Data](t), zerolog.Nop(), common.DefaultBlockOptions(), @@ -174,7 +174,7 @@ func TestProcessHeightEvent_SyncsAndUpdatesState(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t), common.NewMockBroadcaster[*types.Data](t), zerolog.Nop(), common.DefaultBlockOptions(), @@ -228,7 +228,7 @@ func TestSequentialBlockSync(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t), common.NewMockBroadcaster[*types.Data](t), zerolog.Nop(), common.DefaultBlockOptions(), @@ -340,13 +340,13 @@ func TestSyncLoopPersistState(t *testing.T) { dummyExec := execution.NewDummyExecutor() // Create mock stores for P2P - mockHeaderStore := extmocks.NewMockStore[*types.SignedHeader](t) + mockHeaderStore := extmocks.NewMockStore[*types.SignedHeaderWithDAHint](t) 
mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() mockDataStore := extmocks.NewMockStore[*types.Data](t) mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() - mockP2PHeaderStore := common.NewMockBroadcaster[*types.SignedHeader](t) + mockP2PHeaderStore := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) mockP2PHeaderStore.EXPECT().Store().Return(mockHeaderStore).Maybe() mockP2PDataStore := common.NewMockBroadcaster[*types.Data](t) diff --git a/pkg/rpc/client/client_test.go b/pkg/rpc/client/client_test.go index 4b2b82e1b3..e517d5128b 100644 --- a/pkg/rpc/client/client_test.go +++ b/pkg/rpc/client/client_test.go @@ -28,10 +28,11 @@ import ( func setupTestServer( t *testing.T, mockStore *mocks.MockStore, - headerStore goheader.Store[*types.SignedHeader], + headerStore goheader.Store[*types.SignedHeaderWithDAHint], dataStore goheader.Store[*types.Data], mockP2P *mocks.MockP2PRPC, ) (*httptest.Server, *Client) { + t.Helper() mux := http.NewServeMux() logger := zerolog.Nop() @@ -105,13 +106,13 @@ func TestClientGetMetadata(t *testing.T) { func TestClientGetP2PStoreInfo(t *testing.T) { mockStore := mocks.NewMockStore(t) mockP2P := mocks.NewMockP2PRPC(t) - headerStore := headerstoremocks.NewMockStore[*types.SignedHeader](t) + headerStore := headerstoremocks.NewMockStore[*types.SignedHeaderWithDAHint](t) dataStore := headerstoremocks.NewMockStore[*types.Data](t) now := time.Now().UTC() - headerHead := testSignedHeader(10, now) - headerTail := testSignedHeader(5, now.Add(-time.Minute)) + headerHead := &types.SignedHeaderWithDAHint{SignedHeader: testSignedHeader(10, now)} + headerTail := &types.SignedHeaderWithDAHint{SignedHeader: testSignedHeader(5, now.Add(-time.Minute))} headerStore.On("Height").Return(uint64(10)) headerStore.On("Head", mock.Anything).Return(headerHead, nil) headerStore.On("Tail", mock.Anything).Return(headerTail, nil) diff --git a/pkg/rpc/server/server.go b/pkg/rpc/server/server.go index e0abed2de0..6fb9ee8362 100644 --- a/pkg/rpc/server/server.go +++ b/pkg/rpc/server/server.go @@ -34,7 +34,7 @@ var _ rpc.StoreServiceHandler = (*StoreServer)(nil) // StoreServer implements the StoreService defined in the proto file type StoreServer struct { store store.Store - headerStore goheader.Store[*types.SignedHeader] + headerStore goheader.Store[*types.SignedHeaderWithDAHint] dataStore goheader.Store[*types.Data] logger zerolog.Logger } @@ -42,7 +42,7 @@ type StoreServer struct { // NewStoreServer creates a new StoreServer instance func NewStoreServer( store store.Store, - headerStore goheader.Store[*types.SignedHeader], + headerStore goheader.Store[*types.SignedHeaderWithDAHint], dataStore goheader.Store[*types.Data], logger zerolog.Logger, ) *StoreServer { @@ -370,7 +370,7 @@ func (p *P2PServer) GetNetInfo( // NewServiceHandler creates a new HTTP handler for Store, P2P and Config services func NewServiceHandler( store store.Store, - headerStore goheader.Store[*types.SignedHeader], + headerStore goheader.Store[*types.SignedHeaderWithDAHint], dataStore goheader.Store[*types.Data], peerManager p2p.P2PRPC, proposerAddress []byte, diff --git a/pkg/rpc/server/server_test.go b/pkg/rpc/server/server_test.go index 32e9b0ebec..befecd910f 100644 --- a/pkg/rpc/server/server_test.go +++ b/pkg/rpc/server/server_test.go @@ -325,7 +325,7 @@ func TestGetGenesisDaHeight_InvalidLength(t *testing.T) { func TestGetP2PStoreInfo(t *testing.T) { t.Run("returns snapshots for configured stores", func(t *testing.T) { mockStore := mocks.NewMockStore(t) - headerStore := 
headerstoremocks.NewMockStore[*types.SignedHeader](t) + headerStore := headerstoremocks.NewMockStore[*types.SignedHeaderWithDAHint](t) dataStore := headerstoremocks.NewMockStore[*types.Data](t) logger := zerolog.Nop() server := NewStoreServer(mockStore, headerStore, dataStore, logger) @@ -354,10 +354,10 @@ func TestGetP2PStoreInfo(t *testing.T) { t.Run("returns error when a store edge fails", func(t *testing.T) { mockStore := mocks.NewMockStore(t) - headerStore := headerstoremocks.NewMockStore[*types.SignedHeader](t) + headerStore := headerstoremocks.NewMockStore[*types.SignedHeaderWithDAHint](t) logger := zerolog.Nop() headerStore.On("Height").Return(uint64(0)) - headerStore.On("Head", mock.Anything).Return((*types.SignedHeader)(nil), fmt.Errorf("boom")) + headerStore.On("Head", mock.Anything).Return((*types.SignedHeaderWithDAHint)(nil), fmt.Errorf("boom")) server := NewStoreServer(mockStore, headerStore, nil, logger) resp, err := server.GetP2PStoreInfo(context.Background(), connect.NewRequest(&emptypb.Empty{})) @@ -627,8 +627,8 @@ func TestHealthReadyEndpoint(t *testing.T) { }) } -func makeTestSignedHeader(height uint64, ts time.Time) *types.SignedHeader { - return &types.SignedHeader{ +func makeTestSignedHeader(height uint64, ts time.Time) *types.SignedHeaderWithDAHint { + return &types.SignedHeaderWithDAHint{SignedHeader: &types.SignedHeader{ Header: types.Header{ BaseHeader: types.BaseHeader{ Height: height, @@ -639,6 +639,7 @@ func makeTestSignedHeader(height uint64, ts time.Time) *types.SignedHeader { DataHash: []byte{0x02}, AppHash: []byte{0x03}, }, + }, } } diff --git a/pkg/sync/sync_service.go b/pkg/sync/sync_service.go index 6a17a42a85..f4c63c33a8 100644 --- a/pkg/sync/sync_service.go +++ b/pkg/sync/sync_service.go @@ -62,7 +62,7 @@ type SyncService[H header.Header[H]] struct { type DataSyncService = SyncService[*types.Data] // HeaderSyncService is the P2P Sync Service for headers. -type HeaderSyncService = SyncService[*types.SignedHeader] +type HeaderSyncService = SyncService[*types.SignedHeaderWithDAHint] // NewDataSyncService returns a new DataSyncService. func NewDataSyncService( @@ -83,7 +83,7 @@ func NewHeaderSyncService( p2p *p2p.Client, logger zerolog.Logger, ) (*HeaderSyncService, error) { - return newSyncService[*types.SignedHeader](store, headerSync, conf, genesis, p2p, logger) + return newSyncService[*types.SignedHeaderWithDAHint](store, headerSync, conf, genesis, p2p, logger) } func newSyncService[H header.Header[H]]( @@ -174,6 +174,10 @@ func (syncService *SyncService[H]) WriteToStoreAndBroadcast(ctx context.Context, return nil } +func (s *SyncService[H]) XXX(ctx context.Context, headerOrData H) error { + return s.store.Append(ctx, headerOrData) +} + // Start is a part of Service interface. func (syncService *SyncService[H]) Start(ctx context.Context) error { // setup P2P infrastructure, but don't start Subscriber yet. 
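The XXX hook added in the sync_service.go hunk above is the write path for the DA height hint: DASubmitter.SubmitHeaders (see the da_submitter.go hunk earlier in this patch) calls it after a successful submission so the hint lands in the header sync store and reaches peers over P2P. A minimal sketch of that producer side, assuming `sync` is ev-node's pkg/sync and with `hs`, `header` and `daHeight` as illustrative placeholders rather than identifiers from the patch:

// Sketch only: condenses the postSubmit callback of SubmitHeaders in this patch.
func recordHeaderDAHint(ctx context.Context, hs *sync.HeaderSyncService, header *types.SignedHeader, daHeight uint64) error {
	// Pair the DA-included header with the DA height it landed at ...
	payload := &types.SignedHeaderWithDAHint{SignedHeader: header, DAHeightHint: daHeight}
	// ... and append it to the go-header store via SyncService.XXX (renamed AppendDAHint in
	// PATCH 02); peers syncing this height over P2P then receive the hint and can ask DA for
	// exactly that height instead of scanning sequentially.
	return hs.XXX(ctx, payload)
}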
diff --git a/pkg/sync/sync_service_test.go b/pkg/sync/sync_service_test.go index 93603752a7..aeaeda18b8 100644 --- a/pkg/sync/sync_service_test.go +++ b/pkg/sync/sync_service_test.go @@ -167,7 +167,7 @@ func TestHeaderSyncServiceInitFromHigherHeight(t *testing.T) { require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, signedHeader)) } -func nextHeader(t *testing.T, previousHeader *types.SignedHeader, chainID string, noopSigner signer.Signer) *types.SignedHeader { +func nextHeader(t *testing.T, previousHeader *types.SignedHeaderWithDAHint, chainID string, noopSigner signer.Signer) *types.SignedHeaderWithDAHint { newSignedHeader := &types.SignedHeader{ Header: types.GetRandomNextHeader(previousHeader.Header, chainID), Signer: previousHeader.Signer, @@ -178,8 +178,7 @@ func nextHeader(t *testing.T, previousHeader *types.SignedHeader, chainID string require.NoError(t, err) newSignedHeader.Signature = signature require.NoError(t, newSignedHeader.Validate()) - previousHeader = newSignedHeader - return previousHeader + return &types.SignedHeaderWithDAHint{SignedHeader: newSignedHeader} } func bytesN(r *rand.Rand, n int) []byte { diff --git a/types/signed_header.go b/types/signed_header.go index ffacbc847a..d0b322321d 100644 --- a/types/signed_header.go +++ b/types/signed_header.go @@ -3,6 +3,7 @@ package types import ( "bytes" "context" + "encoding/binary" "errors" "fmt" @@ -14,6 +15,47 @@ var ( ErrLastHeaderHashMismatch = errors.New("last header hash mismatch") ) +var _ header.Header[*SignedHeaderWithDAHint] = &SignedHeaderWithDAHint{} + +type SignedHeaderWithDAHint struct { + *SignedHeader + DAHeightHint uint64 +} + +func (s *SignedHeaderWithDAHint) New() *SignedHeaderWithDAHint { + return &SignedHeaderWithDAHint{SignedHeader: &SignedHeader{}} +} +func (sh *SignedHeaderWithDAHint) Verify(untrstH *SignedHeaderWithDAHint) error { + return sh.SignedHeader.Verify(untrstH.SignedHeader) +} + +func (s *SignedHeaderWithDAHint) Zero() bool { + return s == nil +} + +func (s *SignedHeaderWithDAHint) IsZero() bool { + return s == nil +} + +func (s *SignedHeaderWithDAHint) MarshalBinary() ([]byte, error) { + bz, err := s.SignedHeader.MarshalBinary() + if err != nil { + return nil, err + } + out := make([]byte, 8+len(bz)) + binary.BigEndian.PutUint64(out, s.DAHeightHint) + copy(out[8:], bz) + return out, nil +} + +func (s *SignedHeaderWithDAHint) UnmarshalBinary(data []byte) error { + if len(data) < 8 { + return fmt.Errorf("invalid length: %d", len(data)) + } + s.DAHeightHint = binary.BigEndian.Uint64(data) + return s.SignedHeader.UnmarshalBinary(data[8:]) +} + var _ header.Header[*SignedHeader] = &SignedHeader{} // SignedHeader combines Header and its signature. diff --git a/types/utils.go b/types/utils.go index d8c2527521..b0e28c80a8 100644 --- a/types/utils.go +++ b/types/utils.go @@ -78,7 +78,7 @@ func GenerateRandomBlockCustomWithAppHash(config *BlockConfig, chainID string, a Time: uint64(signedHeader.Time().UnixNano()), } - return signedHeader, data, config.PrivKey + return signedHeader.SignedHeader, data, config.PrivKey } // GenerateRandomBlockCustom returns a block with random data and the given height, transactions, privateKey and proposer address. @@ -150,11 +150,11 @@ func GetRandomSignedHeader(chainID string) (*SignedHeader, crypto.PrivKey, error if err != nil { return nil, nil, err } - return signedHeader, pk, nil + return signedHeader.SignedHeader, pk, nil } // GetRandomSignedHeaderCustom creates a signed header based on the provided HeaderConfig. 
-func GetRandomSignedHeaderCustom(config *HeaderConfig, chainID string) (*SignedHeader, error) { +func GetRandomSignedHeaderCustom(config *HeaderConfig, chainID string) (*SignedHeaderWithDAHint, error) { pk, err := config.Signer.GetPublic() if err != nil { return nil, err @@ -183,7 +183,7 @@ func GetRandomSignedHeaderCustom(config *HeaderConfig, chainID string) (*SignedH return nil, err } signedHeader.Signature = signature - return signedHeader, nil + return &SignedHeaderWithDAHint{SignedHeader: signedHeader}, nil } // GetRandomNextSignedHeader returns a signed header with random data and height of +1 from From 044903f852b300d8a2a9239c97627554ef1787be Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Thu, 27 Nov 2025 15:33:04 +0100 Subject: [PATCH 02/13] x --- block/components.go | 21 +-- block/internal/common/broadcaster_mock.go | 134 ++++++++++-------- block/internal/common/event.go | 4 +- block/internal/common/expected_interfaces.go | 53 ++++++- block/internal/executing/executor.go | 10 +- block/internal/submitting/da_submitter.go | 63 ++++---- .../da_submitter_integration_test.go | 2 +- block/internal/syncing/p2p_handler.go | 24 ++-- block/internal/syncing/syncer.go | 109 ++++++++------ pkg/rpc/server/server.go | 6 +- pkg/sync/sync_service.go | 36 +++-- pkg/sync/sync_service_test.go | 6 +- types/da_hint_container.go | 74 ++++++++++ types/signed_header.go | 43 +----- types/utils.go | 8 +- 15 files changed, 374 insertions(+), 219 deletions(-) create mode 100644 types/da_hint_container.go diff --git a/block/components.go b/block/components.go index 43fd950f47..4b5316eab4 100644 --- a/block/components.go +++ b/block/components.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" + "github.com/evstack/ev-node/pkg/sync" "github.com/rs/zerolog" "github.com/evstack/ev-node/block/internal/cache" @@ -132,8 +133,8 @@ func NewSyncComponents( store store.Store, exec coreexecutor.Executor, da coreda.DA, - headerStore common.Broadcaster[*types.SignedHeaderWithDAHint], - dataStore common.Broadcaster[*types.Data], + headerStore *sync.HeaderSyncService, + dataStore *sync.DataSyncService, logger zerolog.Logger, metrics *Metrics, blockOpts BlockOptions, @@ -157,15 +158,15 @@ func NewSyncComponents( metrics, config, genesis, - headerStore, - dataStore, + common.NewDecorator[*types.SignedHeader](headerStore), + common.NewDecorator[*types.Data](dataStore), logger, blockOpts, errorCh, ) // Create submitter for sync nodes (no signer, only DA inclusion processing) - daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger, nil) // todo (Alex): use a noop + daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger, headerStore, dataStore) submitter := submitting.NewSubmitter( store, exec, @@ -198,8 +199,8 @@ func NewAggregatorComponents( sequencer coresequencer.Sequencer, da coreda.DA, signer signer.Signer, - headerBroadcaster common.Broadcaster[*types.SignedHeaderWithDAHint], - dataBroadcaster common.Broadcaster[*types.Data], + headerBroadcaster *sync.HeaderSyncService, + dataBroadcaster *sync.DataSyncService, logger zerolog.Logger, metrics *Metrics, blockOpts BlockOptions, @@ -222,8 +223,8 @@ func NewAggregatorComponents( metrics, config, genesis, - headerBroadcaster, - dataBroadcaster, + common.NewDecorator[*types.SignedHeader](headerBroadcaster), + common.NewDecorator[*types.Data](dataBroadcaster), logger, blockOpts, errorCh, @@ -247,7 +248,7 @@ func NewAggregatorComponents( // Create DA client and submitter for aggregator nodes (with signer for 
submission) daClient := NewDAClient(da, config, logger) - daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger, headerBroadcaster) + daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger, headerBroadcaster, dataBroadcaster) submitter := submitting.NewSubmitter( store, exec, diff --git a/block/internal/common/broadcaster_mock.go b/block/internal/common/broadcaster_mock.go index 7b40164328..e761aa624a 100644 --- a/block/internal/common/broadcaster_mock.go +++ b/block/internal/common/broadcaster_mock.go @@ -8,6 +8,7 @@ import ( "context" "github.com/celestiaorg/go-header" + "github.com/evstack/ev-node/types" "github.com/libp2p/go-libp2p-pubsub" mock "github.com/stretchr/testify/mock" ) @@ -39,6 +40,82 @@ func (_m *MockBroadcaster[H]) EXPECT() *MockBroadcaster_Expecter[H] { return &MockBroadcaster_Expecter[H]{mock: &_m.Mock} } +// AppendDAHint provides a mock function for the type MockBroadcaster +func (_mock *MockBroadcaster[H]) AppendDAHint(ctx context.Context, daHeight uint64, hashes ...types.Hash) error { + // types.Hash + _va := make([]interface{}, len(hashes)) + for _i := range hashes { + _va[_i] = hashes[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, daHeight) + _ca = append(_ca, _va...) + ret := _mock.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for AppendDAHint") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64, ...types.Hash) error); ok { + r0 = returnFunc(ctx, daHeight, hashes...) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockBroadcaster_AppendDAHint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AppendDAHint' +type MockBroadcaster_AppendDAHint_Call[H header.Header[H]] struct { + *mock.Call +} + +// AppendDAHint is a helper method to define mock.On call +// - ctx context.Context +// - daHeight uint64 +// - hashes ...types.Hash +func (_e *MockBroadcaster_Expecter[H]) AppendDAHint(ctx interface{}, daHeight interface{}, hashes ...interface{}) *MockBroadcaster_AppendDAHint_Call[H] { + return &MockBroadcaster_AppendDAHint_Call[H]{Call: _e.mock.On("AppendDAHint", + append([]interface{}{ctx, daHeight}, hashes...)...)} +} + +func (_c *MockBroadcaster_AppendDAHint_Call[H]) Run(run func(ctx context.Context, daHeight uint64, hashes ...types.Hash)) *MockBroadcaster_AppendDAHint_Call[H] { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 uint64 + if args[1] != nil { + arg1 = args[1].(uint64) + } + var arg2 []types.Hash + variadicArgs := make([]types.Hash, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(types.Hash) + } + } + arg2 = variadicArgs + run( + arg0, + arg1, + arg2..., + ) + }) + return _c +} + +func (_c *MockBroadcaster_AppendDAHint_Call[H]) Return(err error) *MockBroadcaster_AppendDAHint_Call[H] { + _c.Call.Return(err) + return _c +} + +func (_c *MockBroadcaster_AppendDAHint_Call[H]) RunAndReturn(run func(ctx context.Context, daHeight uint64, hashes ...types.Hash) error) *MockBroadcaster_AppendDAHint_Call[H] { + _c.Call.Return(run) + return _c +} + // Store provides a mock function for the type MockBroadcaster func (_mock *MockBroadcaster[H]) Store() header.Store[H] { ret := _mock.Called() @@ -160,60 +237,3 @@ func (_c *MockBroadcaster_WriteToStoreAndBroadcast_Call[H]) RunAndReturn(run fun _c.Call.Return(run) return _c } - -// XXX provides a mock 
function for the type MockBroadcaster -func (_mock *MockBroadcaster[H]) XXX(ctx context.Context, headerOrData H) error { - ret := _mock.Called(ctx, headerOrData) - - if len(ret) == 0 { - panic("no return value specified for XXX") - } - - var r0 error - if returnFunc, ok := ret.Get(0).(func(context.Context, H) error); ok { - r0 = returnFunc(ctx, headerOrData) - } else { - r0 = ret.Error(0) - } - return r0 -} - -// MockBroadcaster_XXX_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'XXX' -type MockBroadcaster_XXX_Call[H header.Header[H]] struct { - *mock.Call -} - -// XXX is a helper method to define mock.On call -// - ctx context.Context -// - headerOrData H -func (_e *MockBroadcaster_Expecter[H]) XXX(ctx interface{}, headerOrData interface{}) *MockBroadcaster_XXX_Call[H] { - return &MockBroadcaster_XXX_Call[H]{Call: _e.mock.On("XXX", ctx, headerOrData)} -} - -func (_c *MockBroadcaster_XXX_Call[H]) Run(run func(ctx context.Context, headerOrData H)) *MockBroadcaster_XXX_Call[H] { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - var arg1 H - if args[1] != nil { - arg1 = args[1].(H) - } - run( - arg0, - arg1, - ) - }) - return _c -} - -func (_c *MockBroadcaster_XXX_Call[H]) Return(err error) *MockBroadcaster_XXX_Call[H] { - _c.Call.Return(err) - return _c -} - -func (_c *MockBroadcaster_XXX_Call[H]) RunAndReturn(run func(ctx context.Context, headerOrData H) error) *MockBroadcaster_XXX_Call[H] { - _c.Call.Return(run) - return _c -} diff --git a/block/internal/common/event.go b/block/internal/common/event.go index 5b016c246b..f02a181de8 100644 --- a/block/internal/common/event.go +++ b/block/internal/common/event.go @@ -21,6 +21,6 @@ type DAHeightEvent struct { // Source indicates where this event originated from (DA or P2P) Source EventSource - // Optional DA height hint from P2P - DaHeightHint uint64 + // Optional DA height hints from P2P. 
first is the DA height hint for the header, second is the DA height hint for the data + DaHeightHints [2]uint64 } diff --git a/block/internal/common/expected_interfaces.go b/block/internal/common/expected_interfaces.go index 6015f19963..59a0991670 100644 --- a/block/internal/common/expected_interfaces.go +++ b/block/internal/common/expected_interfaces.go @@ -3,14 +3,63 @@ package common import ( "context" + "github.com/evstack/ev-node/types" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/celestiaorg/go-header" ) -// broadcaster interface for P2P broadcasting +type ( + HeaderP2PBroadcaster = Decorator[*types.SignedHeader] + DataP2PBroadcaster = Decorator[*types.Data] +) + +// Broadcaster interface for P2P broadcasting type Broadcaster[H header.Header[H]] interface { WriteToStoreAndBroadcast(ctx context.Context, payload H, opts ...pubsub.PubOpt) error Store() header.Store[H] - XXX(ctx context.Context, headerOrData H) error + AppendDAHint(ctx context.Context, daHeight uint64, hashes ...types.Hash) error +} + +// Decorator to access the the payload type without the container +type Decorator[H header.Header[H]] struct { + nested Broadcaster[*types.DAHeightHintContainer[H]] +} + +func NewDecorator[H header.Header[H]](nested Broadcaster[*types.DAHeightHintContainer[H]]) Decorator[H] { + return Decorator[H]{nested: nested} +} + +func (d Decorator[H]) WriteToStoreAndBroadcast(ctx context.Context, payload H, opts ...pubsub.PubOpt) error { + return d.nested.WriteToStoreAndBroadcast(ctx, &types.DAHeightHintContainer[H]{Entry: payload}, opts...) +} + +func (d Decorator[H]) Store() HeightStore[H] { + return HeightStoreImpl[H]{store: d.nested.Store()} +} +func (d Decorator[H]) XStore() header.Store[*types.DAHeightHintContainer[H]] { + return d.nested.Store() +} + +func (d Decorator[H]) AppendDAHint(ctx context.Context, daHeight uint64, hashes ...types.Hash) error { + return d.nested.AppendDAHint(ctx, daHeight, hashes...) 
+} + +// HeightStore is a subset of goheader.Store +type HeightStore[H header.Header[H]] interface { + GetByHeight(context.Context, uint64) (H, error) +} + +type HeightStoreImpl[H header.Header[H]] struct { + store header.Store[*types.DAHeightHintContainer[H]] +} + +func (s HeightStoreImpl[H]) GetByHeight(ctx context.Context, height uint64) (H, error) { + var zero H + v, err := s.store.GetByHeight(ctx, height) + if err != nil { + return zero, err + } + return v.Entry, nil + } diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index d09d01eab1..7f300aefd9 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -37,8 +37,8 @@ type Executor struct { metrics *common.Metrics // Broadcasting - headerBroadcaster common.Broadcaster[*types.SignedHeaderWithDAHint] - dataBroadcaster common.Broadcaster[*types.Data] + headerBroadcaster common.HeaderP2PBroadcaster + dataBroadcaster common.DataP2PBroadcaster // Configuration config config.Config @@ -76,8 +76,8 @@ func NewExecutor( metrics *common.Metrics, config config.Config, genesis genesis.Genesis, - headerBroadcaster common.Broadcaster[*types.SignedHeaderWithDAHint], - dataBroadcaster common.Broadcaster[*types.Data], + headerBroadcaster common.HeaderP2PBroadcaster, + dataBroadcaster common.DataP2PBroadcaster, logger zerolog.Logger, options common.BlockOptions, errorCh chan<- error, @@ -420,7 +420,7 @@ func (e *Executor) produceBlock() error { // broadcast header and data to P2P network g, ctx := errgroup.WithContext(e.ctx) - g.Go(func() error { return e.headerBroadcaster.WriteToStoreAndBroadcast(ctx, &types.SignedHeaderWithDAHint{SignedHeader: header}) }) + g.Go(func() error { return e.headerBroadcaster.WriteToStoreAndBroadcast(ctx, header) }) g.Go(func() error { return e.dataBroadcaster.WriteToStoreAndBroadcast(ctx, data) }) if err := g.Wait(); err != nil { e.logger.Error().Err(err).Msg("failed to broadcast header and/data") diff --git a/block/internal/submitting/da_submitter.go b/block/internal/submitting/da_submitter.go index 5123429bf2..408ab0a2b7 100644 --- a/block/internal/submitting/da_submitter.go +++ b/block/internal/submitting/da_submitter.go @@ -93,19 +93,20 @@ func clamp(v, min, max time.Duration) time.Duration { return v } -type xxxer interface { - XXX(ctx context.Context, header *types.SignedHeaderWithDAHint) error +type DAHintAppender interface { + AppendDAHint(ctx context.Context, daHeight uint64, hash ...types.Hash) error } // DASubmitter handles DA submission operations type DASubmitter struct { - client da.Client - config config.Config - genesis genesis.Genesis - options common.BlockOptions - logger zerolog.Logger - metrics *common.Metrics - xxxer xxxer + client da.Client + config config.Config + genesis genesis.Genesis + options common.BlockOptions + logger zerolog.Logger + metrics *common.Metrics + headerDAHintAppender DAHintAppender + dataDAHintAppender DAHintAppender // address selector for multi-account support addressSelector pkgda.AddressSelector @@ -119,7 +120,8 @@ func NewDASubmitter( options common.BlockOptions, metrics *common.Metrics, logger zerolog.Logger, - xxxer xxxer, + headerDAHintAppender DAHintAppender, + dataDAHintAppender DAHintAppender, ) *DASubmitter { daSubmitterLogger := logger.With().Str("component", "da_submitter").Logger() @@ -145,14 +147,15 @@ func NewDASubmitter( } return &DASubmitter{ - client: client, - config: config, - genesis: genesis, - options: options, - metrics: metrics, - logger: daSubmitterLogger, - 
addressSelector: addressSelector, - xxxer: xxxer, + client: client, + config: config, + genesis: genesis, + options: options, + metrics: metrics, + logger: daSubmitterLogger, + addressSelector: addressSelector, + headerDAHintAppender: headerDAHintAppender, + dataDAHintAppender: dataDAHintAppender, } } @@ -192,13 +195,15 @@ func (s *DASubmitter) SubmitHeaders(ctx context.Context, cache cache.Manager) er return proto.Marshal(headerPb) }, func(submitted []*types.SignedHeader, res *coreda.ResultSubmit) { - for _, header := range submitted { - cache.SetHeaderDAIncluded(header.Hash().String(), res.Height, header.Height()) - payload := &types.SignedHeaderWithDAHint{SignedHeader: header, DAHeightHint: res.Height} - if err := s.xxxer.XXX(ctx, payload); err != nil { - s.logger.Error().Err(err).Msg("failed to update header in p2p store") - // ignoring error here, since we don't want to block the block submission' - } + hashes := make([]types.Hash, len(submitted)) + for i, header := range submitted { + headerHash := header.Hash() + cache.SetHeaderDAIncluded(headerHash.String(), res.Height, header.Height()) + hashes[i] = headerHash + } + if err := s.headerDAHintAppender.AppendDAHint(ctx, res.Height, hashes...); err != nil { + s.logger.Error().Err(err).Msg("failed to append da height hint in header p2p store") + // ignoring error here, since we don't want to block the block submission' } if l := len(submitted); l > 0 { lastHeight := submitted[l-1].Height() @@ -240,8 +245,14 @@ func (s *DASubmitter) SubmitData(ctx context.Context, cache cache.Manager, signe return signedData.MarshalBinary() }, func(submitted []*types.SignedData, res *coreda.ResultSubmit) { - for _, sd := range submitted { + hashes := make([]types.Hash, len(submitted)) + for i, sd := range submitted { cache.SetDataDAIncluded(sd.Data.DACommitment().String(), res.Height, sd.Height()) + hashes[i] = sd.Hash() + } + if err := s.dataDAHintAppender.AppendDAHint(ctx, res.Height, hashes...); err != nil { + s.logger.Error().Err(err).Msg("failed to append da height hint in data p2p store") + // ignoring error here, since we don't want to block the block submission' } if l := len(submitted); l > 0 { lastHeight := submitted[l-1].Height() diff --git a/block/internal/submitting/da_submitter_integration_test.go b/block/internal/submitting/da_submitter_integration_test.go index 854b0950d2..ac35927a04 100644 --- a/block/internal/submitting/da_submitter_integration_test.go +++ b/block/internal/submitting/da_submitter_integration_test.go @@ -113,6 +113,6 @@ func TestDASubmitter_SubmitHeadersAndData_MarksInclusionAndUpdatesLastSubmitted( type noopXXXer struct{} -func (n noopXXXer) XXX(ctx context.Context, header *types.SignedHeaderWithDAHint) error { +func (n noopXXXer) AppendDAHint(ctx context.Context, header *types.SignedHeaderWithDAHint) error { return nil } diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index a5d4b53f26..ef3c19d4ec 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -6,7 +6,6 @@ import ( "fmt" "sync/atomic" - goheader "github.com/celestiaorg/go-header" "github.com/rs/zerolog" "github.com/evstack/ev-node/block/internal/cache" @@ -27,8 +26,8 @@ type p2pHandler interface { // The handler maintains a processedHeight to track the highest block that has been // successfully validated and sent to the syncer, preventing duplicate processing. 
type P2PHandler struct { - headerStore goheader.Store[*types.SignedHeaderWithDAHint] - dataStore goheader.Store[*types.Data] + headerStore common.HeightStore[*types.SignedHeaderWithDAHint] + dataStore common.HeightStore[*types.DataWithDAHint] cache cache.CacheManager genesis genesis.Genesis logger zerolog.Logger @@ -38,8 +37,8 @@ type P2PHandler struct { // NewP2PHandler creates a new P2P handler. func NewP2PHandler( - headerStore goheader.Store[*types.SignedHeaderWithDAHint], - dataStore goheader.Store[*types.Data], + headerStore common.HeightStore[*types.SignedHeaderWithDAHint], + dataStore common.HeightStore[*types.DataWithDAHint], cache cache.CacheManager, genesis genesis.Genesis, logger zerolog.Logger, @@ -74,26 +73,27 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC return nil } - header, err := h.headerStore.GetByHeight(ctx, height) + headerTuple, err := h.headerStore.GetByHeight(ctx, height) if err != nil { if ctx.Err() == nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("header unavailable in store") } return err } + header := headerTuple.Entry if err := h.assertExpectedProposer(header.ProposerAddress); err != nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("invalid header from P2P") return err } - data, err := h.dataStore.GetByHeight(ctx, height) + dataTuple, err := h.dataStore.GetByHeight(ctx, height) if err != nil { if ctx.Err() == nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("data unavailable in store") } return err } - + data := dataTuple.Entry dataCommitment := data.DACommitment() if !bytes.Equal(header.DataHash[:], dataCommitment[:]) { err := fmt.Errorf("data hash mismatch: header %x, data %x", header.DataHash, dataCommitment) @@ -104,10 +104,10 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC // further header validation (signature) is done in validateBlock. // we need to be sure that the previous block n-1 was executed before validating block n event := common.DAHeightEvent{ - Header: header.SignedHeader, - Data: data, - Source: common.SourceP2P, - DaHeightHint: header.DAHeightHint, + Header: header, + Data: data, + Source: common.SourceP2P, + DaHeightHints: [2]uint64{headerTuple.DAHeightHint, dataTuple.DAHeightHint}, } select { diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index a40e30fbd0..ff42eccd37 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -10,20 +10,18 @@ import ( "sync/atomic" "time" - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/rs/zerolog" - "golang.org/x/sync/errgroup" - - coreda "github.com/evstack/ev-node/core/da" - coreexecutor "github.com/evstack/ev-node/core/execution" - "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" "github.com/evstack/ev-node/block/internal/da" + coreda "github.com/evstack/ev-node/core/da" + coreexecutor "github.com/evstack/ev-node/core/execution" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/store" "github.com/evstack/ev-node/types" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/rs/zerolog" + "golang.org/x/sync/errgroup" ) // Syncer handles block synchronization from DA and P2P sources. 
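// Illustrative sketch (not part of the patch): how the DaHeightHints pair emitted by the
// P2P handler above is meant to be read downstream. Index 0 carries the DA height hint
// for the header, index 1 the hint for the data, and 0 means "no hint available". The
// names below are hypothetical and only mirror the event shape; the syncer's actual
// selection logic follows in the next hunks.
package example

type daHeightEvent struct {
	DaHeightHints [2]uint64 // [headerHint, dataHint]
}

// hintsToQuery returns the distinct, non-zero DA heights worth a targeted retrieval.
func hintsToQuery(ev daHeightEvent) []uint64 {
	var out []uint64
	if h := ev.DaHeightHints[0]; h != 0 {
		out = append(out, h)
	}
	if d := ev.DaHeightHints[1]; d != 0 && d != ev.DaHeightHints[0] {
		out = append(out, d)
	}
	return out
}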
@@ -49,8 +47,8 @@ type Syncer struct { daRetrieverHeight *atomic.Uint64 // P2P stores - headerStore common.Broadcaster[*types.SignedHeaderWithDAHint] - dataStore common.Broadcaster[*types.Data] + headerStore common.HeaderP2PBroadcaster + dataStore common.DataP2PBroadcaster // Channels for coordination heightInCh chan common.DAHeightEvent @@ -81,8 +79,8 @@ func NewSyncer( metrics *common.Metrics, config config.Config, genesis genesis.Genesis, - headerStore common.Broadcaster[*types.SignedHeaderWithDAHint], - dataStore common.Broadcaster[*types.Data], + headerStore common.HeaderP2PBroadcaster, + dataStore common.DataP2PBroadcaster, logger zerolog.Logger, options common.BlockOptions, errorCh chan<- error, @@ -116,7 +114,7 @@ func (s *Syncer) Start(ctx context.Context) error { // Initialize handlers s.daRetriever = NewDARetriever(s.daClient, s.cache, s.genesis, s.logger) - s.p2pHandler = NewP2PHandler(s.headerStore.Store(), s.dataStore.Store(), s.cache, s.genesis, s.logger) + s.p2pHandler = NewP2PHandler(s.headerStore.XStore(), s.dataStore.XStore(), s.cache, s.genesis, s.logger) if currentHeight, err := s.store.Height(s.ctx); err != nil { s.logger.Error().Err(err).Msg("failed to set initial processed height for p2p handler") } else { @@ -434,36 +432,64 @@ func (s *Syncer) processHeightEvent(event *common.DAHeightEvent) { // If this is a P2P event with a DA height hint, trigger targeted DA retrieval // This allows us to fetch the block directly from the specified DA height instead of sequential scanning - if event.Source == common.SourceP2P && event.DaHeightHint != 0 { - if _, exists := s.cache.GetHeaderDAIncluded(event.Header.Hash().String()); !exists { - s.logger.Debug(). - Uint64("height", height). - Uint64("da_height_hint", event.DaHeightHint). - Msg("P2P event with DA height hint, triggering targeted DA retrieval") - - // Trigger targeted DA retrieval in background - go func() { - targetEvents, err := s.daRetriever.RetrieveFromDA(s.ctx, event.DaHeightHint) - if err != nil { - s.logger.Debug(). - Err(err). - Uint64("da_height", event.DaHeightHint). - Msg("targeted DA retrieval failed (hint may be incorrect or DA not yet available)") - // Not a critical error - the sequential DA worker will eventually find it - return - } - - // Process retrieved events from the targeted DA height - for _, daEvent := range targetEvents { - select { - case s.heightInCh <- daEvent: - case <-s.ctx.Done(): + if event.Source == common.SourceP2P { + var daHeightHints []uint64 + switch { + case event.DaHeightHints == [2]uint64{0, 0}: + // empty, nothing to do + case event.DaHeightHints[0] == 0: + // check only data + if _, exists := s.cache.GetDataDAIncluded(event.Data.Hash().String()); !exists { + daHeightHints = []uint64{event.DaHeightHints[1]} + } + case event.DaHeightHints[1] == 0: + // check only header + if _, exists := s.cache.GetHeaderDAIncluded(event.Header.Hash().String()); !exists { + daHeightHints = []uint64{event.DaHeightHints[0]} + } + default: + // check both + if _, exists := s.cache.GetDataDAIncluded(event.Data.Hash().String()); !exists { + daHeightHints = []uint64{event.DaHeightHints[1]} + } + if _, exists := s.cache.GetDataDAIncluded(event.Data.Hash().String()); !exists { + daHeightHints = append(daHeightHints, event.DaHeightHints[1]) + } + if len(daHeightHints) == 2 && daHeightHints[0] == daHeightHints[1] { + daHeightHints = daHeightHints[0:1] + } + } + if len(daHeightHints) > 0 { + for _, daHeightHint := range daHeightHints { + s.logger.Debug(). + Uint64("height", height). 
+ Uint64("da_height_hint", daHeightHint). + Msg("P2P event with DA height hint, triggering targeted DA retrieval") + + // Trigger targeted DA retrieval in background + go func() { + targetEvents, err := s.daRetriever.RetrieveFromDA(s.ctx, daHeightHint) + if err != nil { + s.logger.Debug(). + Err(err). + Uint64("da_height", daHeightHint). + Msg("targeted DA retrieval failed (hint may be incorrect or DA not yet available)") + // Not a critical error - the sequential DA worker will eventually find it return - default: - s.cache.SetPendingEvent(daEvent.Header.Height(), &daEvent) } - } - }() + + // Process retrieved events from the targeted DA height + for _, daEvent := range targetEvents { + select { + case s.heightInCh <- daEvent: + case <-s.ctx.Done(): + return + default: + s.cache.SetPendingEvent(daEvent.Header.Height(), &daEvent) + } + } + }() + } } } @@ -504,8 +530,7 @@ func (s *Syncer) processHeightEvent(event *common.DAHeightEvent) { g.Go(func() error { // broadcast header locally only — prevents spamming the p2p network with old height notifications, // allowing the syncer to update its target and fill missing blocks - payload := &types.SignedHeaderWithDAHint{SignedHeader: event.Header, DAHeightHint: event.DaHeightHint} - return s.headerStore.WriteToStoreAndBroadcast(ctx, payload, pubsub.WithLocalPublication(true)) + return s.headerStore.WriteToStoreAndBroadcast(ctx, event.Header, pubsub.WithLocalPublication(true)) }) g.Go(func() error { // broadcast data locally only — prevents spamming the p2p network with old height notifications, diff --git a/pkg/rpc/server/server.go b/pkg/rpc/server/server.go index 6fb9ee8362..0a8546d3ae 100644 --- a/pkg/rpc/server/server.go +++ b/pkg/rpc/server/server.go @@ -35,7 +35,7 @@ var _ rpc.StoreServiceHandler = (*StoreServer)(nil) type StoreServer struct { store store.Store headerStore goheader.Store[*types.SignedHeaderWithDAHint] - dataStore goheader.Store[*types.Data] + dataStore goheader.Store[*types.DataWithDAHint] logger zerolog.Logger } @@ -43,7 +43,7 @@ type StoreServer struct { func NewStoreServer( store store.Store, headerStore goheader.Store[*types.SignedHeaderWithDAHint], - dataStore goheader.Store[*types.Data], + dataStore goheader.Store[*types.DataWithDAHint], logger zerolog.Logger, ) *StoreServer { return &StoreServer{ @@ -371,7 +371,7 @@ func (p *P2PServer) GetNetInfo( func NewServiceHandler( store store.Store, headerStore goheader.Store[*types.SignedHeaderWithDAHint], - dataStore goheader.Store[*types.Data], + dataStore goheader.Store[*types.DataWithDAHint], peerManager p2p.P2PRPC, proposerAddress []byte, logger zerolog.Logger, diff --git a/pkg/sync/sync_service.go b/pkg/sync/sync_service.go index f4c63c33a8..3ca112e983 100644 --- a/pkg/sync/sync_service.go +++ b/pkg/sync/sync_service.go @@ -36,10 +36,21 @@ const ( // TODO: when we add pruning we can remove this const ninetyNineYears = 99 * 365 * 24 * time.Hour +type EntityWithDAHint[H any] interface { + header.Header[H] + SetDAHint(daHeight uint64) +} + +// DataSyncService is the P2P Sync Service for blocks. +type DataSyncService = SyncService[*types.DataWithDAHint] + +// HeaderSyncService is the P2P Sync Service for headers. +type HeaderSyncService = SyncService[*types.SignedHeaderWithDAHint] + // SyncService is the P2P Sync Service for blocks and headers. // // Uses the go-header library for handling all P2P logic. 
-type SyncService[H header.Header[H]] struct { +type SyncService[H EntityWithDAHint[H]] struct { conf config.Config logger zerolog.Logger syncType syncType @@ -58,12 +69,6 @@ type SyncService[H header.Header[H]] struct { storeInitialized atomic.Bool } -// DataSyncService is the P2P Sync Service for blocks. -type DataSyncService = SyncService[*types.Data] - -// HeaderSyncService is the P2P Sync Service for headers. -type HeaderSyncService = SyncService[*types.SignedHeaderWithDAHint] - // NewDataSyncService returns a new DataSyncService. func NewDataSyncService( store ds.Batching, @@ -72,7 +77,7 @@ func NewDataSyncService( p2p *p2p.Client, logger zerolog.Logger, ) (*DataSyncService, error) { - return newSyncService[*types.Data](store, dataSync, conf, genesis, p2p, logger) + return newSyncService[*types.DataWithDAHint](store, dataSync, conf, genesis, p2p, logger) } // NewHeaderSyncService returns a new HeaderSyncService. @@ -86,7 +91,7 @@ func NewHeaderSyncService( return newSyncService[*types.SignedHeaderWithDAHint](store, headerSync, conf, genesis, p2p, logger) } -func newSyncService[H header.Header[H]]( +func newSyncService[H EntityWithDAHint[H]]( store ds.Batching, syncType syncType, conf config.Config, @@ -174,8 +179,17 @@ func (syncService *SyncService[H]) WriteToStoreAndBroadcast(ctx context.Context, return nil } -func (s *SyncService[H]) XXX(ctx context.Context, headerOrData H) error { - return s.store.Append(ctx, headerOrData) +func (s *SyncService[H]) AppendDAHint(ctx context.Context, daHeight uint64, hashes ...types.Hash) error { + entries := make([]H, 0, len(hashes)) + for _, h := range hashes { + v, err := s.store.Get(ctx, h) + if err != nil && !errors.Is(err, header.ErrNotFound) { + return err + } + v.SetDAHint(daHeight) + entries = append(entries, v) + } + return s.store.Append(ctx, entries...) } // Start is a part of Service interface. 
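// Illustrative sketch (not part of the patch), assuming a minimal store with Get and
// Append: AppendDAHint looks up each already-gossiped entry by hash, stamps it with the
// DA height it landed at, and re-appends it so the sync store serves the hint to peers.
// The types below are hypothetical stand-ins for the go-header store and entry types.
package example

import "context"

type entry interface {
	SetDAHint(daHeight uint64)
}

type hintStore interface {
	Get(ctx context.Context, hash []byte) (entry, error)
	Append(ctx context.Context, entries ...entry) error
}

func appendDAHint(ctx context.Context, s hintStore, daHeight uint64, hashes ...[]byte) error {
	entries := make([]entry, 0, len(hashes))
	for _, h := range hashes {
		e, err := s.Get(ctx, h)
		if err != nil {
			return err // the patch tolerates not-found entries; this sketch simply bails out
		}
		e.SetDAHint(daHeight)
		entries = append(entries, e)
	}
	return s.Append(ctx, entries...)
}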
diff --git a/pkg/sync/sync_service_test.go b/pkg/sync/sync_service_test.go index aeaeda18b8..b8063e874b 100644 --- a/pkg/sync/sync_service_test.go +++ b/pkg/sync/sync_service_test.go @@ -169,8 +169,8 @@ func TestHeaderSyncServiceInitFromHigherHeight(t *testing.T) { func nextHeader(t *testing.T, previousHeader *types.SignedHeaderWithDAHint, chainID string, noopSigner signer.Signer) *types.SignedHeaderWithDAHint { newSignedHeader := &types.SignedHeader{ - Header: types.GetRandomNextHeader(previousHeader.Header, chainID), - Signer: previousHeader.Signer, + Header: types.GetRandomNextHeader(previousHeader.Entry.Header, chainID), + Signer: previousHeader.Entry.Signer, } b, err := newSignedHeader.Header.MarshalBinary() require.NoError(t, err) @@ -178,7 +178,7 @@ func nextHeader(t *testing.T, previousHeader *types.SignedHeaderWithDAHint, chai require.NoError(t, err) newSignedHeader.Signature = signature require.NoError(t, newSignedHeader.Validate()) - return &types.SignedHeaderWithDAHint{SignedHeader: newSignedHeader} + return &types.SignedHeaderWithDAHint{Entry: newSignedHeader} } func bytesN(r *rand.Rand, n int) []byte { diff --git a/types/da_hint_container.go b/types/da_hint_container.go new file mode 100644 index 0000000000..9e905c6296 --- /dev/null +++ b/types/da_hint_container.go @@ -0,0 +1,74 @@ +package types + +import ( + "encoding/binary" + "fmt" + "time" + + "github.com/celestiaorg/go-header" +) + +type DAHeightHintContainer[H header.Header[H]] struct { + Entry H + DAHeightHint uint64 +} + +func (s *DAHeightHintContainer[H]) ChainID() string { + return s.Entry.ChainID() +} + +func (s *DAHeightHintContainer[H]) Hash() header.Hash { + return s.Entry.Hash() +} + +func (s *DAHeightHintContainer[H]) Height() uint64 { + return s.Entry.Height() +} + +func (s *DAHeightHintContainer[H]) LastHeader() header.Hash { + return s.Entry.LastHeader() +} + +func (s *DAHeightHintContainer[H]) Time() time.Time { + return s.Entry.Time() +} + +func (s *DAHeightHintContainer[H]) Validate() error { + return s.Entry.Validate() +} + +func (s *DAHeightHintContainer[H]) New() *DAHeightHintContainer[H] { + var empty H + return &DAHeightHintContainer[H]{Entry: empty.New()} +} + +func (sh *DAHeightHintContainer[H]) Verify(untrstH *DAHeightHintContainer[H]) error { + return sh.Entry.Verify(untrstH.Entry) +} + +func (s *DAHeightHintContainer[H]) SetDAHint(daHeight uint64) { + s.DAHeightHint = daHeight +} + +func (s *DAHeightHintContainer[H]) IsZero() bool { + return s == nil +} + +func (s *DAHeightHintContainer[H]) MarshalBinary() ([]byte, error) { + bz, err := s.Entry.MarshalBinary() + if err != nil { + return nil, err + } + out := make([]byte, 8+len(bz)) + binary.BigEndian.PutUint64(out, s.DAHeightHint) + copy(out[8:], bz) + return out, nil +} + +func (s *DAHeightHintContainer[H]) UnmarshalBinary(data []byte) error { + if len(data) < 8 { + return fmt.Errorf("invalid length: %d", len(data)) + } + s.DAHeightHint = binary.BigEndian.Uint64(data) + return s.Entry.UnmarshalBinary(data[8:]) +} diff --git a/types/signed_header.go b/types/signed_header.go index d0b322321d..c63ecce3ac 100644 --- a/types/signed_header.go +++ b/types/signed_header.go @@ -3,7 +3,6 @@ package types import ( "bytes" "context" - "encoding/binary" "errors" "fmt" @@ -15,46 +14,8 @@ var ( ErrLastHeaderHashMismatch = errors.New("last header hash mismatch") ) -var _ header.Header[*SignedHeaderWithDAHint] = &SignedHeaderWithDAHint{} - -type SignedHeaderWithDAHint struct { - *SignedHeader - DAHeightHint uint64 -} - -func (s *SignedHeaderWithDAHint) New() 
*SignedHeaderWithDAHint { - return &SignedHeaderWithDAHint{SignedHeader: &SignedHeader{}} -} -func (sh *SignedHeaderWithDAHint) Verify(untrstH *SignedHeaderWithDAHint) error { - return sh.SignedHeader.Verify(untrstH.SignedHeader) -} - -func (s *SignedHeaderWithDAHint) Zero() bool { - return s == nil -} - -func (s *SignedHeaderWithDAHint) IsZero() bool { - return s == nil -} - -func (s *SignedHeaderWithDAHint) MarshalBinary() ([]byte, error) { - bz, err := s.SignedHeader.MarshalBinary() - if err != nil { - return nil, err - } - out := make([]byte, 8+len(bz)) - binary.BigEndian.PutUint64(out, s.DAHeightHint) - copy(out[8:], bz) - return out, nil -} - -func (s *SignedHeaderWithDAHint) UnmarshalBinary(data []byte) error { - if len(data) < 8 { - return fmt.Errorf("invalid length: %d", len(data)) - } - s.DAHeightHint = binary.BigEndian.Uint64(data) - return s.SignedHeader.UnmarshalBinary(data[8:]) -} +type SignedHeaderWithDAHint = DAHeightHintContainer[*SignedHeader] +type DataWithDAHint = DAHeightHintContainer[*Data] var _ header.Header[*SignedHeader] = &SignedHeader{} diff --git a/types/utils.go b/types/utils.go index b0e28c80a8..fe59fb20dc 100644 --- a/types/utils.go +++ b/types/utils.go @@ -68,7 +68,7 @@ func GenerateRandomBlockCustomWithAppHash(config *BlockConfig, chainID string, a } if config.ProposerAddr != nil { - signedHeader.ProposerAddress = config.ProposerAddr + signedHeader.Entry.ProposerAddress = config.ProposerAddr } data.Metadata = &Metadata{ @@ -78,7 +78,7 @@ func GenerateRandomBlockCustomWithAppHash(config *BlockConfig, chainID string, a Time: uint64(signedHeader.Time().UnixNano()), } - return signedHeader.SignedHeader, data, config.PrivKey + return signedHeader.Entry, data, config.PrivKey } // GenerateRandomBlockCustom returns a block with random data and the given height, transactions, privateKey and proposer address. @@ -150,7 +150,7 @@ func GetRandomSignedHeader(chainID string) (*SignedHeader, crypto.PrivKey, error if err != nil { return nil, nil, err } - return signedHeader.SignedHeader, pk, nil + return signedHeader.Entry, pk, nil } // GetRandomSignedHeaderCustom creates a signed header based on the provided HeaderConfig. 
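// Illustrative sketch (not part of the patch) of the wire framing used by
// DAHeightHintContainer above: an 8-byte big-endian DA height hint followed by the
// wrapped entry's own binary encoding. frameWithHint and splitHint are hypothetical
// helpers that only demonstrate the layout.
package example

import (
	"encoding/binary"
	"fmt"
)

// frameWithHint prepends the DA height hint to an already-encoded entry.
func frameWithHint(daHeightHint uint64, entryBytes []byte) []byte {
	out := make([]byte, 8+len(entryBytes))
	binary.BigEndian.PutUint64(out, daHeightHint)
	copy(out[8:], entryBytes)
	return out
}

// splitHint recovers the hint and the entry bytes from a framed payload.
func splitHint(framed []byte) (hint uint64, entryBytes []byte, err error) {
	if len(framed) < 8 {
		return 0, nil, fmt.Errorf("invalid length: %d", len(framed))
	}
	return binary.BigEndian.Uint64(framed), framed[8:], nil
}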
@@ -183,7 +183,7 @@ func GetRandomSignedHeaderCustom(config *HeaderConfig, chainID string) (*SignedH return nil, err } signedHeader.Signature = signature - return &SignedHeaderWithDAHint{SignedHeader: signedHeader}, nil + return &SignedHeaderWithDAHint{Entry: signedHeader}, nil } // GetRandomNextSignedHeader returns a signed header with random data and height of +1 from From 2c9a2128381ab5abb3a5c4e560e8517fb9ff3f26 Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Fri, 28 Nov 2025 09:20:20 +0100 Subject: [PATCH 03/13] Review feedback --- block/internal/syncing/syncer.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index ff42eccd37..b73b8f572d 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -449,8 +449,8 @@ func (s *Syncer) processHeightEvent(event *common.DAHeightEvent) { } default: // check both - if _, exists := s.cache.GetDataDAIncluded(event.Data.Hash().String()); !exists { - daHeightHints = []uint64{event.DaHeightHints[1]} + if _, exists := s.cache.GetHeaderDAIncluded(event.Header.Hash().String()); !exists { + daHeightHints = []uint64{event.DaHeightHints[0]} } if _, exists := s.cache.GetDataDAIncluded(event.Data.Hash().String()); !exists { daHeightHints = append(daHeightHints, event.DaHeightHints[1]) From e2b0520986a5af78591ffe07dc726b8880e0d50d Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Fri, 28 Nov 2025 13:37:01 +0100 Subject: [PATCH 04/13] Encapsulate hint in sync package --- .mockery.yaml | 5 + apps/evm/cmd/rollback.go | 6 +- apps/testapp/cmd/rollback.go | 9 +- block/components.go | 9 +- block/internal/common/broadcaster_mock.go | 66 +++++++--- block/internal/common/expected_interfaces.go | 72 ++++------- .../internal/executing/executor_lazy_test.go | 4 +- .../internal/executing/executor_logic_test.go | 4 +- .../executing/executor_restart_test.go | 8 +- block/internal/executing/executor_test.go | 2 +- .../da_submitter_integration_test.go | 6 +- .../submitting/da_submitter_mocks_test.go | 2 +- .../internal/submitting/da_submitter_test.go | 6 +- block/internal/submitting/submitter_test.go | 6 +- block/internal/syncing/height_store_mock.go | 113 ++++++++++++++++++ block/internal/syncing/p2p_handler.go | 24 ++-- block/internal/syncing/p2p_handler_test.go | 35 +++--- block/internal/syncing/syncer.go | 2 +- block/internal/syncing/syncer_backoff_test.go | 32 +---- .../internal/syncing/syncer_benchmark_test.go | 2 +- block/internal/syncing/syncer_test.go | 13 +- node/full.go | 2 +- pkg/rpc/client/client_test.go | 17 +-- pkg/rpc/server/server.go | 13 +- pkg/rpc/server/server_test.go | 18 +-- {types => pkg/sync}/da_hint_container.go | 9 +- pkg/sync/sync_service.go | 88 ++++++++------ pkg/sync/sync_service_test.go | 8 +- types/signed_header.go | 3 - types/utils.go | 10 +- 30 files changed, 356 insertions(+), 238 deletions(-) create mode 100644 block/internal/syncing/height_store_mock.go rename {types => pkg/sync}/da_hint_container.go (86%) diff --git a/.mockery.yaml b/.mockery.yaml index 8f139231cb..77930d950b 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -58,6 +58,11 @@ packages: dir: ./block/internal/syncing pkgname: syncing filename: syncer_mock.go + HeightStore: + config: + dir: ./block/internal/syncing + pkgname: syncing + filename: height_store_mock.go github.com/evstack/ev-node/block/internal/common: interfaces: Broadcaster: diff --git a/apps/evm/cmd/rollback.go b/apps/evm/cmd/rollback.go index 4a75f9a726..bb34d6a06a 100644 --- 
a/apps/evm/cmd/rollback.go +++ b/apps/evm/cmd/rollback.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" + "github.com/evstack/ev-node/pkg/sync" ds "github.com/ipfs/go-datastore" kt "github.com/ipfs/go-datastore/keytransform" "github.com/spf13/cobra" @@ -13,7 +14,6 @@ import ( "github.com/evstack/ev-node/node" rollcmd "github.com/evstack/ev-node/pkg/cmd" "github.com/evstack/ev-node/pkg/store" - "github.com/evstack/ev-node/types" ) // NewRollbackCmd creates a command to rollback ev-node state by one height. @@ -70,7 +70,7 @@ func NewRollbackCmd() *cobra.Command { } // rollback ev-node goheader state - headerStore, err := goheaderstore.NewStore[*types.SignedHeaderWithDAHint]( + headerStore, err := goheaderstore.NewStore[*sync.SignedHeaderWithDAHint]( evolveDB, goheaderstore.WithStorePrefix("headerSync"), goheaderstore.WithMetrics(), @@ -79,7 +79,7 @@ func NewRollbackCmd() *cobra.Command { return err } - dataStore, err := goheaderstore.NewStore[*types.Data]( + dataStore, err := goheaderstore.NewStore[*sync.DataWithDAHint]( evolveDB, goheaderstore.WithStorePrefix("dataSync"), goheaderstore.WithMetrics(), diff --git a/apps/testapp/cmd/rollback.go b/apps/testapp/cmd/rollback.go index 761ec207d5..42be93cf18 100644 --- a/apps/testapp/cmd/rollback.go +++ b/apps/testapp/cmd/rollback.go @@ -5,13 +5,12 @@ import ( "errors" "fmt" + goheaderstore "github.com/celestiaorg/go-header/store" kvexecutor "github.com/evstack/ev-node/apps/testapp/kv" "github.com/evstack/ev-node/node" rollcmd "github.com/evstack/ev-node/pkg/cmd" "github.com/evstack/ev-node/pkg/store" - "github.com/evstack/ev-node/types" - - goheaderstore "github.com/celestiaorg/go-header/store" + "github.com/evstack/ev-node/pkg/sync" ds "github.com/ipfs/go-datastore" kt "github.com/ipfs/go-datastore/keytransform" "github.com/spf13/cobra" @@ -76,7 +75,7 @@ func NewRollbackCmd() *cobra.Command { } // rollback ev-node goheader state - headerStore, err := goheaderstore.NewStore[*types.SignedHeaderWithDAHint]( + headerStore, err := goheaderstore.NewStore[*sync.SignedHeaderWithDAHint]( evolveDB, goheaderstore.WithStorePrefix("headerSync"), goheaderstore.WithMetrics(), @@ -85,7 +84,7 @@ func NewRollbackCmd() *cobra.Command { return err } - dataStore, err := goheaderstore.NewStore[*types.Data]( + dataStore, err := goheaderstore.NewStore[*sync.DataWithDAHint]( evolveDB, goheaderstore.WithStorePrefix("dataSync"), goheaderstore.WithMetrics(), diff --git a/block/components.go b/block/components.go index 4b5316eab4..298a97e379 100644 --- a/block/components.go +++ b/block/components.go @@ -9,7 +9,6 @@ import ( "github.com/rs/zerolog" "github.com/evstack/ev-node/block/internal/cache" - "github.com/evstack/ev-node/block/internal/common" "github.com/evstack/ev-node/block/internal/executing" "github.com/evstack/ev-node/block/internal/reaping" "github.com/evstack/ev-node/block/internal/submitting" @@ -158,8 +157,8 @@ func NewSyncComponents( metrics, config, genesis, - common.NewDecorator[*types.SignedHeader](headerStore), - common.NewDecorator[*types.Data](dataStore), + headerStore, + dataStore, logger, blockOpts, errorCh, @@ -223,8 +222,8 @@ func NewAggregatorComponents( metrics, config, genesis, - common.NewDecorator[*types.SignedHeader](headerBroadcaster), - common.NewDecorator[*types.Data](dataBroadcaster), + headerBroadcaster, + dataBroadcaster, logger, blockOpts, errorCh, diff --git a/block/internal/common/broadcaster_mock.go b/block/internal/common/broadcaster_mock.go index e761aa624a..39d748f5cb 100644 --- a/block/internal/common/broadcaster_mock.go +++ 
b/block/internal/common/broadcaster_mock.go @@ -116,48 +116,76 @@ func (_c *MockBroadcaster_AppendDAHint_Call[H]) RunAndReturn(run func(ctx contex return _c } -// Store provides a mock function for the type MockBroadcaster -func (_mock *MockBroadcaster[H]) Store() header.Store[H] { - ret := _mock.Called() +// GetByHeight provides a mock function for the type MockBroadcaster +func (_mock *MockBroadcaster[H]) GetByHeight(ctx context.Context, height uint64) (H, uint64, error) { + ret := _mock.Called(ctx, height) if len(ret) == 0 { - panic("no return value specified for Store") + panic("no return value specified for GetByHeight") } - var r0 header.Store[H] - if returnFunc, ok := ret.Get(0).(func() header.Store[H]); ok { - r0 = returnFunc() + var r0 H + var r1 uint64 + var r2 error + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) (H, uint64, error)); ok { + return returnFunc(ctx, height) + } + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) H); ok { + r0 = returnFunc(ctx, height) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(header.Store[H]) + r0 = ret.Get(0).(H) } } - return r0 + if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) uint64); ok { + r1 = returnFunc(ctx, height) + } else { + r1 = ret.Get(1).(uint64) + } + if returnFunc, ok := ret.Get(2).(func(context.Context, uint64) error); ok { + r2 = returnFunc(ctx, height) + } else { + r2 = ret.Error(2) + } + return r0, r1, r2 } -// MockBroadcaster_Store_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Store' -type MockBroadcaster_Store_Call[H header.Header[H]] struct { +// MockBroadcaster_GetByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetByHeight' +type MockBroadcaster_GetByHeight_Call[H header.Header[H]] struct { *mock.Call } -// Store is a helper method to define mock.On call -func (_e *MockBroadcaster_Expecter[H]) Store() *MockBroadcaster_Store_Call[H] { - return &MockBroadcaster_Store_Call[H]{Call: _e.mock.On("Store")} +// GetByHeight is a helper method to define mock.On call +// - ctx context.Context +// - height uint64 +func (_e *MockBroadcaster_Expecter[H]) GetByHeight(ctx interface{}, height interface{}) *MockBroadcaster_GetByHeight_Call[H] { + return &MockBroadcaster_GetByHeight_Call[H]{Call: _e.mock.On("GetByHeight", ctx, height)} } -func (_c *MockBroadcaster_Store_Call[H]) Run(run func()) *MockBroadcaster_Store_Call[H] { +func (_c *MockBroadcaster_GetByHeight_Call[H]) Run(run func(ctx context.Context, height uint64)) *MockBroadcaster_GetByHeight_Call[H] { _c.Call.Run(func(args mock.Arguments) { - run() + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 uint64 + if args[1] != nil { + arg1 = args[1].(uint64) + } + run( + arg0, + arg1, + ) }) return _c } -func (_c *MockBroadcaster_Store_Call[H]) Return(store header.Store[H]) *MockBroadcaster_Store_Call[H] { - _c.Call.Return(store) +func (_c *MockBroadcaster_GetByHeight_Call[H]) Return(v H, v1 uint64, err error) *MockBroadcaster_GetByHeight_Call[H] { + _c.Call.Return(v, v1, err) return _c } -func (_c *MockBroadcaster_Store_Call[H]) RunAndReturn(run func() header.Store[H]) *MockBroadcaster_Store_Call[H] { +func (_c *MockBroadcaster_GetByHeight_Call[H]) RunAndReturn(run func(ctx context.Context, height uint64) (H, uint64, error)) *MockBroadcaster_GetByHeight_Call[H] { _c.Call.Return(run) return _c } diff --git a/block/internal/common/expected_interfaces.go 
b/block/internal/common/expected_interfaces.go index 59a0991670..c880a5b543 100644 --- a/block/internal/common/expected_interfaces.go +++ b/block/internal/common/expected_interfaces.go @@ -10,56 +10,34 @@ import ( ) type ( - HeaderP2PBroadcaster = Decorator[*types.SignedHeader] - DataP2PBroadcaster = Decorator[*types.Data] + HeaderP2PBroadcaster = Broadcaster[*types.SignedHeader] + DataP2PBroadcaster = Broadcaster[*types.Data] ) // Broadcaster interface for P2P broadcasting type Broadcaster[H header.Header[H]] interface { WriteToStoreAndBroadcast(ctx context.Context, payload H, opts ...pubsub.PubOpt) error - Store() header.Store[H] AppendDAHint(ctx context.Context, daHeight uint64, hashes ...types.Hash) error -} - -// Decorator to access the the payload type without the container -type Decorator[H header.Header[H]] struct { - nested Broadcaster[*types.DAHeightHintContainer[H]] -} - -func NewDecorator[H header.Header[H]](nested Broadcaster[*types.DAHeightHintContainer[H]]) Decorator[H] { - return Decorator[H]{nested: nested} -} - -func (d Decorator[H]) WriteToStoreAndBroadcast(ctx context.Context, payload H, opts ...pubsub.PubOpt) error { - return d.nested.WriteToStoreAndBroadcast(ctx, &types.DAHeightHintContainer[H]{Entry: payload}, opts...) -} - -func (d Decorator[H]) Store() HeightStore[H] { - return HeightStoreImpl[H]{store: d.nested.Store()} -} -func (d Decorator[H]) XStore() header.Store[*types.DAHeightHintContainer[H]] { - return d.nested.Store() -} - -func (d Decorator[H]) AppendDAHint(ctx context.Context, daHeight uint64, hashes ...types.Hash) error { - return d.nested.AppendDAHint(ctx, daHeight, hashes...) -} - -// HeightStore is a subset of goheader.Store -type HeightStore[H header.Header[H]] interface { - GetByHeight(context.Context, uint64) (H, error) -} - -type HeightStoreImpl[H header.Header[H]] struct { - store header.Store[*types.DAHeightHintContainer[H]] -} - -func (s HeightStoreImpl[H]) GetByHeight(ctx context.Context, height uint64) (H, error) { - var zero H - v, err := s.store.GetByHeight(ctx, height) - if err != nil { - return zero, err - } - return v.Entry, nil - -} + GetByHeight(ctx context.Context, height uint64) (H, uint64, error) +} + +// +//// Decorator to access the payload type without the container +//type Decorator[H header.Header[H]] struct { +// nested Broadcaster[*sync.DAHeightHintContainer[H]] +//} +// +//func NewDecorator[H header.Header[H]](nested Broadcaster[*sync.DAHeightHintContainer[H]]) Decorator[H] { +// return Decorator[H]{nested: nested} +//} +// +//func (d Decorator[H]) WriteToStoreAndBroadcast(ctx context.Context, payload H, opts ...pubsub.PubOpt) error { +// return d.nested.WriteToStoreAndBroadcast(ctx, &sync.DAHeightHintContainer[H]{Entry: payload}, opts...) +//} +// +//func (d Decorator[H]) AppendDAHint(ctx context.Context, daHeight uint64, hashes ...types.Hash) error { +// return d.nested.AppendDAHint(ctx, daHeight, hashes...) 
+//} +//func (d Decorator[H]) GetByHeight(ctx context.Context, height uint64) (H, error) { +// panic("not implemented") +//} diff --git a/block/internal/executing/executor_lazy_test.go b/block/internal/executing/executor_lazy_test.go index 25b784b29d..b72f0a856b 100644 --- a/block/internal/executing/executor_lazy_test.go +++ b/block/internal/executing/executor_lazy_test.go @@ -47,7 +47,7 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) + hb := common.NewMockBroadcaster[*types.SignedHeader](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() db := common.NewMockBroadcaster[*types.Data](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() @@ -157,7 +157,7 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) + hb := common.NewMockBroadcaster[*types.SignedHeader](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() db := common.NewMockBroadcaster[*types.Data](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() diff --git a/block/internal/executing/executor_logic_test.go b/block/internal/executing/executor_logic_test.go index c615e458cc..9aa79d0c43 100644 --- a/block/internal/executing/executor_logic_test.go +++ b/block/internal/executing/executor_logic_test.go @@ -69,7 +69,7 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { mockSeq := testmocks.NewMockSequencer(t) // Broadcasters are required by produceBlock; use generated mocks - hb := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) + hb := common.NewMockBroadcaster[*types.SignedHeader](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() db := common.NewMockBroadcaster[*types.Data](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() @@ -156,7 +156,7 @@ func TestPendingLimit_SkipsProduction(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) + hb := common.NewMockBroadcaster[*types.SignedHeader](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() db := common.NewMockBroadcaster[*types.Data](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() diff --git a/block/internal/executing/executor_restart_test.go b/block/internal/executing/executor_restart_test.go index b9a13f68b8..3f0e8b500c 100644 --- a/block/internal/executing/executor_restart_test.go +++ b/block/internal/executing/executor_restart_test.go @@ -47,7 +47,7 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { // Create first executor instance mockExec1 := testmocks.NewMockExecutor(t) mockSeq1 := testmocks.NewMockSequencer(t) - hb1 := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) + hb1 := common.NewMockBroadcaster[*types.SignedHeader](t) hb1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() db1 := common.NewMockBroadcaster[*types.Data](t) db1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() @@ -166,7 +166,7 @@ func 
TestExecutor_RestartUsesPendingHeader(t *testing.T) { // Create second executor instance (restart scenario) mockExec2 := testmocks.NewMockExecutor(t) mockSeq2 := testmocks.NewMockSequencer(t) - hb2 := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) + hb2 := common.NewMockBroadcaster[*types.SignedHeader](t) hb2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() db2 := common.NewMockBroadcaster[*types.Data](t) db2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() @@ -264,7 +264,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { // Create first executor and produce one block mockExec1 := testmocks.NewMockExecutor(t) mockSeq1 := testmocks.NewMockSequencer(t) - hb1 := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) + hb1 := common.NewMockBroadcaster[*types.SignedHeader](t) hb1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() db1 := common.NewMockBroadcaster[*types.Data](t) db1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() @@ -316,7 +316,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { // Create second executor (restart) mockExec2 := testmocks.NewMockExecutor(t) mockSeq2 := testmocks.NewMockSequencer(t) - hb2 := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) + hb2 := common.NewMockBroadcaster[*types.SignedHeader](t) hb2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() db2 := common.NewMockBroadcaster[*types.Data](t) db2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() diff --git a/block/internal/executing/executor_test.go b/block/internal/executing/executor_test.go index 76a7d21748..7ef0f64e21 100644 --- a/block/internal/executing/executor_test.go +++ b/block/internal/executing/executor_test.go @@ -39,7 +39,7 @@ func TestExecutor_BroadcasterIntegration(t *testing.T) { } // Create mock broadcasters - headerBroadcaster := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) + headerBroadcaster := common.NewMockBroadcaster[*types.SignedHeader](t) dataBroadcaster := common.NewMockBroadcaster[*types.Data](t) // Create executor with broadcasters diff --git a/block/internal/submitting/da_submitter_integration_test.go b/block/internal/submitting/da_submitter_integration_test.go index ac35927a04..943abbff71 100644 --- a/block/internal/submitting/da_submitter_integration_test.go +++ b/block/internal/submitting/da_submitter_integration_test.go @@ -93,7 +93,7 @@ func TestDASubmitter_SubmitHeadersAndData_MarksInclusionAndUpdatesLastSubmitted( Namespace: cfg.DA.Namespace, DataNamespace: cfg.DA.DataNamespace, }) - daSubmitter := NewDASubmitter(daClient, cfg, gen, common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop(), noopXXXer{}) + daSubmitter := NewDASubmitter(daClient, cfg, gen, common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop(), noopDAHintAppender{}, noopDAHintAppender{}) // Submit headers and data require.NoError(t, daSubmitter.SubmitHeaders(context.Background(), cm)) @@ -111,8 +111,8 @@ func TestDASubmitter_SubmitHeadersAndData_MarksInclusionAndUpdatesLastSubmitted( } -type noopXXXer struct{} +type noopDAHintAppender struct{} -func (n noopXXXer) AppendDAHint(ctx context.Context, header *types.SignedHeaderWithDAHint) error { +func (n noopDAHintAppender) AppendDAHint(ctx context.Context, daHeight uint64, hash ...types.Hash) error { return nil } diff --git a/block/internal/submitting/da_submitter_mocks_test.go 
b/block/internal/submitting/da_submitter_mocks_test.go index 1716b59929..045cf821e6 100644 --- a/block/internal/submitting/da_submitter_mocks_test.go +++ b/block/internal/submitting/da_submitter_mocks_test.go @@ -36,7 +36,7 @@ func newTestSubmitter(mockDA *mocks.MockDA, override func(*config.Config)) *DASu Namespace: cfg.DA.Namespace, DataNamespace: cfg.DA.DataNamespace, }) - return NewDASubmitter(daClient, cfg, genesis.Genesis{} /*options=*/, common.BlockOptions{}, common.NopMetrics(), zerolog.Nop(), nil) + return NewDASubmitter(daClient, cfg, genesis.Genesis{} /*options=*/, common.BlockOptions{}, common.NopMetrics(), zerolog.Nop(), nil, nil) } // marshal helper for simple items diff --git a/block/internal/submitting/da_submitter_test.go b/block/internal/submitting/da_submitter_test.go index f33aaab21f..02f06f0573 100644 --- a/block/internal/submitting/da_submitter_test.go +++ b/block/internal/submitting/da_submitter_test.go @@ -65,7 +65,8 @@ func setupDASubmitterTest(t *testing.T) (*DASubmitter, store.Store, cache.Manage common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop(), - noopXXXer{}, + noopDAHintAppender{}, + noopDAHintAppender{}, ) return daSubmitter, st, cm, dummyDA, gen @@ -116,7 +117,8 @@ func TestNewDASubmitterSetsVisualizerWhenEnabled(t *testing.T) { common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop(), - nil, + noopDAHintAppender{}, + noopDAHintAppender{}, ) require.NotNil(t, server.GetDAVisualizationServer()) diff --git a/block/internal/submitting/submitter_test.go b/block/internal/submitting/submitter_test.go index f317e0bdec..dc4c8e142e 100644 --- a/block/internal/submitting/submitter_test.go +++ b/block/internal/submitting/submitter_test.go @@ -168,7 +168,7 @@ func TestSubmitter_setSequencerHeightToDAHeight(t *testing.T) { Namespace: cfg.DA.Namespace, DataNamespace: cfg.DA.DataNamespace, }) - daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop(), nil) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop(), nil, nil) s := NewSubmitter(mockStore, nil, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, zerolog.Nop(), nil) s.ctx = ctx @@ -253,7 +253,7 @@ func TestSubmitter_processDAInclusionLoop_advances(t *testing.T) { Namespace: cfg.DA.Namespace, DataNamespace: cfg.DA.DataNamespace, }) - daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop(), nil) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop(), nil, nil) s := NewSubmitter(st, exec, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, zerolog.Nop(), nil) // prepare two consecutive blocks in store with DA included in cache @@ -444,7 +444,7 @@ func TestSubmitter_CacheClearedOnHeightInclusion(t *testing.T) { Namespace: cfg.DA.Namespace, DataNamespace: cfg.DA.DataNamespace, }) - daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop(), nil) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop(), nil, nil) s := NewSubmitter(st, exec, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, zerolog.Nop(), nil) // Create test blocks diff --git a/block/internal/syncing/height_store_mock.go b/block/internal/syncing/height_store_mock.go new file mode 100644 index 0000000000..b4857accfa --- /dev/null +++ b/block/internal/syncing/height_store_mock.go @@ -0,0 +1,113 @@ +// Code generated by mockery; DO NOT EDIT. 
+// github.com/vektra/mockery +// template: testify + +package syncing + +import ( + "context" + + "github.com/celestiaorg/go-header" + mock "github.com/stretchr/testify/mock" +) + +// NewMockHeightStore creates a new instance of MockHeightStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockHeightStore[H header.Header[H]](t interface { + mock.TestingT + Cleanup(func()) +}) *MockHeightStore[H] { + mock := &MockHeightStore[H]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// MockHeightStore is an autogenerated mock type for the HeightStore type +type MockHeightStore[H header.Header[H]] struct { + mock.Mock +} + +type MockHeightStore_Expecter[H header.Header[H]] struct { + mock *mock.Mock +} + +func (_m *MockHeightStore[H]) EXPECT() *MockHeightStore_Expecter[H] { + return &MockHeightStore_Expecter[H]{mock: &_m.Mock} +} + +// GetByHeight provides a mock function for the type MockHeightStore +func (_mock *MockHeightStore[H]) GetByHeight(ctx context.Context, height uint64) (H, uint64, error) { + ret := _mock.Called(ctx, height) + + if len(ret) == 0 { + panic("no return value specified for GetByHeight") + } + + var r0 H + var r1 uint64 + var r2 error + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) (H, uint64, error)); ok { + return returnFunc(ctx, height) + } + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) H); ok { + r0 = returnFunc(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(H) + } + } + if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) uint64); ok { + r1 = returnFunc(ctx, height) + } else { + r1 = ret.Get(1).(uint64) + } + if returnFunc, ok := ret.Get(2).(func(context.Context, uint64) error); ok { + r2 = returnFunc(ctx, height) + } else { + r2 = ret.Error(2) + } + return r0, r1, r2 +} + +// MockHeightStore_GetByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetByHeight' +type MockHeightStore_GetByHeight_Call[H header.Header[H]] struct { + *mock.Call +} + +// GetByHeight is a helper method to define mock.On call +// - ctx context.Context +// - height uint64 +func (_e *MockHeightStore_Expecter[H]) GetByHeight(ctx interface{}, height interface{}) *MockHeightStore_GetByHeight_Call[H] { + return &MockHeightStore_GetByHeight_Call[H]{Call: _e.mock.On("GetByHeight", ctx, height)} +} + +func (_c *MockHeightStore_GetByHeight_Call[H]) Run(run func(ctx context.Context, height uint64)) *MockHeightStore_GetByHeight_Call[H] { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 uint64 + if args[1] != nil { + arg1 = args[1].(uint64) + } + run( + arg0, + arg1, + ) + }) + return _c +} + +func (_c *MockHeightStore_GetByHeight_Call[H]) Return(v H, v1 uint64, err error) *MockHeightStore_GetByHeight_Call[H] { + _c.Call.Return(v, v1, err) + return _c +} + +func (_c *MockHeightStore_GetByHeight_Call[H]) RunAndReturn(run func(ctx context.Context, height uint64) (H, uint64, error)) *MockHeightStore_GetByHeight_Call[H] { + _c.Call.Return(run) + return _c +} diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index ef3c19d4ec..18c3e6c634 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -6,12 +6,13 @@ import ( "fmt" "sync/atomic" + 
"github.com/celestiaorg/go-header" + "github.com/evstack/ev-node/types" "github.com/rs/zerolog" "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" "github.com/evstack/ev-node/pkg/genesis" - "github.com/evstack/ev-node/types" ) type p2pHandler interface { @@ -19,6 +20,11 @@ type p2pHandler interface { SetProcessedHeight(height uint64) } +// HeightStore is a subset of goheader.Store +type HeightStore[H header.Header[H]] interface { + GetByHeight(ctx context.Context, height uint64) (H, uint64, error) +} + // P2PHandler coordinates block retrieval from P2P stores for the syncer. // It waits for both header and data to be available at a given height, // validates their consistency, and emits events to the syncer for processing. @@ -26,8 +32,8 @@ type p2pHandler interface { // The handler maintains a processedHeight to track the highest block that has been // successfully validated and sent to the syncer, preventing duplicate processing. type P2PHandler struct { - headerStore common.HeightStore[*types.SignedHeaderWithDAHint] - dataStore common.HeightStore[*types.DataWithDAHint] + headerStore HeightStore[*types.SignedHeader] + dataStore HeightStore[*types.Data] cache cache.CacheManager genesis genesis.Genesis logger zerolog.Logger @@ -37,8 +43,8 @@ type P2PHandler struct { // NewP2PHandler creates a new P2P handler. func NewP2PHandler( - headerStore common.HeightStore[*types.SignedHeaderWithDAHint], - dataStore common.HeightStore[*types.DataWithDAHint], + headerStore HeightStore[*types.SignedHeader], + dataStore HeightStore[*types.Data], cache cache.CacheManager, genesis genesis.Genesis, logger zerolog.Logger, @@ -73,27 +79,25 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC return nil } - headerTuple, err := h.headerStore.GetByHeight(ctx, height) + header, headerDAHint, err := h.headerStore.GetByHeight(ctx, height) if err != nil { if ctx.Err() == nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("header unavailable in store") } return err } - header := headerTuple.Entry if err := h.assertExpectedProposer(header.ProposerAddress); err != nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("invalid header from P2P") return err } - dataTuple, err := h.dataStore.GetByHeight(ctx, height) + data, dataDAHint, err := h.dataStore.GetByHeight(ctx, height) if err != nil { if ctx.Err() == nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("data unavailable in store") } return err } - data := dataTuple.Entry dataCommitment := data.DACommitment() if !bytes.Equal(header.DataHash[:], dataCommitment[:]) { err := fmt.Errorf("data hash mismatch: header %x, data %x", header.DataHash, dataCommitment) @@ -107,7 +111,7 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC Header: header, Data: data, Source: common.SourceP2P, - DaHeightHints: [2]uint64{headerTuple.DAHeightHint, dataTuple.DAHeightHint}, + DaHeightHints: [2]uint64{headerDAHint, dataDAHint}, } select { diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index aacd54d4cc..5970900474 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -18,7 +18,6 @@ import ( "github.com/evstack/ev-node/pkg/genesis" signerpkg "github.com/evstack/ev-node/pkg/signer" "github.com/evstack/ev-node/pkg/signer/noop" - extmocks "github.com/evstack/ev-node/test/mocks/external" "github.com/evstack/ev-node/types" ) @@ -57,8 +56,8 @@ func 
p2pMakeSignedHeader(t *testing.T, chainID string, height uint64, proposer [ // P2PTestData aggregates dependencies used by P2P handler tests. type P2PTestData struct { Handler *P2PHandler - HeaderStore *extmocks.MockStore[*types.SignedHeaderWithDAHint] - DataStore *extmocks.MockStore[*types.Data] + HeaderStore *MockHeightStore[*types.SignedHeader] + DataStore *MockHeightStore[*types.Data] Cache cache.CacheManager Genesis genesis.Genesis ProposerAddr []byte @@ -73,8 +72,8 @@ func setupP2P(t *testing.T) *P2PTestData { gen := genesis.Genesis{ChainID: "p2p-test", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: proposerAddr} - headerStoreMock := extmocks.NewMockStore[*types.SignedHeaderWithDAHint](t) - dataStoreMock := extmocks.NewMockStore[*types.Data](t) + headerStoreMock := NewMockHeightStore[*types.SignedHeader](t) + dataStoreMock := NewMockHeightStore[*types.Data](t) cfg := config.Config{ RootDir: t.TempDir(), @@ -136,9 +135,9 @@ func TestP2PHandler_ProcessHeight_EmitsEventWhenHeaderAndDataPresent(t *testing. sig, err := p.Signer.Sign(bz) require.NoError(t, err) header.Signature = sig - payload := &types.SignedHeaderWithDAHint{SignedHeader: header} - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(5)).Return(payload, nil).Once() - p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(5)).Return(data, nil).Once() + + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(5)).Return(header, 0, nil).Once() + p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(5)).Return(data, 0, nil).Once() ch := make(chan common.DAHeightEvent, 1) err = p.Handler.ProcessHeight(ctx, 5, ch) @@ -163,9 +162,8 @@ func TestP2PHandler_ProcessHeight_SkipsWhenDataMissing(t *testing.T) { require.NoError(t, err) header.Signature = sig - payload := &types.SignedHeaderWithDAHint{SignedHeader: header} - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(7)).Return(payload, nil).Once() - p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(7)).Return(nil, errors.New("missing")).Once() + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(7)).Return(header, 0, nil).Once() + p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(7)).Return(nil, 0, errors.New("missing")).Once() ch := make(chan common.DAHeightEvent, 1) err = p.Handler.ProcessHeight(ctx, 7, ch) @@ -178,7 +176,7 @@ func TestP2PHandler_ProcessHeight_SkipsWhenHeaderMissing(t *testing.T) { p := setupP2P(t) ctx := context.Background() - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(9)).Return(nil, errors.New("missing")).Once() + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(9)).Return(nil, 0, errors.New("missing")).Once() ch := make(chan common.DAHeightEvent, 1) err := p.Handler.ProcessHeight(ctx, 9, ch) @@ -199,8 +197,7 @@ func TestP2PHandler_ProcessHeight_SkipsOnProposerMismatch(t *testing.T) { header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 11, badAddr, pub, signer) header.DataHash = common.DataHashForEmptyTxs - payload := &types.SignedHeaderWithDAHint{SignedHeader: header} - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(11)).Return(payload, nil).Once() + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(11)).Return(header, 0, nil).Once() ch := make(chan common.DAHeightEvent, 1) err = p.Handler.ProcessHeight(ctx, 11, ch) @@ -235,9 +232,8 @@ func TestP2PHandler_ProcessedHeightSkipsPreviouslyHandledBlocks(t *testing.T) { require.NoError(t, err) header.Signature = sig - payload := &types.SignedHeaderWithDAHint{SignedHeader: header} - 
p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(6)).Return(payload, nil).Once() - p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(6)).Return(data, nil).Once() + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(6)).Return(header, 0, nil).Once() + p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(6)).Return(data, 0, nil).Once() require.NoError(t, p.Handler.ProcessHeight(ctx, 6, ch)) @@ -259,9 +255,8 @@ func TestP2PHandler_SetProcessedHeightPreventsDuplicates(t *testing.T) { require.NoError(t, err) header.Signature = sig - payload := &types.SignedHeaderWithDAHint{SignedHeader: header} - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(8)).Return(payload, nil).Once() - p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(8)).Return(data, nil).Once() + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(8)).Return(header, 0, nil).Once() + p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(8)).Return(data, 0, nil).Once() ch := make(chan common.DAHeightEvent, 1) require.NoError(t, p.Handler.ProcessHeight(ctx, 8, ch)) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index b73b8f572d..c9fba02067 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -114,7 +114,7 @@ func (s *Syncer) Start(ctx context.Context) error { // Initialize handlers s.daRetriever = NewDARetriever(s.daClient, s.cache, s.genesis, s.logger) - s.p2pHandler = NewP2PHandler(s.headerStore.XStore(), s.dataStore.XStore(), s.cache, s.genesis, s.logger) + s.p2pHandler = NewP2PHandler(s.headerStore, s.dataStore, s.cache, s.genesis, s.logger) if currentHeight, err := s.store.Height(s.ctx); err != nil { s.logger.Error().Err(err).Msg("failed to set initial processed height for p2p handler") } else { diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index 970dd0cc5c..ed9cd4c407 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -77,20 +77,12 @@ func TestSyncer_BackoffOnDAError(t *testing.T) { p2pHandler.On("SetProcessedHeight", mock.Anything).Return().Maybe() // Create mock stores for P2P - mockHeaderStore := extmocks.NewMockStore[*types.SignedHeaderWithDAHint](t) + mockHeaderStore := extmocks.NewMockStore[*types.SignedHeader](t) mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() mockDataStore := extmocks.NewMockStore[*types.Data](t) mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() - headerStore := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) - headerStore.EXPECT().Store().Return(mockHeaderStore).Maybe() - syncer.headerStore = headerStore - - dataStore := common.NewMockBroadcaster[*types.Data](t) - dataStore.EXPECT().Store().Return(mockDataStore).Maybe() - syncer.dataStore = dataStore - var callTimes []time.Time callCount := 0 @@ -173,20 +165,12 @@ func TestSyncer_BackoffResetOnSuccess(t *testing.T) { p2pHandler.On("SetProcessedHeight", mock.Anything).Return().Maybe() // Create mock stores for P2P - mockHeaderStore := extmocks.NewMockStore[*types.SignedHeaderWithDAHint](t) + mockHeaderStore := extmocks.NewMockStore[*types.SignedHeader](t) mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() mockDataStore := extmocks.NewMockStore[*types.Data](t) mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() - headerStore := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) - headerStore.EXPECT().Store().Return(mockHeaderStore).Maybe() - syncer.headerStore = headerStore 
- - dataStore := common.NewMockBroadcaster[*types.Data](t) - dataStore.EXPECT().Store().Return(mockDataStore).Maybe() - syncer.dataStore = dataStore - var callTimes []time.Time // First call - error (should trigger backoff) @@ -263,20 +247,12 @@ func TestSyncer_BackoffBehaviorIntegration(t *testing.T) { syncer.p2pHandler = p2pHandler // Create mock stores for P2P - mockHeaderStore := extmocks.NewMockStore[*types.SignedHeaderWithDAHint](t) + mockHeaderStore := extmocks.NewMockStore[*types.SignedHeader](t) mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() mockDataStore := extmocks.NewMockStore[*types.Data](t) mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() - headerStore := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) - headerStore.EXPECT().Store().Return(mockHeaderStore).Maybe() - syncer.headerStore = headerStore - - dataStore := common.NewMockBroadcaster[*types.Data](t) - dataStore.EXPECT().Store().Return(mockDataStore).Maybe() - syncer.dataStore = dataStore - var callTimes []time.Time p2pHandler.On("SetProcessedHeight", mock.Anything).Return().Maybe() @@ -350,7 +326,7 @@ func setupTestSyncer(t *testing.T, daBlockTime time.Duration) *Syncer { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t), + common.NewMockBroadcaster[*types.SignedHeader](t), common.NewMockBroadcaster[*types.Data](t), zerolog.Nop(), common.DefaultBlockOptions(), diff --git a/block/internal/syncing/syncer_benchmark_test.go b/block/internal/syncing/syncer_benchmark_test.go index 29f8e86854..e2b6f6e51f 100644 --- a/block/internal/syncing/syncer_benchmark_test.go +++ b/block/internal/syncing/syncer_benchmark_test.go @@ -153,7 +153,7 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay mockP2P := newMockp2pHandler(b) // not used directly in this benchmark path mockP2P.On("SetProcessedHeight", mock.Anything).Return().Maybe() s.p2pHandler = mockP2P - headerP2PStore := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](b) + headerP2PStore := common.NewMockBroadcaster[*types.SignedHeader](b) s.headerStore = headerP2PStore dataP2PStore := common.NewMockBroadcaster[*types.Data](b) s.dataStore = dataP2PStore diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 3fa92e4d26..36d0a92599 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -123,7 +123,7 @@ func TestSyncer_validateBlock_DataHashMismatch(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t), + common.NewMockBroadcaster[*types.SignedHeader](t), common.NewMockBroadcaster[*types.Data](t), zerolog.Nop(), common.DefaultBlockOptions(), @@ -174,7 +174,7 @@ func TestProcessHeightEvent_SyncsAndUpdatesState(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t), + common.NewMockBroadcaster[*types.SignedHeader](t), common.NewMockBroadcaster[*types.Data](t), zerolog.Nop(), common.DefaultBlockOptions(), @@ -228,7 +228,7 @@ func TestSequentialBlockSync(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t), + common.NewMockBroadcaster[*types.SignedHeader](t), common.NewMockBroadcaster[*types.Data](t), zerolog.Nop(), common.DefaultBlockOptions(), @@ -340,17 +340,14 @@ func TestSyncLoopPersistState(t *testing.T) { dummyExec := execution.NewDummyExecutor() // Create mock stores for P2P - mockHeaderStore := 
extmocks.NewMockStore[*types.SignedHeaderWithDAHint](t) + mockHeaderStore := extmocks.NewMockStore[*types.SignedHeader](t) mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() mockDataStore := extmocks.NewMockStore[*types.Data](t) mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() - mockP2PHeaderStore := common.NewMockBroadcaster[*types.SignedHeaderWithDAHint](t) - mockP2PHeaderStore.EXPECT().Store().Return(mockHeaderStore).Maybe() - + mockP2PHeaderStore := common.NewMockBroadcaster[*types.SignedHeader](t) mockP2PDataStore := common.NewMockBroadcaster[*types.Data](t) - mockP2PDataStore.EXPECT().Store().Return(mockDataStore).Maybe() errorCh := make(chan error, 1) syncerInst1 := NewSyncer( diff --git a/node/full.go b/node/full.go index 6d03a87c04..69be1d35b8 100644 --- a/node/full.go +++ b/node/full.go @@ -31,7 +31,7 @@ import ( evsync "github.com/evstack/ev-node/pkg/sync" ) -// prefixes used in KV store to separate rollkit data from execution environment data (if the same data base is reused) +// EvPrefix used in KV store to separate rollkit data from execution environment data (if the same data base is reused) var EvPrefix = "0" const ( diff --git a/pkg/rpc/client/client_test.go b/pkg/rpc/client/client_test.go index e517d5128b..05c8df4d08 100644 --- a/pkg/rpc/client/client_test.go +++ b/pkg/rpc/client/client_test.go @@ -8,6 +8,7 @@ import ( "time" goheader "github.com/celestiaorg/go-header" + "github.com/evstack/ev-node/pkg/sync" "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multiaddr" "github.com/rs/zerolog" @@ -28,8 +29,8 @@ import ( func setupTestServer( t *testing.T, mockStore *mocks.MockStore, - headerStore goheader.Store[*types.SignedHeaderWithDAHint], - dataStore goheader.Store[*types.Data], + headerStore goheader.Store[*sync.SignedHeaderWithDAHint], + dataStore goheader.Store[*sync.DataWithDAHint], mockP2P *mocks.MockP2PRPC, ) (*httptest.Server, *Client) { t.Helper() @@ -106,19 +107,19 @@ func TestClientGetMetadata(t *testing.T) { func TestClientGetP2PStoreInfo(t *testing.T) { mockStore := mocks.NewMockStore(t) mockP2P := mocks.NewMockP2PRPC(t) - headerStore := headerstoremocks.NewMockStore[*types.SignedHeaderWithDAHint](t) - dataStore := headerstoremocks.NewMockStore[*types.Data](t) + headerStore := headerstoremocks.NewMockStore[*sync.SignedHeaderWithDAHint](t) + dataStore := headerstoremocks.NewMockStore[*sync.DataWithDAHint](t) now := time.Now().UTC() - headerHead := &types.SignedHeaderWithDAHint{SignedHeader: testSignedHeader(10, now)} - headerTail := &types.SignedHeaderWithDAHint{SignedHeader: testSignedHeader(5, now.Add(-time.Minute))} + headerHead := &sync.SignedHeaderWithDAHint{Entry: testSignedHeader(10, now)} + headerTail := &sync.SignedHeaderWithDAHint{Entry: testSignedHeader(5, now.Add(-time.Minute))} headerStore.On("Height").Return(uint64(10)) headerStore.On("Head", mock.Anything).Return(headerHead, nil) headerStore.On("Tail", mock.Anything).Return(headerTail, nil) - dataHead := testData(8, now.Add(-30*time.Second)) - dataTail := testData(4, now.Add(-2*time.Minute)) + dataHead := &sync.DataWithDAHint{Entry: testData(8, now.Add(-30*time.Second))} + dataTail := &sync.DataWithDAHint{Entry: testData(4, now.Add(-2*time.Minute))} dataStore.On("Height").Return(uint64(8)) dataStore.On("Head", mock.Anything).Return(dataHead, nil) dataStore.On("Tail", mock.Anything).Return(dataTail, nil) diff --git a/pkg/rpc/server/server.go b/pkg/rpc/server/server.go index 0a8546d3ae..f113b52fb8 100644 --- a/pkg/rpc/server/server.go +++ 
b/pkg/rpc/server/server.go @@ -14,6 +14,7 @@ import ( "connectrpc.com/grpcreflect" goheader "github.com/celestiaorg/go-header" coreda "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/pkg/sync" ds "github.com/ipfs/go-datastore" "github.com/rs/zerolog" "golang.org/x/net/http2" @@ -34,16 +35,16 @@ var _ rpc.StoreServiceHandler = (*StoreServer)(nil) // StoreServer implements the StoreService defined in the proto file type StoreServer struct { store store.Store - headerStore goheader.Store[*types.SignedHeaderWithDAHint] - dataStore goheader.Store[*types.DataWithDAHint] + headerStore goheader.Store[*sync.SignedHeaderWithDAHint] + dataStore goheader.Store[*sync.DataWithDAHint] logger zerolog.Logger } // NewStoreServer creates a new StoreServer instance func NewStoreServer( store store.Store, - headerStore goheader.Store[*types.SignedHeaderWithDAHint], - dataStore goheader.Store[*types.DataWithDAHint], + headerStore goheader.Store[*sync.SignedHeaderWithDAHint], + dataStore goheader.Store[*sync.DataWithDAHint], logger zerolog.Logger, ) *StoreServer { return &StoreServer{ @@ -370,8 +371,8 @@ func (p *P2PServer) GetNetInfo( // NewServiceHandler creates a new HTTP handler for Store, P2P and Config services func NewServiceHandler( store store.Store, - headerStore goheader.Store[*types.SignedHeaderWithDAHint], - dataStore goheader.Store[*types.DataWithDAHint], + headerStore goheader.Store[*sync.SignedHeaderWithDAHint], + dataStore goheader.Store[*sync.DataWithDAHint], peerManager p2p.P2PRPC, proposerAddress []byte, logger zerolog.Logger, diff --git a/pkg/rpc/server/server_test.go b/pkg/rpc/server/server_test.go index befecd910f..3a4dbd89bd 100644 --- a/pkg/rpc/server/server_test.go +++ b/pkg/rpc/server/server_test.go @@ -11,6 +11,7 @@ import ( "time" "connectrpc.com/connect" + "github.com/evstack/ev-node/pkg/sync" ds "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multiaddr" @@ -325,8 +326,8 @@ func TestGetGenesisDaHeight_InvalidLength(t *testing.T) { func TestGetP2PStoreInfo(t *testing.T) { t.Run("returns snapshots for configured stores", func(t *testing.T) { mockStore := mocks.NewMockStore(t) - headerStore := headerstoremocks.NewMockStore[*types.SignedHeaderWithDAHint](t) - dataStore := headerstoremocks.NewMockStore[*types.Data](t) + headerStore := headerstoremocks.NewMockStore[*sync.SignedHeaderWithDAHint](t) + dataStore := headerstoremocks.NewMockStore[*sync.DataWithDAHint](t) logger := zerolog.Nop() server := NewStoreServer(mockStore, headerStore, dataStore, logger) @@ -354,10 +355,10 @@ func TestGetP2PStoreInfo(t *testing.T) { t.Run("returns error when a store edge fails", func(t *testing.T) { mockStore := mocks.NewMockStore(t) - headerStore := headerstoremocks.NewMockStore[*types.SignedHeaderWithDAHint](t) + headerStore := headerstoremocks.NewMockStore[*sync.SignedHeaderWithDAHint](t) logger := zerolog.Nop() headerStore.On("Height").Return(uint64(0)) - headerStore.On("Head", mock.Anything).Return((*types.SignedHeaderWithDAHint)(nil), fmt.Errorf("boom")) + headerStore.On("Head", mock.Anything).Return((*sync.SignedHeaderWithDAHint)(nil), fmt.Errorf("boom")) server := NewStoreServer(mockStore, headerStore, nil, logger) resp, err := server.GetP2PStoreInfo(context.Background(), connect.NewRequest(&emptypb.Empty{})) @@ -627,8 +628,8 @@ func TestHealthReadyEndpoint(t *testing.T) { }) } -func makeTestSignedHeader(height uint64, ts time.Time) *types.SignedHeaderWithDAHint { - return &types.SignedHeaderWithDAHint{SignedHeader: 
&types.SignedHeader{ +func makeTestSignedHeader(height uint64, ts time.Time) *sync.SignedHeaderWithDAHint { + return &sync.SignedHeaderWithDAHint{Entry: &types.SignedHeader{ Header: types.Header{ BaseHeader: types.BaseHeader{ Height: height, @@ -643,12 +644,13 @@ func makeTestSignedHeader(height uint64, ts time.Time) *types.SignedHeaderWithDA } } -func makeTestData(height uint64, ts time.Time) *types.Data { - return &types.Data{ +func makeTestData(height uint64, ts time.Time) *sync.DataWithDAHint { + return &sync.DataWithDAHint{Entry: &types.Data{ Metadata: &types.Metadata{ ChainID: "test-chain", Height: height, Time: uint64(ts.UnixNano()), }, + }, } } diff --git a/types/da_hint_container.go b/pkg/sync/da_hint_container.go similarity index 86% rename from types/da_hint_container.go rename to pkg/sync/da_hint_container.go index 9e905c6296..5d904d885d 100644 --- a/types/da_hint_container.go +++ b/pkg/sync/da_hint_container.go @@ -1,4 +1,4 @@ -package types +package sync import ( "encoding/binary" @@ -6,8 +6,12 @@ import ( "time" "github.com/celestiaorg/go-header" + "github.com/evstack/ev-node/types" ) +type SignedHeaderWithDAHint = DAHeightHintContainer[*types.SignedHeader] +type DataWithDAHint = DAHeightHintContainer[*types.Data] + type DAHeightHintContainer[H header.Header[H]] struct { Entry H DAHeightHint uint64 @@ -49,6 +53,9 @@ func (sh *DAHeightHintContainer[H]) Verify(untrstH *DAHeightHintContainer[H]) er func (s *DAHeightHintContainer[H]) SetDAHint(daHeight uint64) { s.DAHeightHint = daHeight } +func (s *DAHeightHintContainer[H]) DAHint() uint64 { + return s.DAHeightHint +} func (s *DAHeightHintContainer[H]) IsZero() bool { return s == nil diff --git a/pkg/sync/sync_service.go b/pkg/sync/sync_service.go index 3ca112e983..9a45ab4c42 100644 --- a/pkg/sync/sync_service.go +++ b/pkg/sync/sync_service.go @@ -39,18 +39,19 @@ const ninetyNineYears = 99 * 365 * 24 * time.Hour type EntityWithDAHint[H any] interface { header.Header[H] SetDAHint(daHeight uint64) + DAHint() uint64 } -// DataSyncService is the P2P Sync Service for blocks. -type DataSyncService = SyncService[*types.DataWithDAHint] - // HeaderSyncService is the P2P Sync Service for headers. -type HeaderSyncService = SyncService[*types.SignedHeaderWithDAHint] +type HeaderSyncService = SyncService[*types.SignedHeader] + +// DataSyncService is the P2P Sync Service for blocks. +type DataSyncService = SyncService[*types.Data] // SyncService is the P2P Sync Service for blocks and headers. // // Uses the go-header library for handling all P2P logic. 
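+//
+// Note: the type parameter V is now the bare payload type (*types.SignedHeader
+// or *types.Data); each entry is wrapped internally in a DAHeightHintContainer[V]
+// so the P2P store can carry an optional DA height hint alongside every header
+// or data item.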
-type SyncService[H EntityWithDAHint[H]] struct { +type SyncService[V header.Header[V]] struct { conf config.Config logger zerolog.Logger syncType syncType @@ -59,13 +60,13 @@ type SyncService[H EntityWithDAHint[H]] struct { p2p *p2p.Client - ex *goheaderp2p.Exchange[H] - sub *goheaderp2p.Subscriber[H] - p2pServer *goheaderp2p.ExchangeServer[H] - store *goheaderstore.Store[H] - syncer *goheadersync.Syncer[H] + ex *goheaderp2p.Exchange[*DAHeightHintContainer[V]] + sub *goheaderp2p.Subscriber[*DAHeightHintContainer[V]] + p2pServer *goheaderp2p.ExchangeServer[*DAHeightHintContainer[V]] + store *goheaderstore.Store[*DAHeightHintContainer[V]] + syncer *goheadersync.Syncer[*DAHeightHintContainer[V]] syncerStatus *SyncerStatus - topicSubscription header.Subscription[H] + topicSubscription header.Subscription[*DAHeightHintContainer[V]] storeInitialized atomic.Bool } @@ -77,7 +78,7 @@ func NewDataSyncService( p2p *p2p.Client, logger zerolog.Logger, ) (*DataSyncService, error) { - return newSyncService[*types.DataWithDAHint](store, dataSync, conf, genesis, p2p, logger) + return newSyncService[*types.Data](store, dataSync, conf, genesis, p2p, logger) } // NewHeaderSyncService returns a new HeaderSyncService. @@ -88,22 +89,22 @@ func NewHeaderSyncService( p2p *p2p.Client, logger zerolog.Logger, ) (*HeaderSyncService, error) { - return newSyncService[*types.SignedHeaderWithDAHint](store, headerSync, conf, genesis, p2p, logger) + return newSyncService[*types.SignedHeader](store, headerSync, conf, genesis, p2p, logger) } -func newSyncService[H EntityWithDAHint[H]]( +func newSyncService[V header.Header[V]]( store ds.Batching, syncType syncType, conf config.Config, genesis genesis.Genesis, p2p *p2p.Client, logger zerolog.Logger, -) (*SyncService[H], error) { +) (*SyncService[V], error) { if p2p == nil { return nil, errors.New("p2p client cannot be nil") } - ss, err := goheaderstore.NewStore[H]( + ss, err := goheaderstore.NewStore[*DAHeightHintContainer[V]]( store, goheaderstore.WithStorePrefix(string(syncType)), goheaderstore.WithMetrics(), @@ -112,7 +113,7 @@ func newSyncService[H EntityWithDAHint[H]]( return nil, fmt.Errorf("failed to initialize the %s store: %w", syncType, err) } - svc := &SyncService[H]{ + svc := &SyncService[V]{ conf: conf, genesis: genesis, p2p: p2p, @@ -126,21 +127,22 @@ func newSyncService[H EntityWithDAHint[H]]( } // Store returns the store of the SyncService -func (syncService *SyncService[H]) Store() header.Store[H] { +func (syncService *SyncService[V]) Store() header.Store[*DAHeightHintContainer[V]] { return syncService.store } // WriteToStoreAndBroadcast initializes store if needed and broadcasts provided header or block. // Note: Only returns an error in case store can't be initialized. Logs error if there's one while broadcasting. 
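+//
+// Callers pass the bare header or data; a DA height hint can be attached later
+// via AppendDAHint. Minimal usage sketch (names illustrative):
+//
+//	if err := headerSyncService.WriteToStoreAndBroadcast(ctx, signedHeader); err != nil {
+//		return err
+//	}
+//	// later, once the DA height for this header is known:
+//	_ = headerSyncService.AppendDAHint(ctx, daHeight, signedHeader.Hash())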
-func (syncService *SyncService[H]) WriteToStoreAndBroadcast(ctx context.Context, headerOrData H, opts ...pubsub.PubOpt) error { +func (syncService *SyncService[V]) WriteToStoreAndBroadcast(ctx context.Context, payload V, opts ...pubsub.PubOpt) error { if syncService.genesis.InitialHeight == 0 { return fmt.Errorf("invalid initial height; cannot be zero") } - if headerOrData.IsZero() { + if payload.IsZero() { return fmt.Errorf("empty header/data cannot write to store or broadcast") } + headerOrData := &DAHeightHintContainer[V]{Entry: payload} storeInitialized := false if syncService.storeInitialized.CompareAndSwap(false, true) { var err error @@ -179,11 +181,14 @@ func (syncService *SyncService[H]) WriteToStoreAndBroadcast(ctx context.Context, return nil } -func (s *SyncService[H]) AppendDAHint(ctx context.Context, daHeight uint64, hashes ...types.Hash) error { - entries := make([]H, 0, len(hashes)) +func (s *SyncService[V]) AppendDAHint(ctx context.Context, daHeight uint64, hashes ...types.Hash) error { + entries := make([]*DAHeightHintContainer[V], 0, len(hashes)) for _, h := range hashes { v, err := s.store.Get(ctx, h) - if err != nil && !errors.Is(err, header.ErrNotFound) { + if err != nil { + if errors.Is(err, header.ErrNotFound) { + continue + } return err } v.SetDAHint(daHeight) @@ -192,8 +197,17 @@ func (s *SyncService[H]) AppendDAHint(ctx context.Context, daHeight uint64, hash return s.store.Append(ctx, entries...) } +func (s *SyncService[V]) GetByHeight(ctx context.Context, height uint64) (V, uint64, error) { + c, err := s.store.GetByHeight(ctx, height) + if err != nil { + var zero V + return zero, 0, err + } + return c.Entry, c.DAHint(), nil +} + // Start is a part of Service interface. -func (syncService *SyncService[H]) Start(ctx context.Context) error { +func (syncService *SyncService[V]) Start(ctx context.Context) error { // setup P2P infrastructure, but don't start Subscriber yet. peerIDs, err := syncService.setupP2PInfrastructure(ctx) if err != nil { @@ -201,7 +215,7 @@ func (syncService *SyncService[H]) Start(ctx context.Context) error { } // create syncer, must be before initFromP2PWithRetry which calls startSyncer. - if syncService.syncer, err = newSyncer( + if syncService.syncer, err = newSyncer[V]( syncService.ex, syncService.store, syncService.sub, @@ -244,7 +258,7 @@ func (syncService *SyncService[H]) startSyncer(ctx context.Context) error { // initStore initializes the store with the given initial header. // it is a no-op if the store is already initialized. // Returns true when the store was initialized by this call. -func (syncService *SyncService[H]) initStore(ctx context.Context, initial H) (bool, error) { +func (syncService *SyncService[V]) initStore(ctx context.Context, initial *DAHeightHintContainer[V]) (bool, error) { if initial.IsZero() { return false, errors.New("failed to initialize the store") } @@ -268,12 +282,12 @@ func (syncService *SyncService[H]) initStore(ctx context.Context, initial H) (bo // setupP2PInfrastructure sets up the P2P infrastructure (Exchange, ExchangeServer, Store) // but does not start the Subscriber. Returns peer IDs for later use. 
-func (syncService *SyncService[H]) setupP2PInfrastructure(ctx context.Context) ([]peer.ID, error) { +func (syncService *SyncService[V]) setupP2PInfrastructure(ctx context.Context) ([]peer.ID, error) { ps := syncService.p2p.PubSub() var err error // Create subscriber but DON'T start it yet - syncService.sub, err = goheaderp2p.NewSubscriber[H]( + syncService.sub, err = goheaderp2p.NewSubscriber[*DAHeightHintContainer[V]]( ps, pubsub.DefaultMsgIdFn, goheaderp2p.WithSubscriberNetworkID(syncService.getChainID()), @@ -302,7 +316,7 @@ func (syncService *SyncService[H]) setupP2PInfrastructure(ctx context.Context) ( peerIDs := syncService.getPeerIDs() - if syncService.ex, err = newP2PExchange[H](syncService.p2p.Host(), peerIDs, networkID, syncService.genesis.ChainID, syncService.p2p.ConnectionGater()); err != nil { + if syncService.ex, err = newP2PExchange[*DAHeightHintContainer[V]](syncService.p2p.Host(), peerIDs, networkID, syncService.genesis.ChainID, syncService.p2p.ConnectionGater()); err != nil { return nil, fmt.Errorf("error while creating exchange: %w", err) } if err := syncService.ex.Start(ctx); err != nil { @@ -330,14 +344,14 @@ func (syncService *SyncService[H]) startSubscriber(ctx context.Context) error { // It inspects the local store to determine the first height to request: // - when the store already contains items, it reuses the latest height as the starting point; // - otherwise, it falls back to the configured genesis height. -func (syncService *SyncService[H]) initFromP2PWithRetry(ctx context.Context, peerIDs []peer.ID) error { +func (syncService *SyncService[V]) initFromP2PWithRetry(ctx context.Context, peerIDs []peer.ID) error { if len(peerIDs) == 0 { return nil } tryInit := func(ctx context.Context) (bool, error) { var ( - trusted H + trusted *DAHeightHintContainer[V] err error heightToQuery uint64 ) @@ -401,7 +415,7 @@ func (syncService *SyncService[H]) initFromP2PWithRetry(ctx context.Context, pee // Stop is a part of Service interface. // // `store` is closed last because it's used by other services. -func (syncService *SyncService[H]) Stop(ctx context.Context) error { +func (syncService *SyncService[V]) Stop(ctx context.Context) error { // unsubscribe from topic first so that sub.Stop() does not fail syncService.topicSubscription.Cancel() err := errors.Join( @@ -447,17 +461,17 @@ func newP2PExchange[H header.Header[H]]( // newSyncer constructs new Syncer for headers/blocks. func newSyncer[H header.Header[H]]( - ex header.Exchange[H], - store header.Store[H], - sub header.Subscriber[H], + ex header.Exchange[*DAHeightHintContainer[H]], + store header.Store[*DAHeightHintContainer[H]], + sub header.Subscriber[*DAHeightHintContainer[H]], opts []goheadersync.Option, -) (*goheadersync.Syncer[H], error) { +) (*goheadersync.Syncer[*DAHeightHintContainer[H]], error) { opts = append(opts, goheadersync.WithMetrics(), goheadersync.WithPruningWindow(ninetyNineYears), goheadersync.WithTrustingPeriod(ninetyNineYears), ) - return goheadersync.NewSyncer(ex, store, sub, opts...) + return goheadersync.NewSyncer[*DAHeightHintContainer[H]](ex, store, sub, opts...) 
} func (syncService *SyncService[H]) getNetworkID(network string) string { diff --git a/pkg/sync/sync_service_test.go b/pkg/sync/sync_service_test.go index b8063e874b..99d6ed8a0e 100644 --- a/pkg/sync/sync_service_test.go +++ b/pkg/sync/sync_service_test.go @@ -167,10 +167,10 @@ func TestHeaderSyncServiceInitFromHigherHeight(t *testing.T) { require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, signedHeader)) } -func nextHeader(t *testing.T, previousHeader *types.SignedHeaderWithDAHint, chainID string, noopSigner signer.Signer) *types.SignedHeaderWithDAHint { +func nextHeader(t *testing.T, previousHeader *types.SignedHeader, chainID string, noopSigner signer.Signer) *types.SignedHeader { newSignedHeader := &types.SignedHeader{ - Header: types.GetRandomNextHeader(previousHeader.Entry.Header, chainID), - Signer: previousHeader.Entry.Signer, + Header: types.GetRandomNextHeader(previousHeader.Header, chainID), + Signer: previousHeader.Signer, } b, err := newSignedHeader.Header.MarshalBinary() require.NoError(t, err) @@ -178,7 +178,7 @@ func nextHeader(t *testing.T, previousHeader *types.SignedHeaderWithDAHint, chai require.NoError(t, err) newSignedHeader.Signature = signature require.NoError(t, newSignedHeader.Validate()) - return &types.SignedHeaderWithDAHint{Entry: newSignedHeader} + return newSignedHeader } func bytesN(r *rand.Rand, n int) []byte { diff --git a/types/signed_header.go b/types/signed_header.go index c63ecce3ac..ffacbc847a 100644 --- a/types/signed_header.go +++ b/types/signed_header.go @@ -14,9 +14,6 @@ var ( ErrLastHeaderHashMismatch = errors.New("last header hash mismatch") ) -type SignedHeaderWithDAHint = DAHeightHintContainer[*SignedHeader] -type DataWithDAHint = DAHeightHintContainer[*Data] - var _ header.Header[*SignedHeader] = &SignedHeader{} // SignedHeader combines Header and its signature. diff --git a/types/utils.go b/types/utils.go index fe59fb20dc..d8c2527521 100644 --- a/types/utils.go +++ b/types/utils.go @@ -68,7 +68,7 @@ func GenerateRandomBlockCustomWithAppHash(config *BlockConfig, chainID string, a } if config.ProposerAddr != nil { - signedHeader.Entry.ProposerAddress = config.ProposerAddr + signedHeader.ProposerAddress = config.ProposerAddr } data.Metadata = &Metadata{ @@ -78,7 +78,7 @@ func GenerateRandomBlockCustomWithAppHash(config *BlockConfig, chainID string, a Time: uint64(signedHeader.Time().UnixNano()), } - return signedHeader.Entry, data, config.PrivKey + return signedHeader, data, config.PrivKey } // GenerateRandomBlockCustom returns a block with random data and the given height, transactions, privateKey and proposer address. @@ -150,11 +150,11 @@ func GetRandomSignedHeader(chainID string) (*SignedHeader, crypto.PrivKey, error if err != nil { return nil, nil, err } - return signedHeader.Entry, pk, nil + return signedHeader, pk, nil } // GetRandomSignedHeaderCustom creates a signed header based on the provided HeaderConfig. 
-func GetRandomSignedHeaderCustom(config *HeaderConfig, chainID string) (*SignedHeaderWithDAHint, error) { +func GetRandomSignedHeaderCustom(config *HeaderConfig, chainID string) (*SignedHeader, error) { pk, err := config.Signer.GetPublic() if err != nil { return nil, err @@ -183,7 +183,7 @@ func GetRandomSignedHeaderCustom(config *HeaderConfig, chainID string) (*SignedH return nil, err } signedHeader.Signature = signature - return &SignedHeaderWithDAHint{Entry: signedHeader}, nil + return signedHeader, nil } // GetRandomNextSignedHeader returns a signed header with random data and height of +1 from From aaacbde53b5105734baa668aa5b6e403eb701594 Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Fri, 28 Nov 2025 17:20:08 +0100 Subject: [PATCH 05/13] Async DA pull --- block/internal/common/expected_interfaces.go | 21 --- block/internal/syncing/async_da_retriever.go | 111 +++++++++++ .../syncing/async_da_retriever_test.go | 141 ++++++++++++++ block/internal/syncing/syncer.go | 34 ++-- block/internal/syncing/syncer_test.go | 76 ++++++++ pkg/sync/sync_service_test.go | 175 ++++++++++++++++++ 6 files changed, 514 insertions(+), 44 deletions(-) create mode 100644 block/internal/syncing/async_da_retriever.go create mode 100644 block/internal/syncing/async_da_retriever_test.go diff --git a/block/internal/common/expected_interfaces.go b/block/internal/common/expected_interfaces.go index c880a5b543..0eeef6ab31 100644 --- a/block/internal/common/expected_interfaces.go +++ b/block/internal/common/expected_interfaces.go @@ -20,24 +20,3 @@ type Broadcaster[H header.Header[H]] interface { AppendDAHint(ctx context.Context, daHeight uint64, hashes ...types.Hash) error GetByHeight(ctx context.Context, height uint64) (H, uint64, error) } - -// -//// Decorator to access the payload type without the container -//type Decorator[H header.Header[H]] struct { -// nested Broadcaster[*sync.DAHeightHintContainer[H]] -//} -// -//func NewDecorator[H header.Header[H]](nested Broadcaster[*sync.DAHeightHintContainer[H]]) Decorator[H] { -// return Decorator[H]{nested: nested} -//} -// -//func (d Decorator[H]) WriteToStoreAndBroadcast(ctx context.Context, payload H, opts ...pubsub.PubOpt) error { -// return d.nested.WriteToStoreAndBroadcast(ctx, &sync.DAHeightHintContainer[H]{Entry: payload}, opts...) -//} -// -//func (d Decorator[H]) AppendDAHint(ctx context.Context, daHeight uint64, hashes ...types.Hash) error { -// return d.nested.AppendDAHint(ctx, daHeight, hashes...) -//} -//func (d Decorator[H]) GetByHeight(ctx context.Context, height uint64) (H, error) { -// panic("not implemented") -//} diff --git a/block/internal/syncing/async_da_retriever.go b/block/internal/syncing/async_da_retriever.go new file mode 100644 index 0000000000..7c79a37512 --- /dev/null +++ b/block/internal/syncing/async_da_retriever.go @@ -0,0 +1,111 @@ +package syncing + +import ( + "context" + "sync" + + "github.com/evstack/ev-node/block/internal/common" + "github.com/rs/zerolog" +) + +// AsyncDARetriever handles concurrent DA retrieval operations. +type AsyncDARetriever struct { + retriever DARetriever + resultCh chan<- common.DAHeightEvent + workCh chan uint64 + inFlight map[uint64]struct{} + mu sync.Mutex + logger zerolog.Logger + wg sync.WaitGroup + ctx context.Context + cancel context.CancelFunc +} + +// NewAsyncDARetriever creates a new AsyncDARetriever. 
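+//
+// Minimal wiring sketch (illustrative; in this package the syncer supplies its
+// own daRetriever, heightInCh and logger in Syncer.Start):
+//
+//	async := NewAsyncDARetriever(daRetriever, heightInCh, logger)
+//	async.Start(ctx)
+//	defer async.Stop()
+//	async.RequestRetrieval(daHeightHint) // non-blocking; duplicate in-flight heights are ignored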
+func NewAsyncDARetriever( + retriever DARetriever, + resultCh chan<- common.DAHeightEvent, + logger zerolog.Logger, +) *AsyncDARetriever { + return &AsyncDARetriever{ + retriever: retriever, + resultCh: resultCh, + workCh: make(chan uint64, 100), // Buffer size 100 + inFlight: make(map[uint64]struct{}), + logger: logger.With().Str("component", "async_da_retriever").Logger(), + } +} + +// Start starts the worker pool. +func (r *AsyncDARetriever) Start(ctx context.Context) { + r.ctx, r.cancel = context.WithCancel(ctx) + // Start 5 workers + for i := 0; i < 5; i++ { + r.wg.Add(1) + go r.worker() + } + r.logger.Info().Msg("AsyncDARetriever started") +} + +// Stop stops the worker pool. +func (r *AsyncDARetriever) Stop() { + if r.cancel != nil { + r.cancel() + } + r.wg.Wait() + r.logger.Info().Msg("AsyncDARetriever stopped") +} + +// RequestRetrieval requests a DA retrieval for the given height. +// It is non-blocking and idempotent. +func (r *AsyncDARetriever) RequestRetrieval(height uint64) { + r.mu.Lock() + defer r.mu.Unlock() + + if _, exists := r.inFlight[height]; exists { + return + } + + select { + case r.workCh <- height: + r.inFlight[height] = struct{}{} + r.logger.Debug().Uint64("height", height).Msg("queued DA retrieval request") + default: + r.logger.Debug().Uint64("height", height).Msg("DA retrieval worker pool full, dropping request") + } +} + +func (r *AsyncDARetriever) worker() { + defer r.wg.Done() + + for { + select { + case <-r.ctx.Done(): + return + case height := <-r.workCh: + r.processRetrieval(height) + } + } +} + +func (r *AsyncDARetriever) processRetrieval(height uint64) { + defer func() { + r.mu.Lock() + delete(r.inFlight, height) + r.mu.Unlock() + }() + + events, err := r.retriever.RetrieveFromDA(r.ctx, height) + if err != nil { + r.logger.Debug().Err(err).Uint64("height", height).Msg("async DA retrieval failed") + return + } + + for _, event := range events { + select { + case r.resultCh <- event: + case <-r.ctx.Done(): + return + } + } +} diff --git a/block/internal/syncing/async_da_retriever_test.go b/block/internal/syncing/async_da_retriever_test.go new file mode 100644 index 0000000000..bce1267a0f --- /dev/null +++ b/block/internal/syncing/async_da_retriever_test.go @@ -0,0 +1,141 @@ +package syncing + +import ( + "context" + "testing" + "time" + + "github.com/evstack/ev-node/block/internal/common" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestAsyncDARetriever_RequestRetrieval(t *testing.T) { + logger := zerolog.Nop() + mockRetriever := NewMockDARetriever(t) + resultCh := make(chan common.DAHeightEvent, 10) + + asyncRetriever := NewAsyncDARetriever(mockRetriever, resultCh, logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + asyncRetriever.Start(ctx) + defer asyncRetriever.Stop() + + // 1. Test successful retrieval + height1 := uint64(100) + mockRetriever.EXPECT().RetrieveFromDA(mock.Anything, height1).Return([]common.DAHeightEvent{{DaHeight: height1}}, nil).Once() + + asyncRetriever.RequestRetrieval(height1) + + select { + case event := <-resultCh: + assert.Equal(t, height1, event.DaHeight) + case <-time.After(1 * time.Second): + t.Fatal("timeout waiting for result") + } + + // 2. 
Test deduplication (idempotency) + // We'll block the retriever to simulate a slow request, then send multiple requests for the same height + height2 := uint64(200) + + // Create a channel to signal when the mock is called + calledCh := make(chan struct{}) + // Create a channel to unblock the mock + unblockCh := make(chan struct{}) + + mockRetriever.EXPECT().RetrieveFromDA(mock.Anything, height2).RunAndReturn(func(ctx context.Context, h uint64) ([]common.DAHeightEvent, error) { + close(calledCh) + <-unblockCh + return []common.DAHeightEvent{{DaHeight: h}}, nil + }).Once() // Should be called only once despite multiple requests + + // Send first request + asyncRetriever.RequestRetrieval(height2) + + // Wait for the worker to pick it up + select { + case <-calledCh: + case <-time.After(1 * time.Second): + t.Fatal("timeout waiting for retriever call") + } + + // Send duplicate requests while the first one is still in flight + asyncRetriever.RequestRetrieval(height2) + asyncRetriever.RequestRetrieval(height2) + + // Unblock the worker + close(unblockCh) + + // We should receive exactly one result + select { + case event := <-resultCh: + assert.Equal(t, height2, event.DaHeight) + case <-time.After(1 * time.Second): + t.Fatal("timeout waiting for result") + } + + // Ensure no more results come through + select { + case <-resultCh: + t.Fatal("received duplicate result") + default: + } +} + +func TestAsyncDARetriever_WorkerPoolLimit(t *testing.T) { + logger := zerolog.Nop() + mockRetriever := NewMockDARetriever(t) + resultCh := make(chan common.DAHeightEvent, 100) + + asyncRetriever := NewAsyncDARetriever(mockRetriever, resultCh, logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + asyncRetriever.Start(ctx) + defer asyncRetriever.Stop() + + // We have 5 workers. We'll block them all. + unblockCh := make(chan struct{}) + + // Expect 5 calls that block + for i := 0; i < 5; i++ { + h := uint64(1000 + i) + mockRetriever.EXPECT().RetrieveFromDA(mock.Anything, h).RunAndReturn(func(ctx context.Context, h uint64) ([]common.DAHeightEvent, error) { + <-unblockCh + return []common.DAHeightEvent{{DaHeight: h}}, nil + }).Once() + asyncRetriever.RequestRetrieval(h) + } + + // Give workers time to pick up tasks + time.Sleep(100 * time.Millisecond) + + // Now send a 6th request. It should be queued but not processed yet. 
+ height6 := uint64(1005) + processed6 := make(chan struct{}) + mockRetriever.EXPECT().RetrieveFromDA(mock.Anything, height6).RunAndReturn(func(ctx context.Context, h uint64) ([]common.DAHeightEvent, error) { + close(processed6) + return []common.DAHeightEvent{{DaHeight: h}}, nil + }).Once() + + asyncRetriever.RequestRetrieval(height6) + + // Ensure 6th request is NOT processed yet + select { + case <-processed6: + t.Fatal("6th request processed too early") + default: + } + + // Unblock workers + close(unblockCh) + + // Now 6th request should be processed + select { + case <-processed6: + case <-time.After(1 * time.Second): + t.Fatal("timeout waiting for 6th request") + } +} diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index c9fba02067..501d2aac06 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -68,6 +68,9 @@ type Syncer struct { // P2P wait coordination p2pWaitState atomic.Value // stores p2pWaitState + + // Async DA retriever + asyncDARetriever *AsyncDARetriever } // NewSyncer creates a new block syncer @@ -114,6 +117,9 @@ func (s *Syncer) Start(ctx context.Context) error { // Initialize handlers s.daRetriever = NewDARetriever(s.daClient, s.cache, s.genesis, s.logger) + s.asyncDARetriever = NewAsyncDARetriever(s.daRetriever, s.heightInCh, s.logger) + s.asyncDARetriever.Start(s.ctx) + s.p2pHandler = NewP2PHandler(s.headerStore, s.dataStore, s.cache, s.genesis, s.logger) if currentHeight, err := s.store.Height(s.ctx); err != nil { s.logger.Error().Err(err).Msg("failed to set initial processed height for p2p handler") @@ -144,6 +150,9 @@ func (s *Syncer) Stop() error { if s.cancel != nil { s.cancel() } + if s.asyncDARetriever != nil { + s.asyncDARetriever.Stop() + } s.cancelP2PWait(0) s.wg.Wait() s.logger.Info().Msg("syncer stopped") @@ -466,29 +475,8 @@ func (s *Syncer) processHeightEvent(event *common.DAHeightEvent) { Uint64("da_height_hint", daHeightHint). Msg("P2P event with DA height hint, triggering targeted DA retrieval") - // Trigger targeted DA retrieval in background - go func() { - targetEvents, err := s.daRetriever.RetrieveFromDA(s.ctx, daHeightHint) - if err != nil { - s.logger.Debug(). - Err(err). - Uint64("da_height", daHeightHint). 
- Msg("targeted DA retrieval failed (hint may be incorrect or DA not yet available)") - // Not a critical error - the sequential DA worker will eventually find it - return - } - - // Process retrieved events from the targeted DA height - for _, daEvent := range targetEvents { - select { - case s.heightInCh <- daEvent: - case <-s.ctx.Done(): - return - default: - s.cache.SetPendingEvent(daEvent.Header.Height(), &daEvent) - } - } - }() + // Trigger targeted DA retrieval in background via worker pool + s.asyncDARetriever.RequestRetrieval(daHeightHint) } } } diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 36d0a92599..adacd15efc 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -697,3 +697,79 @@ func TestSyncer_getHighestStoredDAHeight(t *testing.T) { highestDA = syncer.getHighestStoredDAHeight() assert.Equal(t, uint64(200), highestDA, "should return highest DA height from most recent included height") } + +func TestProcessHeightEvent_TriggersAsyncDARetrieval(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, _, _ := buildSyncTestSigner(t) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain").Return([]byte("app0"), uint64(1024), nil).Once() + + s := NewSyncer( + st, + mockExec, + nil, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock AsyncDARetriever + mockRetriever := NewMockDARetriever(t) + asyncRetriever := NewAsyncDARetriever(mockRetriever, s.heightInCh, zerolog.Nop()) + // We don't start the async retriever to avoid race conditions in test, + // we just want to verify RequestRetrieval queues the request. + // However, RequestRetrieval writes to a channel, so we need a consumer or a buffered channel. + // The workCh is buffered (100), so we are good. + s.asyncDARetriever = asyncRetriever + + // Create event with DA height hint + evt := common.DAHeightEvent{ + Header: &types.SignedHeader{Header: types.Header{BaseHeader: types.BaseHeader{ChainID: "c", Height: 2}}}, + Data: &types.Data{Metadata: &types.Metadata{ChainID: "c", Height: 2}}, + Source: common.SourceP2P, + DaHeightHints: [2]uint64{100, 100}, + } + + // Current height is 0 (from init), event height is 2. + // processHeightEvent checks: + // 1. height <= currentHeight (2 <= 0 -> false) + // 2. height != currentHeight+1 (2 != 1 -> true) -> stores as pending event + + // We need to simulate height 1 being processed first so height 2 is "next" + // OR we can just test that it DOES NOT trigger DA retrieval if it's pending. + // Wait, the logic for DA retrieval is BEFORE the "next block" check? + // Let's check syncer.go... + // Yes, "If this is a P2P event with a DA height hint, trigger targeted DA retrieval" block is AFTER "If this is not the next block in sequence... return" + + // So we need to be at height 1 to process height 2. + // Let's set the store height to 1. 
+ batch, err := st.NewBatch(context.Background()) + require.NoError(t, err) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + s.processHeightEvent(&evt) + + // Verify that the request was queued in the async retriever + select { + case h := <-asyncRetriever.workCh: + assert.Equal(t, uint64(100), h) + default: + t.Fatal("expected DA retrieval request to be queued") + } +} diff --git a/pkg/sync/sync_service_test.go b/pkg/sync/sync_service_test.go index 99d6ed8a0e..b0e244a95f 100644 --- a/pkg/sync/sync_service_test.go +++ b/pkg/sync/sync_service_test.go @@ -167,6 +167,181 @@ func TestHeaderSyncServiceInitFromHigherHeight(t *testing.T) { require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, signedHeader)) } +func TestDAHintStorageHeader(t *testing.T) { + mainKV := sync.MutexWrap(datastore.NewMapDatastore()) + pk, _, err := crypto.GenerateEd25519Key(cryptoRand.Reader) + require.NoError(t, err) + noopSigner, err := noop.NewNoopSigner(pk) + require.NoError(t, err) + rnd := rand.New(rand.NewSource(1)) // nolint:gosec // test code only + mn := mocknet.New() + + chainId := "test-chain-id" + + proposerAddr := []byte("test") + genesisDoc := genesispkg.Genesis{ + ChainID: chainId, + StartTime: time.Now(), + InitialHeight: 1, + ProposerAddress: proposerAddr, + } + conf := config.DefaultConfig() + conf.RootDir = t.TempDir() + nodeKey, err := key.LoadOrGenNodeKey(filepath.Dir(conf.ConfigPath())) + require.NoError(t, err) + logger := zerolog.Nop() + priv := nodeKey.PrivKey + p2pHost, err := mn.AddPeer(priv, nil) + require.NoError(t, err) + + p2pClient, err := p2p.NewClientWithHost(conf.P2P, nodeKey.PrivKey, mainKV, chainId, logger, p2p.NopMetrics(), p2pHost) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + require.NoError(t, p2pClient.Start(ctx)) + + headerSvc, err := NewHeaderSyncService(mainKV, conf, genesisDoc, p2pClient, logger) + require.NoError(t, err) + require.NoError(t, headerSvc.Start(ctx)) + + headerConfig := types.HeaderConfig{ + Height: genesisDoc.InitialHeight, + DataHash: bytesN(rnd, 32), + AppHash: bytesN(rnd, 32), + Signer: noopSigner, + } + signedHeader, err := types.GetRandomSignedHeaderCustom(&headerConfig, genesisDoc.ChainID) + require.NoError(t, err) + require.NoError(t, signedHeader.Validate()) + + require.NoError(t, headerSvc.WriteToStoreAndBroadcast(ctx, signedHeader)) + + daHeight := uint64(100) + require.NoError(t, headerSvc.AppendDAHint(ctx, daHeight, signedHeader.Hash())) + + h, hint, err := headerSvc.GetByHeight(ctx, signedHeader.Height()) + require.NoError(t, err) + require.Equal(t, signedHeader.Hash(), h.Hash()) + require.Equal(t, daHeight, hint) + + _ = p2pClient.Close() + _ = headerSvc.Stop(ctx) + cancel() + + // Restart + h2, err := mn.AddPeer(priv, nil) + require.NoError(t, err) + p2pClient, err = p2p.NewClientWithHost(conf.P2P, nodeKey.PrivKey, mainKV, chainId, logger, p2p.NopMetrics(), h2) + require.NoError(t, err) + + ctx, cancel = context.WithCancel(t.Context()) + defer cancel() + require.NoError(t, p2pClient.Start(ctx)) + t.Cleanup(func() { _ = p2pClient.Close() }) + + headerSvc, err = NewHeaderSyncService(mainKV, conf, genesisDoc, p2pClient, logger) + require.NoError(t, err) + require.NoError(t, headerSvc.Start(ctx)) + t.Cleanup(func() { _ = headerSvc.Stop(context.Background()) }) + + h, hint, err = headerSvc.GetByHeight(ctx, signedHeader.Height()) + require.NoError(t, err) + require.Equal(t, signedHeader.Hash(), h.Hash()) + require.Equal(t, daHeight, hint) +} + +func 
TestDAHintStorageData(t *testing.T) { + mainKV := sync.MutexWrap(datastore.NewMapDatastore()) + pk, _, err := crypto.GenerateEd25519Key(cryptoRand.Reader) + require.NoError(t, err) + noopSigner, err := noop.NewNoopSigner(pk) + require.NoError(t, err) + rnd := rand.New(rand.NewSource(1)) // nolint:gosec // test code only + mn := mocknet.New() + + chainId := "test-chain-id" + + proposerAddr := []byte("test") + genesisDoc := genesispkg.Genesis{ + ChainID: chainId, + StartTime: time.Now(), + InitialHeight: 1, + ProposerAddress: proposerAddr, + } + conf := config.DefaultConfig() + conf.RootDir = t.TempDir() + nodeKey, err := key.LoadOrGenNodeKey(filepath.Dir(conf.ConfigPath())) + require.NoError(t, err) + logger := zerolog.Nop() + priv := nodeKey.PrivKey + p2pHost, err := mn.AddPeer(priv, nil) + require.NoError(t, err) + + p2pClient, err := p2p.NewClientWithHost(conf.P2P, nodeKey.PrivKey, mainKV, chainId, logger, p2p.NopMetrics(), p2pHost) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + require.NoError(t, p2pClient.Start(ctx)) + + dataSvc, err := NewDataSyncService(mainKV, conf, genesisDoc, p2pClient, logger) + require.NoError(t, err) + require.NoError(t, dataSvc.Start(ctx)) + + // Need a valid header height for data metadata + headerConfig := types.HeaderConfig{ + Height: genesisDoc.InitialHeight, + DataHash: bytesN(rnd, 32), + AppHash: bytesN(rnd, 32), + Signer: noopSigner, + } + signedHeader, err := types.GetRandomSignedHeaderCustom(&headerConfig, genesisDoc.ChainID) + require.NoError(t, err) + + data := types.Data{ + Txs: types.Txs{[]byte("tx1")}, + Metadata: &types.Metadata{ + Height: signedHeader.Height(), + }, + } + + require.NoError(t, dataSvc.WriteToStoreAndBroadcast(ctx, &data)) + + daHeight := uint64(100) + require.NoError(t, dataSvc.AppendDAHint(ctx, daHeight, data.Hash())) + + d, hint, err := dataSvc.GetByHeight(ctx, signedHeader.Height()) + require.NoError(t, err) + require.Equal(t, data.Hash(), d.Hash()) + require.Equal(t, daHeight, hint) + + _ = p2pClient.Close() + _ = dataSvc.Stop(ctx) + cancel() + + // Restart + h2, err := mn.AddPeer(priv, nil) + require.NoError(t, err) + p2pClient, err = p2p.NewClientWithHost(conf.P2P, nodeKey.PrivKey, mainKV, chainId, logger, p2p.NopMetrics(), h2) + require.NoError(t, err) + + ctx, cancel = context.WithCancel(t.Context()) + defer cancel() + require.NoError(t, p2pClient.Start(ctx)) + t.Cleanup(func() { _ = p2pClient.Close() }) + + dataSvc, err = NewDataSyncService(mainKV, conf, genesisDoc, p2pClient, logger) + require.NoError(t, err) + require.NoError(t, dataSvc.Start(ctx)) + t.Cleanup(func() { _ = dataSvc.Stop(context.Background()) }) + + d, hint, err = dataSvc.GetByHeight(ctx, signedHeader.Height()) + require.NoError(t, err) + require.Equal(t, data.Hash(), d.Hash()) + require.Equal(t, daHeight, hint) +} + func nextHeader(t *testing.T, previousHeader *types.SignedHeader, chainID string, noopSigner signer.Signer) *types.SignedHeader { newSignedHeader := &types.SignedHeader{ Header: types.GetRandomNextHeader(previousHeader.Header, chainID), From b9b5f5b930a4cdf259031516584e9211aca5d1b3 Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Mon, 1 Dec 2025 09:45:27 +0100 Subject: [PATCH 06/13] Minor cleanup --- block/internal/syncing/async_da_retriever_test.go | 4 ++-- block/internal/syncing/syncer_test.go | 12 ++++++------ test/e2e/go.sum | 2 ++ 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/block/internal/syncing/async_da_retriever_test.go 
b/block/internal/syncing/async_da_retriever_test.go index bce1267a0f..dfaecc922e 100644 --- a/block/internal/syncing/async_da_retriever_test.go +++ b/block/internal/syncing/async_da_retriever_test.go @@ -39,7 +39,7 @@ func TestAsyncDARetriever_RequestRetrieval(t *testing.T) { // 2. Test deduplication (idempotency) // We'll block the retriever to simulate a slow request, then send multiple requests for the same height height2 := uint64(200) - + // Create a channel to signal when the mock is called calledCh := make(chan struct{}) // Create a channel to unblock the mock @@ -98,7 +98,7 @@ func TestAsyncDARetriever_WorkerPoolLimit(t *testing.T) { // We have 5 workers. We'll block them all. unblockCh := make(chan struct{}) - + // Expect 5 calls that block for i := 0; i < 5; i++ { h := uint64(1000 + i) diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index adacd15efc..5132b37ba7 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -731,7 +731,7 @@ func TestProcessHeightEvent_TriggersAsyncDARetrieval(t *testing.T) { // Mock AsyncDARetriever mockRetriever := NewMockDARetriever(t) asyncRetriever := NewAsyncDARetriever(mockRetriever, s.heightInCh, zerolog.Nop()) - // We don't start the async retriever to avoid race conditions in test, + // We don't start the async retriever to avoid race conditions in test, // we just want to verify RequestRetrieval queues the request. // However, RequestRetrieval writes to a channel, so we need a consumer or a buffered channel. // The workCh is buffered (100), so we are good. @@ -739,9 +739,9 @@ func TestProcessHeightEvent_TriggersAsyncDARetrieval(t *testing.T) { // Create event with DA height hint evt := common.DAHeightEvent{ - Header: &types.SignedHeader{Header: types.Header{BaseHeader: types.BaseHeader{ChainID: "c", Height: 2}}}, - Data: &types.Data{Metadata: &types.Metadata{ChainID: "c", Height: 2}}, - Source: common.SourceP2P, + Header: &types.SignedHeader{Header: types.Header{BaseHeader: types.BaseHeader{ChainID: "c", Height: 2}}}, + Data: &types.Data{Metadata: &types.Metadata{ChainID: "c", Height: 2}}, + Source: common.SourceP2P, DaHeightHints: [2]uint64{100, 100}, } @@ -749,13 +749,13 @@ func TestProcessHeightEvent_TriggersAsyncDARetrieval(t *testing.T) { // processHeightEvent checks: // 1. height <= currentHeight (2 <= 0 -> false) // 2. height != currentHeight+1 (2 != 1 -> true) -> stores as pending event - + // We need to simulate height 1 being processed first so height 2 is "next" // OR we can just test that it DOES NOT trigger DA retrieval if it's pending. // Wait, the logic for DA retrieval is BEFORE the "next block" check? // Let's check syncer.go... // Yes, "If this is a P2P event with a DA height hint, trigger targeted DA retrieval" block is AFTER "If this is not the next block in sequence... return" - + // So we need to be at height 1 to process height 2. // Let's set the store height to 1. 
batch, err := st.NewBatch(context.Background()) diff --git a/test/e2e/go.sum b/test/e2e/go.sum index 4373442258..50914ffe49 100644 --- a/test/e2e/go.sum +++ b/test/e2e/go.sum @@ -72,6 +72,8 @@ github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/ github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= github.com/celestiaorg/go-header v0.7.4 h1:kQx3bVvKV+H2etxRi4IUuby5VQydBONx3giHFXDcZ/o= github.com/celestiaorg/go-header v0.7.4/go.mod h1:eX9iTSPthVEAlEDLux40ZT/olXPGhpxHd+mEzJeDhd0= +github.com/celestiaorg/go-libp2p-messenger v0.2.2 h1:osoUfqjss7vWTIZrrDSy953RjQz+ps/vBFE7bychLEc= +github.com/celestiaorg/go-libp2p-messenger v0.2.2/go.mod h1:oTCRV5TfdO7V/k6nkx7QjQzGrWuJbupv+0o1cgnY2i4= github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= github.com/celestiaorg/tastora v0.8.0 h1:+FWAIsP2onwwqPTGzBLIBtx8B1h9sImdx4msv2N4DsI= From c40b96b2ef0e3ea0237b4df38fa230e22190c077 Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Mon, 15 Dec 2025 10:52:10 +0100 Subject: [PATCH 07/13] Indipendent types for p2p store --- apps/evm/cmd/rollback.go | 6 +- apps/testapp/cmd/rollback.go | 6 +- block/internal/common/expected_interfaces.go | 4 +- block/internal/executing/executor.go | 8 +- .../internal/executing/executor_lazy_test.go | 8 +- .../internal/executing/executor_logic_test.go | 8 +- .../executing/executor_restart_test.go | 16 +- block/internal/executing/executor_test.go | 4 +- block/internal/syncing/p2p_handler.go | 14 +- block/internal/syncing/p2p_handler_test.go | 20 +- block/internal/syncing/syncer.go | 4 +- block/internal/syncing/syncer_backoff_test.go | 4 +- .../internal/syncing/syncer_benchmark_test.go | 4 +- .../syncing/syncer_forced_inclusion_test.go | 28 +-- block/internal/syncing/syncer_test.go | 20 +- pkg/rpc/client/client_test.go | 53 +++--- pkg/rpc/server/server.go | 13 +- pkg/rpc/server/server_test.go | 47 ++--- pkg/sync/da_hint_container.go | 21 +-- pkg/sync/sync_service.go | 52 ++--- pkg/sync/sync_service_test.go | 12 +- proto/evnode/v1/evnode.proto | 15 ++ types/binary_compatibility_test.go | 72 +++++++ types/p2p_data.go | 80 ++++++++ types/p2p_signed_header.go | 85 +++++++++ types/pb/evnode/v1/evnode.pb.go | 178 ++++++++++++++++-- 26 files changed, 586 insertions(+), 196 deletions(-) create mode 100644 types/binary_compatibility_test.go create mode 100644 types/p2p_data.go create mode 100644 types/p2p_signed_header.go diff --git a/apps/evm/cmd/rollback.go b/apps/evm/cmd/rollback.go index a8f3ed645c..25a75d8bd8 100644 --- a/apps/evm/cmd/rollback.go +++ b/apps/evm/cmd/rollback.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" - "github.com/evstack/ev-node/pkg/sync" + "github.com/evstack/ev-node/types" ds "github.com/ipfs/go-datastore" kt "github.com/ipfs/go-datastore/keytransform" "github.com/spf13/cobra" @@ -70,7 +70,7 @@ func NewRollbackCmd() *cobra.Command { } // rollback ev-node goheader state - headerStore, err := goheaderstore.NewStore[*sync.SignedHeaderWithDAHint]( + headerStore, err := goheaderstore.NewStore[*types.P2PSignedHeader]( evolveDB, goheaderstore.WithStorePrefix("headerSync"), goheaderstore.WithMetrics(), @@ -79,7 +79,7 @@ func NewRollbackCmd() *cobra.Command { return err } - dataStore, err := goheaderstore.NewStore[*sync.DataWithDAHint]( + dataStore, err := goheaderstore.NewStore[*types.P2PData]( evolveDB, goheaderstore.WithStorePrefix("dataSync"), goheaderstore.WithMetrics(), diff 
--git a/apps/testapp/cmd/rollback.go b/apps/testapp/cmd/rollback.go index 22ee216b9e..76e860e598 100644 --- a/apps/testapp/cmd/rollback.go +++ b/apps/testapp/cmd/rollback.go @@ -10,7 +10,7 @@ import ( "github.com/evstack/ev-node/node" rollcmd "github.com/evstack/ev-node/pkg/cmd" "github.com/evstack/ev-node/pkg/store" - "github.com/evstack/ev-node/pkg/sync" + "github.com/evstack/ev-node/types" ds "github.com/ipfs/go-datastore" kt "github.com/ipfs/go-datastore/keytransform" "github.com/spf13/cobra" @@ -75,7 +75,7 @@ func NewRollbackCmd() *cobra.Command { } // rollback ev-node goheader state - headerStore, err := goheaderstore.NewStore[*sync.SignedHeaderWithDAHint]( + headerStore, err := goheaderstore.NewStore[*types.P2PSignedHeader]( evolveDB, goheaderstore.WithStorePrefix("headerSync"), goheaderstore.WithMetrics(), @@ -84,7 +84,7 @@ func NewRollbackCmd() *cobra.Command { return err } - dataStore, err := goheaderstore.NewStore[*sync.DataWithDAHint]( + dataStore, err := goheaderstore.NewStore[*types.P2PData]( evolveDB, goheaderstore.WithStorePrefix("dataSync"), goheaderstore.WithMetrics(), diff --git a/block/internal/common/expected_interfaces.go b/block/internal/common/expected_interfaces.go index 0eeef6ab31..64464538a3 100644 --- a/block/internal/common/expected_interfaces.go +++ b/block/internal/common/expected_interfaces.go @@ -10,8 +10,8 @@ import ( ) type ( - HeaderP2PBroadcaster = Broadcaster[*types.SignedHeader] - DataP2PBroadcaster = Broadcaster[*types.Data] + HeaderP2PBroadcaster = Broadcaster[*types.P2PSignedHeader] + DataP2PBroadcaster = Broadcaster[*types.P2PData] ) // Broadcaster interface for P2P broadcasting diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index 42609de568..212d6c5728 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -439,8 +439,12 @@ func (e *Executor) produceBlock() error { // broadcast header and data to P2P network g, ctx := errgroup.WithContext(e.ctx) - g.Go(func() error { return e.headerBroadcaster.WriteToStoreAndBroadcast(ctx, header) }) - g.Go(func() error { return e.dataBroadcaster.WriteToStoreAndBroadcast(ctx, data) }) + g.Go(func() error { + return e.headerBroadcaster.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: *header}) + }) + g.Go(func() error { + return e.dataBroadcaster.WriteToStoreAndBroadcast(ctx, &types.P2PData{Data: *data}) + }) if err := g.Wait(); err != nil { e.logger.Error().Err(err).Msg("failed to broadcast header and/data") // don't fail block production on broadcast error diff --git a/block/internal/executing/executor_lazy_test.go b/block/internal/executing/executor_lazy_test.go index a11cf6a1c2..0821454d3f 100644 --- a/block/internal/executing/executor_lazy_test.go +++ b/block/internal/executing/executor_lazy_test.go @@ -47,9 +47,9 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db := common.NewMockBroadcaster[*types.Data](t) + db := common.NewMockBroadcaster[*types.P2PData](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( @@ -162,9 +162,9 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := 
testmocks.NewMockSequencer(t) - hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db := common.NewMockBroadcaster[*types.Data](t) + db := common.NewMockBroadcaster[*types.P2PData](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( diff --git a/block/internal/executing/executor_logic_test.go b/block/internal/executing/executor_logic_test.go index 6029186e86..2f9b29721a 100644 --- a/block/internal/executing/executor_logic_test.go +++ b/block/internal/executing/executor_logic_test.go @@ -69,9 +69,9 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { mockSeq := testmocks.NewMockSequencer(t) // Broadcasters are required by produceBlock; use generated mocks - hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db := common.NewMockBroadcaster[*types.Data](t) + db := common.NewMockBroadcaster[*types.P2PData](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( @@ -159,9 +159,9 @@ func TestPendingLimit_SkipsProduction(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - hb := common.NewMockBroadcaster[*types.SignedHeader](t) + hb := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db := common.NewMockBroadcaster[*types.Data](t) + db := common.NewMockBroadcaster[*types.P2PData](t) db.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec, err := NewExecutor( diff --git a/block/internal/executing/executor_restart_test.go b/block/internal/executing/executor_restart_test.go index 14daccddcc..8b68a1c651 100644 --- a/block/internal/executing/executor_restart_test.go +++ b/block/internal/executing/executor_restart_test.go @@ -47,9 +47,9 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { // Create first executor instance mockExec1 := testmocks.NewMockExecutor(t) mockSeq1 := testmocks.NewMockSequencer(t) - hb1 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb1 := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db1 := common.NewMockBroadcaster[*types.Data](t) + db1 := common.NewMockBroadcaster[*types.P2PData](t) db1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec1, err := NewExecutor( @@ -169,9 +169,9 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { // Create second executor instance (restart scenario) mockExec2 := testmocks.NewMockExecutor(t) mockSeq2 := testmocks.NewMockSequencer(t) - hb2 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb2 := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db2 := common.NewMockBroadcaster[*types.Data](t) + db2 := common.NewMockBroadcaster[*types.P2PData](t) db2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec2, err := NewExecutor( @@ -270,9 +270,9 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { // Create first executor and produce one 
block mockExec1 := testmocks.NewMockExecutor(t) mockSeq1 := testmocks.NewMockSequencer(t) - hb1 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb1 := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db1 := common.NewMockBroadcaster[*types.Data](t) + db1 := common.NewMockBroadcaster[*types.P2PData](t) db1.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec1, err := NewExecutor( @@ -325,9 +325,9 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { // Create second executor (restart) mockExec2 := testmocks.NewMockExecutor(t) mockSeq2 := testmocks.NewMockSequencer(t) - hb2 := common.NewMockBroadcaster[*types.SignedHeader](t) + hb2 := common.NewMockBroadcaster[*types.P2PSignedHeader](t) hb2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() - db2 := common.NewMockBroadcaster[*types.Data](t) + db2 := common.NewMockBroadcaster[*types.P2PData](t) db2.EXPECT().WriteToStoreAndBroadcast(mock.Anything, mock.Anything).Return(nil).Maybe() exec2, err := NewExecutor( diff --git a/block/internal/executing/executor_test.go b/block/internal/executing/executor_test.go index 7ef0f64e21..7e62d09170 100644 --- a/block/internal/executing/executor_test.go +++ b/block/internal/executing/executor_test.go @@ -39,8 +39,8 @@ func TestExecutor_BroadcasterIntegration(t *testing.T) { } // Create mock broadcasters - headerBroadcaster := common.NewMockBroadcaster[*types.SignedHeader](t) - dataBroadcaster := common.NewMockBroadcaster[*types.Data](t) + headerBroadcaster := common.NewMockBroadcaster[*types.P2PSignedHeader](t) + dataBroadcaster := common.NewMockBroadcaster[*types.P2PData](t) // Create executor with broadcasters executor, err := NewExecutor( diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index 18c3e6c634..72f942432d 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -32,8 +32,8 @@ type HeightStore[H header.Header[H]] interface { // The handler maintains a processedHeight to track the highest block that has been // successfully validated and sent to the syncer, preventing duplicate processing. type P2PHandler struct { - headerStore HeightStore[*types.SignedHeader] - dataStore HeightStore[*types.Data] + headerStore HeightStore[*types.P2PSignedHeader] + dataStore HeightStore[*types.P2PData] cache cache.CacheManager genesis genesis.Genesis logger zerolog.Logger @@ -43,8 +43,8 @@ type P2PHandler struct { // NewP2PHandler creates a new P2P handler. 
func NewP2PHandler( - headerStore HeightStore[*types.SignedHeader], - dataStore HeightStore[*types.Data], + headerStore HeightStore[*types.P2PSignedHeader], + dataStore HeightStore[*types.P2PData], cache cache.CacheManager, genesis genesis.Genesis, logger zerolog.Logger, @@ -79,25 +79,27 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC return nil } - header, headerDAHint, err := h.headerStore.GetByHeight(ctx, height) + p2pHeader, headerDAHint, err := h.headerStore.GetByHeight(ctx, height) if err != nil { if ctx.Err() == nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("header unavailable in store") } return err } + header := &p2pHeader.SignedHeader if err := h.assertExpectedProposer(header.ProposerAddress); err != nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("invalid header from P2P") return err } - data, dataDAHint, err := h.dataStore.GetByHeight(ctx, height) + p2pData, dataDAHint, err := h.dataStore.GetByHeight(ctx, height) if err != nil { if ctx.Err() == nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("data unavailable in store") } return err } + data := &p2pData.Data dataCommitment := data.DACommitment() if !bytes.Equal(header.DataHash[:], dataCommitment[:]) { err := fmt.Errorf("data hash mismatch: header %x, data %x", header.DataHash, dataCommitment) diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index 5970900474..b0d2dbdabc 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -36,7 +36,7 @@ func buildTestSigner(t *testing.T) ([]byte, crypto.PubKey, signerpkg.Signer) { } // p2pMakeSignedHeader creates a minimally valid SignedHeader for P2P tests. -func p2pMakeSignedHeader(t *testing.T, chainID string, height uint64, proposer []byte, pub crypto.PubKey, signer signerpkg.Signer) *types.SignedHeader { +func p2pMakeSignedHeader(t *testing.T, chainID string, height uint64, proposer []byte, pub crypto.PubKey, signer signerpkg.Signer) *types.P2PSignedHeader { t.Helper() hdr := &types.SignedHeader{ Header: types.Header{ @@ -50,14 +50,14 @@ func p2pMakeSignedHeader(t *testing.T, chainID string, height uint64, proposer [ sig, err := signer.Sign(bz) require.NoError(t, err, "failed to sign header bytes") hdr.Signature = sig - return hdr + return &types.P2PSignedHeader{SignedHeader: *hdr} } // P2PTestData aggregates dependencies used by P2P handler tests. type P2PTestData struct { Handler *P2PHandler - HeaderStore *MockHeightStore[*types.SignedHeader] - DataStore *MockHeightStore[*types.Data] + HeaderStore *MockHeightStore[*types.P2PSignedHeader] + DataStore *MockHeightStore[*types.P2PData] Cache cache.CacheManager Genesis genesis.Genesis ProposerAddr []byte @@ -72,8 +72,8 @@ func setupP2P(t *testing.T) *P2PTestData { gen := genesis.Genesis{ChainID: "p2p-test", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: proposerAddr} - headerStoreMock := NewMockHeightStore[*types.SignedHeader](t) - dataStoreMock := NewMockHeightStore[*types.Data](t) + headerStoreMock := NewMockHeightStore[*types.P2PSignedHeader](t) + dataStoreMock := NewMockHeightStore[*types.P2PData](t) cfg := config.Config{ RootDir: t.TempDir(), @@ -128,7 +128,7 @@ func TestP2PHandler_ProcessHeight_EmitsEventWhenHeaderAndDataPresent(t *testing. 
require.Equal(t, string(p.Genesis.ProposerAddress), string(p.ProposerAddr)) header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 5, p.ProposerAddr, p.ProposerPub, p.Signer) - data := makeData(p.Genesis.ChainID, 5, 1) + data := &types.P2PData{Data: *makeData(p.Genesis.ChainID, 5, 1)} header.DataHash = data.DACommitment() bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) require.NoError(t, err) @@ -154,7 +154,7 @@ func TestP2PHandler_ProcessHeight_SkipsWhenDataMissing(t *testing.T) { ctx := context.Background() header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 7, p.ProposerAddr, p.ProposerPub, p.Signer) - data := makeData(p.Genesis.ChainID, 7, 1) + data := &types.P2PData{Data: *makeData(p.Genesis.ChainID, 7, 1)} header.DataHash = data.DACommitment() bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) require.NoError(t, err) @@ -224,7 +224,7 @@ func TestP2PHandler_ProcessedHeightSkipsPreviouslyHandledBlocks(t *testing.T) { // Height 6 should be fetched normally. header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 6, p.ProposerAddr, p.ProposerPub, p.Signer) - data := makeData(p.Genesis.ChainID, 6, 1) + data := &types.P2PData{Data: *makeData(p.Genesis.ChainID, 6, 1)} header.DataHash = data.DACommitment() bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) require.NoError(t, err) @@ -247,7 +247,7 @@ func TestP2PHandler_SetProcessedHeightPreventsDuplicates(t *testing.T) { ctx := context.Background() header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 8, p.ProposerAddr, p.ProposerPub, p.Signer) - data := makeData(p.Genesis.ChainID, 8, 0) + data := &types.P2PData{Data: *makeData(p.Genesis.ChainID, 8, 0)} header.DataHash = data.DACommitment() bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) require.NoError(t, err) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 0e8b61b640..0e9eaf334e 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -535,12 +535,12 @@ func (s *Syncer) processHeightEvent(event *common.DAHeightEvent) { g.Go(func() error { // broadcast header locally only — prevents spamming the p2p network with old height notifications, // allowing the syncer to update its target and fill missing blocks - return s.headerStore.WriteToStoreAndBroadcast(ctx, event.Header, pubsub.WithLocalPublication(true)) + return s.headerStore.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: *event.Header}, pubsub.WithLocalPublication(true)) }) g.Go(func() error { // broadcast data locally only — prevents spamming the p2p network with old height notifications, // allowing the syncer to update its target and fill missing blocks - return s.dataStore.WriteToStoreAndBroadcast(ctx, event.Data, pubsub.WithLocalPublication(true)) + return s.dataStore.WriteToStoreAndBroadcast(ctx, &types.P2PData{Data: *event.Data}, pubsub.WithLocalPublication(true)) }) if err := g.Wait(); err != nil { s.logger.Error().Err(err).Msg("failed to append event header and/or data to p2p store") diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index ed9cd4c407..dcb2323b59 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -326,8 +326,8 @@ func setupTestSyncer(t *testing.T, daBlockTime time.Duration) *Syncer { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), 
+ common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), diff --git a/block/internal/syncing/syncer_benchmark_test.go b/block/internal/syncing/syncer_benchmark_test.go index e2b6f6e51f..a1f0a00314 100644 --- a/block/internal/syncing/syncer_benchmark_test.go +++ b/block/internal/syncing/syncer_benchmark_test.go @@ -153,9 +153,9 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay mockP2P := newMockp2pHandler(b) // not used directly in this benchmark path mockP2P.On("SetProcessedHeight", mock.Anything).Return().Maybe() s.p2pHandler = mockP2P - headerP2PStore := common.NewMockBroadcaster[*types.SignedHeader](b) + headerP2PStore := common.NewMockBroadcaster[*types.P2PSignedHeader](b) s.headerStore = headerP2PStore - dataP2PStore := common.NewMockBroadcaster[*types.Data](b) + dataP2PStore := common.NewMockBroadcaster[*types.P2PData](b) s.dataStore = dataP2PStore return &benchFixture{s: s, st: st, cm: cm, cancel: cancel} } diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index 741432eb28..072d2ed448 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -67,8 +67,8 @@ func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -152,8 +152,8 @@ func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -253,8 +253,8 @@ func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -356,8 +356,8 @@ func TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -430,8 +430,8 @@ func TestVerifyForcedInclusionTxs_NamespaceNotConfigured(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -499,8 +499,8 @@ func TestVerifyForcedInclusionTxs_DeferralWithinEpoch(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + 
common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -646,8 +646,8 @@ func TestVerifyForcedInclusionTxs_MaliciousAfterEpochEnd(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 5132b37ba7..f7386d7a5f 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -123,8 +123,8 @@ func TestSyncer_validateBlock_DataHashMismatch(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), @@ -174,8 +174,8 @@ func TestProcessHeightEvent_SyncsAndUpdatesState(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), errChan, @@ -228,8 +228,8 @@ func TestSequentialBlockSync(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), errChan, @@ -346,8 +346,8 @@ func TestSyncLoopPersistState(t *testing.T) { mockDataStore := extmocks.NewMockStore[*types.Data](t) mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() - mockP2PHeaderStore := common.NewMockBroadcaster[*types.SignedHeader](t) - mockP2PDataStore := common.NewMockBroadcaster[*types.Data](t) + mockP2PHeaderStore := common.NewMockBroadcaster[*types.P2PSignedHeader](t) + mockP2PDataStore := common.NewMockBroadcaster[*types.P2PData](t) errorCh := make(chan error, 1) syncerInst1 := NewSyncer( @@ -719,8 +719,8 @@ func TestProcessHeightEvent_TriggersAsyncDARetrieval(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), diff --git a/pkg/rpc/client/client_test.go b/pkg/rpc/client/client_test.go index 05c8df4d08..fd3d1f2dc0 100644 --- a/pkg/rpc/client/client_test.go +++ b/pkg/rpc/client/client_test.go @@ -8,7 +8,6 @@ import ( "time" goheader "github.com/celestiaorg/go-header" - "github.com/evstack/ev-node/pkg/sync" "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multiaddr" "github.com/rs/zerolog" @@ -29,8 +28,8 @@ import ( func setupTestServer( t *testing.T, mockStore *mocks.MockStore, - headerStore goheader.Store[*sync.SignedHeaderWithDAHint], - dataStore goheader.Store[*sync.DataWithDAHint], + headerStore goheader.Store[*types.P2PSignedHeader], + dataStore goheader.Store[*types.P2PData], mockP2P *mocks.MockP2PRPC, ) (*httptest.Server, *Client) { 
t.Helper() @@ -107,19 +106,19 @@ func TestClientGetMetadata(t *testing.T) { func TestClientGetP2PStoreInfo(t *testing.T) { mockStore := mocks.NewMockStore(t) mockP2P := mocks.NewMockP2PRPC(t) - headerStore := headerstoremocks.NewMockStore[*sync.SignedHeaderWithDAHint](t) - dataStore := headerstoremocks.NewMockStore[*sync.DataWithDAHint](t) + headerStore := headerstoremocks.NewMockStore[*types.P2PSignedHeader](t) + dataStore := headerstoremocks.NewMockStore[*types.P2PData](t) now := time.Now().UTC() - headerHead := &sync.SignedHeaderWithDAHint{Entry: testSignedHeader(10, now)} - headerTail := &sync.SignedHeaderWithDAHint{Entry: testSignedHeader(5, now.Add(-time.Minute))} + headerHead := testSignedHeader(10, now) + headerTail := testSignedHeader(5, now.Add(-time.Minute)) headerStore.On("Height").Return(uint64(10)) headerStore.On("Head", mock.Anything).Return(headerHead, nil) headerStore.On("Tail", mock.Anything).Return(headerTail, nil) - dataHead := &sync.DataWithDAHint{Entry: testData(8, now.Add(-30*time.Second))} - dataTail := &sync.DataWithDAHint{Entry: testData(4, now.Add(-2*time.Minute))} + dataHead := testData(8, now.Add(-30*time.Second)) + dataTail := testData(4, now.Add(-2*time.Minute)) dataStore.On("Height").Return(uint64(8)) dataStore.On("Head", mock.Anything).Return(dataHead, nil) dataStore.On("Tail", mock.Anything).Return(dataTail, nil) @@ -252,27 +251,31 @@ func TestClientGetNamespace(t *testing.T) { require.NotEmpty(t, namespaceResp.DataNamespace) } -func testSignedHeader(height uint64, ts time.Time) *types.SignedHeader { - return &types.SignedHeader{ - Header: types.Header{ - BaseHeader: types.BaseHeader{ - Height: height, - Time: uint64(ts.UnixNano()), - ChainID: "test-chain", +func testSignedHeader(height uint64, ts time.Time) *types.P2PSignedHeader { + return &types.P2PSignedHeader{ + SignedHeader: types.SignedHeader{ + Header: types.Header{ + BaseHeader: types.BaseHeader{ + Height: height, + Time: uint64(ts.UnixNano()), + ChainID: "test-chain", + }, + ProposerAddress: []byte{0x01}, + DataHash: []byte{0x02}, + AppHash: []byte{0x03}, }, - ProposerAddress: []byte{0x01}, - DataHash: []byte{0x02}, - AppHash: []byte{0x03}, }, } } -func testData(height uint64, ts time.Time) *types.Data { - return &types.Data{ - Metadata: &types.Metadata{ - ChainID: "test-chain", - Height: height, - Time: uint64(ts.UnixNano()), +func testData(height uint64, ts time.Time) *types.P2PData { + return &types.P2PData{ + Data: types.Data{ + Metadata: &types.Metadata{ + ChainID: "test-chain", + Height: height, + Time: uint64(ts.UnixNano()), + }, }, } } diff --git a/pkg/rpc/server/server.go b/pkg/rpc/server/server.go index f113b52fb8..27048c3010 100644 --- a/pkg/rpc/server/server.go +++ b/pkg/rpc/server/server.go @@ -14,7 +14,6 @@ import ( "connectrpc.com/grpcreflect" goheader "github.com/celestiaorg/go-header" coreda "github.com/evstack/ev-node/core/da" - "github.com/evstack/ev-node/pkg/sync" ds "github.com/ipfs/go-datastore" "github.com/rs/zerolog" "golang.org/x/net/http2" @@ -35,16 +34,16 @@ var _ rpc.StoreServiceHandler = (*StoreServer)(nil) // StoreServer implements the StoreService defined in the proto file type StoreServer struct { store store.Store - headerStore goheader.Store[*sync.SignedHeaderWithDAHint] - dataStore goheader.Store[*sync.DataWithDAHint] + headerStore goheader.Store[*types.P2PSignedHeader] + dataStore goheader.Store[*types.P2PData] logger zerolog.Logger } // NewStoreServer creates a new StoreServer instance func NewStoreServer( store store.Store, - headerStore 
goheader.Store[*sync.SignedHeaderWithDAHint], - dataStore goheader.Store[*sync.DataWithDAHint], + headerStore goheader.Store[*types.P2PSignedHeader], + dataStore goheader.Store[*types.P2PData], logger zerolog.Logger, ) *StoreServer { return &StoreServer{ @@ -371,8 +370,8 @@ func (p *P2PServer) GetNetInfo( // NewServiceHandler creates a new HTTP handler for Store, P2P and Config services func NewServiceHandler( store store.Store, - headerStore goheader.Store[*sync.SignedHeaderWithDAHint], - dataStore goheader.Store[*sync.DataWithDAHint], + headerStore goheader.Store[*types.P2PSignedHeader], + dataStore goheader.Store[*types.P2PData], peerManager p2p.P2PRPC, proposerAddress []byte, logger zerolog.Logger, diff --git a/pkg/rpc/server/server_test.go b/pkg/rpc/server/server_test.go index 3a4dbd89bd..842adea560 100644 --- a/pkg/rpc/server/server_test.go +++ b/pkg/rpc/server/server_test.go @@ -11,7 +11,6 @@ import ( "time" "connectrpc.com/connect" - "github.com/evstack/ev-node/pkg/sync" ds "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multiaddr" @@ -326,8 +325,8 @@ func TestGetGenesisDaHeight_InvalidLength(t *testing.T) { func TestGetP2PStoreInfo(t *testing.T) { t.Run("returns snapshots for configured stores", func(t *testing.T) { mockStore := mocks.NewMockStore(t) - headerStore := headerstoremocks.NewMockStore[*sync.SignedHeaderWithDAHint](t) - dataStore := headerstoremocks.NewMockStore[*sync.DataWithDAHint](t) + headerStore := headerstoremocks.NewMockStore[*types.P2PSignedHeader](t) + dataStore := headerstoremocks.NewMockStore[*types.P2PData](t) logger := zerolog.Nop() server := NewStoreServer(mockStore, headerStore, dataStore, logger) @@ -355,10 +354,10 @@ func TestGetP2PStoreInfo(t *testing.T) { t.Run("returns error when a store edge fails", func(t *testing.T) { mockStore := mocks.NewMockStore(t) - headerStore := headerstoremocks.NewMockStore[*sync.SignedHeaderWithDAHint](t) + headerStore := headerstoremocks.NewMockStore[*types.P2PSignedHeader](t) logger := zerolog.Nop() headerStore.On("Height").Return(uint64(0)) - headerStore.On("Head", mock.Anything).Return((*sync.SignedHeaderWithDAHint)(nil), fmt.Errorf("boom")) + headerStore.On("Head", mock.Anything).Return((*types.P2PSignedHeader)(nil), fmt.Errorf("boom")) server := NewStoreServer(mockStore, headerStore, nil, logger) resp, err := server.GetP2PStoreInfo(context.Background(), connect.NewRequest(&emptypb.Empty{})) @@ -628,29 +627,31 @@ func TestHealthReadyEndpoint(t *testing.T) { }) } -func makeTestSignedHeader(height uint64, ts time.Time) *sync.SignedHeaderWithDAHint { - return &sync.SignedHeaderWithDAHint{Entry: &types.SignedHeader{ - Header: types.Header{ - BaseHeader: types.BaseHeader{ - Height: height, - Time: uint64(ts.UnixNano()), - ChainID: "test-chain", +func makeTestSignedHeader(height uint64, ts time.Time) *types.P2PSignedHeader { + return &types.P2PSignedHeader{ + SignedHeader: types.SignedHeader{ + Header: types.Header{ + BaseHeader: types.BaseHeader{ + Height: height, + Time: uint64(ts.UnixNano()), + ChainID: "test-chain", + }, + ProposerAddress: []byte{0x01}, + DataHash: []byte{0x02}, + AppHash: []byte{0x03}, }, - ProposerAddress: []byte{0x01}, - DataHash: []byte{0x02}, - AppHash: []byte{0x03}, }, - }, } } -func makeTestData(height uint64, ts time.Time) *sync.DataWithDAHint { - return &sync.DataWithDAHint{Entry: &types.Data{ - Metadata: &types.Metadata{ - ChainID: "test-chain", - Height: height, - Time: uint64(ts.UnixNano()), +func makeTestData(height uint64, ts time.Time) 
*types.P2PData { + return &types.P2PData{ + Data: types.Data{ + Metadata: &types.Metadata{ + ChainID: "test-chain", + Height: height, + Time: uint64(ts.UnixNano()), + }, }, - }, } } diff --git a/pkg/sync/da_hint_container.go b/pkg/sync/da_hint_container.go index 5d904d885d..dc52fcb953 100644 --- a/pkg/sync/da_hint_container.go +++ b/pkg/sync/da_hint_container.go @@ -1,17 +1,11 @@ package sync import ( - "encoding/binary" - "fmt" "time" "github.com/celestiaorg/go-header" - "github.com/evstack/ev-node/types" ) -type SignedHeaderWithDAHint = DAHeightHintContainer[*types.SignedHeader] -type DataWithDAHint = DAHeightHintContainer[*types.Data] - type DAHeightHintContainer[H header.Header[H]] struct { Entry H DAHeightHint uint64 @@ -62,20 +56,9 @@ func (s *DAHeightHintContainer[H]) IsZero() bool { } func (s *DAHeightHintContainer[H]) MarshalBinary() ([]byte, error) { - bz, err := s.Entry.MarshalBinary() - if err != nil { - return nil, err - } - out := make([]byte, 8+len(bz)) - binary.BigEndian.PutUint64(out, s.DAHeightHint) - copy(out[8:], bz) - return out, nil + return s.Entry.MarshalBinary() } func (s *DAHeightHintContainer[H]) UnmarshalBinary(data []byte) error { - if len(data) < 8 { - return fmt.Errorf("invalid length: %d", len(data)) - } - s.DAHeightHint = binary.BigEndian.Uint64(data) - return s.Entry.UnmarshalBinary(data[8:]) + return s.Entry.UnmarshalBinary(data) } diff --git a/pkg/sync/sync_service.go b/pkg/sync/sync_service.go index 22d7c3c904..c59ab2048a 100644 --- a/pkg/sync/sync_service.go +++ b/pkg/sync/sync_service.go @@ -43,15 +43,15 @@ type EntityWithDAHint[H any] interface { } // HeaderSyncService is the P2P Sync Service for headers. -type HeaderSyncService = SyncService[*types.SignedHeader] +type HeaderSyncService = SyncService[*types.P2PSignedHeader] // DataSyncService is the P2P Sync Service for blocks. -type DataSyncService = SyncService[*types.Data] +type DataSyncService = SyncService[*types.P2PData] // SyncService is the P2P Sync Service for blocks and headers. // // Uses the go-header library for handling all P2P logic. -type SyncService[V header.Header[V]] struct { +type SyncService[V EntityWithDAHint[V]] struct { conf config.Config logger zerolog.Logger syncType syncType @@ -60,13 +60,13 @@ type SyncService[V header.Header[V]] struct { p2p *p2p.Client - ex *goheaderp2p.Exchange[*DAHeightHintContainer[V]] - sub *goheaderp2p.Subscriber[*DAHeightHintContainer[V]] - p2pServer *goheaderp2p.ExchangeServer[*DAHeightHintContainer[V]] - store *goheaderstore.Store[*DAHeightHintContainer[V]] - syncer *goheadersync.Syncer[*DAHeightHintContainer[V]] + ex *goheaderp2p.Exchange[V] + sub *goheaderp2p.Subscriber[V] + p2pServer *goheaderp2p.ExchangeServer[V] + store *goheaderstore.Store[V] + syncer *goheadersync.Syncer[V] syncerStatus *SyncerStatus - topicSubscription header.Subscription[*DAHeightHintContainer[V]] + topicSubscription header.Subscription[V] storeInitialized atomic.Bool } @@ -78,7 +78,7 @@ func NewDataSyncService( p2p *p2p.Client, logger zerolog.Logger, ) (*DataSyncService, error) { - return newSyncService[*types.Data](store, dataSync, conf, genesis, p2p, logger) + return newSyncService[*types.P2PData](store, dataSync, conf, genesis, p2p, logger) } // NewHeaderSyncService returns a new HeaderSyncService. 
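For context, a minimal usage sketch of the new P2P wrapper types as they stand at this point in the series. This is illustrative only and not part of the patch: the helper name broadcastWithHint and the evsync import alias are assumptions; the envelope literal, WriteToStoreAndBroadcast, AppendDAHint, and Hash follow the API visible in the hunks above.

package example

import (
	"context"

	evsync "github.com/evstack/ev-node/pkg/sync"
	"github.com/evstack/ev-node/types"
)

// broadcastWithHint wraps a SignedHeader in the P2P envelope, writes it to the
// header sync store and broadcasts it, then records the DA height hint once
// the header is known to be included on the DA layer.
func broadcastWithHint(ctx context.Context, svc *evsync.HeaderSyncService, hdr *types.SignedHeader, daHeight uint64) error {
	// The envelope embeds the SignedHeader and carries an optional DA height hint.
	env := &types.P2PSignedHeader{SignedHeader: *hdr}
	if err := svc.WriteToStoreAndBroadcast(ctx, env); err != nil {
		return err
	}
	// The hint is keyed by header hash so GetByHeight can return it alongside the entry.
	return svc.AppendDAHint(ctx, daHeight, hdr.Hash())
}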
@@ -89,10 +89,10 @@ func NewHeaderSyncService( p2p *p2p.Client, logger zerolog.Logger, ) (*HeaderSyncService, error) { - return newSyncService[*types.SignedHeader](store, headerSync, conf, genesis, p2p, logger) + return newSyncService[*types.P2PSignedHeader](store, headerSync, conf, genesis, p2p, logger) } -func newSyncService[V header.Header[V]]( +func newSyncService[V EntityWithDAHint[V]]( store ds.Batching, syncType syncType, conf config.Config, @@ -104,7 +104,7 @@ func newSyncService[V header.Header[V]]( return nil, errors.New("p2p client cannot be nil") } - ss, err := goheaderstore.NewStore[*DAHeightHintContainer[V]]( + ss, err := goheaderstore.NewStore[V]( store, goheaderstore.WithStorePrefix(string(syncType)), goheaderstore.WithMetrics(), @@ -127,7 +127,7 @@ func newSyncService[V header.Header[V]]( } // Store returns the store of the SyncService -func (syncService *SyncService[V]) Store() header.Store[*DAHeightHintContainer[V]] { +func (syncService *SyncService[V]) Store() header.Store[V] { return syncService.store } @@ -142,7 +142,7 @@ func (syncService *SyncService[V]) WriteToStoreAndBroadcast(ctx context.Context, return fmt.Errorf("empty header/data cannot write to store or broadcast") } - headerOrData := &DAHeightHintContainer[V]{Entry: payload} + headerOrData := payload storeInitialized := false if syncService.storeInitialized.CompareAndSwap(false, true) { var err error @@ -182,7 +182,7 @@ func (syncService *SyncService[V]) WriteToStoreAndBroadcast(ctx context.Context, } func (s *SyncService[V]) AppendDAHint(ctx context.Context, daHeight uint64, hashes ...types.Hash) error { - entries := make([]*DAHeightHintContainer[V], 0, len(hashes)) + entries := make([]V, 0, len(hashes)) for _, h := range hashes { v, err := s.store.Get(ctx, h) if err != nil { @@ -203,7 +203,7 @@ func (s *SyncService[V]) GetByHeight(ctx context.Context, height uint64) (V, uin var zero V return zero, 0, err } - return c.Entry, c.DAHint(), nil + return c, c.DAHint(), nil } // Start is a part of Service interface. @@ -258,7 +258,7 @@ func (syncService *SyncService[H]) startSyncer(ctx context.Context) error { // initStore initializes the store with the given initial header. // it is a no-op if the store is already initialized. // Returns true when the store was initialized by this call. 
-func (syncService *SyncService[V]) initStore(ctx context.Context, initial *DAHeightHintContainer[V]) (bool, error) { +func (syncService *SyncService[V]) initStore(ctx context.Context, initial V) (bool, error) { if initial.IsZero() { return false, errors.New("failed to initialize the store") } @@ -292,7 +292,7 @@ func (syncService *SyncService[V]) setupP2PInfrastructure(ctx context.Context) ( networkID := syncService.getNetworkID(chainID) // Create subscriber but DON'T start it yet - syncService.sub, err = goheaderp2p.NewSubscriber[*DAHeightHintContainer[V]]( + syncService.sub, err = goheaderp2p.NewSubscriber[V]( ps, pubsub.DefaultMsgIdFn, goheaderp2p.WithSubscriberNetworkID(networkID), @@ -315,7 +315,7 @@ func (syncService *SyncService[V]) setupP2PInfrastructure(ctx context.Context) ( peerIDs := syncService.getPeerIDs() - if syncService.ex, err = newP2PExchange[*DAHeightHintContainer[V]](syncService.p2p.Host(), peerIDs, networkID, syncService.genesis.ChainID, syncService.p2p.ConnectionGater()); err != nil { + if syncService.ex, err = newP2PExchange[V](syncService.p2p.Host(), peerIDs, networkID, syncService.genesis.ChainID, syncService.p2p.ConnectionGater()); err != nil { return nil, fmt.Errorf("error while creating exchange: %w", err) } if err := syncService.ex.Start(ctx); err != nil { @@ -350,7 +350,7 @@ func (syncService *SyncService[V]) initFromP2PWithRetry(ctx context.Context, pee tryInit := func(ctx context.Context) (bool, error) { var ( - trusted *DAHeightHintContainer[V] + trusted V err error heightToQuery uint64 ) @@ -460,17 +460,17 @@ func newP2PExchange[H header.Header[H]]( // newSyncer constructs new Syncer for headers/blocks. func newSyncer[H header.Header[H]]( - ex header.Exchange[*DAHeightHintContainer[H]], - store header.Store[*DAHeightHintContainer[H]], - sub header.Subscriber[*DAHeightHintContainer[H]], + ex header.Exchange[H], + store header.Store[H], + sub header.Subscriber[H], opts []goheadersync.Option, -) (*goheadersync.Syncer[*DAHeightHintContainer[H]], error) { +) (*goheadersync.Syncer[H], error) { opts = append(opts, goheadersync.WithMetrics(), goheadersync.WithPruningWindow(ninetyNineYears), goheadersync.WithTrustingPeriod(ninetyNineYears), ) - return goheadersync.NewSyncer[*DAHeightHintContainer[H]](ex, store, sub, opts...) + return goheadersync.NewSyncer[H](ex, store, sub, opts...) 
} func (syncService *SyncService[H]) getNetworkID(network string) string { diff --git a/pkg/sync/sync_service_test.go b/pkg/sync/sync_service_test.go index b0e244a95f..048d19d6cd 100644 --- a/pkg/sync/sync_service_test.go +++ b/pkg/sync/sync_service_test.go @@ -73,12 +73,12 @@ func TestHeaderSyncServiceRestart(t *testing.T) { signedHeader, err := types.GetRandomSignedHeaderCustom(&headerConfig, genesisDoc.ChainID) require.NoError(t, err) require.NoError(t, signedHeader.Validate()) - require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, signedHeader)) + require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: *signedHeader})) for i := genesisDoc.InitialHeight + 1; i < 2; i++ { signedHeader = nextHeader(t, signedHeader, genesisDoc.ChainID, noopSigner) t.Logf("signed header: %d", i) - require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, signedHeader)) + require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: *signedHeader})) } // then stop and restart service @@ -109,7 +109,7 @@ func TestHeaderSyncServiceRestart(t *testing.T) { for i := signedHeader.Height() + 1; i < 2; i++ { signedHeader = nextHeader(t, signedHeader, genesisDoc.ChainID, noopSigner) t.Logf("signed header: %d", i) - require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, signedHeader)) + require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: *signedHeader})) } cancel() } @@ -164,7 +164,7 @@ func TestHeaderSyncServiceInitFromHigherHeight(t *testing.T) { require.NoError(t, err) require.NoError(t, signedHeader.Validate()) - require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, signedHeader)) + require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: *signedHeader})) } func TestDAHintStorageHeader(t *testing.T) { @@ -215,7 +215,7 @@ func TestDAHintStorageHeader(t *testing.T) { require.NoError(t, err) require.NoError(t, signedHeader.Validate()) - require.NoError(t, headerSvc.WriteToStoreAndBroadcast(ctx, signedHeader)) + require.NoError(t, headerSvc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: *signedHeader})) daHeight := uint64(100) require.NoError(t, headerSvc.AppendDAHint(ctx, daHeight, signedHeader.Hash())) @@ -306,7 +306,7 @@ func TestDAHintStorageData(t *testing.T) { }, } - require.NoError(t, dataSvc.WriteToStoreAndBroadcast(ctx, &data)) + require.NoError(t, dataSvc.WriteToStoreAndBroadcast(ctx, &types.P2PData{Data: data})) daHeight := uint64(100) require.NoError(t, dataSvc.AppendDAHint(ctx, daHeight, data.Hash())) diff --git a/proto/evnode/v1/evnode.proto b/proto/evnode/v1/evnode.proto index 1cb3e23ea1..8bd7d13a25 100644 --- a/proto/evnode/v1/evnode.proto +++ b/proto/evnode/v1/evnode.proto @@ -95,3 +95,18 @@ message Vote { // Validator address bytes validator_address = 5; } + +// P2PSignedHeader +message P2PSignedHeader { + Header header = 1; + bytes signature = 2; + Signer signer = 3; + optional uint64 da_height_hint = 4; +} + +// P2PData +message P2PData { + Metadata metadata = 1; + repeated bytes txs = 2; + optional uint64 da_height_hint = 3; +} diff --git a/types/binary_compatibility_test.go b/types/binary_compatibility_test.go new file mode 100644 index 0000000000..a8e8253d4e --- /dev/null +++ b/types/binary_compatibility_test.go @@ -0,0 +1,72 @@ +package types + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSignedHeaderBinaryCompatibility(t *testing.T) { + signedHeader, _, err := 
GetRandomSignedHeader("chain-id") + require.NoError(t, err) + bytes, err := signedHeader.MarshalBinary() + require.NoError(t, err) + + var p2pHeader P2PSignedHeader + err = p2pHeader.UnmarshalBinary(bytes) + require.NoError(t, err) + + assert.Equal(t, signedHeader.Header, p2pHeader.Header) + assert.Equal(t, signedHeader.Signature, p2pHeader.Signature) + assert.Equal(t, signedHeader.Signer, p2pHeader.Signer) + assert.Zero(t, p2pHeader.DAHeightHint) + + p2pHeader.DAHeightHint = 100 + p2pBytes, err := p2pHeader.MarshalBinary() + require.NoError(t, err) + + var decodedSignedHeader SignedHeader + err = decodedSignedHeader.UnmarshalBinary(p2pBytes) + require.NoError(t, err) + assert.Equal(t, signedHeader.Header, decodedSignedHeader.Header) + assert.Equal(t, signedHeader.Signature, decodedSignedHeader.Signature) + assert.Equal(t, signedHeader.Signer, decodedSignedHeader.Signer) +} + +func TestDataBinaryCompatibility(t *testing.T) { + data := &Data{ + Metadata: &Metadata{ + ChainID: "chain-id", + Height: 10, + Time: uint64(time.Now().UnixNano()), + LastDataHash: []byte("last-hash"), + }, + Txs: Txs{ + []byte("tx1"), + []byte("tx2"), + }, + } + bytes, err := data.MarshalBinary() + require.NoError(t, err) + + var p2pData P2PData + err = p2pData.UnmarshalBinary(bytes) + require.NoError(t, err) + + assert.Equal(t, data.Metadata, p2pData.Metadata) + assert.Equal(t, data.Txs, p2pData.Txs) + assert.Zero(t, p2pData.DAHeightHint) + + p2pData.DAHeightHint = 200 + + p2pBytes, err := p2pData.MarshalBinary() + require.NoError(t, err) + + var decodedData Data + err = decodedData.UnmarshalBinary(p2pBytes) + require.NoError(t, err) + assert.Equal(t, data.Metadata, decodedData.Metadata) + assert.Equal(t, data.Txs, decodedData.Txs) +} diff --git a/types/p2p_data.go b/types/p2p_data.go new file mode 100644 index 0000000000..d57671a081 --- /dev/null +++ b/types/p2p_data.go @@ -0,0 +1,80 @@ +package types + +import ( + "errors" + + "github.com/celestiaorg/go-header" + "google.golang.org/protobuf/proto" + + pb "github.com/evstack/ev-node/types/pb/evnode/v1" +) + +var _ header.Header[*P2PData] = &P2PData{} + +type P2PData struct { + Data + DAHeightHint uint64 +} + +func (d *P2PData) New() *P2PData { + return new(P2PData) +} + +func (d *P2PData) IsZero() bool { + return d == nil || d.Data.IsZero() +} + +func (d *P2PData) Verify(untrstD *P2PData) error { + return d.Data.Verify(&untrstD.Data) +} + +func (d *P2PData) SetDAHint(daHeight uint64) { + d.DAHeightHint = daHeight +} + +func (d *P2PData) DAHint() uint64 { + return d.DAHeightHint +} + +func (d *P2PData) MarshalBinary() ([]byte, error) { + msg, err := d.ToProto() + if err != nil { + return nil, err + } + return proto.Marshal(msg) +} + +func (d *P2PData) UnmarshalBinary(data []byte) error { + var pData pb.P2PData + if err := proto.Unmarshal(data, &pData); err != nil { + return err + } + return d.FromProto(&pData) +} + +func (d *P2PData) ToProto() (*pb.P2PData, error) { + pData := d.Data.ToProto() + return &pb.P2PData{ + Metadata: pData.Metadata, + Txs: pData.Txs, + DaHeightHint: &d.DAHeightHint, + }, nil +} + +func (d *P2PData) FromProto(other *pb.P2PData) error { + if other == nil { + return errors.New("P2PData is nil") + } + + pData := &pb.Data{ + Metadata: other.Metadata, + Txs: other.Txs, + } + if err := d.Data.FromProto(pData); err != nil { + return err + } + if other.DaHeightHint != nil { + d.DAHeightHint = *other.DaHeightHint + } + return nil +} diff --git a/types/p2p_signed_header.go b/types/p2p_signed_header.go new file mode 100644 index 
0000000000..fe1ce807ba --- /dev/null +++ b/types/p2p_signed_header.go @@ -0,0 +1,85 @@ +package types + +import ( + "errors" + + "github.com/celestiaorg/go-header" + "google.golang.org/protobuf/proto" + + pb "github.com/evstack/ev-node/types/pb/evnode/v1" +) + +var _ header.Header[*P2PSignedHeader] = &P2PSignedHeader{} + +type P2PSignedHeader struct { + SignedHeader + DAHeightHint uint64 +} + +func (sh *P2PSignedHeader) New() *P2PSignedHeader { + return new(P2PSignedHeader) +} + +func (sh *P2PSignedHeader) IsZero() bool { + return sh == nil || sh.SignedHeader.IsZero() +} + +func (sh *P2PSignedHeader) Verify(untrstH *P2PSignedHeader) error { + return sh.SignedHeader.Verify(&untrstH.SignedHeader) +} + +func (sh *P2PSignedHeader) SetDAHint(daHeight uint64) { + sh.DAHeightHint = daHeight +} + +func (sh *P2PSignedHeader) DAHint() uint64 { + return sh.DAHeightHint +} + +func (sh *P2PSignedHeader) MarshalBinary() ([]byte, error) { + msg, err := sh.ToProto() + if err != nil { + return nil, err + } + return proto.Marshal(msg) +} + +func (sh *P2PSignedHeader) UnmarshalBinary(data []byte) error { + var pHeader pb.P2PSignedHeader + if err := proto.Unmarshal(data, &pHeader); err != nil { + return err + } + return sh.FromProto(&pHeader) +} + +func (sh *P2PSignedHeader) ToProto() (*pb.P2PSignedHeader, error) { + psh, err := sh.SignedHeader.ToProto() + if err != nil { + return nil, err + } + return &pb.P2PSignedHeader{ + Header: psh.Header, + Signature: psh.Signature, + Signer: psh.Signer, + DaHeightHint: &sh.DAHeightHint, + }, nil +} + +func (sh *P2PSignedHeader) FromProto(other *pb.P2PSignedHeader) error { + if other == nil { + return errors.New("P2PSignedHeader is nil") + } + // Reconstruct SignedHeader + psh := &pb.SignedHeader{ + Header: other.Header, + Signature: other.Signature, + Signer: other.Signer, + } + if err := sh.SignedHeader.FromProto(psh); err != nil { + return err + } + if other.DaHeightHint != nil { + sh.DAHeightHint = *other.DaHeightHint + } + return nil +} diff --git a/types/pb/evnode/v1/evnode.pb.go b/types/pb/evnode/v1/evnode.pb.go index 7c532c7c7e..775e35e992 100644 --- a/types/pb/evnode/v1/evnode.pb.go +++ b/types/pb/evnode/v1/evnode.pb.go @@ -585,6 +585,134 @@ func (x *Vote) GetValidatorAddress() []byte { return nil } +type P2PSignedHeader struct { + state protoimpl.MessageState `protogen:"open.v1"` + Header *Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + Signer *Signer `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` + DaHeightHint *uint64 `protobuf:"varint,4,opt,name=da_height_hint,json=daHeightHint,proto3,oneof" json:"da_height_hint,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *P2PSignedHeader) Reset() { + *x = P2PSignedHeader{} + mi := &file_evnode_v1_evnode_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *P2PSignedHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*P2PSignedHeader) ProtoMessage() {} + +func (x *P2PSignedHeader) ProtoReflect() protoreflect.Message { + mi := &file_evnode_v1_evnode_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use P2PSignedHeader.ProtoReflect.Descriptor instead. 
+func (*P2PSignedHeader) Descriptor() ([]byte, []int) { + return file_evnode_v1_evnode_proto_rawDescGZIP(), []int{8} +} + +func (x *P2PSignedHeader) GetHeader() *Header { + if x != nil { + return x.Header + } + return nil +} + +func (x *P2PSignedHeader) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +func (x *P2PSignedHeader) GetSigner() *Signer { + if x != nil { + return x.Signer + } + return nil +} + +func (x *P2PSignedHeader) GetDaHeightHint() uint64 { + if x != nil && x.DaHeightHint != nil { + return *x.DaHeightHint + } + return 0 +} + +type P2PData struct { + state protoimpl.MessageState `protogen:"open.v1"` + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + Txs [][]byte `protobuf:"bytes,2,rep,name=txs,proto3" json:"txs,omitempty"` + DaHeightHint *uint64 `protobuf:"varint,3,opt,name=da_height_hint,json=daHeightHint,proto3,oneof" json:"da_height_hint,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *P2PData) Reset() { + *x = P2PData{} + mi := &file_evnode_v1_evnode_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *P2PData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*P2PData) ProtoMessage() {} + +func (x *P2PData) ProtoReflect() protoreflect.Message { + mi := &file_evnode_v1_evnode_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use P2PData.ProtoReflect.Descriptor instead. +func (*P2PData) Descriptor() ([]byte, []int) { + return file_evnode_v1_evnode_proto_rawDescGZIP(), []int{9} +} + +func (x *P2PData) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *P2PData) GetTxs() [][]byte { + if x != nil { + return x.Txs + } + return nil +} + +func (x *P2PData) GetDaHeightHint() uint64 { + if x != nil && x.DaHeightHint != nil { + return *x.DaHeightHint + } + return 0 +} + var File_evnode_v1_evnode_proto protoreflect.FileDescriptor const file_evnode_v1_evnode_proto_rawDesc = "" + @@ -630,7 +758,18 @@ const file_evnode_v1_evnode_proto_rawDesc = "" + "\x06height\x18\x02 \x01(\x04R\x06height\x128\n" + "\ttimestamp\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12\"\n" + "\rblock_id_hash\x18\x04 \x01(\fR\vblockIdHash\x12+\n" + - "\x11validator_address\x18\x05 \x01(\fR\x10validatorAddressB/Z-github.com/evstack/ev-node/types/pb/evnode/v1b\x06proto3" + "\x11validator_address\x18\x05 \x01(\fR\x10validatorAddress\"\xc3\x01\n" + + "\x0fP2PSignedHeader\x12)\n" + + "\x06header\x18\x01 \x01(\v2\x11.evnode.v1.HeaderR\x06header\x12\x1c\n" + + "\tsignature\x18\x02 \x01(\fR\tsignature\x12)\n" + + "\x06signer\x18\x03 \x01(\v2\x11.evnode.v1.SignerR\x06signer\x12)\n" + + "\x0eda_height_hint\x18\x04 \x01(\x04H\x00R\fdaHeightHint\x88\x01\x01B\x11\n" + + "\x0f_da_height_hint\"\x8a\x01\n" + + "\aP2PData\x12/\n" + + "\bmetadata\x18\x01 \x01(\v2\x13.evnode.v1.MetadataR\bmetadata\x12\x10\n" + + "\x03txs\x18\x02 \x03(\fR\x03txs\x12)\n" + + "\x0eda_height_hint\x18\x03 \x01(\x04H\x00R\fdaHeightHint\x88\x01\x01B\x11\n" + + "\x0f_da_height_hintB/Z-github.com/evstack/ev-node/types/pb/evnode/v1b\x06proto3" var ( file_evnode_v1_evnode_proto_rawDescOnce sync.Once @@ -644,7 +783,7 @@ func file_evnode_v1_evnode_proto_rawDescGZIP() []byte { return file_evnode_v1_evnode_proto_rawDescData } -var 
file_evnode_v1_evnode_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_evnode_v1_evnode_proto_msgTypes = make([]protoimpl.MessageInfo, 10) var file_evnode_v1_evnode_proto_goTypes = []any{ (*Version)(nil), // 0: evnode.v1.Version (*Header)(nil), // 1: evnode.v1.Header @@ -654,21 +793,26 @@ var file_evnode_v1_evnode_proto_goTypes = []any{ (*Data)(nil), // 5: evnode.v1.Data (*SignedData)(nil), // 6: evnode.v1.SignedData (*Vote)(nil), // 7: evnode.v1.Vote - (*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp + (*P2PSignedHeader)(nil), // 8: evnode.v1.P2PSignedHeader + (*P2PData)(nil), // 9: evnode.v1.P2PData + (*timestamppb.Timestamp)(nil), // 10: google.protobuf.Timestamp } var file_evnode_v1_evnode_proto_depIdxs = []int32{ - 0, // 0: evnode.v1.Header.version:type_name -> evnode.v1.Version - 1, // 1: evnode.v1.SignedHeader.header:type_name -> evnode.v1.Header - 3, // 2: evnode.v1.SignedHeader.signer:type_name -> evnode.v1.Signer - 4, // 3: evnode.v1.Data.metadata:type_name -> evnode.v1.Metadata - 5, // 4: evnode.v1.SignedData.data:type_name -> evnode.v1.Data - 3, // 5: evnode.v1.SignedData.signer:type_name -> evnode.v1.Signer - 8, // 6: evnode.v1.Vote.timestamp:type_name -> google.protobuf.Timestamp - 7, // [7:7] is the sub-list for method output_type - 7, // [7:7] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name + 0, // 0: evnode.v1.Header.version:type_name -> evnode.v1.Version + 1, // 1: evnode.v1.SignedHeader.header:type_name -> evnode.v1.Header + 3, // 2: evnode.v1.SignedHeader.signer:type_name -> evnode.v1.Signer + 4, // 3: evnode.v1.Data.metadata:type_name -> evnode.v1.Metadata + 5, // 4: evnode.v1.SignedData.data:type_name -> evnode.v1.Data + 3, // 5: evnode.v1.SignedData.signer:type_name -> evnode.v1.Signer + 10, // 6: evnode.v1.Vote.timestamp:type_name -> google.protobuf.Timestamp + 1, // 7: evnode.v1.P2PSignedHeader.header:type_name -> evnode.v1.Header + 3, // 8: evnode.v1.P2PSignedHeader.signer:type_name -> evnode.v1.Signer + 4, // 9: evnode.v1.P2PData.metadata:type_name -> evnode.v1.Metadata + 10, // [10:10] is the sub-list for method output_type + 10, // [10:10] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name } func init() { file_evnode_v1_evnode_proto_init() } @@ -676,13 +820,15 @@ func file_evnode_v1_evnode_proto_init() { if File_evnode_v1_evnode_proto != nil { return } + file_evnode_v1_evnode_proto_msgTypes[8].OneofWrappers = []any{} + file_evnode_v1_evnode_proto_msgTypes[9].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_evnode_v1_evnode_proto_rawDesc), len(file_evnode_v1_evnode_proto_rawDesc)), NumEnums: 0, - NumMessages: 8, + NumMessages: 10, NumExtensions: 0, NumServices: 0, }, From 56c278f17c60357dfae9876d7888464999ede7be Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Mon, 15 Dec 2025 12:03:47 +0100 Subject: [PATCH 08/13] Merge updates --- block/components.go | 1 - block/internal/syncing/syncer_forced_inclusion_test.go | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/block/components.go b/block/components.go index 54a70e6e8a..6a2f1c2d7e 100644 --- a/block/components.go +++ 
b/block/components.go @@ -20,7 +20,6 @@ import ( "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/signer" "github.com/evstack/ev-node/pkg/store" - "github.com/evstack/ev-node/types" ) // Components represents the block-related components diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index 2b6448819f..a3bb206354 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -1092,8 +1092,8 @@ func TestVerifyForcedInclusionTxs_SmoothingExceedsEpoch(t *testing.T) { common.NopMetrics(), cfg, gen, - common.NewMockBroadcaster[*types.SignedHeader](t), - common.NewMockBroadcaster[*types.Data](t), + common.NewMockBroadcaster[*types.P2PSignedHeader](t), + common.NewMockBroadcaster[*types.P2PData](t), zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), From a5851907cb04852031cbed5708649e1314c0d1dd Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Mon, 15 Dec 2025 15:10:15 +0100 Subject: [PATCH 09/13] Bump sonic version --- test/e2e/go.mod | 7 ++++--- test/e2e/go.sum | 10 ++++++++++ 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/test/e2e/go.mod b/test/e2e/go.mod index 5011f11387..b217d251cd 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -51,15 +51,16 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/speakeasy v0.2.0 // indirect github.com/bits-and-blooms/bitset v1.22.0 // indirect - github.com/bytedance/sonic v1.13.2 // indirect - github.com/bytedance/sonic/loader v0.2.4 // indirect + github.com/bytedance/gopkg v0.1.3 // indirect + github.com/bytedance/sonic v1.14.2 // indirect + github.com/bytedance/sonic/loader v0.4.0 // indirect github.com/celestiaorg/go-header v0.7.4 // indirect github.com/celestiaorg/go-square/merkle v0.0.0-20240627094109-7d01436067a3 // indirect github.com/celestiaorg/nmt v0.24.2 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chzyer/readline v1.5.1 // indirect - github.com/cloudwego/base64x v0.1.5 // indirect + github.com/cloudwego/base64x v0.1.6 // indirect github.com/cockroachdb/errors v1.12.0 // indirect github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 // indirect diff --git a/test/e2e/go.sum b/test/e2e/go.sum index f291834660..83a2b7a74d 100644 --- a/test/e2e/go.sum +++ b/test/e2e/go.sum @@ -122,11 +122,17 @@ github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/ github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= +github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M= +github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM= github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ= github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= +github.com/bytedance/sonic v1.14.2 h1:k1twIoe97C1DtYUo+fZQy865IuHia4PR5RPiuGPPIIE= +github.com/bytedance/sonic v1.14.2/go.mod h1:T80iDELeHiHKSc0C9tubFygiuXoGzrkjKzX2quAx980= github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= 
github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY= github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= +github.com/bytedance/sonic/loader v0.4.0 h1:olZ7lEqcxtZygCK9EKYKADnpQoYkRQxaeY2NYzevs+o= +github.com/bytedance/sonic/loader v0.4.0/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/celestiaorg/go-header v0.7.4 h1:kQx3bVvKV+H2etxRi4IUuby5VQydBONx3giHFXDcZ/o= github.com/celestiaorg/go-header v0.7.4/go.mod h1:eX9iTSPthVEAlEDLux40ZT/olXPGhpxHd+mEzJeDhd0= @@ -162,6 +168,8 @@ github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= +github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -1011,6 +1019,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= From d09c8ab342e53da7b10394a2409ac5cfb7469325 Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Mon, 15 Dec 2025 15:31:36 +0100 Subject: [PATCH 10/13] Make tidy-all --- test/e2e/go.sum | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/test/e2e/go.sum b/test/e2e/go.sum index 83a2b7a74d..03603f3c36 100644 --- a/test/e2e/go.sum +++ b/test/e2e/go.sum @@ -124,13 +124,8 @@ github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28 github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M= github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM= -github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ= -github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= github.com/bytedance/sonic v1.14.2 h1:k1twIoe97C1DtYUo+fZQy865IuHia4PR5RPiuGPPIIE= github.com/bytedance/sonic v1.14.2/go.mod h1:T80iDELeHiHKSc0C9tubFygiuXoGzrkjKzX2quAx980= -github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= -github.com/bytedance/sonic/loader v0.2.4 
h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY= -github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= github.com/bytedance/sonic/loader v0.4.0 h1:olZ7lEqcxtZygCK9EKYKADnpQoYkRQxaeY2NYzevs+o= github.com/bytedance/sonic/loader v0.4.0/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= @@ -166,11 +161,8 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= -github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= -github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= @@ -614,10 +606,8 @@ github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= -github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU= @@ -1423,7 +1413,6 @@ lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= -nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= From 4ecf0a003b615d2486e46bef85327076e4d5a529 Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Fri, 19 Dec 2025 17:00:08 +0100 Subject: [PATCH 11/13] Use envelope for p2p store --- block/internal/executing/executor.go | 4 +- 
block/internal/syncing/p2p_handler.go | 4 +- block/internal/syncing/p2p_handler_test.go | 36 ++--- block/internal/syncing/syncer.go | 4 +- docs/guides/migrating-to-ev-abci.md | 6 +- pkg/rpc/client/client_test.go | 4 +- pkg/rpc/server/server_test.go | 4 +- pkg/sync/da_hint_container.go | 64 --------- pkg/sync/sync_service_test.go | 12 +- types/binary_compatibility_test.go | 14 +- types/p2p_data.go | 80 ----------- types/p2p_envelope.go | 153 +++++++++++++++++++++ types/p2p_envelope_test.go | 85 ++++++++++++ types/p2p_signed_header.go | 85 ------------ 14 files changed, 282 insertions(+), 273 deletions(-) delete mode 100644 pkg/sync/da_hint_container.go delete mode 100644 types/p2p_data.go create mode 100644 types/p2p_envelope.go create mode 100644 types/p2p_envelope_test.go delete mode 100644 types/p2p_signed_header.go diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index 3424df67c1..be32c37b77 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -432,10 +432,10 @@ func (e *Executor) produceBlock() error { // broadcast header and data to P2P network g, ctx := errgroup.WithContext(e.ctx) g.Go(func() error { - return e.headerBroadcaster.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: *header}) + return e.headerBroadcaster.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{Message: header}) }) g.Go(func() error { - return e.dataBroadcaster.WriteToStoreAndBroadcast(ctx, &types.P2PData{Data: *data}) + return e.dataBroadcaster.WriteToStoreAndBroadcast(ctx, &types.P2PData{Message: data}) }) if err := g.Wait(); err != nil { e.logger.Error().Err(err).Msg("failed to broadcast header and/data") diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index 72f942432d..86fc95dee1 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -86,7 +86,7 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC } return err } - header := &p2pHeader.SignedHeader + header := p2pHeader.Message if err := h.assertExpectedProposer(header.ProposerAddress); err != nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("invalid header from P2P") return err @@ -99,7 +99,7 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC } return err } - data := &p2pData.Data + data := p2pData.Message dataCommitment := data.DACommitment() if !bytes.Equal(header.DataHash[:], dataCommitment[:]) { err := fmt.Errorf("data hash mismatch: header %x, data %x", header.DataHash, dataCommitment) diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index b0d2dbdabc..0e0604944b 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -50,7 +50,7 @@ func p2pMakeSignedHeader(t *testing.T, chainID string, height uint64, proposer [ sig, err := signer.Sign(bz) require.NoError(t, err, "failed to sign header bytes") hdr.Signature = sig - return &types.P2PSignedHeader{SignedHeader: *hdr} + return &types.P2PSignedHeader{Message: hdr} } // P2PTestData aggregates dependencies used by P2P handler tests. @@ -128,13 +128,13 @@ func TestP2PHandler_ProcessHeight_EmitsEventWhenHeaderAndDataPresent(t *testing. 
require.Equal(t, string(p.Genesis.ProposerAddress), string(p.ProposerAddr)) header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 5, p.ProposerAddr, p.ProposerPub, p.Signer) - data := &types.P2PData{Data: *makeData(p.Genesis.ChainID, 5, 1)} - header.DataHash = data.DACommitment() - bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) + data := &types.P2PData{Message: makeData(p.Genesis.ChainID, 5, 1)} + header.Message.DataHash = data.Message.DACommitment() + bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Message.Header) require.NoError(t, err) sig, err := p.Signer.Sign(bz) require.NoError(t, err) - header.Signature = sig + header.Message.Signature = sig p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(5)).Return(header, 0, nil).Once() p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(5)).Return(data, 0, nil).Once() @@ -154,13 +154,13 @@ func TestP2PHandler_ProcessHeight_SkipsWhenDataMissing(t *testing.T) { ctx := context.Background() header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 7, p.ProposerAddr, p.ProposerPub, p.Signer) - data := &types.P2PData{Data: *makeData(p.Genesis.ChainID, 7, 1)} - header.DataHash = data.DACommitment() - bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) + data := &types.P2PData{Message: makeData(p.Genesis.ChainID, 7, 1)} + header.Message.DataHash = data.Message.DACommitment() + bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Message.Header) require.NoError(t, err) sig, err := p.Signer.Sign(bz) require.NoError(t, err) - header.Signature = sig + header.Message.Signature = sig p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(7)).Return(header, 0, nil).Once() p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(7)).Return(nil, 0, errors.New("missing")).Once() @@ -195,7 +195,7 @@ func TestP2PHandler_ProcessHeight_SkipsOnProposerMismatch(t *testing.T) { require.NotEqual(t, string(p.Genesis.ProposerAddress), string(badAddr)) header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 11, badAddr, pub, signer) - header.DataHash = common.DataHashForEmptyTxs + header.Message.DataHash = common.DataHashForEmptyTxs p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(11)).Return(header, 0, nil).Once() @@ -224,13 +224,13 @@ func TestP2PHandler_ProcessedHeightSkipsPreviouslyHandledBlocks(t *testing.T) { // Height 6 should be fetched normally. 
header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 6, p.ProposerAddr, p.ProposerPub, p.Signer) - data := &types.P2PData{Data: *makeData(p.Genesis.ChainID, 6, 1)} - header.DataHash = data.DACommitment() - bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) + data := &types.P2PData{Message: makeData(p.Genesis.ChainID, 6, 1)} + header.Message.DataHash = data.Message.DACommitment() + bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Message.Header) require.NoError(t, err) sig, err := p.Signer.Sign(bz) require.NoError(t, err) - header.Signature = sig + header.Message.Signature = sig p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(6)).Return(header, 0, nil).Once() p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(6)).Return(data, 0, nil).Once() @@ -247,13 +247,13 @@ func TestP2PHandler_SetProcessedHeightPreventsDuplicates(t *testing.T) { ctx := context.Background() header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 8, p.ProposerAddr, p.ProposerPub, p.Signer) - data := &types.P2PData{Data: *makeData(p.Genesis.ChainID, 8, 0)} - header.DataHash = data.DACommitment() - bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) + data := &types.P2PData{Message: makeData(p.Genesis.ChainID, 8, 0)} + header.Message.DataHash = data.Message.DACommitment() + bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Message.Header) require.NoError(t, err) sig, err := p.Signer.Sign(bz) require.NoError(t, err) - header.Signature = sig + header.Message.Signature = sig p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(8)).Return(header, 0, nil).Once() p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(8)).Return(data, 0, nil).Once() diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 33c151b0e0..0b419dc17a 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -586,12 +586,12 @@ func (s *Syncer) processHeightEvent(event *common.DAHeightEvent) { g.Go(func() error { // broadcast header locally only — prevents spamming the p2p network with old height notifications, // allowing the syncer to update its target and fill missing blocks - return s.headerStore.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: *event.Header}, pubsub.WithLocalPublication(true)) + return s.headerStore.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{Message: event.Header}, pubsub.WithLocalPublication(true)) }) g.Go(func() error { // broadcast data locally only — prevents spamming the p2p network with old height notifications, // allowing the syncer to update its target and fill missing blocks - return s.dataStore.WriteToStoreAndBroadcast(ctx, &types.P2PData{Data: *event.Data}, pubsub.WithLocalPublication(true)) + return s.dataStore.WriteToStoreAndBroadcast(ctx, &types.P2PData{Message: event.Data}, pubsub.WithLocalPublication(true)) }) if err := g.Wait(); err != nil { s.logger.Error().Err(err).Msg("failed to append event header and/or data to p2p store") diff --git a/docs/guides/migrating-to-ev-abci.md b/docs/guides/migrating-to-ev-abci.md index f49ba6df6f..eb6abcd9e0 100644 --- a/docs/guides/migrating-to-ev-abci.md +++ b/docs/guides/migrating-to-ev-abci.md @@ -41,9 +41,9 @@ import ( ) ``` -2. Add the migration manager keeper to your app struct -3. Register the module in your module manager -4. Configure the migration manager in your app initialization +1. Add the migration manager keeper to your app struct +2. 
Register the module in your module manager +3. Configure the migration manager in your app initialization ### Step 2: Replace Staking Module with Wrapper diff --git a/pkg/rpc/client/client_test.go b/pkg/rpc/client/client_test.go index fd3d1f2dc0..3841156635 100644 --- a/pkg/rpc/client/client_test.go +++ b/pkg/rpc/client/client_test.go @@ -253,7 +253,7 @@ func TestClientGetNamespace(t *testing.T) { func testSignedHeader(height uint64, ts time.Time) *types.P2PSignedHeader { return &types.P2PSignedHeader{ - SignedHeader: types.SignedHeader{ + Message: &types.SignedHeader{ Header: types.Header{ BaseHeader: types.BaseHeader{ Height: height, @@ -270,7 +270,7 @@ func testSignedHeader(height uint64, ts time.Time) *types.P2PSignedHeader { func testData(height uint64, ts time.Time) *types.P2PData { return &types.P2PData{ - Data: types.Data{ + Message: &types.Data{ Metadata: &types.Metadata{ ChainID: "test-chain", Height: height, diff --git a/pkg/rpc/server/server_test.go b/pkg/rpc/server/server_test.go index 842adea560..42a9812479 100644 --- a/pkg/rpc/server/server_test.go +++ b/pkg/rpc/server/server_test.go @@ -629,7 +629,7 @@ func TestHealthReadyEndpoint(t *testing.T) { func makeTestSignedHeader(height uint64, ts time.Time) *types.P2PSignedHeader { return &types.P2PSignedHeader{ - SignedHeader: types.SignedHeader{ + Message: &types.SignedHeader{ Header: types.Header{ BaseHeader: types.BaseHeader{ Height: height, @@ -646,7 +646,7 @@ func makeTestSignedHeader(height uint64, ts time.Time) *types.P2PSignedHeader { func makeTestData(height uint64, ts time.Time) *types.P2PData { return &types.P2PData{ - Data: types.Data{ + Message: &types.Data{ Metadata: &types.Metadata{ ChainID: "test-chain", Height: height, diff --git a/pkg/sync/da_hint_container.go b/pkg/sync/da_hint_container.go deleted file mode 100644 index dc52fcb953..0000000000 --- a/pkg/sync/da_hint_container.go +++ /dev/null @@ -1,64 +0,0 @@ -package sync - -import ( - "time" - - "github.com/celestiaorg/go-header" -) - -type DAHeightHintContainer[H header.Header[H]] struct { - Entry H - DAHeightHint uint64 -} - -func (s *DAHeightHintContainer[H]) ChainID() string { - return s.Entry.ChainID() -} - -func (s *DAHeightHintContainer[H]) Hash() header.Hash { - return s.Entry.Hash() -} - -func (s *DAHeightHintContainer[H]) Height() uint64 { - return s.Entry.Height() -} - -func (s *DAHeightHintContainer[H]) LastHeader() header.Hash { - return s.Entry.LastHeader() -} - -func (s *DAHeightHintContainer[H]) Time() time.Time { - return s.Entry.Time() -} - -func (s *DAHeightHintContainer[H]) Validate() error { - return s.Entry.Validate() -} - -func (s *DAHeightHintContainer[H]) New() *DAHeightHintContainer[H] { - var empty H - return &DAHeightHintContainer[H]{Entry: empty.New()} -} - -func (sh *DAHeightHintContainer[H]) Verify(untrstH *DAHeightHintContainer[H]) error { - return sh.Entry.Verify(untrstH.Entry) -} - -func (s *DAHeightHintContainer[H]) SetDAHint(daHeight uint64) { - s.DAHeightHint = daHeight -} -func (s *DAHeightHintContainer[H]) DAHint() uint64 { - return s.DAHeightHint -} - -func (s *DAHeightHintContainer[H]) IsZero() bool { - return s == nil -} - -func (s *DAHeightHintContainer[H]) MarshalBinary() ([]byte, error) { - return s.Entry.MarshalBinary() -} - -func (s *DAHeightHintContainer[H]) UnmarshalBinary(data []byte) error { - return s.Entry.UnmarshalBinary(data) -} diff --git a/pkg/sync/sync_service_test.go b/pkg/sync/sync_service_test.go index 048d19d6cd..d8d5cd57ee 100644 --- a/pkg/sync/sync_service_test.go +++ 
b/pkg/sync/sync_service_test.go @@ -73,12 +73,12 @@ func TestHeaderSyncServiceRestart(t *testing.T) { signedHeader, err := types.GetRandomSignedHeaderCustom(&headerConfig, genesisDoc.ChainID) require.NoError(t, err) require.NoError(t, signedHeader.Validate()) - require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: *signedHeader})) + require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{Message: signedHeader})) for i := genesisDoc.InitialHeight + 1; i < 2; i++ { signedHeader = nextHeader(t, signedHeader, genesisDoc.ChainID, noopSigner) t.Logf("signed header: %d", i) - require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: *signedHeader})) + require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{Message: signedHeader})) } // then stop and restart service @@ -109,7 +109,7 @@ func TestHeaderSyncServiceRestart(t *testing.T) { for i := signedHeader.Height() + 1; i < 2; i++ { signedHeader = nextHeader(t, signedHeader, genesisDoc.ChainID, noopSigner) t.Logf("signed header: %d", i) - require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: *signedHeader})) + require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{Message: signedHeader})) } cancel() } @@ -164,7 +164,7 @@ func TestHeaderSyncServiceInitFromHigherHeight(t *testing.T) { require.NoError(t, err) require.NoError(t, signedHeader.Validate()) - require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: *signedHeader})) + require.NoError(t, svc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{Message: signedHeader})) } func TestDAHintStorageHeader(t *testing.T) { @@ -215,7 +215,7 @@ func TestDAHintStorageHeader(t *testing.T) { require.NoError(t, err) require.NoError(t, signedHeader.Validate()) - require.NoError(t, headerSvc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{SignedHeader: *signedHeader})) + require.NoError(t, headerSvc.WriteToStoreAndBroadcast(ctx, &types.P2PSignedHeader{Message: signedHeader})) daHeight := uint64(100) require.NoError(t, headerSvc.AppendDAHint(ctx, daHeight, signedHeader.Hash())) @@ -306,7 +306,7 @@ func TestDAHintStorageData(t *testing.T) { }, } - require.NoError(t, dataSvc.WriteToStoreAndBroadcast(ctx, &types.P2PData{Data: data})) + require.NoError(t, dataSvc.WriteToStoreAndBroadcast(ctx, &types.P2PData{Message: &data})) daHeight := uint64(100) require.NoError(t, dataSvc.AppendDAHint(ctx, daHeight, data.Hash())) diff --git a/types/binary_compatibility_test.go b/types/binary_compatibility_test.go index a8e8253d4e..86f4cf1e9e 100644 --- a/types/binary_compatibility_test.go +++ b/types/binary_compatibility_test.go @@ -14,13 +14,13 @@ func TestSignedHeaderBinaryCompatibility(t *testing.T) { bytes, err := signedHeader.MarshalBinary() require.NoError(t, err) - var p2pHeader P2PSignedHeader + p2pHeader := (&P2PSignedHeader{}).New() err = p2pHeader.UnmarshalBinary(bytes) require.NoError(t, err) - assert.Equal(t, signedHeader.Header, p2pHeader.Header) - assert.Equal(t, signedHeader.Signature, p2pHeader.Signature) - assert.Equal(t, signedHeader.Signer, p2pHeader.Signer) + assert.Equal(t, signedHeader.Header, p2pHeader.Message.Header) + assert.Equal(t, signedHeader.Signature, p2pHeader.Message.Signature) + assert.Equal(t, signedHeader.Signer, p2pHeader.Message.Signer) assert.Zero(t, p2pHeader.DAHeightHint) p2pHeader.DAHeightHint = 100 @@ -51,12 +51,12 @@ func TestDataBinaryCompatibility(t *testing.T) { bytes, err := 
data.MarshalBinary() require.NoError(t, err) - var p2pData P2PData + p2pData := (&P2PData{}).New() err = p2pData.UnmarshalBinary(bytes) require.NoError(t, err) - assert.Equal(t, data.Metadata, p2pData.Metadata) - assert.Equal(t, data.Txs, p2pData.Txs) + assert.Equal(t, data.Metadata, p2pData.Message.Metadata) + assert.Equal(t, data.Txs, p2pData.Message.Txs) assert.Zero(t, p2pData.DAHeightHint) p2pData.DAHeightHint = 200 diff --git a/types/p2p_data.go b/types/p2p_data.go deleted file mode 100644 index d57671a081..0000000000 --- a/types/p2p_data.go +++ /dev/null @@ -1,80 +0,0 @@ -package types - -import ( - "errors" - - "github.com/celestiaorg/go-header" - "google.golang.org/protobuf/proto" - - pb "github.com/evstack/ev-node/types/pb/evnode/v1" -) - -var _ header.Header[*P2PData] = &P2PData{} - -type P2PData struct { - Data - DAHeightHint uint64 -} - -func (d *P2PData) New() *P2PData { - return new(P2PData) -} - -func (d *P2PData) IsZero() bool { - return d == nil || d.Data.IsZero() -} - -func (d *P2PData) Verify(untrstD *P2PData) error { - return d.Data.Verify(&untrstD.Data) -} - -func (d *P2PData) SetDAHint(daHeight uint64) { - d.DAHeightHint = daHeight -} - -func (d *P2PData) DAHint() uint64 { - return d.DAHeightHint -} - -func (d *P2PData) MarshalBinary() ([]byte, error) { - msg, err := d.ToProto() - if err != nil { - return nil, err - } - return proto.Marshal(msg) -} - -func (d *P2PData) UnmarshalBinary(data []byte) error { - var pData pb.P2PData - if err := proto.Unmarshal(data, &pData); err != nil { - return err - } - return d.FromProto(&pData) -} - -func (d *P2PData) ToProto() (*pb.P2PData, error) { - pData := d.Data.ToProto() - return &pb.P2PData{ - Metadata: pData.Metadata, - Txs: pData.Txs, - DaHeightHint: &d.DAHeightHint, - }, nil -} - -func (d *P2PData) FromProto(other *pb.P2PData) error { - if other == nil { - return errors.New("P2PData is nil") - } - - pData := &pb.Data{ - Metadata: other.Metadata, - Txs: other.Txs, - } - if err := d.Data.FromProto(pData); err != nil { - return err - } - if other.DaHeightHint != nil { - d.DAHeightHint = *other.DaHeightHint - } - return nil -} diff --git a/types/p2p_envelope.go b/types/p2p_envelope.go new file mode 100644 index 0000000000..eb76e9f896 --- /dev/null +++ b/types/p2p_envelope.go @@ -0,0 +1,153 @@ +package types + +import ( + "fmt" + "time" + + "github.com/celestiaorg/go-header" + "google.golang.org/protobuf/proto" + + pb "github.com/evstack/ev-node/types/pb/evnode/v1" +) + +type ( + P2PSignedHeader = P2PEnvelope[*SignedHeader] + P2PData = P2PEnvelope[*Data] +) + +var ( + _ header.Header[*P2PData] = &P2PData{} + _ header.Header[*P2PSignedHeader] = &P2PSignedHeader{} +) + +// P2PEnvelope is a generic envelope for P2P messages that includes a DA height hint. +type P2PEnvelope[H header.Header[H]] struct { + Message H + DAHeightHint uint64 +} + +// New creates a new P2PEnvelope. +func (e *P2PEnvelope[H]) New() *P2PEnvelope[H] { + var empty H + return &P2PEnvelope[H]{Message: empty.New()} +} + +// IsZero checks if the envelope or its message is zero. +func (e *P2PEnvelope[H]) IsZero() bool { + return e == nil || e.Message.IsZero() +} + +// SetDAHint sets the DA height hint. +func (e *P2PEnvelope[H]) SetDAHint(daHeight uint64) { + e.DAHeightHint = daHeight +} + +// DAHint returns the DA height hint. +func (e *P2PEnvelope[H]) DAHint() uint64 { + return e.DAHeightHint +} + +// Verify verifies the envelope message against an untrusted envelope. 
+func (e *P2PEnvelope[H]) Verify(untrst *P2PEnvelope[H]) error { + return e.Message.Verify(untrst.Message) +} + +// ChainID returns the ChainID of the message. +func (e *P2PEnvelope[H]) ChainID() string { + return e.Message.ChainID() +} + +// Height returns the Height of the message. +func (e *P2PEnvelope[H]) Height() uint64 { + return e.Message.Height() +} + +// LastHeader returns the LastHeader hash of the message. +func (e *P2PEnvelope[H]) LastHeader() Hash { + return e.Message.LastHeader() +} + +// Time returns the Time of the message. +func (e *P2PEnvelope[H]) Time() time.Time { + return e.Message.Time() +} + +// Hash returns the hash of the message. +func (e *P2PEnvelope[H]) Hash() Hash { + return e.Message.Hash() +} + +// Validate performs basic validation on the message. +func (e *P2PEnvelope[H]) Validate() error { + return e.Message.Validate() +} + +// MarshalBinary marshals the envelope to binary. +func (e *P2PEnvelope[H]) MarshalBinary() ([]byte, error) { + var mirrorPb proto.Message + + switch msg := any(e.Message).(type) { + case *Data: + pData := msg.ToProto() + mirrorPb = &pb.P2PData{ + Metadata: pData.Metadata, + Txs: pData.Txs, + DaHeightHint: &e.DAHeightHint, + } + case *SignedHeader: + psh, err := msg.ToProto() + if err != nil { + return nil, err + } + mirrorPb = &pb.P2PSignedHeader{ + Header: psh.Header, + Signature: psh.Signature, + Signer: psh.Signer, + DaHeightHint: &e.DAHeightHint, + } + default: + return nil, fmt.Errorf("unsupported type for toProto: %T", msg) + } + return proto.Marshal(mirrorPb) +} + +// UnmarshalBinary unmarshals the envelope from binary. +func (e *P2PEnvelope[H]) UnmarshalBinary(data []byte) error { + switch target := any(e.Message).(type) { + case *Data: + var pData pb.P2PData + if err := proto.Unmarshal(data, &pData); err != nil { + return err + } + mirrorData := &pb.Data{ + Metadata: pData.Metadata, + Txs: pData.Txs, + } + if err := target.FromProto(mirrorData); err != nil { + return err + } + if pData.DaHeightHint != nil { + e.DAHeightHint = *pData.DaHeightHint + } + return nil + case *SignedHeader: + var pHeader pb.P2PSignedHeader + if err := proto.Unmarshal(data, &pHeader); err != nil { + return err + } + psh := &pb.SignedHeader{ + Header: pHeader.Header, + Signature: pHeader.Signature, + Signer: pHeader.Signer, + } + if err := target.FromProto(psh); err != nil { + return err + } + if pHeader.DaHeightHint != nil { + e.DAHeightHint = *pHeader.DaHeightHint + } + return nil + default: + return fmt.Errorf("unsupported type for UnmarshalBinary: %T", target) + } +} diff --git a/types/p2p_envelope_test.go b/types/p2p_envelope_test.go new file mode 100644 index 0000000000..f0949f66b8 --- /dev/null +++ b/types/p2p_envelope_test.go @@ -0,0 +1,85 @@ +package types + +import ( + "crypto/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestP2PEnvelope_MarshalUnmarshal(t *testing.T) { + // Create a P2PData envelope + data := &Data{ + Metadata: &Metadata{ + ChainID: "test-chain", + Height: 10, + Time: uint64(time.Now().UnixNano()), + }, + Txs: nil, + } + envelope := &P2PData{ + Message: data, + DAHeightHint: 100, + } + + // Marshaling + bytes, err := envelope.MarshalBinary() + require.NoError(t, err) + assert.NotEmpty(t, bytes) + + // Unmarshaling + newEnvelope := (&P2PData{}).New() + err = newEnvelope.UnmarshalBinary(bytes) + require.NoError(t, err) + assert.Equal(t, envelope.DAHeightHint, newEnvelope.DAHeightHint) + assert.Equal(t, envelope.Message.Height(), 
newEnvelope.Message.Height()) + assert.Equal(t, envelope.Message.ChainID(), newEnvelope.Message.ChainID()) +} + +func TestP2PSignedHeader_MarshalUnmarshal(t *testing.T) { + // Create a SignedHeader + // Minimal valid SignedHeader + header := &SignedHeader{ + Header: Header{ + BaseHeader: BaseHeader{ + ChainID: "test-chain", + Height: 5, + Time: uint64(time.Now().UnixNano()), + }, + Version: Version{ + Block: 1, + App: 2, + }, + DataHash: make([]byte, 32), + }, + Signature: make([]byte, 64), + Signer: Signer{ + // PubKey can be nil for basic marshal check + Address: make([]byte, 20), + }, + } + _, _ = rand.Read(header.DataHash) + _, _ = rand.Read(header.Signature) + _, _ = rand.Read(header.Signer.Address) + + envelope := &P2PSignedHeader{ + Message: header, + DAHeightHint: 200, + } + + // Marshaling + bytes, err := envelope.MarshalBinary() + require.NoError(t, err) + assert.NotEmpty(t, bytes) + + // Unmarshaling + newEnvelope := (&P2PSignedHeader{}).New() + err = newEnvelope.UnmarshalBinary(bytes) + require.NoError(t, err) + assert.Equal(t, envelope.DAHeightHint, newEnvelope.DAHeightHint) + assert.Equal(t, envelope.Message.Height(), newEnvelope.Message.Height()) + assert.Equal(t, envelope.Message.ChainID(), newEnvelope.Message.ChainID()) + // Deep comparison of structs if needed +} diff --git a/types/p2p_signed_header.go b/types/p2p_signed_header.go deleted file mode 100644 index fe1ce807ba..0000000000 --- a/types/p2p_signed_header.go +++ /dev/null @@ -1,85 +0,0 @@ -package types - -import ( - "errors" - - "github.com/celestiaorg/go-header" - "google.golang.org/protobuf/proto" - - pb "github.com/evstack/ev-node/types/pb/evnode/v1" -) - -var _ header.Header[*P2PSignedHeader] = &P2PSignedHeader{} - -type P2PSignedHeader struct { - SignedHeader - DAHeightHint uint64 -} - -func (sh *P2PSignedHeader) New() *P2PSignedHeader { - return new(P2PSignedHeader) -} - -func (sh *P2PSignedHeader) IsZero() bool { - return sh == nil || sh.SignedHeader.IsZero() -} - -func (sh *P2PSignedHeader) Verify(untrstH *P2PSignedHeader) error { - return sh.SignedHeader.Verify(&untrstH.SignedHeader) -} - -func (sh *P2PSignedHeader) SetDAHint(daHeight uint64) { - sh.DAHeightHint = daHeight -} - -func (sh *P2PSignedHeader) DAHint() uint64 { - return sh.DAHeightHint -} - -func (sh *P2PSignedHeader) MarshalBinary() ([]byte, error) { - msg, err := sh.ToProto() - if err != nil { - return nil, err - } - return proto.Marshal(msg) -} - -func (sh *P2PSignedHeader) UnmarshalBinary(data []byte) error { - var pHeader pb.P2PSignedHeader - if err := proto.Unmarshal(data, &pHeader); err != nil { - return err - } - return sh.FromProto(&pHeader) -} - -func (sh *P2PSignedHeader) ToProto() (*pb.P2PSignedHeader, error) { - psh, err := sh.SignedHeader.ToProto() - if err != nil { - return nil, err - } - return &pb.P2PSignedHeader{ - Header: psh.Header, - Signature: psh.Signature, - Signer: psh.Signer, - DaHeightHint: &sh.DAHeightHint, - }, nil -} - -func (sh *P2PSignedHeader) FromProto(other *pb.P2PSignedHeader) error { - if other == nil { - return errors.New("P2PSignedHeader is nil") - } - // Reconstruct SignedHeader - psh := &pb.SignedHeader{ - Header: other.Header, - Signature: other.Signature, - Signer: other.Signer, - } - if err := sh.SignedHeader.FromProto(psh); err != nil { - return err - } - if other.DaHeightHint != nil { - sh.DAHeightHint = *other.DaHeightHint - } - return nil -} From 7abfecc9be722697d380573ae878c29c106264cc Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Fri, 19 Dec 2025 17:30:09 +0100 Subject: [PATCH 12/13] Minor 
cleanup --- types/binary_compatibility_test.go | 72 -------------------- types/p2p_envelope_test.go | 103 +++++++++++++++++++++++------ 2 files changed, 81 insertions(+), 94 deletions(-) delete mode 100644 types/binary_compatibility_test.go diff --git a/types/binary_compatibility_test.go b/types/binary_compatibility_test.go deleted file mode 100644 index 86f4cf1e9e..0000000000 --- a/types/binary_compatibility_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package types - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestSignedHeaderBinaryCompatibility(t *testing.T) { - signedHeader, _, err := GetRandomSignedHeader("chain-id") - require.NoError(t, err) - bytes, err := signedHeader.MarshalBinary() - require.NoError(t, err) - - p2pHeader := (&P2PSignedHeader{}).New() - err = p2pHeader.UnmarshalBinary(bytes) - require.NoError(t, err) - - assert.Equal(t, signedHeader.Header, p2pHeader.Message.Header) - assert.Equal(t, signedHeader.Signature, p2pHeader.Message.Signature) - assert.Equal(t, signedHeader.Signer, p2pHeader.Message.Signer) - assert.Zero(t, p2pHeader.DAHeightHint) - - p2pHeader.DAHeightHint = 100 - p2pBytes, err := p2pHeader.MarshalBinary() - require.NoError(t, err) - - var decodedSignedHeader SignedHeader - err = decodedSignedHeader.UnmarshalBinary(p2pBytes) - require.NoError(t, err) - assert.Equal(t, signedHeader.Header, decodedSignedHeader.Header) - assert.Equal(t, signedHeader.Signature, decodedSignedHeader.Signature) - assert.Equal(t, signedHeader.Signer, decodedSignedHeader.Signer) -} - -func TestDataBinaryCompatibility(t *testing.T) { - data := &Data{ - Metadata: &Metadata{ - ChainID: "chain-id", - Height: 10, - Time: uint64(time.Now().UnixNano()), - LastDataHash: []byte("last-hash"), - }, - Txs: Txs{ - []byte("tx1"), - []byte("tx2"), - }, - } - bytes, err := data.MarshalBinary() - require.NoError(t, err) - - p2pData := (&P2PData{}).New() - err = p2pData.UnmarshalBinary(bytes) - require.NoError(t, err) - - assert.Equal(t, data.Metadata, p2pData.Message.Metadata) - assert.Equal(t, data.Txs, p2pData.Message.Txs) - assert.Zero(t, p2pData.DAHeightHint) - - p2pData.DAHeightHint = 200 - - p2pBytes, err := p2pData.MarshalBinary() - require.NoError(t, err) - - var decodedData Data - err = decodedData.UnmarshalBinary(p2pBytes) - require.NoError(t, err) - assert.Equal(t, data.Metadata, decodedData.Metadata) - assert.Equal(t, data.Txs, decodedData.Txs) -} diff --git a/types/p2p_envelope_test.go b/types/p2p_envelope_test.go index f0949f66b8..9433d5a1aa 100644 --- a/types/p2p_envelope_test.go +++ b/types/p2p_envelope_test.go @@ -1,7 +1,7 @@ package types import ( - "crypto/rand" + "bytes" "testing" "time" @@ -13,11 +13,12 @@ func TestP2PEnvelope_MarshalUnmarshal(t *testing.T) { // Create a P2PData envelope data := &Data{ Metadata: &Metadata{ - ChainID: "test-chain", - Height: 10, - Time: uint64(time.Now().UnixNano()), + ChainID: "test-chain", + Height: 10, + Time: uint64(time.Now().UnixNano()), + LastDataHash: bytes.Repeat([]byte{0x1}, 32), }, - Txs: nil, + Txs: Txs{[]byte{0x1}, []byte{0x2}}, } envelope := &P2PData{ Message: data, @@ -36,11 +37,11 @@ func TestP2PEnvelope_MarshalUnmarshal(t *testing.T) { assert.Equal(t, envelope.DAHeightHint, newEnvelope.DAHeightHint) assert.Equal(t, envelope.Message.Height(), newEnvelope.Message.Height()) assert.Equal(t, envelope.Message.ChainID(), newEnvelope.Message.ChainID()) + assert.Equal(t, envelope.Message.LastDataHash, newEnvelope.Message.LastDataHash) + assert.Equal(t, 
envelope.Message.Txs, newEnvelope.Message.Txs) } func TestP2PSignedHeader_MarshalUnmarshal(t *testing.T) { - // Create a SignedHeader - // Minimal valid SignedHeader header := &SignedHeader{ Header: Header{ BaseHeader: BaseHeader{ @@ -52,17 +53,14 @@ func TestP2PSignedHeader_MarshalUnmarshal(t *testing.T) { Block: 1, App: 2, }, - DataHash: make([]byte, 32), - }, - Signature: make([]byte, 64), - Signer: Signer{ - // PubKey can be nil for basic marshal check - Address: make([]byte, 20), + LastHeaderHash: GetRandomBytes(32), + DataHash: GetRandomBytes(32), + AppHash: GetRandomBytes(32), + ProposerAddress: GetRandomBytes(32), + ValidatorHash: GetRandomBytes(32), }, + // Signature and Signer are transient } - _, _ = rand.Read(header.DataHash) - _, _ = rand.Read(header.Signature) - _, _ = rand.Read(header.Signer.Address) envelope := &P2PSignedHeader{ Message: header, @@ -70,16 +68,77 @@ func TestP2PSignedHeader_MarshalUnmarshal(t *testing.T) { } // Marshaling - bytes, err := envelope.MarshalBinary() + bz, err := envelope.MarshalBinary() require.NoError(t, err) - assert.NotEmpty(t, bytes) + assert.NotEmpty(t, bz) // Unmarshaling newEnvelope := (&P2PSignedHeader{}).New() - err = newEnvelope.UnmarshalBinary(bytes) + err = newEnvelope.UnmarshalBinary(bz) require.NoError(t, err) assert.Equal(t, envelope.DAHeightHint, newEnvelope.DAHeightHint) - assert.Equal(t, envelope.Message.Height(), newEnvelope.Message.Height()) - assert.Equal(t, envelope.Message.ChainID(), newEnvelope.Message.ChainID()) - // Deep comparison of structs if needed + assert.Equal(t, envelope, newEnvelope) +} + +func TestSignedHeaderBinaryCompatibility(t *testing.T) { + signedHeader, _, err := GetRandomSignedHeader("chain-id") + require.NoError(t, err) + bytes, err := signedHeader.MarshalBinary() + require.NoError(t, err) + + p2pHeader := (&P2PSignedHeader{}).New() + err = p2pHeader.UnmarshalBinary(bytes) + require.NoError(t, err) + + assert.Equal(t, signedHeader.Header, p2pHeader.Message.Header) + assert.Equal(t, signedHeader.Signature, p2pHeader.Message.Signature) + assert.Equal(t, signedHeader.Signer, p2pHeader.Message.Signer) + assert.Zero(t, p2pHeader.DAHeightHint) + + p2pHeader.DAHeightHint = 100 + p2pBytes, err := p2pHeader.MarshalBinary() + require.NoError(t, err) + + var decodedSignedHeader SignedHeader + err = decodedSignedHeader.UnmarshalBinary(p2pBytes) + require.NoError(t, err) + assert.Equal(t, signedHeader.Header, decodedSignedHeader.Header) + assert.Equal(t, signedHeader.Signature, decodedSignedHeader.Signature) + assert.Equal(t, signedHeader.Signer, decodedSignedHeader.Signer) +} + +func TestDataBinaryCompatibility(t *testing.T) { + data := &Data{ + Metadata: &Metadata{ + ChainID: "chain-id", + Height: 10, + Time: uint64(time.Now().UnixNano()), + LastDataHash: []byte("last-hash"), + }, + Txs: Txs{ + []byte("tx1"), + []byte("tx2"), + }, + } + bytes, err := data.MarshalBinary() + require.NoError(t, err) + + p2pData := (&P2PData{}).New() + err = p2pData.UnmarshalBinary(bytes) + require.NoError(t, err) + + assert.Equal(t, data.Metadata, p2pData.Message.Metadata) + assert.Equal(t, data.Txs, p2pData.Message.Txs) + assert.Zero(t, p2pData.DAHeightHint) + + p2pData.DAHeightHint = 200 + + p2pBytes, err := p2pData.MarshalBinary() + require.NoError(t, err) + + var decodedData Data + err = decodedData.UnmarshalBinary(p2pBytes) + require.NoError(t, err) + assert.Equal(t, data.Metadata, decodedData.Metadata) + assert.Equal(t, data.Txs, decodedData.Txs) } From e59384851eddee74c86fc50ea508d280a9eeb62d Mon Sep 17 00:00:00 2001 From: 
Alex Peters Date: Fri, 19 Dec 2025 17:46:37 +0100 Subject: [PATCH 13/13] Better test data (cherry picked from commit ad3e21b79e8e57e3f17c9e4d16868bff3c18b9f8) --- types/p2p_envelope_test.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/types/p2p_envelope_test.go b/types/p2p_envelope_test.go index 9433d5a1aa..3dc2127fed 100644 --- a/types/p2p_envelope_test.go +++ b/types/p2p_envelope_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -42,6 +43,9 @@ func TestP2PEnvelope_MarshalUnmarshal(t *testing.T) { } func TestP2PSignedHeader_MarshalUnmarshal(t *testing.T) { + _, pubKey, err := crypto.GenerateEd25519Key(nil) + require.NoError(t, err) + header := &SignedHeader{ Header: Header{ BaseHeader: BaseHeader{ @@ -59,7 +63,11 @@ func TestP2PSignedHeader_MarshalUnmarshal(t *testing.T) { ProposerAddress: GetRandomBytes(32), ValidatorHash: GetRandomBytes(32), }, - // Signature and Signer are transient + Signature: GetRandomBytes(64), + Signer: Signer{ + PubKey: pubKey, + Address: GetRandomBytes(20), + }, } envelope := &P2PSignedHeader{ @@ -77,6 +85,7 @@ func TestP2PSignedHeader_MarshalUnmarshal(t *testing.T) { err = newEnvelope.UnmarshalBinary(bz) require.NoError(t, err) assert.Equal(t, envelope.DAHeightHint, newEnvelope.DAHeightHint) + assert.Equal(t, envelope.Message.Signer, newEnvelope.Message.Signer) assert.Equal(t, envelope, newEnvelope) }
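
For reference, a minimal, self-contained sketch of the round-trip that the new types/p2p_envelope.go and p2p_envelope_test.go in this series exercise. It assumes the types package as changed by these patches (P2PData = P2PEnvelope[*types.Data], the Message/DAHeightHint fields, and the SetDAHint/DAHint/MarshalBinary/UnmarshalBinary/New methods); the concrete values and the main wrapper are illustrative only, not part of the patch.

package main

import (
	"fmt"
	"time"

	"github.com/evstack/ev-node/types"
)

func main() {
	// Build a plain Data payload, as in the envelope tests above.
	data := &types.Data{
		Metadata: &types.Metadata{
			ChainID: "test-chain",
			Height:  10,
			Time:    uint64(time.Now().UnixNano()),
		},
		Txs: types.Txs{[]byte("tx1")},
	}

	// Wrap it in the generic envelope and attach the DA height hint
	// that travels alongside the payload on the p2p store.
	env := &types.P2PData{Message: data}
	env.SetDAHint(100)

	// MarshalBinary emits the pb.P2PData mirror; per the binary
	// compatibility tests, the wire format stays readable by types.Data.
	bz, err := env.MarshalBinary()
	if err != nil {
		panic(err)
	}

	// New() allocates the inner *types.Data before decoding.
	decoded := (&types.P2PData{}).New()
	if err := decoded.UnmarshalBinary(bz); err != nil {
		panic(err)
	}

	// Height() delegates to the wrapped message; DAHint() returns the hint.
	fmt.Println(decoded.Height(), decoded.DAHint()) // 10 100
}

The same pattern applies to headers via types.P2PSignedHeader{Message: signedHeader}, which is how the executor and syncer hand blocks to the broadcasters in the diffs above.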