diff --git a/.mockery.yaml b/.mockery.yaml index 2694096012..8f139231cb 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -48,11 +48,11 @@ packages: filename: external/hstore.go github.com/evstack/ev-node/block/internal/syncing: interfaces: - daRetriever: + DARetriever: config: dir: ./block/internal/syncing pkgname: syncing - filename: syncer_mock.go + filename: da_retriever_mock.go p2pHandler: config: dir: ./block/internal/syncing diff --git a/CHANGELOG.md b/CHANGELOG.md index 30b04aea76..b5e0bd7970 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - Enhanced health check system with separate liveness (`/health/live`) and readiness (`/health/ready`) HTTP endpoints. Readiness endpoint includes P2P listening check and aggregator block production rate validation (5x block time threshold). ([#2800](https://github.com/evstack/ev-node/pull/2800)) +- Implement forced inclusion and based sequencing ([#2797](https://github.com/evstack/ev-node/pull/2797)) + This change requires adding a `da_epoch_forced_inclusion` field to the `genesis.json` file. + To enable this feature, set the forced inclusion namespace in `evnode.yaml`. ### Changed diff --git a/apps/evm/single/cmd/run.go b/apps/evm/single/cmd/run.go index 500107af66..092cea1f15 100644 --- a/apps/evm/single/cmd/run.go +++ b/apps/evm/single/cmd/run.go @@ -7,23 +7,28 @@ import ( "os" "path/filepath" - "github.com/evstack/ev-node/core/da" - "github.com/evstack/ev-node/da/jsonrpc" - "github.com/evstack/ev-node/node" - "github.com/evstack/ev-node/sequencers/single" - "github.com/ethereum/go-ethereum/common" + "github.com/ipfs/go-datastore" + "github.com/rs/zerolog" "github.com/spf13/cobra" - "github.com/evstack/ev-node/execution/evm" - + "github.com/evstack/ev-node/block" + "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/core/execution" + coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/da/jsonrpc" + "github.com/evstack/ev-node/execution/evm" + "github.com/evstack/ev-node/node" rollcmd "github.com/evstack/ev-node/pkg/cmd" "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" genesispkg "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/p2p" "github.com/evstack/ev-node/pkg/p2p/key" "github.com/evstack/ev-node/pkg/store" + "github.com/evstack/ev-node/sequencers/based" + seqcommon "github.com/evstack/ev-node/sequencers/common" + "github.com/evstack/ev-node/sequencers/single" ) var RunCmd = &cobra.Command{ @@ -53,7 +58,7 @@ var RunCmd = &cobra.Command{ logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") - daJrpc, err := jsonrpc.NewClient(context.Background(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daJrpc, err := jsonrpc.NewClient(context.Background(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, seqcommon.AbsoluteMaxBlobSize) if err != nil { return err } @@ -73,21 +78,8 @@ var RunCmd = &cobra.Command{ logger.Warn().Msg("da_start_height is not set in genesis.json, ask your chain developer") } - singleMetrics, err := single.DefaultMetricsProvider(nodeConfig.Instrumentation.IsPrometheusEnabled())(genesis.ChainID) - if err != nil { - return err - } - - sequencer, err := single.NewSequencer( - context.Background(), - logger, - datastore, - &daJrpc.DA, - []byte(genesis.ChainID), - nodeConfig.Node.BlockTime.Duration,
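To make the changelog entry above concrete, here is a minimal configuration sketch of the two settings it mentions. The `evnode.yaml` key name is an assumption inferred from `nodeConfig.DA.GetForcedInclusionNamespace()` in this PR, and all values are placeholders:

```yaml
# genesis.json (fragment) -- epoch length for forced inclusion, measured in
# DA heights; the value 10 is only a placeholder:
#   "da_epoch_forced_inclusion": 10

# evnode.yaml (fragment) -- setting the namespace enables the feature;
# the exact key name below is an assumption, not confirmed by this diff:
da:
  forced_inclusion_namespace: "my-chain-fi" # placeholder namespace string
```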
- singleMetrics, - nodeConfig.Node.Aggregator, - ) + // Create sequencer based on configuration + sequencer, err := createSequencer(context.Background(), logger, datastore, &daJrpc.DA, nodeConfig, genesis) if err != nil { return err } @@ -111,6 +103,65 @@ func init() { addFlags(RunCmd) } +// createSequencer creates a sequencer based on the configuration. +// If BasedSequencer is enabled, it creates a based sequencer that fetches transactions from DA. +// Otherwise, it creates a single (traditional) sequencer. +func createSequencer( + ctx context.Context, + logger zerolog.Logger, + datastore datastore.Batching, + da da.DA, + nodeConfig config.Config, + genesis genesis.Genesis, +) (coresequencer.Sequencer, error) { + daClient := block.NewDAClient(da, nodeConfig, logger) + fiRetriever := block.NewForcedInclusionRetriever(daClient, genesis, logger) + + if nodeConfig.Node.BasedSequencer { + // Based sequencer mode - fetch transactions only from DA + if !nodeConfig.Node.Aggregator { + return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") + } + + basedSeq := based.NewBasedSequencer(fiRetriever, da, nodeConfig, genesis, logger) + + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). + Uint64("da_epoch", genesis.DAEpochForcedInclusion). + Msg("based sequencer initialized") + + return basedSeq, nil + } + + singleMetrics, err := single.NopMetrics() + if err != nil { + return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) + } + + sequencer, err := single.NewSequencer( + ctx, + logger, + datastore, + da, + []byte(genesis.ChainID), + nodeConfig.Node.BlockTime.Duration, + singleMetrics, + nodeConfig.Node.Aggregator, + 1000, + fiRetriever, + genesis, + ) + if err != nil { + return nil, fmt.Errorf("failed to create single sequencer: %w", err) + } + + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). 
+ Msg("single sequencer initialized") + + return sequencer, nil +} + func createExecutionClient(cmd *cobra.Command) (execution.Executor, error) { // Read execution client parameters from flags ethURL, err := cmd.Flags().GetString(evm.FlagEvmEthURL) diff --git a/apps/evm/single/go.mod b/apps/evm/single/go.mod index ab97c9e768..34cde722ff 100644 --- a/apps/evm/single/go.mod +++ b/apps/evm/single/go.mod @@ -4,7 +4,11 @@ go 1.24.6 replace github.com/celestiaorg/go-header => github.com/julienrbrt/go-header v0.0.0-20251008134330-747c8c192fa8 // TODO: to remove after https://github.com/celestiaorg/go-header/pull/347 -replace github.com/evstack/ev-node => ../../../ +replace ( + github.com/evstack/ev-node => ../../../ + github.com/evstack/ev-node/core => ../../../core + github.com/evstack/ev-node/da => ../../../da +) require ( github.com/celestiaorg/go-header v0.7.3 @@ -14,6 +18,7 @@ require ( github.com/evstack/ev-node/da v1.0.0-beta.5 github.com/evstack/ev-node/execution/evm v1.0.0-beta.3 github.com/ipfs/go-datastore v0.9.0 + github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.1 ) @@ -145,7 +150,6 @@ require ( github.com/quic-go/quic-go v0.54.1 // indirect github.com/quic-go/webtransport-go v0.9.0 // indirect github.com/rivo/uniseg v0.2.0 // indirect - github.com/rs/zerolog v1.34.0 // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect @@ -191,7 +195,3 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.4.1 // indirect ) - -replace github.com/evstack/ev-node/core => ../../../core - -replace github.com/evstack/ev-node/da => ../../../da diff --git a/apps/evm/single/go.sum b/apps/evm/single/go.sum index 18d2261ebf..9ca42dd4fc 100644 --- a/apps/evm/single/go.sum +++ b/apps/evm/single/go.sum @@ -760,6 +760,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/apps/grpc/single/cmd/run.go b/apps/grpc/single/cmd/run.go index cef7e092e0..c1c5ba7e85 100644 --- a/apps/grpc/single/cmd/run.go +++ b/apps/grpc/single/cmd/run.go @@ -1,22 +1,30 @@ package cmd import ( + "context" "fmt" "path/filepath" + "github.com/ipfs/go-datastore" + "github.com/rs/zerolog" "github.com/spf13/cobra" + "github.com/evstack/ev-node/block" "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/core/execution" + coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/da/jsonrpc" executiongrpc "github.com/evstack/ev-node/execution/grpc" "github.com/evstack/ev-node/node" rollcmd "github.com/evstack/ev-node/pkg/cmd" "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" rollgenesis 
"github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/p2p" "github.com/evstack/ev-node/pkg/p2p/key" "github.com/evstack/ev-node/pkg/store" + "github.com/evstack/ev-node/sequencers/based" + seqcommon "github.com/evstack/ev-node/sequencers/common" "github.com/evstack/ev-node/sequencers/single" ) @@ -52,7 +60,7 @@ The execution client must implement the Evolve execution gRPC interface.`, logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") // Create DA client - daJrpc, err := jsonrpc.NewClient(cmd.Context(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daJrpc, err := jsonrpc.NewClient(cmd.Context(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, seqcommon.AbsoluteMaxBlobSize) if err != nil { return err } @@ -73,23 +81,8 @@ The execution client must implement the Evolve execution gRPC interface.`, logger.Warn().Msg("da_start_height is not set in genesis.json, ask your chain developer") } - // Create metrics provider - singleMetrics, err := single.DefaultMetricsProvider(nodeConfig.Instrumentation.IsPrometheusEnabled())(genesis.ChainID) - if err != nil { - return err - } - - // Create sequencer - sequencer, err := single.NewSequencer( - cmd.Context(), - logger, - datastore, - &daJrpc.DA, - []byte(genesis.ChainID), - nodeConfig.Node.BlockTime.Duration, - singleMetrics, - nodeConfig.Node.Aggregator, - ) + // Create sequencer based on configuration + sequencer, err := createSequencer(cmd.Context(), logger, datastore, &daJrpc.DA, nodeConfig, genesis) if err != nil { return err } @@ -119,6 +112,65 @@ func init() { addGRPCFlags(RunCmd) } +// createSequencer creates a sequencer based on the configuration. +// If BasedSequencer is enabled, it creates a based sequencer that fetches transactions from DA. +// Otherwise, it creates a single (traditional) sequencer. +func createSequencer( + ctx context.Context, + logger zerolog.Logger, + datastore datastore.Batching, + da da.DA, + nodeConfig config.Config, + genesis genesis.Genesis, +) (coresequencer.Sequencer, error) { + daClient := block.NewDAClient(da, nodeConfig, logger) + fiRetriever := block.NewForcedInclusionRetriever(daClient, genesis, logger) + + if nodeConfig.Node.BasedSequencer { + // Based sequencer mode - fetch transactions only from DA + if !nodeConfig.Node.Aggregator { + return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") + } + + basedSeq := based.NewBasedSequencer(fiRetriever, da, nodeConfig, genesis, logger) + + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). + Uint64("da_epoch", genesis.DAEpochForcedInclusion). + Msg("based sequencer initialized") + + return basedSeq, nil + } + + singleMetrics, err := single.NopMetrics() + if err != nil { + return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) + } + + sequencer, err := single.NewSequencer( + ctx, + logger, + datastore, + da, + []byte(genesis.ChainID), + nodeConfig.Node.BlockTime.Duration, + singleMetrics, + nodeConfig.Node.Aggregator, + 1000, + fiRetriever, + genesis, + ) + if err != nil { + return nil, fmt.Errorf("failed to create single sequencer: %w", err) + } + + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). 
+ Msg("single sequencer initialized") + + return sequencer, nil +} + // createGRPCExecutionClient creates a new gRPC execution client from command flags func createGRPCExecutionClient(cmd *cobra.Command) (execution.Executor, error) { // Get the gRPC executor URL from flags diff --git a/apps/grpc/single/go.mod b/apps/grpc/single/go.mod index 6d6a8f226e..1caa1fc32b 100644 --- a/apps/grpc/single/go.mod +++ b/apps/grpc/single/go.mod @@ -9,6 +9,8 @@ require ( github.com/evstack/ev-node/core v1.0.0-beta.4 github.com/evstack/ev-node/da v1.0.0-beta.5 github.com/evstack/ev-node/execution/grpc v0.0.0 + github.com/ipfs/go-datastore v0.9.0 + github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.1 ) @@ -49,7 +51,6 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/boxo v0.35.0 // indirect github.com/ipfs/go-cid v0.5.0 // indirect - github.com/ipfs/go-datastore v0.9.0 // indirect github.com/ipfs/go-ds-badger4 v0.1.8 // indirect github.com/ipfs/go-log/v2 v2.8.1 // indirect github.com/ipld/go-ipld-prime v0.21.0 // indirect @@ -73,7 +74,7 @@ require ( github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v5 v5.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/miekg/dns v1.1.68 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect @@ -123,7 +124,6 @@ require ( github.com/quic-go/qpack v0.5.1 // indirect github.com/quic-go/quic-go v0.54.1 // indirect github.com/quic-go/webtransport-go v0.9.0 // indirect - github.com/rs/zerolog v1.34.0 // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect @@ -167,9 +167,7 @@ require ( replace ( github.com/evstack/ev-node => ../../../ + github.com/evstack/ev-node/core => ../../../core + github.com/evstack/ev-node/da => ../../../da github.com/evstack/ev-node/execution/grpc => ../../../execution/grpc ) - -replace github.com/evstack/ev-node/core => ../../../core - -replace github.com/evstack/ev-node/da => ../../../da diff --git a/apps/grpc/single/go.sum b/apps/grpc/single/go.sum index 9f9519135f..6dc9d5d9e1 100644 --- a/apps/grpc/single/go.sum +++ b/apps/grpc/single/go.sum @@ -229,8 +229,9 @@ github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= @@ -653,6 +654,8 @@ 
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index c72d220cdd..dd3440b864 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -5,17 +5,24 @@ import ( "fmt" "path/filepath" + "github.com/ipfs/go-datastore" + "github.com/rs/zerolog" "github.com/spf13/cobra" kvexecutor "github.com/evstack/ev-node/apps/testapp/kv" + "github.com/evstack/ev-node/block" "github.com/evstack/ev-node/core/da" + coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/da/jsonrpc" "github.com/evstack/ev-node/node" - rollcmd "github.com/evstack/ev-node/pkg/cmd" - genesispkg "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/pkg/cmd" + "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/p2p" "github.com/evstack/ev-node/pkg/p2p/key" "github.com/evstack/ev-node/pkg/store" + "github.com/evstack/ev-node/sequencers/based" + seqcommon "github.com/evstack/ev-node/sequencers/common" "github.com/evstack/ev-node/sequencers/single" ) @@ -23,16 +30,16 @@ var RunCmd = &cobra.Command{ Use: "start", Aliases: []string{"node", "run"}, Short: "Run the testapp node", - RunE: func(cmd *cobra.Command, args []string) error { - nodeConfig, err := rollcmd.ParseConfig(cmd) + RunE: func(command *cobra.Command, args []string) error { + nodeConfig, err := cmd.ParseConfig(command) if err != nil { return err } - logger := rollcmd.SetupLogger(nodeConfig.Log) + logger := cmd.SetupLogger(nodeConfig.Log) // Get KV endpoint flag - kvEndpoint, _ := cmd.Flags().GetString(flagKVEndpoint) + kvEndpoint, _ := command.Flags().GetString(flagKVEndpoint) if kvEndpoint == "" { logger.Info().Msg("KV endpoint flag not set, using default from http_server") } @@ -51,7 +58,7 @@ var RunCmd = &cobra.Command{ logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") - daJrpc, err := jsonrpc.NewClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daJrpc, err := jsonrpc.NewClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, seqcommon.AbsoluteMaxBlobSize) if err != nil { return err } @@ -66,11 +73,6 @@ var RunCmd = &cobra.Command{ return err } - singleMetrics, err := single.NopMetrics() - if err != nil { - return err - } - // Start the KV executor HTTP server if kvEndpoint != "" { // Only start if endpoint is provided httpServer := kvexecutor.NewHTTPServer(executor, kvEndpoint) @@ -83,7 +85,7 @@ var RunCmd = &cobra.Command{ } genesisPath := filepath.Join(filepath.Dir(nodeConfig.ConfigPath()), "genesis.json") - genesis, err := genesispkg.LoadGenesis(genesisPath) + genesis, err := 
genesis.LoadGenesis(genesisPath) if err != nil { return fmt.Errorf("failed to load genesis: %w", err) } @@ -92,16 +94,8 @@ var RunCmd = &cobra.Command{ logger.Warn().Msg("da_start_height is not set in genesis.json, ask your chain developer") } - sequencer, err := single.NewSequencer( - ctx, - logger, - datastore, - &daJrpc.DA, - []byte(genesis.ChainID), - nodeConfig.Node.BlockTime.Duration, - singleMetrics, - nodeConfig.Node.Aggregator, - ) + // Create sequencer based on configuration + sequencer, err := createSequencer(ctx, logger, datastore, &daJrpc.DA, nodeConfig, genesis) if err != nil { return err } @@ -111,6 +105,65 @@ var RunCmd = &cobra.Command{ return err } - return rollcmd.StartNode(logger, cmd, executor, sequencer, &daJrpc.DA, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) + return cmd.StartNode(logger, command, executor, sequencer, &daJrpc.DA, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) }, } + +// createSequencer creates a sequencer based on the configuration. +// If BasedSequencer is enabled, it creates a based sequencer that fetches transactions from DA. +// Otherwise, it creates a single (traditional) sequencer. +func createSequencer( + ctx context.Context, + logger zerolog.Logger, + datastore datastore.Batching, + da da.DA, + nodeConfig config.Config, + genesis genesis.Genesis, +) (coresequencer.Sequencer, error) { + daClient := block.NewDAClient(da, nodeConfig, logger) + fiRetriever := block.NewForcedInclusionRetriever(daClient, genesis, logger) + + if nodeConfig.Node.BasedSequencer { + // Based sequencer mode - fetch transactions only from DA + if !nodeConfig.Node.Aggregator { + return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") + } + + basedSeq := based.NewBasedSequencer(fiRetriever, da, nodeConfig, genesis, logger) + + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). + Uint64("da_epoch", genesis.DAEpochForcedInclusion). + Msg("based sequencer initialized") + + return basedSeq, nil + } + + singleMetrics, err := single.NopMetrics() + if err != nil { + return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) + } + + sequencer, err := single.NewSequencer( + ctx, + logger, + datastore, + da, + []byte(genesis.ChainID), + nodeConfig.Node.BlockTime.Duration, + singleMetrics, + nodeConfig.Node.Aggregator, + 1000, + fiRetriever, + genesis, + ) + if err != nil { + return nil, fmt.Errorf("failed to create single sequencer: %w", err) + } + + logger.Info(). + Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()). 
+ Msg("single sequencer initialized") + + return sequencer, nil +} diff --git a/apps/testapp/go.mod b/apps/testapp/go.mod index 4ad8615c61..92021fea86 100644 --- a/apps/testapp/go.mod +++ b/apps/testapp/go.mod @@ -16,6 +16,7 @@ require ( github.com/evstack/ev-node/core v1.0.0-beta.4 github.com/evstack/ev-node/da v0.0.0-00010101000000-000000000000 github.com/ipfs/go-datastore v0.9.0 + github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.1 github.com/stretchr/testify v1.11.1 ) @@ -79,7 +80,7 @@ require ( github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v5 v5.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/miekg/dns v1.1.68 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect @@ -129,7 +130,6 @@ require ( github.com/quic-go/qpack v0.5.1 // indirect github.com/quic-go/quic-go v0.54.1 // indirect github.com/quic-go/webtransport-go v0.9.0 // indirect - github.com/rs/zerolog v1.34.0 // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect diff --git a/apps/testapp/go.sum b/apps/testapp/go.sum index 9f9519135f..6dc9d5d9e1 100644 --- a/apps/testapp/go.sum +++ b/apps/testapp/go.sum @@ -229,8 +229,9 @@ github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= @@ -653,6 +654,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/block/components.go b/block/components.go index 4b493b5a4f..fee98db9fd 100644 --- a/block/components.go +++ b/block/components.go @@ -162,8 
+162,9 @@ func NewSyncComponents( errorCh, ) - // Create DA submitter for sync nodes (no signer, only DA inclusion processing) - daSubmitter := submitting.NewDASubmitter(da, config, genesis, blockOpts, metrics, logger) + // Create DA client and submitter for sync nodes (no signer, only DA inclusion processing) + daClient := NewDAClient(da, config, logger) + daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger) submitter := submitting.NewSubmitter( store, exec, @@ -243,8 +244,18 @@ func NewAggregatorComponents( return nil, fmt.Errorf("failed to create reaper: %w", err) } - // Create DA submitter for aggregator nodes (with signer for submission) - daSubmitter := submitting.NewDASubmitter(da, config, genesis, blockOpts, metrics, logger) + if config.Node.BasedSequencer { // no submissions needed for based sequencer + return &Components{ + Executor: executor, + Reaper: reaper, + Cache: cacheManager, + errorCh: errorCh, + }, nil + } + + // Create DA client and submitter for aggregator nodes (with signer for submission) + daClient := NewDAClient(da, config, logger) + daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger) submitter := submitting.NewSubmitter( store, exec, diff --git a/block/components_test.go b/block/components_test.go index eadf45328c..c288f5322c 100644 --- a/block/components_test.go +++ b/block/components_test.go @@ -203,6 +203,9 @@ func TestExecutor_RealExecutionClientFailure_StopsNode(t *testing.T) { mockExec.On("InitChain", mock.Anything, mock.Anything, mock.Anything, mock.Anything). Return([]byte("state-root"), uint64(1024), nil).Once() + // Mock SetDAHeight to be called during initialization + mockSeq.On("SetDAHeight", uint64(0)).Return().Once() + // Mock GetNextBatch to return empty batch mockSeq.On("GetNextBatch", mock.Anything, mock.Anything).
Return(&coresequencer.GetNextBatchResponse{ diff --git a/block/internal/cache/manager.go b/block/internal/cache/manager.go index d8f4a6bf8c..04fe242049 100644 --- a/block/internal/cache/manager.go +++ b/block/internal/cache/manager.go @@ -41,8 +41,8 @@ func registerGobTypes() { }) } -// Manager provides centralized cache management for both executing and syncing components -type Manager interface { +// CacheManager provides centralized cache management for both executing and syncing components +type CacheManager interface { // Header operations IsHeaderSeen(hash string) bool SetHeaderSeen(hash string, blockHeight uint64) @@ -61,14 +61,6 @@ type Manager interface { SetTxSeen(hash string) CleanupOldTxs(olderThan time.Duration) int - // Pending operations - GetPendingHeaders(ctx context.Context) ([]*types.SignedHeader, error) - GetPendingData(ctx context.Context) ([]*types.SignedData, error) - SetLastSubmittedHeaderHeight(ctx context.Context, height uint64) - SetLastSubmittedDataHeight(ctx context.Context, height uint64) - NumPendingHeaders() uint64 - NumPendingData() uint64 - // Pending events syncing coordination GetNextPendingEvent(blockHeight uint64) *common.DAHeightEvent SetPendingEvent(blockHeight uint64, event *common.DAHeightEvent) @@ -82,6 +74,22 @@ type Manager interface { DeleteHeight(blockHeight uint64) } +// PendingManager provides operations for managing pending headers and data +type PendingManager interface { + GetPendingHeaders(ctx context.Context) ([]*types.SignedHeader, error) + GetPendingData(ctx context.Context) ([]*types.SignedData, error) + SetLastSubmittedHeaderHeight(ctx context.Context, height uint64) + SetLastSubmittedDataHeight(ctx context.Context, height uint64) + NumPendingHeaders() uint64 + NumPendingData() uint64 +} + +// Manager provides centralized cache management for both executing and syncing components +type Manager interface { + CacheManager + PendingManager +} + var _ Manager = (*implementation)(nil) // implementation provides the concrete implementation of cache Manager @@ -97,6 +105,59 @@ type implementation struct { logger zerolog.Logger } +// NewPendingManager creates a new pending manager instance +func NewPendingManager(store store.Store, logger zerolog.Logger) (PendingManager, error) { + pendingHeaders, err := NewPendingHeaders(store, logger) + if err != nil { + return nil, fmt.Errorf("failed to create pending headers: %w", err) + } + + pendingData, err := NewPendingData(store, logger) + if err != nil { + return nil, fmt.Errorf("failed to create pending data: %w", err) + } + + return &implementation{ + pendingHeaders: pendingHeaders, + pendingData: pendingData, + logger: logger, + }, nil +} + +// NewCacheManager creates a new cache manager instance +func NewCacheManager(cfg config.Config, logger zerolog.Logger) (CacheManager, error) { + // Initialize caches + headerCache := NewCache[types.SignedHeader]() + dataCache := NewCache[types.Data]() + txCache := NewCache[struct{}]() + pendingEventsCache := NewCache[common.DAHeightEvent]() + + registerGobTypes() + impl := &implementation{ + headerCache: headerCache, + dataCache: dataCache, + txCache: txCache, + txTimestamps: new(sync.Map), + pendingEventsCache: pendingEventsCache, + config: cfg, + logger: logger, + } + + if cfg.ClearCache { + // Clear the cache from disk + if err := impl.ClearFromDisk(); err != nil { + logger.Warn().Err(err).Msg("failed to clear cache from disk, starting with empty cache") + } + } else { + // Load existing cache from disk + if err := impl.LoadFromDisk(); err != nil { + 
logger.Warn().Err(err).Msg("failed to load cache from disk, starting with empty cache") + } + } + + return impl, nil +} + // NewManager creates a new cache manager instance func NewManager(cfg config.Config, store store.Store, logger zerolog.Logger) (Manager, error) { // Initialize caches diff --git a/block/internal/common/event.go b/block/internal/common/event.go index 69d0300f9f..f1b4295c73 100644 --- a/block/internal/common/event.go +++ b/block/internal/common/event.go @@ -1,6 +1,8 @@ package common -import "github.com/evstack/ev-node/types" +import ( + "github.com/evstack/ev-node/types" +) // EventSource represents the origin of a block event type EventSource string @@ -13,7 +15,7 @@ const ( ) // DAHeightEvent represents a DA event for caching -type DAHeightEvent struct { +type DAHeightEvent = struct { Header *types.SignedHeader Data *types.Data // DaHeight corresponds to the highest DA included height between the Header and Data. diff --git a/block/internal/da/client.go b/block/internal/da/client.go new file mode 100644 index 0000000000..07bd7d3af2 --- /dev/null +++ b/block/internal/da/client.go @@ -0,0 +1,299 @@ +package da + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "github.com/rs/zerolog" + + coreda "github.com/evstack/ev-node/core/da" +) + +// Client is the interface representing the DA client. +type Client interface { + Submit(ctx context.Context, data [][]byte, gasPrice float64, namespace []byte, options []byte) coreda.ResultSubmit + Retrieve(ctx context.Context, height uint64, namespace []byte) coreda.ResultRetrieve + RetrieveHeaders(ctx context.Context, height uint64) coreda.ResultRetrieve + RetrieveData(ctx context.Context, height uint64) coreda.ResultRetrieve + RetrieveForcedInclusion(ctx context.Context, height uint64) coreda.ResultRetrieve + + GetHeaderNamespace() []byte + GetDataNamespace() []byte + GetForcedInclusionNamespace() []byte + HasForcedInclusionNamespace() bool + GetDA() coreda.DA +} + +// client provides a reusable wrapper around the core DA interface +// with common configuration for namespace handling and timeouts. +type client struct { + da coreda.DA + logger zerolog.Logger + defaultTimeout time.Duration + namespaceBz []byte + namespaceDataBz []byte + namespaceForcedInclusionBz []byte + hasForcedInclusionNs bool +} + +// Config contains configuration for the DA client. +type Config struct { + DA coreda.DA + Logger zerolog.Logger + DefaultTimeout time.Duration + Namespace string + DataNamespace string + ForcedInclusionNamespace string +} + +// NewClient creates a new DA client with pre-calculated namespace bytes. +func NewClient(cfg Config) *client { + if cfg.DefaultTimeout == 0 { + cfg.DefaultTimeout = 30 * time.Second + } + + hasForcedInclusionNs := cfg.ForcedInclusionNamespace != "" + var namespaceForcedInclusionBz []byte + if hasForcedInclusionNs { + namespaceForcedInclusionBz = coreda.NamespaceFromString(cfg.ForcedInclusionNamespace).Bytes() + } + + return &client{ + da: cfg.DA, + logger: cfg.Logger.With().Str("component", "da_client").Logger(), + defaultTimeout: cfg.DefaultTimeout, + namespaceBz: coreda.NamespaceFromString(cfg.Namespace).Bytes(), + namespaceDataBz: coreda.NamespaceFromString(cfg.DataNamespace).Bytes(), + namespaceForcedInclusionBz: namespaceForcedInclusionBz, + hasForcedInclusionNs: hasForcedInclusionNs, + } +} + +// Submit submits blobs to the DA layer with the specified options. 
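Before the `Submit` implementation below, a hedged usage sketch of the new DA client: how a caller might construct it with `NewClient` and branch on the retrieval status codes this file defines. `pollHeaders` is a hypothetical helper and the namespace strings are placeholders:

```go
package da

import (
	"context"

	"github.com/rs/zerolog"

	coreda "github.com/evstack/ev-node/core/da"
)

// pollHeaders is a hypothetical helper showing how a caller might drive the
// client: retrieve one DA height and decide whether to advance to the next.
func pollHeaders(ctx context.Context, d coreda.DA, height uint64) (advance bool) {
	c := NewClient(Config{
		DA:            d,
		Logger:        zerolog.Nop(),
		Namespace:     "header-ns", // placeholder namespaces
		DataNamespace: "data-ns",
	})

	res := c.RetrieveHeaders(ctx, height)
	switch res.Code {
	case coreda.StatusSuccess:
		// res.Data holds the raw blobs for this height.
		return true
	case coreda.StatusNotFound:
		// Nothing was published at this height; safe to move on.
		return true
	case coreda.StatusHeightFromFuture:
		// The DA layer has not produced this height yet; retry later.
		return false
	default:
		// Transient error (timeouts, RPC failures); retry the same height.
		return false
	}
}
```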
+func (c *client) Submit(ctx context.Context, data [][]byte, gasPrice float64, namespace []byte, options []byte) coreda.ResultSubmit { + ids, err := c.da.SubmitWithOptions(ctx, data, gasPrice, namespace, options) + + // calculate blob size + var blobSize uint64 + for _, blob := range data { + blobSize += uint64(len(blob)) + } + + // Handle errors returned by Submit + if err != nil { + if errors.Is(err, context.Canceled) { + c.logger.Debug().Msg("DA submission canceled due to context cancellation") + return coreda.ResultSubmit{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusContextCanceled, + Message: "submission canceled", + IDs: ids, + BlobSize: blobSize, + }, + } + } + status := coreda.StatusError + switch { + case errors.Is(err, coreda.ErrTxTimedOut): + status = coreda.StatusNotIncludedInBlock + case errors.Is(err, coreda.ErrTxAlreadyInMempool): + status = coreda.StatusAlreadyInMempool + case errors.Is(err, coreda.ErrTxIncorrectAccountSequence): + status = coreda.StatusIncorrectAccountSequence + case errors.Is(err, coreda.ErrBlobSizeOverLimit): + status = coreda.StatusTooBig + case errors.Is(err, coreda.ErrContextDeadline): + status = coreda.StatusContextDeadline + } + + // Use debug level for StatusTooBig as it gets handled later in submitToDA through recursive splitting + if status == coreda.StatusTooBig { + c.logger.Debug().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed") + } else { + c.logger.Error().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed") + } + return coreda.ResultSubmit{ + BaseResult: coreda.BaseResult{ + Code: status, + Message: "failed to submit blobs: " + err.Error(), + IDs: ids, + SubmittedCount: uint64(len(ids)), + Height: 0, + Timestamp: time.Now(), + BlobSize: blobSize, + }, + } + } + + if len(ids) == 0 && len(data) > 0 { + c.logger.Warn().Msg("DA submission returned no IDs for non-empty input data") + return coreda.ResultSubmit{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusError, + Message: "failed to submit blobs: no IDs returned despite non-empty input", + }, + } + } + + // Get height from the first ID + var height uint64 + if len(ids) > 0 { + height, _, err = coreda.SplitID(ids[0]) + if err != nil { + c.logger.Error().Err(err).Msg("failed to split ID") + } + } + + c.logger.Debug().Int("num_ids", len(ids)).Msg("DA submission successful") + return coreda.ResultSubmit{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusSuccess, + IDs: ids, + SubmittedCount: uint64(len(ids)), + Height: height, + BlobSize: blobSize, + Timestamp: time.Now(), + }, + } +} + +// Retrieve retrieves blobs from the DA layer at the specified height and namespace. +func (c *client) Retrieve(ctx context.Context, height uint64, namespace []byte) coreda.ResultRetrieve { + // 1. 
Get IDs + getIDsCtx, cancel := context.WithTimeout(ctx, c.defaultTimeout) + defer cancel() + idsResult, err := c.da.GetIDs(getIDsCtx, height, namespace) + if err != nil { + // Handle specific "not found" error + if strings.Contains(err.Error(), coreda.ErrBlobNotFound.Error()) { + c.logger.Debug().Uint64("height", height).Msg("Blobs not found at height") + return coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusNotFound, + Message: coreda.ErrBlobNotFound.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + if strings.Contains(err.Error(), coreda.ErrHeightFromFuture.Error()) { + c.logger.Debug().Uint64("height", height).Msg("Height is from the future") + return coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusHeightFromFuture, + Message: coreda.ErrHeightFromFuture.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + // Handle other errors during GetIDs + c.logger.Error().Uint64("height", height).Err(err).Msg("Failed to get IDs") + return coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusError, + Message: fmt.Sprintf("failed to get IDs: %s", err.Error()), + Height: height, + Timestamp: time.Now(), + }, + } + } + + // This check should technically be redundant if GetIDs correctly returns ErrBlobNotFound + if idsResult == nil || len(idsResult.IDs) == 0 { + c.logger.Debug().Uint64("height", height).Msg("No IDs found at height") + return coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusNotFound, + Message: coreda.ErrBlobNotFound.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + // 2. Get Blobs using the retrieved IDs in batches + batchSize := 100 + blobs := make([][]byte, 0, len(idsResult.IDs)) + for i := 0; i < len(idsResult.IDs); i += batchSize { + end := min(i+batchSize, len(idsResult.IDs)) + + getBlobsCtx, cancel := context.WithTimeout(ctx, c.defaultTimeout) + batchBlobs, err := c.da.Get(getBlobsCtx, idsResult.IDs[i:end], namespace) + cancel() + if err != nil { + // Handle errors during Get + c.logger.Error().Uint64("height", height).Int("num_ids", len(idsResult.IDs)).Err(err).Msg("Failed to get blobs") + return coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusError, + Message: fmt.Sprintf("failed to get blobs for batch %d-%d: %s", i, end-1, err.Error()), + Height: height, + Timestamp: time.Now(), + }, + } + } + blobs = append(blobs, batchBlobs...) + } + // Success + c.logger.Debug().Uint64("height", height).Int("num_blobs", len(blobs)).Msg("Successfully retrieved blobs") + return coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusSuccess, + Height: height, + IDs: idsResult.IDs, + Timestamp: idsResult.Timestamp, + }, + Data: blobs, + } +} + +// RetrieveHeaders retrieves blobs from the header namespace at the specified height. +func (c *client) RetrieveHeaders(ctx context.Context, height uint64) coreda.ResultRetrieve { + return c.Retrieve(ctx, height, c.namespaceBz) +} + +// RetrieveData retrieves blobs from the data namespace at the specified height. +func (c *client) RetrieveData(ctx context.Context, height uint64) coreda.ResultRetrieve { + return c.Retrieve(ctx, height, c.namespaceDataBz) +} + +// RetrieveForcedInclusion retrieves blobs from the forced inclusion namespace at the specified height.
+func (c *client) RetrieveForcedInclusion(ctx context.Context, height uint64) coreda.ResultRetrieve { + if !c.hasForcedInclusionNs { + return coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusError, + Message: "forced inclusion namespace not configured", + }, + } + } + return c.Retrieve(ctx, height, c.namespaceForcedInclusionBz) +} + +// GetHeaderNamespace returns the header namespace bytes. +func (c *client) GetHeaderNamespace() []byte { + return c.namespaceBz +} + +// GetDataNamespace returns the data namespace bytes. +func (c *client) GetDataNamespace() []byte { + return c.namespaceDataBz +} + +// GetForcedInclusionNamespace returns the forced inclusion namespace bytes. +func (c *client) GetForcedInclusionNamespace() []byte { + return c.namespaceForcedInclusionBz +} + +// HasForcedInclusionNamespace returns whether forced inclusion namespace is configured. +func (c *client) HasForcedInclusionNamespace() bool { + return c.hasForcedInclusionNs +} + +// GetDA returns the underlying DA interface for advanced usage. +func (c *client) GetDA() coreda.DA { + return c.da +} diff --git a/block/internal/da/client_test.go b/block/internal/da/client_test.go new file mode 100644 index 0000000000..7bc7e972a6 --- /dev/null +++ b/block/internal/da/client_test.go @@ -0,0 +1,525 @@ +package da + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/rs/zerolog" + "gotest.tools/v3/assert" + + coreda "github.com/evstack/ev-node/core/da" +) + +// mockDA is a simple mock implementation of coreda.DA for testing +type mockDA struct { + submitFunc func(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte) ([]coreda.ID, error) + submitWithOptions func(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte, options []byte) ([]coreda.ID, error) + getIDsFunc func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) + getFunc func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) +} + +func (m *mockDA) Submit(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte) ([]coreda.ID, error) { + if m.submitFunc != nil { + return m.submitFunc(ctx, blobs, gasPrice, namespace) + } + return nil, nil +} + +func (m *mockDA) SubmitWithOptions(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte, options []byte) ([]coreda.ID, error) { + if m.submitWithOptions != nil { + return m.submitWithOptions(ctx, blobs, gasPrice, namespace, options) + } + return nil, nil +} + +func (m *mockDA) GetIDs(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + if m.getIDsFunc != nil { + return m.getIDsFunc(ctx, height, namespace) + } + return nil, errors.New("not implemented") +} + +func (m *mockDA) Get(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { + if m.getFunc != nil { + return m.getFunc(ctx, ids, namespace) + } + return nil, errors.New("not implemented") +} + +func (m *mockDA) GetProofs(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Proof, error) { + return nil, errors.New("not implemented") +} + +func (m *mockDA) Commit(ctx context.Context, blobs []coreda.Blob, namespace []byte) ([]coreda.Commitment, error) { + return nil, errors.New("not implemented") +} + +func (m *mockDA) Validate(ctx context.Context, ids []coreda.ID, proofs []coreda.Proof, namespace []byte) ([]bool, error) { + return nil, errors.New("not implemented") +} + +func TestNewClient(t *testing.T) { + 
tests := []struct { + name string + cfg Config + }{ + { + name: "with all namespaces", + cfg: Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + DefaultTimeout: 5 * time.Second, + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }, + }, + { + name: "without forced inclusion namespace", + cfg: Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + DefaultTimeout: 5 * time.Second, + Namespace: "test-ns", + DataNamespace: "test-data-ns", + }, + }, + { + name: "with default timeout", + cfg: Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := NewClient(tt.cfg) + assert.Assert(t, client != nil) + assert.Assert(t, client.da != nil) + assert.Assert(t, len(client.namespaceBz) > 0) + assert.Assert(t, len(client.namespaceDataBz) > 0) + + if tt.cfg.ForcedInclusionNamespace != "" { + assert.Assert(t, client.hasForcedInclusionNs) + assert.Assert(t, len(client.namespaceForcedInclusionBz) > 0) + } else { + assert.Assert(t, !client.hasForcedInclusionNs) + } + + expectedTimeout := tt.cfg.DefaultTimeout + if expectedTimeout == 0 { + expectedTimeout = 30 * time.Second + } + assert.Equal(t, client.defaultTimeout, expectedTimeout) + }) + } +} + +func TestClient_HasForcedInclusionNamespace(t *testing.T) { + tests := []struct { + name string + cfg Config + expected bool + }{ + { + name: "with forced inclusion namespace", + cfg: Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }, + expected: true, + }, + { + name: "without forced inclusion namespace", + cfg: Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := NewClient(tt.cfg) + assert.Equal(t, client.HasForcedInclusionNamespace(), tt.expected) + }) + } +} + +func TestClient_GetNamespaces(t *testing.T) { + cfg := Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-header", + DataNamespace: "test-data", + ForcedInclusionNamespace: "test-fi", + } + + client := NewClient(cfg) + + headerNs := client.GetHeaderNamespace() + assert.Assert(t, len(headerNs) > 0) + + dataNs := client.GetDataNamespace() + assert.Assert(t, len(dataNs) > 0) + + fiNs := client.GetForcedInclusionNamespace() + assert.Assert(t, len(fiNs) > 0) + + // Namespaces should be different + assert.Assert(t, string(headerNs) != string(dataNs)) + assert.Assert(t, string(headerNs) != string(fiNs)) + assert.Assert(t, string(dataNs) != string(fiNs)) +} + +func TestClient_RetrieveForcedInclusion_NotConfigured(t *testing.T) { + cfg := Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + } + + client := NewClient(cfg) + ctx := context.Background() + + result := client.RetrieveForcedInclusion(ctx, 100) + assert.Equal(t, result.Code, coreda.StatusError) + assert.Assert(t, result.Message != "") +} + +func TestClient_GetDA(t *testing.T) { + mockDAInstance := &mockDA{} + cfg := Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + } + + client := NewClient(cfg) + da := client.GetDA() + assert.Equal(t, da, mockDAInstance) +} + +func TestClient_Submit(t *testing.T) { + logger := zerolog.Nop() + + testCases := []struct { + name 
string + data [][]byte + gasPrice float64 + options []byte + submitErr error + submitIDs [][]byte + expectedCode coreda.StatusCode + expectedErrMsg string + expectedIDs [][]byte + expectedCount uint64 + }{ + { + name: "successful submission", + data: [][]byte{[]byte("blob1"), []byte("blob2")}, + gasPrice: 1.0, + options: []byte("opts"), + submitIDs: [][]byte{[]byte("id1"), []byte("id2")}, + expectedCode: coreda.StatusSuccess, + expectedIDs: [][]byte{[]byte("id1"), []byte("id2")}, + expectedCount: 2, + }, + { + name: "context canceled error", + data: [][]byte{[]byte("blob1")}, + gasPrice: 1.0, + options: []byte("opts"), + submitErr: context.Canceled, + expectedCode: coreda.StatusContextCanceled, + expectedErrMsg: "submission canceled", + }, + { + name: "tx timed out error", + data: [][]byte{[]byte("blob1")}, + gasPrice: 1.0, + options: []byte("opts"), + submitErr: coreda.ErrTxTimedOut, + expectedCode: coreda.StatusNotIncludedInBlock, + expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxTimedOut.Error(), + }, + { + name: "tx already in mempool error", + data: [][]byte{[]byte("blob1")}, + gasPrice: 1.0, + options: []byte("opts"), + submitErr: coreda.ErrTxAlreadyInMempool, + expectedCode: coreda.StatusAlreadyInMempool, + expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxAlreadyInMempool.Error(), + }, + { + name: "incorrect account sequence error", + data: [][]byte{[]byte("blob1")}, + gasPrice: 1.0, + options: []byte("opts"), + submitErr: coreda.ErrTxIncorrectAccountSequence, + expectedCode: coreda.StatusIncorrectAccountSequence, + expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxIncorrectAccountSequence.Error(), + }, + { + name: "blob size over limit error", + data: [][]byte{[]byte("blob1")}, + gasPrice: 1.0, + options: []byte("opts"), + submitErr: coreda.ErrBlobSizeOverLimit, + expectedCode: coreda.StatusTooBig, + expectedErrMsg: "failed to submit blobs: " + coreda.ErrBlobSizeOverLimit.Error(), + }, + { + name: "context deadline error", + data: [][]byte{[]byte("blob1")}, + gasPrice: 1.0, + options: []byte("opts"), + submitErr: coreda.ErrContextDeadline, + expectedCode: coreda.StatusContextDeadline, + expectedErrMsg: "failed to submit blobs: " + coreda.ErrContextDeadline.Error(), + }, + { + name: "generic submission error", + data: [][]byte{[]byte("blob1")}, + gasPrice: 1.0, + options: []byte("opts"), + submitErr: errors.New("some generic error"), + expectedCode: coreda.StatusError, + expectedErrMsg: "failed to submit blobs: some generic error", + }, + { + name: "no IDs returned for non-empty data", + data: [][]byte{[]byte("blob1")}, + gasPrice: 1.0, + options: []byte("opts"), + submitIDs: [][]byte{}, + expectedCode: coreda.StatusError, + expectedErrMsg: "failed to submit blobs: no IDs returned despite non-empty input", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockDAInstance := &mockDA{ + submitWithOptions: func(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte, options []byte) ([]coreda.ID, error) { + return tc.submitIDs, tc.submitErr + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: logger, + Namespace: "test-namespace", + DataNamespace: "test-data-namespace", + }) + + encodedNamespace := coreda.NamespaceFromString("test-namespace") + result := client.Submit(context.Background(), tc.data, tc.gasPrice, encodedNamespace.Bytes(), tc.options) + + assert.Equal(t, tc.expectedCode, result.Code) + if tc.expectedErrMsg != "" { + assert.Assert(t, result.Message != "") + } + if 
tc.expectedIDs != nil { + assert.Equal(t, len(tc.expectedIDs), len(result.IDs)) + } + if tc.expectedCount != 0 { + assert.Equal(t, tc.expectedCount, result.SubmittedCount) + } + }) + } +} + +func TestClient_Retrieve(t *testing.T) { + logger := zerolog.Nop() + dataLayerHeight := uint64(100) + mockIDs := [][]byte{[]byte("id1"), []byte("id2")} + mockBlobs := [][]byte{[]byte("blobA"), []byte("blobB")} + mockTimestamp := time.Now() + + testCases := []struct { + name string + getIDsResult *coreda.GetIDsResult + getIDsErr error + getBlobsErr error + expectedCode coreda.StatusCode + expectedErrMsg string + expectedIDs [][]byte + expectedData [][]byte + expectedHeight uint64 + }{ + { + name: "successful retrieval", + getIDsResult: &coreda.GetIDsResult{ + IDs: mockIDs, + Timestamp: mockTimestamp, + }, + expectedCode: coreda.StatusSuccess, + expectedIDs: mockIDs, + expectedData: mockBlobs, + expectedHeight: dataLayerHeight, + }, + { + name: "blob not found error during GetIDs", + getIDsErr: coreda.ErrBlobNotFound, + expectedCode: coreda.StatusNotFound, + expectedErrMsg: coreda.ErrBlobNotFound.Error(), + expectedHeight: dataLayerHeight, + }, + { + name: "height from future error during GetIDs", + getIDsErr: coreda.ErrHeightFromFuture, + expectedCode: coreda.StatusHeightFromFuture, + expectedErrMsg: coreda.ErrHeightFromFuture.Error(), + expectedHeight: dataLayerHeight, + }, + { + name: "generic error during GetIDs", + getIDsErr: errors.New("failed to connect to DA"), + expectedCode: coreda.StatusError, + expectedErrMsg: "failed to get IDs: failed to connect to DA", + expectedHeight: dataLayerHeight, + }, + { + name: "GetIDs returns nil result", + getIDsResult: nil, + expectedCode: coreda.StatusNotFound, + expectedErrMsg: coreda.ErrBlobNotFound.Error(), + expectedHeight: dataLayerHeight, + }, + { + name: "GetIDs returns empty IDs", + getIDsResult: &coreda.GetIDsResult{ + IDs: [][]byte{}, + Timestamp: mockTimestamp, + }, + expectedCode: coreda.StatusNotFound, + expectedErrMsg: coreda.ErrBlobNotFound.Error(), + expectedHeight: dataLayerHeight, + }, + { + name: "error during Get (blobs retrieval)", + getIDsResult: &coreda.GetIDsResult{ + IDs: mockIDs, + Timestamp: mockTimestamp, + }, + getBlobsErr: errors.New("network error during blob retrieval"), + expectedCode: coreda.StatusError, + expectedErrMsg: "failed to get blobs for batch 0-1: network error during blob retrieval", + expectedHeight: dataLayerHeight, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + return tc.getIDsResult, tc.getIDsErr + }, + getFunc: func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { + if tc.getBlobsErr != nil { + return nil, tc.getBlobsErr + } + return mockBlobs, nil + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: logger, + Namespace: "test-namespace", + DataNamespace: "test-data-namespace", + DefaultTimeout: 5 * time.Second, + }) + + encodedNamespace := coreda.NamespaceFromString("test-namespace") + result := client.Retrieve(context.Background(), dataLayerHeight, encodedNamespace.Bytes()) + + assert.Equal(t, tc.expectedCode, result.Code) + assert.Equal(t, tc.expectedHeight, result.Height) + if tc.expectedErrMsg != "" { + assert.Assert(t, result.Message != "") + } + if tc.expectedIDs != nil { + assert.Equal(t, len(tc.expectedIDs), len(result.IDs)) + } + if tc.expectedData != nil { + assert.Equal(t, 
len(tc.expectedData), len(result.Data)) + } + }) + } +} + +func TestClient_Retrieve_Timeout(t *testing.T) { + logger := zerolog.Nop() + dataLayerHeight := uint64(100) + encodedNamespace := coreda.NamespaceFromString("test-namespace") + + t.Run("timeout during GetIDs", func(t *testing.T) { + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + <-ctx.Done() // Wait for context cancellation + return nil, context.DeadlineExceeded + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: logger, + Namespace: "test-namespace", + DataNamespace: "test-data-namespace", + DefaultTimeout: 1 * time.Millisecond, + }) + + result := client.Retrieve(context.Background(), dataLayerHeight, encodedNamespace.Bytes()) + + assert.Equal(t, coreda.StatusError, result.Code) + assert.Assert(t, result.Message != "") + }) + + t.Run("timeout during Get", func(t *testing.T) { + mockIDs := [][]byte{[]byte("id1")} + mockTimestamp := time.Now() + + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + return &coreda.GetIDsResult{ + IDs: mockIDs, + Timestamp: mockTimestamp, + }, nil + }, + getFunc: func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { + <-ctx.Done() // Wait for context cancellation + return nil, context.DeadlineExceeded + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: logger, + Namespace: "test-namespace", + DataNamespace: "test-data-namespace", + DefaultTimeout: 1 * time.Millisecond, + }) + + result := client.Retrieve(context.Background(), dataLayerHeight, encodedNamespace.Bytes()) + + assert.Equal(t, coreda.StatusError, result.Code) + assert.Assert(t, result.Message != "") + }) +} diff --git a/block/internal/da/forced_inclusion_retriever.go b/block/internal/da/forced_inclusion_retriever.go new file mode 100644 index 0000000000..8b0375634c --- /dev/null +++ b/block/internal/da/forced_inclusion_retriever.go @@ -0,0 +1,455 @@ +package da + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/rs/zerolog" + + coreda "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/types" +) + +const ( + // defaultEpochLag is the default number of blocks to lag behind DA height when fetching forced inclusion txs + defaultEpochLag = 10 + + // defaultMinEpochWindow is the minimum window size for epoch lag calculation + defaultMinEpochWindow = 5 + + // defaultMaxEpochWindow is the maximum window size for epoch lag calculation + defaultMaxEpochWindow = 100 + + // defaultFetchInterval is the interval between async fetch attempts + defaultFetchInterval = 2 * time.Second +) + +// ErrForceInclusionNotConfigured is returned when the forced inclusion namespace is not configured. 
+var ErrForceInclusionNotConfigured = errors.New("forced inclusion namespace not configured")
+
+// epochCache stores fetched forced inclusion events by epoch start height
+type epochCache struct {
+	events     atomic.Pointer[map[uint64]*ForcedInclusionEvent]
+	fetchTimes atomic.Pointer[[]time.Duration]
+	maxSamples int
+}
+
+func newEpochCache(maxSamples int) *epochCache {
+	c := &epochCache{
+		maxSamples: maxSamples,
+	}
+	initialEvents := make(map[uint64]*ForcedInclusionEvent)
+	c.events.Store(&initialEvents)
+	initialTimes := make([]time.Duration, 0, maxSamples)
+	c.fetchTimes.Store(&initialTimes)
+	return c
+}
+
+func (c *epochCache) get(epochStart uint64) (*ForcedInclusionEvent, bool) {
+	events := c.events.Load()
+	event, ok := (*events)[epochStart]
+	return event, ok
+}
+
+func (c *epochCache) set(epochStart uint64, event *ForcedInclusionEvent) {
+	for {
+		oldEventsPtr := c.events.Load()
+		oldEvents := *oldEventsPtr
+		newEvents := make(map[uint64]*ForcedInclusionEvent, len(oldEvents)+1)
+		for k, v := range oldEvents {
+			newEvents[k] = v
+		}
+		newEvents[epochStart] = event
+		if c.events.CompareAndSwap(oldEventsPtr, &newEvents) {
+			return
+		}
+	}
+}
+
+func (c *epochCache) recordFetchTime(duration time.Duration) {
+	for {
+		oldTimesPtr := c.fetchTimes.Load()
+		oldTimes := *oldTimesPtr
+		newTimes := make([]time.Duration, 0, c.maxSamples)
+		newTimes = append(newTimes, oldTimes...)
+		newTimes = append(newTimes, duration)
+		if len(newTimes) > c.maxSamples {
+			newTimes = newTimes[1:]
+		}
+		if c.fetchTimes.CompareAndSwap(oldTimesPtr, &newTimes) {
+			return
+		}
+	}
+}
+
+func (c *epochCache) averageFetchTime() time.Duration {
+	timesPtr := c.fetchTimes.Load()
+	times := *timesPtr
+	if len(times) == 0 {
+		return 0
+	}
+	var sum time.Duration
+	for _, d := range times {
+		sum += d
+	}
+	return sum / time.Duration(len(times))
+}
+
+func (c *epochCache) cleanup(beforeEpoch uint64) {
+	for {
+		oldEventsPtr := c.events.Load()
+		oldEvents := *oldEventsPtr
+		newEvents := make(map[uint64]*ForcedInclusionEvent)
+		for epoch, event := range oldEvents {
+			if epoch >= beforeEpoch {
+				newEvents[epoch] = event
+			}
+		}
+		if c.events.CompareAndSwap(oldEventsPtr, &newEvents) {
+			return
+		}
+	}
+}
+
+// ForcedInclusionRetriever handles retrieval of forced inclusion transactions from DA.
+type ForcedInclusionRetriever struct {
+	client      Client
+	genesis     genesis.Genesis
+	logger      zerolog.Logger
+	daEpochSize uint64
+
+	// Async forced inclusion fetching
+	epochCache      *epochCache
+	fetcherCtx      context.Context
+	fetcherCancel   context.CancelFunc
+	fetcherWg       sync.WaitGroup
+	currentDAHeight atomic.Uint64
+}
+
+// ForcedInclusionEvent contains forced inclusion transactions retrieved from DA.
+type ForcedInclusionEvent struct {
+	StartDaHeight uint64
+	EndDaHeight   uint64
+	Txs           [][]byte
+}
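epochCache avoids a mutex by treating both the event map and the fetch-time slice as immutable snapshots behind atomic.Pointer: writers clone the current snapshot, modify the clone, and publish it with CompareAndSwap, retrying if another writer won the race. A minimal standalone sketch of that copy-on-write pattern, with illustrative names that are not part of this package:

package main

import (
	"fmt"
	"sync/atomic"
)

type cowMap struct {
	m atomic.Pointer[map[uint64]string]
}

func newCowMap() *cowMap {
	c := &cowMap{}
	initial := make(map[uint64]string)
	c.m.Store(&initial)
	return c
}

func (c *cowMap) set(k uint64, v string) {
	for {
		oldPtr := c.m.Load()
		next := make(map[uint64]string, len(*oldPtr)+1)
		for key, val := range *oldPtr { // clone the current snapshot
			next[key] = val
		}
		next[k] = v
		if c.m.CompareAndSwap(oldPtr, &next) { // publish atomically
			return
		}
		// another writer raced us; retry against the new snapshot
	}
}

func main() {
	c := newCowMap()
	c.set(100, "epoch-100")
	fmt.Println((*c.m.Load())[100]) // readers never see a half-written map
}

Readers pay only an atomic load, which suits this workload: the syncer reads the cache on every block, while the background fetcher writes at most once per epoch.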
+// NewForcedInclusionRetriever creates a new forced inclusion retriever.
+func NewForcedInclusionRetriever(
+	client Client,
+	genesis genesis.Genesis,
+	logger zerolog.Logger,
+) *ForcedInclusionRetriever {
+	ctx, cancel := context.WithCancel(context.Background())
+
+	r := &ForcedInclusionRetriever{
+		client:        client,
+		genesis:       genesis,
+		logger:        logger.With().Str("component", "forced_inclusion_retriever").Logger(),
+		daEpochSize:   genesis.DAEpochForcedInclusion,
+		epochCache:    newEpochCache(10), // Keep last 10 fetch times for averaging
+		fetcherCtx:    ctx,
+		fetcherCancel: cancel,
+	}
+	r.currentDAHeight.Store(genesis.DAStartHeight)
+
+	// Start background fetcher if forced inclusion is configured
+	if client.HasForcedInclusionNamespace() {
+		r.fetcherWg.Add(1)
+		go r.backgroundFetcher()
+	}
+
+	return r
+}
+
+// StopBackgroundFetcher stops the background fetcher goroutine
+func (r *ForcedInclusionRetriever) StopBackgroundFetcher() {
+	if r.fetcherCancel != nil {
+		r.fetcherCancel()
+	}
+	r.fetcherWg.Wait()
+}
+
+// SetDAHeight updates the current DA height for async fetching
+func (r *ForcedInclusionRetriever) SetDAHeight(height uint64) {
+	for {
+		current := r.currentDAHeight.Load()
+		if height <= current {
+			return
+		}
+		if r.currentDAHeight.CompareAndSwap(current, height) {
+			return
+		}
+	}
+}
+
+// GetDAHeight returns the current DA height
+func (r *ForcedInclusionRetriever) GetDAHeight() uint64 {
+	return r.currentDAHeight.Load()
+}
+
+// calculateAdaptiveEpochWindow calculates the epoch lag window based on average fetch time
+func (r *ForcedInclusionRetriever) calculateAdaptiveEpochWindow() uint64 {
+	avgFetchTime := r.epochCache.averageFetchTime()
+	if avgFetchTime == 0 {
+		return defaultEpochLag
+	}
+
+	// Scale window based on fetch time: faster fetches = smaller window
+	// If fetch takes 1 second, window = 5
+	// If fetch takes 5 seconds, window = 25
+	// If fetch takes 10 seconds, window = 50
+	window := uint64(avgFetchTime.Seconds() * 5)
+
+	if window < defaultMinEpochWindow {
+		window = defaultMinEpochWindow
+	}
+	if window > defaultMaxEpochWindow {
+		window = defaultMaxEpochWindow
+	}
+
+	return window
+}
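The window math is easiest to sanity-check as concrete inputs and outputs. This self-contained restatement copies the defaults above and prints the clamped values; it is a mirror of calculateAdaptiveEpochWindow for illustration, not code from the package:

package main

import (
	"fmt"
	"time"
)

const (
	defaultEpochLag       = 10
	defaultMinEpochWindow = 5
	defaultMaxEpochWindow = 100
)

// adaptiveWindow restates the scaling rule: 5 blocks of lag per second of
// average fetch time, clamped to [5, 100].
func adaptiveWindow(avg time.Duration) uint64 {
	if avg == 0 {
		return defaultEpochLag // no samples recorded yet
	}
	w := uint64(avg.Seconds() * 5)
	if w < defaultMinEpochWindow {
		w = defaultMinEpochWindow
	}
	if w > defaultMaxEpochWindow {
		w = defaultMaxEpochWindow
	}
	return w
}

func main() {
	fmt.Println(adaptiveWindow(0))                      // 10 (default lag, no samples)
	fmt.Println(adaptiveWindow(500 * time.Millisecond)) // 5  (clamped up to the minimum)
	fmt.Println(adaptiveWindow(5 * time.Second))        // 25
	fmt.Println(adaptiveWindow(30 * time.Second))       // 100 (clamped down to the maximum)
}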
+ Msg("fetching epoch in background") + + startTime := time.Now() + ctx, cancel := context.WithTimeout(r.fetcherCtx, 30*time.Second) + defer cancel() + + event, err := r.fetchEpochSync(ctx, epochStart) + if err != nil { + r.logger.Debug().Err(err).Uint64("epoch_start", epochStart).Msg("failed to fetch epoch in background") + return + } + + // Record fetch time for adaptive window + fetchDuration := time.Since(startTime) + r.epochCache.recordFetchTime(fetchDuration) + + // Cache the event + r.epochCache.set(epochStart, event) + + r.logger.Debug(). + Uint64("epoch_start", epochStart). + Int("tx_count", len(event.Txs)). + Dur("fetch_duration", fetchDuration). + Msg("cached epoch in background") + + // Cleanup old epochs (keep last 5 epochs) + if epochStart >= r.genesis.DAStartHeight+r.daEpochSize*5 { + cleanupBefore := epochStart - r.daEpochSize*5 + if cleanupBefore < r.genesis.DAStartHeight { + cleanupBefore = r.genesis.DAStartHeight + } + r.epochCache.cleanup(cleanupBefore) + } +} + +// RetrieveForcedIncludedTxs retrieves forced inclusion transactions at the given DA height. +// It respects epoch boundaries and only fetches at epoch start. +// Uses cached results from background fetcher when available. +func (r *ForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*ForcedInclusionEvent, error) { + if !r.client.HasForcedInclusionNamespace() { + return nil, ErrForceInclusionNotConfigured + } + + // Update our tracking of DA height + r.SetDAHeight(daHeight) + + epochStart, _ := types.CalculateEpochBoundaries(daHeight, r.genesis.DAStartHeight, r.daEpochSize) + + if daHeight != epochStart { + r.logger.Debug(). + Uint64("da_height", daHeight). + Uint64("epoch_start", epochStart). + Msg("not at epoch start - returning empty transactions") + + return &ForcedInclusionEvent{ + StartDaHeight: daHeight, + EndDaHeight: daHeight, + Txs: [][]byte{}, + }, nil + } + + // Check if we have this epoch cached from background fetcher + if cachedEvent, ok := r.epochCache.get(epochStart); ok { + r.logger.Debug(). + Uint64("epoch_start", epochStart). + Int("tx_count", len(cachedEvent.Txs)). + Msg("using cached forced inclusion transactions") + return cachedEvent, nil + } + + // Not cached, fetch synchronously + r.logger.Debug(). + Uint64("da_height", daHeight). + Uint64("epoch_start", epochStart). + Msg("cache miss, fetching forced inclusion transactions synchronously") + + return r.fetchEpochSync(ctx, epochStart) +} + +// fetchEpochSync synchronously fetches an entire epoch's forced inclusion transactions +func (r *ForcedInclusionRetriever) fetchEpochSync(ctx context.Context, epochStart uint64) (*ForcedInclusionEvent, error) { + epochEnd := epochStart + r.daEpochSize - 1 + currentEpochNumber := types.CalculateEpochNumber(epochStart, r.genesis.DAStartHeight, r.daEpochSize) + + event := &ForcedInclusionEvent{ + StartDaHeight: epochStart, + Txs: [][]byte{}, + } + + r.logger.Debug(). + Uint64("epoch_start", epochStart). + Uint64("epoch_end", epochEnd). + Uint64("epoch_num", currentEpochNumber). + Msg("fetching forced included transactions from DA") + + epochStartResult := r.client.RetrieveForcedInclusion(ctx, epochStart) + if epochStartResult.Code == coreda.StatusHeightFromFuture { + r.logger.Debug(). + Uint64("epoch_start", epochStart). 
+ Msg("epoch start height not yet available on DA - backoff required") + return nil, fmt.Errorf("%w: epoch start height %d not yet available", coreda.ErrHeightFromFuture, epochStart) + } + + epochEndResult := epochStartResult + if epochStart != epochEnd { + epochEndResult = r.client.RetrieveForcedInclusion(ctx, epochEnd) + if epochEndResult.Code == coreda.StatusHeightFromFuture { + r.logger.Debug(). + Uint64("epoch_end", epochEnd). + Msg("epoch end height not yet available on DA - backoff required") + return nil, fmt.Errorf("%w: epoch end height %d not yet available", coreda.ErrHeightFromFuture, epochEnd) + } + } + + lastProcessedHeight := epochStart + + if err := r.processForcedInclusionBlobs(event, &lastProcessedHeight, epochStartResult, epochStart); err != nil { + return nil, err + } + + // Process heights between start and end (exclusive) + for epochHeight := epochStart + 1; epochHeight < epochEnd; epochHeight++ { + result := r.client.RetrieveForcedInclusion(ctx, epochHeight) + + // If any intermediate height is from future, break early + if result.Code == coreda.StatusHeightFromFuture { + r.logger.Debug(). + Uint64("epoch_height", epochHeight). + Uint64("last_processed", lastProcessedHeight). + Msg("reached future DA height within epoch - stopping") + break + } + + if err := r.processForcedInclusionBlobs(event, &lastProcessedHeight, result, epochHeight); err != nil { + return nil, err + } + } + + // Process epoch end (only if different from start) + if epochEnd != epochStart { + if err := r.processForcedInclusionBlobs(event, &lastProcessedHeight, epochEndResult, epochEnd); err != nil { + return nil, err + } + } + + event.EndDaHeight = lastProcessedHeight + + r.logger.Info(). + Uint64("epoch_start", epochStart). + Uint64("epoch_end", lastProcessedHeight). + Int("tx_count", len(event.Txs)). + Msg("retrieved forced inclusion transactions") + + return event, nil +} + +// processForcedInclusionBlobs processes blobs from a single DA height for forced inclusion. +func (r *ForcedInclusionRetriever) processForcedInclusionBlobs( + event *ForcedInclusionEvent, + lastProcessedHeight *uint64, + result coreda.ResultRetrieve, + height uint64, +) error { + if result.Code == coreda.StatusNotFound { + r.logger.Debug().Uint64("height", height).Msg("no forced inclusion blobs at height") + *lastProcessedHeight = height + return nil + } + + if result.Code != coreda.StatusSuccess { + return fmt.Errorf("failed to retrieve forced inclusion blobs at height %d: %s", height, result.Message) + } + + // Process each blob as a transaction + for _, blob := range result.Data { + if len(blob) > 0 { + event.Txs = append(event.Txs, blob) + } + } + + *lastProcessedHeight = height + + r.logger.Debug(). + Uint64("height", height). + Int("blob_count", len(result.Data)). 
+ Msg("processed forced inclusion blobs") + + return nil +} diff --git a/block/internal/da/forced_inclusion_retriever_test.go b/block/internal/da/forced_inclusion_retriever_test.go new file mode 100644 index 0000000000..0897bc23fd --- /dev/null +++ b/block/internal/da/forced_inclusion_retriever_test.go @@ -0,0 +1,352 @@ +package da + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/rs/zerolog" + "gotest.tools/v3/assert" + + coreda "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/pkg/genesis" +) + +func TestNewForcedInclusionRetriever(t *testing.T) { + client := NewClient(Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + t.Cleanup(func() { retriever.StopBackgroundFetcher() }) + assert.Assert(t, retriever != nil) + assert.Equal(t, retriever.daEpochSize, uint64(10)) +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NoNamespace(t *testing.T) { + client := NewClient(Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + // No forced inclusion namespace + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + t.Cleanup(func() { retriever.StopBackgroundFetcher() }) + ctx := context.Background() + + _, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.Assert(t, err != nil) + assert.ErrorContains(t, err, "not configured") +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NotAtEpochStart(t *testing.T) { + client := NewClient(Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + t.Cleanup(func() { retriever.StopBackgroundFetcher() }) + ctx := context.Background() + + // Height 105 is not an epoch start (100, 110, 120, etc. 
are epoch starts) + event, err := retriever.RetrieveForcedIncludedTxs(ctx, 105) + assert.NilError(t, err) + assert.Assert(t, event != nil) + assert.Equal(t, event.StartDaHeight, uint64(105)) + assert.Equal(t, event.EndDaHeight, uint64(105)) + assert.Equal(t, len(event.Txs), 0) +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EpochStartSuccess(t *testing.T) { + testBlobs := [][]byte{ + []byte("tx1"), + []byte("tx2"), + []byte("tx3"), + } + + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + return &coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1"), []byte("id2"), []byte("id3")}, + Timestamp: time.Now(), + }, nil + }, + getFunc: func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { + return testBlobs, nil + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 1, // Single height epoch + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + t.Cleanup(func() { retriever.StopBackgroundFetcher() }) + ctx := context.Background() + + // Height 100 is an epoch start + event, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.NilError(t, err) + assert.Assert(t, event != nil) + assert.Equal(t, event.StartDaHeight, uint64(100)) + assert.Equal(t, event.EndDaHeight, uint64(100)) + assert.Equal(t, len(event.Txs), len(testBlobs)) + assert.DeepEqual(t, event.Txs[0], testBlobs[0]) +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_EpochStartNotAvailable(t *testing.T) { + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + return nil, coreda.ErrHeightFromFuture + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + t.Cleanup(func() { retriever.StopBackgroundFetcher() }) + ctx := context.Background() + + _, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.Assert(t, err != nil) + assert.ErrorContains(t, err, "not yet available") +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_NoBlobsAtHeight(t *testing.T) { + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + return nil, coreda.ErrBlobNotFound + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 1, // Single height epoch + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + t.Cleanup(func() { retriever.StopBackgroundFetcher() }) + ctx := context.Background() + + event, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.NilError(t, err) + assert.Assert(t, event != nil) + assert.Equal(t, len(event.Txs), 0) +} + +func TestForcedInclusionRetriever_RetrieveForcedIncludedTxs_MultiHeightEpoch(t *testing.T) { + callCount := 0 + testBlobsByHeight := 
map[uint64][][]byte{ + 100: {[]byte("tx1"), []byte("tx2")}, + 101: {[]byte("tx3")}, + 102: {[]byte("tx4"), []byte("tx5"), []byte("tx6")}, + } + + mockDAInstance := &mockDA{ + getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + callCount++ + blobs, exists := testBlobsByHeight[height] + if !exists { + return nil, coreda.ErrBlobNotFound + } + ids := make([]coreda.ID, len(blobs)) + for i := range blobs { + ids[i] = []byte("id") + } + return &coreda.GetIDsResult{ + IDs: ids, + Timestamp: time.Now(), + }, nil + }, + getFunc: func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { + // Return blobs based on current call count + switch callCount { + case 1: + return testBlobsByHeight[100], nil + case 2: + return testBlobsByHeight[101], nil + case 3: + return testBlobsByHeight[102], nil + default: + return nil, errors.New("unexpected call") + } + }, + } + + client := NewClient(Config{ + DA: mockDAInstance, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 3, // Epoch: 100-102 + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + t.Cleanup(func() { retriever.StopBackgroundFetcher() }) + ctx := context.Background() + + event, err := retriever.RetrieveForcedIncludedTxs(ctx, 100) + assert.NilError(t, err) + assert.Assert(t, event != nil) + assert.Equal(t, event.StartDaHeight, uint64(100)) + assert.Equal(t, event.EndDaHeight, uint64(102)) + + // Should have collected all txs from all heights + expectedTxCount := len(testBlobsByHeight[100]) + len(testBlobsByHeight[101]) + len(testBlobsByHeight[102]) + assert.Equal(t, len(event.Txs), expectedTxCount) +} + +func TestForcedInclusionRetriever_processForcedInclusionBlobs(t *testing.T) { + client := NewClient(Config{ + DA: &mockDA{}, + Logger: zerolog.Nop(), + Namespace: "test-ns", + DataNamespace: "test-data-ns", + ForcedInclusionNamespace: "test-fi-ns", + }) + + gen := genesis.Genesis{ + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + retriever := NewForcedInclusionRetriever(client, gen, zerolog.Nop()) + t.Cleanup(func() { retriever.StopBackgroundFetcher() }) + + tests := []struct { + name string + result coreda.ResultRetrieve + height uint64 + expectedTxCount int + expectedLastHeight uint64 + expectError bool + }{ + { + name: "success with blobs", + result: coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusSuccess, + }, + Data: [][]byte{[]byte("tx1"), []byte("tx2")}, + }, + height: 100, + expectedTxCount: 2, + expectedLastHeight: 100, + expectError: false, + }, + { + name: "not found", + result: coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusNotFound, + }, + }, + height: 100, + expectedTxCount: 0, + expectedLastHeight: 100, + expectError: false, + }, + { + name: "error status", + result: coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusError, + Message: "test error", + }, + }, + height: 100, + expectError: true, + }, + { + name: "empty blobs are skipped", + result: coreda.ResultRetrieve{ + BaseResult: coreda.BaseResult{ + Code: coreda.StatusSuccess, + }, + Data: [][]byte{[]byte("tx1"), {}, []byte("tx2")}, + }, + height: 100, + expectedTxCount: 2, + expectedLastHeight: 100, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + event := &ForcedInclusionEvent{ + 
Txs: [][]byte{}, + } + lastHeight := uint64(0) + + err := retriever.processForcedInclusionBlobs(event, &lastHeight, tt.result, tt.height) + + if tt.expectError { + assert.Assert(t, err != nil) + } else { + assert.NilError(t, err) + assert.Equal(t, len(event.Txs), tt.expectedTxCount) + assert.Equal(t, lastHeight, tt.expectedLastHeight) + } + }) + } +} diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index 5578fe043e..856932ce24 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -10,6 +10,7 @@ import ( "time" "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/rs/zerolog" "golang.org/x/sync/errgroup" @@ -67,6 +68,8 @@ type Executor struct { // - State transitions and validation // - P2P broadcasting of produced blocks // - DA submission of headers and data +// +// When BasedSequencer is enabled, signer can be nil as blocks are not signed. func NewExecutor( store store.Store, exec coreexecutor.Executor, @@ -82,17 +85,20 @@ func NewExecutor( options common.BlockOptions, errorCh chan<- error, ) (*Executor, error) { - if signer == nil { - return nil, errors.New("signer cannot be nil") - } + // For based sequencer, signer is optional as blocks are not signed + if !config.Node.BasedSequencer { + if signer == nil { + return nil, errors.New("signer cannot be nil") + } - addr, err := signer.GetAddress() - if err != nil { - return nil, fmt.Errorf("failed to get address: %w", err) - } + addr, err := signer.GetAddress() + if err != nil { + return nil, fmt.Errorf("failed to get address: %w", err) + } - if !bytes.Equal(addr, genesis.ProposerAddress) { - return nil, common.ErrNotProposer + if !bytes.Equal(addr, genesis.ProposerAddress) { + return nil, common.ErrNotProposer + } } return &Executor{ @@ -204,6 +210,7 @@ func (e *Executor) initializeState() error { } e.setLastState(state) + e.sequencer.SetDAHeight(state.DAHeight) // Initialize store height using batch for atomicity batch, err := e.store.NewBatch(e.ctx) @@ -379,8 +386,12 @@ func (e *Executor) produceBlock() error { return fmt.Errorf("failed to apply block: %w", err) } + // set the DA height in the sequencer + newState.DAHeight = e.sequencer.GetDAHeight() + // signing the header is done after applying the block // as for signing, the state of the block may be required by the signature payload provider. 
+ // For based sequencer, this will return an empty signature signature, err := e.signHeader(header.Header) if err != nil { return fmt.Errorf("failed to sign header: %w", err) @@ -440,8 +451,9 @@ func (e *Executor) produceBlock() error { // retrieveBatch gets the next batch of transactions from the sequencer func (e *Executor) retrieveBatch(ctx context.Context) (*BatchData, error) { req := coresequencer.GetNextBatchRequest{ - Id: []byte(e.genesis.ChainID), - MaxBytes: common.DefaultMaxBlobSize, + Id: []byte(e.genesis.ChainID), + MaxBytes: common.DefaultMaxBlobSize, + LastBatchData: [][]byte{}, // Can be populated if needed for sequencer context } res, err := e.sequencer.GetNextBatch(ctx, req) @@ -495,16 +507,28 @@ func (e *Executor) createBlock(ctx context.Context, height uint64, batchData *Ba lastSignature = *lastSignaturePtr } - // Get signer info - pubKey, err := e.signer.GetPublic() - if err != nil { - return nil, nil, fmt.Errorf("failed to get public key: %w", err) - } + // Get signer info and validator hash + var pubKey crypto.PubKey + var validatorHash types.Hash - // Get validator hash - validatorHash, err := e.options.ValidatorHasherProvider(e.genesis.ProposerAddress, pubKey) - if err != nil { - return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) + if e.signer != nil { + var err error + pubKey, err = e.signer.GetPublic() + if err != nil { + return nil, nil, fmt.Errorf("failed to get public key: %w", err) + } + + validatorHash, err = e.options.ValidatorHasherProvider(e.genesis.ProposerAddress, pubKey) + if err != nil { + return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) + } + } else { + // For based sequencer without signer, use nil pubkey and compute validator hash + var err error + validatorHash, err = e.options.ValidatorHasherProvider(e.genesis.ProposerAddress, nil) + if err != nil { + return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) + } } // Create header @@ -586,6 +610,11 @@ func (e *Executor) applyBlock(ctx context.Context, header types.Header, data *ty // signHeader signs the block header func (e *Executor) signHeader(header types.Header) (types.Signature, error) { + // For based sequencer, return empty signature as there is no signer + if e.signer == nil { + return types.Signature{}, nil + } + bz, err := e.options.AggregatorNodeSignatureBytesProvider(&header) if err != nil { return nil, fmt.Errorf("failed to get signature payload: %w", err) diff --git a/block/internal/executing/executor_lazy_test.go b/block/internal/executing/executor_lazy_test.go index b72f0a856b..a11cf6a1c2 100644 --- a/block/internal/executing/executor_lazy_test.go +++ b/block/internal/executing/executor_lazy_test.go @@ -73,6 +73,7 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { initStateRoot := []byte("init_root") mockExec.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec.initializeState()) // Set up context for the executor (normally done in Start method) @@ -91,6 +92,8 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). 
Return([]byte("new_root_1"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + // Direct call to produceBlock should work (this is what lazy timer does) err = exec.produceBlock() require.NoError(t, err) @@ -113,6 +116,8 @@ func TestLazyMode_ProduceBlockLogic(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(2), mock.AnythingOfType("time.Time"), []byte("new_root_1")). Return([]byte("new_root_2"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec.produceBlock() require.NoError(t, err) @@ -183,6 +188,7 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { initStateRoot := []byte("init_root") mockExec.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec.initializeState()) // Set up context for the executor (normally done in Start method) @@ -201,6 +207,8 @@ func TestRegularMode_ProduceBlockLogic(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). Return([]byte("new_root_1"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec.produceBlock() require.NoError(t, err) diff --git a/block/internal/executing/executor_logic_test.go b/block/internal/executing/executor_logic_test.go index 9aa79d0c43..6029186e86 100644 --- a/block/internal/executing/executor_logic_test.go +++ b/block/internal/executing/executor_logic_test.go @@ -95,6 +95,7 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { initStateRoot := []byte("init_root") mockExec.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq.EXPECT().SetDAHeight(uint64(0)).Return().Once() // initialize state (creates genesis block in store and sets state) require.NoError(t, exec.initializeState()) @@ -113,6 +114,8 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). Return([]byte("new_root"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + // produce one block err = exec.produceBlock() require.NoError(t, err) @@ -180,6 +183,7 @@ func TestPendingLimit_SkipsProduction(t *testing.T) { mockExec.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return([]byte("i0"), uint64(1024), nil).Once() + mockSeq.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec.initializeState()) // Set up context for the executor (normally done in Start method) @@ -196,6 +200,8 @@ func TestPendingLimit_SkipsProduction(t *testing.T) { mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), []byte("i0")). 
Return([]byte("i1"), uint64(1024), nil).Once() + mockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + require.NoError(t, exec.produceBlock()) h1, err := memStore.Height(context.Background()) require.NoError(t, err) diff --git a/block/internal/executing/executor_restart_test.go b/block/internal/executing/executor_restart_test.go index 3f0e8b500c..14daccddcc 100644 --- a/block/internal/executing/executor_restart_test.go +++ b/block/internal/executing/executor_restart_test.go @@ -73,6 +73,7 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { initStateRoot := []byte("init_root") mockExec1.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq1.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec1.initializeState()) // Set up context for first executor @@ -92,6 +93,8 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { mockExec1.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). Return([]byte("new_root_1"), uint64(1024), nil).Once() + mockSeq1.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec1.produceBlock() require.NoError(t, err) @@ -189,6 +192,7 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { require.NoError(t, err) // Initialize state for second executor (should load existing state) + mockSeq2.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec2.initializeState()) // Set up context for second executor @@ -206,7 +210,9 @@ func TestExecutor_RestartUsesPendingHeader(t *testing.T) { mockExec2.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(2), mock.AnythingOfType("time.Time"), currentState2.AppHash). Return([]byte("new_root_2"), uint64(1024), nil).Once() - // Note: mockSeq2 should NOT receive any calls because pending block should be used + mockSeq2.EXPECT().GetDAHeight().Return(uint64(0)).Once() + + // Note: mockSeq2 should NOT receive GetNextBatch calls because pending block should be used err = exec2.produceBlock() require.NoError(t, err) @@ -289,6 +295,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { initStateRoot := []byte("init_root") mockExec1.EXPECT().InitChain(mock.Anything, mock.AnythingOfType("time.Time"), gen.InitialHeight, gen.ChainID). Return(initStateRoot, uint64(1024), nil).Once() + mockSeq1.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec1.initializeState()) exec1.ctx, exec1.cancel = context.WithCancel(context.Background()) @@ -307,6 +314,8 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { mockExec1.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), initStateRoot). Return([]byte("new_root_1"), uint64(1024), nil).Once() + mockSeq1.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec1.produceBlock() require.NoError(t, err) @@ -338,6 +347,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { ) require.NoError(t, err) + mockSeq2.EXPECT().SetDAHeight(uint64(0)).Return().Once() require.NoError(t, exec2.initializeState()) exec2.ctx, exec2.cancel = context.WithCancel(context.Background()) defer exec2.cancel() @@ -360,6 +370,8 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) { mockExec2.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(2), mock.AnythingOfType("time.Time"), []byte("new_root_1")). 
Return([]byte("new_root_2"), uint64(1024), nil).Once() + mockSeq2.EXPECT().GetDAHeight().Return(uint64(0)).Once() + err = exec2.produceBlock() require.NoError(t, err) diff --git a/block/internal/reaping/reaper.go b/block/internal/reaping/reaper.go index 64388b2ce0..62ff5dfba1 100644 --- a/block/internal/reaping/reaper.go +++ b/block/internal/reaping/reaper.go @@ -34,7 +34,7 @@ type Reaper struct { sequencer coresequencer.Sequencer chainID string interval time.Duration - cache cache.Manager + cache cache.CacheManager executor *executing.Executor // shared components @@ -53,7 +53,7 @@ func NewReaper( genesis genesis.Genesis, logger zerolog.Logger, executor *executing.Executor, - cache cache.Manager, + cache cache.CacheManager, scrapeInterval time.Duration, ) (*Reaper, error) { if executor == nil { diff --git a/block/internal/reaping/reaper_test.go b/block/internal/reaping/reaper_test.go index d9dc701276..fac03bdd5d 100644 --- a/block/internal/reaping/reaper_test.go +++ b/block/internal/reaping/reaper_test.go @@ -65,28 +65,21 @@ func newTestExecutor(t *testing.T) *executing.Executor { } // helper to create a cache manager for tests -func newTestCache(t *testing.T) cache.Manager { +func newTestCache(t *testing.T) cache.CacheManager { t.Helper() - // Create a mock store for the cache manager - storeMock := testmocks.NewMockStore(t) - storeMock.EXPECT().GetMetadata(mock.Anything, "last-submitted-header-height").Return(nil, ds.ErrNotFound).Maybe() - storeMock.EXPECT().GetMetadata(mock.Anything, "last-submitted-data-height").Return(nil, ds.ErrNotFound).Maybe() - storeMock.EXPECT().Height(mock.Anything).Return(uint64(0), nil).Maybe() - storeMock.EXPECT().SetMetadata(mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() - cfg := config.Config{ RootDir: t.TempDir(), ClearCache: true, } - cacheManager, err := cache.NewManager(cfg, storeMock, zerolog.Nop()) + cacheManager, err := cache.NewCacheManager(cfg, zerolog.Nop()) require.NoError(t, err) return cacheManager } // reaper with mocks and cache manager -func newTestReaper(t *testing.T, chainID string, execMock *testmocks.MockExecutor, seqMock *testmocks.MockSequencer, e *executing.Executor, cm cache.Manager) *Reaper { +func newTestReaper(t *testing.T, chainID string, execMock *testmocks.MockExecutor, seqMock *testmocks.MockSequencer, e *executing.Executor, cm cache.CacheManager) *Reaper { t.Helper() r, err := NewReaper(execMock, seqMock, genesis.Genesis{ChainID: chainID}, zerolog.Nop(), e, cm, 100*time.Millisecond) diff --git a/block/internal/submitting/da_submitter.go b/block/internal/submitting/da_submitter.go index 5a8fabc167..8cf741dcd9 100644 --- a/block/internal/submitting/da_submitter.go +++ b/block/internal/submitting/da_submitter.go @@ -12,6 +12,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" coreda "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/pkg/config" pkgda "github.com/evstack/ev-node/pkg/da" @@ -94,24 +95,20 @@ func clamp(v, min, max time.Duration) time.Duration { // DASubmitter handles DA submission operations type DASubmitter struct { - da coreda.DA + client da.Client config config.Config genesis genesis.Genesis options common.BlockOptions logger zerolog.Logger metrics *common.Metrics - // calculate namespaces bytes once and reuse them - namespaceBz []byte - namespaceDataBz []byte - // address selector for multi-account support addressSelector pkgda.AddressSelector } // NewDASubmitter 
creates a new DA submitter func NewDASubmitter( - da coreda.DA, + client da.Client, config config.Config, genesis genesis.Genesis, options common.BlockOptions, @@ -122,7 +119,7 @@ func NewDASubmitter( if config.RPC.EnableDAVisualization { visualizerLogger := logger.With().Str("component", "da_visualization").Logger() - server.SetDAVisualizationServer(server.NewDAVisualizationServer(da, visualizerLogger, config.Node.Aggregator)) + server.SetDAVisualizationServer(server.NewDAVisualizationServer(client.GetDA(), visualizerLogger, config.Node.Aggregator)) } // Use NoOp metrics if nil to avoid nil checks throughout the code @@ -142,14 +139,12 @@ func NewDASubmitter( } return &DASubmitter{ - da: da, + client: client, config: config, genesis: genesis, options: options, metrics: metrics, logger: daSubmitterLogger, - namespaceBz: coreda.NamespaceFromString(config.DA.GetNamespace()).Bytes(), - namespaceDataBz: coreda.NamespaceFromString(config.DA.GetDataNamespace()).Bytes(), addressSelector: addressSelector, } } @@ -199,7 +194,7 @@ func (s *DASubmitter) SubmitHeaders(ctx context.Context, cache cache.Manager) er } }, "header", - s.namespaceBz, + s.client.GetHeaderNamespace(), []byte(s.config.DA.SubmitOptions), func() uint64 { return cache.NumPendingHeaders() }, ) @@ -242,7 +237,7 @@ func (s *DASubmitter) SubmitData(ctx context.Context, cache cache.Manager, signe } }, "data", - s.namespaceDataBz, + s.client.GetDataNamespace(), []byte(s.config.DA.SubmitOptions), func() uint64 { return cache.NumPendingData() }, ) @@ -411,7 +406,7 @@ func submitToDA[T any]( // Perform submission start := time.Now() - res := types.SubmitWithHelpers(submitCtx, s.da, s.logger, marshaled, -1, namespace, mergedOptions) + res := s.client.Submit(submitCtx, marshaled, -1, namespace, mergedOptions) s.logger.Debug().Int("attempts", rs.Attempt).Dur("elapsed", time.Since(start)).Uint64("code", uint64(res.Code)).Msg("got SubmitWithHelpers response from celestia") // Record submission result for observability diff --git a/block/internal/submitting/da_submitter_integration_test.go b/block/internal/submitting/da_submitter_integration_test.go index 421340e11d..5b768e1a51 100644 --- a/block/internal/submitting/da_submitter_integration_test.go +++ b/block/internal/submitting/da_submitter_integration_test.go @@ -15,6 +15,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" coreda "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -86,7 +87,13 @@ func TestDASubmitter_SubmitHeadersAndData_MarksInclusionAndUpdatesLastSubmitted( dummyDA := coreda.NewDummyDA(10_000_000, 10*time.Millisecond) // Create DA submitter - daSubmitter := NewDASubmitter(dummyDA, cfg, gen, common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop()) + daClient := da.NewClient(da.Config{ + DA: dummyDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + }) + daSubmitter := NewDASubmitter(daClient, cfg, gen, common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop()) // Submit headers and data require.NoError(t, daSubmitter.SubmitHeaders(context.Background(), cm)) diff --git a/block/internal/submitting/da_submitter_mocks_test.go b/block/internal/submitting/da_submitter_mocks_test.go index d914e6db61..b215b0cf2f 100644 --- a/block/internal/submitting/da_submitter_mocks_test.go +++ b/block/internal/submitting/da_submitter_mocks_test.go @@ -11,6 
+11,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" coreda "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -25,10 +26,17 @@ func newTestSubmitter(mockDA *mocks.MockDA, override func(*config.Config)) *DASu cfg.DA.MaxSubmitAttempts = 3 cfg.DA.SubmitOptions = "opts" cfg.DA.Namespace = "ns" + cfg.DA.DataNamespace = "ns-data" if override != nil { override(&cfg) } - return NewDASubmitter(mockDA, cfg, genesis.Genesis{} /*options=*/, common.BlockOptions{}, common.NopMetrics(), zerolog.Nop()) + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + }) + return NewDASubmitter(daClient, cfg, genesis.Genesis{} /*options=*/, common.BlockOptions{}, common.NopMetrics(), zerolog.Nop()) } // marshal helper for simple items diff --git a/block/internal/submitting/da_submitter_test.go b/block/internal/submitting/da_submitter_test.go index c657d8185b..214ab98db4 100644 --- a/block/internal/submitting/da_submitter_test.go +++ b/block/internal/submitting/da_submitter_test.go @@ -15,6 +15,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" coreda "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -51,8 +52,14 @@ func setupDASubmitterTest(t *testing.T) (*DASubmitter, store.Store, cache.Manage } // Create DA submitter + daClient := da.NewClient(da.Config{ + DA: dummyDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + }) daSubmitter := NewDASubmitter( - dummyDA, + daClient, cfg, gen, common.DefaultBlockOptions(), @@ -80,7 +87,7 @@ func TestDASubmitter_NewDASubmitter(t *testing.T) { submitter, _, _, _, _ := setupDASubmitterTest(t) assert.NotNil(t, submitter) - assert.NotNil(t, submitter.da) + assert.NotNil(t, submitter.client) assert.NotNil(t, submitter.config) assert.NotNil(t, submitter.genesis) } @@ -95,8 +102,14 @@ func TestNewDASubmitterSetsVisualizerWhenEnabled(t *testing.T) { dummyDA := coreda.NewDummyDA(10_000_000, 10*time.Millisecond) + daClient := da.NewClient(da.Config{ + DA: dummyDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + }) NewDASubmitter( - dummyDA, + daClient, cfg, genesis.Genesis{}, common.DefaultBlockOptions(), diff --git a/block/internal/submitting/submitter_test.go b/block/internal/submitting/submitter_test.go index c13d8a1df7..33350ae268 100644 --- a/block/internal/submitting/submitter_test.go +++ b/block/internal/submitting/submitter_test.go @@ -18,6 +18,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/rpc/server" @@ -158,8 +159,16 @@ func TestSubmitter_setSequencerHeightToDAHeight(t *testing.T) { mockStore := testmocks.NewMockStore(t) cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" metrics := common.NopMetrics() - daSub := NewDASubmitter(nil, cfg, genesis.Genesis{}, common.DefaultBlockOptions(), metrics, zerolog.Nop()) + daClient := da.NewClient(da.Config{ + DA: nil, + Logger: 
zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + }) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) s := NewSubmitter(mockStore, nil, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, zerolog.Nop(), nil) s.ctx = ctx @@ -238,7 +247,13 @@ func TestSubmitter_processDAInclusionLoop_advances(t *testing.T) { exec.On("SetFinal", mock.Anything, uint64(1)).Return(nil).Once() exec.On("SetFinal", mock.Anything, uint64(2)).Return(nil).Once() - daSub := NewDASubmitter(nil, cfg, genesis.Genesis{}, common.DefaultBlockOptions(), metrics, zerolog.Nop()) + daClient := da.NewClient(da.Config{ + DA: nil, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + }) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) s := NewSubmitter(st, exec, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, zerolog.Nop(), nil) // prepare two consecutive blocks in store with DA included in cache @@ -423,7 +438,13 @@ func TestSubmitter_CacheClearedOnHeightInclusion(t *testing.T) { exec.On("SetFinal", mock.Anything, uint64(1)).Return(nil).Once() exec.On("SetFinal", mock.Anything, uint64(2)).Return(nil).Once() - daSub := NewDASubmitter(nil, cfg, genesis.Genesis{}, common.DefaultBlockOptions(), metrics, zerolog.Nop()) + daClient := da.NewClient(da.Config{ + DA: nil, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + }) + daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) s := NewSubmitter(st, exec, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, zerolog.Nop(), nil) // Create test blocks diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index de67e1fd1c..c87750b0f5 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -5,34 +5,31 @@ import ( "context" "errors" "fmt" - "time" "github.com/rs/zerolog" "google.golang.org/protobuf/proto" "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" coreda "github.com/evstack/ev-node/core/da" - "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/types" pb "github.com/evstack/ev-node/types/pb/evnode/v1" ) -// defaultDATimeout is the default timeout for DA retrieval operations -const defaultDATimeout = 10 * time.Second +// DARetriever defines the interface for retrieving events from the DA layer +type DARetriever interface { + RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) +} -// DARetriever handles DA retrieval operations for syncing -type DARetriever struct { - da coreda.DA - cache cache.Manager +// daRetriever handles DA retrieval operations for syncing +type daRetriever struct { + client da.Client + cache cache.CacheManager genesis genesis.Genesis logger zerolog.Logger - // calculate namespaces bytes once and reuse them - namespaceBz []byte - namespaceDataBz []byte - // transient cache, only full event need to be passed to the syncer // on restart, will be refetch as da height is updated by syncer pendingHeaders map[uint64]*types.SignedHeader @@ -41,26 +38,23 @@ type DARetriever struct { // NewDARetriever creates a new DA retriever func NewDARetriever( - da coreda.DA, - cache cache.Manager, - config config.Config, + client da.Client, 
+ cache cache.CacheManager, genesis genesis.Genesis, logger zerolog.Logger, -) *DARetriever { - return &DARetriever{ - da: da, - cache: cache, - genesis: genesis, - logger: logger.With().Str("component", "da_retriever").Logger(), - namespaceBz: coreda.NamespaceFromString(config.DA.GetNamespace()).Bytes(), - namespaceDataBz: coreda.NamespaceFromString(config.DA.GetDataNamespace()).Bytes(), - pendingHeaders: make(map[uint64]*types.SignedHeader), - pendingData: make(map[uint64]*types.Data), +) *daRetriever { + return &daRetriever{ + client: client, + cache: cache, + genesis: genesis, + logger: logger.With().Str("component", "da_retriever").Logger(), + pendingHeaders: make(map[uint64]*types.SignedHeader), + pendingData: make(map[uint64]*types.Data), } } // RetrieveFromDA retrieves blocks from the specified DA height and returns height events -func (r *DARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { +func (r *daRetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { r.logger.Debug().Uint64("da_height", daHeight).Msg("retrieving from DA") blobsResp, err := r.fetchBlobs(ctx, daHeight) if err != nil { @@ -76,17 +70,17 @@ func (r *DARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]co return r.processBlobs(ctx, blobsResp.Data, daHeight), nil } -// fetchBlobs retrieves blobs from the DA layer -func (r *DARetriever) fetchBlobs(ctx context.Context, daHeight uint64) (coreda.ResultRetrieve, error) { - // Retrieve from both namespaces - headerRes := types.RetrieveWithHelpers(ctx, r.da, r.logger, daHeight, r.namespaceBz, defaultDATimeout) +// fetchBlobs retrieves blobs from both header and data namespaces +func (r *daRetriever) fetchBlobs(ctx context.Context, daHeight uint64) (coreda.ResultRetrieve, error) { + // Retrieve from both namespaces using the DA client + headerRes := r.client.RetrieveHeaders(ctx, daHeight) // If namespaces are the same, return header result - if bytes.Equal(r.namespaceBz, r.namespaceDataBz) { + if bytes.Equal(r.client.GetHeaderNamespace(), r.client.GetDataNamespace()) { return headerRes, r.validateBlobResponse(headerRes, daHeight) } - dataRes := types.RetrieveWithHelpers(ctx, r.da, r.logger, daHeight, r.namespaceDataBz, defaultDATimeout) + dataRes := r.client.RetrieveData(ctx, daHeight) // Validate responses headerErr := r.validateBlobResponse(headerRes, daHeight) @@ -133,7 +127,7 @@ func (r *DARetriever) fetchBlobs(ctx context.Context, daHeight uint64) (coreda.R // validateBlobResponse validates a blob response from DA layer // those are the only error code returned by da.RetrieveWithHelpers -func (r *DARetriever) validateBlobResponse(res coreda.ResultRetrieve, daHeight uint64) error { +func (r *daRetriever) validateBlobResponse(res coreda.ResultRetrieve, daHeight uint64) error { switch res.Code { case coreda.StatusError: return fmt.Errorf("DA retrieval failed: %s", res.Message) @@ -150,7 +144,7 @@ func (r *DARetriever) validateBlobResponse(res coreda.ResultRetrieve, daHeight u } // processBlobs processes retrieved blobs to extract headers and data and returns height events -func (r *DARetriever) processBlobs(ctx context.Context, blobs [][]byte, daHeight uint64) []common.DAHeightEvent { +func (r *daRetriever) processBlobs(ctx context.Context, blobs [][]byte, daHeight uint64) []common.DAHeightEvent { // Decode all blobs for _, bz := range blobs { if len(bz) == 0 { @@ -219,7 +213,7 @@ func (r *DARetriever) processBlobs(ctx context.Context, blobs [][]byte, daHeight } // 
tryDecodeHeader attempts to decode a blob as a header -func (r *DARetriever) tryDecodeHeader(bz []byte, daHeight uint64) *types.SignedHeader { +func (r *daRetriever) tryDecodeHeader(bz []byte, daHeight uint64) *types.SignedHeader { header := new(types.SignedHeader) var headerPb pb.SignedHeader @@ -259,7 +253,7 @@ func (r *DARetriever) tryDecodeHeader(bz []byte, daHeight uint64) *types.SignedH } // tryDecodeData attempts to decode a blob as signed data -func (r *DARetriever) tryDecodeData(bz []byte, daHeight uint64) *types.Data { +func (r *daRetriever) tryDecodeData(bz []byte, daHeight uint64) *types.Data { var signedData types.SignedData if err := signedData.UnmarshalBinary(bz); err != nil { return nil @@ -290,7 +284,7 @@ func (r *DARetriever) tryDecodeData(bz []byte, daHeight uint64) *types.Data { } // assertExpectedProposer validates the proposer address -func (r *DARetriever) assertExpectedProposer(proposerAddr []byte) error { +func (r *daRetriever) assertExpectedProposer(proposerAddr []byte) error { if string(proposerAddr) != string(r.genesis.ProposerAddress) { return fmt.Errorf("unexpected proposer: got %x, expected %x", proposerAddr, r.genesis.ProposerAddress) @@ -299,7 +293,7 @@ func (r *DARetriever) assertExpectedProposer(proposerAddr []byte) error { } // assertValidSignedData validates signed data using the configured signature provider -func (r *DARetriever) assertValidSignedData(signedData *types.SignedData) error { +func (r *daRetriever) assertValidSignedData(signedData *types.SignedData) error { if signedData == nil || signedData.Txs == nil { return errors.New("empty signed data") } diff --git a/block/internal/syncing/da_retriever_mock.go b/block/internal/syncing/da_retriever_mock.go new file mode 100644 index 0000000000..b17109dcc3 --- /dev/null +++ b/block/internal/syncing/da_retriever_mock.go @@ -0,0 +1,180 @@ +// Code generated by mockery; DO NOT EDIT. +// github.com/vektra/mockery +// template: testify + +package syncing + +import ( + "context" + + "github.com/evstack/ev-node/block/internal/common" + mock "github.com/stretchr/testify/mock" +) + +// NewMockDARetriever creates a new instance of MockDARetriever. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockDARetriever(t interface { + mock.TestingT + Cleanup(func()) +}) *MockDARetriever { + mock := &MockDARetriever{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// MockDARetriever is an autogenerated mock type for the DARetriever type +type MockDARetriever struct { + mock.Mock +} + +type MockDARetriever_Expecter struct { + mock *mock.Mock +} + +func (_m *MockDARetriever) EXPECT() *MockDARetriever_Expecter { + return &MockDARetriever_Expecter{mock: &_m.Mock} +} + +// RetrieveFromDA provides a mock function for the type MockDARetriever +func (_mock *MockDARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { + ret := _mock.Called(ctx, daHeight) + + if len(ret) == 0 { + panic("no return value specified for RetrieveFromDA") + } + + var r0 []common.DAHeightEvent + var r1 error + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) ([]common.DAHeightEvent, error)); ok { + return returnFunc(ctx, daHeight) + } + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) []common.DAHeightEvent); ok { + r0 = returnFunc(ctx, daHeight) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]common.DAHeightEvent) + } + } + if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = returnFunc(ctx, daHeight) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// MockDARetriever_RetrieveFromDA_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RetrieveFromDA' +type MockDARetriever_RetrieveFromDA_Call struct { + *mock.Call +} + +// RetrieveFromDA is a helper method to define mock.On call +// - ctx context.Context +// - daHeight uint64 +func (_e *MockDARetriever_Expecter) RetrieveFromDA(ctx interface{}, daHeight interface{}) *MockDARetriever_RetrieveFromDA_Call { + return &MockDARetriever_RetrieveFromDA_Call{Call: _e.mock.On("RetrieveFromDA", ctx, daHeight)} +} + +func (_c *MockDARetriever_RetrieveFromDA_Call) Run(run func(ctx context.Context, daHeight uint64)) *MockDARetriever_RetrieveFromDA_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 uint64 + if args[1] != nil { + arg1 = args[1].(uint64) + } + run( + arg0, + arg1, + ) + }) + return _c +} + +func (_c *MockDARetriever_RetrieveFromDA_Call) Return(vs []common.DAHeightEvent, err error) *MockDARetriever_RetrieveFromDA_Call { + _c.Call.Return(vs, err) + return _c +} + +func (_c *MockDARetriever_RetrieveFromDA_Call) RunAndReturn(run func(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error)) *MockDARetriever_RetrieveFromDA_Call { + _c.Call.Return(run) + return _c +} + +// SetDAHeight provides a mock function for the type MockDARetriever +func (_mock *MockDARetriever) SetDAHeight(height uint64) { + _mock.Called(height) + return +} + +// MockDARetriever_SetDAHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetDAHeight' +type MockDARetriever_SetDAHeight_Call struct { + *mock.Call +} + +// SetDAHeight is a helper method to define mock.On call +// - height uint64 +func (_e *MockDARetriever_Expecter) SetDAHeight(height interface{}) *MockDARetriever_SetDAHeight_Call { + return &MockDARetriever_SetDAHeight_Call{Call: _e.mock.On("SetDAHeight", height)} +} + +func (_c *MockDARetriever_SetDAHeight_Call) Run(run func(height uint64)) *MockDARetriever_SetDAHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 
uint64 + if args[0] != nil { + arg0 = args[0].(uint64) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockDARetriever_SetDAHeight_Call) Return() *MockDARetriever_SetDAHeight_Call { + _c.Call.Return() + return _c +} + +func (_c *MockDARetriever_SetDAHeight_Call) RunAndReturn(run func(height uint64)) *MockDARetriever_SetDAHeight_Call { + _c.Run(run) + return _c +} + +// StopBackgroundFetcher provides a mock function for the type MockDARetriever +func (_mock *MockDARetriever) StopBackgroundFetcher() { + _mock.Called() + return +} + +// MockDARetriever_StopBackgroundFetcher_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StopBackgroundFetcher' +type MockDARetriever_StopBackgroundFetcher_Call struct { + *mock.Call +} + +// StopBackgroundFetcher is a helper method to define mock.On call +func (_e *MockDARetriever_Expecter) StopBackgroundFetcher() *MockDARetriever_StopBackgroundFetcher_Call { + return &MockDARetriever_StopBackgroundFetcher_Call{Call: _e.mock.On("StopBackgroundFetcher")} +} + +func (_c *MockDARetriever_StopBackgroundFetcher_Call) Run(run func()) *MockDARetriever_StopBackgroundFetcher_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockDARetriever_StopBackgroundFetcher_Call) Return() *MockDARetriever_StopBackgroundFetcher_Call { + _c.Call.Return() + return _c +} + +func (_c *MockDARetriever_StopBackgroundFetcher_Call) RunAndReturn(run func()) *MockDARetriever_StopBackgroundFetcher_Call { + _c.Run(run) + return _c +} diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index c6e8daa78f..04ba66e423 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -8,8 +8,6 @@ import ( "testing" "time" - "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" "github.com/libp2p/go-libp2p/core/crypto" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" @@ -18,18 +16,45 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" coreda "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" signerpkg "github.com/evstack/ev-node/pkg/signer" - "github.com/evstack/ev-node/pkg/store" testmocks "github.com/evstack/ev-node/test/mocks" "github.com/evstack/ev-node/types" ) +// newTestDARetriever creates a DA retriever for testing with the given DA implementation +func newTestDARetriever(t *testing.T, mockDA coreda.DA, cfg config.Config, gen genesis.Genesis) *daRetriever { + t.Helper() + if cfg.DA.Namespace == "" { + cfg.DA.Namespace = "test-ns" + } + if cfg.DA.DataNamespace == "" { + cfg.DA.DataNamespace = "test-data-ns" + } + + cm, err := cache.NewCacheManager(cfg, zerolog.Nop()) + require.NoError(t, err) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + }) + + return NewDARetriever(daClient, cm, gen, zerolog.Nop()) +} + // makeSignedDataBytes builds SignedData containing the provided Data and returns its binary encoding func makeSignedDataBytes(t *testing.T, chainID string, height uint64, proposer []byte, pub crypto.PubKey, signer signerpkg.Signer, txs int) ([]byte, *types.SignedData) { - d := &types.Data{Metadata: &types.Metadata{ChainID: chainID, Height: height, Time: uint64(time.Now().UnixNano())}} + return 
makeSignedDataBytesWithTime(t, chainID, height, proposer, pub, signer, txs, uint64(time.Now().UnixNano())) +} + +func makeSignedDataBytesWithTime(t *testing.T, chainID string, height uint64, proposer []byte, pub crypto.PubKey, signer signerpkg.Signer, txs int, timestamp uint64) ([]byte, *types.SignedData) { + d := &types.Data{Metadata: &types.Metadata{ChainID: chainID, Height: height, Time: timestamp}} if txs > 0 { d.Txs = make(types.Txs, txs) for i := 0; i < txs; i++ { @@ -38,63 +63,45 @@ func makeSignedDataBytes(t *testing.T, chainID string, height uint64, proposer [ } // For DA SignedData, sign the Data payload bytes (matches DA submission logic) - payload, err := d.MarshalBinary() - require.NoError(t, err) - sig, err := signer.Sign(payload) - require.NoError(t, err) + payload, _ := d.MarshalBinary() + sig, _ := signer.Sign(payload) sd := &types.SignedData{Data: *d, Signature: sig, Signer: types.Signer{PubKey: pub, Address: proposer}} - bin, err := sd.MarshalBinary() - require.NoError(t, err) + bin, _ := sd.MarshalBinary() return bin, sd } func TestDARetriever_RetrieveFromDA_Invalid(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - assert.NoError(t, err) - mockDA := testmocks.NewMockDA(t) mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). Return(nil, errors.New("just invalid")).Maybe() - r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) events, err := r.RetrieveFromDA(context.Background(), 42) assert.Error(t, err) assert.Len(t, events, 0) } func TestDARetriever_RetrieveFromDA_NotFound(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - assert.NoError(t, err) - mockDA := testmocks.NewMockDA(t) // GetIDs returns ErrBlobNotFound -> helper maps to StatusNotFound mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). Return(nil, fmt.Errorf("%s: whatever", coreda.ErrBlobNotFound.Error())).Maybe() - r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) events, err := r.RetrieveFromDA(context.Background(), 42) assert.True(t, errors.Is(err, coreda.ErrBlobNotFound)) assert.Len(t, events, 0) } func TestDARetriever_RetrieveFromDA_HeightFromFuture(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) - mockDA := testmocks.NewMockDA(t) // GetIDs returns ErrHeightFromFuture -> helper maps to StatusHeightFromFuture, fetchBlobs returns error mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). 
Return(nil, fmt.Errorf("%s: later", coreda.ErrHeightFromFuture.Error())).Maybe() - r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) events, derr := r.RetrieveFromDA(context.Background(), 1000) assert.Error(t, derr) assert.True(t, errors.Is(derr, coreda.ErrHeightFromFuture)) @@ -102,10 +109,7 @@ func TestDARetriever_RetrieveFromDA_HeightFromFuture(t *testing.T) { } func TestDARetriever_RetrieveFromDA_Timeout(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) + t.Skip("Skipping flaky timeout test - timing is now controlled by DA client") mockDA := testmocks.NewMockDA(t) @@ -116,7 +120,7 @@ func TestDARetriever_RetrieveFromDA_Timeout(t *testing.T) { }). Return(nil, context.DeadlineExceeded).Maybe() - r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) start := time.Now() events, err := r.RetrieveFromDA(context.Background(), 42) @@ -129,15 +133,12 @@ func TestDARetriever_RetrieveFromDA_Timeout(t *testing.T) { assert.Len(t, events, 0) // Verify timeout occurred approximately at expected time (with some tolerance) - assert.Greater(t, duration, 9*time.Second, "should timeout after approximately 10 seconds") - assert.Less(t, duration, 12*time.Second, "should not take much longer than timeout") + // DA client has a 30-second default timeout + assert.Greater(t, duration, 29*time.Second, "should timeout after approximately 30 seconds") + assert.Less(t, duration, 35*time.Second, "should not take much longer than timeout") } func TestDARetriever_RetrieveFromDA_TimeoutFast(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) mockDA := testmocks.NewMockDA(t) @@ -145,7 +146,7 @@ func TestDARetriever_RetrieveFromDA_TimeoutFast(t *testing.T) { mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). 
Return(nil, context.DeadlineExceeded).Maybe() - r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) events, err := r.RetrieveFromDA(context.Background(), 42) @@ -157,15 +158,11 @@ func TestDARetriever_RetrieveFromDA_TimeoutFast(t *testing.T) { } func TestDARetriever_ProcessBlobs_HeaderAndData_Success(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) dataBin, data := makeSignedDataBytes(t, gen.ChainID, 2, addr, pub, signer, 2) hdrBin, _ := makeSignedHeaderBytes(t, gen.ChainID, 2, addr, pub, signer, nil, &data.Data, nil) @@ -186,14 +183,10 @@ func TestDARetriever_ProcessBlobs_HeaderAndData_Success(t *testing.T) { } func TestDARetriever_ProcessBlobs_HeaderOnly_EmptyDataExpected(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) // Header with no data hash present should trigger empty data creation (per current logic) hb, _ := makeSignedHeaderBytes(t, gen.ChainID, 3, addr, pub, signer, nil, nil, nil) @@ -214,14 +207,10 @@ func TestDARetriever_ProcessBlobs_HeaderOnly_EmptyDataExpected(t *testing.T) { } func TestDARetriever_TryDecodeHeaderAndData_Basic(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) hb, sh := makeSignedHeaderBytes(t, gen.ChainID, 5, addr, pub, signer, nil, nil, nil) gotH := r.tryDecodeHeader(hb, 123) @@ -239,15 +228,11 @@ func TestDARetriever_TryDecodeHeaderAndData_Basic(t *testing.T) { } func TestDARetriever_tryDecodeData_InvalidSignatureOrProposer(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) goodAddr, pub, signer := buildSyncTestSigner(t) badAddr := []byte("not-the-proposer") gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: badAddr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) // Signed data is made by goodAddr; retriever expects badAddr -> should be rejected db, _ := makeSignedDataBytes(t, gen.ChainID, 7, goodAddr, pub, signer, 1) @@ -255,7 
+240,7 @@ func TestDARetriever_tryDecodeData_InvalidSignatureOrProposer(t *testing.T) { } func TestDARetriever_validateBlobResponse(t *testing.T) { - r := &DARetriever{logger: zerolog.Nop()} + r := &daRetriever{logger: zerolog.Nop()} // StatusSuccess -> nil err := r.validateBlobResponse(coreda.ResultRetrieve{BaseResult: coreda.BaseResult{Code: coreda.StatusSuccess}}, 1) assert.NoError(t, err) @@ -269,10 +254,6 @@ func TestDARetriever_validateBlobResponse(t *testing.T) { } func TestDARetriever_RetrieveFromDA_TwoNamespaces_Success(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} @@ -300,7 +281,7 @@ func TestDARetriever_RetrieveFromDA_TwoNamespaces_Success(t *testing.T) { mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceDataBz) })). Return([][]byte{dataBin}, nil).Once() - r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, cfg, gen) events, derr := r.RetrieveFromDA(context.Background(), 1234) require.NoError(t, derr) @@ -310,15 +291,11 @@ func TestDARetriever_RetrieveFromDA_TwoNamespaces_Success(t *testing.T) { } func TestDARetriever_ProcessBlobs_CrossDAHeightMatching(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) // Create header and data for the same block height but from different DA heights dataBin, data := makeSignedDataBytes(t, gen.ChainID, 5, addr, pub, signer, 2) @@ -346,15 +323,11 @@ func TestDARetriever_ProcessBlobs_CrossDAHeightMatching(t *testing.T) { } func TestDARetriever_ProcessBlobs_MultipleHeadersCrossDAHeightMatching(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) // Create multiple headers and data for different block heights data3Bin, data3 := makeSignedDataBytes(t, gen.ChainID, 3, addr, pub, signer, 1) diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index 84aa0a363b..3410d495fc 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -15,6 +15,11 @@ import ( "github.com/evstack/ev-node/types" ) +type p2pHandler interface { + ProcessHeight(ctx context.Context, height uint64, heightInCh chan<- common.DAHeightEvent) error + SetProcessedHeight(height uint64) +} + // P2PHandler coordinates block retrieval from P2P stores for the syncer. 
// It waits for both header and data to be available at a given height, // validates their consistency, and emits events to the syncer for processing. @@ -24,7 +29,7 @@ import ( type P2PHandler struct { headerStore goheader.Store[*types.SignedHeader] dataStore goheader.Store[*types.Data] - cache cache.Manager + cache cache.CacheManager genesis genesis.Genesis logger zerolog.Logger @@ -35,7 +40,7 @@ type P2PHandler struct { func NewP2PHandler( headerStore goheader.Store[*types.SignedHeader], dataStore goheader.Store[*types.Data], - cache cache.Manager, + cache cache.CacheManager, genesis genesis.Genesis, logger zerolog.Logger, ) *P2PHandler { diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index bd9c4178af..dfab41faae 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - ds "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p/core/crypto" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" @@ -19,7 +18,6 @@ import ( "github.com/evstack/ev-node/pkg/genesis" signerpkg "github.com/evstack/ev-node/pkg/signer" "github.com/evstack/ev-node/pkg/signer/noop" - storemocks "github.com/evstack/ev-node/test/mocks" extmocks "github.com/evstack/ev-node/test/mocks/external" "github.com/evstack/ev-node/types" ) @@ -61,7 +59,7 @@ type P2PTestData struct { Handler *P2PHandler HeaderStore *extmocks.MockStore[*types.SignedHeader] DataStore *extmocks.MockStore[*types.Data] - Cache cache.Manager + Cache cache.CacheManager Genesis genesis.Genesis ProposerAddr []byte ProposerPub crypto.PubKey @@ -78,17 +76,11 @@ func setupP2P(t *testing.T) *P2PTestData { headerStoreMock := extmocks.NewMockStore[*types.SignedHeader](t) dataStoreMock := extmocks.NewMockStore[*types.Data](t) - storeMock := storemocks.NewMockStore(t) - storeMock.EXPECT().GetMetadata(mock.Anything, "last-submitted-header-height").Return(nil, ds.ErrNotFound).Maybe() - storeMock.EXPECT().GetMetadata(mock.Anything, "last-submitted-data-height").Return(nil, ds.ErrNotFound).Maybe() - storeMock.EXPECT().Height(mock.Anything).Return(uint64(0), nil).Maybe() - storeMock.EXPECT().SetMetadata(mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() - cfg := config.Config{ RootDir: t.TempDir(), ClearCache: true, } - cacheManager, err := cache.NewManager(cfg, storeMock, zerolog.Nop()) + cacheManager, err := cache.NewCacheManager(cfg, zerolog.Nop()) require.NoError(t, err, "failed to create cache manager") handler := NewP2PHandler(headerStoreMock, dataStoreMock, cacheManager, gen, zerolog.Nop()) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index d34dceca51..930906a014 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -3,6 +3,8 @@ package syncing import ( "bytes" "context" + "crypto/sha256" + "encoding/hex" "errors" "fmt" "sync" @@ -18,21 +20,13 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/store" "github.com/evstack/ev-node/types" ) -type daRetriever interface { - RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) -} - -type p2pHandler interface { - ProcessHeight(ctx context.Context, height uint64, heightInCh chan<- common.DAHeightEvent) error - 
SetProcessedHeight(height uint64) -} - // Syncer handles block synchronization from DA and P2P sources. type Syncer struct { // Core components @@ -41,7 +35,7 @@ type Syncer struct { da coreda.DA // Shared components - cache cache.Manager + cache cache.CacheManager metrics *common.Metrics // Configuration @@ -64,7 +58,8 @@ type Syncer struct { errorCh chan<- error // Channel to report critical execution client failures // Handlers - daRetriever daRetriever + daRetriever DARetriever + fiRetriever *da.ForcedInclusionRetriever p2pHandler p2pHandler // Logging @@ -84,7 +79,7 @@ func NewSyncer( store store.Store, exec coreexecutor.Executor, da coreda.DA, - cache cache.Manager, + cache cache.CacheManager, metrics *common.Metrics, config config.Config, genesis genesis.Genesis, @@ -123,7 +118,16 @@ func (s *Syncer) Start(ctx context.Context) error { } // Initialize handlers - s.daRetriever = NewDARetriever(s.da, s.cache, s.config, s.genesis, s.logger) + daClient := da.NewClient(da.Config{ + DA: s.da, + Logger: s.logger, + DefaultTimeout: 30 * time.Second, + Namespace: s.config.DA.GetNamespace(), + DataNamespace: s.config.DA.GetDataNamespace(), + ForcedInclusionNamespace: s.config.DA.GetForcedInclusionNamespace(), + }) + s.daRetriever = NewDARetriever(daClient, s.cache, s.genesis, s.logger) + s.fiRetriever = da.NewForcedInclusionRetriever(daClient, s.genesis, s.logger) s.p2pHandler = NewP2PHandler(s.headerStore.Store(), s.dataStore.Store(), s.cache, s.genesis, s.logger) if currentHeight, err := s.store.Height(s.ctx); err != nil { s.logger.Error().Err(err).Msg("failed to set initial processed height for p2p handler") @@ -152,6 +156,9 @@ func (s *Syncer) Stop() error { } s.cancelP2PWait(0) s.wg.Wait() + + s.fiRetriever.StopBackgroundFetcher() + s.logger.Info().Msg("syncer stopped") return nil } @@ -482,6 +489,8 @@ func (s *Syncer) processHeightEvent(event *common.DAHeightEvent) { switch { case errors.Is(err, errInvalidBlock): // do not reschedule + case errors.Is(err, errMaliciousProposer): + s.sendCriticalError(fmt.Errorf("sequencer is malicious. Restart the node with --node.aggregator --node.based_sequencer, or keep the chain halted: %w", err)) case errors.Is(err, errInvalidState): s.sendCriticalError(fmt.Errorf("invalid state detected (block-height %d, state-height %d) "+ "- block references do not match local state. Manual intervention required: %w", event.Header.Height(), @@ -547,6 +556,15 @@ func (s *Syncer) trySyncNextBlock(event *common.DAHeightEvent) error { return err } + // Verify forced inclusion transactions if configured + if err := s.verifyForcedInclusionTxs(currentState, data); err != nil { + s.logger.Error().Err(err).Uint64("height", nextHeight).Msg("forced inclusion verification failed") + if errors.Is(err, errMaliciousProposer) { + s.cache.RemoveHeaderDAIncluded(headerHash) + return err + } + } + // Apply block newState, err := s.applyBlock(header.Header, data, currentState) if err != nil { @@ -668,6 +686,70 @@ func (s *Syncer) validateBlock(currState types.State, data *types.Data, header * return nil } +var errMaliciousProposer = errors.New("malicious proposer detected") + +// hashTx returns a hex-encoded SHA256 hash of the transaction.
+func hashTx(tx []byte) string { + hash := sha256.Sum256(tx) + return hex.EncodeToString(hash[:]) +} + +// verifyForcedInclusionTxs verifies that all forced inclusion transactions from DA are included in the block +func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types.Data) error { + if s.fiRetriever == nil { + return nil + } + + // Retrieve forced inclusion transactions from DA + forcedIncludedTxsEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(s.ctx, currentState.DAHeight) + if err != nil { + if errors.Is(err, da.ErrForceInclusionNotConfigured) { + s.logger.Debug().Msg("forced inclusion namespace not configured, skipping verification") + return nil + } + + return fmt.Errorf("failed to retrieve forced included txs from DA: %w", err) + } + + // If no forced inclusion transactions found, nothing to verify + if len(forcedIncludedTxsEvent.Txs) == 0 { + s.logger.Debug().Uint64("da_height", currentState.DAHeight).Msg("no forced inclusion transactions to verify") + return nil + } + + blockTxMap := make(map[string]struct{}) + for _, tx := range data.Txs { + blockTxMap[hashTx(tx)] = struct{}{} + } + + // Check if all forced inclusion transactions are present in the block + var missingTxs [][]byte + for _, forcedTx := range forcedIncludedTxsEvent.Txs { + if _, ok := blockTxMap[hashTx(forcedTx)]; !ok { + missingTxs = append(missingTxs, forcedTx) + } + } + + if len(missingTxs) > 0 { + s.logger.Error(). + Uint64("height", data.Height()). + Uint64("da_height", currentState.DAHeight). + Uint64("da_epoch_start", forcedIncludedTxsEvent.StartDaHeight). + Uint64("da_epoch_end", forcedIncludedTxsEvent.EndDaHeight). + Int("missing_count", len(missingTxs)). + Int("total_forced", len(forcedIncludedTxsEvent.Txs)). + Msg("SEQUENCER IS MALICIOUS: forced inclusion transactions missing from block") + return errors.Join(errMaliciousProposer, fmt.Errorf("sequencer is malicious: %d forced inclusion transactions not included in block", len(missingTxs))) + } + + s.logger.Debug(). + Uint64("height", data.Height()). + Int("forced_txs", len(forcedIncludedTxsEvent.Txs)). 
+ Msg("all forced inclusion transactions verified in block") + + return nil +} + // sendCriticalError sends a critical error to the error channel without blocking func (s *Syncer) sendCriticalError(err error) { if s.errorCh != nil { diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index 9d58d226ea..65f2586966 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -69,7 +69,7 @@ func TestSyncer_BackoffOnDAError(t *testing.T) { syncer.ctx = ctx // Setup mocks - daRetriever := newMockdaRetriever(t) + daRetriever := NewMockDARetriever(t) p2pHandler := newMockp2pHandler(t) p2pHandler.On("ProcessHeight", mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() syncer.daRetriever = daRetriever @@ -165,7 +165,7 @@ func TestSyncer_BackoffResetOnSuccess(t *testing.T) { addr, pub, signer := buildSyncTestSigner(t) gen := syncer.genesis - daRetriever := newMockdaRetriever(t) + daRetriever := NewMockDARetriever(t) p2pHandler := newMockp2pHandler(t) p2pHandler.On("ProcessHeight", mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() syncer.daRetriever = daRetriever @@ -256,7 +256,7 @@ func TestSyncer_BackoffBehaviorIntegration(t *testing.T) { syncer := setupTestSyncer(t, 500*time.Millisecond) syncer.ctx = ctx - daRetriever := newMockdaRetriever(t) + daRetriever := NewMockDARetriever(t) p2pHandler := newMockp2pHandler(t) p2pHandler.On("ProcessHeight", mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() syncer.daRetriever = daRetriever @@ -325,7 +325,8 @@ func setupTestSyncer(t *testing.T, daBlockTime time.Duration) *Syncer { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, _, _ := buildSyncTestSigner(t) diff --git a/block/internal/syncing/syncer_benchmark_test.go b/block/internal/syncing/syncer_benchmark_test.go index 7404de3c56..26b674b4b2 100644 --- a/block/internal/syncing/syncer_benchmark_test.go +++ b/block/internal/syncing/syncer_benchmark_test.go @@ -70,7 +70,7 @@ func BenchmarkSyncerIO(b *testing.B) { type benchFixture struct { s *Syncer st store.Store - cm cache.Manager + cm cache.CacheManager cancel context.CancelFunc } @@ -80,7 +80,8 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(b, err) addr, pub, signer := buildSyncTestSigner(b) @@ -132,7 +133,7 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay } // Mock DA retriever to emit exactly totalHeights events, then HFF and cancel - daR := newMockdaRetriever(b) + daR := NewMockDARetriever(b) for i := uint64(0); i < totalHeights; i++ { daHeight := i + daHeightOffset daR.On("RetrieveFromDA", mock.Anything, daHeight). 
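Note: the forced-inclusion check added to syncer.go above reduces to a set-membership test over hex-encoded SHA-256 transaction hashes. A minimal standalone sketch of that check (illustrative only; `missingForcedTxs` is a hypothetical name, while `hashTx` mirrors the helper the PR introduces):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hashTx keys transactions by their hex-encoded SHA-256 hash,
// matching the hashTx helper added in syncer.go.
func hashTx(tx []byte) string {
	h := sha256.Sum256(tx)
	return hex.EncodeToString(h[:])
}

// missingForcedTxs returns the forced-inclusion transactions that do not
// appear in the block's transaction list (hypothetical helper, same logic
// as verifyForcedInclusionTxs above).
func missingForcedTxs(blockTxs, forcedTxs [][]byte) [][]byte {
	seen := make(map[string]struct{}, len(blockTxs))
	for _, tx := range blockTxs {
		seen[hashTx(tx)] = struct{}{}
	}
	var missing [][]byte
	for _, tx := range forcedTxs {
		if _, ok := seen[hashTx(tx)]; !ok {
			missing = append(missing, tx)
		}
	}
	return missing
}

func main() {
	block := [][]byte{[]byte("tx-a"), []byte("forced-1")}
	forced := [][]byte{[]byte("forced-1"), []byte("forced-2")}
	// "forced-2" is absent, so a full node would flag the proposer as malicious.
	fmt.Println(len(missingForcedTxs(block, forced))) // 1
}
```

Keying the lookup by hash keeps the comparison O(n+m) in the number of transactions, independent of transaction size.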
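The new syncer_forced_inclusion_test.go below repeatedly notes epoch boundaries (e.g. "With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0]"). A hypothetical sketch of epoch arithmetic consistent with those comments; the actual formula lives in block/internal/da and may differ:

```go
package main

import "fmt"

// epochBounds computes the first and last DA heights of the epoch containing
// daHeight, assuming fixed-size epochs starting at daStartHeight
// (daHeight >= daStartHeight, epochSize > 0). This is an inferred formula,
// consistent with the test comments below, not a copy of the implementation.
func epochBounds(daHeight, daStartHeight, epochSize uint64) (start, end uint64) {
	epoch := (daHeight - daStartHeight) / epochSize
	start = daStartHeight + epoch*epochSize
	end = start + epochSize - 1
	return start, end
}

func main() {
	s, e := epochBounds(0, 0, 1)
	fmt.Println(s, e) // 0 0 -> start == end, so only the epoch start is scanned
	s, e = epochBounds(7, 2, 3)
	fmt.Println(s, e) // 5 7
}
```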
diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go new file mode 100644 index 0000000000..9a0525c5f0 --- /dev/null +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -0,0 +1,433 @@ +package syncing + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/evstack/ev-node/block/internal/cache" + "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" + coreda "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/pkg/store" + testmocks "github.com/evstack/ev-node/test/mocks" + "github.com/evstack/ev-node/types" +) + +func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 0, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). + Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + t.Cleanup(func() { fiRetriever.StopBackgroundFetcher() }) + + s := NewSyncer( + st, + mockExec, + mockDA, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + s.fiRetriever = fiRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock DA to return forced inclusion transactions + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // Create forced inclusion transaction blob (SignedData) in DA + dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) + + // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(0), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + + // Fetch epoch start data + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin}, nil).Once() + + // Create block data that includes the forced transaction blob + data := 
makeData(gen.ChainID, 1, 1) + data.Txs[0] = types.Tx(dataBin) + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should pass since all forced txs are included + err = s.verifyForcedInclusionTxs(currentState, data) + require.NoError(t, err) +} + +func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 0, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). + Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + t.Cleanup(func() { fiRetriever.StopBackgroundFetcher() }) + + s := NewSyncer( + st, + mockExec, + mockDA, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + s.fiRetriever = fiRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock DA to return forced inclusion transactions + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // Create forced inclusion transaction blob (SignedData) in DA + dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) + + // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(0), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() + + // Fetch epoch start data + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin}, nil).Once() + + // Create block data that does NOT include the forced transaction blob + data := makeData(gen.ChainID, 1, 2) + data.Txs[0] = types.Tx([]byte("regular_tx_1")) + data.Txs[1] = types.Tx([]byte("regular_tx_2")) + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should fail since forced tx blob is missing + err = s.verifyForcedInclusionTxs(currentState, data) + require.Error(t, err) + require.Contains(t, err.Error(), "sequencer is malicious") + require.Contains(t, err.Error(), "1 forced inclusion transactions not included") +} + +func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := 
cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 0, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). + Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + t.Cleanup(func() { fiRetriever.StopBackgroundFetcher() }) + + s := NewSyncer( + st, + mockExec, + mockDA, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + s.fiRetriever = fiRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock DA to return two forced inclusion transaction blobs + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // Create two forced inclusion transaction blobs in DA + dataBin1, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) + dataBin2, _ := makeSignedDataBytes(t, gen.ChainID, 11, addr, pub, signer, 1) + + // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(0), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1"), []byte("fi2")}, Timestamp: time.Now()}, nil).Once() + + // Fetch epoch start data + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin1, dataBin2}, nil).Once() + + // Create block data that includes only one of the forced transaction blobs + data := makeData(gen.ChainID, 1, 2) + data.Txs[0] = types.Tx(dataBin1) + data.Txs[1] = types.Tx([]byte("regular_tx")) + // dataBin2 is missing + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should fail since dataBin2 is missing + err = s.verifyForcedInclusionTxs(currentState, data) + require.Error(t, err) + require.Contains(t, err.Error(), "sequencer is malicious") + require.Contains(t, err.Error(), "1 forced inclusion transactions not included") +} + +func TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, _, _ := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 0, + DAEpochForcedInclusion: 1, + } + + cfg := 
config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). + Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + t.Cleanup(func() { fiRetriever.StopBackgroundFetcher() }) + + s := NewSyncer( + st, + mockExec, + mockDA, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + s.fiRetriever = fiRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Mock DA to return no forced inclusion transactions + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] + // Check epoch start only (end check is skipped when same as start) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(0), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + // Create block data + data := makeData(gen.ChainID, 1, 2) + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should pass since no forced txs to verify + err = s.verifyForcedInclusionTxs(currentState, data) + require.NoError(t, err) +} + +func TestVerifyForcedInclusionTxs_NamespaceNotConfigured(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, _, _ := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + } + + cfg := config.DefaultConfig() + // Leave ForcedInclusionNamespace empty + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). 
+ Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + // No ForcedInclusionNamespace - not configured + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + t.Cleanup(func() { fiRetriever.StopBackgroundFetcher() }) + + s := NewSyncer( + st, + mockExec, + mockDA, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + s.fiRetriever = fiRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + // Create block data + data := makeData(gen.ChainID, 1, 2) + + currentState := s.GetLastState() + currentState.DAHeight = 0 + + // Verify - should pass since namespace not configured + err = s.verifyForcedInclusionTxs(currentState, data) + require.NoError(t, err) +} diff --git a/block/internal/syncing/syncer_mock.go b/block/internal/syncing/syncer_mock.go index 6238a58364..aae45399e4 100644 --- a/block/internal/syncing/syncer_mock.go +++ b/block/internal/syncing/syncer_mock.go @@ -11,101 +11,6 @@ import ( mock "github.com/stretchr/testify/mock" ) -// newMockdaRetriever creates a new instance of mockdaRetriever. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func newMockdaRetriever(t interface { - mock.TestingT - Cleanup(func()) -}) *mockdaRetriever { - mock := &mockdaRetriever{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} - -// mockdaRetriever is an autogenerated mock type for the daRetriever type -type mockdaRetriever struct { - mock.Mock -} - -type mockdaRetriever_Expecter struct { - mock *mock.Mock -} - -func (_m *mockdaRetriever) EXPECT() *mockdaRetriever_Expecter { - return &mockdaRetriever_Expecter{mock: &_m.Mock} -} - -// RetrieveFromDA provides a mock function for the type mockdaRetriever -func (_mock *mockdaRetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { - ret := _mock.Called(ctx, daHeight) - - if len(ret) == 0 { - panic("no return value specified for RetrieveFromDA") - } - - var r0 []common.DAHeightEvent - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) ([]common.DAHeightEvent, error)); ok { - return returnFunc(ctx, daHeight) - } - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) []common.DAHeightEvent); ok { - r0 = returnFunc(ctx, daHeight) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]common.DAHeightEvent) - } - } - if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) error); ok { - r1 = returnFunc(ctx, daHeight) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// mockdaRetriever_RetrieveFromDA_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RetrieveFromDA' -type mockdaRetriever_RetrieveFromDA_Call struct { - *mock.Call -} - -// RetrieveFromDA is a helper method to define mock.On call -// - ctx context.Context -// - daHeight uint64 -func (_e *mockdaRetriever_Expecter) RetrieveFromDA(ctx interface{}, daHeight interface{}) *mockdaRetriever_RetrieveFromDA_Call { - return 
&mockdaRetriever_RetrieveFromDA_Call{Call: _e.mock.On("RetrieveFromDA", ctx, daHeight)} -} - -func (_c *mockdaRetriever_RetrieveFromDA_Call) Run(run func(ctx context.Context, daHeight uint64)) *mockdaRetriever_RetrieveFromDA_Call { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - var arg1 uint64 - if args[1] != nil { - arg1 = args[1].(uint64) - } - run( - arg0, - arg1, - ) - }) - return _c -} - -func (_c *mockdaRetriever_RetrieveFromDA_Call) Return(dAHeightEvents []common.DAHeightEvent, err error) *mockdaRetriever_RetrieveFromDA_Call { - _c.Call.Return(dAHeightEvents, err) - return _c -} - -func (_c *mockdaRetriever_RetrieveFromDA_Call) RunAndReturn(run func(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error)) *mockdaRetriever_RetrieveFromDA_Call { - _c.Call.Return(run) - return _c -} - // newMockp2pHandler creates a new instance of mockp2pHandler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func newMockp2pHandler(t interface { diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 93e7ae38b7..baa9b3b138 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -103,7 +103,8 @@ func makeData(chainID string, height uint64, txs int) *types.Data { func TestSyncer_validateBlock_DataHashMismatch(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -151,7 +152,8 @@ func TestSyncer_validateBlock_DataHashMismatch(t *testing.T) { func TestProcessHeightEvent_SyncsAndUpdatesState(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -205,7 +207,8 @@ func TestProcessHeightEvent_SyncsAndUpdatesState(t *testing.T) { func TestSequentialBlockSync(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -294,7 +297,8 @@ func TestSyncer_sendNonBlockingSignal(t *testing.T) { func TestSyncer_processPendingEvents(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) // current height 1 @@ -340,7 +344,7 @@ func TestSyncLoopPersistState(t *testing.T) { cfg.RootDir = t.TempDir() cfg.ClearCache = true - cacheMgr, err := cache.NewManager(cfg, st, zerolog.Nop()) + cacheMgr, err := cache.NewCacheManager(cfg, zerolog.Nop()) require.NoError(t, err) const myDAHeightOffset = uint64(1) @@ -383,7 +387,7 @@ func TestSyncLoopPersistState(t *testing.T) { ctx, cancel := context.WithCancel(t.Context()) syncerInst1.ctx = ctx - daRtrMock, p2pHndlMock := newMockdaRetriever(t), 
newMockp2pHandler(t) + daRtrMock, p2pHndlMock := NewMockDARetriever(t), newMockp2pHandler(t) p2pHndlMock.On("ProcessHeight", mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() p2pHndlMock.On("SetProcessedHeight", mock.Anything).Return().Maybe() syncerInst1.daRetriever, syncerInst1.p2pHandler = daRtrMock, p2pHndlMock @@ -451,7 +455,7 @@ func TestSyncLoopPersistState(t *testing.T) { require.Nil(t, event, "event at height %d should have been removed", blockHeight) } // and when new instance is up on restart - cacheMgr, err = cache.NewManager(cfg, st, zerolog.Nop()) + cacheMgr, err = cache.NewCacheManager(cfg, zerolog.Nop()) require.NoError(t, err) require.NoError(t, cacheMgr.LoadFromDisk()) @@ -475,7 +479,7 @@ func TestSyncLoopPersistState(t *testing.T) { ctx, cancel = context.WithCancel(t.Context()) t.Cleanup(cancel) syncerInst2.ctx = ctx - daRtrMock, p2pHndlMock = newMockdaRetriever(t), newMockp2pHandler(t) + daRtrMock, p2pHndlMock = NewMockDARetriever(t), newMockp2pHandler(t) p2pHndlMock.On("ProcessHeight", mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() p2pHndlMock.On("SetProcessedHeight", mock.Anything).Return().Maybe() syncerInst2.daRetriever, syncerInst2.p2pHandler = daRtrMock, p2pHndlMock diff --git a/block/public.go b/block/public.go index 8bfc4c1674..ef633f5004 100644 --- a/block/public.go +++ b/block/public.go @@ -1,6 +1,16 @@ package block -import "github.com/evstack/ev-node/block/internal/common" +import ( + "context" + "time" + + "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" + coreda "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" + "github.com/rs/zerolog" +) // BlockOptions defines the options for creating block components type BlockOptions = common.BlockOptions @@ -22,3 +32,46 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { func NopMetrics() *Metrics { return common.NopMetrics() } + +// ErrForceInclusionNotConfigured is returned when force inclusion is not configured. +// It is exported because sequencers need to check for this error. +var ErrForceInclusionNotConfigured = da.ErrForceInclusionNotConfigured + +// DAClient is the interface representing the DA client for public use.
+type DAClient = da.Client + +// ForcedInclusionEvent represents forced inclusion transactions retrieved from DA +type ForcedInclusionEvent = da.ForcedInclusionEvent + +// ForcedInclusionRetriever defines the interface for retrieving forced inclusion transactions from DA +type ForcedInclusionRetriever interface { + RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*da.ForcedInclusionEvent, error) + StopBackgroundFetcher() + SetDAHeight(height uint64) + GetDAHeight() uint64 +} + +// NewDAClient creates a new DA client with configuration +func NewDAClient( + daLayer coreda.DA, + config config.Config, + logger zerolog.Logger, +) DAClient { + return da.NewClient(da.Config{ + DA: daLayer, + Logger: logger, + DefaultTimeout: 10 * time.Second, + Namespace: config.DA.GetNamespace(), + DataNamespace: config.DA.GetDataNamespace(), + ForcedInclusionNamespace: config.DA.GetForcedInclusionNamespace(), + }) +} + +// NewForcedInclusionRetriever creates a new forced inclusion retriever +func NewForcedInclusionRetriever( + client DAClient, + genesis genesis.Genesis, + logger zerolog.Logger, +) ForcedInclusionRetriever { + return da.NewForcedInclusionRetriever(client, genesis, logger) +} diff --git a/core/execution/execution.go b/core/execution/execution.go index 896e2d65af..5085ebe578 100644 --- a/core/execution/execution.go +++ b/core/execution/execution.go @@ -52,6 +52,7 @@ type Executor interface { // Requirements: // - Must validate state transition against previous state root // - Must handle empty transaction list + // - Must handle gibberish transactions gracefully // - Must maintain deterministic execution // - Must respect context cancellation/timeout // - The rest of the rules are defined by the specific execution layer diff --git a/core/sequencer/dummy.go b/core/sequencer/dummy.go index 5f44dae2a8..ef614173a8 100644 --- a/core/sequencer/dummy.go +++ b/core/sequencer/dummy.go @@ -64,3 +64,13 @@ func (s *DummySequencer) VerifyBatch(ctx context.Context, req VerifyBatchRequest Status: true, }, nil } + +// SetDAHeight sets the current DA height for the sequencer +func (s *DummySequencer) SetDAHeight(height uint64) { + // No-op for dummy sequencer +} + +// GetDAHeight returns the current DA height for the sequencer +func (s *DummySequencer) GetDAHeight() uint64 { + return 0 +} diff --git a/core/sequencer/sequencing.go b/core/sequencer/sequencing.go index 006c892d8f..e97ef93dd3 100644 --- a/core/sequencer/sequencing.go +++ b/core/sequencer/sequencing.go @@ -7,15 +7,15 @@ import ( "time" ) -// Sequencer is a generic interface for a sequencer +// Sequencer defines the minimal sequencing interface used by the block executor.
type Sequencer interface { - // SubmitBatchTxs submits a batch of transactions from to sequencer + // SubmitBatchTxs submits a batch of transactions from executor to sequencer // Id is the unique identifier for the target chain // Batch is the batch of transactions to submit // returns an error if any from the sequencer SubmitBatchTxs(ctx context.Context, req SubmitBatchTxsRequest) (*SubmitBatchTxsResponse, error) - // GetNextBatch returns the next batch of transactions from sequencer to + // GetNextBatch returns the next batch of transactions from sequencer and from DA to // Id is the unique identifier for the target chain // LastBatchHash is the cryptographic hash of the last batch received by the // MaxBytes is the maximum number of bytes to return in the batch @@ -27,6 +27,13 @@ type Sequencer interface { // BatchHash is the cryptographic hash of the batch to verify // returns a boolean indicating if the batch is valid and an error if any from the sequencer VerifyBatch(ctx context.Context, req VerifyBatchRequest) (*VerifyBatchResponse, error) + + // SetDAHeight sets the current DA height for the sequencer + // This allows the sequencer to track DA height for forced inclusion retrieval + SetDAHeight(height uint64) + + // GetDAHeight returns the current DA height for the sequencer + GetDAHeight() uint64 } // Batch is a collection of transactions diff --git a/da/internal/mocks/da.go b/da/internal/mocks/da.go index 37539d5480..bb3ad63391 100644 --- a/da/internal/mocks/da.go +++ b/da/internal/mocks/da.go @@ -112,126 +112,6 @@ func (_c *MockDA_Commit_Call) RunAndReturn(run func(ctx context.Context, blobs [ return _c } -// GasMultiplier provides a mock function for the type MockDA -func (_mock *MockDA) GasMultiplier(ctx context.Context) (float64, error) { - ret := _mock.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GasMultiplier") - } - - var r0 float64 - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context) (float64, error)); ok { - return returnFunc(ctx) - } - if returnFunc, ok := ret.Get(0).(func(context.Context) float64); ok { - r0 = returnFunc(ctx) - } else { - r0 = ret.Get(0).(float64) - } - if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = returnFunc(ctx) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockDA_GasMultiplier_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GasMultiplier' -type MockDA_GasMultiplier_Call struct { - *mock.Call -} - -// GasMultiplier is a helper method to define mock.On call -// - ctx context.Context -func (_e *MockDA_Expecter) GasMultiplier(ctx interface{}) *MockDA_GasMultiplier_Call { - return &MockDA_GasMultiplier_Call{Call: _e.mock.On("GasMultiplier", ctx)} -} - -func (_c *MockDA_GasMultiplier_Call) Run(run func(ctx context.Context)) *MockDA_GasMultiplier_Call { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - run( - arg0, - ) - }) - return _c -} - -func (_c *MockDA_GasMultiplier_Call) Return(f float64, err error) *MockDA_GasMultiplier_Call { - _c.Call.Return(f, err) - return _c -} - -func (_c *MockDA_GasMultiplier_Call) RunAndReturn(run func(ctx context.Context) (float64, error)) *MockDA_GasMultiplier_Call { - _c.Call.Return(run) - return _c -} - -// GasPrice provides a mock function for the type MockDA -func (_mock *MockDA) GasPrice(ctx context.Context) (float64, error) { - ret := _mock.Called(ctx) - - if len(ret) == 0 { - panic("no return value 
specified for GasPrice") - } - - var r0 float64 - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context) (float64, error)); ok { - return returnFunc(ctx) - } - if returnFunc, ok := ret.Get(0).(func(context.Context) float64); ok { - r0 = returnFunc(ctx) - } else { - r0 = ret.Get(0).(float64) - } - if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = returnFunc(ctx) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockDA_GasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GasPrice' -type MockDA_GasPrice_Call struct { - *mock.Call -} - -// GasPrice is a helper method to define mock.On call -// - ctx context.Context -func (_e *MockDA_Expecter) GasPrice(ctx interface{}) *MockDA_GasPrice_Call { - return &MockDA_GasPrice_Call{Call: _e.mock.On("GasPrice", ctx)} -} - -func (_c *MockDA_GasPrice_Call) Run(run func(ctx context.Context)) *MockDA_GasPrice_Call { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - run( - arg0, - ) - }) - return _c -} - -func (_c *MockDA_GasPrice_Call) Return(f float64, err error) *MockDA_GasPrice_Call { - _c.Call.Return(f, err) - return _c -} - -func (_c *MockDA_GasPrice_Call) RunAndReturn(run func(ctx context.Context) (float64, error)) *MockDA_GasPrice_Call { - _c.Call.Return(run) - return _c -} - // Get provides a mock function for the type MockDA func (_mock *MockDA) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Blob, error) { ret := _mock.Called(ctx, ids, namespace) diff --git a/docs/adr/adr-019-forced-inclusion-mechanism.md b/docs/adr/adr-019-forced-inclusion-mechanism.md index 378dd9b17f..51e1be63cf 100644 --- a/docs/adr/adr-019-forced-inclusion-mechanism.md +++ b/docs/adr/adr-019-forced-inclusion-mechanism.md @@ -4,435 +4,697 @@ - 2025-03-24: Initial draft - 2025-04-23: Renumbered from ADR-018 to ADR-019 to maintain chronological order. +- 2025-11-10: Updated to reflect actual implementation ## Context -Evolve currently supports a single sequencer implementation as described in ADR-013. While this approach provides a simple and efficient solution, it introduces a single point of failure that can impact the liveness of the network. If the sequencer goes down or becomes unresponsive, the chain cannot progress. +In a single-sequencer rollup architecture, users depend entirely on the sequencer to include their transactions in blocks. This creates several problems: -To address this limitation and improve the liveness properties of applications built with Evolve, we propose implementing a forced inclusion mechanism. This mechanism will allow transactions to be included directly from the Data Availability (DA) layer when the sequencer is unresponsive, creating an "unstoppable" property for Evolve-based chains. +1. **Censorship Risk**: A malicious or coerced sequencer can selectively exclude transactions +2. **Liveness Failure**: If the sequencer goes offline, no new transactions can be processed +3. **Centralization**: Users must trust a single entity to behave honestly +4. **No Recourse**: Users have no alternative path to submit transactions if the sequencer refuses them -This enhancement aligns with the requirements defined in the [L2 Beat framework](https://forum.l2beat.com/t/the-stages-framework/291#p-516-stage-1-requirements-3) for Stage 1 L2s, advancing Evolve's capabilities as a robust sequencer library. 
+While eventual solutions like decentralized sequencer networks exist, they introduce significant complexity. We need a simpler mechanism that provides censorship resistance and liveness guarantees while maintaining the performance benefits of a single sequencer.

## Alternative Approaches

### Decentralized Sequencer

-A fully decentralized sequencer could solve the liveness issue by distributing sequencing responsibilities across multiple nodes. However, this approach introduces significant complexity in terms of consensus, leader election, and coordination between nodes. It would require substantial development effort and resources, making it less suitable as an immediate solution.
+A fully decentralized sequencer network would eliminate single points of failure but requires:
+
+- Complex consensus mechanisms
+- Increased latency due to coordination
+- More infrastructure and operational complexity

### Automatic Sequencer Failover

-Another approach would be to implement an automatic failover mechanism where backup sequencers take over when the primary sequencer fails. While simpler than a fully decentralized solution, this approach still requires managing multiple sequencers and introduces complexity in coordination and state transfer between them.
+Implementing automatic failover to backup sequencers when the primary goes down requires:
+
+- Complex monitoring and health checks
+- Coordination between sequencers to prevent forks
+
+Even with failover in place, a malicious primary sequencer can still censor transactions, so this approach does not address censorship.

-## Decision
-
-We will implement a forced inclusion mechanism for the Evolve single sequencer architecture that uses a time-based inclusion delay approach. This approach will:
-
-1. Track when transactions are first seen in terms of DA block time
-2. Require a minimum number of DA blocks to pass before including a direct transaction
-3. Let full nodes enforce inclusion within a fixed period of time window
-
-The mechanism will be designed to maintain backward compatibility with existing Evolve deployments while providing enhanced liveness guarantees.
+## Decision
+
+We implement a **forced inclusion mechanism** that allows users to submit transactions directly to the Data Availability (DA) layer. This approach provides:
+
+1. **Censorship Resistance**: Users can always bypass the sequencer by posting to DA
+2. **Verifiable Inclusion**: Full nodes verify that sequencers include all forced transactions
+3. **Based Rollup Option**: A based sequencer mode for fully DA-driven transaction ordering
+4. 
**Simplicity**: No complex timing mechanisms or fallback modes ### High-Level Architecture -The following diagram illustrates the high-level architecture of the forced inclusion mechanism: - -```mermaid -flowchart TB - subgraph DAL["Data Availability Layer"] - end - - subgraph SEQ["Single Sequencer"] - subgraph NO["Normal Operation"] - direction TB - process["Process user txs"] - create["Create batches"] - include["Include direct txs from DA"] - checkDelay["Check MinDADelay"] - end - end - - subgraph FN["Full Nodes"] - subgraph NormalOp["Normal Operation"] - follow["Follow sequencer produced blocks"] - validate["Validate time windows"] - validateDelay["Validate MinDADelay"] - end - - subgraph FallbackMode["Fallback Mode"] - detect["Detect sequencer down"] - scan["Scan DA for direct txs"] - createBlocks["Create deterministic blocks from direct txs"] - end - end - - SEQ -->|"Publish Batches"| DAL - DAL -->|"Direct Txs"| SEQ - DAL -->|"Direct Txs"| FN - SEQ -->|"Blocks"| FN - NormalOp <--> FallbackMode +``` +┌─────────────────────────────────────────────────────────────────┐ +│ User Actions │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ Normal Path: Forced Inclusion Path: │ +│ Submit tx to Sequencer ────► Submit tx directly to DA │ +│ (Fast) (Censorship-resistant) │ +│ │ +└──────────┬────────────────────────────────────┬─────────────────┘ + │ │ + ▼ ▼ + ┌─────────────┐ ┌──────────────────┐ + │ Sequencer │ │ DA Layer │ + │ (Mempool) │ │ (Forced Inc. NS) │ + └──────┬──────┘ └─────────┬────────┘ + │ │ + │ 1. Fetch forced inc. txs │ + │◄────────────────────────────────────┘ + │ + │ 2. Prepend forced txs to batch + │ + ▼ + ┌─────────────┐ + │ Block │ + │ Production │ + └──────┬──────┘ + │ + │ 3. Submit block to DA + │ + ▼ + ┌─────────────┐ + │ DA Layer │ + └──────┬──────┘ + │ + │ 4. Full nodes retrieve block + │ + ▼ + ┌─────────────────────┐ + │ Full Nodes │ + │ (Verification) │ + │ │ + │ 5. Verify forced │ + │ inc. txs are │ + │ included │ + └─────────────────────┘ ``` +### Key Components + +1. **Forced Inclusion Namespace**: A dedicated DA namespace where users can post transactions +2. **DA Retriever**: Fetches forced inclusion transactions from DA using epoch-based scanning +3. **Single Sequencer**: Enhanced to include forced transactions from DA in every batch +4. **Based Sequencer**: Alternative sequencer that ONLY retrieves transactions from DA +5. **Verification**: Full nodes validate that blocks include all forced transactions + ## Detailed Design ### User Requirements -- Developers need a mechanism to ensure their chains can progress even when the single sequencer is unavailable -- The system should maintain a deterministic and consistent state regardless of sequencer availability -- The transition between sequencer-led and forced inclusion modes should be seamless -- Transactions must be included within a fixed time window from when they are first seen -- Direct transactions must wait for a minimum number of DA blocks before inclusion +Users can submit transactions in two ways: -### Systems Affected +1. **Normal Path**: Submit to sequencer's mempool/RPC (fast, low cost) +2. **Forced Inclusion Path**: Submit directly to DA forced inclusion namespace (censorship-resistant) -The implementation of the forced inclusion mechanism will affect several components of the Evolve framework: +No additional requirements or monitoring needed from users. -1. 
**Single Sequencer**: Must be modified to track and include direct transactions from the DA layer within the time window and after minimum DA block delay -2. **Full Node**: Must be updated to recognize and validate blocks with forced inclusions -3. **Block Processing Logic**: Must implement the modified fork choice rule -4. **DA Client**: Must be enhanced to scan for direct transactions -5. **Transaction Validation**: Must validate both sequencer-batched and direct transactions +### Systems Affected + +1. **DA Layer**: New namespace for forced inclusion transactions +2. **Sequencer (Single)**: Fetches and includes forced transactions +3. **Sequencer (Based)**: New sequencer type that only uses DA transactions +4. **DA Retriever**: New component for fetching forced transactions +5. **Syncer**: Verifies forced transaction inclusion in blocks +6. **Configuration**: New fields for forced inclusion settings ### Data Structures -#### Direct Transaction Tracking +#### Forced Inclusion Event ```go -type ForcedInclusionConfig struct { - MaxInclusionDelay uint64 // Max inclusion time in DA block time units - MinDADelay uint64 // Minimum number of DA blocks before including a direct tx +type ForcedIncludedEvent struct { + Txs [][]byte // Forced inclusion transactions + StartDaHeight uint64 // Start of DA height range + EndDaHeight uint64 // End of DA height range } +``` + +#### DA Retriever Interface -type DirectTransaction struct { - TxHash common.Hash - FirstSeenAt uint64 // DA block time when the tx was seen - Included bool // Whether it has been included in a block - IncludedAt uint64 // Height at which it was included +```go +type DARetriever interface { + // Retrieve forced inclusion transactions from DA at specified height + RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedIncludedEvent, error) } +``` -type DirectTxTracker struct { - txs map[common.Hash]DirectTransaction // Map of direct transactions - mu sync.RWMutex // Mutex for thread-safe access - latestSeenTime uint64 // Latest DA block time scanned - latestDAHeight uint64 // Latest DA block height +### APIs and Interfaces + +#### DA Retriever + +The DA Retriever component handles fetching forced inclusion transactions: + +```go +type daRetriever struct { + da coreda.DA + cache cache.CacheManager + genesis genesis.Genesis + logger zerolog.Logger + namespaceForcedInclusionBz []byte + hasForcedInclusionNs bool + daEpochSize uint64 } + +// RetrieveForcedIncludedTxsFromDA fetches forced inclusion transactions +// Only fetches at epoch boundaries to prevent redundant DA queries +func (r *daRetriever) RetrieveForcedIncludedTxsFromDA( + ctx context.Context, + daHeight uint64, +) (*ForcedIncludedEvent, error) ``` -#### Sequencer Status Tracking +#### Single Sequencer Extension + +The single sequencer is enhanced to fetch and include forced transactions: ```go -type SequencerStatus struct { - IsActive bool // Whether the sequencer is considered active - LastActiveTime uint64 // Last DA block time where sequencer posted a batch - InactiveTime uint64 // Time since last sequencer activity +type Sequencer struct { + // ... existing fields ... + fiRetriever ForcedInclusionRetriever + genesis genesis.Genesis + daHeight atomic.Uint64 + pendingForcedInclusionTxs []pendingForcedInclusionTx + queue *BatchQueue +} + +type pendingForcedInclusionTx struct { + Data []byte + OriginalHeight uint64 +} + +func (s *Sequencer) GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) { + // 1. 
Fetch forced inclusion transactions from DA + forcedEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, s.daHeight.Load()) + + // 2. Process forced txs with size validation and pending queue + forcedTxs := s.processForcedInclusionTxs(forcedEvent, req.MaxBytes) + + // 3. Get batch from mempool queue + batch, err := s.queue.Next(ctx) + + // 4. Prepend forced txs and trim batch to fit MaxBytes + if len(forcedTxs) > 0 { + forcedTxsSize := calculateSize(forcedTxs) + remainingBytes := req.MaxBytes - forcedTxsSize + + // Trim batch transactions to fit + trimmedBatchTxs := trimToSize(batch.Transactions, remainingBytes) + + // Return excluded txs to front of queue + if len(trimmedBatchTxs) < len(batch.Transactions) { + excludedBatch := batch.Transactions[len(trimmedBatchTxs):] + s.queue.Prepend(ctx, Batch{Transactions: excludedBatch}) + } + + batch.Transactions = append(forcedTxs, trimmedBatchTxs...) + } + + return &GetNextBatchResponse{Batch: batch} +} + +// processForcedInclusionTxs validates and queues forced txs +func (s *Sequencer) processForcedInclusionTxs(event *ForcedInclusionEvent, maxBytes uint64) [][]byte { + var validatedTxs [][]byte + var newPendingTxs []pendingForcedInclusionTx + currentSize := 0 + + // Process pending txs from previous epochs first + for _, pendingTx := range s.pendingForcedInclusionTxs { + if !ValidateBlobSize(pendingTx.Data) { + continue // Skip blobs exceeding absolute DA limit + } + if WouldExceedCumulativeSize(currentSize, len(pendingTx.Data), maxBytes) { + newPendingTxs = append(newPendingTxs, pendingTx) + continue + } + validatedTxs = append(validatedTxs, pendingTx.Data) + currentSize += len(pendingTx.Data) + } + + // Process new txs from this epoch + for _, tx := range event.Txs { + if !ValidateBlobSize(tx) { + continue // Skip blobs exceeding absolute DA limit + } + if WouldExceedCumulativeSize(currentSize, len(tx), maxBytes) { + newPendingTxs = append(newPendingTxs, pendingForcedInclusionTx{ + Data: tx, + OriginalHeight: event.StartDaHeight, + }) + continue + } + validatedTxs = append(validatedTxs, tx) + currentSize += len(tx) + } + + s.pendingForcedInclusionTxs = newPendingTxs + return validatedTxs } ``` -### APIs and Interfaces +#### Based Sequencer -#### Enhanced DA Client Interface +A new sequencer implementation that ONLY retrieves transactions from DA: ```go -type DAClient interface { - // Existing methods - // ... 
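+// BasedSequencer (sketch) implements the Sequencer interface purely from DA
+// data: SubmitBatchTxs is a no-op, and every batch is assembled from blobs
+// found in the forced inclusion namespace.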
+type BasedSequencer struct {
+    fiRetriever ForcedInclusionRetriever
+    da          coreda.DA
+    config      config.Config
+    genesis     genesis.Genesis
+    logger      zerolog.Logger
+    mu          sync.RWMutex
+    daHeight    uint64
+    txQueue     [][]byte // Buffer for transactions exceeding batch size
+}
+
+func (s *BasedSequencer) GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) {
+    // Always fetch forced inclusion transactions from DA
+    forcedEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, s.daHeight)
+    if err != nil && !errors.Is(err, ErrHeightFromFuture) {
+        return nil, err
+    }

-    // New method for forced inclusion
-    GetDirectTransactions(ctx context.Context, fromTime, toTime uint64) ([][]byte, error)
-    // Note: SubmitDirectTransaction is removed as it's not a responsibility of the node
+    // Validate and add transactions to queue; forcedEvent may be nil when the
+    // requested DA height is not yet available (height from future)
+    if forcedEvent != nil {
+        for _, tx := range forcedEvent.Txs {
+            if ValidateBlobSize(tx) {
+                s.txQueue = append(s.txQueue, tx)
+            }
+        }
+    }
+
+    // Create batch from queue respecting MaxBytes
+    batch := s.createBatchFromQueue(req.MaxBytes)
+
+    return &GetNextBatchResponse{Batch: batch}, nil
+}
+
+// SubmitBatchTxs is a no-op for based sequencer
+func (s *BasedSequencer) SubmitBatchTxs(ctx context.Context, req SubmitBatchTxsRequest) (*SubmitBatchTxsResponse, error) {
+    // Based sequencer ignores submitted transactions
+    return &SubmitBatchTxsResponse{}, nil
 }
 ```

-#### Sequencer Interface Extensions
+#### Syncer Verification
+
+Full nodes verify forced inclusion in the sync process:

 ```go
-// New methods added to the Sequencer interface
-func (s *Sequencer) ScanDALayerForDirectTxs(ctx context.Context) error
-func (s *Sequencer) IncludeDirectTransactions(ctx context.Context, batch *Batch) error
+func (s *Syncer) verifyForcedInclusionTxs(currentState State, data *Data) error {
+    // 1. Retrieve forced inclusion transactions from DA
+    forcedEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(s.ctx, currentState.DAHeight)
+    if err != nil {
+        return err
+    }
+
+    // 2. Build map of transactions in block
+    blockTxMap := make(map[string]struct{})
+    for _, tx := range data.Txs {
+        blockTxMap[string(tx)] = struct{}{}
+    }
+
+    // 3. 
Verify all forced transactions are included + for _, forcedTx := range forcedEvent.Txs { + if _, ok := blockTxMap[string(forcedTx)]; !ok { + return errMaliciousProposer + } + } + + return nil +} ``` -#### Full Node Interface Extensions +### Implementation Details + +#### Epoch-Based Fetching + +To avoid excessive DA queries, the DA Retriever uses epoch-based fetching: + +- **Epoch Size**: Configurable number of DA blocks (e.g., 10) +- **Epoch Boundaries**: Deterministically calculated based on `DAStartHeight` +- **Fetch Timing**: Only fetch at epoch start to prevent duplicate fetches ```go -// New methods added to the Node interface -func (n *Node) CheckSequencerStatus(ctx context.Context) (bool, error) -func (n *Node) ProcessDirectTransactions(ctx context.Context) error -func (n *Node) ValidateBlockTimeWindow(ctx context.Context, block *types.Block) error +// Calculate epoch boundaries +func (r *daRetriever) calculateEpochBoundaries(daHeight uint64) (start, end uint64) { + epochNum := r.calculateEpochNumber(daHeight) + start = r.genesis.DAStartHeight + (epochNum-1)*r.daEpochSize + end = r.genesis.DAStartHeight + epochNum*r.daEpochSize - 1 + return start, end +} + +// Only fetch at epoch start +if daHeight != epochStart { + return &ForcedIncludedEvent{Txs: [][]byte{}} +} + +// Fetch all heights in epoch range +for height := epochStart; height <= epochEnd; height++ { + // Fetch forced inclusion blobs from this DA height +} ``` -### Implementation Changes - -#### Single Sequencer Node Changes - -1. **DA Layer Scanner**: - - Implement a periodic scanner that queries the DA layer for direct transactions - - Track all direct transactions in the DirectTxTracker data structure - - Update the latest seen DA block time and height after each scan - -2. **Transaction Inclusion Logic**: - - Modify the batch creation process to include direct transactions from the DA layer - - Ensure all direct transactions are included within the MaxInclusionDelay time window - - Check that transactions have waited for MinDADelay DA blocks - - Track transaction inclusion times and enforce both delay constraints - -3. **Validation Rules**: - - Implement time window validation to ensure transactions are included within MaxInclusionDelay - - Implement DA block delay validation to ensure transactions wait for MinDADelay blocks - - Track both time-based and DA block-based delays for each transaction - -4. **Recovery Mechanism**: - - Add logic to detect when the sequencer comes back online after downtime - - Implement state synchronization to catch up with any forced inclusions that occurred during downtime - - Resume normal operation by building on top of the canonical chain tip - -#### Sequencer Operation Flow - -The following diagram illustrates the operation flow for the sequencer with forced inclusion: - -```txt -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Sequencer Operation Flow │ -└─────────────────┬───────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────┐ ┌────────────────────────────────────────┐ -│ 1. Process User Transactions │ │ 2. 
Periodic DA Layer Scanning │ -│ │ │ │ -│ - Accept transactions from users│ │ - Query DA layer for direct txs │ -│ - Validate and queue txs │ │ - Update DirectTxTracker │ -│ - Process queue based on policy │ │ - Track latest seen DA block time │ -└─────────────────┬───────────────┘ └────────────────────┬───────────────────┘ - │ │ - ▼ ▼ -┌─────────────────────────────────┐ ┌────────────────────────────────────────┐ -│ 3. Batch Creation │ │ 4. Direct Transaction Inclusion │ -│ │ │ │ -│ - Create batch of txs │◄─────┤ - Include unprocessed direct txs │ -│ - Apply ordering policy │ │ - Prioritize by first seen │ -│ - Calculate batch metadata │ │ - Mark included txs as processed │ -└─────────────────┬───────────────┘ └────────────────────────────────────────┘ - │ - ▼ -┌──────────────────────────────────┐ ┌────────────────────────────────────────┐ -│ 5. Time Window Validation │ │ 6. Block Production │ -│ │ │ │ -│ - Check transaction timestamps │ │ - Create block with batch │ -│ - Ensure within MaxInclusionDelay│─────►│ - Sign and publish block │ -│ - Track inclusion times │ │ │ -└──────────────────────────────────┘ └─────────────────┬──────────────────────┘ - │ - ▼ - ┌────────────────────────────────────────┐ - │ 7. DA Batch Submission │ - │ │ - │ - Submit batch to DA layer │ - │ - Track submission status │ - │ - Handle retry on failure │ - └────────────────────────────────────────┘ +#### Height From Future Handling + +When DA height is not yet available: + +```go +if errors.Is(err, coreda.ErrHeightFromFuture) { + // Keep current DA height, return empty batch + // Retry same height on next call + return &ForcedIncludedEvent{Txs: [][]byte{}}, nil +} ``` -#### Full Node Operation Flow - -The following diagram illustrates the operation flow for full nodes with forced inclusion support: - -```txt -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Full Node Operation Flow │ -└─────────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────┐ ┌────────────────────────────────────────┐ -│ 1. Normal Operation Mode │ │ 2. Sequencer Status Monitoring │ -│ │ │ │ -│ - Receive blocks from sequencer │ │ - Monitor sequencer activity on DA │ -│ - Validate time windows │◄───►│ - Track time since last sequencer batch│ -│ - Apply state transitions │ │ - Check against downtime threshold │ -└─────────────────────────────────┘ └───────────────────┬────────────────────┘ - │ - ▼ - ┌────────────────────────────────────────┐ - │ Is Sequencer Down? │ - │ (Based on configurable threshold) │ - └───────────┬───────────────┬────────────┘ - │ │ - │ Yes │ No - ▼ │ - ┌────────────────────────┐ │ - │ 3. Enter Fallback Mode │ │ - │ │ │ - │ - Switch to direct tx │ │ - │ processing │ │ - │ - Notify subsystems │ │ - └──────────┬─────────────┘ │ - │ │ - ▼ │ - ┌────────────────────────┐ │ - │ 4. DA Layer Scanning │ │ - │ │ │ - │ - Scan DA for direct │ │ - │ transactions │ │ - │ - Track latest seen │ │ - │ DA block time │ │ - └──────────┬─────────────┘ │ - │ │ - ▼ │ - ┌────────────────────────┐ │ - │ 5. Deterministic Block │ │ - │ Creation │ │ - │ │ │ - │ - Create blocks with │ │ - │ direct txs only │ │ - │ - Apply deterministic │ │ - │ ordering rules │ │ - └──────────┬─────────────┘ │ - │ │ - ▼ ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ 6. 
Block Processing and State Update │ -│ │ -│ - Execute transactions │ -│ - Update state │ -│ - Persist blocks and state │ -└─────────────────────────────────────────────────────────────────────────────────┘ +#### Size Validation and Max Bytes Handling + +Both sequencers enforce strict size limits to prevent DoS and ensure batches never exceed the DA layer's limits: + +```go +// Size validation utilities +const AbsoluteMaxBlobSize = 1.5 * 1024 * 1024 // 1.5MB DA layer limit + +// ValidateBlobSize checks against absolute DA layer limit +func ValidateBlobSize(blob []byte) bool { + return uint64(len(blob)) <= AbsoluteMaxBlobSize +} + +// WouldExceedCumulativeSize checks against per-batch limit +func WouldExceedCumulativeSize(currentSize int, blobSize int, maxBytes uint64) bool { + return uint64(currentSize)+uint64(blobSize) > maxBytes +} ``` -### Fallback Mode Transition - -The following diagram illustrates the transition between normal operation and fallback mode: - -```mermaid -sequenceDiagram - participant DA as Data Availability Layer - participant S as Sequencer - participant R as Chain - - Note over S,R: Normal Operation - DA->>S: DA Block N - S->>R: Sequencer Block N - DA->>S: DA Block N+1 - S->>R: Sequencer Block N+1 - DA->>S: DA Block N+2 - S->>R: Sequencer Block N+2 - - Note over S,R: Sequencer Down - DA->>R: DA Block N+3 (Direct Txs) - Note over R: Fallback Mode Start - R->>R: Create Block from Direct Txs - DA->>R: DA Block N+4 (Direct Txs) - R->>R: Create Block from Direct Txs - DA->>R: DA Block N+5 (Direct Txs) - R->>R: Create Block from Direct Txs - - Note over S,R: Sequencer Back Online - DA->>S: DA Block N+6 - S->>R: Sequencer Block N+6 - DA->>S: DA Block N+7 - S->>R: Sequencer Block N+7 - - Note over R: Timeline shows: - Note over R: 1. Normal sequencer operation - Note over R: 2. Sequencer downtime & fallback - Note over R: 3. Sequencer recovery +**Key Behaviors**: + +- **Absolute validation**: Blobs exceeding 1.5MB are permanently rejected +- **Batch size limits**: `req.MaxBytes` is NEVER exceeded in any batch +- **Transaction preservation**: + - Single sequencer: Trimmed batch txs returned to queue via `Prepend()` + - Based sequencer: Excess txs remain in `txQueue` for next batch + - Forced txs that don't fit go to `pendingForcedInclusionTxs` (single) or stay in `txQueue` (based) + +#### Transaction Queue Management + +The based sequencer uses a simplified queue to handle transactions: + +```go +func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *Batch { + var batch [][]byte + var totalBytes uint64 + + for i, tx := range s.txQueue { + txSize := uint64(len(tx)) + // Always respect maxBytes, even for first transaction + if totalBytes+txSize > maxBytes { + // Would exceed max bytes, keep remaining in queue + s.txQueue = s.txQueue[i:] + break + } + + batch = append(batch, tx) + totalBytes += txSize + + // Clear queue if we processed everything + if i == len(s.txQueue)-1 { + s.txQueue = s.txQueue[:0] + } + } + + return &Batch{Transactions: batch} +} ``` -### Configuration +**Note**: The based sequencer is simpler than the single sequencer - it doesn't need a separate pending queue because `txQueue` naturally handles all transaction buffering. 
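+To make the two-tier size check concrete, here is a small, self-contained
+sketch (illustrative only: the helpers are those defined above, the byte
+sizes and budget are made up) showing how a queue of blobs is drained into
+a batch without ever exceeding the per-batch budget:
+
+```go
+package main
+
+import "fmt"
+
+const AbsoluteMaxBlobSize = 1.5 * 1024 * 1024 // absolute DA layer limit (1.5MB)
+
+// ValidateBlobSize checks a single blob against the absolute DA limit.
+func ValidateBlobSize(blob []byte) bool {
+    return uint64(len(blob)) <= AbsoluteMaxBlobSize
+}
+
+// WouldExceedCumulativeSize checks a blob against the per-batch budget.
+func WouldExceedCumulativeSize(currentSize, blobSize int, maxBytes uint64) bool {
+    return uint64(currentSize)+uint64(blobSize) > maxBytes
+}
+
+func main() {
+    maxBytes := uint64(100) // hypothetical per-batch budget
+    queue := [][]byte{make([]byte, 60), make([]byte, 30), make([]byte, 50)}
+
+    var batch [][]byte
+    var remaining [][]byte
+    size := 0
+    for i, tx := range queue {
+        if !ValidateBlobSize(tx) {
+            continue // oversized blobs are permanently rejected
+        }
+        if WouldExceedCumulativeSize(size, len(tx), maxBytes) {
+            remaining = queue[i:] // keep the rest buffered for the next batch
+            break
+        }
+        batch = append(batch, tx)
+        size += len(tx)
+    }
+    // Prints: batch=2 txs, 90/100 bytes, 1 blob buffered
+    fmt.Printf("batch=%d txs, %d/%d bytes, %d blob buffered\n",
+        len(batch), size, maxBytes, len(remaining))
+}
+```
+
+On the next call, the buffered blob is tried first, mirroring the `txQueue`
+behavior described above.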
-
-The forced inclusion mechanism will be configurable with the following parameters:
+### Configuration

 ```go
-type ForcedInclusionConfig struct {
-    Enabled           bool   // Whether forced inclusion is enabled
-    MaxInclusionDelay uint64 // Maximum time window for transaction inclusion
-    SequencerDownTime uint64 // Time after which the sequencer is considered down
-    MinDADelay        uint64 // Minimum number of DA blocks before including a direct tx
+type Genesis struct {
+    ChainID         string
+    StartTime       time.Time
+    InitialHeight   uint64
+    ProposerAddress []byte
+    DAStartHeight   uint64
+    // Number of DA blocks to scan per forced inclusion fetch.
+    // Higher values reduce DA queries but increase latency;
+    // lower values increase DA queries but improve responsiveness.
+    DAEpochForcedInclusion uint64
+}
+
+type DAConfig struct {
+    // ... existing fields ...
+
+    // Namespace for forced inclusion transactions
+    ForcedInclusionNamespace string
+}
+
+type NodeConfig struct {
+    // ... existing fields ...
+
+    // Run node with based sequencer (requires aggregator mode)
+    BasedSequencer bool
 }
 ```

+### Configuration Examples
+
+#### Traditional Sequencer with Forced Inclusion
+
+```yaml
+# genesis.json
+{
+  "chain_id": "my-rollup",
+  "da_epoch_forced_inclusion": 10  # Scan 10 DA blocks at a time
+}
+
+# evnode.yaml
+da:
+  forced_inclusion_namespace: "0x0000000000000000000000000000000000000000000000000000666f72636564"
+node:
+  aggregator: true
+  based_sequencer: false  # Use the traditional single sequencer
+```
+
+#### Based Sequencer (DA-Only)
+
+```yaml
+# genesis.json
+{
+  "chain_id": "my-rollup",
+  "da_epoch_forced_inclusion": 5  # Scan 5 DA blocks at a time
+}
+
+# evnode.yaml
+da:
+  forced_inclusion_namespace: "0x0000000000000000000000000000000000000000000000000000666f72636564"
+node:
+  aggregator: true
+  based_sequencer: true  # Use the based sequencer
+```
+
+### Sequencer Operation Flows
+
+#### Single Sequencer Flow
+
+```
+1. Timer triggers GetNextBatch
+2. Fetch forced inclusion txs from DA (via DA Retriever)
+   - Only at epoch boundaries
+   - Scan epoch range for forced transactions
+3. Get batch from mempool queue
+4. Prepend forced txs to batch
+5. Return batch for block production
+```
+
+#### Based Sequencer Flow
+
+```
+1. Timer triggers GetNextBatch
+2. Check transaction queue for buffered txs
+3. If queue empty or epoch boundary:
+   - Fetch forced inclusion txs from DA
+   - Add to queue
+4. Create batch from queue (respecting MaxBytes)
+5. Return batch for block production
+```
+
+### Full Node Verification Flow
+
+```
+1. Receive block from DA or P2P
+2. Before applying block:
+   a. Fetch forced inclusion txs from DA at block's DA height
+   b. Build map of transactions in block
+   c. Verify all forced txs are in block
+   d. If missing: reject block, flag malicious proposer
+3. Apply block if verification passes
+```
+
 ### Efficiency Considerations

-- DA layer scanning is integrated into the core block processing pipeline for continuous monitoring
-- Direct transactions are indexed by hash for quick lookups
-- The sequencer status is tracked by DA block time rather than block heights
-- Time-based tracking simplifies the implementation and reduces overhead
-- DA block height tracking adds minimal overhead to existing block processing
+1. **Epoch-Based Fetching**: Reduces DA queries by batching multiple DA heights
+2. **Deterministic Epochs**: All nodes calculate same epoch boundaries
+3. **Fetch at Epoch Start**: Prevents duplicate fetches as DA height progresses
+4. 
**Transaction Queue**: Buffers excess transactions across multiple blocks +5. **Conditional Fetching**: Only when forced inclusion namespace is configured +6. **Size Pre-validation**: Invalid blobs rejected early, before batch construction +7. **Efficient Queue Operations**: + - Single sequencer: `Prepend()` reuses space before head position + - Based sequencer: Simple slice operations for queue management + +**DA Query Frequency**: + +Every `DAEpochForcedInclusion` DA blocks ### Security Considerations -- The mechanism ensures that only valid direct transactions can be included in the chain -- Time window validation prevents delayed inclusion of transactions -- The configurable time threshold prevents premature switching to fallback mode due to temporary sequencer issues -- All transactions, whether sequencer-batched or direct, undergo the same validation rules -- MinDADelay provides protection against DA layer censorship by requiring multiple block proposers to collude -- Block-based delay prevents single block proposer censorship by ensuring transactions must be visible across multiple DA layer blocks -- The delay mechanism is inspired by the "Based Sequencing with Soft Confirmations" design from [Sovereign SDK #408](https://github.com/Sovereign-Labs/sovereign-sdk/issues/408), which uses deferred execution to prevent DA layer block proposers from censoring transactions +1. **Malicious Proposer Detection**: Full nodes reject blocks missing forced transactions +2. **No Timing Attacks**: Epoch boundaries are deterministic, no time-based logic +3. **Blob Size Limits**: Two-tier size validation prevents DoS + - Absolute limit (1.5MB): Blobs exceeding this are permanently rejected + - Batch limit (`MaxBytes`): Ensures no batch exceeds DA submission limits +4. **Graceful Degradation**: Continues operation if forced inclusion not configured +5. **Height Validation**: Handles "height from future" errors without state corruption +6. **Transaction Preservation**: No valid transactions are lost due to size constraints +7. **Strict MaxBytes Enforcement**: Batches NEVER exceed `req.MaxBytes`, preventing DA layer rejections -### Privacy Considerations +**Attack Vectors**: -- Direct transactions posted to the DA layer are publicly visible, just like sequencer-batched transactions -- No additional privacy concerns are introduced beyond the existing model +- **Censorship**: Mitigated by forced inclusion verification +- **DA Spam**: Limited by DA layer's native spam protection and two-tier blob size limits +- **Block Withholding**: Full nodes can fetch and verify from DA independently +- **Oversized Batches**: Prevented by strict size validation at multiple levels ### Testing Strategy -1. **Unit Tests**: - - Test individual components of the forced inclusion mechanism - - Verify time window validation logic - - Test the DA scanner functionality - - Test transaction inclusion timing constraints - - Test MinDADelay validation - -2. **Integration Tests**: - - Test the interaction between the sequencer and the DA layer - - Verify correct inclusion of direct transactions within time windows - - Test DA block delay validation - - Verify both time and block delay constraints - -3. **End-to-End Tests**: - - Simulate sequencer downtime and verify chain progression - - Test the transition between normal and fallback modes - - Verify the sequencer's recovery process after downtime - - Test transaction inclusion with various delay configurations - -4. 
**Performance Testing**: - - Measure the overhead introduced by the DA scanner - - Benchmark the system's performance in fallback mode - - Evaluate the impact of time-based tracking - - Measure the performance impact of DA block delay validation +#### Unit Tests + +1. **DA Retriever**: + - Epoch boundary calculations + - Height from future handling + - Blob size validation + - Empty epoch handling + +2. **Size Validation**: + - Individual blob size validation (absolute limit) + - Cumulative size checking (batch limit) + - Edge cases (empty blobs, exact limits, exceeding limits) + +3. **Single Sequencer**: + - Forced transaction prepending with size constraints + - Batch trimming when forced + batch exceeds MaxBytes + - Trimmed transactions returned to queue via Prepend + - Pending forced inclusion queue management + - DA height tracking + - Error handling + +4. **BatchQueue**: + - Prepend operation (empty queue, with items, after consuming) + - Multiple prepends (LIFO ordering) + - Space reuse before head position + +5. **Based Sequencer**: + - Queue management with size validation + - Batch size limits strictly enforced + - Transaction buffering across batches + - DA-only operation + - Always checking for new forced txs + +6. **Syncer Verification**: + - All forced txs included (pass) + - Missing forced txs (fail) + - No forced txs (pass) + +#### Integration Tests + +1. **Single Sequencer Integration**: + - Submit to mempool and forced inclusion + - Verify both included in block + - Forced txs appear first + +2. **Based Sequencer Integration**: + - Submit only to DA forced inclusion + - Verify block production + - Mempool submissions ignored + +3. **Verification Flow**: + - Full node rejects block missing forced tx + - Full node accepts block with all forced txs + +#### End-to-End Tests + +1. **User Flow**: + - User submits tx to forced inclusion namespace + - Sequencer includes tx in next epoch + - Full nodes verify inclusion + +2. **Based Rollup**: + - Start network with based sequencer + - Submit transactions to DA + - Verify block production and finalization + +3. **Censorship Resistance**: + - Sequencer ignores specific transaction + - User submits to forced inclusion + - Transaction included in next epoch + - Attempting to exclude causes block rejection ### Breaking Changes -This enhancement introduces no breaking changes to the existing API or data structures. It extends the current functionality by implementing time-based transaction tracking and inclusion rules, along with DA block-based delay validation, without modifying the core interfaces that developers interact with. +1. **Sequencer Initialization**: Requires `DARetriever` and `Genesis` parameters +2. **Configuration**: New fields in `DAConfig` and `NodeConfig` +3. 
**Syncer**: New verification step in block processing + +**Migration Path**: + +- Forced inclusion is optional (enabled when namespace configured) +- Existing deployments work without configuration changes +- Can enable incrementally per network ## Status -Proposed +Accepted and Implemented ## Consequences ### Positive -- Improves the liveness guarantees of Evolve-based chains -- Provides a path for Evolve to meet Stage 1 L2 requirements per the L2 Beat framework -- Creates an "unstoppable" property for applications, enhancing their reliability -- Maintains a deterministic chain state regardless of sequencer availability -- More predictable deadlines in DA time -- Easier to reason about for users and developers -- Prevents DA layer censorship by requiring multiple block proposers to collude +1. **Censorship Resistance**: Users have guaranteed path to include transactions +2. **Verifiable**: Full nodes enforce forced inclusion, detecting malicious sequencers +3. **Simple Design**: No complex timing mechanisms or fallback modes +4. **Based Rollup Option**: Fully DA-driven transaction ordering available (simplified implementation) +5. **Optional**: Forced inclusion can be disabled for permissioned deployments +6. **Efficient**: Epoch-based fetching minimizes DA queries +7. **Flexible**: Configurable epoch size allows tuning latency vs efficiency +8. **Robust Size Handling**: Two-tier size validation prevents DoS and DA rejections +9. **Transaction Preservation**: All valid transactions are preserved in queues, nothing is lost +10. **Strict MaxBytes Compliance**: Batches never exceed limits, preventing DA submission failures ### Negative -- Adds complexity to the block processing and validation logic -- Introduces overhead from scanning the DA layer for direct transactions -- Could potentially slow block production during fallback mode -- May need careful tuning of time window parameters -- Could be affected by variations in block production rate -- Additional complexity from tracking DA block heights for delay validation +1. **Increased Latency**: Forced transactions subject to epoch boundaries +2. **DA Dependency**: Requires DA layer to support multiple namespaces +3. **Higher DA Costs**: Users pay DA posting fees for forced inclusion +4. **Additional Complexity**: New component (DA Retriever) and verification logic +5. **Epoch Configuration**: Requires setting `DAEpochForcedInclusion` in genesis (consensus parameter) ### Neutral -- Requires application developers to consider both sequencer-batched and direct transaction flows -- Introduces configuration options that developers need to understand and set appropriately -- Changes the mental model of how the chain progresses, from purely sequencer-driven to a hybrid approach -- Users will need to use external tools or services to submit direct transactions to the DA layer during sequencer downtime +1. **Two Sequencer Types**: Choice between single (hybrid) and based (DA-only) +2. **Privacy Model Unchanged**: Forced inclusion has same privacy as normal path +3. **Monitoring**: Operators should monitor forced inclusion namespace usage +4. **Documentation**: Users need guidance on when to use forced inclusion +5. 
**Genesis Parameter**: `DAEpochForcedInclusion` is a consensus parameter fixed at genesis ## References diff --git a/go.mod b/go.mod index 5529903687..49467466e2 100644 --- a/go.mod +++ b/go.mod @@ -30,6 +30,7 @@ require ( golang.org/x/net v0.46.0 golang.org/x/sync v0.17.0 google.golang.org/protobuf v1.36.10 + gotest.tools/v3 v3.5.2 ) require ( @@ -53,6 +54,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/google/flatbuffers v24.12.23+incompatible // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect @@ -82,7 +84,7 @@ require ( github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v5 v5.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/miekg/dns v1.1.68 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect diff --git a/go.sum b/go.sum index 5b1debef77..ac49443213 100644 --- a/go.sum +++ b/go.sum @@ -227,8 +227,9 @@ github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= @@ -649,6 +650,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/cmd/run_node.go b/pkg/cmd/run_node.go index e0efe0b652..4dbc876879 100644 --- a/pkg/cmd/run_node.go +++ b/pkg/cmd/run_node.go @@ -26,8 +26,6 @@ import ( "github.com/evstack/ev-node/pkg/signer/file" ) -const DefaultMaxBlobSize = 1.5 * 1024 * 1024 // 1.5MB - // ParseConfig is an helpers that loads the 
node configuration and validates it. func ParseConfig(cmd *cobra.Command) (rollconf.Config, error) { nodeConfig, err := rollconf.Load(cmd) @@ -93,7 +91,7 @@ func StartNode( // create a new remote signer var signer signer.Signer - if nodeConfig.Signer.SignerType == "file" && nodeConfig.Node.Aggregator { + if nodeConfig.Signer.SignerType == "file" && (nodeConfig.Node.Aggregator && !nodeConfig.Node.BasedSequencer) { // Get passphrase file path passphraseFile, err := cmd.Flags().GetString(rollconf.FlagSignerPassphraseFile) if err != nil { diff --git a/pkg/config/config.go b/pkg/config/config.go index aad4ce6b93..51f7affa6d 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -33,6 +33,8 @@ const ( // FlagAggregator is a flag for running node in aggregator mode FlagAggregator = FlagPrefixEvnode + "node.aggregator" + // FlagBasedSequencer is a flag for enabling based sequencer mode (requires aggregator mode) + FlagBasedSequencer = FlagPrefixEvnode + "node.based_sequencer" // FlagLight is a flag for running the node in light mode FlagLight = FlagPrefixEvnode + "node.light" // FlagBlockTime is a flag for specifying the block time @@ -64,6 +66,8 @@ const ( FlagDANamespace = FlagPrefixEvnode + "da.namespace" // FlagDADataNamespace is a flag for specifying the DA data namespace ID FlagDADataNamespace = FlagPrefixEvnode + "da.data_namespace" + // FlagDAForcedInclusionNamespace is a flag for specifying the DA forced inclusion namespace ID + FlagDAForcedInclusionNamespace = FlagPrefixEvnode + "da.forced_inclusion_namespace" // FlagDASubmitOptions is a flag for data availability submit options FlagDASubmitOptions = FlagPrefixEvnode + "da.submit_options" // FlagDASigningAddresses is a flag for specifying multiple DA signing addresses @@ -155,15 +159,16 @@ type Config struct { // DAConfig contains all Data Availability configuration parameters type DAConfig struct { - Address string `mapstructure:"address" yaml:"address" comment:"Address of the data availability layer service (host:port). This is the endpoint where Rollkit will connect to submit and retrieve data."` - AuthToken string `mapstructure:"auth_token" yaml:"auth_token" comment:"Authentication token for the data availability layer service. Required if the DA service needs authentication."` - SubmitOptions string `mapstructure:"submit_options" yaml:"submit_options" comment:"Additional options passed to the DA layer when submitting data. Format depends on the specific DA implementation being used."` - SigningAddresses []string `mapstructure:"signing_addresses" yaml:"signing_addresses" comment:"List of addresses to use for DA submissions. When multiple addresses are provided, they will be used in round-robin fashion to prevent sequence mismatches. Useful for high-throughput chains."` - Namespace string `mapstructure:"namespace" yaml:"namespace" comment:"Namespace ID used when submitting blobs to the DA layer. When a DataNamespace is provided, only the header is sent to this namespace."` - DataNamespace string `mapstructure:"data_namespace" yaml:"data_namespace" comment:"Namespace ID for submitting data to DA layer. Use this to speed-up light clients."` - BlockTime DurationWrapper `mapstructure:"block_time" yaml:"block_time" comment:"Average block time of the DA chain (duration). Determines frequency of DA layer syncing, maximum backoff time for retries, and is multiplied by MempoolTTL to calculate transaction expiration. 
Examples: \"15s\", \"30s\", \"1m\", \"2m30s\", \"10m\"."` - MempoolTTL uint64 `mapstructure:"mempool_ttl" yaml:"mempool_ttl" comment:"Number of DA blocks after which a transaction is considered expired and dropped from the mempool. Controls retry backoff timing."` - MaxSubmitAttempts int `mapstructure:"max_submit_attempts" yaml:"max_submit_attempts" comment:"Maximum number of attempts to submit data to the DA layer before giving up. Higher values provide more resilience but can delay error reporting."` + Address string `mapstructure:"address" yaml:"address" comment:"Address of the data availability layer service (host:port). This is the endpoint where Rollkit will connect to submit and retrieve data."` + AuthToken string `mapstructure:"auth_token" yaml:"auth_token" comment:"Authentication token for the data availability layer service. Required if the DA service needs authentication."` + SubmitOptions string `mapstructure:"submit_options" yaml:"submit_options" comment:"Additional options passed to the DA layer when submitting data. Format depends on the specific DA implementation being used."` + SigningAddresses []string `mapstructure:"signing_addresses" yaml:"signing_addresses" comment:"List of addresses to use for DA submissions. When multiple addresses are provided, they will be used in round-robin fashion to prevent sequence mismatches. Useful for high-throughput chains."` + Namespace string `mapstructure:"namespace" yaml:"namespace" comment:"Namespace ID used when submitting blobs to the DA layer. When a DataNamespace is provided, only the header is sent to this namespace."` + DataNamespace string `mapstructure:"data_namespace" yaml:"data_namespace" comment:"Namespace ID for submitting data to DA layer. Use this to speed-up light clients."` + ForcedInclusionNamespace string `mapstructure:"forced_inclusion_namespace" yaml:"forced_inclusion_namespace" comment:"Namespace ID for forced inclusion transactions on the DA layer."` + BlockTime DurationWrapper `mapstructure:"block_time" yaml:"block_time" comment:"Average block time of the DA chain (duration). Determines frequency of DA layer syncing, maximum backoff time for retries, and is multiplied by MempoolTTL to calculate transaction expiration. Examples: \"15s\", \"30s\", \"1m\", \"2m30s\", \"10m\"."` + MempoolTTL uint64 `mapstructure:"mempool_ttl" yaml:"mempool_ttl" comment:"Number of DA blocks after which a transaction is considered expired and dropped from the mempool. Controls retry backoff timing."` + MaxSubmitAttempts int `mapstructure:"max_submit_attempts" yaml:"max_submit_attempts" comment:"Maximum number of attempts to submit data to the DA layer before giving up. Higher values provide more resilience but can delay error reporting."` } // GetNamespace returns the namespace for header submissions. 
@@ -180,11 +185,17 @@ func (d *DAConfig) GetDataNamespace() string { return d.GetNamespace() } +// GetForcedInclusionNamespace returns the namespace for forced inclusion transactions +func (d *DAConfig) GetForcedInclusionNamespace() string { + return d.ForcedInclusionNamespace +} + // NodeConfig contains all Rollkit specific configuration parameters type NodeConfig struct { // Node mode configuration - Aggregator bool `yaml:"aggregator" comment:"Run node in aggregator mode"` - Light bool `yaml:"light" comment:"Run node in light mode"` + Aggregator bool `yaml:"aggregator" comment:"Run node in aggregator mode"` + BasedSequencer bool `yaml:"based_sequencer" comment:"Run node with based sequencer (fetches transactions only from DA forced inclusion namespace). Requires aggregator mode to be enabled."` + Light bool `yaml:"light" comment:"Run node in light mode"` // Block management configuration BlockTime DurationWrapper `mapstructure:"block_time" yaml:"block_time" comment:"Block time (duration). Examples: \"500ms\", \"1s\", \"5s\", \"1m\", \"2m30s\", \"10m\"."` @@ -239,6 +250,11 @@ func (c *Config) Validate() error { return fmt.Errorf("could not create directory %q: %w", fullDir, err) } + // Validate based sequencer requires aggregator mode + if c.Node.BasedSequencer && !c.Node.Aggregator { + return fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") + } + // Validate namespaces if err := validateNamespace(c.DA.GetNamespace()); err != nil { return fmt.Errorf("could not validate namespace (%s): %w", c.DA.GetNamespace(), err) @@ -250,6 +266,14 @@ func (c *Config) Validate() error { } } + if len(c.DA.GetForcedInclusionNamespace()) > 0 { + // if err := validateNamespace(c.DA.GetForcedInclusionNamespace()); err != nil { + // return fmt.Errorf("could not validate forced inclusion namespace (%s): %w", c.DA.GetForcedInclusionNamespace(), err) + // } + return fmt.Errorf("forced inclusion is not yet live") + + } + // Validate lazy mode configuration if c.Node.LazyMode && c.Node.LazyBlockInterval.Duration <= c.Node.BlockTime.Duration { return fmt.Errorf("LazyBlockInterval (%v) must be greater than BlockTime (%v) in lazy mode", @@ -306,8 +330,9 @@ func AddFlags(cmd *cobra.Command) { cmd.Flags().Bool(FlagClearCache, def.ClearCache, "clear the cache") // Node configuration flags - cmd.Flags().Bool(FlagAggregator, def.Node.Aggregator, "run node in aggregator mode") - cmd.Flags().Bool(FlagLight, def.Node.Light, "run light client") + cmd.Flags().Bool(FlagAggregator, def.Node.Aggregator, "run node as an aggregator") + cmd.Flags().Bool(FlagBasedSequencer, def.Node.BasedSequencer, "run node with based sequencer (requires aggregator mode)") + cmd.Flags().Bool(FlagLight, def.Node.Light, "run node in light mode") cmd.Flags().Duration(FlagBlockTime, def.Node.BlockTime.Duration, "block time (for aggregator mode)") cmd.Flags().String(FlagTrustedHash, def.Node.TrustedHash, "initial trusted hash to start the header exchange service") cmd.Flags().Bool(FlagLazyAggregator, def.Node.LazyMode, "produce blocks only when transactions are available or after lazy block time") @@ -322,6 +347,7 @@ func AddFlags(cmd *cobra.Command) { cmd.Flags().Duration(FlagDABlockTime, def.DA.BlockTime.Duration, "DA chain block time (for syncing)") cmd.Flags().String(FlagDANamespace, def.DA.Namespace, "DA namespace for header (or blob) submissions") cmd.Flags().String(FlagDADataNamespace, def.DA.DataNamespace, "DA namespace for data submissions") + cmd.Flags().String(FlagDAForcedInclusionNamespace, 
def.DA.ForcedInclusionNamespace, "DA namespace for forced inclusion transactions") cmd.Flags().String(FlagDASubmitOptions, def.DA.SubmitOptions, "DA submit options") cmd.Flags().StringSlice(FlagDASigningAddresses, def.DA.SigningAddresses, "Comma-separated list of addresses for DA submissions (used in round-robin)") cmd.Flags().Uint64(FlagDAMempoolTTL, def.DA.MempoolTTL, "number of DA blocks until transaction is dropped from the mempool") @@ -349,6 +375,9 @@ func AddFlags(cmd *cobra.Command) { cmd.Flags().String(FlagSignerType, def.Signer.SignerType, "type of signer to use (file, grpc)") cmd.Flags().String(FlagSignerPath, def.Signer.SignerPath, "path to the signer file or address") cmd.Flags().String(FlagSignerPassphraseFile, "", "path to file containing the signer passphrase (required for file signer and if aggregator is enabled)") + + // flag constraints + cmd.MarkFlagsMutuallyExclusive(FlagLight, FlagAggregator) } // Load loads the node configuration in the following order of precedence: diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 206d6040bd..bfbf11329f 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -51,9 +51,11 @@ func TestAddFlags(t *testing.T) { // Test specific flags assertFlagValue(t, flags, FlagDBPath, DefaultConfig().DBPath) + assertFlagValue(t, flags, FlagClearCache, DefaultConfig().ClearCache) // Node flags assertFlagValue(t, flags, FlagAggregator, DefaultConfig().Node.Aggregator) + assertFlagValue(t, flags, FlagBasedSequencer, DefaultConfig().Node.BasedSequencer) assertFlagValue(t, flags, FlagLight, DefaultConfig().Node.Light) assertFlagValue(t, flags, FlagBlockTime, DefaultConfig().Node.BlockTime.Duration) assertFlagValue(t, flags, FlagTrustedHash, DefaultConfig().Node.TrustedHash) @@ -68,6 +70,8 @@ func TestAddFlags(t *testing.T) { assertFlagValue(t, flags, FlagDAAuthToken, DefaultConfig().DA.AuthToken) assertFlagValue(t, flags, FlagDABlockTime, DefaultConfig().DA.BlockTime.Duration) assertFlagValue(t, flags, FlagDANamespace, DefaultConfig().DA.Namespace) + assertFlagValue(t, flags, FlagDADataNamespace, DefaultConfig().DA.DataNamespace) + assertFlagValue(t, flags, FlagDAForcedInclusionNamespace, DefaultConfig().DA.ForcedInclusionNamespace) assertFlagValue(t, flags, FlagDASubmitOptions, DefaultConfig().DA.SubmitOptions) assertFlagValue(t, flags, FlagDASigningAddresses, DefaultConfig().DA.SigningAddresses) assertFlagValue(t, flags, FlagDAMempoolTTL, DefaultConfig().DA.MempoolTTL) @@ -91,6 +95,7 @@ func TestAddFlags(t *testing.T) { assertFlagValue(t, persistentFlags, FlagLogLevel, DefaultConfig().Log.Level) assertFlagValue(t, persistentFlags, FlagLogFormat, "text") assertFlagValue(t, persistentFlags, FlagLogTrace, false) + assertFlagValue(t, persistentFlags, FlagRootDir, DefaultRootDirWithName("test")) // Signer flags assertFlagValue(t, flags, FlagSignerPassphraseFile, "") @@ -99,9 +104,10 @@ func TestAddFlags(t *testing.T) { // RPC flags assertFlagValue(t, flags, FlagRPCAddress, DefaultConfig().RPC.Address) + assertFlagValue(t, flags, FlagRPCEnableDAVisualization, DefaultConfig().RPC.EnableDAVisualization) // Count the number of flags we're explicitly checking - expectedFlagCount := 38 // Update this number if you add more flag checks above + expectedFlagCount := 44 // Update this number if you add more flag checks above // Get the actual number of flags (both regular and persistent) actualFlagCount := 0 @@ -370,3 +376,57 @@ func assertFlagValue(t *testing.T, flags *pflag.FlagSet, name string, expectedVa } } } + +func 
TestBasedSequencerValidation(t *testing.T) { + tests := []struct { + name string + aggregator bool + basedSeq bool + expectError bool + errorMsg string + }{ + { + name: "based sequencer without aggregator should fail", + aggregator: false, + basedSeq: true, + expectError: true, + errorMsg: "based sequencer mode requires aggregator mode to be enabled", + }, + { + name: "based sequencer with aggregator should pass", + aggregator: true, + basedSeq: true, + expectError: false, + }, + { + name: "aggregator without based sequencer should pass", + aggregator: true, + basedSeq: false, + expectError: false, + }, + { + name: "neither aggregator nor based sequencer should pass", + aggregator: false, + basedSeq: false, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := DefaultConfig() + cfg.RootDir = t.TempDir() + cfg.Node.Aggregator = tt.aggregator + cfg.Node.BasedSequencer = tt.basedSeq + + err := cfg.Validate() + + if tt.expectError { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errorMsg) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/pkg/config/defaults.go b/pkg/config/defaults.go index 3410547531..3e2010982d 100644 --- a/pkg/config/defaults.go +++ b/pkg/config/defaults.go @@ -71,11 +71,12 @@ func DefaultConfig() Config { ReadinessMaxBlocksBehind: calculateReadinessMaxBlocksBehind(defaultBlockTime.Duration, defaultReadinessWindowSeconds), }, DA: DAConfig{ - Address: "http://localhost:7980", - BlockTime: DurationWrapper{6 * time.Second}, - MaxSubmitAttempts: 30, - Namespace: randString(10), - DataNamespace: "", + Address: "http://localhost:7980", + BlockTime: DurationWrapper{6 * time.Second}, + MaxSubmitAttempts: 30, + Namespace: randString(10), + DataNamespace: "", + ForcedInclusionNamespace: "", }, Instrumentation: DefaultInstrumentationConfig(), Log: LogConfig{ diff --git a/pkg/genesis/genesis.go b/pkg/genesis/genesis.go index 1fae146431..65cbed1737 100644 --- a/pkg/genesis/genesis.go +++ b/pkg/genesis/genesis.go @@ -11,11 +11,12 @@ const ChainIDFlag = "chain_id" // This genesis struct only contains the fields required by evolve. // The app state or other fields are not included here. type Genesis struct { - ChainID string `json:"chain_id"` - StartTime time.Time `json:"start_time"` - InitialHeight uint64 `json:"initial_height"` - ProposerAddress []byte `json:"proposer_address"` - DAStartHeight uint64 `json:"da_start_height"` + ChainID string `json:"chain_id"` + StartTime time.Time `json:"start_time"` + InitialHeight uint64 `json:"initial_height"` + ProposerAddress []byte `json:"proposer_address"` + DAStartHeight uint64 `json:"da_start_height"` + DAEpochForcedInclusion uint64 `json:"da_epoch_forced_inclusion"` } // NewGenesis creates a new Genesis instance. 
@@ -26,11 +27,12 @@ func NewGenesis( proposerAddress []byte, ) Genesis { genesis := Genesis{ - ChainID: chainID, - StartTime: startTime, - InitialHeight: initialHeight, - ProposerAddress: proposerAddress, - DAStartHeight: 0, + ChainID: chainID, + StartTime: startTime, + InitialHeight: initialHeight, + ProposerAddress: proposerAddress, + DAStartHeight: 0, + DAEpochForcedInclusion: 50, // Default epoch size } return genesis @@ -54,5 +56,9 @@ func (g Genesis) Validate() error { return fmt.Errorf("proposer_address cannot be nil") } + if g.DAEpochForcedInclusion < 1 { + return fmt.Errorf("da_epoch_forced_inclusion must be at least 1, got %d", g.DAEpochForcedInclusion) + } + return nil } diff --git a/pkg/genesis/genesis_test.go b/pkg/genesis/genesis_test.go index a5c1d280db..da3cc14b1f 100644 --- a/pkg/genesis/genesis_test.go +++ b/pkg/genesis/genesis_test.go @@ -72,50 +72,55 @@ func TestGenesis_Validate(t *testing.T) { { name: "valid genesis - chain ID can contain any character", genesis: Genesis{ - ChainID: "test@chain#123!", - StartTime: validTime, - InitialHeight: 1, - ProposerAddress: []byte("proposer"), + ChainID: "test@chain#123!", + StartTime: validTime, + InitialHeight: 1, + ProposerAddress: []byte("proposer"), + DAEpochForcedInclusion: 1, }, wantErr: false, }, { name: "invalid - empty chain_id", genesis: Genesis{ - ChainID: "", - StartTime: validTime, - InitialHeight: 1, - ProposerAddress: []byte("proposer"), + ChainID: "", + StartTime: validTime, + InitialHeight: 1, + ProposerAddress: []byte("proposer"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, { name: "invalid - zero initial height", genesis: Genesis{ - ChainID: "test-chain", - StartTime: validTime, - InitialHeight: 0, - ProposerAddress: []byte("proposer"), + ChainID: "test-chain", + StartTime: validTime, + InitialHeight: 0, + ProposerAddress: []byte("proposer"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, { name: "invalid - zero time DA start height", genesis: Genesis{ - ChainID: "test-chain", - StartTime: time.Time{}, - InitialHeight: 1, - ProposerAddress: []byte("proposer"), + ChainID: "test-chain", + StartTime: time.Time{}, + InitialHeight: 1, + ProposerAddress: []byte("proposer"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, { name: "invalid - nil proposer address", genesis: Genesis{ - ChainID: "test-chain", - StartTime: validTime, - InitialHeight: 1, - ProposerAddress: nil, + ChainID: "test-chain", + StartTime: validTime, + InitialHeight: 1, + ProposerAddress: nil, + DAEpochForcedInclusion: 1, }, wantErr: true, }, diff --git a/pkg/genesis/io_test.go b/pkg/genesis/io_test.go index fb6f223070..7c8b882a6f 100644 --- a/pkg/genesis/io_test.go +++ b/pkg/genesis/io_test.go @@ -30,40 +30,44 @@ func TestLoadAndSaveGenesis(t *testing.T) { { name: "valid genesis", genesis: Genesis{ - ChainID: "test-chain-1", - InitialHeight: 1, - StartTime: validTime, - ProposerAddress: []byte("proposer-address"), + ChainID: "test-chain-1", + InitialHeight: 1, + StartTime: validTime, + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, }, wantErr: false, }, { name: "valid genesis - minimal", genesis: Genesis{ - ChainID: "test-chain-2", - InitialHeight: 1, - StartTime: validTime, - ProposerAddress: []byte("proposer-address"), + ChainID: "test-chain-2", + InitialHeight: 1, + StartTime: validTime, + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, }, wantErr: false, }, { name: "invalid genesis - empty chain ID", genesis: Genesis{ - ChainID: "", - InitialHeight: 1, - StartTime: validTime, - 
ProposerAddress: []byte("proposer-address"), + ChainID: "", + InitialHeight: 1, + StartTime: validTime, + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, { name: "invalid genesis - zero initial height", genesis: Genesis{ - ChainID: "test-chain", - InitialHeight: 0, - StartTime: validTime, - ProposerAddress: []byte("proposer-address"), + ChainID: "test-chain", + InitialHeight: 0, + StartTime: validTime, + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, }, wantErr: true, }, @@ -177,10 +181,11 @@ func TestSaveGenesis_InvalidPath(t *testing.T) { } genesis := Genesis{ - ChainID: "test-chain", - InitialHeight: 1, - StartTime: time.Now().UTC(), - ProposerAddress: []byte("proposer-address"), + ChainID: "test-chain", + InitialHeight: 1, + StartTime: time.Now().UTC(), + ProposerAddress: []byte("proposer-address"), + DAEpochForcedInclusion: 1, } err := genesis.Save(tc.path) diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go new file mode 100644 index 0000000000..c0b8054e08 --- /dev/null +++ b/sequencers/based/sequencer.go @@ -0,0 +1,226 @@ +package based + +import ( + "context" + "errors" + "sync/atomic" + "time" + + "github.com/rs/zerolog" + + "github.com/evstack/ev-node/block" + coreda "github.com/evstack/ev-node/core/da" + coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" + seqcommon "github.com/evstack/ev-node/sequencers/common" +) + +// ForcedInclusionRetriever defines the interface for retrieving forced inclusion transactions from DA +type ForcedInclusionRetriever interface { + RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*block.ForcedInclusionEvent, error) +} + +var _ coresequencer.Sequencer = (*BasedSequencer)(nil) + +// BasedSequencer is a sequencer that only retrieves transactions from the DA layer +// via the forced inclusion mechanism. It does not accept transactions from the reaper. 
+type BasedSequencer struct { + fiRetriever ForcedInclusionRetriever + da coreda.DA + config config.Config + genesis genesis.Genesis + logger zerolog.Logger + + daHeight atomic.Uint64 + txQueue atomic.Pointer[[][]byte] +} + +// NewBasedSequencer creates a new based sequencer instance +func NewBasedSequencer( + fiRetriever ForcedInclusionRetriever, + da coreda.DA, + config config.Config, + genesis genesis.Genesis, + logger zerolog.Logger, +) *BasedSequencer { + bs := &BasedSequencer{ + fiRetriever: fiRetriever, + da: da, + config: config, + genesis: genesis, + logger: logger.With().Str("component", "based_sequencer").Logger(), + } + bs.daHeight.Store(genesis.DAStartHeight) + initialQueue := make([][]byte, 0) + bs.txQueue.Store(&initialQueue) + return bs +} + +// SubmitBatchTxs does nothing for a based sequencer as it only pulls from DA +// This satisfies the Sequencer interface but transactions submitted here are ignored +func (s *BasedSequencer) SubmitBatchTxs(ctx context.Context, req coresequencer.SubmitBatchTxsRequest) (*coresequencer.SubmitBatchTxsResponse, error) { + s.logger.Debug().Msg("based sequencer ignores submitted transactions - only DA transactions are processed") + return &coresequencer.SubmitBatchTxsResponse{}, nil +} + +// GetNextBatch retrieves the next batch of transactions from the DA layer +// It fetches forced inclusion transactions and returns them as the next batch +func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextBatchRequest) (*coresequencer.GetNextBatchResponse, error) { + // If we have transactions in the queue, return them first + queuePtr := s.txQueue.Load() + queue := *queuePtr + if len(queue) > 0 { + batch := s.createBatchFromQueue(req.MaxBytes) + if len(batch.Transactions) > 0 { + s.logger.Debug(). + Int("tx_count", len(batch.Transactions)). + Int("remaining", len(*s.txQueue.Load())). + Msg("returning batch from queue") + return &coresequencer.GetNextBatchResponse{ + Batch: batch, + Timestamp: time.Now(), + BatchData: req.LastBatchData, + }, nil + } + } + + // Fetch forced inclusion transactions from DA + currentDAHeight := s.daHeight.Load() + s.logger.Debug().Uint64("da_height", currentDAHeight).Msg("fetching forced inclusion transactions from DA") + + forcedTxsEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, currentDAHeight) + if err != nil { + // Check if forced inclusion is not configured + if errors.Is(err, block.ErrForceInclusionNotConfigured) { + s.logger.Error().Msg("forced inclusion not configured, returning empty batch") + return &coresequencer.GetNextBatchResponse{ + Batch: &coresequencer.Batch{Transactions: nil}, + Timestamp: time.Now(), + BatchData: req.LastBatchData, + }, nil + } else if errors.Is(err, coreda.ErrHeightFromFuture) { + // If we get a height from future error, keep the current DA height and return batch + // We'll retry the same height on the next call until DA produces that block + s.logger.Debug(). + Uint64("da_height", currentDAHeight). 
+ Msg("DA height from future, waiting for DA to produce block") + return &coresequencer.GetNextBatchResponse{ + Batch: &coresequencer.Batch{Transactions: nil}, + Timestamp: time.Now(), + BatchData: req.LastBatchData, + }, nil + } + + s.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions") + return nil, err + } + + // Update DA height based on the retrieved event + if forcedTxsEvent.EndDaHeight > currentDAHeight { + s.SetDAHeight(forcedTxsEvent.EndDaHeight) + } else if forcedTxsEvent.StartDaHeight > currentDAHeight { + s.SetDAHeight(forcedTxsEvent.StartDaHeight) + } + + // Add forced inclusion transactions to the queue with validation + validTxs := 0 + skippedTxs := 0 + for _, tx := range forcedTxsEvent.Txs { + // Validate blob size against absolute maximum + if !seqcommon.ValidateBlobSize(tx) { + s.logger.Warn(). + Uint64("da_height", forcedTxsEvent.StartDaHeight). + Int("blob_size", len(tx)). + Msg("forced inclusion blob exceeds absolute maximum size - skipping") + skippedTxs++ + continue + } + + // Add to queue atomically + for { + oldQueuePtr := s.txQueue.Load() + oldQueue := *oldQueuePtr + newQueue := append(oldQueue, tx) + if s.txQueue.CompareAndSwap(oldQueuePtr, &newQueue) { + validTxs++ + break + } + } + } + + s.logger.Info(). + Int("valid_tx_count", validTxs). + Int("skipped_tx_count", skippedTxs). + Int("queue_size", len(*s.txQueue.Load())). + Uint64("da_height_start", forcedTxsEvent.StartDaHeight). + Uint64("da_height_end", forcedTxsEvent.EndDaHeight). + Msg("processed forced inclusion transactions from DA") + + batch := s.createBatchFromQueue(req.MaxBytes) + + return &coresequencer.GetNextBatchResponse{ + Batch: batch, + Timestamp: time.Now(), + BatchData: req.LastBatchData, + }, nil +} + +// createBatchFromQueue creates a batch from the transaction queue respecting MaxBytes +func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *coresequencer.Batch { + for { + queuePtr := s.txQueue.Load() + queue := *queuePtr + if len(queue) == 0 { + return &coresequencer.Batch{Transactions: nil} + } + + var batch [][]byte + var totalBytes uint64 + var remaining [][]byte + + for i, tx := range queue { + txSize := uint64(len(tx)) + // Always respect maxBytes, even for the first transaction + if totalBytes+txSize > maxBytes { + // Would exceed max bytes, stop here + remaining = queue[i:] + break + } + + batch = append(batch, tx) + totalBytes += txSize + + // If this is the last transaction, clear the queue + if i == len(queue)-1 { + remaining = nil + } + } + + // Try to update queue atomically + if s.txQueue.CompareAndSwap(queuePtr, &remaining) { + return &coresequencer.Batch{Transactions: batch} + } + // If CAS failed, retry with new queue state + } +} + +// VerifyBatch verifies a batch of transactions +// For a based sequencer, we always return true as all transactions come from DA +func (s *BasedSequencer) VerifyBatch(ctx context.Context, req coresequencer.VerifyBatchRequest) (*coresequencer.VerifyBatchResponse, error) { + return &coresequencer.VerifyBatchResponse{ + Status: true, + }, nil +} + +// SetDAHeight sets the current DA height for the sequencer +// This should be called when the sequencer needs to sync to a specific DA height +func (s *BasedSequencer) SetDAHeight(height uint64) { + s.daHeight.Store(height) + s.logger.Debug().Uint64("da_height", height).Msg("DA height updated") +} + +// GetDAHeight returns the current DA height +func (s *BasedSequencer) GetDAHeight() uint64 { + return s.daHeight.Load() +} diff 
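That completes the based sequencer. Its transaction queue is deliberately lock-free: a `[][]byte` snapshot held behind an `atomic.Pointer`, where both enqueue and drain publish a fresh slice via `CompareAndSwap` and retry on contention. The standalone sketch below shows the same copy-on-write pattern; `txQueue`, `push`, and `drainUpTo` are hypothetical names, not part of this diff, and unlike the diff it copies the slice on push so a published snapshot is never mutated through a shared backing array:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// txQueue mirrors BasedSequencer's queue: readers load an immutable snapshot,
// writers publish a replacement slice via compare-and-swap.
type txQueue struct {
	q atomic.Pointer[[][]byte]
}

func newTxQueue() *txQueue {
	t := &txQueue{}
	empty := make([][]byte, 0)
	t.q.Store(&empty)
	return t
}

// push appends tx to a copy of the current snapshot, retrying if another
// goroutine swapped the queue in the meantime.
func (t *txQueue) push(tx []byte) {
	for {
		old := t.q.Load()
		next := append(append([][]byte{}, *old...), tx)
		if t.q.CompareAndSwap(old, &next) {
			return
		}
	}
}

// drainUpTo pops transactions while their cumulative size fits in maxBytes,
// leaving the rest queued - the same cut rule as createBatchFromQueue.
func (t *txQueue) drainUpTo(maxBytes uint64) [][]byte {
	for {
		old := t.q.Load()
		var batch [][]byte
		var total uint64
		remaining := make([][]byte, 0)
		for i, tx := range *old {
			if total+uint64(len(tx)) > maxBytes {
				remaining = append(remaining, (*old)[i:]...)
				break
			}
			batch = append(batch, tx)
			total += uint64(len(tx))
		}
		if t.q.CompareAndSwap(old, &remaining) {
			return batch
		}
	}
}

func main() {
	q := newTxQueue()
	q.push(make([]byte, 50))
	q.push(make([]byte, 60))
	fmt.Println(len(q.drainUpTo(100))) // 1: the 60-byte tx stays queued
}
```

The trade-off is an O(n) copy per operation, which is acceptable here because the forced-inclusion queue stays small between epochs.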
--git a/sequencers/based/sequencer_test.go b/sequencers/based/sequencer_test.go new file mode 100644 index 0000000000..e0a4182488 --- /dev/null +++ b/sequencers/based/sequencer_test.go @@ -0,0 +1,605 @@ +package based + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/evstack/ev-node/block" + coreda "github.com/evstack/ev-node/core/da" + coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" +) + +// MockDA is a mock implementation of DA for testing +type MockDA struct { + mock.Mock +} + +func (m *MockDA) Submit(ctx context.Context, blobs [][]byte, gasPrice float64, namespace []byte) ([][]byte, error) { + args := m.Called(ctx, blobs, gasPrice, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func (m *MockDA) SubmitWithOptions(ctx context.Context, blobs [][]byte, gasPrice float64, namespace []byte, options []byte) ([][]byte, error) { + args := m.Called(ctx, blobs, gasPrice, namespace, options) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func (m *MockDA) GetIDs(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { + args := m.Called(ctx, height, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*coreda.GetIDsResult), args.Error(1) +} + +func (m *MockDA) Get(ctx context.Context, ids [][]byte, namespace []byte) ([][]byte, error) { + args := m.Called(ctx, ids, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func (m *MockDA) GetProofs(ctx context.Context, ids [][]byte, namespace []byte) ([]coreda.Proof, error) { + args := m.Called(ctx, ids, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]coreda.Proof), args.Error(1) +} + +func (m *MockDA) Validate(ctx context.Context, ids [][]byte, proofs []coreda.Proof, namespace []byte) ([]bool, error) { + args := m.Called(ctx, ids, proofs, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]bool), args.Error(1) +} + +func (m *MockDA) Commit(ctx context.Context, blobs [][]byte, namespace []byte) ([][]byte, error) { + args := m.Called(ctx, blobs, namespace) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([][]byte), args.Error(1) +} + +func TestNewBasedSequencer(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 10, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + require.NotNil(t, seq) + assert.Equal(t, uint64(100), seq.daHeight.Load()) + assert.Equal(t, 0, len(*seq.txQueue.Load())) +} + +func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAEpochForcedInclusion: 10, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = 
"test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Submit should succeed but be ignored + req := coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test-chain"), + Batch: &coresequencer.Batch{ + Transactions: [][]byte{[]byte("tx1"), []byte("tx2")}, + }, + } + + resp, err := seq.SubmitBatchTxs(context.Background(), req) + + require.NoError(t, err) + require.NotNil(t, resp) + // Transactions should not be added to queue for based sequencer + assert.Equal(t, 0, len(*seq.txQueue.Load())) +} + +func TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { + testBlobs := [][]byte{[]byte("tx1"), []byte("tx2")} + + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1"), []byte("id2")}, + Timestamp: time.Now(), + }, nil) + mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs, nil) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 2, len(resp.Batch.Transactions)) + assert.Equal(t, []byte("tx1"), resp.Batch.Transactions[0]) + assert.Equal(t, []byte("tx2"), resp.Batch.Transactions[1]) + + // DA height should be updated + assert.Equal(t, uint64(100), seq.GetDAHeight()) + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_EmptyDA(t *testing.T) { + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 0, len(resp.Batch.Transactions)) + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_NotConfigured(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + // Create config without forced inclusion namespace + cfgNoFI := config.DefaultConfig() + cfgNoFI.DA.ForcedInclusionNamespace = "" + daClient := 
block.NewDAClient(mockDA, cfgNoFI, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfgNoFI, gen, zerolog.Nop()) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 0, len(resp.Batch.Transactions)) +} + +func TestBasedSequencer_GetNextBatch_HeightFromFuture(t *testing.T) { + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrHeightFromFuture) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 0, len(resp.Batch.Transactions)) + + // DA height should remain the same + assert.Equal(t, uint64(100), seq.GetDAHeight()) + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { + testBlobs := [][]byte{ + make([]byte, 50), // 50 bytes + make([]byte, 60), // 60 bytes + make([]byte, 100), // 100 bytes + } + + mockDA := new(MockDA) + // First call returns forced txs + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1"), []byte("id2"), []byte("id3")}, + Timestamp: time.Now(), + }, nil).Once() + mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs, nil).Once() + + // Subsequent calls should return no new forced txs + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // First call with max 100 bytes - only the first tx fits (50 + 60 = 110 would exceed the limit) + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 100, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + // Should get first tx (50 bytes), second tx would exceed limit (50+60=110 > 100) + assert.Equal(t, 1, len(resp.Batch.Transactions)) + assert.Equal(t, 2, len(*seq.txQueue.Load())) // 2 remaining in queue + + // Second call should get next tx from queue + resp2, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp2) + require.NotNil(t, resp2.Batch) +
assert.Equal(t, 1, len(resp2.Batch.Transactions)) + assert.Equal(t, 1, len(*seq.txQueue.Load())) // 1 remaining in queue + + // Third call with larger maxBytes to get the 100-byte tx + req3 := coresequencer.GetNextBatchRequest{ + MaxBytes: 200, + LastBatchData: nil, + } + resp3, err := seq.GetNextBatch(context.Background(), req3) + require.NoError(t, err) + require.NotNil(t, resp3) + require.NotNil(t, resp3.Batch) + assert.Equal(t, 1, len(resp3.Batch.Transactions)) + assert.Equal(t, 0, len(*seq.txQueue.Load())) // Queue should be empty + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_FromQueue(t *testing.T) { + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, mock.Anything, mock.Anything).Return(nil, coreda.ErrBlobNotFound) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // Pre-populate the queue + preQueue := [][]byte{[]byte("queued_tx1"), []byte("queued_tx2")} + seq.txQueue.Store(&preQueue) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 2, len(resp.Batch.Transactions)) + assert.Equal(t, []byte("queued_tx1"), resp.Batch.Transactions[0]) + assert.Equal(t, []byte("queued_tx2"), resp.Batch.Transactions[1]) + + // Queue should be empty now + assert.Equal(t, 0, len(*seq.txQueue.Load())) +} + +func TestBasedSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) { + mockDA := new(MockDA) + + // First call: return a forced tx that will be added to queue + forcedTx := make([]byte, 150) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1")}, + Timestamp: time.Now(), + }, nil).Once() + mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return([][]byte{forcedTx}, nil).Once() + + // Second call: no new DA calls expected, should retrieve from queue + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // First call with maxBytes = 100 + // Forced tx (150 bytes) is added to queue, but batch will be empty since it exceeds maxBytes + req1 := coresequencer.GetNextBatchRequest{ + MaxBytes: 100, + LastBatchData: nil, + } + + resp1, err := seq.GetNextBatch(context.Background(), req1) + require.NoError(t, err) + require.NotNil(t, resp1) + require.NotNil(t, resp1.Batch) + assert.Equal(t, 0, len(resp1.Batch.Transactions), "Should have no txs as forced tx exceeds maxBytes") + + // Verify forced tx is in queue + assert.Equal(t, 1, len(*seq.txQueue.Load()), "Forced tx should be in queue") + + // Second call with larger 
maxBytes = 200 + // Should process tx from queue + req2 := coresequencer.GetNextBatchRequest{ + MaxBytes: 200, + LastBatchData: nil, + } + + resp2, err := seq.GetNextBatch(context.Background(), req2) + require.NoError(t, err) + require.NotNil(t, resp2) + require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include tx from queue") + assert.Equal(t, 150, len(resp2.Batch.Transactions[0])) + + // Queue should now be empty + assert.Equal(t, 0, len(*seq.txQueue.Load()), "Queue should be empty") + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T) { + mockDA := new(MockDA) + + // Return forced txs where combined they exceed maxBytes + forcedTx1 := make([]byte, 100) + forcedTx2 := make([]byte, 80) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ + IDs: []coreda.ID{[]byte("id1"), []byte("id2")}, + Timestamp: time.Now(), + }, nil).Once() + mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return([][]byte{forcedTx1, forcedTx2}, nil).Once() + + // Second call: no new DA calls expected, should retrieve from queue + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + // First call with maxBytes = 120 + // Should get only first forced tx (100 bytes), second stays in queue + req1 := coresequencer.GetNextBatchRequest{ + MaxBytes: 120, + LastBatchData: nil, + } + + resp1, err := seq.GetNextBatch(context.Background(), req1) + require.NoError(t, err) + require.NotNil(t, resp1) + require.NotNil(t, resp1.Batch) + assert.Equal(t, 1, len(resp1.Batch.Transactions), "Should only include first forced tx") + assert.Equal(t, 100, len(resp1.Batch.Transactions[0])) + + // Verify second tx is still in queue + assert.Equal(t, 1, len(*seq.txQueue.Load()), "Second tx should be in queue") + + // Second call - should get the second tx from queue + req2 := coresequencer.GetNextBatchRequest{ + MaxBytes: 120, + LastBatchData: nil, + } + + resp2, err := seq.GetNextBatch(context.Background(), req2) + require.NoError(t, err) + require.NotNil(t, resp2) + require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include second tx from queue") + assert.Equal(t, 80, len(resp2.Batch.Transactions[0])) + + // Queue should now be empty + assert.Equal(t, 0, len(*seq.txQueue.Load()), "Queue should be empty") + + mockDA.AssertExpectations(t) +} + +func TestBasedSequencer_VerifyBatch(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.VerifyBatchRequest{ + Id: []byte("test-chain"), + BatchData: [][]byte{[]byte("tx1")}, + } + + resp, err := 
seq.VerifyBatch(context.Background(), req) + require.NoError(t, err) + assert.True(t, resp.Status) +} + +func TestBasedSequencer_SetDAHeight(t *testing.T) { + mockDA := new(MockDA) + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + assert.Equal(t, uint64(100), seq.GetDAHeight()) + + seq.SetDAHeight(200) + assert.Equal(t, uint64(200), seq.GetDAHeight()) +} + +func TestBasedSequencer_GetNextBatch_ErrorHandling(t *testing.T) { + mockDA := new(MockDA) + mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, errors.New("DA connection error")) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.DA.Namespace = "test-ns" + cfg.DA.DataNamespace = "test-data-ns" + cfg.DA.ForcedInclusionNamespace = "test-fi-ns" + + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + _, err := seq.GetNextBatch(context.Background(), req) + require.Error(t, err) + + mockDA.AssertExpectations(t) +} diff --git a/sequencers/common/size_validation.go b/sequencers/common/size_validation.go new file mode 100644 index 0000000000..1032f5299f --- /dev/null +++ b/sequencers/common/size_validation.go @@ -0,0 +1,27 @@ +package common + +// TODO(@julienrbrt): technically we may need to check for block gas as well + +const ( + // AbsoluteMaxBlobSize is the absolute maximum size for a single blob (DA layer limit). + // Blobs exceeding this size are invalid and should be rejected permanently. + AbsoluteMaxBlobSize = 1.5 * 1024 * 1024 // 1.5MB +) + +// ValidateBlobSize checks if a single blob exceeds the absolute maximum allowed size. +// This checks against the DA layer limit, not the per-batch limit. +// Returns true if the blob is within the absolute size limit, false otherwise. +func ValidateBlobSize(blob []byte) bool { + return uint64(len(blob)) <= AbsoluteMaxBlobSize +} + +// WouldExceedCumulativeSize checks if adding a blob would exceed the cumulative size limit for a batch. +// Returns true if adding the blob would exceed the limit, false otherwise. +func WouldExceedCumulativeSize(currentSize int, blobSize int, maxBytes uint64) bool { + return uint64(currentSize)+uint64(blobSize) > maxBytes +} + +// GetBlobSize returns the size of a blob in bytes. 
+func GetBlobSize(blob []byte) int { + return len(blob) +} diff --git a/sequencers/common/size_validation_test.go b/sequencers/common/size_validation_test.go new file mode 100644 index 0000000000..103c66d8be --- /dev/null +++ b/sequencers/common/size_validation_test.go @@ -0,0 +1,141 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValidateBlobSize(t *testing.T) { + tests := []struct { + name string + blobSize int + want bool + }{ + { + name: "empty blob", + blobSize: 0, + want: true, + }, + { + name: "small blob", + blobSize: 100, + want: true, + }, + { + name: "exactly at limit", + blobSize: int(AbsoluteMaxBlobSize), + want: true, + }, + { + name: "one byte over limit", + blobSize: int(AbsoluteMaxBlobSize) + 1, + want: false, + }, + { + name: "far exceeds limit", + blobSize: int(AbsoluteMaxBlobSize) * 2, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + blob := make([]byte, tt.blobSize) + got := ValidateBlobSize(blob) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestWouldExceedCumulativeSize(t *testing.T) { + tests := []struct { + name string + currentSize int + blobSize int + maxBytes uint64 + want bool + }{ + { + name: "empty batch, small blob", + currentSize: 0, + blobSize: 50, + maxBytes: 100, + want: false, + }, + { + name: "would fit exactly", + currentSize: 50, + blobSize: 50, + maxBytes: 100, + want: false, + }, + { + name: "would exceed by one byte", + currentSize: 50, + blobSize: 51, + maxBytes: 100, + want: true, + }, + { + name: "far exceeds", + currentSize: 80, + blobSize: 100, + maxBytes: 100, + want: true, + }, + { + name: "zero max bytes", + currentSize: 0, + blobSize: 1, + maxBytes: 0, + want: true, + }, + { + name: "current already at limit", + currentSize: 100, + blobSize: 1, + maxBytes: 100, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := WouldExceedCumulativeSize(tt.currentSize, tt.blobSize, tt.maxBytes) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestGetBlobSize(t *testing.T) { + tests := []struct { + name string + blobSize int + want int + }{ + { + name: "empty blob", + blobSize: 0, + want: 0, + }, + { + name: "small blob", + blobSize: 42, + want: 42, + }, + { + name: "large blob", + blobSize: 1024 * 1024, + want: 1024 * 1024, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + blob := make([]byte, tt.blobSize) + got := GetBlobSize(blob) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/sequencers/single/queue.go b/sequencers/single/queue.go index dd69c26a2c..d992535ead 100644 --- a/sequencers/single/queue.go +++ b/sequencers/single/queue.go @@ -83,6 +83,26 @@ func (bq *BatchQueue) AddBatch(ctx context.Context, batch coresequencer.Batch) e return nil } +// Prepend adds a batch to the front of the queue (before head position). +// This is used to return transactions that couldn't fit in the current batch. +// The batch is NOT persisted to the DB since these are transactions that were +// already in the queue or were just processed. +func (bq *BatchQueue) Prepend(ctx context.Context, batch coresequencer.Batch) error { + bq.mu.Lock() + defer bq.mu.Unlock() + + // If we have room before head, use it + if bq.head > 0 { + bq.head-- + bq.queue[bq.head] = batch + } else { + // Need to expand the queue at the front + bq.queue = append([]coresequencer.Batch{batch}, bq.queue...) 
+ } + + return nil +} + // Next extracts a batch of transactions from the queue and marks it as processed in the WAL func (bq *BatchQueue) Next(ctx context.Context) (*coresequencer.Batch, error) { bq.mu.Lock() diff --git a/sequencers/single/queue_test.go b/sequencers/single/queue_test.go index 0ede59a90e..b7665ee67f 100644 --- a/sequencers/single/queue_test.go +++ b/sequencers/single/queue_test.go @@ -12,6 +12,7 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" dssync "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" @@ -567,3 +568,156 @@ func TestBatchQueue_QueueLimit_Concurrency(t *testing.T) { t.Logf("Successfully added %d batches, rejected %d due to queue being full", addedCount, errorCount) } + +func TestBatchQueue_Prepend(t *testing.T) { + ctx := context.Background() + db := ds.NewMapDatastore() + + t.Run("prepend to empty queue", func(t *testing.T) { + queue := NewBatchQueue(db, "test-prepend-empty", 0) + err := queue.Load(ctx) + require.NoError(t, err) + + batch := coresequencer.Batch{ + Transactions: [][]byte{[]byte("tx1"), []byte("tx2")}, + } + + err = queue.Prepend(ctx, batch) + require.NoError(t, err) + + assert.Equal(t, 1, queue.Size()) + + // Next should return the prepended batch + nextBatch, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, 2, len(nextBatch.Transactions)) + assert.Equal(t, []byte("tx1"), nextBatch.Transactions[0]) + }) + + t.Run("prepend to queue with items", func(t *testing.T) { + queue := NewBatchQueue(db, "test-prepend-with-items", 0) + err := queue.Load(ctx) + require.NoError(t, err) + + // Add some batches first + batch1 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx1")}} + batch2 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx2")}} + err = queue.AddBatch(ctx, batch1) + require.NoError(t, err) + err = queue.AddBatch(ctx, batch2) + require.NoError(t, err) + + assert.Equal(t, 2, queue.Size()) + + // Prepend a batch + prependedBatch := coresequencer.Batch{Transactions: [][]byte{[]byte("prepended")}} + err = queue.Prepend(ctx, prependedBatch) + require.NoError(t, err) + + assert.Equal(t, 3, queue.Size()) + + // Next should return the prepended batch first + nextBatch, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, 1, len(nextBatch.Transactions)) + assert.Equal(t, []byte("prepended"), nextBatch.Transactions[0]) + + // Then the original batches + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx1"), nextBatch.Transactions[0]) + + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx2"), nextBatch.Transactions[0]) + }) + + t.Run("prepend after consuming some items", func(t *testing.T) { + queue := NewBatchQueue(db, "test-prepend-after-consume", 0) + err := queue.Load(ctx) + require.NoError(t, err) + + // Add batches + batch1 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx1")}} + batch2 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx2")}} + batch3 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx3")}} + err = queue.AddBatch(ctx, batch1) + require.NoError(t, err) + err = queue.AddBatch(ctx, batch2) + require.NoError(t, err) + err = queue.AddBatch(ctx, batch3) + require.NoError(t, err) + + assert.Equal(t, 3, queue.Size()) + + // Consume first batch + nextBatch, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx1"), nextBatch.Transactions[0]) + 
assert.Equal(t, 2, queue.Size()) + + // Prepend - should reuse the head position + prependedBatch := coresequencer.Batch{Transactions: [][]byte{[]byte("prepended")}} + err = queue.Prepend(ctx, prependedBatch) + require.NoError(t, err) + + assert.Equal(t, 3, queue.Size()) + + // Should get prepended, then tx2, then tx3 + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("prepended"), nextBatch.Transactions[0]) + + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx2"), nextBatch.Transactions[0]) + + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx3"), nextBatch.Transactions[0]) + + assert.Equal(t, 0, queue.Size()) + }) + + t.Run("multiple prepends", func(t *testing.T) { + queue := NewBatchQueue(db, "test-multiple-prepends", 0) + err := queue.Load(ctx) + require.NoError(t, err) + + // Add a batch + batch1 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx1")}} + err = queue.AddBatch(ctx, batch1) + require.NoError(t, err) + + // Prepend multiple batches + prepend1 := coresequencer.Batch{Transactions: [][]byte{[]byte("prepend1")}} + prepend2 := coresequencer.Batch{Transactions: [][]byte{[]byte("prepend2")}} + prepend3 := coresequencer.Batch{Transactions: [][]byte{[]byte("prepend3")}} + + err = queue.Prepend(ctx, prepend1) + require.NoError(t, err) + err = queue.Prepend(ctx, prepend2) + require.NoError(t, err) + err = queue.Prepend(ctx, prepend3) + require.NoError(t, err) + + assert.Equal(t, 4, queue.Size()) + + // Should get in reverse order of prepending (LIFO for prepended items) + nextBatch, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("prepend3"), nextBatch.Transactions[0]) + + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("prepend2"), nextBatch.Transactions[0]) + + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("prepend1"), nextBatch.Transactions[0]) + + nextBatch, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx1"), nextBatch.Transactions[0]) + }) +} diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index dbc5bc567c..e97d7a157e 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -5,21 +5,36 @@ import ( "context" "errors" "fmt" + "sync/atomic" "time" ds "github.com/ipfs/go-datastore" "github.com/rs/zerolog" + "github.com/evstack/ev-node/block" coreda "github.com/evstack/ev-node/core/da" coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/genesis" + seqcommon "github.com/evstack/ev-node/sequencers/common" ) -// ErrInvalidId is returned when the chain id is invalid var ( + // ErrInvalidId is returned when the chain id is invalid ErrInvalidId = errors.New("invalid chain id") ) -var _ coresequencer.Sequencer = &Sequencer{} +// ForcedInclusionRetriever defines the interface for retrieving forced inclusion transactions from DA +type ForcedInclusionRetriever interface { + RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*block.ForcedInclusionEvent, error) +} + +// pendingForcedInclusionTx represents a forced inclusion transaction that couldn't fit in the current epoch +type pendingForcedInclusionTx struct { + Data []byte + OriginalHeight uint64 +} + +var _ coresequencer.Sequencer = (*Sequencer)(nil) // Sequencer implements core sequencing interface type Sequencer struct { @@ -35,6 +50,12 @@ type Sequencer struct { queue *BatchQueue // 
single queue for immediate availability metrics *Metrics + + // Forced inclusion support + fiRetriever ForcedInclusionRetriever + genesis genesis.Genesis + daHeight atomic.Uint64 + pendingForcedInclusionTxs []pendingForcedInclusionTx } // NewSequencer creates a new Single Sequencer @@ -47,31 +68,23 @@ func NewSequencer( batchTime time.Duration, metrics *Metrics, proposer bool, -) (*Sequencer, error) { - return NewSequencerWithQueueSize(ctx, logger, db, da, id, batchTime, metrics, proposer, 1000) -} - -// NewSequencerWithQueueSize creates a new Single Sequencer with configurable queue size -func NewSequencerWithQueueSize( - ctx context.Context, - logger zerolog.Logger, - db ds.Batching, - da coreda.DA, - id []byte, - batchTime time.Duration, - metrics *Metrics, - proposer bool, maxQueueSize int, + fiRetriever ForcedInclusionRetriever, + gen genesis.Genesis, ) (*Sequencer, error) { s := &Sequencer{ - logger: logger, - da: da, - batchTime: batchTime, - Id: id, - queue: NewBatchQueue(db, "batches", maxQueueSize), - metrics: metrics, - proposer: proposer, + logger: logger, + da: da, + batchTime: batchTime, + Id: id, + queue: NewBatchQueue(db, "batches", maxQueueSize), + metrics: metrics, + proposer: proposer, + fiRetriever: fiRetriever, + genesis: gen, + pendingForcedInclusionTxs: make([]pendingForcedInclusionTx, 0), } + s.SetDAHeight(gen.DAStartHeight) // will be overridden by the executor loadCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() @@ -117,14 +130,97 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB return nil, ErrInvalidId } + currentDAHeight := c.daHeight.Load() + + forcedEvent, err := c.fiRetriever.RetrieveForcedIncludedTxs(ctx, currentDAHeight) + if err != nil { + // Continue without forced txs. Add logging for clarity. + + if errors.Is(err, coreda.ErrHeightFromFuture) { + c.logger.Debug(). + Uint64("da_height", currentDAHeight). + Msg("DA height from future, waiting for DA to produce block") + } else if !errors.Is(err, block.ErrForceInclusionNotConfigured) { + c.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions") + } + + // Still create an empty forced inclusion event + forcedEvent = &block.ForcedInclusionEvent{ + Txs: [][]byte{}, + StartDaHeight: currentDAHeight, + EndDaHeight: currentDAHeight, + } + } + + // Always try to process forced inclusion transactions (including pending from previous epochs) + forcedTxs := c.processForcedInclusionTxs(forcedEvent, req.MaxBytes) + if forcedEvent.EndDaHeight > currentDAHeight { + c.SetDAHeight(forcedEvent.EndDaHeight) + } else if forcedEvent.StartDaHeight > currentDAHeight { + c.SetDAHeight(forcedEvent.StartDaHeight) + } + + c.logger.Debug(). + Int("tx_count", len(forcedTxs)). + Uint64("da_height_start", forcedEvent.StartDaHeight). + Uint64("da_height_end", forcedEvent.EndDaHeight). 
+ Msg("retrieved forced inclusion transactions from DA") + + // Calculate size used by forced inclusion transactions + forcedTxsSize := 0 + for _, tx := range forcedTxs { + forcedTxsSize += len(tx) + } + batch, err := c.queue.Next(ctx) if err != nil { return nil, err } + // Prepend forced inclusion transactions to the batch + // and ensure total size doesn't exceed maxBytes + if len(forcedTxs) > 0 { + // Trim batch transactions to fit within maxBytes + remainingBytes := int(req.MaxBytes) - forcedTxsSize + trimmedBatchTxs := make([][]byte, 0, len(batch.Transactions)) + currentBatchSize := 0 + + for i, tx := range batch.Transactions { + txSize := len(tx) + if currentBatchSize+txSize > remainingBytes { + // Would exceed limit, return remaining txs to the front of the queue + excludedBatch := coresequencer.Batch{Transactions: batch.Transactions[i:]} + if err := c.queue.Prepend(ctx, excludedBatch); err != nil { + c.logger.Error().Err(err). + Int("excluded_count", len(batch.Transactions)-i). + Msg("failed to prepend excluded transactions back to queue") + } else { + c.logger.Debug(). + Int("excluded_count", len(batch.Transactions)-i). + Msg("returned excluded batch transactions to front of queue") + } + break + } + trimmedBatchTxs = append(trimmedBatchTxs, tx) + currentBatchSize += txSize + } + + batch.Transactions = append(forcedTxs, trimmedBatchTxs...) + + c.logger.Debug(). + Int("forced_tx_count", len(forcedTxs)). + Int("forced_txs_size", forcedTxsSize). + Int("batch_tx_count", len(trimmedBatchTxs)). + Int("batch_size", currentBatchSize). + Int("total_tx_count", len(batch.Transactions)). + Int("total_size", forcedTxsSize+currentBatchSize). + Msg("combined forced inclusion and batch transactions") + } + return &coresequencer.GetNextBatchResponse{ Batch: batch, Timestamp: time.Now(), + BatchData: req.LastBatchData, }, nil } @@ -171,3 +267,106 @@ func (c *Sequencer) VerifyBatch(ctx context.Context, req coresequencer.VerifyBat func (c *Sequencer) isValid(Id []byte) bool { return bytes.Equal(c.Id, Id) } + +// SetDAHeight sets the current DA height for the sequencer +// This should be called when the sequencer needs to sync to a specific DA height +func (c *Sequencer) SetDAHeight(height uint64) { + c.daHeight.Store(height) + c.logger.Debug().Uint64("da_height", height).Msg("DA height updated") +} + +// GetDAHeight returns the current DA height +func (c *Sequencer) GetDAHeight() uint64 { + return c.daHeight.Load() +} + +// processForcedInclusionTxs processes forced inclusion transactions with size validation and pending queue management +func (c *Sequencer) processForcedInclusionTxs(event *block.ForcedInclusionEvent, maxBytes uint64) [][]byte { + currentSize := 0 + var newPendingTxs []pendingForcedInclusionTx + var validatedTxs [][]byte + + // First, process any pending transactions from previous epochs + for _, pendingTx := range c.pendingForcedInclusionTxs { + txSize := seqcommon.GetBlobSize(pendingTx.Data) + + if !seqcommon.ValidateBlobSize(pendingTx.Data) { + c.logger.Warn(). + Uint64("original_height", pendingTx.OriginalHeight). + Int("blob_size", txSize). + Msg("pending forced inclusion blob exceeds absolute maximum size - skipping") + continue + } + + if seqcommon.WouldExceedCumulativeSize(currentSize, txSize, maxBytes) { + c.logger.Debug(). + Uint64("original_height", pendingTx.OriginalHeight). + Int("current_size", currentSize). + Int("blob_size", txSize). 
+ Msg("pending blob would exceed max size for this epoch - deferring again") + newPendingTxs = append(newPendingTxs, pendingTx) + continue + } + + validatedTxs = append(validatedTxs, pendingTx.Data) + currentSize += txSize + + c.logger.Debug(). + Uint64("original_height", pendingTx.OriginalHeight). + Int("blob_size", txSize). + Int("current_size", currentSize). + Msg("processed pending forced inclusion transaction") + } + + // Now process new transactions from this epoch + for _, tx := range event.Txs { + txSize := seqcommon.GetBlobSize(tx) + + if !seqcommon.ValidateBlobSize(tx) { + c.logger.Warn(). + Uint64("da_height", event.StartDaHeight). + Int("blob_size", txSize). + Msg("forced inclusion blob exceeds absolute maximum size - skipping") + continue + } + + if seqcommon.WouldExceedCumulativeSize(currentSize, txSize, maxBytes) { + c.logger.Debug(). + Uint64("da_height", event.StartDaHeight). + Int("current_size", currentSize). + Int("blob_size", txSize). + Msg("blob would exceed max size for this epoch - deferring to pending queue") + + // Store for next call + newPendingTxs = append(newPendingTxs, pendingForcedInclusionTx{ + Data: tx, + OriginalHeight: event.StartDaHeight, + }) + continue + } + + validatedTxs = append(validatedTxs, tx) + currentSize += txSize + + c.logger.Debug(). + Int("blob_size", txSize). + Int("current_size", currentSize). + Msg("processed forced inclusion transaction") + } + + // Update pending queue + c.pendingForcedInclusionTxs = newPendingTxs + if len(newPendingTxs) > 0 { + c.logger.Info(). + Int("new_pending_count", len(newPendingTxs)). + Msg("stored pending forced inclusion transactions for next epoch") + } + + c.logger.Info(). + Int("processed_tx_count", len(validatedTxs)). + Int("pending_tx_count", len(newPendingTxs)). + Int("current_size", currentSize). + Msg("completed processing forced inclusion transactions") + + return validatedTxs +} diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go index 3904a62f73..f0d69a0ae1 100644 --- a/sequencers/single/sequencer_test.go +++ b/sequencers/single/sequencer_test.go @@ -13,11 +13,26 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/evstack/ev-node/block" coreda "github.com/evstack/ev-node/core/da" coresequencer "github.com/evstack/ev-node/core/sequencer" + "github.com/evstack/ev-node/pkg/genesis" damocks "github.com/evstack/ev-node/test/mocks" ) +// MockForcedInclusionRetriever is a mock implementation of ForcedInclusionRetriever for testing +type MockForcedInclusionRetriever struct { + mock.Mock +} + +func (m *MockForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*block.ForcedInclusionEvent, error) { + args := m.Called(ctx, daHeight) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*block.ForcedInclusionEvent), args.Error(1) +} + func TestNewSequencer(t *testing.T) { // Create a new sequencer with mock DA client dummyDA := coreda.NewDummyDA(100_000_000, 10*time.Second) @@ -26,7 +41,10 @@ func TestNewSequencer(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() logger := zerolog.Nop() - seq, err := NewSequencer(ctx, logger, db, dummyDA, []byte("test1"), 10*time.Second, metrics, false) + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything).
+ Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + seq, err := NewSequencer(ctx, logger, db, dummyDA, []byte("test1"), 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) if err != nil { t.Fatalf("Failed to create sequencer: %v", err) } @@ -59,7 +77,11 @@ func TestSequencer_SubmitBatchTxs(t *testing.T) { defer cancel() Id := []byte("test1") logger := zerolog.Nop() - seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false) + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() + seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) if err != nil { t.Fatalf("Failed to create sequencer: %v", err) } @@ -112,7 +134,11 @@ func TestSequencer_SubmitBatchTxs_EmptyBatch(t *testing.T) { defer cancel() Id := []byte("test1") logger := zerolog.Nop() - seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false) + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() + seq, err := NewSequencer(ctx, logger, db, dummyDA, Id, 10*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) require.NoError(t, err, "Failed to create sequencer") defer func() { err := db.Close() @@ -152,10 +178,15 @@ func TestSequencer_GetNextBatch_NoLastBatch(t *testing.T) { db := ds.NewMapDatastore() logger := zerolog.Nop() + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq := &Sequencer{ - logger: logger, - queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test - Id: []byte("test"), + logger: logger, + queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test + Id: []byte("test"), + fiRetriever: mockRetriever, } defer func() { err := db.Close() @@ -188,10 +219,15 @@ func TestSequencer_GetNextBatch_Success(t *testing.T) { db := ds.NewMapDatastore() logger := zerolog.Nop() + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq := &Sequencer{ - logger: logger, - queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test - Id: []byte("test"), + logger: logger, + queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test + Id: []byte("test"), + fiRetriever: mockRetriever, } defer func() { err := db.Close() @@ -247,13 +283,18 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Proposer Mode", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
+ Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: true, - da: mockDA, - queue: NewBatchQueue(db, "proposer_queue", 0), // 0 = unlimited for test + logger: logger, + Id: Id, + proposer: true, + da: mockDA, + queue: NewBatchQueue(db, "proposer_queue", 0), // 0 = unlimited for test + fiRetriever: mockRetriever, } res, err := seq.VerifyBatch(context.Background(), coresequencer.VerifyBatchRequest{Id: seq.Id, BatchData: batchData}) @@ -269,12 +310,17 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Valid Proofs", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "valid_proofs_queue", 0), + logger: logger, + Id: Id, + proposer: false, + da: mockDA, + queue: NewBatchQueue(db, "valid_proofs_queue", 0), + fiRetriever: mockRetriever, } mockDA.On("GetProofs", context.Background(), batchData, Id).Return(proofs, nil).Once() @@ -290,12 +336,17 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Invalid Proof", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "invalid_proof_queue", 0), + logger: logger, + Id: Id, + proposer: false, + da: mockDA, + queue: NewBatchQueue(db, "invalid_proof_queue", 0), + fiRetriever: mockRetriever, } mockDA.On("GetProofs", context.Background(), batchData, Id).Return(proofs, nil).Once() @@ -311,12 +362,17 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("GetProofs Error", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "getproofs_err_queue", 0), + logger: logger, + Id: Id, + proposer: false, + da: mockDA, + queue: NewBatchQueue(db, "getproofs_err_queue", 0), + fiRetriever: mockRetriever, } expectedErr := errors.New("get proofs failed") @@ -333,12 +389,18 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Validate Error", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
+ Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "validate_err_queue", 0), + logger: logger, + Id: Id, + proposer: false, + da: mockDA, + queue: NewBatchQueue(db, "validate_err_queue", 0), + fiRetriever: mockRetriever, } expectedErr := errors.New("validate failed") @@ -355,13 +417,17 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Invalid ID", func(t *testing.T) { mockDA := damocks.NewMockDA(t) logger := zerolog.Nop() + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "invalid_queue", 0), + logger: logger, + Id: Id, + proposer: false, + da: mockDA, + queue: NewBatchQueue(db, "invalid_queue", 0), + fiRetriever: mockRetriever, } invalidId := []byte("invalid") @@ -385,7 +451,11 @@ func TestSequencer_GetNextBatch_BeforeDASubmission(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() logger := zerolog.Nop() - seq, err := NewSequencer(ctx, logger, db, mockDA, []byte("test1"), 1*time.Second, metrics, false) + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() + seq, err := NewSequencer(ctx, logger, db, mockDA, []byte("test1"), 1*time.Second, metrics, false, 1000, mockRetriever, genesis.Genesis{}) if err != nil { t.Fatalf("Failed to create sequencer: %v", err) } @@ -431,6 +501,254 @@ func TestSequencer_GetNextBatch_BeforeDASubmission(t *testing.T) { mockDA.AssertExpectations(t) } +func TestSequencer_GetNextBatch_ForcedInclusionAndBatch_MaxBytes(t *testing.T) { + ctx := context.Background() + logger := zerolog.New(zerolog.NewConsoleWriter()) + + // Create in-memory datastore + db := ds.NewMapDatastore() + + // Create mock forced inclusion retriever with txs of 50 and 60 bytes + mockFI := &MockForcedInclusionRetriever{} + forcedTx1 := make([]byte, 50) + forcedTx2 := make([]byte, 60) + mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{forcedTx1, forcedTx2}, // Total 110 bytes + StartDaHeight: 100, + EndDaHeight: 100, + }, nil) + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq, err := NewSequencer( + ctx, + logger, + db, + nil, + []byte("test-chain"), + 1*time.Second, + nil, + true, + 100, + mockFI, + gen, + ) + require.NoError(t, err) + + // Submit batch txs that are 40 bytes each + batchTx1 := make([]byte, 40) + batchTx2 := make([]byte, 40) + batchTx3 := make([]byte, 40) + + submitReq := coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test-chain"), + Batch: &coresequencer.Batch{ + Transactions: [][]byte{batchTx1, batchTx2, batchTx3}, // Total 120 bytes + }, + } + + _, err = seq.SubmitBatchTxs(ctx, submitReq) + require.NoError(t, err) + + // Request batch with maxBytes = 150 + // Forced inclusion: 110 bytes (50 + 60) + // Batch txs: 120 bytes (40 + 40 + 40) + // Combined would be 230 bytes, exceeds 150 + // Should return forced txs + only 1 
batch tx (110 + 40 = 150) + getReq := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 150, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(ctx, getReq) + require.NoError(t, err) + require.NotNil(t, resp.Batch) + + // Should have forced txs (2) + partial batch txs + // Total size should not exceed 150 bytes + totalSize := 0 + for _, tx := range resp.Batch.Transactions { + totalSize += len(tx) + } + assert.LessOrEqual(t, totalSize, 150, "Total batch size should not exceed maxBytes") + + // First 2 txs should be forced inclusion txs + assert.GreaterOrEqual(t, len(resp.Batch.Transactions), 2, "Should have at least forced inclusion txs") + assert.Equal(t, forcedTx1, resp.Batch.Transactions[0]) + assert.Equal(t, forcedTx2, resp.Batch.Transactions[1]) + + mockFI.AssertExpectations(t) +} + +func TestSequencer_GetNextBatch_ForcedInclusion_ExceedsMaxBytes(t *testing.T) { + ctx := context.Background() + logger := zerolog.New(zerolog.NewConsoleWriter()) + + db := ds.NewMapDatastore() + + // Create forced inclusion txs where combined they exceed maxBytes + mockFI := &MockForcedInclusionRetriever{} + forcedTx1 := make([]byte, 100) + forcedTx2 := make([]byte, 80) // This would be deferred + mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{forcedTx1, forcedTx2}, + StartDaHeight: 100, + EndDaHeight: 100, + }, nil).Once() + + // Second call should process pending tx + mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{}, + StartDaHeight: 100, + EndDaHeight: 100, + }, nil).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq, err := NewSequencer( + ctx, + logger, + db, + nil, + []byte("test-chain"), + 1*time.Second, + nil, + true, + 100, + mockFI, + gen, + ) + require.NoError(t, err) + + // Request batch with maxBytes = 120 + getReq := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 120, + LastBatchData: nil, + } + + // First call - should get only first forced tx (100 bytes) + resp, err := seq.GetNextBatch(ctx, getReq) + require.NoError(t, err) + require.NotNil(t, resp.Batch) + assert.Equal(t, 1, len(resp.Batch.Transactions), "Should only include first forced tx") + assert.Equal(t, 100, len(resp.Batch.Transactions[0])) + + // Verify pending tx is stored + assert.Equal(t, 1, len(seq.pendingForcedInclusionTxs), "Second tx should be pending") + + // Second call - should get the pending forced tx + resp2, err := seq.GetNextBatch(ctx, getReq) + require.NoError(t, err) + require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include pending forced tx") + assert.Equal(t, 80, len(resp2.Batch.Transactions[0])) + + // Pending queue should now be empty + assert.Equal(t, 0, len(seq.pendingForcedInclusionTxs), "Pending queue should be empty") + + mockFI.AssertExpectations(t) +} + +func TestSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) { + ctx := context.Background() + logger := zerolog.New(zerolog.NewConsoleWriter()) + + db := ds.NewMapDatastore() + + mockFI := &MockForcedInclusionRetriever{} + + // First call returns a large forced tx that gets deferred + largeForcedTx := make([]byte, 150) + mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{largeForcedTx}, + StartDaHeight: 100, + EndDaHeight: 100, + }, nil).Once() + + // Second call returns no new 
forced txs, but pending should still be processed + mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{}, + StartDaHeight: 100, + EndDaHeight: 100, + }, nil).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + } + + seq, err := NewSequencer( + ctx, + logger, + db, + nil, + []byte("test-chain"), + 1*time.Second, + nil, + true, + 100, + mockFI, + gen, + ) + require.NoError(t, err) + + // Submit a batch tx + batchTx := make([]byte, 50) + submitReq := coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test-chain"), + Batch: &coresequencer.Batch{ + Transactions: [][]byte{batchTx}, + }, + } + _, err = seq.SubmitBatchTxs(ctx, submitReq) + require.NoError(t, err) + + // First call with maxBytes = 100 + // Large forced tx (150 bytes) won't fit, gets deferred + // Batch tx (50 bytes) should be returned + getReq := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 100, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(ctx, getReq) + require.NoError(t, err) + require.NotNil(t, resp.Batch) + assert.Equal(t, 1, len(resp.Batch.Transactions), "Should have batch tx only") + assert.Equal(t, 50, len(resp.Batch.Transactions[0])) + + // Verify pending forced tx is stored + assert.Equal(t, 1, len(seq.pendingForcedInclusionTxs), "Large forced tx should be pending") + + // Second call with larger maxBytes = 200 + // Should process pending forced tx first + getReq2 := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 200, + LastBatchData: nil, + } + + resp2, err := seq.GetNextBatch(ctx, getReq2) + require.NoError(t, err) + require.NotNil(t, resp2.Batch) + assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include pending forced tx") + assert.Equal(t, 150, len(resp2.Batch.Transactions[0])) + + // Pending queue should now be empty + assert.Equal(t, 0, len(seq.pendingForcedInclusionTxs), "Pending queue should be empty") + + mockFI.AssertExpectations(t) +} + // TestSequencer_RecordMetrics tests the RecordMetrics method to ensure it properly updates metrics. func TestSequencer_RecordMetrics(t *testing.T) { t.Run("With Metrics", func(t *testing.T) { @@ -523,16 +841,20 @@ func TestSequencer_QueueLimit_Integration(t *testing.T) { defer db.Close() mockDA := &damocks.MockDA{} + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). + Return(nil, block.ErrForceInclusionNotConfigured).Maybe() // Create a sequencer with a small queue limit for testing logger := zerolog.Nop() seq := &Sequencer{ - logger: logger, - da: mockDA, - batchTime: time.Second, - Id: []byte("test"), - queue: NewBatchQueue(db, "test_queue", 2), // Very small limit for testing - proposer: true, + logger: logger, + da: mockDA, + batchTime: time.Second, + Id: []byte("test"), + queue: NewBatchQueue(db, "test_queue", 2), // Very small limit for testing + proposer: true, + fiRetriever: mockRetriever, } ctx := context.Background() @@ -641,7 +963,11 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) { // Create sequencer with small queue size to trigger throttling quickly queueSize := 3 // Small for testing logger := zerolog.Nop() - seq, err := NewSequencerWithQueueSize( + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
+ Return(nil, block.ErrForceInclusionNotConfigured).Maybe() + mockRetriever.On("SetDAHeight", mock.Anything).Return().Maybe() + seq, err := NewSequencer( context.Background(), logger, db, @@ -651,6 +977,8 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) { nil, // metrics true, // proposer queueSize, + mockRetriever, // fiRetriever + genesis.Genesis{}, // genesis ) require.NoError(t, err) diff --git a/test/mocks/da.go b/test/mocks/da.go index 37539d5480..bb3ad63391 100644 --- a/test/mocks/da.go +++ b/test/mocks/da.go @@ -112,126 +112,6 @@ func (_c *MockDA_Commit_Call) RunAndReturn(run func(ctx context.Context, blobs [ return _c } -// GasMultiplier provides a mock function for the type MockDA -func (_mock *MockDA) GasMultiplier(ctx context.Context) (float64, error) { - ret := _mock.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GasMultiplier") - } - - var r0 float64 - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context) (float64, error)); ok { - return returnFunc(ctx) - } - if returnFunc, ok := ret.Get(0).(func(context.Context) float64); ok { - r0 = returnFunc(ctx) - } else { - r0 = ret.Get(0).(float64) - } - if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = returnFunc(ctx) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockDA_GasMultiplier_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GasMultiplier' -type MockDA_GasMultiplier_Call struct { - *mock.Call -} - -// GasMultiplier is a helper method to define mock.On call -// - ctx context.Context -func (_e *MockDA_Expecter) GasMultiplier(ctx interface{}) *MockDA_GasMultiplier_Call { - return &MockDA_GasMultiplier_Call{Call: _e.mock.On("GasMultiplier", ctx)} -} - -func (_c *MockDA_GasMultiplier_Call) Run(run func(ctx context.Context)) *MockDA_GasMultiplier_Call { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - run( - arg0, - ) - }) - return _c -} - -func (_c *MockDA_GasMultiplier_Call) Return(f float64, err error) *MockDA_GasMultiplier_Call { - _c.Call.Return(f, err) - return _c -} - -func (_c *MockDA_GasMultiplier_Call) RunAndReturn(run func(ctx context.Context) (float64, error)) *MockDA_GasMultiplier_Call { - _c.Call.Return(run) - return _c -} - -// GasPrice provides a mock function for the type MockDA -func (_mock *MockDA) GasPrice(ctx context.Context) (float64, error) { - ret := _mock.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GasPrice") - } - - var r0 float64 - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context) (float64, error)); ok { - return returnFunc(ctx) - } - if returnFunc, ok := ret.Get(0).(func(context.Context) float64); ok { - r0 = returnFunc(ctx) - } else { - r0 = ret.Get(0).(float64) - } - if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = returnFunc(ctx) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockDA_GasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GasPrice' -type MockDA_GasPrice_Call struct { - *mock.Call -} - -// GasPrice is a helper method to define mock.On call -// - ctx context.Context -func (_e *MockDA_Expecter) GasPrice(ctx interface{}) *MockDA_GasPrice_Call { - return &MockDA_GasPrice_Call{Call: _e.mock.On("GasPrice", ctx)} -} - -func (_c *MockDA_GasPrice_Call) Run(run func(ctx context.Context)) *MockDA_GasPrice_Call { - _c.Call.Run(func(args 
mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - run( - arg0, - ) - }) - return _c -} - -func (_c *MockDA_GasPrice_Call) Return(f float64, err error) *MockDA_GasPrice_Call { - _c.Call.Return(f, err) - return _c -} - -func (_c *MockDA_GasPrice_Call) RunAndReturn(run func(ctx context.Context) (float64, error)) *MockDA_GasPrice_Call { - _c.Call.Return(run) - return _c -} - // Get provides a mock function for the type MockDA func (_mock *MockDA) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Blob, error) { ret := _mock.Called(ctx, ids, namespace) diff --git a/test/mocks/sequencer.go b/test/mocks/sequencer.go index c3894f846b..e1ef0afb4d 100644 --- a/test/mocks/sequencer.go +++ b/test/mocks/sequencer.go @@ -38,6 +38,50 @@ func (_m *MockSequencer) EXPECT() *MockSequencer_Expecter { return &MockSequencer_Expecter{mock: &_m.Mock} } +// GetDAHeight provides a mock function for the type MockSequencer +func (_mock *MockSequencer) GetDAHeight() uint64 { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for GetDAHeight") + } + + var r0 uint64 + if returnFunc, ok := ret.Get(0).(func() uint64); ok { + r0 = returnFunc() + } else { + r0 = ret.Get(0).(uint64) + } + return r0 +} + +// MockSequencer_GetDAHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDAHeight' +type MockSequencer_GetDAHeight_Call struct { + *mock.Call +} + +// GetDAHeight is a helper method to define mock.On call +func (_e *MockSequencer_Expecter) GetDAHeight() *MockSequencer_GetDAHeight_Call { + return &MockSequencer_GetDAHeight_Call{Call: _e.mock.On("GetDAHeight")} +} + +func (_c *MockSequencer_GetDAHeight_Call) Run(run func()) *MockSequencer_GetDAHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockSequencer_GetDAHeight_Call) Return(v uint64) *MockSequencer_GetDAHeight_Call { + _c.Call.Return(v) + return _c +} + +func (_c *MockSequencer_GetDAHeight_Call) RunAndReturn(run func() uint64) *MockSequencer_GetDAHeight_Call { + _c.Call.Return(run) + return _c +} + // GetNextBatch provides a mock function for the type MockSequencer func (_mock *MockSequencer) GetNextBatch(ctx context.Context, req sequencer.GetNextBatchRequest) (*sequencer.GetNextBatchResponse, error) { ret := _mock.Called(ctx, req) @@ -106,6 +150,46 @@ func (_c *MockSequencer_GetNextBatch_Call) RunAndReturn(run func(ctx context.Con return _c } +// SetDAHeight provides a mock function for the type MockSequencer +func (_mock *MockSequencer) SetDAHeight(height uint64) { + _mock.Called(height) + return +} + +// MockSequencer_SetDAHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetDAHeight' +type MockSequencer_SetDAHeight_Call struct { + *mock.Call +} + +// SetDAHeight is a helper method to define mock.On call +// - height uint64 +func (_e *MockSequencer_Expecter) SetDAHeight(height interface{}) *MockSequencer_SetDAHeight_Call { + return &MockSequencer_SetDAHeight_Call{Call: _e.mock.On("SetDAHeight", height)} +} + +func (_c *MockSequencer_SetDAHeight_Call) Run(run func(height uint64)) *MockSequencer_SetDAHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 uint64 + if args[0] != nil { + arg0 = args[0].(uint64) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockSequencer_SetDAHeight_Call) Return() *MockSequencer_SetDAHeight_Call { + _c.Call.Return() + return _c +} + +func (_c *MockSequencer_SetDAHeight_Call) 
RunAndReturn(run func(height uint64)) *MockSequencer_SetDAHeight_Call { + _c.Run(run) + return _c +} + // SubmitBatchTxs provides a mock function for the type MockSequencer func (_mock *MockSequencer) SubmitBatchTxs(ctx context.Context, req sequencer.SubmitBatchTxsRequest) (*sequencer.SubmitBatchTxsResponse, error) { ret := _mock.Called(ctx, req) diff --git a/types/CLAUDE.md b/types/CLAUDE.md index 9cd5496e56..aafdd289a2 100644 --- a/types/CLAUDE.md +++ b/types/CLAUDE.md @@ -77,17 +77,16 @@ The types package defines the core data structures and types used throughout ev- - Signature verification - Identity validation -### DA Integration (`da.go`, `da_test.go`) +### DA Integration -- **Purpose**: Data Availability layer helpers -- **Key Functions**: - - `SubmitWithHelpers`: DA submission with error handling +- **Purpose**: Data Availability layer helpers moved to `block/internal/da` package +- **See**: `block/internal/da/client.go` for DA submission and retrieval logic - **Key Features**: - - Error mapping to status codes + - Error mapping to status codes (in DA Client) - Namespace support - Gas price configuration - Submission options handling -- **Status Codes**: +- **Status Codes** (defined in `core/da`): - `StatusContextCanceled`: Submission canceled - `StatusNotIncludedInBlock`: Transaction timeout - `StatusAlreadyInMempool`: Duplicate transaction diff --git a/types/da.go b/types/da.go deleted file mode 100644 index e0d58710d9..0000000000 --- a/types/da.go +++ /dev/null @@ -1,212 +0,0 @@ -package types - -import ( - "context" - "errors" - "fmt" - "strings" - "time" - - "github.com/rs/zerolog" - - coreda "github.com/evstack/ev-node/core/da" -) - -// SubmitWithHelpers performs blob submission using the underlying DA layer, -// handling error mapping to produce a ResultSubmit. -// It assumes blob size filtering is handled within the DA implementation's Submit. -// It mimics the logic previously found in da.DAClient.Submit. 
-func SubmitWithHelpers( - ctx context.Context, - da coreda.DA, // Use the core DA interface - logger zerolog.Logger, - data [][]byte, - gasPrice float64, - namespace []byte, - options []byte, -) coreda.ResultSubmit { // Return core ResultSubmit type - ids, err := da.SubmitWithOptions(ctx, data, gasPrice, namespace, options) - - // calculate blob size - var blobSize uint64 - for _, blob := range data { - blobSize += uint64(len(blob)) - } - - // Handle errors returned by Submit - if err != nil { - if errors.Is(err, context.Canceled) { - logger.Debug().Msg("DA submission canceled via helper due to context cancellation") - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusContextCanceled, - Message: "submission canceled", - IDs: ids, - BlobSize: blobSize, - }, - } - } - status := coreda.StatusError - switch { - case errors.Is(err, coreda.ErrTxTimedOut): - status = coreda.StatusNotIncludedInBlock - case errors.Is(err, coreda.ErrTxAlreadyInMempool): - status = coreda.StatusAlreadyInMempool - case errors.Is(err, coreda.ErrTxIncorrectAccountSequence): - status = coreda.StatusIncorrectAccountSequence - case errors.Is(err, coreda.ErrBlobSizeOverLimit): - status = coreda.StatusTooBig - case errors.Is(err, coreda.ErrContextDeadline): - status = coreda.StatusContextDeadline - } - - // Use debug level for StatusTooBig as it gets handled later in submitToDA through recursive splitting - if status == coreda.StatusTooBig { - logger.Debug().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed via helper") - } else { - logger.Error().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed via helper") - } - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: status, - Message: "failed to submit blobs: " + err.Error(), - IDs: ids, - SubmittedCount: uint64(len(ids)), - Height: 0, - Timestamp: time.Now(), - BlobSize: blobSize, - }, - } - } - - if len(ids) == 0 && len(data) > 0 { - logger.Warn().Msg("DA submission via helper returned no IDs for non-empty input data") - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusError, - Message: "failed to submit blobs: no IDs returned despite non-empty input", - }, - } - } - - // Get height from the first ID - var height uint64 - if len(ids) > 0 { - height, _, err = coreda.SplitID(ids[0]) - if err != nil { - logger.Error().Err(err).Msg("failed to split ID") - } - } - - logger.Debug().Int("num_ids", len(ids)).Msg("DA submission successful via helper") - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, - IDs: ids, - SubmittedCount: uint64(len(ids)), - Height: height, - BlobSize: blobSize, - Timestamp: time.Now(), - }, - } -} - -// RetrieveWithHelpers performs blob retrieval using the underlying DA layer, -// handling error mapping to produce a ResultRetrieve. -// It mimics the logic previously found in da.DAClient.Retrieve. -// requestTimeout defines the timeout for the each retrieval request. -func RetrieveWithHelpers( - ctx context.Context, - da coreda.DA, - logger zerolog.Logger, - dataLayerHeight uint64, - namespace []byte, - requestTimeout time.Duration, -) coreda.ResultRetrieve { - // 1. 
Get IDs - getIDsCtx, cancel := context.WithTimeout(ctx, requestTimeout) - defer cancel() - idsResult, err := da.GetIDs(getIDsCtx, dataLayerHeight, namespace) - if err != nil { - // Handle specific "not found" error - if strings.Contains(err.Error(), coreda.ErrBlobNotFound.Error()) { - logger.Debug().Uint64("height", dataLayerHeight).Msg("Retrieve helper: Blobs not found at height") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusNotFound, - Message: coreda.ErrBlobNotFound.Error(), - Height: dataLayerHeight, - Timestamp: time.Now(), - }, - } - } - if strings.Contains(err.Error(), coreda.ErrHeightFromFuture.Error()) { - logger.Debug().Uint64("height", dataLayerHeight).Msg("Retrieve helper: Blobs not found at height") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusHeightFromFuture, - Message: coreda.ErrHeightFromFuture.Error(), - Height: dataLayerHeight, - Timestamp: time.Now(), - }, - } - } - // Handle other errors during GetIDs - logger.Error().Uint64("height", dataLayerHeight).Err(err).Msg("Retrieve helper: Failed to get IDs") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusError, - Message: fmt.Sprintf("failed to get IDs: %s", err.Error()), - Height: dataLayerHeight, - Timestamp: time.Now(), - }, - } - } - - // This check should technically be redundant if GetIDs correctly returns ErrBlobNotFound - if idsResult == nil || len(idsResult.IDs) == 0 { - logger.Debug().Uint64("height", dataLayerHeight).Msg("Retrieve helper: No IDs found at height") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusNotFound, - Message: coreda.ErrBlobNotFound.Error(), - Height: dataLayerHeight, - Timestamp: time.Now(), - }, - } - } - // 2. Get Blobs using the retrieved IDs in batches - batchSize := 100 - blobs := make([][]byte, 0, len(idsResult.IDs)) - for i := 0; i < len(idsResult.IDs); i += batchSize { - end := min(i+batchSize, len(idsResult.IDs)) - - getBlobsCtx, cancel := context.WithTimeout(ctx, requestTimeout) - batchBlobs, err := da.Get(getBlobsCtx, idsResult.IDs[i:end], namespace) - cancel() - if err != nil { - // Handle errors during Get - logger.Error().Uint64("height", dataLayerHeight).Int("num_ids", len(idsResult.IDs)).Err(err).Msg("Retrieve helper: Failed to get blobs") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusError, - Message: fmt.Sprintf("failed to get blobs for batch %d-%d: %s", i, end-1, err.Error()), - Height: dataLayerHeight, - Timestamp: time.Now(), - }, - } - } - blobs = append(blobs, batchBlobs...) 
- } - // Success - logger.Debug().Uint64("height", dataLayerHeight).Int("num_blobs", len(blobs)).Msg("Retrieve helper: Successfully retrieved blobs") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, - Height: dataLayerHeight, - IDs: idsResult.IDs, - Timestamp: idsResult.Timestamp, - }, - Data: blobs, - } -} diff --git a/types/da_test.go b/types/da_test.go deleted file mode 100644 index 4a111499dc..0000000000 --- a/types/da_test.go +++ /dev/null @@ -1,298 +0,0 @@ -package types_test - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - - coreda "github.com/evstack/ev-node/core/da" - "github.com/evstack/ev-node/test/mocks" - "github.com/evstack/ev-node/types" -) - -func TestSubmitWithHelpers(t *testing.T) { - logger := zerolog.Nop() - - testCases := []struct { - name string - data [][]byte - gasPrice float64 - options []byte - submitErr error - submitIDs [][]byte - expectedCode coreda.StatusCode - expectedErrMsg string - expectedIDs [][]byte - expectedCount uint64 - }{ - { - name: "successful submission", - data: [][]byte{[]byte("blob1"), []byte("blob2")}, - gasPrice: 1.0, - options: []byte("opts"), - submitIDs: [][]byte{[]byte("id1"), []byte("id2")}, - expectedCode: coreda.StatusSuccess, - expectedIDs: [][]byte{[]byte("id1"), []byte("id2")}, - expectedCount: 2, - }, - { - name: "context canceled error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: context.Canceled, - expectedCode: coreda.StatusContextCanceled, - expectedErrMsg: "submission canceled", - }, - { - name: "tx timed out error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrTxTimedOut, - expectedCode: coreda.StatusNotIncludedInBlock, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxTimedOut.Error(), - }, - { - name: "tx already in mempool error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrTxAlreadyInMempool, - expectedCode: coreda.StatusAlreadyInMempool, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxAlreadyInMempool.Error(), - }, - { - name: "incorrect account sequence error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrTxIncorrectAccountSequence, - expectedCode: coreda.StatusIncorrectAccountSequence, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxIncorrectAccountSequence.Error(), - }, - { - name: "blob size over limit error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrBlobSizeOverLimit, - expectedCode: coreda.StatusTooBig, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrBlobSizeOverLimit.Error(), - }, - { - name: "context deadline error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrContextDeadline, - expectedCode: coreda.StatusContextDeadline, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrContextDeadline.Error(), - }, - { - name: "generic submission error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: errors.New("some generic error"), - expectedCode: coreda.StatusError, - expectedErrMsg: "failed to submit blobs: some generic error", - }, - { - name: "no IDs returned for non-empty data", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: 
[]byte("opts"), - submitIDs: [][]byte{}, - expectedCode: coreda.StatusError, - expectedErrMsg: "failed to submit blobs: no IDs returned despite non-empty input", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - mockDA := mocks.NewMockDA(t) - encodedNamespace := coreda.NamespaceFromString("test-namespace") - - mockDA.On("SubmitWithOptions", mock.Anything, tc.data, tc.gasPrice, encodedNamespace.Bytes(), tc.options).Return(tc.submitIDs, tc.submitErr) - - result := types.SubmitWithHelpers(context.Background(), mockDA, logger, tc.data, tc.gasPrice, encodedNamespace.Bytes(), tc.options) - - assert.Equal(t, tc.expectedCode, result.Code) - if tc.expectedErrMsg != "" { - assert.Contains(t, result.Message, tc.expectedErrMsg) - } - if tc.expectedIDs != nil { - assert.Equal(t, tc.expectedIDs, result.IDs) - } - if tc.expectedCount != 0 { - assert.Equal(t, tc.expectedCount, result.SubmittedCount) - } - mockDA.AssertExpectations(t) - }) - } -} - -func TestRetrieveWithHelpers(t *testing.T) { - logger := zerolog.Nop() - dataLayerHeight := uint64(100) - mockIDs := [][]byte{[]byte("id1"), []byte("id2")} - mockBlobs := [][]byte{[]byte("blobA"), []byte("blobB")} - mockTimestamp := time.Now() - - testCases := []struct { - name string - getIDsResult *coreda.GetIDsResult - getIDsErr error - getBlobsErr error - expectedCode coreda.StatusCode - expectedErrMsg string - expectedIDs [][]byte - expectedData [][]byte - expectedHeight uint64 - }{ - { - name: "successful retrieval", - getIDsResult: &coreda.GetIDsResult{ - IDs: mockIDs, - Timestamp: mockTimestamp, - }, - expectedCode: coreda.StatusSuccess, - expectedIDs: mockIDs, - expectedData: mockBlobs, - expectedHeight: dataLayerHeight, - }, - { - name: "blob not found error during GetIDs", - getIDsErr: coreda.ErrBlobNotFound, - expectedCode: coreda.StatusNotFound, - expectedErrMsg: coreda.ErrBlobNotFound.Error(), - expectedHeight: dataLayerHeight, - }, - { - name: "height from future error during GetIDs", - getIDsErr: coreda.ErrHeightFromFuture, - expectedCode: coreda.StatusHeightFromFuture, - expectedErrMsg: coreda.ErrHeightFromFuture.Error(), - expectedHeight: dataLayerHeight, - }, - { - name: "generic error during GetIDs", - getIDsErr: errors.New("failed to connect to DA"), - expectedCode: coreda.StatusError, - expectedErrMsg: "failed to get IDs: failed to connect to DA", - expectedHeight: dataLayerHeight, - }, - { - name: "GetIDs returns nil result", - getIDsResult: nil, - expectedCode: coreda.StatusNotFound, - expectedErrMsg: coreda.ErrBlobNotFound.Error(), - expectedHeight: dataLayerHeight, - }, - { - name: "GetIDs returns empty IDs", - getIDsResult: &coreda.GetIDsResult{ - IDs: [][]byte{}, - Timestamp: mockTimestamp, - }, - expectedCode: coreda.StatusNotFound, - expectedErrMsg: coreda.ErrBlobNotFound.Error(), - expectedHeight: dataLayerHeight, - }, - { - name: "error during Get (blobs retrieval)", - getIDsResult: &coreda.GetIDsResult{ - IDs: mockIDs, - Timestamp: mockTimestamp, - }, - getBlobsErr: errors.New("network error during blob retrieval"), - expectedCode: coreda.StatusError, - expectedErrMsg: "failed to get blobs for batch 0-1: network error during blob retrieval", - expectedHeight: dataLayerHeight, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - mockDA := mocks.NewMockDA(t) - encodedNamespace := coreda.NamespaceFromString("test-namespace") - - mockDA.On("GetIDs", mock.Anything, dataLayerHeight, mock.Anything).Return(tc.getIDsResult, tc.getIDsErr) - - if tc.getIDsErr == nil && 
tc.getIDsResult != nil && len(tc.getIDsResult.IDs) > 0 { - mockDA.On("Get", mock.Anything, tc.getIDsResult.IDs, mock.Anything).Return(mockBlobs, tc.getBlobsErr) - } - - result := types.RetrieveWithHelpers(context.Background(), mockDA, logger, dataLayerHeight, encodedNamespace.Bytes(), 5*time.Second) - - assert.Equal(t, tc.expectedCode, result.Code) - assert.Equal(t, tc.expectedHeight, result.Height) - if tc.expectedErrMsg != "" { - assert.Contains(t, result.Message, tc.expectedErrMsg) - } - if tc.expectedIDs != nil { - assert.Equal(t, tc.expectedIDs, result.IDs) - } - if tc.expectedData != nil { - assert.Equal(t, tc.expectedData, result.Data) - } - mockDA.AssertExpectations(t) - }) - } -} - -func TestRetrieveWithHelpers_Timeout(t *testing.T) { - logger := zerolog.Nop() - dataLayerHeight := uint64(100) - encodedNamespace := coreda.NamespaceFromString("test-namespace") - - t.Run("timeout during GetIDs", func(t *testing.T) { - mockDA := mocks.NewMockDA(t) - - // Mock GetIDs to block until context is cancelled - mockDA.On("GetIDs", mock.Anything, dataLayerHeight, mock.Anything).Run(func(args mock.Arguments) { - ctx := args.Get(0).(context.Context) - <-ctx.Done() // Wait for context cancellation - }).Return(nil, context.DeadlineExceeded) - - // Use a very short timeout to ensure it triggers - result := types.RetrieveWithHelpers(context.Background(), mockDA, logger, dataLayerHeight, encodedNamespace.Bytes(), 1*time.Millisecond) - - assert.Equal(t, coreda.StatusError, result.Code) - assert.Contains(t, result.Message, "failed to get IDs") - assert.Contains(t, result.Message, "context deadline exceeded") - mockDA.AssertExpectations(t) - }) - - t.Run("timeout during Get", func(t *testing.T) { - mockDA := mocks.NewMockDA(t) - mockIDs := [][]byte{[]byte("id1")} - mockTimestamp := time.Now() - - // Mock GetIDs to succeed - mockDA.On("GetIDs", mock.Anything, dataLayerHeight, mock.Anything).Return(&coreda.GetIDsResult{ - IDs: mockIDs, - Timestamp: mockTimestamp, - }, nil) - - // Mock Get to block until context is cancelled - mockDA.On("Get", mock.Anything, mockIDs, mock.Anything).Run(func(args mock.Arguments) { - ctx := args.Get(0).(context.Context) - <-ctx.Done() // Wait for context cancellation - }).Return(nil, context.DeadlineExceeded) - - // Use a very short timeout to ensure it triggers - result := types.RetrieveWithHelpers(context.Background(), mockDA, logger, dataLayerHeight, encodedNamespace.Bytes(), 1*time.Millisecond) - - assert.Equal(t, coreda.StatusError, result.Code) - assert.Contains(t, result.Message, "failed to get blobs for batch") - assert.Contains(t, result.Message, "context deadline exceeded") - mockDA.AssertExpectations(t) - }) -} diff --git a/types/epoch.go b/types/epoch.go new file mode 100644 index 0000000000..75d43e8048 --- /dev/null +++ b/types/epoch.go @@ -0,0 +1,50 @@ +package types + +// CalculateEpochNumber returns the deterministic epoch number for a given DA height. +// Epoch 1 starts at daStartHeight. 
+// +// Parameters: +// - daHeight: The DA height to calculate the epoch for +// - daStartHeight: The genesis DA start height +// - daEpochSize: The number of DA blocks per epoch (0 means all blocks in epoch 1) +// +// Returns: +// - Epoch number (0 if before daStartHeight, 1+ otherwise) +func CalculateEpochNumber(daHeight, daStartHeight, daEpochSize uint64) uint64 { + if daHeight < daStartHeight { + return 0 + } + + if daEpochSize == 0 { + return 1 + } + + return ((daHeight - daStartHeight) / daEpochSize) + 1 +} + +// CalculateEpochBoundaries returns the start and end DA heights for the epoch +// containing the given DA height. The boundaries are inclusive. +// +// Parameters: +// - daHeight: The DA height to calculate boundaries for +// - daStartHeight: The genesis DA start height +// - daEpochSize: The number of DA blocks per epoch (0 means single epoch) +// +// Returns: +// - start: The first DA height in the epoch (inclusive) +// - end: The last DA height in the epoch (inclusive) +func CalculateEpochBoundaries(daHeight, daStartHeight, daEpochSize uint64) (start, end uint64) { + if daEpochSize == 0 { + return daStartHeight, daStartHeight + } + + if daHeight < daStartHeight { + return daStartHeight, daStartHeight + daEpochSize - 1 + } + + epochNum := CalculateEpochNumber(daHeight, daStartHeight, daEpochSize) + start = daStartHeight + (epochNum-1)*daEpochSize + end = daStartHeight + epochNum*daEpochSize - 1 + + return start, end +} diff --git a/types/epoch_test.go b/types/epoch_test.go new file mode 100644 index 0000000000..295395d7b7 --- /dev/null +++ b/types/epoch_test.go @@ -0,0 +1,300 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCalculateEpochNumber(t *testing.T) { + tests := []struct { + name string + daStartHeight uint64 + daEpochSize uint64 + daHeight uint64 + expectedEpoch uint64 + }{ + { + name: "first epoch - start height", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 100, + expectedEpoch: 1, + }, + { + name: "first epoch - middle", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 105, + expectedEpoch: 1, + }, + { + name: "first epoch - last height", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 109, + expectedEpoch: 1, + }, + { + name: "second epoch - start", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 110, + expectedEpoch: 2, + }, + { + name: "second epoch - middle", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 115, + expectedEpoch: 2, + }, + { + name: "tenth epoch", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 195, + expectedEpoch: 10, + }, + { + name: "before start height", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 50, + expectedEpoch: 0, + }, + { + name: "zero epoch size", + daStartHeight: 100, + daEpochSize: 0, + daHeight: 200, + expectedEpoch: 1, + }, + { + name: "large epoch size", + daStartHeight: 1000, + daEpochSize: 1000, + daHeight: 2500, + expectedEpoch: 2, + }, + { + name: "start height zero", + daStartHeight: 0, + daEpochSize: 5, + daHeight: 10, + expectedEpoch: 3, + }, + { + name: "epoch size one", + daStartHeight: 100, + daEpochSize: 1, + daHeight: 105, + expectedEpoch: 6, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + epoch := CalculateEpochNumber(tt.daHeight, tt.daStartHeight, tt.daEpochSize) + assert.Equal(t, tt.expectedEpoch, epoch) + }) + } +} + +func TestCalculateEpochBoundaries(t *testing.T) { + tests := []struct { + name string + daStartHeight uint64 + daEpochSize uint64 + daHeight uint64 + expectedStart uint64 + 
expectedEnd uint64 + }{ + { + name: "first epoch", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 105, + expectedStart: 100, + expectedEnd: 109, + }, + { + name: "second epoch", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 110, + expectedStart: 110, + expectedEnd: 119, + }, + { + name: "third epoch - last height", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 129, + expectedStart: 120, + expectedEnd: 129, + }, + { + name: "before start height returns first epoch", + daStartHeight: 100, + daEpochSize: 10, + daHeight: 50, + expectedStart: 100, + expectedEnd: 109, + }, + { + name: "before start height with zero epoch size", + daStartHeight: 2, + daEpochSize: 0, + daHeight: 1, + expectedStart: 2, + expectedEnd: 2, + }, + { + name: "zero epoch size", + daStartHeight: 100, + daEpochSize: 0, + daHeight: 200, + expectedStart: 100, + expectedEnd: 100, + }, + { + name: "large epoch", + daStartHeight: 1000, + daEpochSize: 1000, + daHeight: 1500, + expectedStart: 1000, + expectedEnd: 1999, + }, + { + name: "epoch boundary exact start", + daStartHeight: 100, + daEpochSize: 50, + daHeight: 100, + expectedStart: 100, + expectedEnd: 149, + }, + { + name: "epoch boundary exact end of first epoch", + daStartHeight: 100, + daEpochSize: 50, + daHeight: 149, + expectedStart: 100, + expectedEnd: 149, + }, + { + name: "epoch boundary exact start of second epoch", + daStartHeight: 100, + daEpochSize: 50, + daHeight: 150, + expectedStart: 150, + expectedEnd: 199, + }, + { + name: "start height zero", + daStartHeight: 0, + daEpochSize: 5, + daHeight: 10, + expectedStart: 10, + expectedEnd: 14, + }, + { + name: "epoch size one", + daStartHeight: 100, + daEpochSize: 1, + daHeight: 105, + expectedStart: 105, + expectedEnd: 105, + }, + { + name: "very large numbers", + daStartHeight: 1000000, + daEpochSize: 100000, + daHeight: 5500000, + expectedStart: 5500000, + expectedEnd: 5599999, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + start, end := CalculateEpochBoundaries(tt.daHeight, tt.daStartHeight, tt.daEpochSize) + assert.Equal(t, tt.expectedStart, start, "start height mismatch") + assert.Equal(t, tt.expectedEnd, end, "end height mismatch") + }) + } +} + +func TestEpochConsistency(t *testing.T) { + tests := []struct { + name string + daStartHeight uint64 + daEpochSize uint64 + }{ + { + name: "standard epoch", + daStartHeight: 100, + daEpochSize: 10, + }, + { + name: "large epoch", + daStartHeight: 1000, + daEpochSize: 1000, + }, + { + name: "small epoch", + daStartHeight: 0, + daEpochSize: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Test that all heights in an epoch return the same epoch number + // and boundaries + for epoch := uint64(1); epoch <= 10; epoch++ { + // Calculate expected boundaries for this epoch + expectedStart := tt.daStartHeight + (epoch-1)*tt.daEpochSize + expectedEnd := tt.daStartHeight + epoch*tt.daEpochSize - 1 + + // Test every height in the epoch + for h := expectedStart; h <= expectedEnd; h++ { + epochNum := CalculateEpochNumber(h, tt.daStartHeight, tt.daEpochSize) + assert.Equal(t, epoch, epochNum, "height %d should be in epoch %d", h, epoch) + + start, end := CalculateEpochBoundaries(h, tt.daStartHeight, tt.daEpochSize) + assert.Equal(t, expectedStart, start, "height %d should have start %d", h, expectedStart) + assert.Equal(t, expectedEnd, end, "height %d should have end %d", h, expectedEnd) + } + } + }) + } +} + +func TestEpochBoundaryTransitions(t *testing.T) { + daStartHeight := uint64(100) + 
daEpochSize := uint64(10)
+
+	// Test that epoch boundaries are correctly calculated at transitions
+	transitions := []struct {
+		height        uint64
+		expectedEpoch uint64
+		expectedStart uint64
+		expectedEnd   uint64
+	}{
+		{100, 1, 100, 109}, // First height of epoch 1
+		{109, 1, 100, 109}, // Last height of epoch 1
+		{110, 2, 110, 119}, // First height of epoch 2
+		{119, 2, 110, 119}, // Last height of epoch 2
+		{120, 3, 120, 129}, // First height of epoch 3
+	}
+
+	for _, tr := range transitions {
+		epoch := CalculateEpochNumber(tr.height, daStartHeight, daEpochSize)
+		assert.Equal(t, tr.expectedEpoch, epoch, "height %d epoch mismatch", tr.height)
+
+		start, end := CalculateEpochBoundaries(tr.height, daStartHeight, daEpochSize)
+		assert.Equal(t, tr.expectedStart, start, "height %d start mismatch", tr.height)
+		assert.Equal(t, tr.expectedEnd, end, "height %d end mismatch", tr.height)
+	}
+}
diff --git a/types/state.go b/types/state.go
index a439f6c34c..4b87dc6b5f 100644
--- a/types/state.go
+++ b/types/state.go
@@ -30,7 +30,8 @@ type State struct {
 	// LastHeaderHash is the hash of the header of the last block
 	LastHeaderHash Hash

-	// DAHeight identifies DA block containing the latest applied Evolve block.
+	// DAHeight identifies the DA block containing the latest applied Evolve block for a syncing node.
+	// For an aggregator, this corresponds to the last DA block height fetched for forced inclusion transactions.
 	DAHeight uint64

 	// the latest AppHash we've received from calling abci.Commit()
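
For context, the epoch helpers added in `types/epoch.go` compose as in the minimal sketch below. It assumes only the module path `github.com/evstack/ev-node` used elsewhere in this diff; the concrete heights are illustrative values, not taken from the PR:

	package main

	import (
		"fmt"

		"github.com/evstack/ev-node/types"
	)

	func main() {
		// With daStartHeight=100 and daEpochSize=10, epoch 1 covers DA
		// heights 100-109, epoch 2 covers 110-119, and so on.
		epoch := types.CalculateEpochNumber(115, 100, 10) // ((115-100)/10)+1 = 2
		start, end := types.CalculateEpochBoundaries(115, 100, 10)

		fmt.Println(epoch, start, end) // 2 110 119
	}

This is the same arithmetic the new `TestEpochBoundaryTransitions` table exercises at the epoch edges (heights 100, 109, 110, 119, 120).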