diff --git a/apps/evm/Dockerfile b/apps/evm/Dockerfile index 202bb9a232..5c674b21da 100644 --- a/apps/evm/Dockerfile +++ b/apps/evm/Dockerfile @@ -3,6 +3,7 @@ FROM golang:1.24-alpine AS build-env WORKDIR /src COPY core core +COPY da da COPY go.mod go.sum ./ RUN go mod download diff --git a/apps/evm/cmd/run.go b/apps/evm/cmd/run.go index 8d79264042..1752661243 100644 --- a/apps/evm/cmd/run.go +++ b/apps/evm/cmd/run.go @@ -7,25 +7,23 @@ import ( "os" "path/filepath" + "github.com/evstack/ev-node/da" + celestiada "github.com/evstack/ev-node/da/celestia" + "github.com/evstack/ev-node/node" + "github.com/evstack/ev-node/sequencers/single" + "github.com/ethereum/go-ethereum/common" - "github.com/ipfs/go-datastore" - "github.com/rs/zerolog" "github.com/spf13/cobra" - "github.com/evstack/ev-node/core/da" - "github.com/evstack/ev-node/core/execution" - coresequencer "github.com/evstack/ev-node/core/sequencer" - "github.com/evstack/ev-node/da/jsonrpc" "github.com/evstack/ev-node/execution/evm" - "github.com/evstack/ev-node/node" + + "github.com/evstack/ev-node/core/execution" rollcmd "github.com/evstack/ev-node/pkg/cmd" "github.com/evstack/ev-node/pkg/config" - "github.com/evstack/ev-node/pkg/genesis" genesispkg "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/p2p" "github.com/evstack/ev-node/pkg/p2p/key" "github.com/evstack/ev-node/pkg/store" - "github.com/evstack/ev-node/sequencers/single" ) var RunCmd = &cobra.Command{ @@ -55,12 +53,14 @@ var RunCmd = &cobra.Command{ logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") - daJrpc, err := jsonrpc.NewClient(context.Background(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + ctx := context.Background() + daClient, err := celestiada.NewClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) if err != nil { return err } + defer daClient.Close() - 
datastore, err := store.NewDefaultKVStore(nodeConfig.RootDir, nodeConfig.DBPath, "evm") + datastore, err := store.NewDefaultKVStore(nodeConfig.RootDir, nodeConfig.DBPath, "evm-single") if err != nil { return err } @@ -75,8 +75,21 @@ var RunCmd = &cobra.Command{ logger.Warn().Msg("da_start_height is not set in genesis.json, ask your chain developer") } - // Create sequencer based on configuration - sequencer, err := createSequencer(context.Background(), logger, datastore, &daJrpc.DA, nodeConfig, genesis) + singleMetrics, err := single.DefaultMetricsProvider(nodeConfig.Instrumentation.IsPrometheusEnabled())(genesis.ChainID) + if err != nil { + return err + } + + sequencer, err := single.NewSequencer( + ctx, + logger, + datastore, + daClient, + []byte(genesis.ChainID), + nodeConfig.Node.BlockTime.Duration, + singleMetrics, + nodeConfig.Node.Aggregator, + ) if err != nil { return err } @@ -91,7 +104,7 @@ var RunCmd = &cobra.Command{ return err } - return rollcmd.StartNode(logger, cmd, executor, sequencer, &daJrpc.DA, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) + return rollcmd.StartNode(logger, cmd, executor, sequencer, daClient, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) }, } @@ -100,37 +113,6 @@ func init() { addFlags(RunCmd) } -// createSequencer creates a sequencer based on the configuration. 
-func createSequencer( - ctx context.Context, - logger zerolog.Logger, - datastore datastore.Batching, - da da.DA, - nodeConfig config.Config, - genesis genesis.Genesis, -) (coresequencer.Sequencer, error) { - singleMetrics, err := single.NopMetrics() - if err != nil { - return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) - } - - sequencer, err := single.NewSequencer( - ctx, - logger, - datastore, - da, - []byte(genesis.ChainID), - nodeConfig.Node.BlockTime.Duration, - singleMetrics, - nodeConfig.Node.Aggregator, - ) - if err != nil { - return nil, fmt.Errorf("failed to create single sequencer: %w", err) - } - - return sequencer, nil -} - func createExecutionClient(cmd *cobra.Command) (execution.Executor, error) { // Read execution client parameters from flags ethURL, err := cmd.Flags().GetString(evm.FlagEvmEthURL) diff --git a/apps/evm/go.mod b/apps/evm/go.mod index c267dbe23f..6c05cb0823 100644 --- a/apps/evm/go.mod +++ b/apps/evm/go.mod @@ -18,7 +18,6 @@ require ( github.com/evstack/ev-node/da v1.0.0-beta.6 github.com/evstack/ev-node/execution/evm v1.0.0-beta.3 github.com/ipfs/go-datastore v0.9.0 - github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.1 ) @@ -31,7 +30,9 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.20.0 // indirect github.com/celestiaorg/go-libp2p-messenger v0.2.2 // indirect + github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 // indirect github.com/celestiaorg/go-square/v3 v3.0.2 // indirect + github.com/celestiaorg/nmt v0.24.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/consensys/gnark-crypto v0.18.1 // indirect github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect @@ -150,6 +151,7 @@ require ( github.com/quic-go/quic-go v0.54.1 // indirect github.com/quic-go/webtransport-go v0.9.0 // indirect github.com/rivo/uniseg v0.2.0 // indirect + github.com/rs/zerolog v1.34.0 // indirect github.com/sagikazarmark/locafero 
v0.11.0 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect diff --git a/apps/evm/go.sum b/apps/evm/go.sum index 173fb3ba0d..ceeb4a29c2 100644 --- a/apps/evm/go.sum +++ b/apps/evm/go.sum @@ -34,8 +34,12 @@ github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBT github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/celestiaorg/go-libp2p-messenger v0.2.2 h1:osoUfqjss7vWTIZrrDSy953RjQz+ps/vBFE7bychLEc= github.com/celestiaorg/go-libp2p-messenger v0.2.2/go.mod h1:oTCRV5TfdO7V/k6nkx7QjQzGrWuJbupv+0o1cgnY2i4= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 h1:PYInrsYzrDIsZW9Yb86OTi2aEKuPcpgJt6Mc0Jlc/yg= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076/go.mod h1:hlidgivKyvv7m4Yl2Fdf2mSTmazZYxX8+bnr5IQrI98= github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= +github.com/celestiaorg/nmt v0.24.2 h1:LlpJSPOd6/Lw1Ig6HUhZuqiINHLka/ZSRTBzlNJpchg= +github.com/celestiaorg/nmt v0.24.2/go.mod h1:vgLBpWBi8F5KLxTdXSwb7AU4NhiIQ1AQRGa+PzdcLEA= github.com/celestiaorg/utils v0.1.0 h1:WsP3O8jF7jKRgLNFmlDCwdThwOFMFxg0MnqhkLFVxPo= github.com/celestiaorg/utils v0.1.0/go.mod h1:vQTh7MHnvpIeCQZ2/Ph+w7K1R2UerDheZbgJEJD2hSU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -183,6 +187,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod 
h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -522,6 +528,12 @@ github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe/go.mod h1:jZ github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= @@ -760,8 +772,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= -gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/apps/grpc/README.md b/apps/grpc/README.md index dac9a847f2..51a49ebc09 100644 --- a/apps/grpc/README.md +++ b/apps/grpc/README.md @@ -150,5 +150,5 @@ If you have issues connecting to the DA layer: ## See Also - [Evolve Documentation](https://ev.xyz) -- [gRPC Execution Interface](../../../execution/grpc/README.md) -- [Single Sequencer Documentation](../../../sequencers/single/README.md) +- [gRPC Execution Interface](../../execution/grpc/README.md) +- [Single Sequencer Documentation](../../sequencers/single/README.md) diff --git a/apps/grpc/cmd/run.go b/apps/grpc/cmd/run.go index 484f51d7a5..a262c6449b 100644 --- a/apps/grpc/cmd/run.go +++ b/apps/grpc/cmd/run.go @@ -1,23 +1,18 @@ package cmd import ( - "context" "fmt" "path/filepath" - "github.com/ipfs/go-datastore" - "github.com/rs/zerolog" "github.com/spf13/cobra" - "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/core/execution" - coresequencer "github.com/evstack/ev-node/core/sequencer" - "github.com/evstack/ev-node/da/jsonrpc" + celestiada "github.com/evstack/ev-node/da/celestia" executiongrpc "github.com/evstack/ev-node/execution/grpc" "github.com/evstack/ev-node/node" rollcmd "github.com/evstack/ev-node/pkg/cmd" "github.com/evstack/ev-node/pkg/config" - "github.com/evstack/ev-node/pkg/genesis" rollgenesis "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/p2p" 
"github.com/evstack/ev-node/pkg/p2p/key" @@ -57,13 +52,14 @@ The execution client must implement the Evolve execution gRPC interface.`, logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") // Create DA client - daJrpc, err := jsonrpc.NewClient(cmd.Context(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daClient, err := celestiada.NewClient(cmd.Context(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) if err != nil { return err } + defer daClient.Close() // Create datastore - datastore, err := store.NewDefaultKVStore(nodeConfig.RootDir, nodeConfig.DBPath, "evgrpc") + datastore, err := store.NewDefaultKVStore(nodeConfig.RootDir, nodeConfig.DBPath, "grpc-single") if err != nil { return err } @@ -78,8 +74,23 @@ The execution client must implement the Evolve execution gRPC interface.`, logger.Warn().Msg("da_start_height is not set in genesis.json, ask your chain developer") } - // Create sequencer based on configuration - sequencer, err := createSequencer(cmd.Context(), logger, datastore, &daJrpc.DA, nodeConfig, genesis) + // Create metrics provider + singleMetrics, err := single.DefaultMetricsProvider(nodeConfig.Instrumentation.IsPrometheusEnabled())(genesis.ChainID) + if err != nil { + return err + } + + // Create sequencer + sequencer, err := single.NewSequencer( + cmd.Context(), + logger, + datastore, + daClient, + []byte(genesis.ChainID), + nodeConfig.Node.BlockTime.Duration, + singleMetrics, + nodeConfig.Node.Aggregator, + ) if err != nil { return err } @@ -97,7 +108,7 @@ The execution client must implement the Evolve execution gRPC interface.`, } // Start the node - return rollcmd.StartNode(logger, cmd, executor, sequencer, &daJrpc.DA, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) + return rollcmd.StartNode(logger, cmd, executor, sequencer, daClient, p2pClient, datastore, nodeConfig, genesis, 
node.NodeOptions{}) }, } @@ -109,37 +120,6 @@ func init() { addGRPCFlags(RunCmd) } -// createSequencer creates a sequencer based on the configuration. -func createSequencer( - ctx context.Context, - logger zerolog.Logger, - datastore datastore.Batching, - da da.DA, - nodeConfig config.Config, - genesis genesis.Genesis, -) (coresequencer.Sequencer, error) { - singleMetrics, err := single.NopMetrics() - if err != nil { - return nil, fmt.Errorf("failed to create single sequencer metrics: %w", err) - } - - sequencer, err := single.NewSequencer( - ctx, - logger, - datastore, - da, - []byte(genesis.ChainID), - nodeConfig.Node.BlockTime.Duration, - singleMetrics, - nodeConfig.Node.Aggregator, - ) - if err != nil { - return nil, fmt.Errorf("failed to create single sequencer: %w", err) - } - - return sequencer, nil -} - // createGRPCExecutionClient creates a new gRPC execution client from command flags func createGRPCExecutionClient(cmd *cobra.Command) (execution.Executor, error) { // Get the gRPC executor URL from flags diff --git a/apps/grpc/go.mod b/apps/grpc/go.mod index 44da32e0e7..7b195a4aa8 100644 --- a/apps/grpc/go.mod +++ b/apps/grpc/go.mod @@ -16,8 +16,6 @@ require ( github.com/evstack/ev-node/core v1.0.0-beta.5 github.com/evstack/ev-node/da v1.0.0-beta.6 github.com/evstack/ev-node/execution/grpc v0.0.0 - github.com/ipfs/go-datastore v0.9.0 - github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.1 ) @@ -28,7 +26,9 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/celestiaorg/go-header v0.7.4 // indirect github.com/celestiaorg/go-libp2p-messenger v0.2.2 // indirect + github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 // indirect github.com/celestiaorg/go-square/v3 v3.0.2 // indirect + github.com/celestiaorg/nmt v0.24.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c 
// indirect @@ -58,6 +58,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/boxo v0.35.0 // indirect github.com/ipfs/go-cid v0.5.0 // indirect + github.com/ipfs/go-datastore v0.9.0 // indirect github.com/ipfs/go-ds-badger4 v0.1.8 // indirect github.com/ipfs/go-log/v2 v2.8.1 // indirect github.com/ipld/go-ipld-prime v0.21.0 // indirect @@ -131,6 +132,7 @@ require ( github.com/quic-go/qpack v0.5.1 // indirect github.com/quic-go/quic-go v0.54.1 // indirect github.com/quic-go/webtransport-go v0.9.0 // indirect + github.com/rs/zerolog v1.34.0 // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect diff --git a/apps/grpc/go.sum b/apps/grpc/go.sum index c2e0e46a7d..d5a7b6f672 100644 --- a/apps/grpc/go.sum +++ b/apps/grpc/go.sum @@ -24,8 +24,12 @@ github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBT github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/celestiaorg/go-libp2p-messenger v0.2.2 h1:osoUfqjss7vWTIZrrDSy953RjQz+ps/vBFE7bychLEc= github.com/celestiaorg/go-libp2p-messenger v0.2.2/go.mod h1:oTCRV5TfdO7V/k6nkx7QjQzGrWuJbupv+0o1cgnY2i4= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 h1:PYInrsYzrDIsZW9Yb86OTi2aEKuPcpgJt6Mc0Jlc/yg= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076/go.mod h1:hlidgivKyvv7m4Yl2Fdf2mSTmazZYxX8+bnr5IQrI98= github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= +github.com/celestiaorg/nmt v0.24.2 h1:LlpJSPOd6/Lw1Ig6HUhZuqiINHLka/ZSRTBzlNJpchg= +github.com/celestiaorg/nmt v0.24.2/go.mod h1:vgLBpWBi8F5KLxTdXSwb7AU4NhiIQ1AQRGa+PzdcLEA= github.com/celestiaorg/utils v0.1.0 
h1:WsP3O8jF7jKRgLNFmlDCwdThwOFMFxg0MnqhkLFVxPo= github.com/celestiaorg/utils v0.1.0/go.mod h1:vQTh7MHnvpIeCQZ2/Ph+w7K1R2UerDheZbgJEJD2hSU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -123,6 +127,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -429,6 +435,12 @@ github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/urfave/cli 
v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= @@ -654,8 +666,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= -gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index c72d220cdd..572a42305e 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -8,8 +8,8 @@ import ( "github.com/spf13/cobra" kvexecutor "github.com/evstack/ev-node/apps/testapp/kv" - "github.com/evstack/ev-node/core/da" - "github.com/evstack/ev-node/da/jsonrpc" + "github.com/evstack/ev-node/da" + celestiada "github.com/evstack/ev-node/da/celestia" "github.com/evstack/ev-node/node" rollcmd "github.com/evstack/ev-node/pkg/cmd" genesispkg "github.com/evstack/ev-node/pkg/genesis" @@ -51,10 +51,11 @@ var RunCmd = &cobra.Command{ logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") - daJrpc, err := jsonrpc.NewClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daClient, err := celestiada.NewClient(ctx, logger, 
nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) if err != nil { return err } + defer daClient.Close() nodeKey, err := key.LoadNodeKey(filepath.Dir(nodeConfig.ConfigPath())) if err != nil { @@ -96,7 +97,7 @@ var RunCmd = &cobra.Command{ ctx, logger, datastore, - &daJrpc.DA, + daClient, []byte(genesis.ChainID), nodeConfig.Node.BlockTime.Duration, singleMetrics, @@ -111,6 +112,6 @@ var RunCmd = &cobra.Command{ return err } - return rollcmd.StartNode(logger, cmd, executor, sequencer, &daJrpc.DA, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) + return rollcmd.StartNode(logger, cmd, executor, sequencer, daClient, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) }, } diff --git a/apps/testapp/go.mod b/apps/testapp/go.mod index f43dfcae47..bdb2519c9d 100644 --- a/apps/testapp/go.mod +++ b/apps/testapp/go.mod @@ -13,7 +13,6 @@ replace ( require ( github.com/celestiaorg/go-header v0.7.4 github.com/evstack/ev-node v1.0.0-beta.9 - github.com/evstack/ev-node/core v1.0.0-beta.5 github.com/evstack/ev-node/da v0.0.0-00010101000000-000000000000 github.com/ipfs/go-datastore v0.9.0 github.com/spf13/cobra v1.10.1 @@ -26,7 +25,9 @@ require ( github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/celestiaorg/go-libp2p-messenger v0.2.2 // indirect + github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 // indirect github.com/celestiaorg/go-square/v3 v3.0.2 // indirect + github.com/celestiaorg/nmt v0.24.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect @@ -34,6 +35,7 @@ require ( github.com/dgraph-io/badger/v4 v4.5.1 // indirect github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect + github.com/evstack/ev-node/core v1.0.0-beta.5 // indirect 
github.com/filecoin-project/go-clock v0.1.0 // indirect github.com/filecoin-project/go-jsonrpc v0.9.0 // indirect github.com/flynn/noise v1.1.0 // indirect diff --git a/apps/testapp/go.sum b/apps/testapp/go.sum index eeafba1a84..11b01cf828 100644 --- a/apps/testapp/go.sum +++ b/apps/testapp/go.sum @@ -24,8 +24,12 @@ github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBT github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/celestiaorg/go-libp2p-messenger v0.2.2 h1:osoUfqjss7vWTIZrrDSy953RjQz+ps/vBFE7bychLEc= github.com/celestiaorg/go-libp2p-messenger v0.2.2/go.mod h1:oTCRV5TfdO7V/k6nkx7QjQzGrWuJbupv+0o1cgnY2i4= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 h1:PYInrsYzrDIsZW9Yb86OTi2aEKuPcpgJt6Mc0Jlc/yg= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076/go.mod h1:hlidgivKyvv7m4Yl2Fdf2mSTmazZYxX8+bnr5IQrI98= github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= +github.com/celestiaorg/nmt v0.24.2 h1:LlpJSPOd6/Lw1Ig6HUhZuqiINHLka/ZSRTBzlNJpchg= +github.com/celestiaorg/nmt v0.24.2/go.mod h1:vgLBpWBi8F5KLxTdXSwb7AU4NhiIQ1AQRGa+PzdcLEA= github.com/celestiaorg/utils v0.1.0 h1:WsP3O8jF7jKRgLNFmlDCwdThwOFMFxg0MnqhkLFVxPo= github.com/celestiaorg/utils v0.1.0/go.mod h1:vQTh7MHnvpIeCQZ2/Ph+w7K1R2UerDheZbgJEJD2hSU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -123,6 +127,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod 
h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -428,6 +434,12 @@ github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= @@ -653,8 +665,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= -gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/block/components.go b/block/components.go index 546cda62c3..6c78b46ad1 100644 --- a/block/components.go +++ b/block/components.go @@ -13,9 +13,9 @@ import ( "github.com/evstack/ev-node/block/internal/reaping" "github.com/evstack/ev-node/block/internal/submitting" "github.com/evstack/ev-node/block/internal/syncing" - coreda "github.com/evstack/ev-node/core/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/signer" @@ -131,7 +131,7 @@ func NewSyncComponents( genesis genesis.Genesis, store store.Store, exec coreexecutor.Executor, - da coreda.DA, + da da.DA, headerStore common.Broadcaster[*types.SignedHeader], dataStore common.Broadcaster[*types.Data], logger zerolog.Logger, @@ -144,15 +144,13 @@ func NewSyncComponents( return nil, fmt.Errorf("failed to create cache manager: %w", err) } - daClient := NewDAClient(da, config, logger) - // error channel for critical failures errorCh := make(chan error, 1) syncer := syncing.NewSyncer( store, exec, - daClient, + da, cacheManager, metrics, config, @@ -165,7 +163,7 @@ func NewSyncComponents( ) // Create submitter for sync nodes (no signer, only DA inclusion processing) - daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger) + daSubmitter := 
submitting.NewDASubmitter(da, config, genesis, blockOpts, metrics, logger) submitter := submitting.NewSubmitter( store, exec, @@ -196,7 +194,7 @@ func NewAggregatorComponents( store store.Store, exec coreexecutor.Executor, sequencer coresequencer.Sequencer, - da coreda.DA, + da da.DA, signer signer.Signer, headerBroadcaster common.Broadcaster[*types.SignedHeader], dataBroadcaster common.Broadcaster[*types.Data], @@ -245,9 +243,8 @@ func NewAggregatorComponents( return nil, fmt.Errorf("failed to create reaper: %w", err) } - // Create DA client and submitter for aggregator nodes (with signer for submission) - daClient := NewDAClient(da, config, logger) - daSubmitter := submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger) + // Create submitter for aggregator nodes (with signer for submission) + daSubmitter := submitting.NewDASubmitter(da, config, genesis, blockOpts, metrics, logger) submitter := submitting.NewSubmitter( store, exec, diff --git a/block/components_test.go b/block/components_test.go index eadf45328c..9e6991605d 100644 --- a/block/components_test.go +++ b/block/components_test.go @@ -15,8 +15,8 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - coreda "github.com/evstack/ev-node/core/da" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/signer/noop" @@ -92,7 +92,7 @@ func TestNewSyncComponents_Creation(t *testing.T) { } mockExec := testmocks.NewMockExecutor(t) - dummyDA := coreda.NewDummyDA(10_000_000, 10*time.Millisecond) + dummyDA := da.NewDummyDA(10_000_000, 10*time.Millisecond) // Just test that the constructor doesn't panic - don't start the components // to avoid P2P store dependencies @@ -143,7 +143,7 @@ func TestNewAggregatorComponents_Creation(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := 
testmocks.NewMockSequencer(t) - dummyDA := coreda.NewDummyDA(10_000_000, 10*time.Millisecond) + dummyDA := da.NewDummyDA(10_000_000, 10*time.Millisecond) components, err := NewAggregatorComponents( cfg, @@ -197,7 +197,7 @@ func TestExecutor_RealExecutionClientFailure_StopsNode(t *testing.T) { // Create mock executor that will fail on ExecuteTxs mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - dummyDA := coreda.NewDummyDA(10_000_000, 10*time.Millisecond) + dummyDA := da.NewDummyDA(10_000_000, 10*time.Millisecond) // Mock InitChain to succeed initially mockExec.On("InitChain", mock.Anything, mock.Anything, mock.Anything, mock.Anything). diff --git a/block/internal/da/client.go b/block/internal/da/client.go deleted file mode 100644 index 571e5f7650..0000000000 --- a/block/internal/da/client.go +++ /dev/null @@ -1,264 +0,0 @@ -// Package da provides a reusable wrapper around the core DA interface -// with common configuration for namespace handling and timeouts. -package da - -import ( - "context" - "errors" - "fmt" - "strings" - "time" - - "github.com/rs/zerolog" - - coreda "github.com/evstack/ev-node/core/da" -) - -// Client is the interface representing the DA client. -type Client interface { - Submit(ctx context.Context, data [][]byte, gasPrice float64, namespace []byte, options []byte) coreda.ResultSubmit - Retrieve(ctx context.Context, height uint64, namespace []byte) coreda.ResultRetrieve - RetrieveHeaders(ctx context.Context, height uint64) coreda.ResultRetrieve - RetrieveData(ctx context.Context, height uint64) coreda.ResultRetrieve - - GetHeaderNamespace() []byte - GetDataNamespace() []byte - GetDA() coreda.DA -} - -// client provides a reusable wrapper around the core DA interface -// with common configuration for namespace handling and timeouts. 
-type client struct { - da coreda.DA - logger zerolog.Logger - defaultTimeout time.Duration - namespaceBz []byte - namespaceDataBz []byte -} - -// Config contains configuration for the DA client. -type Config struct { - DA coreda.DA - Logger zerolog.Logger - DefaultTimeout time.Duration - Namespace string - DataNamespace string -} - -// NewClient creates a new DA client with pre-calculated namespace bytes. -func NewClient(cfg Config) *client { - if cfg.DefaultTimeout == 0 { - cfg.DefaultTimeout = 30 * time.Second - } - - return &client{ - da: cfg.DA, - logger: cfg.Logger.With().Str("component", "da_client").Logger(), - defaultTimeout: cfg.DefaultTimeout, - namespaceBz: coreda.NamespaceFromString(cfg.Namespace).Bytes(), - namespaceDataBz: coreda.NamespaceFromString(cfg.DataNamespace).Bytes(), - } -} - -// Submit submits blobs to the DA layer with the specified options. -func (c *client) Submit(ctx context.Context, data [][]byte, gasPrice float64, namespace []byte, options []byte) coreda.ResultSubmit { - ids, err := c.da.SubmitWithOptions(ctx, data, gasPrice, namespace, options) - - // calculate blob size - var blobSize uint64 - for _, blob := range data { - blobSize += uint64(len(blob)) - } - - // Handle errors returned by Submit - if err != nil { - if errors.Is(err, context.Canceled) { - c.logger.Debug().Msg("DA submission canceled due to context cancellation") - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusContextCanceled, - Message: "submission canceled", - IDs: ids, - BlobSize: blobSize, - }, - } - } - status := coreda.StatusError - switch { - case errors.Is(err, coreda.ErrTxTimedOut): - status = coreda.StatusNotIncludedInBlock - case errors.Is(err, coreda.ErrTxAlreadyInMempool): - status = coreda.StatusAlreadyInMempool - case errors.Is(err, coreda.ErrTxIncorrectAccountSequence): - status = coreda.StatusIncorrectAccountSequence - case errors.Is(err, coreda.ErrBlobSizeOverLimit): - status = coreda.StatusTooBig - case 
errors.Is(err, coreda.ErrContextDeadline): - status = coreda.StatusContextDeadline - } - - // Use debug level for StatusTooBig as it gets handled later in submitToDA through recursive splitting - if status == coreda.StatusTooBig { - c.logger.Debug().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed") - } else { - c.logger.Error().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed") - } - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: status, - Message: "failed to submit blobs: " + err.Error(), - IDs: ids, - SubmittedCount: uint64(len(ids)), - Height: 0, - Timestamp: time.Now(), - BlobSize: blobSize, - }, - } - } - - if len(ids) == 0 && len(data) > 0 { - c.logger.Warn().Msg("DA submission returned no IDs for non-empty input data") - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusError, - Message: "failed to submit blobs: no IDs returned despite non-empty input", - }, - } - } - - // Get height from the first ID - var height uint64 - if len(ids) > 0 { - height, _, err = coreda.SplitID(ids[0]) - if err != nil { - c.logger.Error().Err(err).Msg("failed to split ID") - } - } - - c.logger.Debug().Int("num_ids", len(ids)).Msg("DA submission successful") - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, - IDs: ids, - SubmittedCount: uint64(len(ids)), - Height: height, - BlobSize: blobSize, - Timestamp: time.Now(), - }, - } -} - -// Retrieve retrieves blobs from the DA layer at the specified height and namespace. -func (c *client) Retrieve(ctx context.Context, height uint64, namespace []byte) coreda.ResultRetrieve { - // 1. 
Get IDs - getIDsCtx, cancel := context.WithTimeout(ctx, c.defaultTimeout) - defer cancel() - idsResult, err := c.da.GetIDs(getIDsCtx, height, namespace) - if err != nil { - // Handle specific "not found" error - if strings.Contains(err.Error(), coreda.ErrBlobNotFound.Error()) { - c.logger.Debug().Uint64("height", height).Msg("Blobs not found at height") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusNotFound, - Message: coreda.ErrBlobNotFound.Error(), - Height: height, - Timestamp: time.Now(), - }, - } - } - if strings.Contains(err.Error(), coreda.ErrHeightFromFuture.Error()) { - c.logger.Debug().Uint64("height", height).Msg("Blobs not found at height") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusHeightFromFuture, - Message: coreda.ErrHeightFromFuture.Error(), - Height: height, - Timestamp: time.Now(), - }, - } - } - // Handle other errors during GetIDs - c.logger.Error().Uint64("height", height).Err(err).Msg("Failed to get IDs") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusError, - Message: fmt.Sprintf("failed to get IDs: %s", err.Error()), - Height: height, - Timestamp: time.Now(), - }, - } - } - - // This check should technically be redundant if GetIDs correctly returns ErrBlobNotFound - if idsResult == nil || len(idsResult.IDs) == 0 { - c.logger.Debug().Uint64("height", height).Msg("No IDs found at height") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusNotFound, - Message: coreda.ErrBlobNotFound.Error(), - Height: height, - Timestamp: time.Now(), - }, - } - } - // 2. 
Get Blobs using the retrieved IDs in batches - batchSize := 100 - blobs := make([][]byte, 0, len(idsResult.IDs)) - for i := 0; i < len(idsResult.IDs); i += batchSize { - end := min(i+batchSize, len(idsResult.IDs)) - - getBlobsCtx, cancel := context.WithTimeout(ctx, c.defaultTimeout) - batchBlobs, err := c.da.Get(getBlobsCtx, idsResult.IDs[i:end], namespace) - cancel() - if err != nil { - // Handle errors during Get - c.logger.Error().Uint64("height", height).Int("num_ids", len(idsResult.IDs)).Err(err).Msg("Failed to get blobs") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusError, - Message: fmt.Sprintf("failed to get blobs for batch %d-%d: %s", i, end-1, err.Error()), - Height: height, - Timestamp: time.Now(), - }, - } - } - blobs = append(blobs, batchBlobs...) - } - // Success - c.logger.Debug().Uint64("height", height).Int("num_blobs", len(blobs)).Msg("Successfully retrieved blobs") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, - Height: height, - IDs: idsResult.IDs, - Timestamp: idsResult.Timestamp, - }, - Data: blobs, - } -} - -// RetrieveHeaders retrieves blobs from the header namespace at the specified height. -func (c *client) RetrieveHeaders(ctx context.Context, height uint64) coreda.ResultRetrieve { - return c.Retrieve(ctx, height, c.namespaceBz) -} - -// RetrieveData retrieves blobs from the data namespace at the specified height. -func (c *client) RetrieveData(ctx context.Context, height uint64) coreda.ResultRetrieve { - return c.Retrieve(ctx, height, c.namespaceDataBz) -} - -// GetHeaderNamespace returns the header namespace bytes. -func (c *client) GetHeaderNamespace() []byte { - return c.namespaceBz -} - -// GetDataNamespace returns the data namespace bytes. -func (c *client) GetDataNamespace() []byte { - return c.namespaceDataBz -} - -// GetDA returns the underlying DA interface for advanced usage. 
-func (c *client) GetDA() coreda.DA { - return c.da -} diff --git a/block/internal/da/client_test.go b/block/internal/da/client_test.go deleted file mode 100644 index 788aab2b39..0000000000 --- a/block/internal/da/client_test.go +++ /dev/null @@ -1,458 +0,0 @@ -package da - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/rs/zerolog" - "gotest.tools/v3/assert" - - coreda "github.com/evstack/ev-node/core/da" -) - -// mockDA is a simple mock implementation of coreda.DA for testing -type mockDA struct { - submitFunc func(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte) ([]coreda.ID, error) - submitWithOptions func(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte, options []byte) ([]coreda.ID, error) - getIDsFunc func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) - getFunc func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) -} - -func (m *mockDA) Submit(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte) ([]coreda.ID, error) { - if m.submitFunc != nil { - return m.submitFunc(ctx, blobs, gasPrice, namespace) - } - return nil, nil -} - -func (m *mockDA) SubmitWithOptions(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte, options []byte) ([]coreda.ID, error) { - if m.submitWithOptions != nil { - return m.submitWithOptions(ctx, blobs, gasPrice, namespace, options) - } - return nil, nil -} - -func (m *mockDA) GetIDs(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { - if m.getIDsFunc != nil { - return m.getIDsFunc(ctx, height, namespace) - } - return nil, errors.New("not implemented") -} - -func (m *mockDA) Get(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { - if m.getFunc != nil { - return m.getFunc(ctx, ids, namespace) - } - return nil, errors.New("not implemented") -} - -func (m *mockDA) GetProofs(ctx 
context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Proof, error) { - return nil, errors.New("not implemented") -} - -func (m *mockDA) Commit(ctx context.Context, blobs []coreda.Blob, namespace []byte) ([]coreda.Commitment, error) { - return nil, errors.New("not implemented") -} - -func (m *mockDA) Validate(ctx context.Context, ids []coreda.ID, proofs []coreda.Proof, namespace []byte) ([]bool, error) { - return nil, errors.New("not implemented") -} - -func TestNewClient(t *testing.T) { - tests := []struct { - name string - cfg Config - }{ - { - name: "with all namespaces", - cfg: Config{ - DA: &mockDA{}, - Logger: zerolog.Nop(), - DefaultTimeout: 5 * time.Second, - Namespace: "test-ns", - DataNamespace: "test-data-ns", - }, - }, - { - name: "without forced inclusion namespace", - cfg: Config{ - DA: &mockDA{}, - Logger: zerolog.Nop(), - DefaultTimeout: 5 * time.Second, - Namespace: "test-ns", - DataNamespace: "test-data-ns", - }, - }, - { - name: "with default timeout", - cfg: Config{ - DA: &mockDA{}, - Logger: zerolog.Nop(), - Namespace: "test-ns", - DataNamespace: "test-data-ns", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - client := NewClient(tt.cfg) - assert.Assert(t, client != nil) - assert.Assert(t, client.da != nil) - assert.Assert(t, len(client.namespaceBz) > 0) - assert.Assert(t, len(client.namespaceDataBz) > 0) - - expectedTimeout := tt.cfg.DefaultTimeout - if expectedTimeout == 0 { - expectedTimeout = 30 * time.Second - } - assert.Equal(t, client.defaultTimeout, expectedTimeout) - }) - } -} - -func TestClient_GetNamespaces(t *testing.T) { - cfg := Config{ - DA: &mockDA{}, - Logger: zerolog.Nop(), - Namespace: "test-header", - DataNamespace: "test-data", - } - - client := NewClient(cfg) - - headerNs := client.GetHeaderNamespace() - assert.Assert(t, len(headerNs) > 0) - - dataNs := client.GetDataNamespace() - assert.Assert(t, len(dataNs) > 0) - - // Namespaces should be different - assert.Assert(t, 
string(headerNs) != string(dataNs)) -} - -func TestClient_GetDA(t *testing.T) { - mockDAInstance := &mockDA{} - cfg := Config{ - DA: mockDAInstance, - Logger: zerolog.Nop(), - Namespace: "test-ns", - DataNamespace: "test-data-ns", - } - - client := NewClient(cfg) - da := client.GetDA() - assert.Equal(t, da, mockDAInstance) -} - -func TestClient_Submit(t *testing.T) { - logger := zerolog.Nop() - - testCases := []struct { - name string - data [][]byte - gasPrice float64 - options []byte - submitErr error - submitIDs [][]byte - expectedCode coreda.StatusCode - expectedErrMsg string - expectedIDs [][]byte - expectedCount uint64 - }{ - { - name: "successful submission", - data: [][]byte{[]byte("blob1"), []byte("blob2")}, - gasPrice: 1.0, - options: []byte("opts"), - submitIDs: [][]byte{[]byte("id1"), []byte("id2")}, - expectedCode: coreda.StatusSuccess, - expectedIDs: [][]byte{[]byte("id1"), []byte("id2")}, - expectedCount: 2, - }, - { - name: "context canceled error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: context.Canceled, - expectedCode: coreda.StatusContextCanceled, - expectedErrMsg: "submission canceled", - }, - { - name: "tx timed out error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrTxTimedOut, - expectedCode: coreda.StatusNotIncludedInBlock, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxTimedOut.Error(), - }, - { - name: "tx already in mempool error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrTxAlreadyInMempool, - expectedCode: coreda.StatusAlreadyInMempool, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxAlreadyInMempool.Error(), - }, - { - name: "incorrect account sequence error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrTxIncorrectAccountSequence, - expectedCode: coreda.StatusIncorrectAccountSequence, - 
expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxIncorrectAccountSequence.Error(), - }, - { - name: "blob size over limit error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrBlobSizeOverLimit, - expectedCode: coreda.StatusTooBig, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrBlobSizeOverLimit.Error(), - }, - { - name: "context deadline error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrContextDeadline, - expectedCode: coreda.StatusContextDeadline, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrContextDeadline.Error(), - }, - { - name: "generic submission error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: errors.New("some generic error"), - expectedCode: coreda.StatusError, - expectedErrMsg: "failed to submit blobs: some generic error", - }, - { - name: "no IDs returned for non-empty data", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitIDs: [][]byte{}, - expectedCode: coreda.StatusError, - expectedErrMsg: "failed to submit blobs: no IDs returned despite non-empty input", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - mockDAInstance := &mockDA{ - submitWithOptions: func(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte, options []byte) ([]coreda.ID, error) { - return tc.submitIDs, tc.submitErr - }, - } - - client := NewClient(Config{ - DA: mockDAInstance, - Logger: logger, - Namespace: "test-namespace", - DataNamespace: "test-data-namespace", - }) - - encodedNamespace := coreda.NamespaceFromString("test-namespace") - result := client.Submit(context.Background(), tc.data, tc.gasPrice, encodedNamespace.Bytes(), tc.options) - - assert.Equal(t, tc.expectedCode, result.Code) - if tc.expectedErrMsg != "" { - assert.Assert(t, result.Message != "") - } - if tc.expectedIDs != nil { 
- assert.Equal(t, len(tc.expectedIDs), len(result.IDs)) - } - if tc.expectedCount != 0 { - assert.Equal(t, tc.expectedCount, result.SubmittedCount) - } - }) - } -} - -func TestClient_Retrieve(t *testing.T) { - logger := zerolog.Nop() - dataLayerHeight := uint64(100) - mockIDs := [][]byte{[]byte("id1"), []byte("id2")} - mockBlobs := [][]byte{[]byte("blobA"), []byte("blobB")} - mockTimestamp := time.Now() - - testCases := []struct { - name string - getIDsResult *coreda.GetIDsResult - getIDsErr error - getBlobsErr error - expectedCode coreda.StatusCode - expectedErrMsg string - expectedIDs [][]byte - expectedData [][]byte - expectedHeight uint64 - }{ - { - name: "successful retrieval", - getIDsResult: &coreda.GetIDsResult{ - IDs: mockIDs, - Timestamp: mockTimestamp, - }, - expectedCode: coreda.StatusSuccess, - expectedIDs: mockIDs, - expectedData: mockBlobs, - expectedHeight: dataLayerHeight, - }, - { - name: "blob not found error during GetIDs", - getIDsErr: coreda.ErrBlobNotFound, - expectedCode: coreda.StatusNotFound, - expectedErrMsg: coreda.ErrBlobNotFound.Error(), - expectedHeight: dataLayerHeight, - }, - { - name: "height from future error during GetIDs", - getIDsErr: coreda.ErrHeightFromFuture, - expectedCode: coreda.StatusHeightFromFuture, - expectedErrMsg: coreda.ErrHeightFromFuture.Error(), - expectedHeight: dataLayerHeight, - }, - { - name: "generic error during GetIDs", - getIDsErr: errors.New("failed to connect to DA"), - expectedCode: coreda.StatusError, - expectedErrMsg: "failed to get IDs: failed to connect to DA", - expectedHeight: dataLayerHeight, - }, - { - name: "GetIDs returns nil result", - getIDsResult: nil, - expectedCode: coreda.StatusNotFound, - expectedErrMsg: coreda.ErrBlobNotFound.Error(), - expectedHeight: dataLayerHeight, - }, - { - name: "GetIDs returns empty IDs", - getIDsResult: &coreda.GetIDsResult{ - IDs: [][]byte{}, - Timestamp: mockTimestamp, - }, - expectedCode: coreda.StatusNotFound, - expectedErrMsg: 
coreda.ErrBlobNotFound.Error(), - expectedHeight: dataLayerHeight, - }, - { - name: "error during Get (blobs retrieval)", - getIDsResult: &coreda.GetIDsResult{ - IDs: mockIDs, - Timestamp: mockTimestamp, - }, - getBlobsErr: errors.New("network error during blob retrieval"), - expectedCode: coreda.StatusError, - expectedErrMsg: "failed to get blobs for batch 0-1: network error during blob retrieval", - expectedHeight: dataLayerHeight, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - mockDAInstance := &mockDA{ - getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { - return tc.getIDsResult, tc.getIDsErr - }, - getFunc: func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { - if tc.getBlobsErr != nil { - return nil, tc.getBlobsErr - } - return mockBlobs, nil - }, - } - - client := NewClient(Config{ - DA: mockDAInstance, - Logger: logger, - Namespace: "test-namespace", - DataNamespace: "test-data-namespace", - DefaultTimeout: 5 * time.Second, - }) - - encodedNamespace := coreda.NamespaceFromString("test-namespace") - result := client.Retrieve(context.Background(), dataLayerHeight, encodedNamespace.Bytes()) - - assert.Equal(t, tc.expectedCode, result.Code) - assert.Equal(t, tc.expectedHeight, result.Height) - if tc.expectedErrMsg != "" { - assert.Assert(t, result.Message != "") - } - if tc.expectedIDs != nil { - assert.Equal(t, len(tc.expectedIDs), len(result.IDs)) - } - if tc.expectedData != nil { - assert.Equal(t, len(tc.expectedData), len(result.Data)) - } - }) - } -} - -func TestClient_Retrieve_Timeout(t *testing.T) { - logger := zerolog.Nop() - dataLayerHeight := uint64(100) - encodedNamespace := coreda.NamespaceFromString("test-namespace") - - t.Run("timeout during GetIDs", func(t *testing.T) { - mockDAInstance := &mockDA{ - getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { - <-ctx.Done() // Wait 
for context cancellation - return nil, context.DeadlineExceeded - }, - } - - client := NewClient(Config{ - DA: mockDAInstance, - Logger: logger, - Namespace: "test-namespace", - DataNamespace: "test-data-namespace", - DefaultTimeout: 1 * time.Millisecond, - }) - - result := client.Retrieve(context.Background(), dataLayerHeight, encodedNamespace.Bytes()) - - assert.Equal(t, coreda.StatusError, result.Code) - assert.Assert(t, result.Message != "") - }) - - t.Run("timeout during Get", func(t *testing.T) { - mockIDs := [][]byte{[]byte("id1")} - mockTimestamp := time.Now() - - mockDAInstance := &mockDA{ - getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { - return &coreda.GetIDsResult{ - IDs: mockIDs, - Timestamp: mockTimestamp, - }, nil - }, - getFunc: func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { - <-ctx.Done() // Wait for context cancellation - return nil, context.DeadlineExceeded - }, - } - - client := NewClient(Config{ - DA: mockDAInstance, - Logger: logger, - Namespace: "test-namespace", - DataNamespace: "test-data-namespace", - DefaultTimeout: 1 * time.Millisecond, - }) - - result := client.Retrieve(context.Background(), dataLayerHeight, encodedNamespace.Bytes()) - - assert.Equal(t, coreda.StatusError, result.Code) - assert.Assert(t, result.Message != "") - }) -} diff --git a/block/internal/submitting/da_submitter.go b/block/internal/submitting/da_submitter.go index 8cf741dcd9..29811af4db 100644 --- a/block/internal/submitting/da_submitter.go +++ b/block/internal/submitting/da_submitter.go @@ -12,8 +12,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - "github.com/evstack/ev-node/block/internal/da" - coreda "github.com/evstack/ev-node/core/da" + dapkg "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" pkgda "github.com/evstack/ev-node/pkg/da" "github.com/evstack/ev-node/pkg/genesis" @@ 
-95,20 +94,24 @@ func clamp(v, min, max time.Duration) time.Duration { // DASubmitter handles DA submission operations type DASubmitter struct { - client da.Client + da dapkg.DA config config.Config genesis genesis.Genesis options common.BlockOptions logger zerolog.Logger metrics *common.Metrics + // calculate namespaces bytes once and reuse them + namespaceBz []byte + namespaceDataBz []byte + // address selector for multi-account support addressSelector pkgda.AddressSelector } // NewDASubmitter creates a new DA submitter func NewDASubmitter( - client da.Client, + da dapkg.DA, config config.Config, genesis genesis.Genesis, options common.BlockOptions, @@ -119,7 +122,7 @@ func NewDASubmitter( if config.RPC.EnableDAVisualization { visualizerLogger := logger.With().Str("component", "da_visualization").Logger() - server.SetDAVisualizationServer(server.NewDAVisualizationServer(client.GetDA(), visualizerLogger, config.Node.Aggregator)) + server.SetDAVisualizationServer(server.NewDAVisualizationServer(da, visualizerLogger, config.Node.Aggregator)) } // Use NoOp metrics if nil to avoid nil checks throughout the code @@ -139,12 +142,14 @@ func NewDASubmitter( } return &DASubmitter{ - client: client, + da: da, config: config, genesis: genesis, options: options, metrics: metrics, logger: daSubmitterLogger, + namespaceBz: dapkg.NamespaceFromString(config.DA.GetNamespace()).Bytes(), + namespaceDataBz: dapkg.NamespaceFromString(config.DA.GetDataNamespace()).Bytes(), addressSelector: addressSelector, } } @@ -184,7 +189,7 @@ func (s *DASubmitter) SubmitHeaders(ctx context.Context, cache cache.Manager) er } return proto.Marshal(headerPb) }, - func(submitted []*types.SignedHeader, res *coreda.ResultSubmit) { + func(submitted []*types.SignedHeader, res *dapkg.ResultSubmit) { for _, header := range submitted { cache.SetHeaderDAIncluded(header.Hash().String(), res.Height, header.Height()) } @@ -194,7 +199,7 @@ func (s *DASubmitter) SubmitHeaders(ctx context.Context, cache 
cache.Manager) er } }, "header", - s.client.GetHeaderNamespace(), + s.namespaceBz, []byte(s.config.DA.SubmitOptions), func() uint64 { return cache.NumPendingHeaders() }, ) @@ -227,7 +232,7 @@ func (s *DASubmitter) SubmitData(ctx context.Context, cache cache.Manager, signe func(signedData *types.SignedData) ([]byte, error) { return signedData.MarshalBinary() }, - func(submitted []*types.SignedData, res *coreda.ResultSubmit) { + func(submitted []*types.SignedData, res *dapkg.ResultSubmit) { for _, sd := range submitted { cache.SetDataDAIncluded(sd.Data.DACommitment().String(), res.Height, sd.Height()) } @@ -237,7 +242,7 @@ func (s *DASubmitter) SubmitData(ctx context.Context, cache cache.Manager, signe } }, "data", - s.client.GetDataNamespace(), + s.namespaceDataBz, []byte(s.config.DA.SubmitOptions), func() uint64 { return cache.NumPendingData() }, ) @@ -343,7 +348,7 @@ func submitToDA[T any]( ctx context.Context, items []T, marshalFn func(T) ([]byte, error), - postSubmit func([]T, *coreda.ResultSubmit), + postSubmit func([]T, *dapkg.ResultSubmit), itemType string, namespace []byte, options []byte, @@ -406,8 +411,8 @@ func submitToDA[T any]( // Perform submission start := time.Now() - res := s.client.Submit(submitCtx, marshaled, -1, namespace, mergedOptions) - s.logger.Debug().Int("attempts", rs.Attempt).Dur("elapsed", time.Since(start)).Uint64("code", uint64(res.Code)).Msg("got SubmitWithHelpers response from celestia") + res := s.da.SubmitWithOptions(submitCtx, marshaled, -1, namespace, mergedOptions) + s.logger.Debug().Int("attempts", rs.Attempt).Dur("elapsed", time.Since(start)).Uint64("code", uint64(res.Code)).Msg("got SubmitWithOptions response from Celestia layer") // Record submission result for observability if daVisualizationServer := server.GetDAVisualizationServer(); daVisualizationServer != nil { @@ -415,7 +420,7 @@ func submitToDA[T any]( } switch res.Code { - case coreda.StatusSuccess: + case dapkg.StatusSuccess: submitted := items[:res.SubmittedCount] 
postSubmit(submitted, &res) s.logger.Info().Str("itemType", itemType).Uint64("count", res.SubmittedCount).Msg("successfully submitted items to DA layer") @@ -436,7 +441,7 @@ func submitToDA[T any]( s.metrics.DASubmitterPendingBlobs.Set(float64(getTotalPendingFn())) } - case coreda.StatusTooBig: + case dapkg.StatusTooBig: // Record failure metric s.recordFailure(common.DASubmitterFailureReasonTooBig) // Iteratively halve until it fits or single-item too big @@ -460,19 +465,19 @@ func submitToDA[T any]( s.metrics.DASubmitterPendingBlobs.Set(float64(getTotalPendingFn())) } - case coreda.StatusNotIncludedInBlock: + case dapkg.StatusNotIncludedInBlock: // Record failure metric s.recordFailure(common.DASubmitterFailureReasonNotIncludedInBlock) s.logger.Info().Dur("backoff", pol.MaxBackoff).Msg("retrying due to mempool state") rs.Next(reasonMempool, pol) - case coreda.StatusAlreadyInMempool: + case dapkg.StatusAlreadyInMempool: // Record failure metric s.recordFailure(common.DASubmitterFailureReasonAlreadyInMempool) s.logger.Info().Dur("backoff", pol.MaxBackoff).Msg("retrying due to mempool state") rs.Next(reasonMempool, pol) - case coreda.StatusContextCanceled: + case dapkg.StatusContextCanceled: // Record failure metric s.recordFailure(common.DASubmitterFailureReasonContextCanceled) s.logger.Info().Msg("DA layer submission canceled due to context cancellation") diff --git a/block/internal/submitting/da_submitter_integration_test.go b/block/internal/submitting/da_submitter_integration_test.go index 5b768e1a51..4daaeee9dc 100644 --- a/block/internal/submitting/da_submitter_integration_test.go +++ b/block/internal/submitting/da_submitter_integration_test.go @@ -15,8 +15,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - "github.com/evstack/ev-node/block/internal/da" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" 
"github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/signer/noop" @@ -84,16 +83,10 @@ func TestDASubmitter_SubmitHeadersAndData_MarksInclusionAndUpdatesLastSubmitted( require.NoError(t, batch2.Commit()) // Dummy DA - dummyDA := coreda.NewDummyDA(10_000_000, 10*time.Millisecond) + dummyDA := da.NewDummyDA(10_000_000, 10*time.Millisecond) // Create DA submitter - daClient := da.NewClient(da.Config{ - DA: dummyDA, - Logger: zerolog.Nop(), - Namespace: cfg.DA.Namespace, - DataNamespace: cfg.DA.DataNamespace, - }) - daSubmitter := NewDASubmitter(daClient, cfg, gen, common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop()) + daSubmitter := NewDASubmitter(dummyDA, cfg, gen, common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop()) // Submit headers and data require.NoError(t, daSubmitter.SubmitHeaders(context.Background(), cm)) diff --git a/block/internal/submitting/da_submitter_mocks_test.go b/block/internal/submitting/da_submitter_mocks_test.go index b215b0cf2f..0cbc0ae2fd 100644 --- a/block/internal/submitting/da_submitter_mocks_test.go +++ b/block/internal/submitting/da_submitter_mocks_test.go @@ -2,7 +2,6 @@ package submitting import ( "context" - "errors" "testing" "time" @@ -11,8 +10,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/evstack/ev-node/block/internal/common" - "github.com/evstack/ev-node/block/internal/da" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/test/mocks" @@ -26,45 +24,62 @@ func newTestSubmitter(mockDA *mocks.MockDA, override func(*config.Config)) *DASu cfg.DA.MaxSubmitAttempts = 3 cfg.DA.SubmitOptions = "opts" cfg.DA.Namespace = "ns" - cfg.DA.DataNamespace = "ns-data" if override != nil { override(&cfg) } - daClient := da.NewClient(da.Config{ - DA: mockDA, - Logger: zerolog.Nop(), - Namespace: cfg.DA.Namespace, - DataNamespace: cfg.DA.DataNamespace, - 
}) - return NewDASubmitter(daClient, cfg, genesis.Genesis{} /*options=*/, common.BlockOptions{}, common.NopMetrics(), zerolog.Nop()) + return NewDASubmitter(mockDA, cfg, genesis.Genesis{} /*options=*/, common.BlockOptions{}, common.NopMetrics(), zerolog.Nop()) } // marshal helper for simple items func marshalString(s string) ([]byte, error) { return []byte(s), nil } +// helper to create a ResultSubmit for errors +func errorResult(code da.StatusCode, msg string) da.ResultSubmit { + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: code, + Message: msg, + }, + } +} + +// helper to create a ResultSubmit for success +func successResult(ids []da.ID) da.ResultSubmit { + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, + IDs: ids, + SubmittedCount: uint64(len(ids)), + }, + } +} + func TestSubmitToDA_MempoolRetry_IncreasesGasAndSucceeds(t *testing.T) { t.Parallel() mockDA := mocks.NewMockDA(t) - nsBz := coreda.NamespaceFromString("ns").Bytes() + nsBz := da.NamespaceFromString("ns").Bytes() opts := []byte("opts") var usedGas []float64 + + // First attempt: timeout error (mapped to StatusNotIncludedInBlock) mockDA. On("SubmitWithOptions", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). - Return(nil, coreda.ErrTxTimedOut). + Return(errorResult(da.StatusNotIncludedInBlock, "timeout")). Once() - ids := [][]byte{[]byte("id1"), []byte("id2"), []byte("id3")} + // Second attempt: success + ids := []da.ID{[]byte("id1"), []byte("id2"), []byte("id3")} mockDA. On("SubmitWithOptions", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). - Return(ids, nil). + Return(successResult(ids)). 
Once() s := newTestSubmitter(mockDA, nil) @@ -76,7 +91,7 @@ func TestSubmitToDA_MempoolRetry_IncreasesGasAndSucceeds(t *testing.T) { ctx, items, marshalString, - func(_ []string, _ *coreda.ResultSubmit) {}, + func(_ []string, _ *da.ResultSubmit) {}, "item", nsBz, opts, @@ -94,7 +109,7 @@ func TestSubmitToDA_UnknownError_RetriesSameGasThenSucceeds(t *testing.T) { mockDA := mocks.NewMockDA(t) - nsBz := coreda.NamespaceFromString("ns").Bytes() + nsBz := da.NamespaceFromString("ns").Bytes() opts := []byte("opts") var usedGas []float64 @@ -103,15 +118,15 @@ func TestSubmitToDA_UnknownError_RetriesSameGasThenSucceeds(t *testing.T) { mockDA. On("SubmitWithOptions", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). - Return(nil, errors.New("boom")). + Return(errorResult(da.StatusError, "boom")). Once() // Second attempt: same gas, success - ids := [][]byte{[]byte("id1")} + ids := []da.ID{[]byte("id1")} mockDA. On("SubmitWithOptions", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). - Return(ids, nil). + Return(successResult(ids)). Once() s := newTestSubmitter(mockDA, nil) @@ -123,7 +138,7 @@ func TestSubmitToDA_UnknownError_RetriesSameGasThenSucceeds(t *testing.T) { ctx, items, marshalString, - func(_ []string, _ *coreda.ResultSubmit) {}, + func(_ []string, _ *da.ResultSubmit) {}, "item", nsBz, opts, @@ -139,7 +154,7 @@ func TestSubmitToDA_TooBig_HalvesBatch(t *testing.T) { mockDA := mocks.NewMockDA(t) - nsBz := coreda.NamespaceFromString("ns").Bytes() + nsBz := da.NamespaceFromString("ns").Bytes() opts := []byte("opts") // record sizes of batches sent to DA @@ -152,18 +167,18 @@ func TestSubmitToDA_TooBig_HalvesBatch(t *testing.T) { blobs := args.Get(1).([][]byte) batchSizes = append(batchSizes, len(blobs)) }). - Return(nil, coreda.ErrBlobSizeOverLimit). 
+ Return(errorResult(da.StatusTooBig, "blob too big")). Once() // Second attempt: expect half the size, succeed - ids := [][]byte{[]byte("id1"), []byte("id2")} + ids := []da.ID{[]byte("id1"), []byte("id2")} mockDA. On("SubmitWithOptions", mock.Anything, mock.Anything, mock.Anything, nsBz, opts). Run(func(args mock.Arguments) { blobs := args.Get(1).([][]byte) batchSizes = append(batchSizes, len(blobs)) }). - Return(ids, nil). + Return(successResult(ids)). Once() s := newTestSubmitter(mockDA, nil) @@ -175,7 +190,7 @@ func TestSubmitToDA_TooBig_HalvesBatch(t *testing.T) { ctx, items, marshalString, - func(_ []string, _ *coreda.ResultSubmit) {}, + func(_ []string, _ *da.ResultSubmit) {}, "item", nsBz, opts, @@ -191,7 +206,7 @@ func TestSubmitToDA_SentinelNoGas_PreservesGasAcrossRetries(t *testing.T) { mockDA := mocks.NewMockDA(t) - nsBz := coreda.NamespaceFromString("ns").Bytes() + nsBz := da.NamespaceFromString("ns").Bytes() opts := []byte("opts") var usedGas []float64 @@ -200,15 +215,15 @@ func TestSubmitToDA_SentinelNoGas_PreservesGasAcrossRetries(t *testing.T) { mockDA. On("SubmitWithOptions", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). - Return(nil, coreda.ErrTxAlreadyInMempool). + Return(errorResult(da.StatusAlreadyInMempool, "already in mempool")). Once() // Second attempt: should use same sentinel gas (-1), succeed - ids := [][]byte{[]byte("id1")} + ids := []da.ID{[]byte("id1")} mockDA. On("SubmitWithOptions", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). - Return(ids, nil). + Return(successResult(ids)). 
Once() s := newTestSubmitter(mockDA, nil) @@ -220,7 +235,7 @@ func TestSubmitToDA_SentinelNoGas_PreservesGasAcrossRetries(t *testing.T) { ctx, items, marshalString, - func(_ []string, _ *coreda.ResultSubmit) {}, + func(_ []string, _ *da.ResultSubmit) {}, "item", nsBz, opts, @@ -236,19 +251,19 @@ func TestSubmitToDA_PartialSuccess_AdvancesWindow(t *testing.T) { mockDA := mocks.NewMockDA(t) - nsBz := coreda.NamespaceFromString("ns").Bytes() + nsBz := da.NamespaceFromString("ns").Bytes() opts := []byte("opts") // track how many items postSubmit sees across attempts var totalSubmitted int // First attempt: success for first 2 of 3 - firstIDs := [][]byte{[]byte("id1"), []byte("id2")} - mockDA.On("SubmitWithOptions", mock.Anything, mock.Anything, mock.Anything, nsBz, opts).Return(firstIDs, nil).Once() + firstIDs := []da.ID{[]byte("id1"), []byte("id2")} + mockDA.On("SubmitWithOptions", mock.Anything, mock.Anything, mock.Anything, nsBz, opts).Return(successResult(firstIDs)).Once() // Second attempt: success for remaining 1 - secondIDs := [][]byte{[]byte("id3")} - mockDA.On("SubmitWithOptions", mock.Anything, mock.Anything, mock.Anything, nsBz, opts).Return(secondIDs, nil).Once() + secondIDs := []da.ID{[]byte("id3")} + mockDA.On("SubmitWithOptions", mock.Anything, mock.Anything, mock.Anything, nsBz, opts).Return(successResult(secondIDs)).Once() s := newTestSubmitter(mockDA, nil) @@ -259,7 +274,7 @@ func TestSubmitToDA_PartialSuccess_AdvancesWindow(t *testing.T) { ctx, items, marshalString, - func(submitted []string, _ *coreda.ResultSubmit) { totalSubmitted += len(submitted) }, + func(submitted []string, _ *da.ResultSubmit) { totalSubmitted += len(submitted) }, "item", nsBz, opts, diff --git a/block/internal/submitting/da_submitter_test.go b/block/internal/submitting/da_submitter_test.go index 214ab98db4..afd7dba76e 100644 --- a/block/internal/submitting/da_submitter_test.go +++ b/block/internal/submitting/da_submitter_test.go @@ -15,8 +15,7 @@ import ( 
"github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - "github.com/evstack/ev-node/block/internal/da" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/rpc/server" @@ -26,7 +25,7 @@ import ( "github.com/evstack/ev-node/types" ) -func setupDASubmitterTest(t *testing.T) (*DASubmitter, store.Store, cache.Manager, coreda.DA, genesis.Genesis) { +func setupDASubmitterTest(t *testing.T) (*DASubmitter, store.Store, cache.Manager, da.DA, genesis.Genesis) { t.Helper() // Create store and cache @@ -36,7 +35,7 @@ func setupDASubmitterTest(t *testing.T) (*DASubmitter, store.Store, cache.Manage require.NoError(t, err) // Create dummy DA - dummyDA := coreda.NewDummyDA(10_000_000, 10*time.Millisecond) + dummyDA := da.NewDummyDA(10_000_000, 10*time.Millisecond) // Create config cfg := config.DefaultConfig() @@ -52,14 +51,8 @@ func setupDASubmitterTest(t *testing.T) (*DASubmitter, store.Store, cache.Manage } // Create DA submitter - daClient := da.NewClient(da.Config{ - DA: dummyDA, - Logger: zerolog.Nop(), - Namespace: cfg.DA.Namespace, - DataNamespace: cfg.DA.DataNamespace, - }) daSubmitter := NewDASubmitter( - daClient, + dummyDA, cfg, gen, common.DefaultBlockOptions(), @@ -87,7 +80,7 @@ func TestDASubmitter_NewDASubmitter(t *testing.T) { submitter, _, _, _, _ := setupDASubmitterTest(t) assert.NotNil(t, submitter) - assert.NotNil(t, submitter.client) + assert.NotNil(t, submitter.da) assert.NotNil(t, submitter.config) assert.NotNil(t, submitter.genesis) } @@ -100,16 +93,10 @@ func TestNewDASubmitterSetsVisualizerWhenEnabled(t *testing.T) { cfg.RPC.EnableDAVisualization = true cfg.Node.Aggregator = true - dummyDA := coreda.NewDummyDA(10_000_000, 10*time.Millisecond) + dummyDA := da.NewDummyDA(10_000_000, 10*time.Millisecond) - daClient := da.NewClient(da.Config{ - DA: dummyDA, - Logger: 
zerolog.Nop(), - Namespace: cfg.DA.Namespace, - DataNamespace: cfg.DA.DataNamespace, - }) NewDASubmitter( - daClient, + dummyDA, cfg, genesis.Genesis{}, common.DefaultBlockOptions(), diff --git a/block/internal/submitting/submitter_test.go b/block/internal/submitting/submitter_test.go index c1df11bf51..48615788ca 100644 --- a/block/internal/submitting/submitter_test.go +++ b/block/internal/submitting/submitter_test.go @@ -18,7 +18,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - "github.com/evstack/ev-node/block/internal/da" + dapkg "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/rpc/server" @@ -162,13 +162,8 @@ func TestSubmitter_setSequencerHeightToDAHeight(t *testing.T) { cfg.DA.Namespace = "test-ns" cfg.DA.DataNamespace = "test-data-ns" metrics := common.NopMetrics() - daClient := da.NewClient(da.Config{ - DA: nil, - Logger: zerolog.Nop(), - Namespace: cfg.DA.Namespace, - DataNamespace: cfg.DA.DataNamespace, - }) - daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) + dummyDA := dapkg.NewDummyDA(10_000_000, 10*time.Millisecond) + daSub := NewDASubmitter(dummyDA, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) s := NewSubmitter(mockStore, nil, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, zerolog.Nop(), nil) s.ctx = ctx @@ -247,13 +242,8 @@ func TestSubmitter_processDAInclusionLoop_advances(t *testing.T) { exec.On("SetFinal", mock.Anything, uint64(1)).Return(nil).Once() exec.On("SetFinal", mock.Anything, uint64(2)).Return(nil).Once() - daClient := da.NewClient(da.Config{ - DA: nil, - Logger: zerolog.Nop(), - Namespace: cfg.DA.Namespace, - DataNamespace: cfg.DA.DataNamespace, - }) - daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) + dummyDA := 
dapkg.NewDummyDA(10_000_000, 10*time.Millisecond) + daSub := NewDASubmitter(dummyDA, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) s := NewSubmitter(st, exec, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, zerolog.Nop(), nil) // prepare two consecutive blocks in store with DA included in cache @@ -438,13 +428,8 @@ func TestSubmitter_CacheClearedOnHeightInclusion(t *testing.T) { exec.On("SetFinal", mock.Anything, uint64(1)).Return(nil).Once() exec.On("SetFinal", mock.Anything, uint64(2)).Return(nil).Once() - daClient := da.NewClient(da.Config{ - DA: nil, - Logger: zerolog.Nop(), - Namespace: cfg.DA.Namespace, - DataNamespace: cfg.DA.DataNamespace, - }) - daSub := NewDASubmitter(daClient, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) + dummyDA := dapkg.NewDummyDA(10_000_000, 10*time.Millisecond) + daSub := NewDASubmitter(dummyDA, cfg, genesis.Genesis{}, common.BlockOptions{}, metrics, zerolog.Nop()) s := NewSubmitter(st, exec, cm, metrics, cfg, genesis.Genesis{}, daSub, nil, zerolog.Nop(), nil) // Create test blocks diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index 9325d4d3bd..bffde35f81 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -11,8 +11,8 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - "github.com/evstack/ev-node/block/internal/da" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" + "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/types" pb "github.com/evstack/ev-node/types/pb/evnode/v1" @@ -25,11 +25,15 @@ type DARetriever interface { // daRetriever handles DA retrieval operations for syncing type daRetriever struct { - client da.Client + da da.DA cache cache.CacheManager genesis genesis.Genesis logger zerolog.Logger + // namespace bytes calculated 
once + namespaceBz []byte + namespaceDataBz []byte + // transient cache, only full event need to be passed to the syncer // on restart, will be refetch as da height is updated by syncer pendingHeaders map[uint64]*types.SignedHeader @@ -38,18 +42,21 @@ type daRetriever struct { // NewDARetriever creates a new DA retriever func NewDARetriever( - client da.Client, + daClient da.DA, cache cache.CacheManager, + config config.Config, genesis genesis.Genesis, logger zerolog.Logger, ) *daRetriever { return &daRetriever{ - client: client, - cache: cache, - genesis: genesis, - logger: logger.With().Str("component", "da_retriever").Logger(), - pendingHeaders: make(map[uint64]*types.SignedHeader), - pendingData: make(map[uint64]*types.Data), + da: daClient, + cache: cache, + genesis: genesis, + logger: logger.With().Str("component", "da_retriever").Logger(), + namespaceBz: da.NamespaceFromString(config.DA.GetNamespace()).Bytes(), + namespaceDataBz: da.NamespaceFromString(config.DA.GetDataNamespace()).Bytes(), + pendingHeaders: make(map[uint64]*types.SignedHeader), + pendingData: make(map[uint64]*types.Data), } } @@ -71,45 +78,45 @@ func (r *daRetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]co } // fetchBlobs retrieves blobs from both header and data namespaces -func (r *daRetriever) fetchBlobs(ctx context.Context, daHeight uint64) (coreda.ResultRetrieve, error) { - // Retrieve from both namespaces using the DA client - headerRes := r.client.RetrieveHeaders(ctx, daHeight) +func (r *daRetriever) fetchBlobs(ctx context.Context, daHeight uint64) (da.ResultRetrieve, error) { + // Retrieve from header namespace + headerRes := r.da.Retrieve(ctx, daHeight, r.namespaceBz) // If namespaces are the same, return header result - if bytes.Equal(r.client.GetHeaderNamespace(), r.client.GetDataNamespace()) { + if bytes.Equal(r.namespaceBz, r.namespaceDataBz) { return headerRes, r.validateBlobResponse(headerRes, daHeight) } - dataRes := r.client.RetrieveData(ctx, daHeight) + 
dataRes := r.da.Retrieve(ctx, daHeight, r.namespaceDataBz) // Validate responses headerErr := r.validateBlobResponse(headerRes, daHeight) // ignoring error not found, as data can have data - if headerErr != nil && !errors.Is(headerErr, coreda.ErrBlobNotFound) { + if headerErr != nil && !errors.Is(headerErr, da.ErrBlobNotFound) { return headerRes, headerErr } dataErr := r.validateBlobResponse(dataRes, daHeight) // ignoring error not found, as header can have data - if dataErr != nil && !errors.Is(dataErr, coreda.ErrBlobNotFound) { + if dataErr != nil && !errors.Is(dataErr, da.ErrBlobNotFound) { return dataRes, dataErr } // Combine successful results - combinedResult := coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, + combinedResult := da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, Height: daHeight, }, Data: make([][]byte, 0), } - if headerRes.Code == coreda.StatusSuccess { + if headerRes.Code == da.StatusSuccess { combinedResult.Data = append(combinedResult.Data, headerRes.Data...) combinedResult.IDs = append(combinedResult.IDs, headerRes.IDs...) } - if dataRes.Code == coreda.StatusSuccess { + if dataRes.Code == da.StatusSuccess { combinedResult.Data = append(combinedResult.Data, dataRes.Data...) combinedResult.IDs = append(combinedResult.IDs, dataRes.IDs...) } @@ -117,9 +124,9 @@ func (r *daRetriever) fetchBlobs(ctx context.Context, daHeight uint64) (coreda.R // Re-throw error not found if both were not found. 
if len(combinedResult.Data) == 0 && len(combinedResult.IDs) == 0 { r.logger.Debug().Uint64("da_height", daHeight).Msg("no blob data found") - combinedResult.Code = coreda.StatusNotFound - combinedResult.Message = coreda.ErrBlobNotFound.Error() - return combinedResult, coreda.ErrBlobNotFound + combinedResult.Code = da.StatusNotFound + combinedResult.Message = da.ErrBlobNotFound.Error() + return combinedResult, da.ErrBlobNotFound } return combinedResult, nil @@ -127,15 +134,15 @@ func (r *daRetriever) fetchBlobs(ctx context.Context, daHeight uint64) (coreda.R // validateBlobResponse validates a blob response from DA layer // those are the only error code returned by da.RetrieveWithHelpers -func (r *daRetriever) validateBlobResponse(res coreda.ResultRetrieve, daHeight uint64) error { +func (r *daRetriever) validateBlobResponse(res da.ResultRetrieve, daHeight uint64) error { switch res.Code { - case coreda.StatusError: + case da.StatusError: return fmt.Errorf("DA retrieval failed: %s", res.Message) - case coreda.StatusHeightFromFuture: - return fmt.Errorf("%w: height from future", coreda.ErrHeightFromFuture) - case coreda.StatusNotFound: - return fmt.Errorf("%w: blob not found", coreda.ErrBlobNotFound) - case coreda.StatusSuccess: + case da.StatusHeightFromFuture: + return fmt.Errorf("%w: height from future", da.ErrHeightFromFuture) + case da.StatusNotFound: + return fmt.Errorf("%w: blob not found", da.ErrBlobNotFound) + case da.StatusSuccess: r.logger.Debug().Uint64("da_height", daHeight).Msg("successfully retrieved from DA") return nil default: diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index 4923600b8f..ee22a52055 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "errors" - "fmt" "testing" "time" @@ -16,8 +15,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" 
"github.com/evstack/ev-node/block/internal/common" - "github.com/evstack/ev-node/block/internal/da" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" signerpkg "github.com/evstack/ev-node/pkg/signer" @@ -26,7 +24,7 @@ import ( ) // newTestDARetriever creates a DA retriever for testing with the given DA implementation -func newTestDARetriever(t *testing.T, mockDA coreda.DA, cfg config.Config, gen genesis.Genesis) *daRetriever { +func newTestDARetriever(t *testing.T, mockDA da.DA, cfg config.Config, gen genesis.Genesis) *daRetriever { t.Helper() if cfg.DA.Namespace == "" { cfg.DA.Namespace = "test-ns" @@ -38,14 +36,7 @@ func newTestDARetriever(t *testing.T, mockDA coreda.DA, cfg config.Config, gen g cm, err := cache.NewCacheManager(cfg, zerolog.Nop()) require.NoError(t, err) - daClient := da.NewClient(da.Config{ - DA: mockDA, - Logger: zerolog.Nop(), - Namespace: cfg.DA.Namespace, - DataNamespace: cfg.DA.DataNamespace, - }) - - return NewDARetriever(daClient, cm, gen, zerolog.Nop()) + return NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) } // makeSignedDataBytes builds SignedData containing the provided Data and returns its binary encoding @@ -75,8 +66,13 @@ func makeSignedDataBytesWithTime(t *testing.T, chainID string, height uint64, pr func TestDARetriever_RetrieveFromDA_Invalid(t *testing.T) { mockDA := testmocks.NewMockDA(t) - mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). - Return(nil, errors.New("just invalid")).Maybe() + mockDA.EXPECT().Retrieve(mock.Anything, mock.Anything, mock.Anything). 
+ Return(da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: "just invalid", + }, + }).Maybe() r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) events, err := r.RetrieveFromDA(context.Background(), 42) @@ -87,26 +83,37 @@ func TestDARetriever_RetrieveFromDA_Invalid(t *testing.T) { func TestDARetriever_RetrieveFromDA_NotFound(t *testing.T) { mockDA := testmocks.NewMockDA(t) - // GetIDs returns ErrBlobNotFound -> helper maps to StatusNotFound - mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). - Return(nil, fmt.Errorf("%s: whatever", coreda.ErrBlobNotFound.Error())).Maybe() + // Retrieve returns StatusNotFound + mockDA.EXPECT().Retrieve(mock.Anything, mock.Anything, mock.Anything). + Return(da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusNotFound, + Message: da.ErrBlobNotFound.Error(), + }, + }).Maybe() r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) events, err := r.RetrieveFromDA(context.Background(), 42) - assert.True(t, errors.Is(err, coreda.ErrBlobNotFound)) + assert.True(t, errors.Is(err, da.ErrBlobNotFound)) assert.Len(t, events, 0) } func TestDARetriever_RetrieveFromDA_HeightFromFuture(t *testing.T) { mockDA := testmocks.NewMockDA(t) - // GetIDs returns ErrHeightFromFuture -> helper maps to StatusHeightFromFuture, fetchBlobs returns error - mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). - Return(nil, fmt.Errorf("%s: later", coreda.ErrHeightFromFuture.Error())).Maybe() + + // Retrieve returns StatusHeightFromFuture + mockDA.EXPECT().Retrieve(mock.Anything, mock.Anything, mock.Anything). 
+ Return(da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusHeightFromFuture, + Message: da.ErrHeightFromFuture.Error(), + }, + }).Maybe() r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) events, derr := r.RetrieveFromDA(context.Background(), 1000) assert.Error(t, derr) - assert.True(t, errors.Is(derr, coreda.ErrHeightFromFuture)) + assert.True(t, errors.Is(derr, da.ErrHeightFromFuture)) assert.Nil(t, events) } @@ -115,12 +122,17 @@ func TestDARetriever_RetrieveFromDA_Timeout(t *testing.T) { mockDA := testmocks.NewMockDA(t) - // Mock GetIDs to hang longer than the timeout - mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). + // Mock Retrieve to hang longer than the timeout + mockDA.EXPECT().Retrieve(mock.Anything, mock.Anything, mock.Anything). Run(func(ctx context.Context, height uint64, namespace []byte) { <-ctx.Done() }). - Return(nil, context.DeadlineExceeded).Maybe() + Return(da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: context.DeadlineExceeded.Error(), + }, + }).Maybe() r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) @@ -141,12 +153,16 @@ func TestDARetriever_RetrieveFromDA_Timeout(t *testing.T) { } func TestDARetriever_RetrieveFromDA_TimeoutFast(t *testing.T) { - mockDA := testmocks.NewMockDA(t) - // Mock GetIDs to immediately return context deadline exceeded - mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). - Return(nil, context.DeadlineExceeded).Maybe() + // Mock Retrieve to return error with context deadline exceeded + mockDA.EXPECT().Retrieve(mock.Anything, mock.Anything, mock.Anything). 
+ Return(da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: context.DeadlineExceeded.Error(), + }, + }).Maybe() r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) @@ -244,15 +260,15 @@ func TestDARetriever_tryDecodeData_InvalidSignatureOrProposer(t *testing.T) { func TestDARetriever_validateBlobResponse(t *testing.T) { r := &daRetriever{logger: zerolog.Nop()} // StatusSuccess -> nil - err := r.validateBlobResponse(coreda.ResultRetrieve{BaseResult: coreda.BaseResult{Code: coreda.StatusSuccess}}, 1) + err := r.validateBlobResponse(da.ResultRetrieve{BaseResult: da.BaseResult{Code: da.StatusSuccess}}, 1) assert.NoError(t, err) // StatusError -> error - err = r.validateBlobResponse(coreda.ResultRetrieve{BaseResult: coreda.BaseResult{Code: coreda.StatusError, Message: "fail"}}, 1) + err = r.validateBlobResponse(da.ResultRetrieve{BaseResult: da.BaseResult{Code: da.StatusError, Message: "fail"}}, 1) assert.Error(t, err) // StatusHeightFromFuture -> specific error - err = r.validateBlobResponse(coreda.ResultRetrieve{BaseResult: coreda.BaseResult{Code: coreda.StatusHeightFromFuture}}, 1) + err = r.validateBlobResponse(da.ResultRetrieve{BaseResult: da.BaseResult{Code: da.StatusHeightFromFuture}}, 1) assert.Error(t, err) - assert.True(t, errors.Is(err, coreda.ErrHeightFromFuture)) + assert.True(t, errors.Is(err, da.ErrHeightFromFuture)) } func TestDARetriever_RetrieveFromDA_TwoNamespaces_Success(t *testing.T) { @@ -268,20 +284,31 @@ func TestDARetriever_RetrieveFromDA_TwoNamespaces_Success(t *testing.T) { cfg.DA.Namespace = "nsHdr" cfg.DA.DataNamespace = "nsData" - namespaceBz := coreda.NamespaceFromString(cfg.DA.GetNamespace()).Bytes() - namespaceDataBz := coreda.NamespaceFromString(cfg.DA.GetDataNamespace()).Bytes() + namespaceBz := da.NamespaceFromString(cfg.DA.GetNamespace()).Bytes() + namespaceDataBz := da.NamespaceFromString(cfg.DA.GetDataNamespace()).Bytes() mockDA := testmocks.NewMockDA(t) - // Expect GetIDs 
for both namespaces - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1234), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceBz) })). - Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("h1")}, Timestamp: time.Now()}, nil).Once() - mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceBz) })). - Return([][]byte{hdrBin}, nil).Once() - - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1234), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceDataBz) })). - Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("d1")}, Timestamp: time.Now()}, nil).Once() - mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceDataBz) })). - Return([][]byte{dataBin}, nil).Once() + // Expect Retrieve for header namespace + mockDA.EXPECT().Retrieve(mock.Anything, uint64(1234), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceBz) })). + Return(da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, + Height: 1234, + IDs: [][]byte{[]byte("h1")}, + }, + Data: [][]byte{hdrBin}, + }).Once() + + // Expect Retrieve for data namespace + mockDA.EXPECT().Retrieve(mock.Anything, uint64(1234), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceDataBz) })). 
+ Return(da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, + Height: 1234, + IDs: [][]byte{[]byte("d1")}, + }, + Data: [][]byte{dataBin}, + }).Once() r := newTestDARetriever(t, mockDA, cfg, gen) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index ee69edea7f..b51f0304db 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -14,12 +14,11 @@ import ( "github.com/rs/zerolog" "golang.org/x/sync/errgroup" - coreda "github.com/evstack/ev-node/core/da" coreexecutor "github.com/evstack/ev-node/core/execution" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - "github.com/evstack/ev-node/block/internal/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/store" @@ -31,6 +30,7 @@ type Syncer struct { // Core components store store.Store exec coreexecutor.Executor + da da.DA // Shared components cache cache.CacheManager @@ -45,7 +45,6 @@ type Syncer struct { lastState *atomic.Pointer[types.State] // DA retriever - daClient da.Client daRetrieverHeight *atomic.Uint64 // P2P stores @@ -76,7 +75,7 @@ type Syncer struct { func NewSyncer( store store.Store, exec coreexecutor.Executor, - daClient da.Client, + da da.DA, cache cache.CacheManager, metrics *common.Metrics, config config.Config, @@ -90,7 +89,7 @@ func NewSyncer( return &Syncer{ store: store, exec: exec, - daClient: daClient, + da: da, cache: cache, metrics: metrics, config: config, @@ -115,7 +114,7 @@ func (s *Syncer) Start(ctx context.Context) error { } // Initialize handlers - s.daRetriever = NewDARetriever(s.daClient, s.cache, s.genesis, s.logger) + s.daRetriever = NewDARetriever(s.da, s.cache, s.config, s.genesis, s.logger) s.p2pHandler = NewP2PHandler(s.headerStore.Store(), s.dataStore.Store(), s.cache, s.genesis, s.logger) if currentHeight, err := s.store.Height(s.ctx); err 
!= nil { s.logger.Error().Err(err).Msg("failed to set initial processed height for p2p handler") @@ -285,10 +284,10 @@ func (s *Syncer) fetchDAUntilCaughtUp() error { events, err := s.daRetriever.RetrieveFromDA(s.ctx, daHeight) if err != nil { switch { - case errors.Is(err, coreda.ErrBlobNotFound): + case errors.Is(err, da.ErrBlobNotFound): s.daRetrieverHeight.Store(daHeight + 1) continue // Fetch next height immediately - case errors.Is(err, coreda.ErrHeightFromFuture): + case errors.Is(err, da.ErrHeightFromFuture): s.logger.Debug().Err(err).Uint64("da_height", daHeight).Msg("DA is ahead of local target; backing off future height requests") return nil // Caught up default: diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index 65f2586966..47dab7325e 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -15,7 +15,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/core/execution" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -41,13 +41,13 @@ func TestSyncer_BackoffOnDAError(t *testing.T) { }, "height_from_future_triggers_backoff": { daBlockTime: 500 * time.Millisecond, - error: coreda.ErrHeightFromFuture, + error: da.ErrHeightFromFuture, expectsBackoff: true, description: "Height from future should trigger backoff", }, "blob_not_found_no_backoff": { daBlockTime: 1 * time.Second, - error: coreda.ErrBlobNotFound, + error: da.ErrBlobNotFound, expectsBackoff: false, description: "ErrBlobNotFound should not trigger backoff", }, @@ -111,7 +111,7 @@ func TestSyncer_BackoffOnDAError(t *testing.T) { // Cancel to end test cancel() }). 
- Return(nil, coreda.ErrBlobNotFound).Once() + Return(nil, da.ErrBlobNotFound).Once() } else { // For ErrBlobNotFound, DA height should increment daRetriever.On("RetrieveFromDA", mock.Anything, uint64(101)). @@ -120,7 +120,7 @@ func TestSyncer_BackoffOnDAError(t *testing.T) { callCount++ cancel() }). - Return(nil, coreda.ErrBlobNotFound).Once() + Return(nil, da.ErrBlobNotFound).Once() } // Run sync loop @@ -223,7 +223,7 @@ func TestSyncer_BackoffResetOnSuccess(t *testing.T) { callTimes = append(callTimes, time.Now()) cancel() }). - Return(nil, coreda.ErrBlobNotFound).Once() + Return(nil, da.ErrBlobNotFound).Once() // Start process loop to handle events go syncer.processLoop() @@ -292,7 +292,7 @@ func TestSyncer_BackoffBehaviorIntegration(t *testing.T) { Run(func(args mock.Arguments) { callTimes = append(callTimes, time.Now()) }). - Return(nil, coreda.ErrBlobNotFound).Once() + Return(nil, da.ErrBlobNotFound).Once() // Third call - should continue without delay (DA height incremented) daRetriever.On("RetrieveFromDA", mock.Anything, uint64(101)). @@ -300,7 +300,7 @@ func TestSyncer_BackoffBehaviorIntegration(t *testing.T) { callTimes = append(callTimes, time.Now()) cancel() }). - Return(nil, coreda.ErrBlobNotFound).Once() + Return(nil, da.ErrBlobNotFound).Once() go syncer.processLoop() syncer.startSyncWorkers() diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 5c16da4435..9cfce76adf 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/core/execution" "github.com/evstack/ev-node/pkg/genesis" signerpkg "github.com/evstack/ev-node/pkg/signer" @@ -412,7 +412,7 @@ func TestSyncLoopPersistState(t *testing.T) { }, 1*time.Second, 10*time.Millisecond) cancel() }). 
- Return(nil, coreda.ErrHeightFromFuture) + Return(nil, da.ErrHeightFromFuture) go syncerInst1.processLoop() syncerInst1.startSyncWorkers() @@ -575,9 +575,7 @@ func TestSyncer_InitializeState_CallsReplayer(t *testing.T) { // This test verifies that initializeState() invokes Replayer. // The detailed replay logic is tested in block/internal/common/replay_test.go - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) // Create mocks diff --git a/block/public.go b/block/public.go index f084f2757f..04e20ff07c 100644 --- a/block/public.go +++ b/block/public.go @@ -1,13 +1,7 @@ package block import ( - "time" - "github.com/evstack/ev-node/block/internal/common" - "github.com/evstack/ev-node/block/internal/da" - coreda "github.com/evstack/ev-node/core/da" - "github.com/evstack/ev-node/pkg/config" - "github.com/rs/zerolog" ) // BlockOptions defines the options for creating block components @@ -30,21 +24,3 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { func NopMetrics() *Metrics { return common.NopMetrics() } - -// DAClient is the interface representing the DA client for public use. 
-type DAClient = da.Client - -// NewDAClient creates a new DA client with configuration -func NewDAClient( - daLayer coreda.DA, - config config.Config, - logger zerolog.Logger, -) DAClient { - return da.NewClient(da.Config{ - DA: daLayer, - Logger: logger, - DefaultTimeout: 10 * time.Second, - Namespace: config.DA.GetNamespace(), - DataNamespace: config.DA.GetDataNamespace(), - }) -} diff --git a/core/da/da.go b/core/da/da.go deleted file mode 100644 index 4229f99879..0000000000 --- a/core/da/da.go +++ /dev/null @@ -1,126 +0,0 @@ -package da - -import ( - "context" - "encoding/binary" - "fmt" - "time" -) - -// DA defines very generic interface for interaction with Data Availability layers. -type DA interface { - // Get returns Blob for each given ID, or an error. - // - // Error should be returned if ID is not formatted properly, there is no Blob for given ID or any other client-level - // error occurred (dropped connection, timeout, etc). - Get(ctx context.Context, ids []ID, namespace []byte) ([]Blob, error) - - // GetIDs returns IDs of all Blobs located in DA at given height. - GetIDs(ctx context.Context, height uint64, namespace []byte) (*GetIDsResult, error) - - // GetProofs returns inclusion Proofs for Blobs specified by their IDs. - GetProofs(ctx context.Context, ids []ID, namespace []byte) ([]Proof, error) - - // Commit creates a Commitment for each given Blob. - Commit(ctx context.Context, blobs []Blob, namespace []byte) ([]Commitment, error) - - // Submit submits the Blobs to Data Availability layer. - // - // This method is synchronous. Upon successful submission to Data Availability layer, it returns the IDs identifying blobs - // in DA. - Submit(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte) ([]ID, error) - - // SubmitWithOptions submits the Blobs to Data Availability layer with additional options. 
- SubmitWithOptions(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte, options []byte) ([]ID, error) - - // Validate validates Commitments against the corresponding Proofs. This should be possible without retrieving the Blobs. - Validate(ctx context.Context, ids []ID, proofs []Proof, namespace []byte) ([]bool, error) -} - -// Blob is the data submitted/received from DA interface. -type Blob = []byte - -// ID should contain serialized data required by the implementation to find blob in Data Availability layer. -type ID = []byte - -// Commitment should contain serialized cryptographic commitment to Blob value. -type Commitment = []byte - -// Proof should contain serialized proof of inclusion (publication) of Blob in Data Availability layer. -type Proof = []byte - -// GetIDsResult holds the result of GetIDs call: IDs and timestamp of corresponding block. -type GetIDsResult struct { - IDs []ID - Timestamp time.Time -} - -// ResultSubmit contains information returned from DA layer after block headers/data submission. -type ResultSubmit struct { - BaseResult -} - -// ResultRetrieveHeaders contains batch of block headers returned from DA layer client. -type ResultRetrieve struct { - BaseResult - // Data is the block data retrieved from Data Availability Layer. - // If Code is not equal to StatusSuccess, it has to be nil. - Data [][]byte -} - -// StatusCode is a type for DA layer return status. -// TODO: define an enum of different non-happy-path cases -// that might need to be handled by Evolve independent of -// the underlying DA chain. -type StatusCode uint64 - -// Data Availability return codes. -const ( - StatusUnknown StatusCode = iota - StatusSuccess - StatusNotFound - StatusNotIncludedInBlock - StatusAlreadyInMempool - StatusTooBig - StatusContextDeadline - StatusError - StatusIncorrectAccountSequence - StatusContextCanceled - StatusHeightFromFuture -) - -// BaseResult contains basic information returned by DA layer. 
-type BaseResult struct { - // Code is to determine if the action succeeded. - Code StatusCode - // Message may contain DA layer specific information (like DA block height/hash, detailed error message, etc) - Message string - // Height is the height of the block on Data Availability Layer for given result. - Height uint64 - // SubmittedCount is the number of successfully submitted blocks. - SubmittedCount uint64 - // BlobSize is the size of the blob submitted. - BlobSize uint64 - // IDs is the list of IDs of the blobs submitted. - IDs [][]byte - // Timestamp is the timestamp of the posted data on Data Availability Layer. - Timestamp time.Time -} - -// makeID creates an ID from a height and a commitment. -func makeID(height uint64, commitment []byte) []byte { - id := make([]byte, len(commitment)+8) - binary.LittleEndian.PutUint64(id, height) - copy(id[8:], commitment) - return id -} - -// SplitID splits an ID into a height and a commitment. -// if len(id) <= 8, it returns 0 and nil. -func SplitID(id []byte) (uint64, []byte, error) { - if len(id) <= 8 { - return 0, nil, fmt.Errorf("invalid ID length: %d", len(id)) - } - commitment := id[8:] - return binary.LittleEndian.Uint64(id[:8]), commitment, nil -} diff --git a/core/da/errors.go b/core/da/errors.go deleted file mode 100644 index beac62be54..0000000000 --- a/core/da/errors.go +++ /dev/null @@ -1,16 +0,0 @@ -package da - -import ( - "errors" -) - -var ( - ErrBlobNotFound = errors.New("blob: not found") - ErrBlobSizeOverLimit = errors.New("blob: over size limit") - ErrTxTimedOut = errors.New("timed out waiting for tx to be included in a block") - ErrTxAlreadyInMempool = errors.New("tx already in mempool") - ErrTxIncorrectAccountSequence = errors.New("incorrect account sequence") - ErrContextDeadline = errors.New("context deadline") - ErrHeightFromFuture = errors.New("given height is from the future") - ErrContextCanceled = errors.New("context canceled") -) diff --git a/core/da/namespace.go b/core/da/namespace.go 
deleted file mode 100644 index 057bb29365..0000000000 --- a/core/da/namespace.go +++ /dev/null @@ -1,129 +0,0 @@ -package da - -import ( - "crypto/sha256" - "encoding/hex" - "fmt" - "strings" -) - -// Implemented in accordance to https://celestiaorg.github.io/celestia-app/namespace.html - -const ( - // NamespaceVersionIndex is the index of the namespace version in the byte slice - NamespaceVersionIndex = 0 - // NamespaceVersionSize is the size of the namespace version in bytes - NamespaceVersionSize = 1 - // NamespaceIDSize is the size of the namespace ID in bytes - NamespaceIDSize = 28 - // NamespaceSize is the total size of a namespace (version + ID) in bytes - NamespaceSize = NamespaceVersionSize + NamespaceIDSize - - // NamespaceVersionZero is the only supported user-specifiable namespace version - NamespaceVersionZero = uint8(0) - // NamespaceVersionMax is the max namespace version - NamespaceVersionMax = uint8(255) - - // NamespaceVersionZeroPrefixSize is the number of leading zero bytes required for version 0 - NamespaceVersionZeroPrefixSize = 18 - // NamespaceVersionZeroDataSize is the number of data bytes available for version 0 - NamespaceVersionZeroDataSize = 10 -) - -// Namespace represents a Celestia namespace -type Namespace struct { - Version uint8 - ID [NamespaceIDSize]byte -} - -// Bytes returns the namespace as a byte slice -func (n Namespace) Bytes() []byte { - result := make([]byte, NamespaceSize) - result[NamespaceVersionIndex] = n.Version - copy(result[NamespaceVersionSize:], n.ID[:]) - return result -} - -// IsValidForVersion0 checks if the namespace is valid for version 0 -// Version 0 requires the first 18 bytes of the ID to be zero -func (n Namespace) IsValidForVersion0() bool { - if n.Version != NamespaceVersionZero { - return false - } - - for i := range NamespaceVersionZeroPrefixSize { - if n.ID[i] != 0 { - return false - } - } - return true -} - -// NewNamespaceV0 creates a new version 0 namespace from the provided data -// The data 
should be up to 10 bytes and will be placed in the last 10 bytes of the ID -// The first 18 bytes will be zeros as required by the specification -func NewNamespaceV0(data []byte) (*Namespace, error) { - if len(data) > NamespaceVersionZeroDataSize { - return nil, fmt.Errorf("data too long for version 0 namespace: got %d bytes, max %d", - len(data), NamespaceVersionZeroDataSize) - } - - ns := &Namespace{ - Version: NamespaceVersionZero, - } - - // The first 18 bytes are already zero (Go zero-initializes) - // Copy the data to the last 10 bytes - copy(ns.ID[NamespaceVersionZeroPrefixSize:], data) - - return ns, nil -} - -// NamespaceFromBytes creates a namespace from a 29-byte slice -func NamespaceFromBytes(b []byte) (*Namespace, error) { - if len(b) != NamespaceSize { - return nil, fmt.Errorf("invalid namespace size: expected %d, got %d", NamespaceSize, len(b)) - } - - ns := &Namespace{ - Version: b[NamespaceVersionIndex], - } - copy(ns.ID[:], b[NamespaceVersionSize:]) - - // Validate if it's version 0 - if ns.Version == NamespaceVersionZero && !ns.IsValidForVersion0() { - return nil, fmt.Errorf("invalid version 0 namespace: first %d bytes of ID must be zero", - NamespaceVersionZeroPrefixSize) - } - - return ns, nil -} - -// NamespaceFromString creates a version 0 namespace from a string identifier -// The string is hashed and the first 10 bytes of the hash are used as the namespace data -func NamespaceFromString(s string) *Namespace { - // Hash the string to get consistent bytes - hash := sha256.Sum256([]byte(s)) - - // Use the first 10 bytes of the hash for the namespace data - ns, _ := NewNamespaceV0(hash[:NamespaceVersionZeroDataSize]) - return ns -} - -// HexString returns the hex representation of the namespace -func (n Namespace) HexString() string { - return "0x" + hex.EncodeToString(n.Bytes()) -} - -// ParseHexNamespace parses a hex string into a namespace -func ParseHexNamespace(hexStr string) (*Namespace, error) { - // Remove 0x prefix if present - hexStr 
= strings.TrimPrefix(hexStr, "0x") - - b, err := hex.DecodeString(hexStr) - if err != nil { - return nil, fmt.Errorf("invalid hex string: %w", err) - } - - return NamespaceFromBytes(b) -} diff --git a/core/sequencer/sequencing_test.go b/core/sequencer/sequencing_test.go index 53d51d6e21..dd1ac4c389 100644 --- a/core/sequencer/sequencing_test.go +++ b/core/sequencer/sequencing_test.go @@ -54,9 +54,9 @@ func TestBatchHash(t *testing.T) { name: "transactions with empty data", batch: &Batch{ Transactions: [][]byte{ - []byte{}, + {}, []byte("normal transaction"), - []byte{}, + {}, }, }, wantErr: false, diff --git a/da/celestia/client.go b/da/celestia/client.go new file mode 100644 index 0000000000..6ab10475b8 --- /dev/null +++ b/da/celestia/client.go @@ -0,0 +1,617 @@ +package celestia + +import ( + "context" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "net/http" + "strings" + "time" + + "github.com/filecoin-project/go-jsonrpc" + "github.com/rs/zerolog" + + "github.com/evstack/ev-node/da" +) + +// defaultRetrieveTimeout is the default timeout for DA retrieval operations +const defaultRetrieveTimeout = 10 * time.Second + +// Client connects to celestia-node's blob API via JSON-RPC and implements the da.DA interface. 
+type Client struct { + logger zerolog.Logger + maxBlobSize uint64 + closer jsonrpc.ClientCloser + + Internal struct { + Submit func(ctx context.Context, blobs []*Blob, opts *SubmitOptions) (uint64, error) `perm:"write"` + Get func(ctx context.Context, height uint64, ns Namespace, c Commitment) (*Blob, error) `perm:"read"` + GetAll func(ctx context.Context, height uint64, namespaces []Namespace) ([]*Blob, error) `perm:"read"` + GetProof func(ctx context.Context, height uint64, ns Namespace, c Commitment) (*Proof, error) `perm:"read"` + Included func(ctx context.Context, height uint64, ns Namespace, proof *Proof, c Commitment) (bool, error) `perm:"read"` + } +} + +// NewClient creates a new client connected to celestia-node that implements the da.DA interface. +// Token is obtained from: celestia light auth write +func NewClient( + ctx context.Context, + logger zerolog.Logger, + addr string, + token string, + maxBlobSize uint64, +) (*Client, error) { + if addr == "" { + return nil, fmt.Errorf("address cannot be empty") + } + + if maxBlobSize == 0 { + return nil, fmt.Errorf("maxBlobSize must be greater than 0") + } + + client := &Client{ + logger: logger, + maxBlobSize: maxBlobSize, + } + + authHeader := http.Header{} + if token != "" { + authHeader.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + } + + closer, err := jsonrpc.NewMergeClient( + ctx, + addr, + "blob", + []interface{}{&client.Internal}, + authHeader, + ) + if err != nil { + return nil, fmt.Errorf("failed to create JSON-RPC client: %w", err) + } + + client.closer = closer + + logger.Info(). + Str("address", addr). + Uint64("max_blob_size", maxBlobSize). + Msg("Celestia blob API client created successfully") + + return client, nil +} + +// Close closes the connection. Safe to call multiple times. 
+func (c *Client) Close() { + if c.closer != nil { + c.closer() + c.closer = nil + } + c.logger.Debug().Msg("Celestia client connection closed") +} + +// submit is a private method that submits blobs and returns the height (used internally). +func (c *Client) submit(ctx context.Context, blobs []*Blob, opts *SubmitOptions) (uint64, error) { + c.logger.Debug(). + Int("num_blobs", len(blobs)). + Msg("Submitting blobs to Celestia") + + height, err := c.Internal.Submit(ctx, blobs, opts) + if err != nil { + c.logger.Error(). + Err(err). + Int("num_blobs", len(blobs)). + Msg("Failed to submit blobs") + return 0, fmt.Errorf("failed to submit blobs: %w", err) + } + + c.logger.Info(). + Uint64("height", height). + Int("num_blobs", len(blobs)). + Msg("Successfully submitted blobs") + + return height, nil +} + +// get retrieves a single blob by commitment at a given height and namespace (used internally). +func (c *Client) get(ctx context.Context, height uint64, namespace Namespace, commitment Commitment) (*Blob, error) { + c.logger.Debug(). + Uint64("height", height). + Msg("Getting blob from Celestia") + + blob, err := c.Internal.Get(ctx, height, namespace, commitment) + if err != nil { + c.logger.Error(). + Err(err). + Uint64("height", height). + Msg("Failed to get blob") + return nil, fmt.Errorf("failed to get blob: %w", err) + } + + c.logger.Debug(). + Uint64("height", height). + Int("data_size", len(blob.Data)). + Msg("Successfully retrieved blob") + + return blob, nil +} + +// getAll retrieves all blobs at a given height for the specified namespaces (used internally). +func (c *Client) getAll(ctx context.Context, height uint64, namespaces []Namespace) ([]*Blob, error) { + c.logger.Debug(). + Uint64("height", height). + Int("num_namespaces", len(namespaces)). + Msg("Getting all blobs from Celestia") + + blobs, err := c.Internal.GetAll(ctx, height, namespaces) + if err != nil { + c.logger.Error(). + Err(err). + Uint64("height", height). 
+ Int("num_namespaces", len(namespaces)). + Msg("Failed to get blobs") + return nil, fmt.Errorf("failed to get blobs: %w", err) + } + + c.logger.Debug(). + Uint64("height", height). + Int("num_blobs", len(blobs)). + Msg("Successfully retrieved blobs") + + return blobs, nil +} + +// getProof retrieves the inclusion proof for a blob (used internally). +func (c *Client) getProof(ctx context.Context, height uint64, namespace Namespace, commitment Commitment) (*Proof, error) { + c.logger.Debug(). + Uint64("height", height). + Msg("Getting proof from Celestia") + + proof, err := c.Internal.GetProof(ctx, height, namespace, commitment) + if err != nil { + c.logger.Error(). + Err(err). + Uint64("height", height). + Msg("Failed to get proof") + return nil, fmt.Errorf("failed to get proof: %w", err) + } + + proofSegments := 0 + if proof != nil { + proofSegments = len(*proof) + } + c.logger.Debug(). + Uint64("height", height). + Int("proof_segments", proofSegments). + Msg("Successfully retrieved proof") + + return proof, nil +} + +// included checks whether a blob is included in the Celestia block (used internally). +func (c *Client) included(ctx context.Context, height uint64, namespace Namespace, proof *Proof, commitment Commitment) (bool, error) { + c.logger.Debug(). + Uint64("height", height). + Msg("Checking blob inclusion in Celestia") + + included, err := c.Internal.Included(ctx, height, namespace, proof, commitment) + if err != nil { + c.logger.Error(). + Err(err). + Uint64("height", height). + Msg("Failed to check inclusion") + return false, fmt.Errorf("failed to check inclusion: %w", err) + } + + c.logger.Debug(). + Uint64("height", height). + Bool("included", included). 
+ Msg("Inclusion check completed") + + return included, nil +} + +// DA interface implementation + +func (c *Client) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte) da.ResultSubmit { + return c.SubmitWithOptions(ctx, blobs, gasPrice, namespace, nil) +} + +// Get retrieves blobs by their IDs. +func (c *Client) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Blob, error) { + if len(ids) == 0 { + return []da.Blob{}, nil + } + + // Group IDs by height for efficient retrieval + type blobKey struct { + height uint64 + commitment string + } + heightGroups := make(map[uint64][]Commitment) + idToIndex := make(map[blobKey]int) + + for i, id := range ids { + height, commitment, err := da.SplitID(id) + if err != nil { + return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) + } + heightGroups[height] = append(heightGroups[height], commitment) + idToIndex[blobKey{height, string(commitment)}] = i + } + + // Retrieve blobs for each height + result := make([]da.Blob, len(ids)) + for height := range heightGroups { + blobs, err := c.getAll(ctx, height, []Namespace{namespace}) + if err != nil { + return nil, fmt.Errorf("failed to get blobs at height %d: %w", height, err) + } + + // Match blobs to their original positions + for _, blob := range blobs { + key := blobKey{height, string(blob.Commitment)} + if idx, ok := idToIndex[key]; ok { + result[idx] = blob.Data + } + } + } + + return result, nil +} + +func (c *Client) GetIDs(ctx context.Context, height uint64, namespace []byte) (*da.GetIDsResult, error) { + result := c.Retrieve(ctx, height, namespace) + if result.Code != da.StatusSuccess { + return nil, da.StatusCodeToError(result.Code, result.Message) + } + return &da.GetIDsResult{ + IDs: result.IDs, + Timestamp: result.Timestamp, + }, nil +} + +// GetProofs retrieves inclusion proofs for the given IDs. 
+func (c *Client) GetProofs(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Proof, error) { + if len(ids) == 0 { + return []da.Proof{}, nil + } + + proofs := make([]da.Proof, len(ids)) + for i, id := range ids { + height, commitment, err := da.SplitID(id) + if err != nil { + return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) + } + + proof, err := c.getProof(ctx, height, namespace, commitment) + if err != nil { + return nil, fmt.Errorf("failed to get proof for ID %d: %w", i, err) + } + + encodedProof, err := json.Marshal(proof) + if err != nil { + return nil, fmt.Errorf("failed to marshal proof for ID %d: %w", i, err) + } + proofs[i] = encodedProof + } + + return proofs, nil +} + +// Commit creates commitments for the given blobs. +// Commitments are computed locally using the same algorithm as celestia-node. +func (c *Client) Commit(ctx context.Context, blobs []da.Blob, namespace []byte) ([]da.Commitment, error) { + commitments := make([]da.Commitment, len(blobs)) + for i, blob := range blobs { + commitment, err := CreateCommitment(blob, namespace) + if err != nil { + return nil, fmt.Errorf("failed to create commitment for blob %d: %w", i, err) + } + commitments[i] = commitment + } + return commitments, nil +} + +// Validate validates commitments against proofs. 
+func (c *Client) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof, namespace []byte) ([]bool, error) { + if len(ids) != len(proofs) { + return nil, fmt.Errorf("mismatched lengths: %d IDs vs %d proofs", len(ids), len(proofs)) + } + + results := make([]bool, len(ids)) + for i, id := range ids { + height, commitment, err := da.SplitID(id) + if err != nil { + return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) + } + + var proof Proof + if err := json.Unmarshal(proofs[i], &proof); err != nil { + return nil, fmt.Errorf("failed to decode proof %d: %w", i, err) + } + + included, err := c.included(ctx, height, namespace, &proof, commitment) + if err != nil { + return nil, fmt.Errorf("failed to validate proof %d: %w", i, err) + } + + results[i] = included + } + + return results, nil +} + +// makeID creates an ID from a height and a commitment. +func makeID(height uint64, commitment []byte) []byte { + id := make([]byte, len(commitment)+8) + binary.LittleEndian.PutUint64(id, height) + copy(id[8:], commitment) + return id +} + +func (c *Client) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) da.ResultSubmit { + var blobSize uint64 + for _, blob := range blobs { + blobSize += uint64(len(blob)) + } + + if len(blobs) == 0 { + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, + IDs: []da.ID{}, + Timestamp: time.Now(), + }, + } + } + + if err := ValidateNamespace(namespace); err != nil { + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: fmt.Sprintf("invalid namespace: %s", err.Error()), + BlobSize: blobSize, + }, + } + } + + // Enforce max blob size locally so callers can handle StatusTooBig (used by submitter to split batches) + for i, blob := range blobs { + if uint64(len(blob)) > c.maxBlobSize { + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusTooBig, + Message: fmt.Sprintf("blob %d exceeds max blob size (%d 
> %d)", i, len(blob), c.maxBlobSize), + BlobSize: blobSize, + }, + } + } + } + if blobSize > c.maxBlobSize { + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusTooBig, + Message: fmt.Sprintf("total blob size exceeds max blob size (%d > %d)", blobSize, c.maxBlobSize), + BlobSize: blobSize, + }, + } + } + + celestiaBlobs := make([]*Blob, len(blobs)) + for i, blob := range blobs { + commitment, err := CreateCommitment(blob, namespace) + if err != nil { + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: fmt.Sprintf("failed to create commitment for blob %d: %s", i, err.Error()), + BlobSize: blobSize, + }, + } + } + celestiaBlobs[i] = &Blob{ + Namespace: namespace, + ShareVer: 0, + Data: blob, + Commitment: commitment, + } + } + + opts := &SubmitOptions{} + if len(options) > 0 { + if err := json.Unmarshal(options, opts); err != nil { + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: fmt.Sprintf("failed to unmarshal submit options: %s", err.Error()), + BlobSize: blobSize, + }, + } + } + } + opts.GasPrice = gasPrice + opts.IsGasPriceSet = true + + height, err := c.submit(ctx, celestiaBlobs, opts) + if err != nil { + return c.handleSubmitError(err, blobSize) + } + + ids := make([]da.ID, len(celestiaBlobs)) + for i, blob := range celestiaBlobs { + ids[i] = makeID(height, blob.Commitment) + } + + c.logger.Debug().Int("num_ids", len(ids)).Uint64("height", height).Msg("DA submission successful") + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, + IDs: ids, + SubmittedCount: uint64(len(ids)), + Height: height, + BlobSize: blobSize, + Timestamp: time.Now(), + }, + } +} + +func (c *Client) Retrieve(ctx context.Context, height uint64, namespace []byte) da.ResultRetrieve { + getCtx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout) + defer cancel() + + blobs, err := c.getAll(getCtx, height, []Namespace{namespace}) + if err != nil { + return 
c.handleRetrieveError(err, height) + } + + if len(blobs) == 0 { + c.logger.Debug().Uint64("height", height).Msg("No blobs found at height") + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusNotFound, + Message: da.ErrBlobNotFound.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + + ids := make([]da.ID, len(blobs)) + data := make([][]byte, len(blobs)) + for i, blob := range blobs { + ids[i] = makeID(height, blob.Commitment) + data[i] = blob.Data + } + + c.logger.Debug().Uint64("height", height).Int("num_blobs", len(blobs)).Msg("Successfully retrieved blobs") + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, + Height: height, + IDs: ids, + Timestamp: time.Now(), + }, + Data: data, + } +} + +// handleSubmitError maps errors from the blob API to DA status codes and returns a ResultSubmit. +func (c *Client) handleSubmitError(err error, blobSize uint64) da.ResultSubmit { + status := da.StatusError + message := err.Error() + + var rpcErr *jsonrpc.JSONRPCError + if errors.As(err, &rpcErr) { + switch rpcErr.Code { + case jsonrpc.ErrorCode(da.StatusNotIncludedInBlock): + status = da.StatusNotIncludedInBlock + case jsonrpc.ErrorCode(da.StatusAlreadyInMempool): + status = da.StatusAlreadyInMempool + case jsonrpc.ErrorCode(da.StatusTooBig): + status = da.StatusTooBig + case jsonrpc.ErrorCode(da.StatusIncorrectAccountSequence): + status = da.StatusIncorrectAccountSequence + case jsonrpc.ErrorCode(da.StatusContextDeadline): + status = da.StatusContextDeadline + case jsonrpc.ErrorCode(da.StatusContextCanceled): + status = da.StatusContextCanceled + } + if rpcErr.Message != "" { + message = rpcErr.Message + } + } + + if status == da.StatusError { + errStr := err.Error() + switch { + case errors.Is(err, context.Canceled): + status = da.StatusContextCanceled + case errors.Is(err, context.DeadlineExceeded): + status = da.StatusContextDeadline + case strings.Contains(errStr, "timeout"): + status = 
da.StatusNotIncludedInBlock + case strings.Contains(errStr, "blob(s) too large"), + strings.Contains(errStr, "total blob size too large"), + strings.Contains(errStr, "too large"), + strings.Contains(errStr, "exceeds"): + status = da.StatusTooBig + case strings.Contains(errStr, "already in mempool"), + strings.Contains(errStr, "tx already exists in cache"): + status = da.StatusAlreadyInMempool + case strings.Contains(errStr, "incorrect account sequence"), + strings.Contains(errStr, "account sequence mismatch"): + status = da.StatusIncorrectAccountSequence + } + } + + if status == da.StatusTooBig { + c.logger.Debug().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed") + } else { + c.logger.Error().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed") + } + + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: status, + Message: message, + BlobSize: blobSize, + Timestamp: time.Now(), + }, + } +} + +// handleRetrieveError maps blob API errors to DA status codes and returns a ResultRetrieve. 
+func (c *Client) handleRetrieveError(err error, height uint64) da.ResultRetrieve { + status := da.StatusError + message := err.Error() + + var rpcErr *jsonrpc.JSONRPCError + if errors.As(err, &rpcErr) { + switch rpcErr.Code { + case jsonrpc.ErrorCode(da.StatusNotFound): + status = da.StatusNotFound + case jsonrpc.ErrorCode(da.StatusHeightFromFuture): + status = da.StatusHeightFromFuture + case jsonrpc.ErrorCode(da.StatusContextDeadline): + status = da.StatusContextDeadline + case jsonrpc.ErrorCode(da.StatusContextCanceled): + status = da.StatusContextCanceled + } + if rpcErr.Message != "" { + message = rpcErr.Message + } + } + + if status == da.StatusError { + errStr := err.Error() + switch { + case strings.Contains(errStr, "not found"): + status = da.StatusNotFound + message = da.ErrBlobNotFound.Error() + case strings.Contains(errStr, "height") && strings.Contains(errStr, "future"): + status = da.StatusHeightFromFuture + message = da.ErrHeightFromFuture.Error() + case errors.Is(err, context.Canceled): + status = da.StatusContextCanceled + case errors.Is(err, context.DeadlineExceeded): + status = da.StatusContextDeadline + } + } + + if status == da.StatusNotFound || status == da.StatusHeightFromFuture { + c.logger.Debug().Uint64("height", height).Str("status", fmt.Sprintf("%d", status)).Msg("Retrieve returned non-success status") + } else { + c.logger.Error().Uint64("height", height).Err(err).Uint64("status", uint64(status)).Msg("Failed to retrieve blobs") + } + + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: status, + Message: message, + Height: height, + Timestamp: time.Now(), + }, + } +} diff --git a/da/celestia/client_test.go b/da/celestia/client_test.go new file mode 100644 index 0000000000..4a3a56db26 --- /dev/null +++ b/da/celestia/client_test.go @@ -0,0 +1,225 @@ +package celestia + +import ( + "context" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + 
"github.com/celestiaorg/nmt" +) + +func TestNewClient(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + tests := []struct { + name string + addr string + token string + maxBlobSize uint64 + wantErr bool + errContains string + }{ + { + name: "valid parameters", + addr: "http://localhost:26658", + token: "test-token", + maxBlobSize: 1024 * 1024, + wantErr: false, + }, + { + name: "valid parameters without token", + addr: "http://localhost:26658", + token: "", + maxBlobSize: 1024 * 1024, + wantErr: false, + }, + { + name: "empty address", + addr: "", + token: "test-token", + maxBlobSize: 1024, + wantErr: true, + errContains: "address cannot be empty", + }, + { + name: "zero maxBlobSize", + addr: "http://localhost:26658", + token: "test-token", + maxBlobSize: 0, + wantErr: true, + errContains: "maxBlobSize must be greater than 0", + }, + { + name: "invalid address will fail on connection", + addr: "not-a-valid-url", + token: "test-token", + maxBlobSize: 1024, + wantErr: true, + errContains: "failed to create JSON-RPC client", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client, err := NewClient(ctx, logger, tt.addr, tt.token, tt.maxBlobSize) + + if tt.wantErr { + require.Error(t, err) + if tt.errContains != "" { + assert.Contains(t, err.Error(), tt.errContains) + } + assert.Nil(t, client) + } else { + require.NoError(t, err) + require.NotNil(t, client) + assert.Equal(t, tt.maxBlobSize, client.maxBlobSize) + assert.NotNil(t, client.closer) + + // Clean up + client.Close() + } + }) + } +} + +func TestClient_Close(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + client, err := NewClient(ctx, logger, "http://localhost:26658", "token", 1024*1024) + require.NoError(t, err) + require.NotNil(t, client) + + // Should not panic + assert.NotPanics(t, func() { + client.Close() + }) + + // Should be safe to call multiple times + assert.NotPanics(t, func() { + client.Close() + }) +} + +func 
TestClient_Submit(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + validNamespace := make([]byte, 29) + validBlob := &Blob{ + Namespace: validNamespace, + Data: []byte("test data"), + } + + tests := []struct { + name string + blobs []*Blob + wantRPC bool + }{ + { + name: "single blob", + blobs: []*Blob{validBlob}, + wantRPC: true, + }, + { + name: "multiple blobs", + blobs: []*Blob{ + validBlob, + { + Namespace: validNamespace, + Data: []byte("more data"), + }, + }, + wantRPC: true, + }, + { + name: "empty list delegates to celestia-node", + blobs: []*Blob{}, + wantRPC: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client, err := NewClient(ctx, logger, "http://localhost:26658", "token", 1024*1024) + require.NoError(t, err) + defer client.Close() + + _, err = client.submit(ctx, tt.blobs, nil) + + if tt.wantRPC { + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to submit blobs") + } + }) + } +} + +func TestClient_Get(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + client, err := NewClient(ctx, logger, "http://localhost:26658", "token", 1024*1024) + require.NoError(t, err) + defer client.Close() + + validNamespace := make([]byte, 29) + validCommitment := []byte("commitment") + + _, err = client.get(ctx, 100, validNamespace, validCommitment) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to get blob") +} + +func TestClient_GetAll(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + client, err := NewClient(ctx, logger, "http://localhost:26658", "token", 1024*1024) + require.NoError(t, err) + defer client.Close() + + validNamespace := make([]byte, 29) + namespaces := []Namespace{validNamespace} + + _, err = client.getAll(ctx, 100, namespaces) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to get blobs") +} + +func TestClient_GetProof(t *testing.T) { + logger := zerolog.Nop() + ctx := 
context.Background() + + client, err := NewClient(ctx, logger, "http://localhost:26658", "token", 1024*1024) + require.NoError(t, err) + defer client.Close() + + validNamespace := make([]byte, 29) + validCommitment := []byte("commitment") + + _, err = client.getProof(ctx, 100, validNamespace, validCommitment) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to get proof") +} + +func TestClient_Included(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + client, err := NewClient(ctx, logger, "http://localhost:26658", "token", 1024*1024) + require.NoError(t, err) + defer client.Close() + + validNamespace := make([]byte, 29) + validCommitment := []byte("commitment") + proof := Proof{&nmt.Proof{}} + + _, err = client.included(ctx, 100, validNamespace, &proof, validCommitment) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to check inclusion") +} diff --git a/da/celestia/commitment.go b/da/celestia/commitment.go new file mode 100644 index 0000000000..9963aa1c14 --- /dev/null +++ b/da/celestia/commitment.go @@ -0,0 +1,53 @@ +package celestia + +import ( + "fmt" + + "github.com/celestiaorg/go-square/merkle" + "github.com/celestiaorg/go-square/v3/inclusion" + libshare "github.com/celestiaorg/go-square/v3/share" +) + +// subtreeRootThreshold matches the value used by celestia-app. +// This determines the size of subtrees when computing blob commitments. +const subtreeRootThreshold = 64 + +// CreateCommitment computes the commitment for a blob. +// The commitment is computed using the same algorithm as celestia-node: +// 1. Split the blob data into shares +// 2. Build a Merkle tree over the shares +// 3. 
Return the Merkle root +func CreateCommitment(data []byte, namespace []byte) (Commitment, error) { + // Create namespace from bytes + ns, err := libshare.NewNamespaceFromBytes(namespace) + if err != nil { + return nil, fmt.Errorf("failed to create namespace: %w", err) + } + + // Create a blob with share version 0 (default) + blob, err := libshare.NewBlob(ns, data, libshare.ShareVersionZero, nil) + if err != nil { + return nil, fmt.Errorf("failed to create blob: %w", err) + } + + // Compute commitment using the same function as celestia-node + commitment, err := inclusion.CreateCommitment(blob, merkle.HashFromByteSlices, subtreeRootThreshold) + if err != nil { + return nil, fmt.Errorf("failed to create commitment: %w", err) + } + + return commitment, nil +} + +// CreateCommitments computes commitments for multiple blobs. +func CreateCommitments(data [][]byte, namespace []byte) ([]Commitment, error) { + commitments := make([]Commitment, len(data)) + for i, d := range data { + commitment, err := CreateCommitment(d, namespace) + if err != nil { + return nil, fmt.Errorf("failed to create commitment for blob %d: %w", i, err) + } + commitments[i] = commitment + } + return commitments, nil +} diff --git a/da/celestia/commitment_test.go b/da/celestia/commitment_test.go new file mode 100644 index 0000000000..2299096890 --- /dev/null +++ b/da/celestia/commitment_test.go @@ -0,0 +1,166 @@ +package celestia + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCreateCommitment(t *testing.T) { + // Create a valid 29-byte namespace (version 0 + 28 bytes ID) + namespace := make([]byte, 29) + namespace[0] = 0 // version 0 + + tests := []struct { + name string + data []byte + namespace []byte + wantErr bool + errContains string + }{ + { + name: "valid small blob", + data: []byte("hello world"), + namespace: namespace, + wantErr: false, + }, + { + name: "valid larger blob", + data: make([]byte, 1024), + namespace: 
namespace, + wantErr: false, + }, + { + name: "empty blob not allowed", + data: []byte{}, + namespace: namespace, + wantErr: true, + errContains: "empty", + }, + { + name: "invalid namespace too short", + data: []byte("test"), + namespace: make([]byte, 10), + wantErr: true, + errContains: "namespace", + }, + { + name: "invalid namespace too long", + data: []byte("test"), + namespace: make([]byte, 30), + wantErr: true, + errContains: "namespace", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + commitment, err := CreateCommitment(tt.data, tt.namespace) + + if tt.wantErr { + require.Error(t, err) + if tt.errContains != "" { + assert.Contains(t, err.Error(), tt.errContains) + } + assert.Nil(t, commitment) + } else { + require.NoError(t, err) + require.NotNil(t, commitment) + // Commitment should be non-empty + assert.Greater(t, len(commitment), 0) + } + }) + } +} + +func TestCreateCommitment_Deterministic(t *testing.T) { + // Create a valid namespace + namespace := make([]byte, 29) + namespace[0] = 0 + + data := []byte("test data for deterministic commitment") + + // Create commitment twice + commitment1, err := CreateCommitment(data, namespace) + require.NoError(t, err) + + commitment2, err := CreateCommitment(data, namespace) + require.NoError(t, err) + + // Should be identical + assert.Equal(t, commitment1, commitment2) +} + +func TestCreateCommitment_DifferentData(t *testing.T) { + // Create a valid namespace + namespace := make([]byte, 29) + namespace[0] = 0 + + data1 := []byte("data one") + data2 := []byte("data two") + + commitment1, err := CreateCommitment(data1, namespace) + require.NoError(t, err) + + commitment2, err := CreateCommitment(data2, namespace) + require.NoError(t, err) + + // Should be different + assert.NotEqual(t, commitment1, commitment2) +} + +func TestCreateCommitment_DifferentNamespace(t *testing.T) { + // Create two different valid namespaces + namespace1 := make([]byte, 29) + namespace1[0] = 0 + 
namespace1[28] = 1 + + namespace2 := make([]byte, 29) + namespace2[0] = 0 + namespace2[28] = 2 + + data := []byte("same data") + + commitment1, err := CreateCommitment(data, namespace1) + require.NoError(t, err) + + commitment2, err := CreateCommitment(data, namespace2) + require.NoError(t, err) + + // Should be different due to different namespaces + assert.NotEqual(t, commitment1, commitment2) +} + +func TestCreateCommitments(t *testing.T) { + // Create a valid namespace + namespace := make([]byte, 29) + namespace[0] = 0 + + blobs := [][]byte{ + []byte("blob one"), + []byte("blob two"), + []byte("blob three"), + } + + commitments, err := CreateCommitments(blobs, namespace) + require.NoError(t, err) + require.Len(t, commitments, 3) + + // All commitments should be non-empty and different + for i, c := range commitments { + assert.Greater(t, len(c), 0, "commitment %d should not be empty", i) + } + + assert.NotEqual(t, commitments[0], commitments[1]) + assert.NotEqual(t, commitments[1], commitments[2]) + assert.NotEqual(t, commitments[0], commitments[2]) +} + +func TestCreateCommitments_Empty(t *testing.T) { + namespace := make([]byte, 29) + + commitments, err := CreateCommitments([][]byte{}, namespace) + require.NoError(t, err) + assert.Len(t, commitments, 0) +} diff --git a/da/celestia/types.go b/da/celestia/types.go new file mode 100644 index 0000000000..ac414f5a98 --- /dev/null +++ b/da/celestia/types.go @@ -0,0 +1,73 @@ +package celestia + +import ( + "encoding/json" + "fmt" + + "github.com/celestiaorg/nmt" + + "github.com/evstack/ev-node/da" +) + +// Namespace represents a Celestia namespace (29 bytes: 1 version + 28 ID) +type Namespace []byte + +// Commitment represents a blob commitment (merkle root) +type Commitment []byte + +// Blob represents a Celestia blob with namespace and commitment +type Blob struct { + Namespace Namespace `json:"namespace"` + Data []byte `json:"data"` + ShareVer uint8 `json:"share_version"` + Commitment Commitment 
`json:"commitment"` + Signer []byte `json:"signer,omitempty"` + Index int `json:"index"` +} + +// Proof represents a Celestia inclusion proof +type Proof []*nmt.Proof + +// SubmitOptions contains options for blob submission +type SubmitOptions struct { + GasPrice float64 `json:"gas_price,omitempty"` + IsGasPriceSet bool `json:"is_gas_price_set,omitempty"` + MaxGasPrice float64 `json:"max_gas_price,omitempty"` + Gas uint64 `json:"gas,omitempty"` + TxPriority int `json:"tx_priority,omitempty"` + KeyName string `json:"key_name,omitempty"` + SignerAddress string `json:"signer_address,omitempty"` + FeeGranterAddress string `json:"fee_granter_address,omitempty"` +} + +// MarshalJSON implements json.Marshaler for Proof +func (p Proof) MarshalJSON() ([]byte, error) { + return json.Marshal([]*nmt.Proof(p)) +} + +// UnmarshalJSON implements json.Unmarshaler for Proof +func (p *Proof) UnmarshalJSON(data []byte) error { + var proofs []*nmt.Proof + if err := json.Unmarshal(data, &proofs); err != nil { + return err + } + *p = proofs + return nil +} + +// ValidateNamespace validates that a namespace is properly formatted (29 bytes). 
+func ValidateNamespace(ns Namespace) error { + if len(ns) != da.NamespaceSize { + return fmt.Errorf("invalid namespace size: got %d, expected %d", len(ns), da.NamespaceSize) + } + + parsed, err := da.NamespaceFromBytes(ns) + if err != nil { + return fmt.Errorf("invalid namespace: %w", err) + } + + if parsed.Version != da.NamespaceVersionZero || !parsed.IsValidForVersion0() { + return fmt.Errorf("invalid namespace: only version 0 namespaces with first %d zero bytes are supported", da.NamespaceVersionZeroPrefixSize) + } + return nil +} diff --git a/da/celestia/types_test.go b/da/celestia/types_test.go new file mode 100644 index 0000000000..0d96069c5d --- /dev/null +++ b/da/celestia/types_test.go @@ -0,0 +1,92 @@ +package celestia + +import ( + "encoding/json" + "testing" + + "github.com/celestiaorg/nmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValidateNamespace(t *testing.T) { + tests := []struct { + name string + namespace Namespace + wantErr bool + }{ + { + name: "valid namespace (29 bytes)", + namespace: make([]byte, 29), + wantErr: false, + }, + { + name: "invalid namespace too short", + namespace: make([]byte, 10), + wantErr: true, + }, + { + name: "invalid namespace too long", + namespace: make([]byte, 30), + wantErr: true, + }, + { + name: "invalid namespace empty", + namespace: []byte{}, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := ValidateNamespace(tt.namespace) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestProofJSONMarshaling(t *testing.T) { + proof := Proof{ + &nmt.Proof{}, + } + + // Marshal + data, err := json.Marshal(proof) + require.NoError(t, err) + + // Unmarshal + var decoded Proof + err = json.Unmarshal(data, &decoded) + require.NoError(t, err) + + assert.Equal(t, len(proof), len(decoded)) +} + +func TestSubmitOptionsJSON(t *testing.T) { + opts := &SubmitOptions{ + GasPrice: 
0.002, + IsGasPriceSet: true, + Gas: 100000, + SignerAddress: "celestia1abc123", + FeeGranterAddress: "celestia1feegranter", + } + + // Marshal + data, err := json.Marshal(opts) + require.NoError(t, err) + + // Unmarshal + var decoded SubmitOptions + err = json.Unmarshal(data, &decoded) + require.NoError(t, err) + + assert.Equal(t, opts.GasPrice, decoded.GasPrice) + assert.Equal(t, opts.IsGasPriceSet, decoded.IsGasPriceSet) + assert.Equal(t, opts.Gas, decoded.Gas) + assert.Equal(t, opts.SignerAddress, decoded.SignerAddress) + assert.Equal(t, opts.FeeGranterAddress, decoded.FeeGranterAddress) +} diff --git a/da/cmd/local-da/local.go b/da/cmd/local-da/local.go index bd36393016..8e8ff589f1 100644 --- a/da/cmd/local-da/local.go +++ b/da/cmd/local-da/local.go @@ -7,13 +7,12 @@ import ( "crypto/rand" "crypto/sha256" "encoding/binary" - "encoding/hex" "errors" "fmt" "sync" "time" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/rs/zerolog" ) @@ -57,7 +56,7 @@ func NewLocalDA(logger zerolog.Logger, opts ...func(*LocalDA) *LocalDA) *LocalDA return da } -var _ coreda.DA = &LocalDA{} +var _ da.DA = &LocalDA{} // validateNamespace checks that namespace is exactly 29 bytes func validateNamespace(ns []byte) error { @@ -74,7 +73,7 @@ func (d *LocalDA) MaxBlobSize(ctx context.Context) (uint64, error) { } // Get returns Blobs for given IDs. 
-func (d *LocalDA) Get(ctx context.Context, ids []coreda.ID, ns []byte) ([]coreda.Blob, error) { +func (d *LocalDA) Get(ctx context.Context, ids []da.ID, ns []byte) ([]da.Blob, error) { if err := validateNamespace(ns); err != nil { d.logger.Error().Err(err).Msg("Get: invalid namespace") return nil, err @@ -82,7 +81,7 @@ func (d *LocalDA) Get(ctx context.Context, ids []coreda.ID, ns []byte) ([]coreda d.logger.Debug().Interface("ids", ids).Msg("Get called") d.mu.Lock() defer d.mu.Unlock() - blobs := make([]coreda.Blob, len(ids)) + blobs := make([]da.Blob, len(ids)) for i, id := range ids { if len(id) < 8 { d.logger.Error().Interface("id", id).Msg("Get: invalid ID length") @@ -98,7 +97,7 @@ func (d *LocalDA) Get(ctx context.Context, ids []coreda.ID, ns []byte) ([]coreda } if !found { d.logger.Warn().Interface("id", id).Uint64("height", height).Msg("Get: blob not found") - return nil, coreda.ErrBlobNotFound + return nil, da.ErrBlobNotFound } } d.logger.Debug().Int("count", len(blobs)).Msg("Get successful") @@ -106,36 +105,20 @@ func (d *LocalDA) Get(ctx context.Context, ids []coreda.ID, ns []byte) ([]coreda } // GetIDs returns IDs of Blobs at given DA height. -func (d *LocalDA) GetIDs(ctx context.Context, height uint64, ns []byte) (*coreda.GetIDsResult, error) { - if err := validateNamespace(ns); err != nil { - d.logger.Error().Err(err).Msg("GetIDs: invalid namespace") - return nil, err - } - d.logger.Debug().Uint64("height", height).Msg("GetIDs called") - d.mu.Lock() - defer d.mu.Unlock() - - if height > d.height { - d.logger.Error().Uint64("requested", height).Uint64("current", d.height).Msg("GetIDs: height in future") - return nil, fmt.Errorf("height %d is in the future: %w", height, coreda.ErrHeightFromFuture) +// Delegates to Retrieve. 
+func (d *LocalDA) GetIDs(ctx context.Context, height uint64, ns []byte) (*da.GetIDsResult, error) { + result := d.Retrieve(ctx, height, ns) + if result.Code != da.StatusSuccess { + return nil, da.StatusCodeToError(result.Code, result.Message) } - - kvps, ok := d.data[height] - if !ok { - d.logger.Debug().Uint64("height", height).Msg("GetIDs: no data for height") - return nil, nil - } - - ids := make([]coreda.ID, len(kvps)) - for i, kv := range kvps { - ids[i] = kv.key - } - d.logger.Debug().Int("count", len(ids)).Msg("GetIDs successful") - return &coreda.GetIDsResult{IDs: ids, Timestamp: d.timestamps[height]}, nil + return &da.GetIDsResult{ + IDs: result.IDs, + Timestamp: result.Timestamp, + }, nil } // GetProofs returns inclusion Proofs for all Blobs located in DA at given height. -func (d *LocalDA) GetProofs(ctx context.Context, ids []coreda.ID, ns []byte) ([]coreda.Proof, error) { +func (d *LocalDA) GetProofs(ctx context.Context, ids []da.ID, ns []byte) ([]da.Proof, error) { if err := validateNamespace(ns); err != nil { d.logger.Error().Err(err).Msg("GetProofs: invalid namespace") return nil, err @@ -149,7 +132,7 @@ func (d *LocalDA) GetProofs(ctx context.Context, ids []coreda.ID, ns []byte) ([] d.mu.Lock() defer d.mu.Unlock() - proofs := make([]coreda.Proof, len(blobs)) + proofs := make([]da.Proof, len(blobs)) for i, blob := range blobs { proofs[i] = d.getProof(ids[i], blob) } @@ -158,13 +141,13 @@ func (d *LocalDA) GetProofs(ctx context.Context, ids []coreda.ID, ns []byte) ([] } // Commit returns cryptographic Commitments for given blobs. 
-func (d *LocalDA) Commit(ctx context.Context, blobs []coreda.Blob, ns []byte) ([]coreda.Commitment, error) { +func (d *LocalDA) Commit(ctx context.Context, blobs []da.Blob, ns []byte) ([]da.Commitment, error) { if err := validateNamespace(ns); err != nil { d.logger.Error().Err(err).Msg("Commit: invalid namespace") return nil, err } d.logger.Debug().Int("numBlobs", len(blobs)).Msg("Commit called") - commits := make([]coreda.Commitment, len(blobs)) + commits := make([]da.Commitment, len(blobs)) for i, blob := range blobs { commits[i] = d.getHash(blob) } @@ -172,68 +155,14 @@ func (d *LocalDA) Commit(ctx context.Context, blobs []coreda.Blob, ns []byte) ([ return commits, nil } -// SubmitWithOptions stores blobs in DA layer (options are ignored). -func (d *LocalDA) SubmitWithOptions(ctx context.Context, blobs []coreda.Blob, gasPrice float64, ns []byte, _ []byte) ([]coreda.ID, error) { - if err := validateNamespace(ns); err != nil { - d.logger.Error().Err(err).Msg("SubmitWithOptions: invalid namespace") - return nil, err - } - d.logger.Info().Int("numBlobs", len(blobs)).Float64("gasPrice", gasPrice).Str("namespace", hex.EncodeToString(ns)).Msg("SubmitWithOptions called") - - // Validate blob sizes before processing - for i, blob := range blobs { - if uint64(len(blob)) > d.maxBlobSize { - d.logger.Error().Int("blobIndex", i).Int("blobSize", len(blob)).Uint64("maxBlobSize", d.maxBlobSize).Msg("SubmitWithOptions: blob size exceeds limit") - return nil, coreda.ErrBlobSizeOverLimit - } - } - - d.mu.Lock() - defer d.mu.Unlock() - ids := make([]coreda.ID, len(blobs)) - d.height += 1 - d.timestamps[d.height] = time.Now() - for i, blob := range blobs { - ids[i] = append(d.nextID(), d.getHash(blob)...) - - d.data[d.height] = append(d.data[d.height], kvp{ids[i], blob}) - } - d.logger.Info().Uint64("newHeight", d.height).Int("count", len(ids)).Msg("SubmitWithOptions successful") - return ids, nil -} - -// Submit stores blobs in DA layer (options are ignored). 
-func (d *LocalDA) Submit(ctx context.Context, blobs []coreda.Blob, gasPrice float64, ns []byte) ([]coreda.ID, error) { - if err := validateNamespace(ns); err != nil { - d.logger.Error().Err(err).Msg("Submit: invalid namespace") - return nil, err - } - d.logger.Info().Int("numBlobs", len(blobs)).Float64("gasPrice", gasPrice).Str("namespace", string(ns)).Msg("Submit called") - - // Validate blob sizes before processing - for i, blob := range blobs { - if uint64(len(blob)) > d.maxBlobSize { - d.logger.Error().Int("blobIndex", i).Int("blobSize", len(blob)).Uint64("maxBlobSize", d.maxBlobSize).Msg("Submit: blob size exceeds limit") - return nil, coreda.ErrBlobSizeOverLimit - } - } - - d.mu.Lock() - defer d.mu.Unlock() - ids := make([]coreda.ID, len(blobs)) - d.height += 1 - d.timestamps[d.height] = time.Now() - for i, blob := range blobs { - ids[i] = append(d.nextID(), d.getHash(blob)...) - - d.data[d.height] = append(d.data[d.height], kvp{ids[i], blob}) - } - d.logger.Info().Uint64("newHeight", d.height).Int("count", len(ids)).Msg("Submit successful") - return ids, nil +// Submit stores blobs in DA layer and returns a structured result. +// Delegates to SubmitWithOptions with nil options. +func (d *LocalDA) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte) da.ResultSubmit { + return d.SubmitWithOptions(ctx, blobs, gasPrice, ns, nil) } // Validate checks the Proofs for given IDs. -func (d *LocalDA) Validate(ctx context.Context, ids []coreda.ID, proofs []coreda.Proof, ns []byte) ([]bool, error) { +func (d *LocalDA) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof, ns []byte) ([]bool, error) { if err := validateNamespace(ns); err != nil { d.logger.Error().Err(err).Msg("Validate: invalid namespace") return nil, err @@ -279,3 +208,128 @@ func WithMaxBlobSize(maxBlobSize uint64) func(*LocalDA) *LocalDA { return da } } + +// SubmitWithOptions stores blobs in DA layer with additional options and returns a structured result. 
+// This is the primary implementation - Submit delegates to this method. +func (d *LocalDA) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) da.ResultSubmit { + // Calculate blob size upfront + var blobSize uint64 + for _, blob := range blobs { + blobSize += uint64(len(blob)) + } + + // Validate namespace + if err := validateNamespace(namespace); err != nil { + d.logger.Error().Err(err).Msg("SubmitWithOptions: invalid namespace") + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: err.Error(), + BlobSize: blobSize, + }, + } + } + + // Validate blob sizes before processing + for i, blob := range blobs { + if uint64(len(blob)) > d.maxBlobSize { + d.logger.Error().Int("blobIndex", i).Int("blobSize", len(blob)).Uint64("maxBlobSize", d.maxBlobSize).Msg("SubmitWithOptions: blob size exceeds limit") + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusTooBig, + Message: "failed to submit blobs: " + da.ErrBlobSizeOverLimit.Error(), + BlobSize: blobSize, + }, + } + } + } + + d.mu.Lock() + defer d.mu.Unlock() + + ids := make([]da.ID, len(blobs)) + d.height++ + d.timestamps[d.height] = time.Now() + for i, blob := range blobs { + ids[i] = append(d.nextID(), d.getHash(blob)...) + d.data[d.height] = append(d.data[d.height], kvp{ids[i], blob}) + } + + d.logger.Debug().Int("num_ids", len(ids)).Uint64("height", d.height).Msg("DA submission successful") + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, + IDs: ids, + SubmittedCount: uint64(len(ids)), + Height: d.height, + BlobSize: blobSize, + Timestamp: time.Now(), + }, + } +} + +// Retrieve retrieves all blobs at the given height and returns a structured result. +// This is the primary implementation - GetIDs delegates to this method. 
+func (d *LocalDA) Retrieve(ctx context.Context, height uint64, namespace []byte) da.ResultRetrieve { + // Validate namespace + if err := validateNamespace(namespace); err != nil { + d.logger.Error().Err(err).Msg("Retrieve: invalid namespace") + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: err.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + + d.mu.Lock() + defer d.mu.Unlock() + + // Check height bounds + if height > d.height { + d.logger.Error().Uint64("requested", height).Uint64("current", d.height).Msg("Retrieve: height in future") + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusHeightFromFuture, + Message: da.ErrHeightFromFuture.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + + // Get data at height + kvps, ok := d.data[height] + if !ok || len(kvps) == 0 { + d.logger.Debug().Uint64("height", height).Msg("Retrieve: no data for height") + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusNotFound, + Message: da.ErrBlobNotFound.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + + // Extract IDs and blobs + ids := make([]da.ID, len(kvps)) + blobs := make([][]byte, len(kvps)) + for i, kv := range kvps { + ids[i] = kv.key + blobs[i] = kv.value + } + + d.logger.Debug().Uint64("height", height).Int("num_blobs", len(blobs)).Msg("Successfully retrieved blobs") + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, + Height: height, + IDs: ids, + Timestamp: d.timestamps[height], + }, + Data: blobs, + } +} diff --git a/da/cmd/local-da/main.go b/da/cmd/local-da/main.go index 5823b77156..b861d35a40 100644 --- a/da/cmd/local-da/main.go +++ b/da/cmd/local-da/main.go @@ -9,8 +9,6 @@ import ( "syscall" "github.com/rs/zerolog" - - proxy "github.com/evstack/ev-node/da/jsonrpc" ) const ( @@ -46,7 +44,7 @@ func main() { } da := NewLocalDA(logger, opts...) 
- srv := proxy.NewServer(logger, host, port, da) + srv := NewServer(logger, host, port, da) logger.Info().Str("host", host).Str("port", port).Uint64("maxBlobSize", maxBlobSize).Msg("Listening on") if err := srv.Start(context.Background()); err != nil { logger.Error().Err(err).Msg("error while serving") diff --git a/da/cmd/local-da/server.go b/da/cmd/local-da/server.go new file mode 100644 index 0000000000..18037c17aa --- /dev/null +++ b/da/cmd/local-da/server.go @@ -0,0 +1,295 @@ +package main + +import ( + "context" + "crypto/sha256" + "encoding/binary" + "fmt" + "net" + "net/http" + "sync/atomic" + "time" + + "github.com/filecoin-project/go-jsonrpc" + "github.com/rs/zerolog" + + "github.com/evstack/ev-node/da" +) + +// Blob represents a Celestia-compatible blob for the blob API +type Blob struct { + Namespace []byte `json:"namespace"` + Data []byte `json:"data"` + ShareVer uint32 `json:"share_version"` + Commitment []byte `json:"commitment"` + Index int `json:"index"` +} + +// Proof represents a Celestia-compatible inclusion proof +type Proof struct { + Data []byte `json:"data"` +} + +// SubmitOptions contains options for blob submission +type SubmitOptions struct { + Fee float64 `json:"fee,omitempty"` + GasLimit uint64 `json:"gas_limit,omitempty"` + SignerAddress string `json:"signer_address,omitempty"` +} + +// Server is a jsonrpc service that serves the LocalDA implementation +type Server struct { + logger zerolog.Logger + srv *http.Server + rpc *jsonrpc.RPCServer + listener net.Listener + localDA *LocalDA + + started atomic.Bool +} + +// blobAPI provides Celestia-compatible Blob API methods +type blobAPI struct { + logger zerolog.Logger + localDA *LocalDA +} + +// Submit submits blobs and returns the DA height (Celestia blob API compatible) +func (b *blobAPI) Submit(ctx context.Context, blobs []*Blob, opts *SubmitOptions) (uint64, error) { + b.logger.Debug().Int("num_blobs", len(blobs)).Msg("blob.Submit called") + + if len(blobs) == 0 { + return 0, nil + } + 
+ ns := blobs[0].Namespace + + rawBlobs := make([][]byte, len(blobs)) + for i, blob := range blobs { + rawBlobs[i] = blob.Data + } + + var gasPrice float64 + if opts != nil { + gasPrice = opts.Fee + } + + result := b.localDA.Submit(ctx, rawBlobs, gasPrice, ns) + if result.Code != da.StatusSuccess { + return 0, da.StatusCodeToError(result.Code, result.Message) + } + + b.logger.Info().Uint64("height", result.Height).Int("num_blobs", len(blobs)).Msg("blob.Submit successful") + return result.Height, nil +} + +// Get retrieves a single blob by commitment at a given height (Celestia blob API compatible) +func (b *blobAPI) Get(ctx context.Context, height uint64, ns []byte, commitment []byte) (*Blob, error) { + b.logger.Debug().Uint64("height", height).Msg("blob.Get called") + + blobs, err := b.GetAll(ctx, height, [][]byte{ns}) + if err != nil { + return nil, err + } + + for _, blob := range blobs { + if len(commitment) == 0 || bytesEqual(blob.Commitment, commitment) { + return blob, nil + } + } + + return nil, nil +} + +// GetAll retrieves all blobs at a given height for the specified namespaces (Celestia blob API compatible) +func (b *blobAPI) GetAll(ctx context.Context, height uint64, namespaces [][]byte) ([]*Blob, error) { + b.logger.Debug().Uint64("height", height).Int("num_namespaces", len(namespaces)).Msg("blob.GetAll called") + + if len(namespaces) == 0 { + return []*Blob{}, nil + } + + ns := namespaces[0] + + b.localDA.mu.Lock() + defer b.localDA.mu.Unlock() + + if height > b.localDA.height { + b.logger.Debug().Uint64("requested", height).Uint64("current", b.localDA.height).Msg("blob.GetAll: height in future") + return nil, fmt.Errorf("height %d from future, current height is %d", height, b.localDA.height) + } + + kvps, ok := b.localDA.data[height] + if !ok { + b.logger.Debug().Uint64("height", height).Msg("blob.GetAll: no data for height") + return []*Blob{}, nil + } + + blobs := make([]*Blob, 0, len(kvps)) + for i, kv := range kvps { + var commitment []byte + if 
len(kv.key) > 8 { + commitment = kv.key[8:] + } else { + hash := sha256.Sum256(kv.value) + commitment = hash[:] + } + + blobs = append(blobs, &Blob{ + Namespace: ns, + Data: kv.value, + ShareVer: 0, + Commitment: commitment, + Index: i, + }) + } + + b.logger.Debug().Uint64("height", height).Int("num_blobs", len(blobs)).Msg("blob.GetAll successful") + return blobs, nil +} + +// GetProof retrieves the inclusion proof for a blob (Celestia blob API compatible) +func (b *blobAPI) GetProof(ctx context.Context, height uint64, ns []byte, commitment []byte) (*Proof, error) { + b.logger.Debug().Uint64("height", height).Msg("blob.GetProof called") + + b.localDA.mu.Lock() + defer b.localDA.mu.Unlock() + + kvps, ok := b.localDA.data[height] + if !ok { + return nil, nil + } + + for _, kv := range kvps { + var blobCommitment []byte + if len(kv.key) > 8 { + blobCommitment = kv.key[8:] + } + + if len(commitment) == 0 || bytesEqual(blobCommitment, commitment) { + proof := b.localDA.getProof(kv.key, kv.value) + return &Proof{Data: proof}, nil + } + } + + return nil, nil +} + +// Included checks whether a blob is included in the DA layer (Celestia blob API compatible) +func (b *blobAPI) Included(ctx context.Context, height uint64, ns []byte, proof *Proof, commitment []byte) (bool, error) { + b.logger.Debug().Uint64("height", height).Msg("blob.Included called") + + b.localDA.mu.Lock() + defer b.localDA.mu.Unlock() + + kvps, ok := b.localDA.data[height] + if !ok { + return false, nil + } + + for _, kv := range kvps { + var blobCommitment []byte + if len(kv.key) > 8 { + blobCommitment = kv.key[8:] + } + + if bytesEqual(blobCommitment, commitment) { + return true, nil + } + } + + return false, nil +} + +// bytesEqual compares two byte slices +func bytesEqual(a, b []byte) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +// makeID creates an ID from height and commitment +func makeID(height uint64, 
commitment []byte) []byte { + id := make([]byte, 8+len(commitment)) + binary.LittleEndian.PutUint64(id, height) + copy(id[8:], commitment) + return id +} + +func getKnownErrorsMapping() jsonrpc.Errors { + errs := jsonrpc.NewErrors() + errs.Register(jsonrpc.ErrorCode(da.StatusNotFound), &da.ErrBlobNotFound) + errs.Register(jsonrpc.ErrorCode(da.StatusTooBig), &da.ErrBlobSizeOverLimit) + errs.Register(jsonrpc.ErrorCode(da.StatusNotIncludedInBlock), &da.ErrTxTimedOut) + errs.Register(jsonrpc.ErrorCode(da.StatusAlreadyInMempool), &da.ErrTxAlreadyInMempool) + errs.Register(jsonrpc.ErrorCode(da.StatusIncorrectAccountSequence), &da.ErrTxIncorrectAccountSequence) + errs.Register(jsonrpc.ErrorCode(da.StatusContextDeadline), &da.ErrContextDeadline) + errs.Register(jsonrpc.ErrorCode(da.StatusContextCanceled), &da.ErrContextCanceled) + errs.Register(jsonrpc.ErrorCode(da.StatusHeightFromFuture), &da.ErrHeightFromFuture) + return errs +} + +// NewServer creates a new JSON-RPC server for the LocalDA implementation +func NewServer(logger zerolog.Logger, address, port string, localDA *LocalDA) *Server { + rpc := jsonrpc.NewServer(jsonrpc.WithServerErrors(getKnownErrorsMapping())) + srv := &Server{ + rpc: rpc, + logger: logger, + localDA: localDA, + srv: &http.Server{ + Addr: address + ":" + port, + ReadHeaderTimeout: 2 * time.Second, + }, + } + srv.srv.Handler = http.HandlerFunc(rpc.ServeHTTP) + + // Register Celestia-compatible "blob" namespace API + blobAPIHandler := &blobAPI{ + logger: logger, + localDA: localDA, + } + srv.rpc.Register("blob", blobAPIHandler) + + return srv +} + +// Start starts the RPC Server. 
+func (s *Server) Start(context.Context) error { + couldStart := s.started.CompareAndSwap(false, true) + + if !couldStart { + s.logger.Warn().Msg("cannot start server: already started") + return nil + } + listener, err := net.Listen("tcp", s.srv.Addr) + if err != nil { + return err + } + s.listener = listener + s.logger.Info().Str("listening_on", s.srv.Addr).Msg("server started") + //nolint:errcheck + go s.srv.Serve(listener) + return nil +} + +// Stop stops the RPC Server. +func (s *Server) Stop(ctx context.Context) error { + couldStop := s.started.CompareAndSwap(true, false) + if !couldStop { + s.logger.Warn().Msg("cannot stop server: already stopped") + return nil + } + err := s.srv.Shutdown(ctx) + if err != nil { + return err + } + s.listener = nil + s.logger.Info().Msg("server stopped") + return nil +} diff --git a/da/da.go b/da/da.go new file mode 100644 index 0000000000..ab55e33fa0 --- /dev/null +++ b/da/da.go @@ -0,0 +1,264 @@ +package da + +import ( + "context" + "crypto/sha256" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "strings" + "time" +) + +// DA defines the interface for interaction with Data Availability layers. +type DA interface { + // Get returns Blob for each given ID, or an error. + Get(ctx context.Context, ids []ID, namespace []byte) ([]Blob, error) + + // GetIDs returns IDs of all Blobs located in DA at given height. + GetIDs(ctx context.Context, height uint64, namespace []byte) (*GetIDsResult, error) + + // GetProofs returns inclusion Proofs for Blobs specified by their IDs. + GetProofs(ctx context.Context, ids []ID, namespace []byte) ([]Proof, error) + + // Commit creates a Commitment for each given Blob. + Commit(ctx context.Context, blobs []Blob, namespace []byte) ([]Commitment, error) + + // Submit submits the Blobs to Data Availability layer and returns a structured result. 
+ Submit(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte) ResultSubmit + + // SubmitWithOptions submits the Blobs to Data Availability layer with additional options. + SubmitWithOptions(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte, options []byte) ResultSubmit + + // Retrieve retrieves all blobs at the given height and returns a structured result. + Retrieve(ctx context.Context, height uint64, namespace []byte) ResultRetrieve + + // Validate validates Commitments against the corresponding Proofs. + Validate(ctx context.Context, ids []ID, proofs []Proof, namespace []byte) ([]bool, error) +} + +// Blob is the data submitted/received from DA interface. +type Blob = []byte + +// ID should contain serialized data required by the implementation to find blob in Data Availability layer. +type ID = []byte + +// Commitment should contain serialized cryptographic commitment to Blob value. +type Commitment = []byte + +// Proof should contain serialized proof of inclusion (publication) of Blob in Data Availability layer. +type Proof = []byte + +// GetIDsResult holds the result of GetIDs call: IDs and timestamp of corresponding block. +type GetIDsResult struct { + IDs []ID + Timestamp time.Time +} + +// ResultSubmit contains information returned from DA layer after block headers/data submission. +type ResultSubmit struct { + BaseResult +} + +// ResultRetrieve contains batch of block headers returned from DA layer client. +type ResultRetrieve struct { + BaseResult + // Data is the block data retrieved from Data Availability Layer. + Data [][]byte +} + +// StatusCode is a type for DA layer return status. +type StatusCode uint64 + +// Data Availability return codes. 
+const ( + StatusUnknown StatusCode = iota + StatusSuccess + StatusNotFound + StatusNotIncludedInBlock + StatusAlreadyInMempool + StatusTooBig + StatusContextDeadline + StatusError + StatusIncorrectAccountSequence + StatusContextCanceled + StatusHeightFromFuture +) + +// BaseResult contains basic information returned by DA layer. +type BaseResult struct { + Code StatusCode + Message string + Height uint64 + SubmittedCount uint64 + BlobSize uint64 + IDs [][]byte + Timestamp time.Time +} + +// makeID creates an ID from a height and a commitment. +func makeID(height uint64, commitment []byte) []byte { + id := make([]byte, len(commitment)+8) + binary.LittleEndian.PutUint64(id, height) + copy(id[8:], commitment) + return id +} + +// SplitID splits an ID into a height and a commitment. +func SplitID(id []byte) (uint64, []byte, error) { + if len(id) < 8 { + return 0, nil, fmt.Errorf("invalid ID length: %d", len(id)) + } + commitment := id[8:] + return binary.LittleEndian.Uint64(id[:8]), commitment, nil +} + +// Errors +var ( + ErrBlobNotFound = errors.New("blob: not found") + ErrBlobSizeOverLimit = errors.New("blob: over size limit") + ErrTxTimedOut = errors.New("timed out waiting for tx to be included in a block") + ErrTxAlreadyInMempool = errors.New("tx already in mempool") + ErrTxIncorrectAccountSequence = errors.New("incorrect account sequence") + ErrContextDeadline = errors.New("context deadline") + ErrHeightFromFuture = errors.New("given height is from the future") + ErrContextCanceled = errors.New("context canceled") +) + +// StatusCodeToError converts a StatusCode to its corresponding error. +// Returns nil for StatusSuccess or StatusUnknown. 
+func StatusCodeToError(code StatusCode, message string) error { + switch code { + case StatusSuccess, StatusUnknown: + return nil + case StatusNotFound: + return ErrBlobNotFound + case StatusNotIncludedInBlock: + return ErrTxTimedOut + case StatusAlreadyInMempool: + return ErrTxAlreadyInMempool + case StatusTooBig: + return ErrBlobSizeOverLimit + case StatusContextDeadline: + return ErrContextDeadline + case StatusIncorrectAccountSequence: + return ErrTxIncorrectAccountSequence + case StatusContextCanceled: + return ErrContextCanceled + case StatusHeightFromFuture: + return ErrHeightFromFuture + case StatusError: + return errors.New(message) + default: + return errors.New(message) + } +} + +// Namespace constants and types +const ( + // NamespaceVersionIndex is the index of the namespace version in the byte slice + NamespaceVersionIndex = 0 + // NamespaceVersionSize is the size of the namespace version in bytes + NamespaceVersionSize = 1 + // NamespaceIDSize is the size of the namespace ID in bytes + NamespaceIDSize = 28 + // NamespaceSize is the total size of a namespace (version + ID) in bytes + NamespaceSize = NamespaceVersionSize + NamespaceIDSize + + // NamespaceVersionZero is the only supported user-specifiable namespace version + NamespaceVersionZero = uint8(0) + // NamespaceVersionMax is the max namespace version + NamespaceVersionMax = uint8(255) + + // NamespaceVersionZeroPrefixSize is the number of leading zero bytes required for version 0 + NamespaceVersionZeroPrefixSize = 18 + // NamespaceVersionZeroDataSize is the number of data bytes available for version 0 + NamespaceVersionZeroDataSize = 10 +) + +// Namespace represents a Celestia namespace +type Namespace struct { + Version uint8 + ID [NamespaceIDSize]byte +} + +// Bytes returns the namespace as a byte slice +func (n Namespace) Bytes() []byte { + result := make([]byte, NamespaceSize) + result[NamespaceVersionIndex] = n.Version + copy(result[NamespaceVersionSize:], n.ID[:]) + return result +} + 
+// IsValidForVersion0 checks if the namespace is valid for version 0 +func (n Namespace) IsValidForVersion0() bool { + if n.Version != NamespaceVersionZero { + return false + } + + for i := range NamespaceVersionZeroPrefixSize { + if n.ID[i] != 0 { + return false + } + } + return true +} + +// NewNamespaceV0 creates a new version 0 namespace from the provided data +func NewNamespaceV0(data []byte) (*Namespace, error) { + if len(data) > NamespaceVersionZeroDataSize { + return nil, fmt.Errorf("data too long for version 0 namespace: got %d bytes, max %d", + len(data), NamespaceVersionZeroDataSize) + } + + ns := &Namespace{ + Version: NamespaceVersionZero, + } + + copy(ns.ID[NamespaceVersionZeroPrefixSize:], data) + return ns, nil +} + +// NamespaceFromBytes creates a namespace from a 29-byte slice +func NamespaceFromBytes(b []byte) (*Namespace, error) { + if len(b) != NamespaceSize { + return nil, fmt.Errorf("invalid namespace size: expected %d, got %d", NamespaceSize, len(b)) + } + + ns := &Namespace{ + Version: b[NamespaceVersionIndex], + } + copy(ns.ID[:], b[NamespaceVersionSize:]) + + if ns.Version == NamespaceVersionZero && !ns.IsValidForVersion0() { + return nil, fmt.Errorf("invalid version 0 namespace: first %d bytes of ID must be zero", + NamespaceVersionZeroPrefixSize) + } + + return ns, nil +} + +// NamespaceFromString creates a version 0 namespace from a string identifier +func NamespaceFromString(s string) *Namespace { + hash := sha256.Sum256([]byte(s)) + ns, _ := NewNamespaceV0(hash[:NamespaceVersionZeroDataSize]) + return ns +} + +// HexString returns the hex representation of the namespace +func (n Namespace) HexString() string { + return "0x" + hex.EncodeToString(n.Bytes()) +} + +// ParseHexNamespace parses a hex string into a namespace +func ParseHexNamespace(hexStr string) (*Namespace, error) { + hexStr = strings.TrimPrefix(hexStr, "0x") + + b, err := hex.DecodeString(hexStr) + if err != nil { + return nil, fmt.Errorf("invalid hex string: %w", err) 
+ } + + return NamespaceFromBytes(b) +} diff --git a/core/da/namespace_test.go b/da/da_test.go similarity index 100% rename from core/da/namespace_test.go rename to da/da_test.go diff --git a/da/go.mod b/da/go.mod index 478488dfbc..cf53b0b2fe 100644 --- a/da/go.mod +++ b/da/go.mod @@ -3,14 +3,17 @@ module github.com/evstack/ev-node/da go 1.24.1 require ( - github.com/evstack/ev-node/core v1.0.0-beta.5 + github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 + github.com/celestiaorg/go-square/v3 v3.0.2 github.com/filecoin-project/go-jsonrpc v0.9.0 github.com/rs/zerolog v1.34.0 github.com/stretchr/testify v1.11.1 ) require ( + github.com/celestiaorg/nmt v0.24.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect @@ -25,8 +28,11 @@ require ( go.opencensus.io v0.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect + golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect + golang.org/x/sync v0.17.0 // indirect golang.org/x/sys v0.31.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/da/go.sum b/da/go.sum index cfa1dc32e2..80bbfb1159 100644 --- a/da/go.sum +++ b/da/go.sum @@ -1,5 +1,11 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 h1:PYInrsYzrDIsZW9Yb86OTi2aEKuPcpgJt6Mc0Jlc/yg= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076/go.mod 
h1:hlidgivKyvv7m4Yl2Fdf2mSTmazZYxX8+bnr5IQrI98= +github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= +github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= +github.com/celestiaorg/nmt v0.24.2 h1:LlpJSPOd6/Lw1Ig6HUhZuqiINHLka/ZSRTBzlNJpchg= +github.com/celestiaorg/nmt v0.24.2/go.mod h1:vgLBpWBi8F5KLxTdXSwb7AU4NhiIQ1AQRGa+PzdcLEA= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -13,11 +19,11 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evstack/ev-node/core v1.0.0-beta.5 h1:lgxE8XiF3U9pcFgh7xuKMgsOGvLBGRyd9kc9MR4WL0o= -github.com/evstack/ev-node/core v1.0.0-beta.5/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= github.com/filecoin-project/go-jsonrpc v0.9.0 h1:G47qEF52w7GholpI21vPSTVBFvsrip6geIoqNiqyZtQ= github.com/filecoin-project/go-jsonrpc v0.9.0/go.mod h1:OG7kVBVh/AbDFHIwx7Kw0l9ARmKOS6gGOr0LbdBpbLc= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache 
v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= @@ -40,6 +46,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -48,6 +56,7 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/ipfs/go-log/v2 v2.0.8 h1:3b3YNopMHlj4AvyhWAx0pDxqSQWYi4/WuWO7yRV6/Qg= github.com/ipfs/go-log/v2 v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -89,6 +98,14 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod 
h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -103,24 +120,35 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU= +golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -140,8 +168,13 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -163,6 +196,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/da/internal/mocks/da.go b/da/internal/mocks/da.go index bb3ad63391..afe641eacc 100644 --- a/da/internal/mocks/da.go +++ b/da/internal/mocks/da.go @@ -7,7 +7,7 @@ package mocks import ( "context" - "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/da" mock "github.com/stretchr/testify/mock" ) diff --git a/da/jsonrpc/client.go b/da/jsonrpc/client.go deleted file mode 100644 index 9803ebcd49..0000000000 --- a/da/jsonrpc/client.go +++ /dev/null @@ -1,241 +0,0 @@ -package jsonrpc - -import ( - "context" - "encoding/hex" - "fmt" - "net/http" - "strings" - - "github.com/filecoin-project/go-jsonrpc" - "github.com/rs/zerolog" - - "github.com/evstack/ev-node/core/da" -) - -//go:generate mockgen -destination=mocks/api.go -package=mocks . 
Module -type Module interface { - da.DA -} - -// API defines the jsonrpc service module API -type API struct { - Logger zerolog.Logger - MaxBlobSize uint64 - Internal struct { - Get func(ctx context.Context, ids []da.ID, ns []byte) ([]da.Blob, error) `perm:"read"` - GetIDs func(ctx context.Context, height uint64, ns []byte) (*da.GetIDsResult, error) `perm:"read"` - GetProofs func(ctx context.Context, ids []da.ID, ns []byte) ([]da.Proof, error) `perm:"read"` - Commit func(ctx context.Context, blobs []da.Blob, ns []byte) ([]da.Commitment, error) `perm:"read"` - Validate func(context.Context, []da.ID, []da.Proof, []byte) ([]bool, error) `perm:"read"` - Submit func(context.Context, []da.Blob, float64, []byte) ([]da.ID, error) `perm:"write"` - SubmitWithOptions func(context.Context, []da.Blob, float64, []byte, []byte) ([]da.ID, error) `perm:"write"` - } -} - -// Get returns Blob for each given ID, or an error. -func (api *API) Get(ctx context.Context, ids []da.ID, ns []byte) ([]da.Blob, error) { - api.Logger.Debug().Str("method", "Get").Int("num_ids", len(ids)).Str("namespace", hex.EncodeToString(ns)).Msg("Making RPC call") - res, err := api.Internal.Get(ctx, ids, ns) - if err != nil { - if strings.Contains(err.Error(), context.Canceled.Error()) { - api.Logger.Debug().Str("method", "Get").Msg("RPC call canceled due to context cancellation") - return res, context.Canceled - } - api.Logger.Error().Err(err).Str("method", "Get").Msg("RPC call failed") - // Wrap error for context, potentially using the translated error from the RPC library - return nil, fmt.Errorf("failed to get blobs: %w", err) - } - api.Logger.Debug().Str("method", "Get").Int("num_blobs_returned", len(res)).Msg("RPC call successful") - return res, nil -} - -// GetIDs returns IDs of all Blobs located in DA at given height. 
-func (api *API) GetIDs(ctx context.Context, height uint64, ns []byte) (*da.GetIDsResult, error) { - api.Logger.Debug().Str("method", "GetIDs").Uint64("height", height).Str("namespace", hex.EncodeToString(ns)).Msg("Making RPC call") - res, err := api.Internal.GetIDs(ctx, height, ns) - if err != nil { - // Using strings.contains since JSON RPC serialization doesn't preserve error wrapping - // Check if the error is specifically BlobNotFound, otherwise log and return - if strings.Contains(err.Error(), da.ErrBlobNotFound.Error()) { // Use the error variable directly - api.Logger.Debug().Str("method", "GetIDs").Uint64("height", height).Msg("RPC call indicates blobs not found") - return nil, err // Return the specific ErrBlobNotFound - } - if strings.Contains(err.Error(), da.ErrHeightFromFuture.Error()) { - api.Logger.Debug().Str("method", "GetIDs").Uint64("height", height).Msg("RPC call indicates height from future") - return nil, err // Return the specific ErrHeightFromFuture - } - if strings.Contains(err.Error(), context.Canceled.Error()) { - api.Logger.Debug().Str("method", "GetIDs").Msg("RPC call canceled due to context cancellation") - return res, context.Canceled - } - api.Logger.Error().Err(err).Str("method", "GetIDs").Msg("RPC call failed") - return nil, err - } - - // Handle cases where the RPC call succeeds but returns no IDs - if res == nil || len(res.IDs) == 0 { - api.Logger.Debug().Str("method", "GetIDs").Uint64("height", height).Msg("RPC call successful but no IDs found") - return nil, da.ErrBlobNotFound // Return specific error for not found (use variable directly) - } - - api.Logger.Debug().Str("method", "GetIDs").Msg("RPC call successful") - return res, nil -} - -// GetProofs returns inclusion Proofs for Blobs specified by their IDs. 
-func (api *API) GetProofs(ctx context.Context, ids []da.ID, ns []byte) ([]da.Proof, error) { - api.Logger.Debug().Str("method", "GetProofs").Int("num_ids", len(ids)).Str("namespace", hex.EncodeToString(ns)).Msg("Making RPC call") - res, err := api.Internal.GetProofs(ctx, ids, ns) - if err != nil { - api.Logger.Error().Err(err).Str("method", "GetProofs").Msg("RPC call failed") - } else { - api.Logger.Debug().Str("method", "GetProofs").Int("num_proofs_returned", len(res)).Msg("RPC call successful") - } - return res, err -} - -// Commit creates a Commitment for each given Blob. -func (api *API) Commit(ctx context.Context, blobs []da.Blob, ns []byte) ([]da.Commitment, error) { - api.Logger.Debug().Str("method", "Commit").Int("num_blobs", len(blobs)).Str("namespace", hex.EncodeToString(ns)).Msg("Making RPC call") - res, err := api.Internal.Commit(ctx, blobs, ns) - if err != nil { - api.Logger.Error().Err(err).Str("method", "Commit").Msg("RPC call failed") - } else { - api.Logger.Debug().Str("method", "Commit").Int("num_commitments_returned", len(res)).Msg("RPC call successful") - } - return res, err -} - -// Validate validates Commitments against the corresponding Proofs. This should be possible without retrieving the Blobs. -func (api *API) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof, ns []byte) ([]bool, error) { - api.Logger.Debug().Str("method", "Validate").Int("num_ids", len(ids)).Int("num_proofs", len(proofs)).Str("namespace", hex.EncodeToString(ns)).Msg("Making RPC call") - res, err := api.Internal.Validate(ctx, ids, proofs, ns) - if err != nil { - api.Logger.Error().Err(err).Str("method", "Validate").Msg("RPC call failed") - } else { - api.Logger.Debug().Str("method", "Validate").Int("num_results_returned", len(res)).Msg("RPC call successful") - } - return res, err -} - -// Submit submits the Blobs to Data Availability layer. 
-func (api *API) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte) ([]da.ID, error) { - api.Logger.Debug().Str("method", "Submit").Int("num_blobs", len(blobs)).Float64("gas_price", gasPrice).Str("namespace", hex.EncodeToString(ns)).Msg("Making RPC call") - res, err := api.Internal.Submit(ctx, blobs, gasPrice, ns) - if err != nil { - if strings.Contains(err.Error(), context.Canceled.Error()) { - api.Logger.Debug().Str("method", "Submit").Msg("RPC call canceled due to context cancellation") - return res, context.Canceled - } - api.Logger.Error().Err(err).Str("method", "Submit").Bytes("namespace", ns).Msg("RPC call failed") - } else { - api.Logger.Debug().Str("method", "Submit").Int("num_ids_returned", len(res)).Msg("RPC call successful") - } - return res, err -} - -// SubmitWithOptions submits the Blobs to Data Availability layer with additional options. -// It validates the entire batch against MaxBlobSize before submission. -// If any blob or the total batch size exceeds limits, it returns ErrBlobSizeOverLimit. 
-func (api *API) SubmitWithOptions(ctx context.Context, inputBlobs []da.Blob, gasPrice float64, ns []byte, options []byte) ([]da.ID, error) { - maxBlobSize := api.MaxBlobSize - - if len(inputBlobs) == 0 { - return []da.ID{}, nil - } - - // Validate each blob individually and calculate total size - var totalSize uint64 - for i, blob := range inputBlobs { - blobLen := uint64(len(blob)) - if blobLen > maxBlobSize { - api.Logger.Warn().Int("index", i).Uint64("blobSize", blobLen).Uint64("maxBlobSize", maxBlobSize).Msg("Individual blob exceeds MaxBlobSize") - return nil, da.ErrBlobSizeOverLimit - } - totalSize += blobLen - } - - // Validate total batch size - if totalSize > maxBlobSize { - return nil, da.ErrBlobSizeOverLimit - } - - api.Logger.Debug().Str("method", "SubmitWithOptions").Int("num_blobs", len(inputBlobs)).Uint64("total_size", totalSize).Float64("gas_price", gasPrice).Str("namespace", hex.EncodeToString(ns)).Msg("Making RPC call") - res, err := api.Internal.SubmitWithOptions(ctx, inputBlobs, gasPrice, ns, options) - if err != nil { - if strings.Contains(err.Error(), context.Canceled.Error()) { - api.Logger.Debug().Str("method", "SubmitWithOptions").Msg("RPC call canceled due to context cancellation") - return res, context.Canceled - } - api.Logger.Error().Err(err).Str("method", "SubmitWithOptions").Msg("RPC call failed") - } else { - api.Logger.Debug().Str("method", "SubmitWithOptions").Int("num_ids_returned", len(res)).Msg("RPC call successful") - } - - return res, err -} - -// Client is the jsonrpc client -type Client struct { - DA API - closer multiClientCloser -} - -// multiClientCloser is a wrapper struct to close clients across multiple namespaces. -type multiClientCloser struct { - closers []jsonrpc.ClientCloser -} - -// register adds a new closer to the multiClientCloser -func (m *multiClientCloser) register(closer jsonrpc.ClientCloser) { - m.closers = append(m.closers, closer) -} - -// closeAll closes all saved clients. 
-func (m *multiClientCloser) closeAll() { - for _, closer := range m.closers { - closer() - } -} - -// Close closes the connections to all namespaces registered on the staticClient. -func (c *Client) Close() { - c.closer.closeAll() -} - -// NewClient creates a new Client with one connection per namespace with the -// given token as the authorization token. -func NewClient(ctx context.Context, logger zerolog.Logger, addr, token string, maxBlobSize uint64) (*Client, error) { - authHeader := http.Header{"Authorization": []string{fmt.Sprintf("Bearer %s", token)}} - return newClient(ctx, logger, addr, authHeader, maxBlobSize) -} - -func newClient(ctx context.Context, logger zerolog.Logger, addr string, authHeader http.Header, maxBlobSize uint64) (*Client, error) { - var multiCloser multiClientCloser - var client Client - client.DA.Logger = logger - client.DA.MaxBlobSize = maxBlobSize - - errs := getKnownErrorsMapping() - for name, module := range moduleMap(&client) { - closer, err := jsonrpc.NewMergeClient(ctx, addr, name, []interface{}{module}, authHeader, jsonrpc.WithErrors(errs)) - if err != nil { - // If an error occurs, close any previously opened connections - multiCloser.closeAll() - return nil, err - } - multiCloser.register(closer) - } - - client.closer = multiCloser // Assign the multiCloser to the client - - return &client, nil -} - -func moduleMap(client *Client) map[string]interface{} { - // TODO: this duplication of strings many times across the codebase can be avoided with issue #1176 - return map[string]interface{}{ - "da": &client.DA.Internal, - } -} diff --git a/da/jsonrpc/client_test.go b/da/jsonrpc/client_test.go deleted file mode 100644 index af32882ea9..0000000000 --- a/da/jsonrpc/client_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package jsonrpc - -import ( - "context" - "testing" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - - "github.com/evstack/ev-node/core/da" -) - -// TestSubmitWithOptions_SizeValidation tests the 
corrected behavior of SubmitWithOptions -// where it validates the entire batch before submission and returns ErrBlobSizeOverLimit -// if the batch is too large, instead of silently dropping blobs. -func TestSubmitWithOptions_SizeValidation(t *testing.T) { - logger := zerolog.Nop() - - testCases := []struct { - name string - maxBlobSize uint64 - inputBlobs []da.Blob - expectError bool - expectedError error - description string - }{ - { - name: "Empty input", - maxBlobSize: 1000, - inputBlobs: []da.Blob{}, - expectError: false, - description: "Empty input should return empty result without error", - }, - { - name: "Single blob within limit", - maxBlobSize: 1000, - inputBlobs: []da.Blob{make([]byte, 500)}, - expectError: false, - description: "Single blob smaller than limit should succeed", - }, - { - name: "Single blob exceeds limit", - maxBlobSize: 1000, - inputBlobs: []da.Blob{make([]byte, 1500)}, - expectError: true, - expectedError: da.ErrBlobSizeOverLimit, - description: "Single blob larger than limit should fail", - }, - { - name: "Multiple blobs within limit", - maxBlobSize: 1000, - inputBlobs: []da.Blob{make([]byte, 300), make([]byte, 400), make([]byte, 200)}, - expectError: false, - description: "Multiple blobs totaling less than limit should succeed", - }, - { - name: "Multiple blobs exceed total limit", - maxBlobSize: 1000, - inputBlobs: []da.Blob{make([]byte, 400), make([]byte, 400), make([]byte, 400)}, - expectError: true, - expectedError: da.ErrBlobSizeOverLimit, - description: "Multiple blobs totaling more than limit should fail completely", - }, - { - name: "Mixed: some blobs fit, total exceeds limit", - maxBlobSize: 1000, - inputBlobs: []da.Blob{make([]byte, 100), make([]byte, 200), make([]byte, 800)}, - expectError: true, - expectedError: da.ErrBlobSizeOverLimit, - description: "Should fail completely, not partially submit blobs that fit", - }, - { - name: "One blob exceeds limit individually", - maxBlobSize: 1000, - inputBlobs: 
[]da.Blob{make([]byte, 300), make([]byte, 1500), make([]byte, 200)}, - expectError: true, - expectedError: da.ErrBlobSizeOverLimit, - description: "Should fail if any individual blob exceeds limit", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Create API with test configuration - api := &API{ - Logger: logger, - MaxBlobSize: tc.maxBlobSize, - } - - // Mock the Internal.SubmitWithOptions to always succeed if called - // This tests that our validation logic works before reaching the actual RPC call - mockCalled := false - api.Internal.SubmitWithOptions = func(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) ([]da.ID, error) { - mockCalled = true - // Return mock IDs for successful submissions - ids := make([]da.ID, len(blobs)) - for i := range blobs { - ids[i] = []byte{byte(i)} - } - return ids, nil - } - - // Call SubmitWithOptions - ctx := context.Background() - result, err := api.SubmitWithOptions(ctx, tc.inputBlobs, 1.0, []byte("test"), nil) - - // Verify expectations - if tc.expectError { - assert.Error(t, err, tc.description) - if tc.expectedError != nil { - assert.ErrorIs(t, err, tc.expectedError, tc.description) - } - assert.Nil(t, result, "Result should be nil on error") - assert.False(t, mockCalled, "Internal RPC should not be called when validation fails") - } else { - assert.NoError(t, err, tc.description) - assert.NotNil(t, result, "Result should not be nil on success") - if len(tc.inputBlobs) > 0 { - assert.True(t, mockCalled, "Internal RPC should be called for valid submissions") - assert.Len(t, result, len(tc.inputBlobs), "Should return IDs for all submitted blobs") - } - } - }) - } -} diff --git a/da/jsonrpc/errors.go b/da/jsonrpc/errors.go deleted file mode 100644 index c81040e899..0000000000 --- a/da/jsonrpc/errors.go +++ /dev/null @@ -1,21 +0,0 @@ -package jsonrpc - -import ( - "github.com/filecoin-project/go-jsonrpc" - - coreda "github.com/evstack/ev-node/core/da" 
-) - -// getKnownErrorsMapping returns a mapping of known error codes to their corresponding error types. -func getKnownErrorsMapping() jsonrpc.Errors { - errs := jsonrpc.NewErrors() - errs.Register(jsonrpc.ErrorCode(coreda.StatusNotFound), &coreda.ErrBlobNotFound) - errs.Register(jsonrpc.ErrorCode(coreda.StatusTooBig), &coreda.ErrBlobSizeOverLimit) - errs.Register(jsonrpc.ErrorCode(coreda.StatusContextDeadline), &coreda.ErrTxTimedOut) - errs.Register(jsonrpc.ErrorCode(coreda.StatusAlreadyInMempool), &coreda.ErrTxAlreadyInMempool) - errs.Register(jsonrpc.ErrorCode(coreda.StatusIncorrectAccountSequence), &coreda.ErrTxIncorrectAccountSequence) - errs.Register(jsonrpc.ErrorCode(coreda.StatusContextDeadline), &coreda.ErrContextDeadline) - errs.Register(jsonrpc.ErrorCode(coreda.StatusContextCanceled), &coreda.ErrContextCanceled) - errs.Register(jsonrpc.ErrorCode(coreda.StatusHeightFromFuture), &coreda.ErrHeightFromFuture) - return errs -} diff --git a/da/jsonrpc/proxy_test.go b/da/jsonrpc/proxy_test.go deleted file mode 100644 index 1ab623c037..0000000000 --- a/da/jsonrpc/proxy_test.go +++ /dev/null @@ -1,351 +0,0 @@ -package jsonrpc_test - -import ( - "bytes" - "context" - "errors" - "fmt" - "strings" - "sync" - "testing" - "time" - - "github.com/evstack/ev-node/da/internal/mocks" - proxy "github.com/evstack/ev-node/da/jsonrpc" - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - coreda "github.com/evstack/ev-node/core/da" -) - -const ( - // ServerHost is the listen host for the test JSONRPC server - ServerHost = "localhost" - // ServerPort is the listen port for the test JSONRPC server - ServerPort = "3450" - // ClientURL is the url to dial for the test JSONRPC client - ClientURL = "http://localhost:3450" - - testMaxBlobSize = 100 - - DefaultMaxBlobSize = 2 * 1024 * 1024 // 2MB -) - -// testNamespace is a 15-byte namespace that will be hex encoded to 30 chars and truncated to 
29 -var testNamespace = []byte("test-namespace1") - -// TestProxy runs the go-da DA test suite against the JSONRPC service -// NOTE: This test requires a test JSONRPC service to run on the port -// 3450 which is chosen to be sufficiently distinct from the default port - -func getTestDABlockTime() time.Duration { - return 100 * time.Millisecond -} - -func TestProxy(t *testing.T) { - dummy := coreda.NewDummyDA(100_000, getTestDABlockTime()) - dummy.StartHeightTicker() - logger := zerolog.Nop() - server := proxy.NewServer(logger, ServerHost, ServerPort, dummy) - err := server.Start(context.Background()) - require.NoError(t, err) - defer func() { - if err := server.Stop(context.Background()); err != nil { - require.NoError(t, err) - } - }() - - client, err := proxy.NewClient(context.Background(), logger, ClientURL, "74657374", DefaultMaxBlobSize) - require.NoError(t, err) - - t.Run("Basic DA test", func(t *testing.T) { - BasicDATest(t, &client.DA) - }) - t.Run("Get IDs and all data", func(t *testing.T) { - GetIDsTest(t, &client.DA) - }) - t.Run("Check Errors", func(t *testing.T) { - CheckErrors(t, &client.DA) - }) - t.Run("Concurrent read/write test", func(t *testing.T) { - ConcurrentReadWriteTest(t, &client.DA) - }) - t.Run("Given height is from the future", func(t *testing.T) { - HeightFromFutureTest(t, &client.DA) - }) - dummy.StopHeightTicker() -} - -// BasicDATest tests round trip of messages to DA and back. 
-func BasicDATest(t *testing.T, d coreda.DA) { - msg1 := []byte("message 1") - msg2 := []byte("message 2") - - ctx := t.Context() - id1, err := d.Submit(ctx, []coreda.Blob{msg1}, 0, testNamespace) - assert.NoError(t, err) - assert.NotEmpty(t, id1) - - id2, err := d.Submit(ctx, []coreda.Blob{msg2}, 0, testNamespace) - assert.NoError(t, err) - assert.NotEmpty(t, id2) - - time.Sleep(getTestDABlockTime()) - - id3, err := d.SubmitWithOptions(ctx, []coreda.Blob{msg1}, 0, testNamespace, []byte("random options")) - assert.NoError(t, err) - assert.NotEmpty(t, id3) - - assert.NotEqual(t, id1, id2) - assert.NotEqual(t, id1, id3) - - ret, err := d.Get(ctx, id1, testNamespace) - assert.NoError(t, err) - assert.Equal(t, []coreda.Blob{msg1}, ret) - - commitment1, err := d.Commit(ctx, []coreda.Blob{msg1}, []byte{}) - assert.NoError(t, err) - assert.NotEmpty(t, commitment1) - - commitment2, err := d.Commit(ctx, []coreda.Blob{msg2}, []byte{}) - assert.NoError(t, err) - assert.NotEmpty(t, commitment2) - - ids := []coreda.ID{id1[0], id2[0], id3[0]} - proofs, err := d.GetProofs(ctx, ids, testNamespace) - assert.NoError(t, err) - assert.NotEmpty(t, proofs) - oks, err := d.Validate(ctx, ids, proofs, testNamespace) - assert.NoError(t, err) - assert.NotEmpty(t, oks) - for _, ok := range oks { - assert.True(t, ok) - } -} - -// CheckErrors ensures that errors are handled properly by DA. 
-func CheckErrors(t *testing.T, d coreda.DA) { - ctx := t.Context() - blob, err := d.Get(ctx, []coreda.ID{[]byte("invalid blob id")}, testNamespace) - assert.Error(t, err) - assert.ErrorContains(t, err, coreda.ErrBlobNotFound.Error()) - assert.Empty(t, blob) -} - -// GetIDsTest tests iteration over DA -func GetIDsTest(t *testing.T, d coreda.DA) { - msgs := []coreda.Blob{[]byte("msg1"), []byte("msg2"), []byte("msg3")} - - ctx := t.Context() - ids, err := d.Submit(ctx, msgs, 0, testNamespace) - time.Sleep(getTestDABlockTime()) - assert.NoError(t, err) - assert.Len(t, ids, len(msgs)) - found := false - end := time.Now().Add(1 * time.Second) - - // To Keep It Simple: we assume working with DA used exclusively for this test (mock, devnet, etc) - // As we're the only user, we don't need to handle external data (that could be submitted in real world). - // There is no notion of height, so we need to scan the DA to get test data back. - for i := uint64(1); !found && !time.Now().After(end); i++ { - ret, err := d.GetIDs(ctx, i, testNamespace) - if err != nil { - if strings.Contains(err.Error(), coreda.ErrHeightFromFuture.Error()) { - break - } - t.Error("failed to get IDs:", err) - } - assert.NotNil(t, ret) - assert.NotZero(t, ret.Timestamp) - if len(ret.IDs) > 0 { - blobs, err := d.Get(ctx, ret.IDs, testNamespace) - assert.NoError(t, err) - - // Submit ensures atomicity of batch, so it makes sense to compare actual blobs (bodies) only when lengths - // of slices is the same. 
- if len(blobs) >= len(msgs) { - found = true - for _, msg := range msgs { - msgFound := false - for _, blob := range blobs { - if bytes.Equal(blob, msg) { - msgFound = true - break - } - } - if !msgFound { - found = false - break - } - } - } - } - } - - assert.True(t, found) -} - -// ConcurrentReadWriteTest tests the use of mutex lock in DummyDA by calling separate methods that use `d.data` and making sure there's no race conditions -func ConcurrentReadWriteTest(t *testing.T, d coreda.DA) { - var wg sync.WaitGroup - ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) - defer cancel() - - writeDone := make(chan struct{}) - - wg.Add(1) - go func() { - defer wg.Done() - for i := uint64(1); i <= 50; i++ { - _, err := d.Submit(ctx, []coreda.Blob{[]byte(fmt.Sprintf("test-%d", i))}, 0, []byte("test")) - assert.NoError(t, err) - } - close(writeDone) - }() - - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case <-writeDone: - return - default: - _, _ = d.GetIDs(ctx, 0, []byte("test")) - } - } - }() - - wg.Wait() -} - -// HeightFromFutureTest tests the case when the given height is from the future -func HeightFromFutureTest(t *testing.T, d coreda.DA) { - ctx := t.Context() - _, err := d.GetIDs(ctx, 999999999, []byte("test")) - assert.Error(t, err) - // Specifically check if the error contains the error message ErrHeightFromFuture - assert.ErrorContains(t, err, coreda.ErrHeightFromFuture.Error()) -} - -// TestSubmitWithOptions tests the SubmitWithOptions method with various scenarios -func TestSubmitWithOptions(t *testing.T) { - ctx := context.Background() - testNamespace := "options_test" - // The client will convert the namespace string to a proper Celestia namespace - // using SHA256 hashing and version 0 format (1 version byte + 28 ID bytes) - namespace := coreda.NamespaceFromString(testNamespace) - encodedNamespace := namespace.Bytes() - testOptions := []byte("test_options") - gasPrice := 0.0 - - // Helper function to create a client with a 
mocked internal API - createMockedClient := func(internalAPI *mocks.MockDA) *proxy.Client { - client := &proxy.Client{} - client.DA.Internal.SubmitWithOptions = internalAPI.SubmitWithOptions - client.DA.MaxBlobSize = testMaxBlobSize - client.DA.Logger = zerolog.Nop() - // Test verbosity no longer needed with Nop logger - return client - } - - t.Run("Happy Path - All blobs fit", func(t *testing.T) { - mockAPI := mocks.NewMockDA(t) - client := createMockedClient(mockAPI) - - blobs := []coreda.Blob{[]byte("blob1"), []byte("blob2")} - expectedIDs := []coreda.ID{[]byte("id1"), []byte("id2")} - - mockAPI.On("SubmitWithOptions", ctx, blobs, gasPrice, encodedNamespace, testOptions).Return(expectedIDs, nil).Once() - - ids, err := client.DA.SubmitWithOptions(ctx, blobs, gasPrice, encodedNamespace, testOptions) - - require.NoError(t, err) - assert.Equal(t, expectedIDs, ids) - mockAPI.AssertExpectations(t) - }) - - t.Run("Single Blob Too Large", func(t *testing.T) { - mockAPI := mocks.NewMockDA(t) - client := createMockedClient(mockAPI) - - largerBlob := make([]byte, testMaxBlobSize+1) - blobs := []coreda.Blob{largerBlob, []byte("this blob is definitely too large")} - - _, err := client.DA.SubmitWithOptions(ctx, blobs, gasPrice, encodedNamespace, testOptions) - - require.Error(t, err) - mockAPI.AssertExpectations(t) - }) - - t.Run("Total Size Exceeded", func(t *testing.T) { - mockAPI := mocks.NewMockDA(t) - client := createMockedClient(mockAPI) - - blobsizes := make([]byte, testMaxBlobSize/3) - blobsizesOver := make([]byte, testMaxBlobSize) - - blobs := []coreda.Blob{blobsizes, blobsizes, blobsizesOver} - - ids, err := client.DA.SubmitWithOptions(ctx, blobs, gasPrice, encodedNamespace, testOptions) - - require.Error(t, err) - assert.ErrorIs(t, err, coreda.ErrBlobSizeOverLimit) - assert.Nil(t, ids) - - // Should not call internal RPC when validation fails - mockAPI.AssertNotCalled(t, "SubmitWithOptions", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) 
- mockAPI.AssertExpectations(t) - }) - - t.Run("First Blob Too Large", func(t *testing.T) { - mockAPI := mocks.NewMockDA(t) - client := createMockedClient(mockAPI) - - largerBlob := make([]byte, testMaxBlobSize+1) - blobs := []coreda.Blob{largerBlob, []byte("small")} - - ids, err := client.DA.SubmitWithOptions(ctx, blobs, gasPrice, encodedNamespace, testOptions) - - require.Error(t, err) - assert.ErrorIs(t, err, coreda.ErrBlobSizeOverLimit) - assert.Nil(t, ids) - - mockAPI.AssertNotCalled(t, "SubmitWithOptions", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) - mockAPI.AssertExpectations(t) - }) - - t.Run("Empty Input Blobs", func(t *testing.T) { - mockAPI := mocks.NewMockDA(t) - client := createMockedClient(mockAPI) - - var blobs []coreda.Blob - - ids, err := client.DA.SubmitWithOptions(ctx, blobs, gasPrice, encodedNamespace, testOptions) - - require.NoError(t, err) - assert.Empty(t, ids) - - mockAPI.AssertNotCalled(t, "SubmitWithOptions", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) - mockAPI.AssertExpectations(t) - }) - - t.Run("Error During SubmitWithOptions RPC", func(t *testing.T) { - mockAPI := mocks.NewMockDA(t) - client := createMockedClient(mockAPI) - - blobs := []coreda.Blob{[]byte("blob1")} - expectedError := errors.New("rpc submit failed") - - mockAPI.On("SubmitWithOptions", ctx, blobs, gasPrice, encodedNamespace, testOptions).Return(nil, expectedError).Once() - - ids, err := client.DA.SubmitWithOptions(ctx, blobs, gasPrice, encodedNamespace, testOptions) - - require.Error(t, err) - assert.ErrorIs(t, err, expectedError) - assert.Nil(t, ids) - mockAPI.AssertExpectations(t) - }) -} diff --git a/da/jsonrpc/server.go b/da/jsonrpc/server.go deleted file mode 100644 index 456eefe908..0000000000 --- a/da/jsonrpc/server.go +++ /dev/null @@ -1,135 +0,0 @@ -package jsonrpc - -import ( - "context" - "net" - "net/http" - "sync/atomic" - "time" - - "github.com/filecoin-project/go-jsonrpc" - 
"github.com/rs/zerolog" - - "github.com/evstack/ev-node/core/da" -) - -// Server is a jsonrpc service that can serve the DA interface -type Server struct { - logger zerolog.Logger - srv *http.Server - rpc *jsonrpc.RPCServer - listener net.Listener - daImpl da.DA - - started atomic.Bool -} - -// serverInternalAPI provides the actual RPC methods. -type serverInternalAPI struct { - logger zerolog.Logger - daImpl da.DA -} - -// Get implements the RPC method. -func (s *serverInternalAPI) Get(ctx context.Context, ids []da.ID, ns []byte) ([]da.Blob, error) { - s.logger.Debug().Int("num_ids", len(ids)).Str("namespace", string(ns)).Msg("RPC server: Get called") - return s.daImpl.Get(ctx, ids, ns) -} - -// GetIDs implements the RPC method. -func (s *serverInternalAPI) GetIDs(ctx context.Context, height uint64, ns []byte) (*da.GetIDsResult, error) { - s.logger.Debug().Uint64("height", height).Str("namespace", string(ns)).Msg("RPC server: GetIDs called") - return s.daImpl.GetIDs(ctx, height, ns) -} - -// GetProofs implements the RPC method. -func (s *serverInternalAPI) GetProofs(ctx context.Context, ids []da.ID, ns []byte) ([]da.Proof, error) { - s.logger.Debug().Int("num_ids", len(ids)).Str("namespace", string(ns)).Msg("RPC server: GetProofs called") - return s.daImpl.GetProofs(ctx, ids, ns) -} - -// Commit implements the RPC method. -func (s *serverInternalAPI) Commit(ctx context.Context, blobs []da.Blob, ns []byte) ([]da.Commitment, error) { - s.logger.Debug().Int("num_blobs", len(blobs)).Str("namespace", string(ns)).Msg("RPC server: Commit called") - return s.daImpl.Commit(ctx, blobs, ns) -} - -// Validate implements the RPC method. 
-func (s *serverInternalAPI) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof, ns []byte) ([]bool, error) { - s.logger.Debug().Int("num_ids", len(ids)).Int("num_proofs", len(proofs)).Str("namespace", string(ns)).Msg("RPC server: Validate called") - return s.daImpl.Validate(ctx, ids, proofs, ns) -} - -// Submit implements the RPC method. This is the primary submit method which includes options. -func (s *serverInternalAPI) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte) ([]da.ID, error) { - s.logger.Debug().Int("num_blobs", len(blobs)).Float64("gas_price", gasPrice).Str("namespace", string(ns)).Msg("RPC server: Submit called") - return s.daImpl.Submit(ctx, blobs, gasPrice, ns) -} - -// SubmitWithOptions implements the RPC method. -func (s *serverInternalAPI) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte, options []byte) ([]da.ID, error) { - s.logger.Debug().Int("num_blobs", len(blobs)).Float64("gas_price", gasPrice).Str("namespace", string(ns)).Str("options", string(options)).Msg("RPC server: SubmitWithOptions called") - return s.daImpl.SubmitWithOptions(ctx, blobs, gasPrice, ns, options) -} - -// NewServer accepts the host address port and the DA implementation to serve as a jsonrpc service -func NewServer(logger zerolog.Logger, address, port string, daImplementation da.DA) *Server { - rpc := jsonrpc.NewServer(jsonrpc.WithServerErrors(getKnownErrorsMapping())) - srv := &Server{ - rpc: rpc, - logger: logger, - daImpl: daImplementation, - srv: &http.Server{ - Addr: address + ":" + port, - ReadHeaderTimeout: 2 * time.Second, - }, - } - srv.srv.Handler = http.HandlerFunc(rpc.ServeHTTP) - - apiHandler := &serverInternalAPI{ - logger: logger, - daImpl: daImplementation, - } - - srv.rpc.Register("da", apiHandler) - return srv -} - -// Start starts the RPC Server. 
-// This function can be called multiple times concurrently -// Once started, subsequent calls are a no-op -func (s *Server) Start(context.Context) error { - couldStart := s.started.CompareAndSwap(false, true) - - if !couldStart { - s.logger.Warn().Msg("cannot start server: already started") - return nil - } - listener, err := net.Listen("tcp", s.srv.Addr) - if err != nil { - return err - } - s.listener = listener - s.logger.Info().Str("listening_on", s.srv.Addr).Msg("server started") - //nolint:errcheck - go s.srv.Serve(listener) - return nil -} - -// Stop stops the RPC Server. -// This function can be called multiple times concurrently -// Once stopped, subsequent calls are a no-op -func (s *Server) Stop(ctx context.Context) error { - couldStop := s.started.CompareAndSwap(true, false) - if !couldStop { - s.logger.Warn().Msg("cannot stop server: already stopped") - return nil - } - err := s.srv.Shutdown(ctx) - if err != nil { - return err - } - s.listener = nil - s.logger.Info().Msg("server stopped") - return nil -} diff --git a/core/da/dummy.go b/da/testing.go similarity index 67% rename from core/da/dummy.go rename to da/testing.go index a66622bd77..007d6150cd 100644 --- a/core/da/dummy.go +++ b/da/testing.go @@ -90,34 +90,15 @@ func (d *DummyDA) Get(ctx context.Context, ids []ID, namespace []byte) ([]Blob, } // GetIDs returns IDs of all blobs at the given height. +// Delegates to Retrieve. 
func (d *DummyDA) GetIDs(ctx context.Context, height uint64, namespace []byte) (*GetIDsResult, error) { - d.mu.RLock() - defer d.mu.RUnlock() - - if height > d.currentHeight { - return nil, fmt.Errorf("%w: requested %d, current %d", ErrHeightFromFutureStr, height, d.currentHeight) + result := d.Retrieve(ctx, height, namespace) + if result.Code != StatusSuccess { + return nil, StatusCodeToError(result.Code, result.Message) } - - ids, exists := d.blobsByHeight[height] - if !exists { - return &GetIDsResult{ - IDs: []ID{}, - Timestamp: time.Now(), - }, nil - } - - // Filter IDs by namespace - filteredIDs := make([]ID, 0) - for _, id := range ids { - idStr := string(id) - if ns, exists := d.namespaceByID[idStr]; exists && bytes.Equal(ns, namespace) { - filteredIDs = append(filteredIDs, id) - } - } - return &GetIDsResult{ - IDs: filteredIDs, - Timestamp: d.timestampsByHeight[height], + IDs: result.IDs, + Timestamp: result.Timestamp, }, nil } @@ -151,8 +132,9 @@ func (d *DummyDA) Commit(ctx context.Context, blobs []Blob, namespace []byte) ([ return commitments, nil } -// Submit submits blobs to the DA layer. -func (d *DummyDA) Submit(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte) ([]ID, error) { +// Submit submits blobs to the DA layer and returns a structured result. +// Delegates to SubmitWithOptions with nil options. +func (d *DummyDA) Submit(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte) ResultSubmit { return d.SubmitWithOptions(ctx, blobs, gasPrice, namespace, nil) } @@ -163,36 +145,67 @@ func (d *DummyDA) SetSubmitFailure(shouldFail bool) { d.submitShouldFail = shouldFail } -// SubmitWithOptions submits blobs to the DA layer with additional options. -func (d *DummyDA) SubmitWithOptions(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte, options []byte) ([]ID, error) { +// Validate validates commitments against proofs. 
+func (d *DummyDA) Validate(ctx context.Context, ids []ID, proofs []Proof, namespace []byte) ([]bool, error) { + d.mu.RLock() + defer d.mu.RUnlock() + + if len(ids) != len(proofs) { + return nil, errors.New("number of IDs and proofs must match") + } + + results := make([]bool, len(ids)) + for i, id := range ids { + _, exists := d.blobs[string(id)] + results[i] = exists + } + + return results, nil +} + +// SubmitWithOptions submits blobs to the DA layer with additional options and returns a structured result. +// This is the primary implementation - Submit delegates to this method. +func (d *DummyDA) SubmitWithOptions(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte, options []byte) ResultSubmit { + // Calculate blob size upfront + var blobSize uint64 + for _, blob := range blobs { + blobSize += uint64(len(blob)) + } + d.mu.Lock() defer d.mu.Unlock() // Check if we should simulate failure if d.submitShouldFail { - return nil, errors.New("simulated DA layer failure") + return ResultSubmit{ + BaseResult: BaseResult{ + Code: StatusError, + Message: "simulated DA layer failure", + BlobSize: blobSize, + }, + } } height := d.currentHeight + 1 ids := make([]ID, 0, len(blobs)) var currentSize uint64 - for _, blob := range blobs { // Use _ instead of i + for _, blob := range blobs { blobLen := uint64(len(blob)) // Check individual blob size first if blobLen > d.maxBlobSize { - // Mimic DAClient behavior: if the first blob is too large, return error. - // Otherwise, we would have submitted the previous fitting blobs. - // Since DummyDA processes all at once, we return error if any *individual* blob is too large. - // A more complex dummy could simulate partial submission based on cumulative size. - // For now, error out if any single blob is too big. 
- return nil, ErrBlobSizeOverLimit // Use specific error type + return ResultSubmit{ + BaseResult: BaseResult{ + Code: StatusTooBig, + Message: "failed to submit blobs: " + ErrBlobSizeOverLimit.Error(), + BlobSize: blobSize, + }, + } } // Check cumulative batch size if currentSize+blobLen > d.maxBlobSize { // Stop processing blobs for this batch, return IDs collected so far - // d.logger.Info("DummyDA: Blob size limit reached for batch", "maxBlobSize", d.maxBlobSize, "index", i, "currentSize", currentSize, "nextBlobSize", blobLen) // Removed logger call break } currentSize += blobLen @@ -221,23 +234,80 @@ func (d *DummyDA) SubmitWithOptions(ctx context.Context, blobs []Blob, gasPrice } d.timestampsByHeight[height] = time.Now() - return ids, nil + return ResultSubmit{ + BaseResult: BaseResult{ + Code: StatusSuccess, + IDs: ids, + SubmittedCount: uint64(len(ids)), + Height: height, + BlobSize: blobSize, + Timestamp: time.Now(), + }, + } } -// Validate validates commitments against proofs. -func (d *DummyDA) Validate(ctx context.Context, ids []ID, proofs []Proof, namespace []byte) ([]bool, error) { +// Retrieve retrieves all blobs at the given height and returns a structured result. +// This is the primary implementation - GetIDs delegates to this method. 
+func (d *DummyDA) Retrieve(ctx context.Context, height uint64, namespace []byte) ResultRetrieve { d.mu.RLock() defer d.mu.RUnlock() - if len(ids) != len(proofs) { - return nil, errors.New("number of IDs and proofs must match") + // Check height bounds + if height > d.currentHeight { + return ResultRetrieve{ + BaseResult: BaseResult{ + Code: StatusHeightFromFuture, + Message: ErrHeightFromFuture.Error(), + Height: height, + Timestamp: time.Now(), + }, + } } - results := make([]bool, len(ids)) - for i, id := range ids { - _, exists := d.blobs[string(id)] - results[i] = exists + // Get IDs at height + ids, exists := d.blobsByHeight[height] + if !exists { + return ResultRetrieve{ + BaseResult: BaseResult{ + Code: StatusNotFound, + Message: ErrBlobNotFound.Error(), + Height: height, + Timestamp: time.Now(), + }, + } } - return results, nil + // Filter IDs by namespace and collect blobs + filteredIDs := make([]ID, 0) + blobs := make([]Blob, 0) + for _, id := range ids { + if ns, nsExists := d.namespaceByID[string(id)]; nsExists && bytes.Equal(ns, namespace) { + filteredIDs = append(filteredIDs, id) + if blob, blobExists := d.blobs[string(id)]; blobExists { + blobs = append(blobs, blob) + } + } + } + + // Handle empty result after namespace filtering + if len(filteredIDs) == 0 { + return ResultRetrieve{ + BaseResult: BaseResult{ + Code: StatusNotFound, + Message: ErrBlobNotFound.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + + return ResultRetrieve{ + BaseResult: BaseResult{ + Code: StatusSuccess, + Height: height, + IDs: filteredIDs, + Timestamp: d.timestampsByHeight[height], + }, + Data: blobs, + } } diff --git a/core/da/dummy_test.go b/da/testing_test.go similarity index 84% rename from core/da/dummy_test.go rename to da/testing_test.go index 9538aacc83..3e12579ef2 100644 --- a/core/da/dummy_test.go +++ b/da/testing_test.go @@ -21,11 +21,12 @@ func TestDummyDA(t *testing.T) { []byte("test blob 1"), []byte("test blob 2"), } - ids, err := 
dummyDA.Submit(ctx, blobs, 0, nil) - if err != nil { - t.Fatalf("Submit failed: %v", err) + result := dummyDA.Submit(ctx, blobs, 0, nil) + if result.Code != StatusSuccess { + t.Fatalf("Submit failed: %s", result.Message) } - err = waitForFirstDAHeight(ctx, dummyDA) // Wait for height to increment + ids := result.IDs + err := waitForFirstDAHeight(ctx, dummyDA) // Wait for height to increment if err != nil { t.Fatalf("waitForFirstDAHeight failed: %v", err) } @@ -48,12 +49,12 @@ func TestDummyDA(t *testing.T) { } // Test GetIDs - result, err := dummyDA.GetIDs(ctx, 1, nil) + getIDsResult, err := dummyDA.GetIDs(ctx, 1, nil) if err != nil { t.Fatalf("GetIDs failed: %v", err) } - if len(result.IDs) != len(ids) { - t.Errorf("Expected %d IDs, got %d", len(ids), len(result.IDs)) + if len(getIDsResult.IDs) != len(ids) { + t.Errorf("Expected %d IDs, got %d", len(ids), len(getIDsResult.IDs)) } // Test Commit @@ -90,9 +91,9 @@ func TestDummyDA(t *testing.T) { // Test error case: blob size exceeds maximum largeBlob := make([]byte, 2048) // Larger than our max of 1024 - _, err = dummyDA.Submit(ctx, []Blob{largeBlob}, 0, nil) - if err == nil { - t.Errorf("Expected error for blob exceeding max size, got nil") + largeResult := dummyDA.Submit(ctx, []Blob{largeBlob}, 0, nil) + if largeResult.Code == StatusSuccess { + t.Errorf("Expected error for blob exceeding max size, got success") } } diff --git a/docs/guides/full-node.md b/docs/guides/full-node.md index 0022f65082..d6d597a000 100644 --- a/docs/guides/full-node.md +++ b/docs/guides/full-node.md @@ -4,7 +4,7 @@ This guide covers how to set up a full node to run alongside a sequencer node in a Evolve-based blockchain network. A full node maintains a complete copy of the blockchain and helps validate transactions, improving the network's decentralization and security. 
-> **Note: The guide on how to run an evolve EVM full node can be found [in the evm section](./evm/single#setting-up-a-full-node).** +> **Note: The guide on how to run an evolve EVM full node can be found [in the evm section](./evm/single.md#setting-up-a-full-node).** ## Prerequisites diff --git a/docs/learn/specs/da.md b/docs/learn/specs/da.md index d9f5ce5da7..0d1229e85f 100644 --- a/docs/learn/specs/da.md +++ b/docs/learn/specs/da.md @@ -1,10 +1,10 @@ # DA -Evolve provides a generic [data availability interface][da-interface] for modular blockchains. Any DA that implements this interface can be used with Evolve. +Evolve uses Celestia as its data availability layer through the [data availability interface][da-interface]. ## Details -`Client` can connect via JSON-RPC transports using Evolve's [jsonrpc][jsonrpc] implementations. The connection can be configured using the following cli flags: +The Celestia DA client connects directly to a Celestia node using the blob API. The connection can be configured using the following cli flags: * `--rollkit.da.address`: url address of the DA service (default: "grpc://localhost:26650") * `--rollkit.da.auth_token`: authentication token of the DA service @@ -21,10 +21,10 @@ Each submission first encodes the headers or data using protobuf (the encoded da To make sure that the serialised blocks don't exceed the underlying DA's blob limits, it fetches the blob size limit by calling `Config` which returns the limit as `uint64` bytes, then includes serialised blocks until the limit is reached. If the limit is reached, it submits the partial set and returns the count of successfully submitted blocks as `SubmittedCount`. The caller should retry with the remaining blocks until all the blocks are submitted. If the first block itself is over the limit, it throws an error. 
-The `Submit` call may result in an error (`StatusError`) based on the underlying DA implementations on following scenarios: +The `Submit` call may result in an error (`StatusError`) in the following scenarios: -* the total blobs size exceeds the underlying DA's limits (includes empty blobs) -* the implementation specific failures, e.g., for [celestia-da-json-rpc][jsonrpc], invalid namespace, unable to create the commitment or proof, setting low gas price, etc, could return error. +* the total blobs size exceeds Celestia's blob size limits (includes empty blobs) +* Celestia-specific failures, e.g., invalid namespace, unable to create the commitment or proof, setting low gas price, etc. The retrieval process now supports both legacy single-namespace mode and separate namespace mode: @@ -42,7 +42,7 @@ The retrieval process now supports both legacy single-namespace mode and separat If there are no blocks available for a given DA height in any namespace, `StatusNotFound` is returned (which is not an error case). The retrieved blobs are converted back to headers and data, then combined into complete blocks for processing. -Both header/data submission and retrieval operations may be unsuccessful if the DA node and the DA blockchain that the DA implementation is using have failures. For example, failures such as, DA mempool is full, DA submit transaction is nonce clashing with other transaction from the DA submitter account, DA node is not synced, etc. +Both header/data submission and retrieval operations may be unsuccessful if the Celestia node or the Celestia network have failures. For example, mempool is full, transaction nonce conflicts, node is not synced, etc. 
## Namespace Separation Benefits @@ -57,7 +57,4 @@ The separation of headers and data into different namespaces provides several ad [1] [da-interface][da-interface] -[2] [jsonrpc][jsonrpc] - [da-interface]: https://github.com/evstack/ev-node/blob/main/core/da/da.go#L11 -[jsonrpc]: https://github.com/evstack/ev-node/tree/main/da/jsonrpc diff --git a/go.mod b/go.mod index 163e73152e..a473b3e89e 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/celestiaorg/go-square/v3 v3.0.2 github.com/celestiaorg/utils v0.1.0 github.com/evstack/ev-node/core v1.0.0-beta.5 + github.com/evstack/ev-node/da v0.0.0-00010101000000-000000000000 github.com/go-kit/kit v0.13.0 github.com/goccy/go-yaml v1.18.0 github.com/ipfs/go-datastore v0.9.0 @@ -30,7 +31,6 @@ require ( golang.org/x/net v0.47.0 golang.org/x/sync v0.18.0 google.golang.org/protobuf v1.36.10 - gotest.tools/v3 v3.5.2 ) require ( @@ -54,7 +54,6 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/google/flatbuffers v24.12.23+incompatible // indirect - github.com/google/go-cmp v0.7.0 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect @@ -163,3 +162,7 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.4.1 // indirect ) + +replace github.com/evstack/ev-node/core => ./core + +replace github.com/evstack/ev-node/da => ./da diff --git a/go.sum b/go.sum index 0dac8c7961..f0463efa4c 100644 --- a/go.sum +++ b/go.sum @@ -62,8 +62,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evstack/ev-node/core v1.0.0-beta.5 h1:lgxE8XiF3U9pcFgh7xuKMgsOGvLBGRyd9kc9MR4WL0o= -github.com/evstack/ev-node/core v1.0.0-beta.5/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -651,8 +649,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= -gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/mlc_config.json b/mlc_config.json new file mode 100644 index 0000000000..24b6f78160 --- /dev/null +++ b/mlc_config.json @@ -0,0 +1,9 @@ +{ + "$comment": "Medium.com returns 403 for automated requests in CI, even though the links are valid", + "ignorePatterns": [ + { + "pattern": "^https://medium\\.com" + } + ], + "aliveStatusCodes": [200, 206, 403] +} diff --git a/node/full.go b/node/full.go index 6d03a87c04..c41e7099f8 100644 --- a/node/full.go +++ b/node/full.go @@ -18,9 +18,9 @@ import ( "github.com/evstack/ev-node/block" - coreda 
"github.com/evstack/ev-node/core/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" genesispkg "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/p2p" @@ -53,7 +53,7 @@ type FullNode struct { nodeConfig config.Config - da coreda.DA + da da.DA p2pClient *p2p.Client hSyncService *evsync.HeaderSyncService @@ -75,7 +75,7 @@ func newFullNode( database ds.Batching, exec coreexecutor.Executor, sequencer coresequencer.Sequencer, - da coreda.DA, + da da.DA, metricsProvider MetricsProvider, logger zerolog.Logger, nodeOpts NodeOptions, diff --git a/node/helpers_test.go b/node/helpers_test.go index e77744a4ec..da7f43dbdb 100644 --- a/node/helpers_test.go +++ b/node/helpers_test.go @@ -17,9 +17,9 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/require" - coreda "github.com/evstack/ev-node/core/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" evconfig "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/p2p" @@ -43,10 +43,10 @@ const ( ) // createTestComponents creates test components for node initialization -func createTestComponents(t *testing.T, config evconfig.Config) (coreexecutor.Executor, coresequencer.Sequencer, coreda.DA, *p2p.Client, datastore.Batching, *key.NodeKey, func()) { +func createTestComponents(t *testing.T, config evconfig.Config) (coreexecutor.Executor, coresequencer.Sequencer, da.DA, *p2p.Client, datastore.Batching, *key.NodeKey, func()) { executor := coreexecutor.NewDummyExecutor() sequencer := coresequencer.NewDummySequencer() - dummyDA := coreda.NewDummyDA(100_000, config.DA.BlockTime.Duration) + dummyDA := da.NewDummyDA(100_000, config.DA.BlockTime.Duration) dummyDA.StartHeightTicker() stopDAHeightTicker := func() { @@ -101,7 +101,7 @@ func 
newTestNode( config evconfig.Config, executor coreexecutor.Executor, sequencer coresequencer.Sequencer, - dac coreda.DA, + dac da.DA, p2pClient *p2p.Client, ds datastore.Batching, stopDAHeightTicker func(), @@ -145,7 +145,7 @@ func createNodeWithCustomComponents( config evconfig.Config, executor coreexecutor.Executor, sequencer coresequencer.Sequencer, - dac coreda.DA, + dac da.DA, p2pClient *p2p.Client, ds datastore.Batching, stopDAHeightTicker func(), diff --git a/node/node.go b/node/node.go index 4d780035aa..218e873275 100644 --- a/node/node.go +++ b/node/node.go @@ -5,9 +5,9 @@ import ( "github.com/rs/zerolog" "github.com/evstack/ev-node/block" - coreda "github.com/evstack/ev-node/core/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/p2p" @@ -33,7 +33,7 @@ func NewNode( conf config.Config, exec coreexecutor.Executor, sequencer coresequencer.Sequencer, - da coreda.DA, + da da.DA, signer signer.Signer, p2pClient *p2p.Client, genesis genesis.Genesis, diff --git a/node/single_sequencer_integration_test.go b/node/single_sequencer_integration_test.go index 22b2fd4506..c202ec951f 100644 --- a/node/single_sequencer_integration_test.go +++ b/node/single_sequencer_integration_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" coreexecutor "github.com/evstack/ev-node/core/execution" evconfig "github.com/evstack/ev-node/pkg/config" ) @@ -321,7 +321,7 @@ func TestBatchQueueThrottlingWithDAFailure(t *testing.T) { require.True(ok, "Expected DummyExecutor implementation") // Cast dummyDA to our enhanced version so we can make it fail - dummyDAImpl, ok := dummyDA.(*coreda.DummyDA) + dummyDAImpl, ok := dummyDA.(*da.DummyDA) 
require.True(ok, "Expected DummyDA implementation") // Create node with components diff --git a/pkg/cmd/run_node.go b/pkg/cmd/run_node.go index fe42707f85..f3544a3bbb 100644 --- a/pkg/cmd/run_node.go +++ b/pkg/cmd/run_node.go @@ -16,9 +16,9 @@ import ( "github.com/rs/zerolog" "github.com/spf13/cobra" - coreda "github.com/evstack/ev-node/core/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/node" rollconf "github.com/evstack/ev-node/pkg/config" genesispkg "github.com/evstack/ev-node/pkg/genesis" @@ -82,7 +82,7 @@ func StartNode( cmd *cobra.Command, executor coreexecutor.Executor, sequencer coresequencer.Sequencer, - da coreda.DA, + da da.DA, p2pClient *p2p.Client, datastore datastore.Batching, nodeConfig rollconf.Config, diff --git a/pkg/cmd/run_node_test.go b/pkg/cmd/run_node_test.go index 16430ee450..c4058e0801 100644 --- a/pkg/cmd/run_node_test.go +++ b/pkg/cmd/run_node_test.go @@ -13,9 +13,9 @@ import ( "github.com/spf13/cobra" "github.com/stretchr/testify/assert" - coreda "github.com/evstack/ev-node/core/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/node" rollconf "github.com/evstack/ev-node/pkg/config" genesis "github.com/evstack/ev-node/pkg/genesis" @@ -26,10 +26,10 @@ import ( const MockDANamespace = "test" -func createTestComponents(_ context.Context, t *testing.T) (coreexecutor.Executor, coresequencer.Sequencer, coreda.DA, signer.Signer, *p2p.Client, datastore.Batching, func()) { +func createTestComponents(_ context.Context, t *testing.T) (coreexecutor.Executor, coresequencer.Sequencer, da.DA, signer.Signer, *p2p.Client, datastore.Batching, func()) { executor := coreexecutor.NewDummyExecutor() sequencer := coresequencer.NewDummySequencer() - dummyDA := coreda.NewDummyDA(100_000, 
10*time.Second) + dummyDA := da.NewDummyDA(100_000, 10*time.Second) dummyDA.StartHeightTicker() stopDAHeightTicker := func() { dummyDA.StopHeightTicker() @@ -685,7 +685,7 @@ func newRunNodeCmd( ctx context.Context, executor coreexecutor.Executor, sequencer coresequencer.Sequencer, - dac coreda.DA, + dac da.DA, remoteSigner signer.Signer, p2pClient *p2p.Client, datastore datastore.Batching, diff --git a/pkg/config/config.go b/pkg/config/config.go index d6b1f15539..39187d9401 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -11,7 +11,7 @@ import ( "time" "github.com/celestiaorg/go-square/v3/share" - "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/da" "github.com/mitchellh/mapstructure" "github.com/spf13/cobra" "github.com/spf13/pflag" diff --git a/pkg/rpc/server/da_visualization.go b/pkg/rpc/server/da_visualization.go index ea003c6460..f31830d544 100644 --- a/pkg/rpc/server/da_visualization.go +++ b/pkg/rpc/server/da_visualization.go @@ -11,7 +11,7 @@ import ( "sync" "time" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/rs/zerolog" ) @@ -33,7 +33,7 @@ type DASubmissionInfo struct { // DAVisualizationServer provides DA layer visualization endpoints type DAVisualizationServer struct { - da coreda.DA + da da.DA logger zerolog.Logger submissions []DASubmissionInfo mutex sync.RWMutex @@ -41,7 +41,7 @@ type DAVisualizationServer struct { } // NewDAVisualizationServer creates a new DA visualization server -func NewDAVisualizationServer(da coreda.DA, logger zerolog.Logger, isAggregator bool) *DAVisualizationServer { +func NewDAVisualizationServer(da da.DA, logger zerolog.Logger, isAggregator bool) *DAVisualizationServer { return &DAVisualizationServer{ da: da, logger: logger, @@ -52,7 +52,7 @@ func NewDAVisualizationServer(da coreda.DA, logger zerolog.Logger, isAggregator // RecordSubmission records a DA submission for visualization // Only keeps the last 100 submissions in memory for the 
dashboard display -func (s *DAVisualizationServer) RecordSubmission(result *coreda.ResultSubmit, gasPrice float64, numBlobs uint64) { +func (s *DAVisualizationServer) RecordSubmission(result *da.ResultSubmit, gasPrice float64, numBlobs uint64) { s.mutex.Lock() defer s.mutex.Unlock() @@ -83,27 +83,27 @@ func (s *DAVisualizationServer) RecordSubmission(result *coreda.ResultSubmit, ga } // getStatusCodeString converts status code to human-readable string -func (s *DAVisualizationServer) getStatusCodeString(code coreda.StatusCode) string { +func (s *DAVisualizationServer) getStatusCodeString(code da.StatusCode) string { switch code { - case coreda.StatusSuccess: + case da.StatusSuccess: return "Success" - case coreda.StatusNotFound: + case da.StatusNotFound: return "Not Found" - case coreda.StatusNotIncludedInBlock: + case da.StatusNotIncludedInBlock: return "Not Included In Block" - case coreda.StatusAlreadyInMempool: + case da.StatusAlreadyInMempool: return "Already In Mempool" - case coreda.StatusTooBig: + case da.StatusTooBig: return "Too Big" - case coreda.StatusContextDeadline: + case da.StatusContextDeadline: return "Context Deadline" - case coreda.StatusError: + case da.StatusError: return "Error" - case coreda.StatusIncorrectAccountSequence: + case da.StatusIncorrectAccountSequence: return "Incorrect Account Sequence" - case coreda.StatusContextCanceled: + case da.StatusContextCanceled: return "Context Canceled" - case coreda.StatusHeightFromFuture: + case da.StatusHeightFromFuture: return "Height From Future" default: return "Unknown" @@ -173,7 +173,7 @@ func (s *DAVisualizationServer) handleDABlobDetails(w http.ResponseWriter, r *ht // Extract namespace - using empty namespace for now, could be parameterized namespace := []byte{} - blobs, err := s.da.Get(ctx, []coreda.ID{id}, namespace) + blobs, err := s.da.Get(ctx, []da.ID{id}, namespace) if err != nil { s.logger.Error().Err(err).Str("blob_id", blobID).Msg("Failed to retrieve blob from DA") http.Error(w, 
fmt.Sprintf("Failed to retrieve blob: %v", err), http.StatusInternalServerError) @@ -186,7 +186,7 @@ func (s *DAVisualizationServer) handleDABlobDetails(w http.ResponseWriter, r *ht } // Parse the blob ID to extract height and commitment - height, commitment, err := coreda.SplitID(id) + height, commitment, err := da.SplitID(id) if err != nil { s.logger.Error().Err(err).Str("blob_id", blobID).Msg("Failed to split blob ID") } diff --git a/pkg/rpc/server/da_visualization_test.go b/pkg/rpc/server/da_visualization_test.go index 80b9a1408c..e08af48d66 100644 --- a/pkg/rpc/server/da_visualization_test.go +++ b/pkg/rpc/server/da_visualization_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - coreda "github.com/evstack/ev-node/core/da" + dapkg "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/test/mocks" "github.com/rs/zerolog" @@ -34,9 +34,9 @@ func TestRecordSubmission(t *testing.T) { server := NewDAVisualizationServer(da, logger, true) // Test recording a successful submission - result := &coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, + result := &dapkg.ResultSubmit{ + BaseResult: dapkg.BaseResult{ + Code: dapkg.StatusSuccess, Height: 100, BlobSize: 1024, Timestamp: time.Now(), @@ -67,9 +67,9 @@ func TestRecordSubmissionMemoryLimit(t *testing.T) { // Add 101 submissions (more than the limit of 100) for i := 0; i < 101; i++ { - result := &coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, + result := &dapkg.ResultSubmit{ + BaseResult: dapkg.BaseResult{ + Code: dapkg.StatusSuccess, Height: uint64(i), BlobSize: uint64(i * 10), Timestamp: time.Now(), @@ -92,15 +92,15 @@ func TestGetStatusCodeString(t *testing.T) { server := NewDAVisualizationServer(da, logger, true) tests := []struct { - code coreda.StatusCode + code dapkg.StatusCode expected string }{ - {coreda.StatusSuccess, "Success"}, - {coreda.StatusNotFound, "Not Found"}, - {coreda.StatusError, 
"Error"}, - {coreda.StatusTooBig, "Too Big"}, - {coreda.StatusContextDeadline, "Context Deadline"}, - {coreda.StatusUnknown, "Unknown"}, + {dapkg.StatusSuccess, "Success"}, + {dapkg.StatusNotFound, "Not Found"}, + {dapkg.StatusError, "Error"}, + {dapkg.StatusTooBig, "Too Big"}, + {dapkg.StatusContextDeadline, "Context Deadline"}, + {dapkg.StatusUnknown, "Unknown"}, } for _, tt := range tests { @@ -115,9 +115,9 @@ func TestHandleDASubmissions(t *testing.T) { server := NewDAVisualizationServer(da, logger, true) // Add a test submission - result := &coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, + result := &dapkg.ResultSubmit{ + BaseResult: dapkg.BaseResult{ + Code: dapkg.StatusSuccess, Height: 100, BlobSize: 1024, Timestamp: time.Now(), @@ -188,9 +188,9 @@ func TestHandleDAVisualizationHTML(t *testing.T) { server := NewDAVisualizationServer(da, logger, true) // Add a test submission - result := &coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, + result := &dapkg.ResultSubmit{ + BaseResult: dapkg.BaseResult{ + Code: dapkg.StatusSuccess, Height: 100, BlobSize: 1024, Timestamp: time.Now(), @@ -239,9 +239,9 @@ func TestRegisterCustomHTTPEndpointsDAVisualization(t *testing.T) { server := NewDAVisualizationServer(da, logger, true) // Add test submission - result := &coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, + result := &dapkg.ResultSubmit{ + BaseResult: dapkg.BaseResult{ + Code: dapkg.StatusSuccess, Height: 100, BlobSize: 1024, Timestamp: time.Now(), diff --git a/pkg/rpc/server/server.go b/pkg/rpc/server/server.go index e0abed2de0..cb4a22b295 100644 --- a/pkg/rpc/server/server.go +++ b/pkg/rpc/server/server.go @@ -13,7 +13,7 @@ import ( "connectrpc.com/connect" "connectrpc.com/grpcreflect" goheader "github.com/celestiaorg/go-header" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" ds "github.com/ipfs/go-datastore" 
"github.com/rs/zerolog" "golang.org/x/net/http2" @@ -111,7 +111,7 @@ func (s *StoreServer) GetBlock( // Fetch and set DA heights blockHeight := header.Height() if blockHeight > 0 { // DA heights are not stored for genesis/height 0 in the current impl - headerDAHeightKey := store.GetHeightToDAHeightHeaderKey(blockHeight) + headerDAHeightKey := fmt.Sprintf("%s/%d/h", store.HeightToDAHeightKey, blockHeight) headerDAHeightBytes, err := s.store.GetMetadata(ctx, headerDAHeightKey) if err == nil && len(headerDAHeightBytes) == 8 { resp.HeaderDaHeight = binary.LittleEndian.Uint64(headerDAHeightBytes) @@ -119,7 +119,7 @@ func (s *StoreServer) GetBlock( s.logger.Error().Uint64("height", blockHeight).Err(err).Msg("Error fetching header DA height for block") } - dataDAHeightKey := store.GetHeightToDAHeightDataKey(blockHeight) + dataDAHeightKey := fmt.Sprintf("%s/%d/d", store.HeightToDAHeightKey, blockHeight) dataDAHeightBytes, err := s.store.GetMetadata(ctx, dataDAHeightKey) if err == nil && len(dataDAHeightBytes) == 8 { resp.DataDaHeight = binary.LittleEndian.Uint64(dataDAHeightBytes) @@ -182,27 +182,44 @@ func (s *StoreServer) GetGenesisDaHeight( } -// GetP2PStoreInfo implements the GetP2PStoreInfo RPC method +// GetMetadata implements the GetMetadata RPC method +func (s *StoreServer) GetMetadata( + ctx context.Context, + req *connect.Request[pb.GetMetadataRequest], +) (*connect.Response[pb.GetMetadataResponse], error) { + value, err := s.store.GetMetadata(ctx, req.Msg.Key) + if err != nil { + return nil, connect.NewError(connect.CodeNotFound, err) + } + + return connect.NewResponse(&pb.GetMetadataResponse{ + Value: value, + }), nil +} + +// GetP2PStoreInfo returns head/tail information for the go-header stores used by P2P sync. 
func (s *StoreServer) GetP2PStoreInfo( ctx context.Context, _ *connect.Request[emptypb.Empty], ) (*connect.Response[pb.GetP2PStoreInfoResponse], error) { - snapshots := make([]*pb.P2PStoreSnapshot, 0, 2) + var snapshots []*pb.P2PStoreSnapshot + // Header store snapshot if s.headerStore != nil { - snapshot, err := collectP2PStoreSnapshot(ctx, s.headerStore, "Header Store") + snap, err := s.buildHeaderStoreSnapshot(ctx, "Header Store") if err != nil { - return nil, connect.NewError(connect.CodeInternal, err) + return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("header store: %w", err)) } - snapshots = append(snapshots, snapshot) + snapshots = append(snapshots, snap) } + // Data store snapshot if s.dataStore != nil { - snapshot, err := collectP2PStoreSnapshot(ctx, s.dataStore, "Data Store") + snap, err := s.buildDataStoreSnapshot(ctx, "Data Store") if err != nil { - return nil, connect.NewError(connect.CodeInternal, err) + return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("data store: %w", err)) } - snapshots = append(snapshots, snapshot) + snapshots = append(snapshots, snap) } return connect.NewResponse(&pb.GetP2PStoreInfoResponse{ @@ -210,61 +227,64 @@ func (s *StoreServer) GetP2PStoreInfo( }), nil } -// GetMetadata implements the GetMetadata RPC method -func (s *StoreServer) GetMetadata( - ctx context.Context, - req *connect.Request[pb.GetMetadataRequest], -) (*connect.Response[pb.GetMetadataResponse], error) { - value, err := s.store.GetMetadata(ctx, req.Msg.Key) - if err != nil { - return nil, connect.NewError(connect.CodeNotFound, err) - } - - return connect.NewResponse(&pb.GetMetadataResponse{ - Value: value, - }), nil -} - -func collectP2PStoreSnapshot[H goheader.Header[H]]( - ctx context.Context, - store goheader.Store[H], - label string, -) (*pb.P2PStoreSnapshot, error) { - snapshot := &pb.P2PStoreSnapshot{ - Label: label, - Height: store.Height(), - } +// buildHeaderStoreSnapshot builds a P2PStoreSnapshot from the header store +func 
(s *StoreServer) buildHeaderStoreSnapshot(ctx context.Context, label string) (*pb.P2PStoreSnapshot, error) { + height := s.headerStore.Height() - if head, err := store.Head(ctx); err == nil { - snapshot.Head = toP2PStoreEntry(head) - } else if !errors.Is(err, goheader.ErrEmptyStore) && !errors.Is(err, goheader.ErrNotFound) { - return nil, fmt.Errorf("failed to read %s head: %w", label, err) + head, err := s.headerStore.Head(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get head: %w", err) } - if tail, err := store.Tail(ctx); err == nil { - snapshot.Tail = toP2PStoreEntry(tail) - } else if !errors.Is(err, goheader.ErrEmptyStore) && !errors.Is(err, goheader.ErrNotFound) { - return nil, fmt.Errorf("failed to read %s tail: %w", label, err) + tail, err := s.headerStore.Tail(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get tail: %w", err) } - return snapshot, nil + return &pb.P2PStoreSnapshot{ + Label: label, + Height: height, + Head: &pb.P2PStoreEntry{ + Height: head.Height(), + Hash: head.Hash()[:], + Time: timestamppb.New(head.Time()), + }, + Tail: &pb.P2PStoreEntry{ + Height: tail.Height(), + Hash: tail.Hash()[:], + Time: timestamppb.New(tail.Time()), + }, + }, nil } -func toP2PStoreEntry[H goheader.Header[H]](item H) *pb.P2PStoreEntry { - if any(item) == nil { - return nil - } +// buildDataStoreSnapshot builds a P2PStoreSnapshot from the data store +func (s *StoreServer) buildDataStoreSnapshot(ctx context.Context, label string) (*pb.P2PStoreSnapshot, error) { + height := s.dataStore.Height() - entry := &pb.P2PStoreEntry{ - Height: item.Height(), - Hash: append([]byte(nil), item.Hash()...), + head, err := s.dataStore.Head(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get head: %w", err) } - if ts := item.Time(); !ts.IsZero() { - entry.Time = timestamppb.New(ts) + tail, err := s.dataStore.Tail(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get tail: %w", err) } - return entry + return &pb.P2PStoreSnapshot{ + Label: 
label, + Height: height, + Head: &pb.P2PStoreEntry{ + Height: head.Height(), + Hash: head.Hash()[:], + Time: timestamppb.New(head.Time()), + }, + Tail: &pb.P2PStoreEntry{ + Height: tail.Height(), + Hash: tail.Hash()[:], + Time: timestamppb.New(tail.Time()), + }, + }, nil } type ConfigServer struct { @@ -286,8 +306,8 @@ func (cs *ConfigServer) GetNamespace( req *connect.Request[emptypb.Empty], ) (*connect.Response[pb.GetNamespaceResponse], error) { - hns := coreda.NamespaceFromString(cs.config.DA.GetNamespace()) - dns := coreda.NamespaceFromString(cs.config.DA.GetDataNamespace()) + hns := da.NamespaceFromString(cs.config.DA.GetNamespace()) + dns := da.NamespaceFromString(cs.config.DA.GetDataNamespace()) return connect.NewResponse(&pb.GetNamespaceResponse{ HeaderNamespace: hns.HexString(), diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index dbc5bc567c..c3b3c14200 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -10,8 +10,8 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/rs/zerolog" - coreda "github.com/evstack/ev-node/core/da" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" ) // ErrInvalidId is returned when the chain id is invalid @@ -28,7 +28,7 @@ type Sequencer struct { proposer bool Id []byte - da coreda.DA + da da.DA batchTime time.Duration @@ -42,7 +42,7 @@ func NewSequencer( ctx context.Context, logger zerolog.Logger, db ds.Batching, - da coreda.DA, + da da.DA, id []byte, batchTime time.Duration, metrics *Metrics, @@ -56,7 +56,7 @@ func NewSequencerWithQueueSize( ctx context.Context, logger zerolog.Logger, db ds.Batching, - da coreda.DA, + da da.DA, id []byte, batchTime time.Duration, metrics *Metrics, @@ -130,7 +130,7 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB // RecordMetrics updates the metrics with the given values. 
// This method is intended to be called by the block manager after submitting data to the DA layer. -func (c *Sequencer) RecordMetrics(gasPrice float64, blobSize uint64, statusCode coreda.StatusCode, numPendingBlocks uint64, includedBlockHeight uint64) { +func (c *Sequencer) RecordMetrics(gasPrice float64, blobSize uint64, statusCode da.StatusCode, numPendingBlocks uint64, includedBlockHeight uint64) { if c.metrics != nil { c.metrics.GasPrice.Set(gasPrice) c.metrics.LastBlobSize.Set(float64(blobSize)) diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go index 5362b49040..e82e17cf0e 100644 --- a/sequencers/single/sequencer_test.go +++ b/sequencers/single/sequencer_test.go @@ -13,14 +13,14 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - coreda "github.com/evstack/ev-node/core/da" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" damocks "github.com/evstack/ev-node/test/mocks" ) func TestNewSequencer(t *testing.T) { // Create a new sequencer with mock DA client - dummyDA := coreda.NewDummyDA(100_000_000, 10*time.Second) + dummyDA := da.NewDummyDA(100_000_000, 10*time.Second) metrics, _ := NopMetrics() db := ds.NewMapDatastore() ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) @@ -38,22 +38,15 @@ func TestNewSequencer(t *testing.T) { }() // Check if the sequencer was created with the correct values - if seq == nil { - t.Fatal("Expected sequencer to not be nil") - } - - if seq.queue == nil { - t.Fatal("Expected batch queue to not be nil") - } - if seq.da == nil { - t.Fatal("Expected DA client to not be nil") - } + require.NotNil(t, seq, "Expected sequencer to not be nil") + require.NotNil(t, seq.queue, "Expected batch queue to not be nil") + require.NotNil(t, seq.da, "Expected DA client to not be nil") } func TestSequencer_SubmitBatchTxs(t *testing.T) { // Initialize a new sequencer metrics, _ := NopMetrics() - dummyDA := 
coreda.NewDummyDA(100_000_000, 10*time.Second) + dummyDA := da.NewDummyDA(100_000_000, 10*time.Second) db := ds.NewMapDatastore() ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() @@ -106,7 +99,7 @@ func TestSequencer_SubmitBatchTxs(t *testing.T) { func TestSequencer_SubmitBatchTxs_EmptyBatch(t *testing.T) { // Initialize a new sequencer metrics, _ := NopMetrics() - dummyDA := coreda.NewDummyDA(100_000_000, 10*time.Second) + dummyDA := da.NewDummyDA(100_000_000, 10*time.Second) db := ds.NewMapDatastore() ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() @@ -447,7 +440,7 @@ func TestSequencer_RecordMetrics(t *testing.T) { // Test values gasPrice := 1.5 blobSize := uint64(1024) - statusCode := coreda.StatusSuccess + statusCode := da.StatusSuccess numPendingBlocks := uint64(5) includedBlockHeight := uint64(100) @@ -470,7 +463,7 @@ func TestSequencer_RecordMetrics(t *testing.T) { // Test values gasPrice := 2.0 blobSize := uint64(2048) - statusCode := coreda.StatusNotIncludedInBlock + statusCode := da.StatusNotIncludedInBlock numPendingBlocks := uint64(3) includedBlockHeight := uint64(200) @@ -495,13 +488,13 @@ func TestSequencer_RecordMetrics(t *testing.T) { // Test different status codes testCases := []struct { name string - statusCode coreda.StatusCode + statusCode da.StatusCode }{ - {"Success", coreda.StatusSuccess}, - {"NotIncluded", coreda.StatusNotIncludedInBlock}, - {"AlreadyInMempool", coreda.StatusAlreadyInMempool}, - {"TooBig", coreda.StatusTooBig}, - {"ContextCanceled", coreda.StatusContextCanceled}, + {"Success", da.StatusSuccess}, + {"NotIncluded", da.StatusNotIncludedInBlock}, + {"AlreadyInMempool", da.StatusAlreadyInMempool}, + {"TooBig", da.StatusTooBig}, + {"ContextCanceled", da.StatusContextCanceled}, } for _, tc := range testCases { @@ -634,7 +627,7 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) { defer db.Close() // Create a dummy DA that we can 
make fail - dummyDA := coreda.NewDummyDA(100_000, 100*time.Millisecond) + dummyDA := da.NewDummyDA(100_000, 100*time.Millisecond) dummyDA.StartHeightTicker() defer dummyDA.StopHeightTicker() diff --git a/test/e2e/evm_test_common.go b/test/e2e/evm_test_common.go index 9d502cc218..535e25e752 100644 --- a/test/e2e/evm_test_common.go +++ b/test/e2e/evm_test_common.go @@ -404,6 +404,10 @@ func setupFullNode(t *testing.T, sut *SystemUnderTest, fullNodeHome, sequencerHo err = os.WriteFile(fullNodeGenesis, genesisData, 0644) require.NoError(t, err, "failed to write full node genesis file") + // Read namespace from sequencer config to pass to full node + sequencerConfigPath := filepath.Join(sequencerHome, "config", "evnode.yaml") + namespace := extractNamespaceFromConfig(t, sequencerConfigPath) + // Create JWT secret file for full node fullNodeJwtSecretFile := createJWTSecretFile(t, fullNodeHome, fullNodeJwtSecret) @@ -418,6 +422,7 @@ func setupFullNode(t *testing.T, sut *SystemUnderTest, fullNodeHome, sequencerHo "--evm.eth-url", endpoints.GetFullNodeEthURL(), "--rollkit.da.block_time", DefaultDABlockTime, "--rollkit.da.address", endpoints.GetDAAddress(), + "--rollkit.da.namespace", namespace, // Use same namespace as sequencer "--rollkit.rpc.address", endpoints.GetFullNodeRPCListen(), "--rollkit.p2p.listen_address", endpoints.GetFullNodeP2PAddress(), } @@ -427,6 +432,42 @@ func setupFullNode(t *testing.T, sut *SystemUnderTest, fullNodeHome, sequencerHo sut.AwaitNodeLive(t, endpoints.GetFullNodeRPCAddress(), NodeStartupTimeout) } +// extractNamespaceFromConfig reads the namespace from a config file +func extractNamespaceFromConfig(t *testing.T, configPath string) string { + t.Helper() + + configData, err := os.ReadFile(configPath) + require.NoError(t, err, "failed to read config file") + + // Parse YAML - look for "namespace:" under "da:" section + lines := strings.Split(string(configData), "\n") + inDASection := false + for _, line := range lines { + // Check if we're 
entering the da: section + if strings.TrimSpace(line) == "da:" { + inDASection = true + continue + } + // Check if we're leaving the da: section (new top-level key) + if inDASection && len(line) > 0 && line[0] != ' ' && line[0] != '\t' { + inDASection = false + } + // Look for namespace: inside the da: section + if inDASection { + trimmed := strings.TrimSpace(line) + if strings.HasPrefix(trimmed, "namespace:") { + parts := strings.SplitN(trimmed, ":", 2) + if len(parts) == 2 { + return strings.TrimSpace(parts[1]) + } + } + } + } + + t.Fatal("namespace not found in config file") + return "" +} + // Global nonce counter to ensure unique nonces across multiple transaction submissions var globalNonce uint64 = 0 diff --git a/test/e2e/go.mod b/test/e2e/go.mod index 07664cbd62..0a7e4a9914 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -16,6 +16,7 @@ require ( replace ( github.com/evstack/ev-node => ../../ + github.com/evstack/ev-node/da => ../../da github.com/evstack/ev-node/execution/evm => ../../execution/evm github.com/evstack/ev-node/execution/evm/test => ../../execution/evm/test ) diff --git a/test/mocks/da.go b/test/mocks/da.go index bb3ad63391..500085d26e 100644 --- a/test/mocks/da.go +++ b/test/mocks/da.go @@ -7,7 +7,7 @@ package mocks import ( "context" - "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/da" mock "github.com/stretchr/testify/mock" ) @@ -335,31 +335,19 @@ func (_c *MockDA_GetProofs_Call) RunAndReturn(run func(ctx context.Context, ids } // Submit provides a mock function for the type MockDA -func (_mock *MockDA) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte) ([]da.ID, error) { +func (_mock *MockDA) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte) da.ResultSubmit { ret := _mock.Called(ctx, blobs, gasPrice, namespace) if len(ret) == 0 { panic("no return value specified for Submit") } - var r0 []da.ID - var r1 error - if returnFunc, ok := 
ret.Get(0).(func(context.Context, []da.Blob, float64, []byte) ([]da.ID, error)); ok { + var r0 da.ResultSubmit + if returnFunc, ok := ret.Get(0).(func(context.Context, []da.Blob, float64, []byte) da.ResultSubmit); ok { return returnFunc(ctx, blobs, gasPrice, namespace) } - if returnFunc, ok := ret.Get(0).(func(context.Context, []da.Blob, float64, []byte) []da.ID); ok { - r0 = returnFunc(ctx, blobs, gasPrice, namespace) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]da.ID) - } - } - if returnFunc, ok := ret.Get(1).(func(context.Context, []da.Blob, float64, []byte) error); ok { - r1 = returnFunc(ctx, blobs, gasPrice, namespace) - } else { - r1 = ret.Error(1) - } - return r0, r1 + r0 = ret.Get(0).(da.ResultSubmit) + return r0 } // MockDA_Submit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Submit' @@ -404,42 +392,30 @@ func (_c *MockDA_Submit_Call) Run(run func(ctx context.Context, blobs []da.Blob, return _c } -func (_c *MockDA_Submit_Call) Return(vs []da.ID, err error) *MockDA_Submit_Call { - _c.Call.Return(vs, err) +func (_c *MockDA_Submit_Call) Return(result da.ResultSubmit) *MockDA_Submit_Call { + _c.Call.Return(result) return _c } -func (_c *MockDA_Submit_Call) RunAndReturn(run func(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte) ([]da.ID, error)) *MockDA_Submit_Call { +func (_c *MockDA_Submit_Call) RunAndReturn(run func(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte) da.ResultSubmit) *MockDA_Submit_Call { _c.Call.Return(run) return _c } // SubmitWithOptions provides a mock function for the type MockDA -func (_mock *MockDA) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) ([]da.ID, error) { +func (_mock *MockDA) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) da.ResultSubmit { ret := _mock.Called(ctx, blobs, gasPrice, namespace, options) if 
len(ret) == 0 { panic("no return value specified for SubmitWithOptions") } - var r0 []da.ID - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, []da.Blob, float64, []byte, []byte) ([]da.ID, error)); ok { + var r0 da.ResultSubmit + if returnFunc, ok := ret.Get(0).(func(context.Context, []da.Blob, float64, []byte, []byte) da.ResultSubmit); ok { return returnFunc(ctx, blobs, gasPrice, namespace, options) } - if returnFunc, ok := ret.Get(0).(func(context.Context, []da.Blob, float64, []byte, []byte) []da.ID); ok { - r0 = returnFunc(ctx, blobs, gasPrice, namespace, options) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]da.ID) - } - } - if returnFunc, ok := ret.Get(1).(func(context.Context, []da.Blob, float64, []byte, []byte) error); ok { - r1 = returnFunc(ctx, blobs, gasPrice, namespace, options) - } else { - r1 = ret.Error(1) - } - return r0, r1 + r0 = ret.Get(0).(da.ResultSubmit) + return r0 } // MockDA_SubmitWithOptions_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubmitWithOptions' @@ -490,12 +466,12 @@ func (_c *MockDA_SubmitWithOptions_Call) Run(run func(ctx context.Context, blobs return _c } -func (_c *MockDA_SubmitWithOptions_Call) Return(vs []da.ID, err error) *MockDA_SubmitWithOptions_Call { - _c.Call.Return(vs, err) +func (_c *MockDA_SubmitWithOptions_Call) Return(result da.ResultSubmit) *MockDA_SubmitWithOptions_Call { + _c.Call.Return(result) return _c } -func (_c *MockDA_SubmitWithOptions_Call) RunAndReturn(run func(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) ([]da.ID, error)) *MockDA_SubmitWithOptions_Call { +func (_c *MockDA_SubmitWithOptions_Call) RunAndReturn(run func(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) da.ResultSubmit) *MockDA_SubmitWithOptions_Call { _c.Call.Return(run) return _c } @@ -579,3 +555,41 @@ func (_c *MockDA_Validate_Call) RunAndReturn(run func(ctx context.Context, 
ids [ _c.Call.Return(run) return _c } + +// Retrieve provides a mock function for the type MockDA +func (_mock *MockDA) Retrieve(ctx context.Context, height uint64, namespace []byte) da.ResultRetrieve { + ret := _mock.Called(ctx, height, namespace) + + if len(ret) == 0 { + panic("no return value specified for Retrieve") + } + + var r0 da.ResultRetrieve + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64, []byte) da.ResultRetrieve); ok { + return returnFunc(ctx, height, namespace) + } + r0 = ret.Get(0).(da.ResultRetrieve) + return r0 +} + +// MockDA_Retrieve_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Retrieve' +type MockDA_Retrieve_Call struct { + *mock.Call +} + +// Retrieve is a helper method to define mock.On call +func (_e *MockDA_Expecter) Retrieve(ctx interface{}, height interface{}, namespace interface{}) *MockDA_Retrieve_Call { + return &MockDA_Retrieve_Call{Call: _e.mock.On("Retrieve", ctx, height, namespace)} +} + +func (_c *MockDA_Retrieve_Call) Run(run func(ctx context.Context, height uint64, namespace []byte)) *MockDA_Retrieve_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].([]byte)) + }) + return _c +} + +func (_c *MockDA_Retrieve_Call) Return(result da.ResultRetrieve) *MockDA_Retrieve_Call { + _c.Call.Return(result) + return _c +} diff --git a/tools/da-debug/README.md b/tools/da-debug/README.md index b4edf674b8..7db8a17c14 100644 --- a/tools/da-debug/README.md +++ b/tools/da-debug/README.md @@ -1,20 +1,22 @@ # DA Debug Tool -A professional debugging tool for querying and inspecting Data Availability (DA) layer data in ev-node. +A debugging tool for querying and inspecting Data Availability (DA) layer data in ev-node. Connects directly to Celestia's blob API. ## Overview -The `da-debug` tool provides a command-line interface to interact with DA layers for debugging purposes. 
It offers two main commands: `query` for inspecting specific DA heights and `search` for finding blobs containing specific blockchain heights. +The `da-debug` tool provides a command-line interface to interact with Celestia for debugging purposes. It offers two main commands: `query` for inspecting specific DA heights and `search` for finding blobs containing specific blockchain heights. ## Installation -Install using `go install`: - ```bash go install github.com/evstack/ev-node/tools/da-debug@main ``` -After installation, the `da-debug` binary will be available in your `$GOPATH/bin` directory. +Or build locally: + +```bash +make build-tool-da-debug +``` ## Commands @@ -73,16 +75,11 @@ da-debug search 100 "0x000000000000000000000000000000000000000000000000000000746 All commands support these global flags: - -- `--da-url string`: DA layer JSON-RPC URL (default: "") - -- `--auth-token string`: Authentication token for DA layer +- `--da-url string`: Celestia node RPC URL (default: `http://localhost:26658`) +- `--auth-token string`: Authentication token for Celestia node - `--timeout duration`: Request timeout (default: 30s) - `--verbose`: Enable verbose logging - `--max-blob-size uint`: Maximum blob size in bytes (default: 1970176) -- `--gas-price float`: Gas price for DA operations (default: 0.0) -- `--gas-multiplier float`: Gas multiplier for DA operations (default: 1.0) -- `--no-color`: Disable colored output ## Namespace Format @@ -94,3 +91,17 @@ Namespaces can be provided in two formats: 2. 
**String Identifier**: Any string that gets automatically converted to a valid namespace - Example: `"my-app"` or `"test-namespace"` - The string is hashed and converted to a valid version 0 namespace + +## Getting an Auth Token + +To get an authentication token from your Celestia light node: + +```bash +celestia light auth write +``` + +Then use it with: + +```bash +da-debug query 100 "my-rollup" --auth-token "" +``` diff --git a/tools/da-debug/go.mod b/tools/da-debug/go.mod index d8081e329d..b661709f24 100644 --- a/tools/da-debug/go.mod +++ b/tools/da-debug/go.mod @@ -3,16 +3,18 @@ module github.com/evstack/ev-node/tools/da-debug go 1.24.6 require ( - github.com/evstack/ev-node v1.0.0-beta.6 - github.com/evstack/ev-node/core v1.0.0-beta.5 - github.com/evstack/ev-node/da v1.0.0-beta.6 + github.com/evstack/ev-node v0.0.0 + github.com/evstack/ev-node/da v0.0.0 github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.1 - google.golang.org/protobuf v1.36.9 + google.golang.org/protobuf v1.36.10 ) require ( - github.com/celestiaorg/go-header v0.7.3 // indirect + github.com/celestiaorg/go-header v0.7.4 // indirect + github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 // indirect + github.com/celestiaorg/go-square/v3 v3.0.2 // indirect + github.com/celestiaorg/nmt v0.24.2 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/filecoin-project/go-jsonrpc v0.9.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -22,7 +24,7 @@ require ( github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/go-cid v0.5.0 // indirect - github.com/ipfs/go-log/v2 v2.8.0 // indirect + github.com/ipfs/go-log/v2 v2.8.1 // indirect github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-libp2p v0.43.0 // indirect @@ -40,15 +42,22 @@ require ( github.com/multiformats/go-multicodec v0.9.2 // indirect 
github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-multistream v0.6.1 // indirect - github.com/multiformats/go-varint v0.0.7 // indirect + github.com/multiformats/go-varint v0.1.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/pflag v1.0.10 // indirect go.opencensus.io v0.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.45.0 // indirect - golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 // indirect + golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect + golang.org/x/sync v0.18.0 // indirect golang.org/x/sys v0.38.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect lukechampine.com/blake3 v1.4.1 // indirect ) + +replace github.com/evstack/ev-node => ../.. + +replace github.com/evstack/ev-node/da => ../../da + +replace github.com/evstack/ev-node/core => ../../core diff --git a/tools/da-debug/go.sum b/tools/da-debug/go.sum index 0bee6cfd01..a78ab21bf0 100644 --- a/tools/da-debug/go.sum +++ b/tools/da-debug/go.sum @@ -4,10 +4,14 @@ github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/celestiaorg/go-header v0.7.3 h1:3+kIa+YXT789gPGRh3a55qmdYq3yTTBIqTyum26AvN0= -github.com/celestiaorg/go-header v0.7.3/go.mod h1:eX9iTSPthVEAlEDLux40ZT/olXPGhpxHd+mEzJeDhd0= -github.com/celestiaorg/go-square/v3 v3.0.1 h1:44xnE3AUiZn/3q/uJ0c20AezFS0lywFTGG2lE/9jYKA= -github.com/celestiaorg/go-square/v3 v3.0.1/go.mod h1:Xc4ubl/7pbn/STD7w8Bnk/X1/PG3vk0ycOPW6tMOPX4= +github.com/celestiaorg/go-header v0.7.4 h1:kQx3bVvKV+H2etxRi4IUuby5VQydBONx3giHFXDcZ/o= +github.com/celestiaorg/go-header v0.7.4/go.mod 
h1:eX9iTSPthVEAlEDLux40ZT/olXPGhpxHd+mEzJeDhd0= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 h1:PYInrsYzrDIsZW9Yb86OTi2aEKuPcpgJt6Mc0Jlc/yg= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076/go.mod h1:hlidgivKyvv7m4Yl2Fdf2mSTmazZYxX8+bnr5IQrI98= +github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= +github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= +github.com/celestiaorg/nmt v0.24.2 h1:LlpJSPOd6/Lw1Ig6HUhZuqiINHLka/ZSRTBzlNJpchg= +github.com/celestiaorg/nmt v0.24.2/go.mod h1:vgLBpWBi8F5KLxTdXSwb7AU4NhiIQ1AQRGa+PzdcLEA= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -29,12 +33,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evstack/ev-node v1.0.0-beta.6 h1:jjGWAUsjHDpuBjvM7KXnY6Y8uYHM8LOrn0hDrk5zE6E= -github.com/evstack/ev-node v1.0.0-beta.6/go.mod h1:ZABT4xTIg4bINUS08r8e8LFIUk5anWe799fZ320q+Mk= -github.com/evstack/ev-node/core v1.0.0-beta.5 h1:lgxE8XiF3U9pcFgh7xuKMgsOGvLBGRyd9kc9MR4WL0o= -github.com/evstack/ev-node/core v1.0.0-beta.5/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= -github.com/evstack/ev-node/da v1.0.0-beta.6 h1:htzm4bbIGzNeuue4+/fEZTtjqpieLQWCtOWWnsNZvrY= -github.com/evstack/ev-node/da v1.0.0-beta.6/go.mod h1:Br+hq83JG0iIe9QW4grpm6iW8N86RdyDAhHck4+IJK8= 
github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= github.com/filecoin-project/go-jsonrpc v0.9.0 h1:G47qEF52w7GholpI21vPSTVBFvsrip6geIoqNiqyZtQ= @@ -43,18 +41,6 @@ github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= -github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= -github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= -github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= -github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= -github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= -github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= -github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -80,6 +66,8 @@ github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -87,24 +75,16 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= -github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/ipfs/boxo v0.33.1 h1:89m+ksw+cYi0ecTNTJ71IRS5ZrLiovmO6XWHIOGhAEg= -github.com/ipfs/boxo v0.33.1/go.mod h1:KwlJTzv5fb1GLlA9KyMqHQmvP+4mrFuiE3PnjdrPJHs= github.com/ipfs/go-cid v0.5.0 
h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= -github.com/ipfs/go-datastore v0.9.0 h1:WocriPOayqalEsueHv6SdD4nPVl4rYMfYGLD4bqCZ+w= -github.com/ipfs/go-datastore v0.9.0/go.mod h1:uT77w/XEGrvJWwHgdrMr8bqCN6ZTW9gzmi+3uK+ouHg= -github.com/ipfs/go-log/v2 v2.8.0 h1:SptNTPJQV3s5EF4FdrTu/yVdOKfGbDgn1EBZx4til2o= -github.com/ipfs/go-log/v2 v2.8.0/go.mod h1:2LEEhdv8BGubPeSFTyzbqhCqrwqxCbuTNTLWqgNAipo= -github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= -github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= +github.com/ipfs/go-log/v2 v2.8.1 h1:Y/X36z7ASoLJaYIJAL4xITXgwf7RVeqb1+/25aq/Xk0= +github.com/ipfs/go-log/v2 v2.8.1/go.mod h1:NyhTBcZmh2Y55eWVjOeKf8M7e4pnJYM3yDZNxQBWEEY= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= @@ -119,24 +99,14 @@ github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU= github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= -github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= -github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784= github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo= github.com/libp2p/go-libp2p v0.43.0 h1:b2bg2cRNmY4HpLK8VHYQXLX2d3iND95OjodLFymvqXU= github.com/libp2p/go-libp2p v0.43.0/go.mod 
h1:IiSqAXDyP2sWH+J2gs43pNmB/y4FOi2XQPbsb+8qvzc= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= -github.com/libp2p/go-libp2p-kad-dht v0.34.0 h1:yvJ/Vrt36GVjsqPxiGcuuwOloKuZLV9Aa7awIKyNXy0= -github.com/libp2p/go-libp2p-kad-dht v0.34.0/go.mod h1:JNbkES4W5tajS6uYivw6MPs0842cPHAwhgaPw8sQG4o= -github.com/libp2p/go-libp2p-kbucket v0.7.0 h1:vYDvRjkyJPeWunQXqcW2Z6E93Ywx7fX0jgzb/dGOKCs= -github.com/libp2p/go-libp2p-kbucket v0.7.0/go.mod h1:blOINGIj1yiPYlVEX0Rj9QwEkmVnz3EP8LK1dRKBC6g= github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o= github.com/libp2p/go-libp2p-pubsub v0.15.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= -github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= -github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= -github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI= -github.com/libp2p/go-libp2p-routing-helpers v0.7.5/go.mod h1:3YaxrwP0OBPDD7my3D0KxfR89FlcX/IEbxDEDfAmj98= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= @@ -165,8 +135,6 @@ github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8Rv github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= 
-github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= @@ -190,14 +158,12 @@ github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7B github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ= github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw= -github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= -github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOoETFs5dI= +github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= -github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M= github.com/pion/dtls/v2 v2.2.12 
h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= @@ -241,8 +207,6 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= -github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -254,59 +218,41 @@ github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7D github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= -github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg= -github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY= +github.com/quic-go/quic-go v0.54.1 h1:4ZAWm0AhCb6+hE+l5Q1NAL0iRn/ZrMwqHRGQiFwj2eg= +github.com/quic-go/quic-go v0.54.1/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY= github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70= github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/rs/zerolog v1.34.0 
h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= -github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= -github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= -github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= -github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= -github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod 
h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= -github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.24.0 
h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4= go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg= @@ -319,10 +265,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= -go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= -go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= -go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto 
v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -330,15 +274,15 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 h1:3yiSh9fhy5/RhCSntf4Sy0Tnx50DmMpQ4MQdKKk4yg4= -golang.org/x/exp v0.0.0-20250811191247-51f88131bc50/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg= +golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU= +golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= -golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -356,8 +300,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -367,6 +311,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU= +golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= @@ -381,16 +327,14 @@ golang.org/x/tools 
v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= -golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= -gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -410,8 +354,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= -google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/tools/da-debug/main.go b/tools/da-debug/main.go index 70c81edf64..9568f49a48 100644 --- a/tools/da-debug/main.go +++ b/tools/da-debug/main.go @@ -13,8 +13,8 @@ import ( "github.com/spf13/cobra" "google.golang.org/protobuf/proto" - coreda "github.com/evstack/ev-node/core/da" - "github.com/evstack/ev-node/da/jsonrpc" + "github.com/evstack/ev-node/da" + "github.com/evstack/ev-node/da/celestia" "github.com/evstack/ev-node/types" pb "github.com/evstack/ev-node/types/pb/evnode/v1" ) @@ -33,12 +33,13 @@ func main() { Use: "da-debug", Short: "DA debugging tool for blockchain data inspection", Long: `DA Debug Tool -A powerful DA debugging tool for inspecting blockchain data availability layers.`, +A powerful DA debugging tool for inspecting blockchain data availability layers. 
+Connects directly to Celestia's blob API.`, } // Global flags - rootCmd.PersistentFlags().StringVar(&daURL, "da-url", "http://localhost:7980", "DA layer JSON-RPC URL") - rootCmd.PersistentFlags().StringVar(&authToken, "auth-token", "", "Authentication token for DA layer") + rootCmd.PersistentFlags().StringVar(&daURL, "da-url", "http://localhost:26658", "Celestia node RPC URL") + rootCmd.PersistentFlags().StringVar(&authToken, "auth-token", "", "Authentication token for Celestia node") rootCmd.PersistentFlags().DurationVar(&timeout, "timeout", 30*time.Second, "Request timeout") rootCmd.PersistentFlags().BoolVar(&verbose, "verbose", false, "Enable verbose logging") rootCmd.PersistentFlags().Uint64Var(&maxBlobSize, "max-blob-size", 1970176, "Maximum blob size in bytes") @@ -85,7 +86,7 @@ Starting from the given DA height, searches through a range of DA heights until cmd.Flags().Uint64Var(&searchHeight, "target-height", 0, "Target blockchain height to search for (required)") cmd.Flags().Uint64Var(&searchRange, "range", 10, "Number of DA heights to search") - cmd.MarkFlagRequired("target-height") + _ = cmd.MarkFlagRequired("target-height") return cmd } @@ -142,13 +143,13 @@ func runSearch(cmd *cobra.Command, args []string, searchHeight, searchRange uint return searchForHeight(ctx, client, startHeight, namespace, searchHeight, searchRange) } -func searchForHeight(ctx context.Context, client *jsonrpc.Client, startHeight uint64, namespace []byte, targetHeight, searchRange uint64) error { +func searchForHeight(ctx context.Context, client *celestia.Client, startHeight uint64, namespace []byte, targetHeight, searchRange uint64) error { fmt.Printf("Searching for height %d in DA heights %d-%d...\n", targetHeight, startHeight, startHeight+searchRange-1) fmt.Println() foundBlobs := 0 for daHeight := startHeight; daHeight < startHeight+searchRange; daHeight++ { - result, err := client.DA.GetIDs(ctx, daHeight, namespace) + result, err := client.GetIDs(ctx, daHeight, namespace) if 
err != nil { if err.Error() == "blob: not found" || strings.Contains(err.Error(), "blob: not found") { continue @@ -165,7 +166,7 @@ func searchForHeight(ctx context.Context, client *jsonrpc.Client, startHeight ui } // Get the actual blob data - blobs, err := client.DA.Get(ctx, result.IDs, namespace) + blobs, err := client.Get(ctx, result.IDs, namespace) if err != nil { continue } @@ -198,10 +199,10 @@ func searchForHeight(ctx context.Context, client *jsonrpc.Client, startHeight ui // Display the decoded content if header := tryDecodeHeader(blob); header != nil { - printTypeHeader("SignedHeader", "") + printTypeHeader("SignedHeader") displayHeader(header) } else if data := tryDecodeData(blob); data != nil { - printTypeHeader("SignedData", "") + printTypeHeader("SignedData") displayData(data) } @@ -220,8 +221,8 @@ func searchForHeight(ctx context.Context, client *jsonrpc.Client, startHeight ui return nil } -func queryHeight(ctx context.Context, client *jsonrpc.Client, height uint64, namespace []byte) error { - result, err := client.DA.GetIDs(ctx, height, namespace) +func queryHeight(ctx context.Context, client *celestia.Client, height uint64, namespace []byte) error { + result, err := client.GetIDs(ctx, height, namespace) if err != nil { // Handle "blob not found" as a normal case if err.Error() == "blob: not found" || strings.Contains(err.Error(), "blob: not found") { @@ -246,7 +247,7 @@ func queryHeight(ctx context.Context, client *jsonrpc.Client, height uint64, nam fmt.Println() // Get the actual blob data - blobs, err := client.DA.Get(ctx, result.IDs, namespace) + blobs, err := client.Get(ctx, result.IDs, namespace) if err != nil { return fmt.Errorf("failed to get blob data: %w", err) } @@ -287,13 +288,13 @@ func queryHeight(ctx context.Context, client *jsonrpc.Client, height uint64, nam // Try to decode as header first if header := tryDecodeHeader(blob); header != nil { - printTypeHeader("SignedHeader", "") + printTypeHeader("SignedHeader") displayHeader(header) 
} else if data := tryDecodeData(blob); data != nil { - printTypeHeader("SignedData", "") + printTypeHeader("SignedData") displayData(data) } else { - printTypeHeader("Raw Data", "") + printTypeHeader("Raw Data") displayRawData(blob) } @@ -345,18 +346,18 @@ func printBlobHeader(current, total int) { fmt.Println(strings.Repeat("-", 80)) } -func displayBlobInfo(id coreda.ID, blob []byte) { +func displayBlobInfo(id da.ID, blob []byte) { fmt.Printf("ID: %s\n", formatHash(hex.EncodeToString(id))) fmt.Printf("Size: %s\n", formatSize(len(blob))) // Try to parse the ID to show height and commitment - if idHeight, commitment, err := coreda.SplitID(id); err == nil { + if idHeight, commitment, err := da.SplitID(id); err == nil { fmt.Printf("ID Height: %d\n", idHeight) fmt.Printf("Commitment: %s\n", formatHash(hex.EncodeToString(commitment))) } } -func printTypeHeader(title, color string) { +func printTypeHeader(title string) { fmt.Printf("Type: %s\n", title) } @@ -444,6 +445,9 @@ func formatHashField(hash string) string { } func formatShortHash(hash string) string { + if len(hash) > 16 { + return hash[:16] + "..." 
+ } return hash } @@ -504,18 +508,20 @@ func tryDecodeData(bz []byte) *types.SignedData { return &signedData } -func createDAClient() (*jsonrpc.Client, error) { +func createDAClient() (*celestia.Client, error) { logger := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).Level(zerolog.InfoLevel) if verbose { logger = logger.Level(zerolog.DebugLevel) + } else { + logger = zerolog.Nop() } ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - client, err := jsonrpc.NewClient(ctx, logger, daURL, authToken, maxBlobSize) + client, err := celestia.NewClient(ctx, logger, daURL, authToken, maxBlobSize) if err != nil { - return nil, fmt.Errorf("failed to create DA client: %w", err) + return nil, fmt.Errorf("failed to create Celestia client: %w", err) } return client, nil @@ -523,12 +529,12 @@ func createDAClient() (*jsonrpc.Client, error) { func parseNamespace(ns string) ([]byte, error) { // Try to parse as hex first - if hex, err := parseHex(ns); err == nil && len(hex) == 29 { - return hex, nil + if hexBytes, err := parseHex(ns); err == nil && len(hexBytes) == da.NamespaceSize { + return hexBytes, nil } // If not valid hex or not 29 bytes, treat as string identifier - namespace := coreda.NamespaceFromString(ns) + namespace := da.NamespaceFromString(ns) return namespace.Bytes(), nil } diff --git a/tools/da-debug/main_test.go b/tools/da-debug/main_test.go index 09818b4bb5..69342c90c6 100644 --- a/tools/da-debug/main_test.go +++ b/tools/da-debug/main_test.go @@ -4,7 +4,7 @@ import ( "encoding/hex" "testing" - coreda "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/da" ) func TestParseNamespace(t *testing.T) { @@ -187,7 +187,7 @@ func TestIDSplitting(t *testing.T) { height := uint64(12345) commitment := []byte("test-commitment-data") - // Create an ID using the format from the LocalDA implementation + // Create an ID using the format from the da.go implementation id := make([]byte, 8+len(commitment)) // Use little endian as per 
the da.go implementation id[0] = byte(height) @@ -201,7 +201,7 @@ func TestIDSplitting(t *testing.T) { copy(id[8:], commitment) // Test splitting - parsedHeight, parsedCommitment, err := coreda.SplitID(id) + parsedHeight, parsedCommitment, err := da.SplitID(id) if err != nil { t.Errorf("SplitID() unexpected error: %v", err) } diff --git a/tools/tools.mk b/tools/tools.mk index 2eefeec775..0c18b1a716 100644 --- a/tools/tools.mk +++ b/tools/tools.mk @@ -1,7 +1,7 @@ # tools.mk - Build configuration for ev-node tools # Tool names -TOOLS := da-debug blob-decoder cache-analyzer +TOOLS := blob-decoder cache-analyzer # Build directory TOOLS_BUILD_DIR := $(CURDIR)/build @@ -14,14 +14,6 @@ LDFLAGS ?= \ -X main.GitSHA=$(GITSHA) # Individual tool build targets -## build-tool-da-debug: Build da-debug tool -build-tool-da-debug: - @echo "--> Building da-debug tool" - @mkdir -p $(TOOLS_BUILD_DIR) - @cd tools/da-debug && go build -ldflags "$(LDFLAGS)" -o $(TOOLS_BUILD_DIR)/da-debug . - @echo "--> da-debug built: $(TOOLS_BUILD_DIR)/da-debug" -.PHONY: build-tool-da-debug - ## build-tool-blob-decoder: Build blob-decoder tool build-tool-blob-decoder: @echo "--> Building blob-decoder tool" @@ -45,13 +37,6 @@ build-tools: $(addprefix build-tool-, $(TOOLS)) .PHONY: build-tools # Install individual tools -## install-tool-da-debug: Install da-debug tool to Go bin -install-tool-da-debug: - @echo "--> Installing da-debug tool" - @cd tools/da-debug && go install -ldflags "$(LDFLAGS)" . - @echo "--> da-debug installed to Go bin" -.PHONY: install-tool-da-debug - ## install-tool-blob-decoder: Install blob-decoder tool to Go bin install-tool-blob-decoder: @echo "--> Installing blob-decoder tool"