diff --git a/Makefile b/Makefile index ea649adb3b..805a3c8d2c 100644 --- a/Makefile +++ b/Makefile @@ -10,6 +10,7 @@ setup-dev-tools: setup-build-tools go install honnef.co/go/tools/cmd/staticcheck@2024.1.1 go install github.com/yannh/kubeconform/cmd/kubeconform@v0.6.3 go install github.com/norwoodj/helm-docs/cmd/helm-docs@v1.11.3 + go install github.com/vektra/mockery/v3@v3.3.1 prerequisites: setup-dev-tools go version @@ -17,6 +18,7 @@ prerequisites: setup-dev-tools node -v docker -v dbmate -v + mockery version infra-up: dc-dev diff --git a/demo/cmd/mood/main.go b/demo/cmd/mood/main.go index 555de06584..96f7658c93 100644 --- a/demo/cmd/mood/main.go +++ b/demo/cmd/mood/main.go @@ -3,11 +3,12 @@ package main import ( "context" "fmt" - "github.com/wundergraph/cosmo/demo/pkg/subgraphs/mood" "log" "net/http" "os" + "github.com/wundergraph/cosmo/demo/pkg/subgraphs/mood" + "github.com/99designs/gqlgen/graphql" "github.com/99designs/gqlgen/graphql/handler/debug" "github.com/99designs/gqlgen/graphql/playground" @@ -31,7 +32,9 @@ func main() { port = defaultPort } - srv := subgraphs.NewDemoServer(mood.NewSchema(nil)) + srv := subgraphs.NewDemoServer(mood.NewSchema(nil, func(name string) string { + return name + })) srv.Use(&debug.Tracer{}) srv.Use(otelgqlgen.Middleware(otelgqlgen.WithCreateSpanFromFields(func(ctx *graphql.FieldContext) bool { diff --git a/demo/go.mod b/demo/go.mod index a01c28b3b9..246ac6637e 100644 --- a/demo/go.mod +++ b/demo/go.mod @@ -150,6 +150,7 @@ require ( github.com/twmb/franz-go/pkg/kmsg v1.7.0 // indirect github.com/urfave/cli/v2 v2.27.5 // indirect github.com/wundergraph/astjson v0.0.0-20250106123708-be463c97e083 // indirect + github.com/wundergraph/graphql-go-tools/v2 v2.0.0-rc.183 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect diff --git 
a/demo/pkg/subgraphs/availability/availability.go b/demo/pkg/subgraphs/availability/availability.go index f37ce470a5..d3b8bb8bb4 100644 --- a/demo/pkg/subgraphs/availability/availability.go +++ b/demo/pkg/subgraphs/availability/availability.go @@ -2,13 +2,13 @@ package availability import ( "github.com/99designs/gqlgen/graphql" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/availability/subgraph" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/availability/subgraph/generated" ) -func NewSchema(pubSubBySourceName map[string]pubsub_datasource.NatsPubSub, pubSubName func(string) string) graphql.ExecutableSchema { +func NewSchema(pubSubBySourceName map[string]nats.Adapter, pubSubName func(string) string) graphql.ExecutableSchema { return generated.NewExecutableSchema(generated.Config{Resolvers: &subgraph.Resolver{ NatsPubSubByProviderID: pubSubBySourceName, GetPubSubName: pubSubName, diff --git a/demo/pkg/subgraphs/availability/subgraph/resolver.go b/demo/pkg/subgraphs/availability/subgraph/resolver.go index 9ac6f82f89..d89a97d01c 100644 --- a/demo/pkg/subgraphs/availability/subgraph/resolver.go +++ b/demo/pkg/subgraphs/availability/subgraph/resolver.go @@ -1,7 +1,7 @@ package subgraph import ( - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" ) // This file will not be regenerated automatically. @@ -9,6 +9,6 @@ import ( // It serves as dependency injection for your app, add any dependencies you require here. 
type Resolver struct { - NatsPubSubByProviderID map[string]pubsub_datasource.NatsPubSub + NatsPubSubByProviderID map[string]nats.Adapter GetPubSubName func(string) string } diff --git a/demo/pkg/subgraphs/availability/subgraph/schema.resolvers.go b/demo/pkg/subgraphs/availability/subgraph/schema.resolvers.go index 43797ed3dc..7067d88bf1 100644 --- a/demo/pkg/subgraphs/availability/subgraph/schema.resolvers.go +++ b/demo/pkg/subgraphs/availability/subgraph/schema.resolvers.go @@ -10,13 +10,13 @@ import ( "github.com/wundergraph/cosmo/demo/pkg/subgraphs/availability/subgraph/generated" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/availability/subgraph/model" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" ) // UpdateAvailability is the resolver for the updateAvailability field. func (r *mutationResolver) UpdateAvailability(ctx context.Context, employeeID int, isAvailable bool) (*model.Employee, error) { storage.Set(employeeID, isAvailable) - err := r.NatsPubSubByProviderID["default"].Publish(ctx, pubsub_datasource.NatsPublishAndRequestEventConfiguration{ + err := r.NatsPubSubByProviderID["default"].Publish(ctx, nats.PublishAndRequestEventConfiguration{ Subject: r.GetPubSubName(fmt.Sprintf("employeeUpdated.%d", employeeID)), Data: []byte(fmt.Sprintf(`{"id":%d,"__typename": "Employee"}`, employeeID)), }) @@ -24,7 +24,7 @@ func (r *mutationResolver) UpdateAvailability(ctx context.Context, employeeID in if err != nil { return nil, err } - err = r.NatsPubSubByProviderID["my-nats"].Publish(ctx, pubsub_datasource.NatsPublishAndRequestEventConfiguration{ + err = r.NatsPubSubByProviderID["my-nats"].Publish(ctx, nats.PublishAndRequestEventConfiguration{ Subject: r.GetPubSubName(fmt.Sprintf("employeeUpdatedMyNats.%d", employeeID)), Data: []byte(fmt.Sprintf(`{"id":%d,"__typename": "Employee"}`, employeeID)), }) diff --git a/demo/pkg/subgraphs/countries/countries.go 
b/demo/pkg/subgraphs/countries/countries.go index 3a1ccb7427..a562eb31f7 100644 --- a/demo/pkg/subgraphs/countries/countries.go +++ b/demo/pkg/subgraphs/countries/countries.go @@ -2,13 +2,13 @@ package countries import ( "github.com/99designs/gqlgen/graphql" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/countries/subgraph" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/countries/subgraph/generated" ) -func NewSchema(pubSubBySourceName map[string]pubsub_datasource.NatsPubSub) graphql.ExecutableSchema { +func NewSchema(pubSubBySourceName map[string]nats.Adapter) graphql.ExecutableSchema { return generated.NewExecutableSchema(generated.Config{Resolvers: &subgraph.Resolver{ NatsPubSubByProviderID: pubSubBySourceName, }}) diff --git a/demo/pkg/subgraphs/countries/subgraph/resolver.go b/demo/pkg/subgraphs/countries/subgraph/resolver.go index 4b235fdec9..c8cf455fd4 100644 --- a/demo/pkg/subgraphs/countries/subgraph/resolver.go +++ b/demo/pkg/subgraphs/countries/subgraph/resolver.go @@ -1,8 +1,9 @@ package subgraph import ( - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" "sync" + + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" ) // This file will not be regenerated automatically. 
@@ -11,5 +12,5 @@ import ( type Resolver struct { mux sync.Mutex - NatsPubSubByProviderID map[string]pubsub_datasource.NatsPubSub + NatsPubSubByProviderID map[string]nats.Adapter } diff --git a/demo/pkg/subgraphs/employees/employees.go b/demo/pkg/subgraphs/employees/employees.go index 408737da15..c60ff2201a 100644 --- a/demo/pkg/subgraphs/employees/employees.go +++ b/demo/pkg/subgraphs/employees/employees.go @@ -2,13 +2,12 @@ package employees import ( "github.com/99designs/gqlgen/graphql" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" - "github.com/wundergraph/cosmo/demo/pkg/subgraphs/employees/subgraph" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/employees/subgraph/generated" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" ) -func NewSchema(natsPubSubByProviderID map[string]pubsub_datasource.NatsPubSub) graphql.ExecutableSchema { +func NewSchema(natsPubSubByProviderID map[string]nats.Adapter) graphql.ExecutableSchema { return generated.NewExecutableSchema(generated.Config{Resolvers: &subgraph.Resolver{ NatsPubSubByProviderID: natsPubSubByProviderID, EmployeesData: subgraph.Employees, diff --git a/demo/pkg/subgraphs/employees/subgraph/resolver.go b/demo/pkg/subgraphs/employees/subgraph/resolver.go index f71624ab76..3fd878a209 100644 --- a/demo/pkg/subgraphs/employees/subgraph/resolver.go +++ b/demo/pkg/subgraphs/employees/subgraph/resolver.go @@ -6,7 +6,7 @@ import ( "sync" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/employees/subgraph/model" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" ) // This file will not be regenerated automatically. 
@@ -15,7 +15,7 @@ import ( type Resolver struct { mux sync.Mutex - NatsPubSubByProviderID map[string]pubsub_datasource.NatsPubSub + NatsPubSubByProviderID map[string]nats.Adapter EmployeesData []*model.Employee } diff --git a/demo/pkg/subgraphs/family/family.go b/demo/pkg/subgraphs/family/family.go index c55eae3fe4..a8800d0229 100644 --- a/demo/pkg/subgraphs/family/family.go +++ b/demo/pkg/subgraphs/family/family.go @@ -2,13 +2,13 @@ package family import ( "github.com/99designs/gqlgen/graphql" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/family/subgraph" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/family/subgraph/generated" ) -func NewSchema(natsPubSubByProviderID map[string]pubsub_datasource.NatsPubSub) graphql.ExecutableSchema { +func NewSchema(natsPubSubByProviderID map[string]nats.Adapter) graphql.ExecutableSchema { return generated.NewExecutableSchema(generated.Config{Resolvers: &subgraph.Resolver{ NatsPubSubByProviderID: natsPubSubByProviderID, }}) diff --git a/demo/pkg/subgraphs/family/subgraph/resolver.go b/demo/pkg/subgraphs/family/subgraph/resolver.go index f4678ba12e..906dfaf4ae 100644 --- a/demo/pkg/subgraphs/family/subgraph/resolver.go +++ b/demo/pkg/subgraphs/family/subgraph/resolver.go @@ -1,7 +1,7 @@ package subgraph import ( - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" ) // This file will not be regenerated automatically. @@ -9,5 +9,5 @@ import ( // It serves as dependency injection for your app, add any dependencies you require here. 
type Resolver struct { - NatsPubSubByProviderID map[string]pubsub_datasource.NatsPubSub + NatsPubSubByProviderID map[string]nats.Adapter } diff --git a/demo/pkg/subgraphs/hobbies/hobbies.go b/demo/pkg/subgraphs/hobbies/hobbies.go index 103e8bb43a..0b79ec1929 100644 --- a/demo/pkg/subgraphs/hobbies/hobbies.go +++ b/demo/pkg/subgraphs/hobbies/hobbies.go @@ -2,13 +2,13 @@ package hobbies import ( "github.com/99designs/gqlgen/graphql" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/hobbies/subgraph" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/hobbies/subgraph/generated" ) -func NewSchema(natsPubSubByProviderID map[string]pubsub_datasource.NatsPubSub) graphql.ExecutableSchema { +func NewSchema(natsPubSubByProviderID map[string]nats.Adapter) graphql.ExecutableSchema { return generated.NewExecutableSchema(generated.Config{Resolvers: &subgraph.Resolver{ NatsPubSubByProviderID: natsPubSubByProviderID, }}) diff --git a/demo/pkg/subgraphs/hobbies/subgraph/resolver.go b/demo/pkg/subgraphs/hobbies/subgraph/resolver.go index dc972ffd31..d6f0de5609 100644 --- a/demo/pkg/subgraphs/hobbies/subgraph/resolver.go +++ b/demo/pkg/subgraphs/hobbies/subgraph/resolver.go @@ -1,10 +1,10 @@ package subgraph import ( - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" "reflect" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/hobbies/subgraph/model" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" ) // This file will not be regenerated automatically. @@ -12,7 +12,7 @@ import ( // It serves as dependency injection for your app, add any dependencies you require here. 
type Resolver struct { - NatsPubSubByProviderID map[string]pubsub_datasource.NatsPubSub + NatsPubSubByProviderID map[string]nats.Adapter } func (r *Resolver) Employees(hobby model.Hobby) ([]*model.Employee, error) { diff --git a/demo/pkg/subgraphs/mood/mood.go b/demo/pkg/subgraphs/mood/mood.go index 7083a607e7..44b8275d85 100644 --- a/demo/pkg/subgraphs/mood/mood.go +++ b/demo/pkg/subgraphs/mood/mood.go @@ -2,14 +2,15 @@ package mood import ( "github.com/99designs/gqlgen/graphql" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/mood/subgraph" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/mood/subgraph/generated" ) -func NewSchema(natsPubSubByProviderID map[string]pubsub_datasource.NatsPubSub) graphql.ExecutableSchema { +func NewSchema(natsPubSubByProviderID map[string]nats.Adapter, getPubSubName func(string) string) graphql.ExecutableSchema { return generated.NewExecutableSchema(generated.Config{Resolvers: &subgraph.Resolver{ NatsPubSubByProviderID: natsPubSubByProviderID, + GetPubSubName: getPubSubName, }}) } diff --git a/demo/pkg/subgraphs/mood/subgraph/resolver.go b/demo/pkg/subgraphs/mood/subgraph/resolver.go index 9ac6f82f89..d89a97d01c 100644 --- a/demo/pkg/subgraphs/mood/subgraph/resolver.go +++ b/demo/pkg/subgraphs/mood/subgraph/resolver.go @@ -1,7 +1,7 @@ package subgraph import ( - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" ) // This file will not be regenerated automatically. @@ -9,6 +9,6 @@ import ( // It serves as dependency injection for your app, add any dependencies you require here. 
type Resolver struct { - NatsPubSubByProviderID map[string]pubsub_datasource.NatsPubSub + NatsPubSubByProviderID map[string]nats.Adapter GetPubSubName func(string) string } diff --git a/demo/pkg/subgraphs/mood/subgraph/schema.resolvers.go b/demo/pkg/subgraphs/mood/subgraph/schema.resolvers.go index aab22e4499..8ab7c73941 100644 --- a/demo/pkg/subgraphs/mood/subgraph/schema.resolvers.go +++ b/demo/pkg/subgraphs/mood/subgraph/schema.resolvers.go @@ -6,14 +6,42 @@ package subgraph import ( "context" + "fmt" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/mood/subgraph/generated" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/mood/subgraph/model" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" ) // UpdateMood is the resolver for the updateMood field. func (r *mutationResolver) UpdateMood(ctx context.Context, employeeID int, mood model.Mood) (*model.Employee, error) { storage.Set(employeeID, mood) + myNatsTopic := r.GetPubSubName(fmt.Sprintf("employeeUpdated.%d", employeeID)) + payload := fmt.Sprintf(`{"id":%d,"__typename": "Employee"}`, employeeID) + if r.NatsPubSubByProviderID["default"] != nil { + err := r.NatsPubSubByProviderID["default"].Publish(ctx, nats.PublishAndRequestEventConfiguration{ + Subject: myNatsTopic, + Data: []byte(payload), + }) + if err != nil { + return nil, err + } + } else { + return nil, fmt.Errorf("no nats pubsub default provider found") + } + + defaultTopic := r.GetPubSubName(fmt.Sprintf("employeeUpdatedMyNats.%d", employeeID)) + if r.NatsPubSubByProviderID["my-nats"] != nil { + err := r.NatsPubSubByProviderID["my-nats"].Publish(ctx, nats.PublishAndRequestEventConfiguration{ + Subject: defaultTopic, + Data: []byte(payload), + }) + if err != nil { + return nil, err + } + } else { + return nil, fmt.Errorf("no nats pubsub my-nats provider found") + } return &model.Employee{ID: employeeID, CurrentMood: mood}, nil } diff --git a/demo/pkg/subgraphs/products/products.go b/demo/pkg/subgraphs/products/products.go index 
f14cc97813..0ed88bda57 100644 --- a/demo/pkg/subgraphs/products/products.go +++ b/demo/pkg/subgraphs/products/products.go @@ -2,13 +2,13 @@ package products import ( "github.com/99designs/gqlgen/graphql" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/products/subgraph" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/products/subgraph/generated" ) -func NewSchema(natsPubSubByProviderID map[string]pubsub_datasource.NatsPubSub) graphql.ExecutableSchema { +func NewSchema(natsPubSubByProviderID map[string]nats.Adapter) graphql.ExecutableSchema { return generated.NewExecutableSchema(generated.Config{Resolvers: &subgraph.Resolver{ NatsPubSubByProviderID: natsPubSubByProviderID, TopSecretFederationFactsData: subgraph.TopSecretFederationFacts, diff --git a/demo/pkg/subgraphs/products/subgraph/resolver.go b/demo/pkg/subgraphs/products/subgraph/resolver.go index c9d610cb04..7eb404b9ca 100644 --- a/demo/pkg/subgraphs/products/subgraph/resolver.go +++ b/demo/pkg/subgraphs/products/subgraph/resolver.go @@ -1,9 +1,10 @@ package subgraph import ( - "github.com/wundergraph/cosmo/demo/pkg/subgraphs/products/subgraph/model" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" "sync" + + "github.com/wundergraph/cosmo/demo/pkg/subgraphs/products/subgraph/model" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" ) // This file will not be regenerated automatically. 
@@ -12,6 +13,6 @@ import ( type Resolver struct { mux sync.Mutex - NatsPubSubByProviderID map[string]pubsub_datasource.NatsPubSub + NatsPubSubByProviderID map[string]nats.Adapter TopSecretFederationFactsData []model.TopSecretFact } diff --git a/demo/pkg/subgraphs/products_fg/products.go b/demo/pkg/subgraphs/products_fg/products.go index 155cffe417..66aa068c13 100644 --- a/demo/pkg/subgraphs/products_fg/products.go +++ b/demo/pkg/subgraphs/products_fg/products.go @@ -2,13 +2,13 @@ package products_fg import ( "github.com/99designs/gqlgen/graphql" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/products_fg/subgraph" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/products_fg/subgraph/generated" ) -func NewSchema(natsPubSubByProviderID map[string]pubsub_datasource.NatsPubSub) graphql.ExecutableSchema { +func NewSchema(natsPubSubByProviderID map[string]nats.Adapter) graphql.ExecutableSchema { return generated.NewExecutableSchema(generated.Config{Resolvers: &subgraph.Resolver{ NatsPubSubByProviderID: natsPubSubByProviderID, TopSecretFederationFactsData: subgraph.TopSecretFederationFacts, diff --git a/demo/pkg/subgraphs/products_fg/subgraph/resolver.go b/demo/pkg/subgraphs/products_fg/subgraph/resolver.go index f0f8d7059a..194c91f88b 100644 --- a/demo/pkg/subgraphs/products_fg/subgraph/resolver.go +++ b/demo/pkg/subgraphs/products_fg/subgraph/resolver.go @@ -1,9 +1,10 @@ package subgraph import ( - "github.com/wundergraph/cosmo/demo/pkg/subgraphs/products_fg/subgraph/model" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" "sync" + + "github.com/wundergraph/cosmo/demo/pkg/subgraphs/products_fg/subgraph/model" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" ) // This file will not be regenerated automatically. 
@@ -12,6 +13,6 @@ import ( type Resolver struct { mux sync.Mutex - NatsPubSubByProviderID map[string]pubsub_datasource.NatsPubSub + NatsPubSubByProviderID map[string]nats.Adapter TopSecretFederationFactsData []model.TopSecretFact } diff --git a/demo/pkg/subgraphs/subgraphs.go b/demo/pkg/subgraphs/subgraphs.go index abcaf75973..88f3f4b147 100644 --- a/demo/pkg/subgraphs/subgraphs.go +++ b/demo/pkg/subgraphs/subgraphs.go @@ -22,7 +22,6 @@ import ( "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" natsPubsub "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" "golang.org/x/sync/errgroup" "github.com/wundergraph/cosmo/demo/pkg/injector" @@ -162,7 +161,7 @@ func subgraphHandler(schema graphql.ExecutableSchema) http.Handler { } type SubgraphOptions struct { - NatsPubSubByProviderID map[string]pubsub_datasource.NatsPubSub + NatsPubSubByProviderID map[string]natsPubsub.Adapter GetPubSubName func(string) string } @@ -195,7 +194,7 @@ func AvailabilityHandler(opts *SubgraphOptions) http.Handler { } func MoodHandler(opts *SubgraphOptions) http.Handler { - return subgraphHandler(mood.NewSchema(opts.NatsPubSubByProviderID)) + return subgraphHandler(mood.NewSchema(opts.NatsPubSubByProviderID, opts.GetPubSubName)) } func CountriesHandler(opts *SubgraphOptions) http.Handler { @@ -207,31 +206,30 @@ func New(ctx context.Context, config *Config) (*Subgraphs, error) { if defaultSourceNameURL := os.Getenv("NATS_URL"); defaultSourceNameURL != "" { url = defaultSourceNameURL } - defaultConnection, err := nats.Connect(url) + + natsPubSubByProviderID := map[string]natsPubsub.Adapter{} + + defaultAdapter, err := natsPubsub.NewAdapter(ctx, zap.NewNop(), url, []nats.Option{}, "hostname", "test") if err != nil { - log.Printf("failed to connect to nats source \"nats\": %v", err) + return nil, fmt.Errorf("failed to create default nats adapter: %w", err) } + natsPubSubByProviderID["default"] 
= defaultAdapter - myNatsConnection, err := nats.Connect(url) + myNatsAdapter, err := natsPubsub.NewAdapter(ctx, zap.NewNop(), url, []nats.Option{}, "hostname", "test") if err != nil { - log.Printf("failed to connect to nats source \"my-nats\": %v", err) + return nil, fmt.Errorf("failed to create my-nats adapter: %w", err) } + natsPubSubByProviderID["my-nats"] = myNatsAdapter - defaultJetStream, err := jetstream.New(defaultConnection) + defaultConnection, err := nats.Connect(url) if err != nil { - return nil, err + log.Printf("failed to connect to nats source \"nats\": %v", err) } - - myNatsJetStream, err := jetstream.New(myNatsConnection) + defaultJetStream, err := jetstream.New(defaultConnection) if err != nil { return nil, err } - natsPubSubByProviderID := map[string]pubsub_datasource.NatsPubSub{ - "default": natsPubsub.NewConnector(zap.NewNop(), defaultConnection, defaultJetStream, "hostname", "test").New(ctx), - "my-nats": natsPubsub.NewConnector(zap.NewNop(), myNatsConnection, myNatsJetStream, "hostname", "test").New(ctx), - } - _, err = defaultJetStream.CreateOrUpdateStream(ctx, jetstream.StreamConfig{ Name: "streamName", Subjects: []string{"employeeUpdated.>"}, @@ -262,7 +260,7 @@ func New(ctx context.Context, config *Config) (*Subgraphs, error) { if srv := newServer("availability", config.EnableDebug, config.Ports.Availability, availability.NewSchema(natsPubSubByProviderID, config.GetPubSubName)); srv != nil { servers = append(servers, srv) } - if srv := newServer("mood", config.EnableDebug, config.Ports.Mood, mood.NewSchema(natsPubSubByProviderID)); srv != nil { + if srv := newServer("mood", config.EnableDebug, config.Ports.Mood, mood.NewSchema(natsPubSubByProviderID, config.GetPubSubName)); srv != nil { servers = append(servers, srv) } if srv := newServer("countries", config.EnableDebug, config.Ports.Countries, countries.NewSchema(natsPubSubByProviderID)); srv != nil { diff --git a/demo/pkg/subgraphs/test1/subgraph/resolver.go 
b/demo/pkg/subgraphs/test1/subgraph/resolver.go index f4678ba12e..906dfaf4ae 100644 --- a/demo/pkg/subgraphs/test1/subgraph/resolver.go +++ b/demo/pkg/subgraphs/test1/subgraph/resolver.go @@ -1,7 +1,7 @@ package subgraph import ( - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" ) // This file will not be regenerated automatically. @@ -9,5 +9,5 @@ import ( // It serves as dependency injection for your app, add any dependencies you require here. type Resolver struct { - NatsPubSubByProviderID map[string]pubsub_datasource.NatsPubSub + NatsPubSubByProviderID map[string]nats.Adapter } diff --git a/demo/pkg/subgraphs/test1/test1.go b/demo/pkg/subgraphs/test1/test1.go index 25f00b8ec7..0ce50a2440 100644 --- a/demo/pkg/subgraphs/test1/test1.go +++ b/demo/pkg/subgraphs/test1/test1.go @@ -2,13 +2,13 @@ package test1 import ( "github.com/99designs/gqlgen/graphql" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/test1/subgraph" "github.com/wundergraph/cosmo/demo/pkg/subgraphs/test1/subgraph/generated" ) -func NewSchema(natsPubSubByProviderID map[string]pubsub_datasource.NatsPubSub) graphql.ExecutableSchema { +func NewSchema(natsPubSubByProviderID map[string]nats.Adapter) graphql.ExecutableSchema { return generated.NewExecutableSchema(generated.Config{Resolvers: &subgraph.Resolver{ NatsPubSubByProviderID: natsPubSubByProviderID, }}) diff --git a/router-tests/events/events_config_test.go b/router-tests/events/events_config_test.go index 110d0d4cff..f5c05bf8ac 100644 --- a/router-tests/events/events_config_test.go +++ b/router-tests/events/events_config_test.go @@ -1,10 +1,11 @@ package events_test import ( + "testing" + "github.com/stretchr/testify/assert" "github.com/wundergraph/cosmo/router-tests/testenv" 
"github.com/wundergraph/cosmo/router/pkg/config" - "testing" ) func TestEventsConfig(t *testing.T) { @@ -23,7 +24,7 @@ func TestEventsConfig(t *testing.T) { }, func(t *testing.T, xEnv *testenv.Environment) { assert.Fail(t, "should not be called") }) - assert.ErrorContains(t, err, "failed to find Kafka provider with ID") + assert.ErrorContains(t, err, "kafka provider with ID my-kafka is not defined") }) t.Run("nats provider not specified in the router configuration", func(t *testing.T) { @@ -37,6 +38,6 @@ func TestEventsConfig(t *testing.T) { }, func(t *testing.T, xEnv *testenv.Environment) { assert.Fail(t, "should not be called") }) - assert.ErrorContains(t, err, "failed to find Nats provider with ID") + assert.ErrorContains(t, err, "nats provider with ID default is not defined") }) } diff --git a/router-tests/events/kafka_events_test.go b/router-tests/events/kafka_events_test.go index a2175dd3b4..40d7041fb1 100644 --- a/router-tests/events/kafka_events_test.go +++ b/router-tests/events/kafka_events_test.go @@ -7,6 +7,7 @@ import ( "encoding/json" "fmt" "net/http" + "strconv" "sync/atomic" "testing" "time" @@ -1065,6 +1066,54 @@ func TestKafkaEvents(t *testing.T) { xEnv.WaitForConnectionCount(0, KafkaWaitTimeout) }) }) + + t.Run("mutate", func(t *testing.T) { + t.Parallel() + + topics := []string{"employeeUpdated"} + + testenv.Run(t, &testenv.Config{ + RouterConfigJSONTemplate: testenv.ConfigWithEdfsKafkaJSONTemplate, + EnableKafka: true, + }, func(t *testing.T, xEnv *testenv.Environment) { + ensureTopicExists(t, xEnv, topics...) 
+ + // Send a mutation to trigger the first subscription + resOne := xEnv.MakeGraphQLRequestOK(testenv.GraphQLRequest{ + Query: `mutation { updateEmployeeMyKafka(employeeID: 3, update: {name: "name test"}) { success } }`, + }) + require.JSONEq(t, `{"data":{"updateEmployeeMyKafka":{"success":true}}}`, resOne.Body) + + records, err := readKafkaMessages(xEnv, topics[0], 1) + require.NoError(t, err) + require.Equal(t, 1, len(records)) + require.Equal(t, `{"employeeID":3,"update":{"name":"name test"}}`, string(records[0].Value)) + }) + }) + + t.Run("kafka startup and shutdown with wrong broker should not stop router from starting indefinitely", func(t *testing.T) { + t.Parallel() + + listener := testenv.NewWaitingListener(t, time.Second*10) + listener.Start() + defer listener.Close() + + // kafka client is lazy and will not connect to the broker until the first message is produced + // so the router will start even if the kafka connection fails + errRouter := testenv.RunWithError(t, &testenv.Config{ + RouterConfigJSONTemplate: testenv.ConfigWithEdfsKafkaJSONTemplate, + EnableKafka: true, + ModifyEventsConfiguration: func(config *config.EventsConfiguration) { + for i := range config.Providers.Kafka { + config.Providers.Kafka[i].Brokers = []string{"localhost:" + strconv.Itoa(listener.Port())} + } + }, + }, func(t *testing.T, xEnv *testenv.Environment) { + t.Log("should be called") + }) + + assert.NoError(t, errRouter) + }) } func TestFlakyKafkaEvents(t *testing.T) { @@ -1245,3 +1294,20 @@ func produceKafkaMessage(t *testing.T, xEnv *testenv.Environment, topicName stri fErr := xEnv.KafkaClient.Flush(ctx) require.NoError(t, fErr) } + +func readKafkaMessages(xEnv *testenv.Environment, topicName string, msgs int) ([]*kgo.Record, error) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + client, err := kgo.NewClient( + kgo.SeedBrokers(xEnv.GetKafkaSeeds()...), + kgo.ConsumeTopics(xEnv.GetPubSubName(topicName)), + ) + if err != nil 
{ + return nil, err + } + + fetchs := client.PollRecords(ctx, msgs) + + return fetchs.Records(), nil +} diff --git a/router-tests/events/nats_events_test.go b/router-tests/events/nats_events_test.go index b55adcd97a..5e83a29abe 100644 --- a/router-tests/events/nats_events_test.go +++ b/router-tests/events/nats_events_test.go @@ -8,6 +8,7 @@ import ( "io" "net/http" "net/url" + "strconv" "sync/atomic" "testing" "time" @@ -137,9 +138,9 @@ func TestNatsEvents(t *testing.T) { xEnv.WaitForConnectionCount(0, NatsWaitTimeout) natsLogs := xEnv.Observer().FilterMessageSnippet("Nats").All() - require.Len(t, natsLogs, 4) + require.Len(t, natsLogs, 2) providerIDFields := xEnv.Observer().FilterField(zap.String("provider_id", "my-nats")).All() - require.Len(t, providerIDFields, 2) + require.Len(t, providerIDFields, 3) }) }) @@ -1808,6 +1809,118 @@ func TestNatsEvents(t *testing.T) { assert.Eventually(t, completed.Load, NatsWaitTimeout, time.Millisecond*100) }) }) + + t.Run("NATS startup and shutdown with wrong URLs should not stop router from starting indefinitely", func(t *testing.T) { + t.Parallel() + + listener := testenv.NewWaitingListener(t, time.Second*10) + listener.Start() + defer listener.Close() + + errRouter := testenv.RunWithError(t, &testenv.Config{ + RouterConfigJSONTemplate: testenv.ConfigWithEdfsNatsJSONTemplate, + EnableNats: false, + ModifyEventsConfiguration: func(cfg *config.EventsConfiguration) { + url := "nats://127.0.0.1:" + strconv.Itoa(listener.Port()) + natsEventSources := make([]config.NatsEventSource, len(testenv.DemoNatsProviders)) + for _, sourceName := range testenv.DemoNatsProviders { + natsEventSources = append(natsEventSources, config.NatsEventSource{ + ID: sourceName, + URL: url, + }) + } + cfg.Providers.Nats = natsEventSources + }, + }, func(t *testing.T, xEnv *testenv.Environment) { + assert.Fail(t, "Should not be called") + }) + + assert.Error(t, errRouter) + }) + + t.Run("multiple subscribe async with variables", func(t *testing.T) { + 
t.Parallel() + + testenv.Run(t, &testenv.Config{ + RouterConfigJSONTemplate: testenv.ConfigWithEdfsNatsJSONTemplate, + EnableNats: true, + LogObservation: testenv.LogObservationConfig{ + Enabled: true, + LogLevel: zapcore.InfoLevel, + }, + NoRetryClient: true, + }, func(t *testing.T, xEnv *testenv.Environment) { + var subscriptionOne struct { + employeeUpdated struct { + ID float64 `graphql:"id"` + Details struct { + Surname string `graphql:"surname"` + } `graphql:"details"` + } `graphql:"employeeUpdatedMyNats(id: 1)"` + } + + var subscriptionTwo struct { + employeeUpdated struct { + ID float64 `graphql:"id"` + Details struct { + Forename string `graphql:"forename"` + } `graphql:"details"` + } `graphql:"employeeUpdatedMyNats(id: 1)"` + } + + surl := xEnv.GraphQLWebSocketSubscriptionURL() + client1 := graphql.NewSubscriptionClient(surl) + t.Cleanup(func() { + _ = client1.Close() + }) + client2 := graphql.NewSubscriptionClient(surl) + t.Cleanup(func() { + _ = client2.Close() + }) + + var subscriptionOneCalled atomic.Uint32 + var subscriptionTwoCalled atomic.Uint32 + + subscriptionOneID, err := client1.Subscribe(&subscriptionOne, nil, func(dataValue []byte, errValue error) error { + subscriptionOneCalled.Add(1) + require.NoError(t, errValue) + require.JSONEq(t, `{"employeeUpdated":{"id":3,"details":{"surname":"Avram"}}}`, string(dataValue)) + return nil + }) + require.NoError(t, err) + require.NotEqual(t, "", subscriptionOneID) + go func() { + clientErr := client1.Run() + require.NoError(t, clientErr) + }() + xEnv.WaitForSubscriptionCount(1, NatsWaitTimeout) + errUnsubscribeOne := client1.Unsubscribe(subscriptionOneID) + require.NoError(t, errUnsubscribeOne) + xEnv.WaitForSubscriptionCount(0, NatsWaitTimeout) + + subscriptionTwoID, err := client2.Subscribe(&subscriptionTwo, nil, func(dataValue []byte, errValue error) error { + subscriptionTwoCalled.Add(1) + require.NoError(t, errValue) + require.JSONEq(t, `{"employeeUpdated":{"id":3,"details":{"forename":"Stefan"}}}`, 
string(dataValue)) + return nil + }) + require.NoError(t, err) + require.NotEqual(t, "", subscriptionTwoID) + go func() { + clientErr := client2.Run() + require.NoError(t, clientErr) + }() + xEnv.WaitForSubscriptionCount(1, NatsWaitTimeout) + errUnsubscribeTwo := client2.Unsubscribe(subscriptionTwoID) + require.NoError(t, errUnsubscribeTwo) + xEnv.WaitForSubscriptionCount(0, NatsWaitTimeout) + + errClose1 := client1.Close() + require.NoError(t, errClose1) + errClose2 := client2.Close() + require.NoError(t, errClose2) + }) + }) } func TestFlakyNatsEvents(t *testing.T) { diff --git a/router-tests/go.mod b/router-tests/go.mod index afd1179459..4ded2329af 100644 --- a/router-tests/go.mod +++ b/router-tests/go.mod @@ -15,7 +15,6 @@ require ( github.com/hasura/go-graphql-client v0.12.2 github.com/mark3labs/mcp-go v0.30.0 github.com/nats-io/nats.go v1.35.0 - github.com/ory/dockertest/v3 v3.12.0 github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.6.1 github.com/redis/go-redis/v9 v9.4.0 @@ -41,13 +40,9 @@ require ( require ( connectrpc.com/connect v1.16.2 // indirect - dario.cat/mergo v1.0.0 // indirect github.com/99designs/gqlgen v0.17.63 // indirect - github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/KimMachineGun/automemlimit v0.6.1 // indirect github.com/MicahParks/keyfunc/v3 v3.3.5 // indirect - github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/agnivade/levenshtein v1.2.0 // indirect github.com/andybalholm/brotli v1.1.0 // indirect github.com/benbjohnson/clock v1.3.0 // indirect @@ -58,15 +53,11 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cilium/ebpf v0.16.0 // indirect github.com/containerd/cgroups/v3 v3.0.2 // indirect - github.com/containerd/continuity v0.4.5 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect 
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/docker/cli v27.4.1+incompatible // indirect - github.com/docker/docker v27.1.1+incompatible // indirect - github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/expr-lang/expr v1.17.3 // indirect @@ -85,9 +76,7 @@ require ( github.com/goccy/go-json v0.10.3 // indirect github.com/goccy/go-yaml v1.17.1 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect @@ -108,16 +97,10 @@ require ( github.com/minio/md5-simd v1.1.2 // indirect github.com/minio/minio-go/v7 v7.0.74 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/moby/sys/user v0.3.0 // indirect - github.com/moby/term v0.5.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nats-io/nkeys v0.4.7 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/oklog/run v1.0.0 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0 // indirect - github.com/opencontainers/runc v1.2.3 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d // indirect @@ -139,6 +122,7 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect 
github.com/sosodev/duration v1.3.1 // indirect github.com/spf13/cast v1.7.1 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect github.com/tidwall/sjson v1.2.5 // indirect @@ -149,9 +133,6 @@ require ( github.com/urfave/cli/v2 v2.27.5 // indirect github.com/vektah/gqlparser/v2 v2.5.21 // indirect github.com/wundergraph/astjson v0.0.0-20250106123708-be463c97e083 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect github.com/yosida95/uritemplate/v3 v3.0.2 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect @@ -182,7 +163,6 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect google.golang.org/grpc v1.68.1 // indirect gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect nhooyr.io/websocket v1.8.11 // indirect ) diff --git a/router-tests/go.sum b/router-tests/go.sum index 1c1388e1ee..eed2504bf6 100644 --- a/router-tests/go.sum +++ b/router-tests/go.sum @@ -1,13 +1,7 @@ connectrpc.com/connect v1.16.2 h1:ybd6y+ls7GOlb7Bh5C8+ghA6SvCBajHwxssO2CGFjqE= connectrpc.com/connect v1.16.2/go.mod h1:n2kgwskMHXC+lVqb18wngEpF95ldBHXjZYJussz5FRc= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/99designs/gqlgen v0.17.63 h1:HCdaYDPd9HqUXRchEvmE3EFzELRwLlaJ8DBuyC8Cqto= github.com/99designs/gqlgen v0.17.63/go.mod h1:sVCM2iwIZisJjTI/DEC3fpH+HFgxY1496ZJ+jbT9IjA= 
-github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8= github.com/KimMachineGun/automemlimit v0.6.1/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY= @@ -15,10 +9,6 @@ github.com/MicahParks/jwkset v0.9.0 h1:xDlGu6mZJdJ+mgAI4mIRqWm2p8Vrx0U98LMgRObw4 github.com/MicahParks/jwkset v0.9.0/go.mod h1:fVrj6TmG1aKlJEeceAz7JsXGTXEn72zP1px3us53JrA= github.com/MicahParks/keyfunc/v3 v3.3.5 h1:7ceAJLUAldnoueHDNzF8Bx06oVcQ5CfJnYwNt1U3YYo= github.com/MicahParks/keyfunc/v3 v3.3.5/go.mod h1:SdCCyMJn/bYqWDvARspC6nCT8Sk74MjuAY22C7dCST8= -github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= -github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= -github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/PuerkitoBio/goquery v1.9.3 h1:mpJr/ikUA9/GNJB/DBZcGeFDXUtosHRyRrwh7KGdTG0= github.com/PuerkitoBio/goquery v1.9.3/go.mod h1:1ndLHPdTz+DyQPICCWYlYQMPl0oXZj0G6D4LCYA6u4U= github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY= @@ -61,14 +51,10 @@ github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NA github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0= github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= -github.com/containerd/continuity v0.4.5 
h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= -github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -83,12 +69,6 @@ github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7c github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/cli v27.4.1+incompatible h1:VzPiUlRJ/xh+otB75gva3r05isHMo5wXDfPRi5/b4hI= -github.com/docker/cli v27.4.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= -github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= -github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 
h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= @@ -117,8 +97,6 @@ github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7 github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-redis/redis_rate/v10 v10.0.1 h1:calPxi7tVlxojKunJwQ72kwfozdy25RjA0bCj1h0MUo= github.com/go-redis/redis_rate/v10 v10.0.1/go.mod h1:EMiuO9+cjRkR7UvdvwMO7vbgqJkltQHtwbdIQvaBKIU= -github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= -github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= @@ -134,8 +112,6 @@ github.com/goccy/go-yaml v1.17.1/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7Lk github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= @@ -147,8 +123,6 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.6.0 
h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= @@ -192,7 +166,6 @@ github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCX github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE= github.com/kingledion/go-tools v0.6.0 h1:y8C/4mWoHgLkO45dB+Y/j0o4Y4WUB5lDTAcMPMtFpTg= github.com/kingledion/go-tools v0.6.0/go.mod h1:qcDJQxBui/H/hterGb90GMlLs9Yi7QrwaJL8OGdbsms= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= @@ -209,8 +182,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= -github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/logrusorgru/aurora/v4 v4.0.0 
h1:sRjfPpun/63iADiSvGGjgA1cAYegEWMPCJdUpJYn9JA= github.com/logrusorgru/aurora/v4 v4.0.0/go.mod h1:lP0iIa2nrnT/qoFXcOZSrZQpJ1o6n2CUf/hyHi2Q4ZQ= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= @@ -236,12 +207,6 @@ github.com/minio/minio-go/v7 v7.0.74 h1:fTo/XlPBTSpo3BAMshlwKL5RspXRv9us5UeHEGYC github.com/minio/minio-go/v7 v7.0.74/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= -github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= -github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nats-io/nats.go v1.35.0 h1:XFNqNM7v5B+MQMKqVGAyHwYhyKb48jrenXNxIU20ULk= @@ -252,16 +217,8 @@ github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= 
-github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= -github.com/opencontainers/runc v1.2.3 h1:fxE7amCzfZflJO2lHXf4y/y8M1BoAqp+FVmG19oYB80= -github.com/opencontainers/runc v1.2.3/go.mod h1:nSxcWUydXrsBZVYNSkTjoQ/N6rcyTtn+1SD5D4+kRIM= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw= -github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d h1:U+PMnTlV2tu7RuMK5etusZG3Cf+rpow5hqQByeCzJ2g= @@ -325,6 +282,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -381,8 +339,6 @@ github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGC github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod 
h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= @@ -445,8 +401,6 @@ golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0J golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -468,9 +422,7 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -489,20 +441,14 @@ golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 h1:GVIKPyP/kLIyVOgOnTwFOrvQaQUzOzGMCxgFUOEmm24= @@ -522,13 +468,10 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0= nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= diff --git a/router-tests/mcp_test.go b/router-tests/mcp_test.go index 22f47cebe8..51efd4c987 100644 --- 
a/router-tests/mcp_test.go +++ b/router-tests/mcp_test.go @@ -209,6 +209,7 @@ func TestMCP(t *testing.T) { t.Run("Execute Query", func(t *testing.T) { t.Run("Execute operation of type query with valid input", func(t *testing.T) { testenv.Run(t, &testenv.Config{ + EnableNats: true, MCP: config.MCPConfiguration{ Enabled: true, }, @@ -265,6 +266,7 @@ func TestMCP(t *testing.T) { t.Run("Execute Mutation", func(t *testing.T) { t.Run("Execute operation of type mutation with valid input", func(t *testing.T) { testenv.Run(t, &testenv.Config{ + EnableNats: true, MCP: config.MCPConfiguration{ Enabled: true, }, diff --git a/router-tests/structured_logging_test.go b/router-tests/structured_logging_test.go index 1bca435aef..a862ceba88 100644 --- a/router-tests/structured_logging_test.go +++ b/router-tests/structured_logging_test.go @@ -4,17 +4,19 @@ import ( "bytes" "encoding/json" "fmt" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/otel/sdk/metric" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" "math" "net/http" "os" "path/filepath" "testing" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/sdk/metric" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "github.com/stretchr/testify/assert" + "github.com/wundergraph/cosmo/router-tests/testenv" "github.com/wundergraph/cosmo/router/core" "github.com/wundergraph/cosmo/router/pkg/config" @@ -162,13 +164,15 @@ func TestRouterStartLogs(t *testing.T) { }, }, func(t *testing.T, xEnv *testenv.Environment) { logEntries := xEnv.Observer().All() - require.Len(t, logEntries, 13) + require.Len(t, logEntries, 12) natsLogs := xEnv.Observer().FilterMessageSnippet("Nats Event source enabled").All() - require.Len(t, natsLogs, 4) + require.Len(t, natsLogs, 2) + natsConnectedLogs := xEnv.Observer().FilterMessageSnippet("NATS connection established").All() + require.Len(t, natsConnectedLogs, 4) providerIDFields := xEnv.Observer().FilterField(zap.String("provider_id", 
"default")).All() - require.Len(t, providerIDFields, 2) + require.Len(t, providerIDFields, 3) kafkaLogs := xEnv.Observer().FilterMessageSnippet("Kafka Event source enabled").All() - require.Len(t, kafkaLogs, 2) + require.Len(t, kafkaLogs, 1) playgroundLog := xEnv.Observer().FilterMessage("Serving GraphQL playground") require.Equal(t, playgroundLog.Len(), 1) featureFlagLog := xEnv.Observer().FilterMessage("Feature flags enabled") @@ -312,7 +316,7 @@ func TestFlakyAccessLogs(t *testing.T) { }) require.JSONEq(t, employeesIDData, res.Body) logEntries := xEnv.Observer().All() - require.Len(t, logEntries, 10) + require.Len(t, logEntries, 6) requestLog := xEnv.Observer().FilterMessage("/graphql") require.Equal(t, requestLog.Len(), 1) requestContext := requestLog.All()[0].ContextMap() @@ -348,7 +352,7 @@ func TestFlakyAccessLogs(t *testing.T) { }) require.JSONEq(t, employeesIDData, res.Body) logEntries := xEnv.Observer().All() - require.Len(t, logEntries, 10) + require.Len(t, logEntries, 6) requestLog := xEnv.Observer().FilterMessage("/graphql") require.Equal(t, requestLog.Len(), 1) requestContext := requestLog.All()[0].ContextMap() @@ -446,7 +450,7 @@ func TestFlakyAccessLogs(t *testing.T) { }) require.JSONEq(t, employeesIDData, res.Body) logEntries := xEnv.Observer().All() - require.Len(t, logEntries, 10) + require.Len(t, logEntries, 6) requestLog := xEnv.Observer().FilterMessage("/graphql") require.Equal(t, requestLog.Len(), 1) requestContext := requestLog.All()[0].ContextMap() @@ -507,7 +511,7 @@ func TestFlakyAccessLogs(t *testing.T) { }) require.JSONEq(t, employeesIDData, res.Body) logEntries := xEnv.Observer().All() - require.Len(t, logEntries, 10) + require.Len(t, logEntries, 6) requestLog := xEnv.Observer().FilterMessage("/graphql") require.Equal(t, requestLog.Len(), 1) requestContext := requestLog.All()[0].ContextMap() @@ -552,7 +556,7 @@ func TestFlakyAccessLogs(t *testing.T) { }) require.JSONEq(t, employeesIDData, res.Body) logEntries := xEnv.Observer().All() - 
require.Len(t, logEntries, 10) + require.Len(t, logEntries, 6) requestLog := xEnv.Observer().FilterMessage("/graphql") require.Equal(t, requestLog.Len(), 1) requestContext := requestLog.All()[0].ContextMap() @@ -667,7 +671,7 @@ func TestFlakyAccessLogs(t *testing.T) { require.NoError(t, err) require.Equal(t, `{"errors":[{"message":"unexpected token - got: EOF want one of: [RBRACE IDENT SPREAD]","locations":[{"line":0,"column":0}]}]}`, res.Body) logEntries := xEnv.Observer().All() - require.Len(t, logEntries, 10) + require.Len(t, logEntries, 6) requestLog := xEnv.Observer().FilterMessage("/graphql") require.Equal(t, requestLog.Len(), 1) requestContext := requestLog.All()[0].ContextMap() @@ -786,7 +790,7 @@ func TestFlakyAccessLogs(t *testing.T) { require.NoError(t, err) require.Equal(t, `{"errors":[{"message":"field: notExists not defined on type: Query","path":["query"]}]}`, res.Body) logEntries := xEnv.Observer().All() - require.Len(t, logEntries, 10) + require.Len(t, logEntries, 6) requestLog := xEnv.Observer().FilterMessage("/graphql") require.Equal(t, requestLog.Len(), 1) requestContext := requestLog.All()[0].ContextMap() @@ -919,7 +923,7 @@ func TestFlakyAccessLogs(t *testing.T) { require.NoError(t, err) require.Equal(t, "", res.Body) logEntries := xEnv.Observer().All() - require.Len(t, logEntries, 11) + require.Len(t, logEntries, 7) requestLog := xEnv.Observer().FilterMessage("[Recovery from panic]") require.Equal(t, requestLog.Len(), 1) requestContext := requestLog.All()[0].ContextMap() @@ -950,7 +954,7 @@ func TestFlakyAccessLogs(t *testing.T) { "validation_time", } - require.NotEmpty(t, logEntries[10].Stack) + require.NotEmpty(t, logEntries[6].Stack) checkValues(t, requestContext, expectedValues, additionalExpectedKeys) }) @@ -1056,7 +1060,7 @@ func TestFlakyAccessLogs(t *testing.T) { require.NoError(t, err) require.Equal(t, "", res.Body) logEntries := xEnv.Observer().All() - require.Len(t, logEntries, 11) + require.Len(t, logEntries, 7) requestLog := 
xEnv.Observer().FilterMessage("[Recovery from panic]") require.Equal(t, requestLog.Len(), 1) requestContext := requestLog.All()[0].ContextMap() @@ -1087,7 +1091,7 @@ func TestFlakyAccessLogs(t *testing.T) { "validation_time", } - require.NotEmpty(t, logEntries[10].Stack) + require.NotEmpty(t, logEntries[6].Stack) checkValues(t, requestContext, expectedValues, additionalExpectedKeys) }) @@ -1145,7 +1149,7 @@ func TestFlakyAccessLogs(t *testing.T) { }) require.Equal(t, `{"errors":[{"message":"Failed to fetch from Subgraph 'products' at Path 'employees'.","extensions":{"errors":[{"message":"Unauthorized","extensions":{"code":"UNAUTHORIZED"}}],"statusCode":403}}],"data":{"employees":[{"id":1,"details":{"forename":"Jens","surname":"Neuse"},"notes":null},{"id":2,"details":{"forename":"Dustin","surname":"Deus"},"notes":null},{"id":3,"details":{"forename":"Stefan","surname":"Avram"},"notes":null},{"id":4,"details":{"forename":"Björn","surname":"Schwenzer"},"notes":null},{"id":5,"details":{"forename":"Sergiy","surname":"Petrunin"},"notes":null},{"id":7,"details":{"forename":"Suvij","surname":"Surya"},"notes":null},{"id":8,"details":{"forename":"Nithin","surname":"Kumar"},"notes":null},{"id":10,"details":{"forename":"Eelco","surname":"Wiersma"},"notes":null},{"id":11,"details":{"forename":"Alexandra","surname":"Neuse"},"notes":null},{"id":12,"details":{"forename":"David","surname":"Stutt"},"notes":null}]}}`, res.Body) logEntries := xEnv.Observer().All() - require.Len(t, logEntries, 10) + require.Len(t, logEntries, 6) requestLog := xEnv.Observer().FilterMessage("/graphql") require.Equal(t, requestLog.Len(), 1) requestContext := requestLog.All()[0].ContextMap() @@ -1190,7 +1194,7 @@ func TestFlakyAccessLogs(t *testing.T) { }) require.JSONEq(t, employeesIDData, res.Body) logEntries := xEnv.Observer().All() - require.Len(t, logEntries, 11) + require.Len(t, logEntries, 7) requestLog := xEnv.Observer().FilterMessage("/graphql") require.Equal(t, requestLog.Len(), 2) @@ -1241,7 
+1245,7 @@ func TestFlakyAccessLogs(t *testing.T) { }) require.JSONEq(t, employeesIDData, res.Body) logEntries := xEnv.Observer().All() - require.Len(t, logEntries, 11) + require.Len(t, logEntries, 7) requestLog := xEnv.Observer().FilterMessage("/graphql") require.Equal(t, requestLog.Len(), 2) requestContext := requestLog.All()[0].ContextMap() @@ -1346,7 +1350,7 @@ func TestFlakyAccessLogs(t *testing.T) { }) require.JSONEq(t, employeesIDData, res.Body) logEntries := xEnv.Observer().All() - require.Len(t, logEntries, 11) + require.Len(t, logEntries, 7) requestLog := xEnv.Observer().FilterMessage("/graphql") require.Equal(t, requestLog.Len(), 2) requestContext := requestLog.All()[0].ContextMap() @@ -1433,7 +1437,7 @@ func TestFlakyAccessLogs(t *testing.T) { }) require.JSONEq(t, employeesIDData, res.Body) logEntries := xEnv.Observer().All() - require.Len(t, logEntries, 11) + require.Len(t, logEntries, 7) requestLog := xEnv.Observer().FilterMessage("/graphql") require.Equal(t, requestLog.Len(), 2) requestContext := requestLog.All()[0].ContextMap() @@ -1528,7 +1532,7 @@ func TestFlakyAccessLogs(t *testing.T) { }) require.Equal(t, `{"errors":[{"message":"Failed to fetch from Subgraph 'products' at Path 
'employees'.","extensions":{"errors":[{"message":"Unauthorized","extensions":{"code":"UNAUTHORIZED"}}],"statusCode":403}}],"data":{"employees":[{"id":1,"details":{"forename":"Jens","surname":"Neuse"},"notes":null},{"id":2,"details":{"forename":"Dustin","surname":"Deus"},"notes":null},{"id":3,"details":{"forename":"Stefan","surname":"Avram"},"notes":null},{"id":4,"details":{"forename":"Björn","surname":"Schwenzer"},"notes":null},{"id":5,"details":{"forename":"Sergiy","surname":"Petrunin"},"notes":null},{"id":7,"details":{"forename":"Suvij","surname":"Surya"},"notes":null},{"id":8,"details":{"forename":"Nithin","surname":"Kumar"},"notes":null},{"id":10,"details":{"forename":"Eelco","surname":"Wiersma"},"notes":null},{"id":11,"details":{"forename":"Alexandra","surname":"Neuse"},"notes":null},{"id":12,"details":{"forename":"David","surname":"Stutt"},"notes":null}]}}`, res.Body) logEntries := xEnv.Observer().All() - require.Len(t, logEntries, 12) + require.Len(t, logEntries, 8) requestLog := xEnv.Observer().FilterMessage("/graphql") require.Equal(t, requestLog.Len(), 3) @@ -1711,7 +1715,7 @@ func TestFlakyAccessLogs(t *testing.T) { require.NoError(t, err) require.Equal(t, `{"data":{"employees":[{"id":1},{"id":2},{"id":3},{"id":4},{"id":5},{"id":7},{"id":8},{"id":10},{"id":11},{"id":12}]}}`, res.Body) logEntries := xEnv.Observer().All() - require.Len(t, logEntries, 11) + require.Len(t, logEntries, 7) requestLog := xEnv.Observer().FilterMessage("/graphql") require.Equal(t, requestLog.Len(), 2) requestContext := requestLog.All()[0].ContextMap() @@ -1804,7 +1808,7 @@ func TestFlakyAccessLogs(t *testing.T) { }) require.Equal(t, `{"errors":[{"message":"Failed to fetch from Subgraph 'products' at Path 
'employees'."}],"data":{"employees":[{"id":1,"details":{"forename":"Jens","surname":"Neuse"},"notes":null},{"id":2,"details":{"forename":"Dustin","surname":"Deus"},"notes":null},{"id":3,"details":{"forename":"Stefan","surname":"Avram"},"notes":null},{"id":4,"details":{"forename":"Björn","surname":"Schwenzer"},"notes":null},{"id":5,"details":{"forename":"Sergiy","surname":"Petrunin"},"notes":null},{"id":7,"details":{"forename":"Suvij","surname":"Surya"},"notes":null},{"id":8,"details":{"forename":"Nithin","surname":"Kumar"},"notes":null},{"id":10,"details":{"forename":"Eelco","surname":"Wiersma"},"notes":null},{"id":11,"details":{"forename":"Alexandra","surname":"Neuse"},"notes":null},{"id":12,"details":{"forename":"David","surname":"Stutt"},"notes":null}]}}`, res.Body) logEntries := xEnv.Observer().All() - require.Len(t, logEntries, 12) + require.Len(t, logEntries, 8) requestLog := xEnv.Observer().FilterMessage("/graphql") require.Equal(t, requestLog.Len(), 3) diff --git a/router-tests/testenv/pubsub.go b/router-tests/testenv/pubsub.go index 9d67d9dd73..4011c231d4 100644 --- a/router-tests/testenv/pubsub.go +++ b/router-tests/testenv/pubsub.go @@ -5,24 +5,33 @@ import ( "time" "github.com/nats-io/nats.go" - "github.com/ory/dockertest/v3" - "github.com/twmb/franz-go/pkg/kgo" nodev1 "github.com/wundergraph/cosmo/router/gen/proto/wg/cosmo/node/v1" ) -type KafkaData struct { - Client *kgo.Client - Brokers []string - Resource *dockertest.Resource +type NatsParams struct { + Opts []nats.Option + Url string } type NatsData struct { Connections []*nats.Conn + Params []*NatsParams } func setupNatsClients(t testing.TB) (*NatsData, error) { natsData := &NatsData{} - for range demoNatsProviders { + for range DemoNatsProviders { + param := &NatsParams{ + Url: nats.DefaultURL, + Opts: []nats.Option{ + nats.MaxReconnects(10), + nats.ReconnectWait(1 * time.Second), + nats.Timeout(5 * time.Second), + nats.ErrorHandler(func(conn *nats.Conn, subscription *nats.Subscription, err 
error) { + t.Log(err) + }), + }, + } natsConnection, err := nats.Connect( nats.DefaultURL, nats.MaxReconnects(10), @@ -35,6 +44,8 @@ func setupNatsClients(t testing.TB) (*NatsData, error) { if err != nil { return nil, err } + + natsData.Params = append(natsData.Params, param) natsData.Connections = append(natsData.Connections, natsConnection) } return natsData, nil diff --git a/router-tests/testenv/testenv.go b/router-tests/testenv/testenv.go index 637e68156f..616a402a7f 100644 --- a/router-tests/testenv/testenv.go +++ b/router-tests/testenv/testenv.go @@ -40,7 +40,6 @@ import ( "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-retryablehttp" "github.com/nats-io/nats.go" - "github.com/nats-io/nats.go/jetstream" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/twmb/franz-go/pkg/kadm" @@ -54,8 +53,6 @@ import ( "go.uber.org/zap/zaptest/observer" "google.golang.org/protobuf/encoding/protojson" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" - "github.com/wundergraph/cosmo/demo/pkg/subgraphs" "github.com/wundergraph/cosmo/router/core" nodev1 "github.com/wundergraph/cosmo/router/gen/proto/wg/cosmo/node/v1" @@ -85,8 +82,8 @@ var ( ConfigWithEdfsNatsJSONTemplate string //go:embed testdata/configWithPlugins.json ConfigWithPluginsJSONTemplate string - demoNatsProviders = []string{natsDefaultSourceName, myNatsProviderID} - demoKafkaProviders = []string{myKafkaProviderID} + DemoNatsProviders = []string{natsDefaultSourceName, myNatsProviderID} + DemoKafkaProviders = []string{myKafkaProviderID} ) func init() { @@ -792,7 +789,6 @@ func CreateTestEnv(t testing.TB, cfg *Config) (*Environment, error) { if cfg.EnableKafka { cfg.KafkaSeeds = []string{"localhost:9092"} - client, err := kgo.NewClient( kgo.SeedBrokers(cfg.KafkaSeeds...), ) @@ -1266,22 +1262,25 @@ func configureRouter(listenerAddr string, testConfig *Config, routerConfig *node 
testConfig.ModifySubgraphErrorPropagation(&cfg.SubgraphErrorPropagation) } - natsEventSources := make([]config.NatsEventSource, len(demoNatsProviders)) - kafkaEventSources := make([]config.KafkaEventSource, len(demoKafkaProviders)) + var natsEventSources []config.NatsEventSource + var kafkaEventSources []config.KafkaEventSource if natsData != nil { - for _, sourceName := range demoNatsProviders { + for _, sourceName := range DemoNatsProviders { natsEventSources = append(natsEventSources, config.NatsEventSource{ ID: sourceName, URL: nats.DefaultURL, }) } } - for _, sourceName := range demoKafkaProviders { - kafkaEventSources = append(kafkaEventSources, config.KafkaEventSource{ - ID: sourceName, - Brokers: testConfig.KafkaSeeds, - }) + + if testConfig.KafkaSeeds != nil { + for _, sourceName := range DemoKafkaProviders { + kafkaEventSources = append(kafkaEventSources, config.KafkaEventSource{ + ID: sourceName, + Brokers: testConfig.KafkaSeeds, + }) + } } eventsConfiguration := config.EventsConfiguration{ @@ -1584,6 +1583,18 @@ func gqlURL(srv *httptest.Server) string { return path } +func ReadAndCheckJSON(t testing.TB, conn *websocket.Conn, v interface{}) (err error) { + _, payload, err := conn.ReadMessage() + if err != nil { + return err + } + if err := json.Unmarshal(payload, &v); err != nil { + t.Logf("Failed to decode WebSocket message. 
Raw payload: %s", string(payload)) + return err + } + return nil +} + type Environment struct { t testing.TB cfg *Config @@ -1630,6 +1641,10 @@ func (e *Environment) GetPubSubName(name string) string { return e.getPubSubName(name) } +func (e *Environment) GetKafkaSeeds() []string { + return e.cfg.KafkaSeeds +} + func (e *Environment) RouterConfigVersionMain() string { return e.routerConfigVersionMain } @@ -2187,8 +2202,7 @@ func (e *Environment) InitGraphQLWebSocketConnection(header http.Header, query u }) require.NoError(e.t, err) var ack WebSocketMessage - err = conn.ReadJSON(&ack) - require.NoError(e.t, err) + require.NoError(e.t, ReadAndCheckJSON(e.t, conn, &ack)) require.Equal(e.t, "connection_ack", ack.Type) return conn } @@ -2549,7 +2563,7 @@ func WSReadJSON(t testing.TB, conn *websocket.Conn, v interface{}) (err error) { return err } - err = conn.ReadJSON(v) + require.NoError(t, ReadAndCheckJSON(t, conn, v)) // Reset the deadline to prevent future operations from timing out if resetErr := conn.SetReadDeadline(time.Time{}); resetErr != nil { @@ -2665,16 +2679,19 @@ func WSWriteJSON(t testing.TB, conn *websocket.Conn, v interface{}) (err error) func subgraphOptions(ctx context.Context, t testing.TB, logger *zap.Logger, natsData *NatsData, pubSubName func(string) string) *subgraphs.SubgraphOptions { if natsData == nil { return &subgraphs.SubgraphOptions{ - NatsPubSubByProviderID: map[string]pubsub_datasource.NatsPubSub{}, + NatsPubSubByProviderID: map[string]pubsubNats.Adapter{}, GetPubSubName: pubSubName, } } - natsPubSubByProviderID := make(map[string]pubsub_datasource.NatsPubSub, len(demoNatsProviders)) - for _, sourceName := range demoNatsProviders { - js, err := jetstream.New(natsData.Connections[0]) + natsPubSubByProviderID := make(map[string]pubsubNats.Adapter, len(DemoNatsProviders)) + for _, sourceName := range DemoNatsProviders { + adapter, err := pubsubNats.NewAdapter(ctx, logger, natsData.Params[0].Url, natsData.Params[0].Opts, "hostname", 
"listenaddr") require.NoError(t, err) - - natsPubSubByProviderID[sourceName] = pubsubNats.NewConnector(logger, natsData.Connections[0], js, "hostname", "listenaddr").New(ctx) + require.NoError(t, adapter.Startup(ctx)) + t.Cleanup(func() { + require.NoError(t, adapter.Shutdown(context.Background())) + }) + natsPubSubByProviderID[sourceName] = adapter } return &subgraphs.SubgraphOptions{ diff --git a/router-tests/testenv/waitinglistener.go b/router-tests/testenv/waitinglistener.go new file mode 100644 index 0000000000..25aa32c80f --- /dev/null +++ b/router-tests/testenv/waitinglistener.go @@ -0,0 +1,54 @@ +package testenv + +import ( + "context" + "net" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +type WaitingListener struct { + cancel context.CancelFunc + listener *net.Listener + waitTime time.Duration + port int +} + +func (l *WaitingListener) Close() error { + l.cancel() + return (*l.listener).Close() +} + +func (l *WaitingListener) Start() { + go func() { + for { + conn, err := (*l.listener).Accept() + if err != nil { + return + } + time.Sleep(l.waitTime) + conn.Close() + } + }() +} + +func (l *WaitingListener) Port() int { + return l.port +} + +func NewWaitingListener(t *testing.T, waitTime time.Duration) (wl *WaitingListener) { + ctx, cancel := context.WithCancel(context.Background()) + var lc net.ListenConfig + listener, err := lc.Listen(ctx, "tcp", "127.0.0.1:0") + require.NoError(t, err) + + wl = &WaitingListener{ + cancel: cancel, + listener: &listener, + waitTime: waitTime, + port: listener.Addr().(*net.TCPAddr).Port, + } + return wl +} diff --git a/router-tests/websocket_test.go b/router-tests/websocket_test.go index f73a119ffc..64ae313215 100644 --- a/router-tests/websocket_test.go +++ b/router-tests/websocket_test.go @@ -2167,6 +2167,9 @@ func expectConnectAndReadCurrentTime(t *testing.T, xEnv *testenv.Environment) { err = testenv.WSReadJSON(t, conn, &msg) require.NoError(t, err) require.Equal(t, "1", msg.ID) + if msg.Type == 
"error" { + t.Logf("unexpected error on read: %s", string(msg.Payload)) + } require.Equal(t, "next", msg.Type) err = json.Unmarshal(msg.Payload, &payload) require.NoError(t, err) @@ -2176,6 +2179,9 @@ func expectConnectAndReadCurrentTime(t *testing.T, xEnv *testenv.Environment) { err = testenv.WSReadJSON(t, conn, &msg) require.NoError(t, err) require.Equal(t, "1", msg.ID) + if msg.Type == "error" { + t.Logf("unexpected error on read: %s", string(msg.Payload)) + } require.Equal(t, "next", msg.Type) err = json.Unmarshal(msg.Payload, &payload) require.NoError(t, err) @@ -2196,6 +2202,9 @@ func expectConnectAndReadCurrentTime(t *testing.T, xEnv *testenv.Environment) { err = testenv.WSReadJSON(t, conn, &complete) require.NoError(t, err) require.Equal(t, "1", complete.ID) + if complete.Type == "error" { + t.Logf("unexpected error on read: %s", string(complete.Payload)) + } require.Equal(t, "complete", complete.Type) err = conn.SetReadDeadline(time.Now().Add(1 * time.Second)) diff --git a/router/.mockery.yml b/router/.mockery.yml new file mode 100644 index 0000000000..97e6e1a774 --- /dev/null +++ b/router/.mockery.yml @@ -0,0 +1,32 @@ +all: false +dir: '{{.InterfaceDir}}' +filename: mocks.go +force-file-write: true +formatter: goimports +log-level: info +structname: '{{.Mock}}{{.InterfaceName}}' +pkgname: '{{.SrcPackageName}}' +recursive: false +require-template-schema-exists: true +template: testify +template-schema: '{{.Template}}.schema.json' +packages: + github.com/wundergraph/cosmo/router/pkg/pubsub/datasource: + interfaces: + ProviderLifecycle: + ProviderBuilder: + EngineDataSourceFactory: + Provider: + github.com/wundergraph/cosmo/router/pkg/pubsub/nats: + interfaces: + Adapter: + github.com/wundergraph/cosmo/router/pkg/pubsub/kafka: + interfaces: + Adapter: + github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve: + config: + dir: 'pkg/pubsub/datasource' + pkgname: 'datasource' + filename: 'mocks_resolve.go' + interfaces: + SubscriptionUpdater: \ No newline 
at end of file diff --git a/router/Makefile b/router/Makefile index 02524bc1b1..c96959643d 100644 --- a/router/Makefile +++ b/router/Makefile @@ -37,6 +37,7 @@ build-race: build-custom: CGO_ENABLED=0 go build -trimpath -ldflags "-extldflags -static -X github.com/wundergraph/cosmo/router/core.Version=$(VERSION) -X github.com/wundergraph/cosmo/router/core.Date=$(DATE) -X github.com/wundergraph/cosmo/router/core.Commit=$(COMMIT)" -a -o router cmd/custom/main.go -.PHONY: dev test build lint bump-engine update-snapshot - +generate-mocks: + mockery +.PHONY: dev test build lint bump-engine update-snapshot diff --git a/router/core/errors.go b/router/core/errors.go index 47f248f2e7..7f8df34da2 100644 --- a/router/core/errors.go +++ b/router/core/errors.go @@ -12,7 +12,7 @@ import ( rErrors "github.com/wundergraph/cosmo/router/internal/errors" "github.com/wundergraph/cosmo/router/internal/persistedoperation" "github.com/wundergraph/cosmo/router/internal/unique" - "github.com/wundergraph/cosmo/router/pkg/pubsub" + "github.com/wundergraph/cosmo/router/pkg/pubsub/datasource" rtrace "github.com/wundergraph/cosmo/router/pkg/trace" "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/graphql_datasource" "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" @@ -73,7 +73,7 @@ func getErrorType(err error) errorType { return errorTypeContextTimeout } } - var edfsErr *pubsub.Error + var edfsErr *datasource.Error if errors.As(err, &edfsErr) { return errorTypeEDFS } diff --git a/router/core/executor.go b/router/core/executor.go index 1606d1a640..74a5225ccd 100644 --- a/router/core/executor.go +++ b/router/core/executor.go @@ -2,15 +2,10 @@ package core import ( "context" - "crypto/tls" - "errors" "fmt" "net/http" "time" - "github.com/nats-io/nats.go" - "github.com/twmb/franz-go/pkg/kgo" - "github.com/twmb/franz-go/pkg/sasl/plain" "go.uber.org/zap" "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" @@ -23,6 +18,7 @@ import ( nodev1 
"github.com/wundergraph/cosmo/router/gen/proto/wg/cosmo/node/v1" "github.com/wundergraph/cosmo/router/pkg/config" + pubsub_datasource "github.com/wundergraph/cosmo/router/pkg/pubsub/datasource" "github.com/wundergraph/cosmo/router/pkg/routerplugin" ) @@ -38,6 +34,7 @@ type ExecutorConfigurationBuilder struct { pluginHost *routerplugin.Host subscriptionClientOptions *SubscriptionClientOptions + instanceData InstanceData } type Executor struct { @@ -56,19 +53,19 @@ type ExecutorBuildOptions struct { EngineConfig *nodev1.EngineConfiguration Subgraphs []*nodev1.Subgraph RouterEngineConfig *RouterEngineConfiguration - PubSubProviders *EnginePubSubProviders Reporter resolve.Reporter ApolloCompatibilityFlags config.ApolloCompatibilityFlags ApolloRouterCompatibilityFlags config.ApolloRouterCompatibilityFlags HeartbeatInterval time.Duration TraceClientRequired bool PluginsEnabled bool + InstanceData InstanceData } -func (b *ExecutorConfigurationBuilder) Build(ctx context.Context, opts *ExecutorBuildOptions) (*Executor, error) { - planConfig, err := b.buildPlannerConfiguration(ctx, opts.EngineConfig, opts.Subgraphs, opts.RouterEngineConfig, opts.PubSubProviders, opts.PluginsEnabled) +func (b *ExecutorConfigurationBuilder) Build(ctx context.Context, opts *ExecutorBuildOptions) (*Executor, []pubsub_datasource.Provider, error) { + planConfig, providers, err := b.buildPlannerConfiguration(ctx, opts.EngineConfig, opts.Subgraphs, opts.RouterEngineConfig, opts.PluginsEnabled) if err != nil { - return nil, fmt.Errorf("failed to build planner configuration: %w", err) + return nil, nil, fmt.Errorf("failed to build planner configuration: %w", err) } options := resolve.ResolverOptions{ @@ -132,7 +129,7 @@ func (b *ExecutorConfigurationBuilder) Build(ctx context.Context, opts *Executor routerSchemaDefinition, report = astparser.ParseGraphqlDocumentString(opts.EngineConfig.GraphqlSchema) if report.HasErrors() { - return nil, fmt.Errorf("failed to parse graphql schema from engine config: 
%w", report) + return nil, providers, fmt.Errorf("failed to parse graphql schema from engine config: %w", report) } // we need to merge the base schema, it contains the __schema and __type queries, // as well as built-in scalars like Int, String, etc... @@ -140,7 +137,7 @@ func (b *ExecutorConfigurationBuilder) Build(ctx context.Context, opts *Executor // the engine needs to have them defined, otherwise it cannot resolve such fields err = asttransform.MergeDefinitionWithBaseSchema(&routerSchemaDefinition) if err != nil { - return nil, fmt.Errorf("failed to merge graphql schema with base schema: %w", err) + return nil, providers, fmt.Errorf("failed to merge graphql schema with base schema: %w", err) } if clientSchemaStr := opts.EngineConfig.GetGraphqlClientSchema(); clientSchemaStr != "" { @@ -149,11 +146,11 @@ func (b *ExecutorConfigurationBuilder) Build(ctx context.Context, opts *Executor clientSchema, report := astparser.ParseGraphqlDocumentString(clientSchemaStr) if report.HasErrors() { - return nil, fmt.Errorf("failed to parse graphql client schema from engine config: %w", report) + return nil, providers, fmt.Errorf("failed to parse graphql client schema from engine config: %w", report) } err = asttransform.MergeDefinitionWithBaseSchema(&clientSchema) if err != nil { - return nil, fmt.Errorf("failed to merge graphql client schema with base schema: %w", err) + return nil, providers, fmt.Errorf("failed to merge graphql client schema with base schema: %w", err) } clientSchemaDefinition = &clientSchema } else { @@ -169,7 +166,7 @@ func (b *ExecutorConfigurationBuilder) Build(ctx context.Context, opts *Executor // datasource is attached to Query.__schema, Query.__type, __Type.fields and __Type.enumValues fields introspectionFactory, err := introspection_datasource.NewIntrospectionConfigFactory(clientSchemaDefinition) if err != nil { - return nil, fmt.Errorf("failed to create introspection config factory: %w", err) + return nil, providers, fmt.Errorf("failed to 
create introspection config factory: %w", err) } fieldConfigs := introspectionFactory.BuildFieldConfigurations() // we need to add these fields to the config @@ -200,87 +197,15 @@ func (b *ExecutorConfigurationBuilder) Build(ctx context.Context, opts *Executor Resolver: resolver, RenameTypeNames: renameTypeNames, TrackUsageInfo: b.trackUsageInfo, - }, nil + }, providers, nil } -func buildNatsOptions(eventSource config.NatsEventSource, logger *zap.Logger) ([]nats.Option, error) { - opts := []nats.Option{ - nats.Name(fmt.Sprintf("cosmo.router.edfs.nats.%s", eventSource.ID)), - nats.ReconnectJitter(500*time.Millisecond, 2*time.Second), - nats.ClosedHandler(func(conn *nats.Conn) { - logger.Info("NATS connection closed", zap.String("provider_id", eventSource.ID), zap.Error(conn.LastError())) - }), - nats.ConnectHandler(func(nc *nats.Conn) { - logger.Info("NATS connection established", zap.String("provider_id", eventSource.ID), zap.String("url", nc.ConnectedUrlRedacted())) - }), - nats.DisconnectErrHandler(func(nc *nats.Conn, err error) { - if err != nil { - logger.Error("NATS disconnected; will attempt to reconnect", zap.Error(err), zap.String("provider_id", eventSource.ID)) - } else { - logger.Info("NATS disconnected", zap.String("provider_id", eventSource.ID)) - } - }), - nats.ErrorHandler(func(conn *nats.Conn, subscription *nats.Subscription, err error) { - if errors.Is(err, nats.ErrSlowConsumer) { - logger.Warn( - "NATS slow consumer detected. Events are being dropped. 
Please consider increasing the buffer size or reducing the number of messages being sent.", - zap.Error(err), - zap.String("provider_id", eventSource.ID), - ) - } else { - logger.Error("NATS error", zap.Error(err)) - } - }), - nats.ReconnectHandler(func(conn *nats.Conn) { - logger.Info("NATS reconnected", zap.String("provider_id", eventSource.ID), zap.String("url", conn.ConnectedUrlRedacted())) - }), - } - - if eventSource.Authentication != nil { - if eventSource.Authentication.Token != nil { - opts = append(opts, nats.Token(*eventSource.Authentication.Token)) - } else if eventSource.Authentication.UserInfo.Username != nil && eventSource.Authentication.UserInfo.Password != nil { - opts = append(opts, nats.UserInfo(*eventSource.Authentication.UserInfo.Username, *eventSource.Authentication.UserInfo.Password)) - } - } - - return opts, nil -} - -// buildKafkaOptions creates a list of kgo.Opt options for the given Kafka event source configuration. -// Only general options like TLS, SASL, etc. are configured here. Specific options like topics, etc. are -// configured in the KafkaPubSub implementation. -func buildKafkaOptions(eventSource config.KafkaEventSource) ([]kgo.Opt, error) { - opts := []kgo.Opt{ - kgo.SeedBrokers(eventSource.Brokers...), - // Ensure proper timeouts are set - kgo.ProduceRequestTimeout(10 * time.Second), - kgo.ConnIdleTimeout(60 * time.Second), - } - - if eventSource.TLS != nil && eventSource.TLS.Enabled { - opts = append(opts, - // Configure TLS. Uses SystemCertPool for RootCAs by default. 
- kgo.DialTLSConfig(new(tls.Config)), - ) - } - - if eventSource.Authentication != nil && eventSource.Authentication.SASLPlain.Username != nil && eventSource.Authentication.SASLPlain.Password != nil { - opts = append(opts, kgo.SASL(plain.Auth{ - User: *eventSource.Authentication.SASLPlain.Username, - Pass: *eventSource.Authentication.SASLPlain.Password, - }.AsMechanism())) - } - - return opts, nil -} - -func (b *ExecutorConfigurationBuilder) buildPlannerConfiguration(ctx context.Context, engineConfig *nodev1.EngineConfiguration, subgraphs []*nodev1.Subgraph, routerEngineCfg *RouterEngineConfiguration, pubSubProviders *EnginePubSubProviders, pluginsEnabled bool) (*plan.Configuration, error) { +func (b *ExecutorConfigurationBuilder) buildPlannerConfiguration(ctx context.Context, engineConfig *nodev1.EngineConfiguration, subgraphs []*nodev1.Subgraph, routerEngineCfg *RouterEngineConfiguration, pluginsEnabled bool) (*plan.Configuration, []pubsub_datasource.Provider, error) { // this loader is used to take the engine config and create a plan config // the plan config is what the engine uses to turn a GraphQL Request into an execution plan // the plan config is stateful as it carries connection pools and other things - loader := NewLoader(b.trackUsageInfo, NewDefaultFactoryResolver( + loader := NewLoader(ctx, b.trackUsageInfo, NewDefaultFactoryResolver( ctx, b.transportOptions, b.subscriptionClientOptions, @@ -290,14 +215,13 @@ func (b *ExecutorConfigurationBuilder) buildPlannerConfiguration(ctx context.Con b.logger, routerEngineCfg.Execution.EnableSingleFlight, routerEngineCfg.Execution.EnableNetPoll, - pubSubProviders.nats, - pubSubProviders.kafka, - )) + b.instanceData, + ), b.logger) // this generates the plan config using the data source factories from the config package - planConfig, err := loader.Load(engineConfig, subgraphs, routerEngineCfg, pluginsEnabled) + planConfig, providers, err := loader.Load(engineConfig, subgraphs, routerEngineCfg, pluginsEnabled) if 
err != nil { - return nil, fmt.Errorf("failed to load configuration: %w", err) + return nil, nil, fmt.Errorf("failed to load configuration: %w", err) } debug := &routerEngineCfg.Execution.Debug planConfig.Debug = plan.DebugConfiguration{ @@ -313,5 +237,6 @@ func (b *ExecutorConfigurationBuilder) buildPlannerConfiguration(ctx context.Con planConfig.MinifySubgraphOperations = routerEngineCfg.Execution.MinifySubgraphOperations planConfig.EnableOperationNamePropagation = routerEngineCfg.Execution.EnableSubgraphFetchOperationName - return planConfig, nil + + return planConfig, providers, nil } diff --git a/router/core/factoryresolver.go b/router/core/factoryresolver.go index aaccb35599..ceece61fc8 100644 --- a/router/core/factoryresolver.go +++ b/router/core/factoryresolver.go @@ -5,13 +5,13 @@ import ( "encoding/json" "fmt" "net/http" - "net/url" "slices" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/argument_templates" - "github.com/buger/jsonparser" + "github.com/wundergraph/cosmo/router/pkg/pubsub" + pubsub_datasource "github.com/wundergraph/cosmo/router/pkg/pubsub/datasource" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/argument_templates" "github.com/wundergraph/cosmo/router/pkg/config" "github.com/wundergraph/cosmo/router/pkg/routerplugin" @@ -21,7 +21,6 @@ import ( "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/graphql_datasource" grpcdatasource "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/grpc_datasource" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/staticdatasource" "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" @@ -30,15 +29,22 @@ import ( ) type Loader struct { + ctx context.Context resolver FactoryResolver // includeInfo controls whether additional information like type usage and field usage is included in the plan de includeInfo bool + logger *zap.Logger +} + +type 
InstanceData struct { + HostName string + ListenAddress string } type FactoryResolver interface { ResolveGraphqlFactory(subgraphName string) (plan.PlannerFactory[graphql_datasource.Configuration], error) ResolveStaticFactory() (plan.PlannerFactory[staticdatasource.Configuration], error) - ResolvePubsubFactory() (plan.PlannerFactory[pubsub_datasource.Configuration], error) + InstanceData() InstanceData } type ApiTransportFactory interface { @@ -48,7 +54,6 @@ type ApiTransportFactory interface { type DefaultFactoryResolver struct { static *staticdatasource.Factory[staticdatasource.Configuration] - pubsub *pubsub_datasource.Factory[pubsub_datasource.Configuration] log *zap.Logger engineCtx context.Context @@ -61,6 +66,7 @@ type DefaultFactoryResolver struct { pluginHost *routerplugin.Host factoryLogger abstractlogger.Logger + instanceData InstanceData } func NewDefaultFactoryResolver( @@ -73,8 +79,7 @@ func NewDefaultFactoryResolver( log *zap.Logger, enableSingleFlight bool, enableNetPoll bool, - natsPubSubBySourceID map[string]pubsub_datasource.NatsPubSub, - kafkaPubSubBySourceID map[string]pubsub_datasource.KafkaPubSub, + instanceData InstanceData, ) *DefaultFactoryResolver { transportFactory := NewTransport(transportOptions) @@ -144,7 +149,6 @@ func NewDefaultFactoryResolver( return &DefaultFactoryResolver{ static: &staticdatasource.Factory[staticdatasource.Configuration]{}, - pubsub: pubsub_datasource.NewFactory(ctx, natsPubSubBySourceID, kafkaPubSubBySourceID), log: log, factoryLogger: factoryLogger, engineCtx: ctx, @@ -155,6 +159,7 @@ func NewDefaultFactoryResolver( httpClient: defaultHTTPClient, subgraphHTTPClients: subgraphHTTPClients, pluginHost: pluginHost, + instanceData: instanceData, } } @@ -179,14 +184,16 @@ func (d *DefaultFactoryResolver) ResolveStaticFactory() (factory plan.PlannerFac return d.static, nil } -func (d *DefaultFactoryResolver) ResolvePubsubFactory() (factory plan.PlannerFactory[pubsub_datasource.Configuration], err error) { - return 
d.pubsub, nil +func (d *DefaultFactoryResolver) InstanceData() InstanceData { + return d.instanceData } -func NewLoader(includeInfo bool, resolver FactoryResolver) *Loader { +func NewLoader(ctx context.Context, includeInfo bool, resolver FactoryResolver, logger *zap.Logger) *Loader { return &Loader{ + ctx: ctx, resolver: resolver, includeInfo: includeInfo, + logger: logger, } } @@ -259,7 +266,7 @@ func mapProtoFilterToPlanFilter(input *nodev1.SubscriptionFilterCondition, outpu return nil } -func (l *Loader) Load(engineConfig *nodev1.EngineConfiguration, subgraphs []*nodev1.Subgraph, routerEngineConfig *RouterEngineConfiguration, pluginsEnabled bool) (*plan.Configuration, error) { +func (l *Loader) Load(engineConfig *nodev1.EngineConfiguration, subgraphs []*nodev1.Subgraph, routerEngineConfig *RouterEngineConfiguration, pluginsEnabled bool) (*plan.Configuration, []pubsub_datasource.Provider, error) { var outConfig plan.Configuration // attach field usage information to the plan outConfig.DefaultFlushIntervalMillis = engineConfig.DefaultFlushInterval @@ -294,6 +301,9 @@ func (l *Loader) Load(engineConfig *nodev1.EngineConfiguration, subgraphs []*nod }) } + var providers []pubsub_datasource.Provider + var pubSubDS []pubsub.DataSourceConfigurationWithMetadata + for _, in := range engineConfig.DatasourceConfigurations { var out plan.DataSource @@ -301,7 +311,7 @@ func (l *Loader) Load(engineConfig *nodev1.EngineConfiguration, subgraphs []*nod case nodev1.DataSourceKind_STATIC: factory, err := l.resolver.ResolveStaticFactory() if err != nil { - return nil, err + return nil, providers, err } out, err = plan.NewDataSourceConfiguration[staticdatasource.Configuration]( @@ -313,7 +323,7 @@ func (l *Loader) Load(engineConfig *nodev1.EngineConfiguration, subgraphs []*nod }, ) if err != nil { - return nil, fmt.Errorf("error creating data source configuration for data source %s: %w", in.Id, err) + return nil, providers, fmt.Errorf("error creating data source configuration for 
data source %s: %w", in.Id, err) } case nodev1.DataSourceKind_GRAPHQL: @@ -342,7 +352,7 @@ func (l *Loader) Load(engineConfig *nodev1.EngineConfiguration, subgraphs []*nod graphqlSchema, err := l.LoadInternedString(engineConfig, in.CustomGraphql.GetUpstreamSchema()) if err != nil { - return nil, fmt.Errorf("could not load GraphQL schema for data source %s: %w", in.Id, err) + return nil, providers, fmt.Errorf("could not load GraphQL schema for data source %s: %w", in.Id, err) } var subscriptionUseSSE bool @@ -381,7 +391,7 @@ func (l *Loader) Load(engineConfig *nodev1.EngineConfiguration, subgraphs []*nod dataSourceRules := FetchURLRules(routerEngineConfig.Headers, subgraphs, subscriptionUrl) forwardedClientHeaders, forwardedClientRegexps, err := PropagatedHeaders(dataSourceRules) if err != nil { - return nil, fmt.Errorf("error parsing header rules for data source %s: %w", in.Id, err) + return nil, providers, fmt.Errorf("error parsing header rules for data source %s: %w", in.Id, err) } schemaConfiguration, err := graphql_datasource.NewSchemaConfiguration( @@ -392,14 +402,14 @@ func (l *Loader) Load(engineConfig *nodev1.EngineConfiguration, subgraphs []*nod }, ) if err != nil { - return nil, fmt.Errorf("error creating schema configuration for data source %s: %w", in.Id, err) + return nil, providers, fmt.Errorf("error creating schema configuration for data source %s: %w", in.Id, err) } grpcConfig := toGRPCConfiguration(in.CustomGraphql.Grpc, pluginsEnabled) if grpcConfig != nil { grpcConfig.Compiler, err = grpcdatasource.NewProtoCompiler(in.CustomGraphql.Grpc.ProtoSchema, grpcConfig.Mapping) if err != nil { - return nil, fmt.Errorf("error creating proto compiler for data source %s: %w", in.Id, err) + return nil, providers, fmt.Errorf("error creating proto compiler for data source %s: %w", in.Id, err) } } @@ -422,14 +432,14 @@ func (l *Loader) Load(engineConfig *nodev1.EngineConfiguration, subgraphs []*nod GRPC: grpcConfig, }) if err != nil { - return nil, 
fmt.Errorf("error creating custom configuration for data source %s: %w", in.Id, err) + return nil, providers, fmt.Errorf("error creating custom configuration for data source %s: %w", in.Id, err) } dataSourceName := l.subgraphName(subgraphs, in.Id) factory, err := l.resolver.ResolveGraphqlFactory(dataSourceName) if err != nil { - return nil, err + return nil, providers, err } out, err = plan.NewDataSourceConfigurationWithName[graphql_datasource.Configuration]( @@ -440,83 +450,44 @@ func (l *Loader) Load(engineConfig *nodev1.EngineConfiguration, subgraphs []*nod customConfiguration, ) if err != nil { - return nil, fmt.Errorf("error creating data source configuration for data source %s: %w", in.Id, err) + return nil, providers, fmt.Errorf("error creating data source configuration for data source %s: %w", in.Id, err) } case nodev1.DataSourceKind_PUBSUB: - var eventConfigurations []pubsub_datasource.EventConfiguration - - for _, eventConfiguration := range in.GetCustomEvents().GetNats() { - eventType, err := pubsub_datasource.EventTypeFromString(eventConfiguration.EngineEventConfiguration.Type.String()) - if err != nil { - return nil, fmt.Errorf("invalid event type %q for data source %q: %w", eventConfiguration.EngineEventConfiguration.Type.String(), in.Id, err) - } - - var streamConfiguration *pubsub_datasource.NatsStreamConfiguration - if eventConfiguration.StreamConfiguration != nil { - streamConfiguration = &pubsub_datasource.NatsStreamConfiguration{ - Consumer: eventConfiguration.StreamConfiguration.GetConsumerName(), - StreamName: eventConfiguration.StreamConfiguration.GetStreamName(), - ConsumerInactiveThreshold: eventConfiguration.StreamConfiguration.GetConsumerInactiveThreshold(), - } - } - - eventConfigurations = append(eventConfigurations, pubsub_datasource.EventConfiguration{ - Metadata: &pubsub_datasource.EventMetadata{ - ProviderID: eventConfiguration.EngineEventConfiguration.GetProviderId(), - Type: eventType, - TypeName: 
eventConfiguration.EngineEventConfiguration.GetTypeName(), - FieldName: eventConfiguration.EngineEventConfiguration.GetFieldName(), - }, - Configuration: &pubsub_datasource.NatsEventConfiguration{ - StreamConfiguration: streamConfiguration, - Subjects: eventConfiguration.GetSubjects(), - }, - }) - } - - for _, eventConfiguration := range in.GetCustomEvents().GetKafka() { - eventType, err := pubsub_datasource.EventTypeFromString(eventConfiguration.EngineEventConfiguration.Type.String()) - if err != nil { - return nil, fmt.Errorf("invalid event type %q for data source %q: %w", eventConfiguration.EngineEventConfiguration.Type.String(), in.Id, err) - } + pubSubDS = append(pubSubDS, pubsub.DataSourceConfigurationWithMetadata{ + Configuration: in, + Metadata: l.dataSourceMetaData(in), + }) + default: + return nil, providers, fmt.Errorf("unknown data source type %q", in.Kind) + } - eventConfigurations = append(eventConfigurations, pubsub_datasource.EventConfiguration{ - Metadata: &pubsub_datasource.EventMetadata{ - ProviderID: eventConfiguration.EngineEventConfiguration.GetProviderId(), - Type: eventType, - TypeName: eventConfiguration.EngineEventConfiguration.GetTypeName(), - FieldName: eventConfiguration.EngineEventConfiguration.GetFieldName(), - }, - Configuration: &pubsub_datasource.KafkaEventConfiguration{ - Topics: eventConfiguration.GetTopics(), - }, - }) - } + if out != nil { + outConfig.DataSources = append(outConfig.DataSources, out) + } + } - factory, err := l.resolver.ResolvePubsubFactory() - if err != nil { - return nil, err - } + factoryProviders, factoryDataSources, err := pubsub.BuildProvidersAndDataSources( + l.ctx, + routerEngineConfig.Events, + l.logger, + pubSubDS, + l.resolver.InstanceData().HostName, + l.resolver.InstanceData().ListenAddress, + ) + if err != nil { + return nil, providers, err + } - out, err = plan.NewDataSourceConfiguration[pubsub_datasource.Configuration]( - in.Id, - factory, - l.dataSourceMetaData(in), - 
pubsub_datasource.Configuration{ - Events: eventConfigurations, - }, - ) - if err != nil { - return nil, fmt.Errorf("error creating data source configuration for data source %s: %w", in.Id, err) - } - default: - return nil, fmt.Errorf("unknown data source type %q", in.Kind) - } + if len(factoryProviders) > 0 { + providers = append(providers, factoryProviders...) + } - outConfig.DataSources = append(outConfig.DataSources, out) + if len(factoryDataSources) > 0 { + outConfig.DataSources = append(outConfig.DataSources, factoryDataSources...) } - return &outConfig, nil + + return &outConfig, providers, nil } func (l *Loader) subgraphName(subgraphs []*nodev1.Subgraph, dataSourceID string) string { diff --git a/router/core/graph_server.go b/router/core/graph_server.go index 4c2a01625f..447382c1e3 100644 --- a/router/core/graph_server.go +++ b/router/core/graph_server.go @@ -21,8 +21,6 @@ import ( "github.com/golang-jwt/jwt/v5" "github.com/klauspost/compress/gzhttp" "github.com/klauspost/compress/gzip" - "github.com/nats-io/nats.go" - "github.com/nats-io/nats.go/jetstream" "go.opentelemetry.io/otel/attribute" otelmetric "go.opentelemetry.io/otel/metric" oteltrace "go.opentelemetry.io/otel/trace" @@ -30,6 +28,7 @@ import ( "go.uber.org/zap" "go.uber.org/zap/zapcore" "golang.org/x/exp/maps" + "golang.org/x/sync/errgroup" "github.com/wundergraph/cosmo/router/gen/proto/wg/cosmo/common" nodev1 "github.com/wundergraph/cosmo/router/gen/proto/wg/cosmo/node/v1" @@ -46,13 +45,10 @@ import ( "github.com/wundergraph/cosmo/router/pkg/logging" rmetric "github.com/wundergraph/cosmo/router/pkg/metric" "github.com/wundergraph/cosmo/router/pkg/otel" - "github.com/wundergraph/cosmo/router/pkg/pubsub" - "github.com/wundergraph/cosmo/router/pkg/pubsub/kafka" - pubsubNats "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" + "github.com/wundergraph/cosmo/router/pkg/pubsub/datasource" "github.com/wundergraph/cosmo/router/pkg/routerplugin" "github.com/wundergraph/cosmo/router/pkg/statistics" 
rtrace "github.com/wundergraph/cosmo/router/pkg/trace" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" ) const ( @@ -67,11 +63,6 @@ type ( HealthChecks() health.Checker } - EnginePubSubProviders struct { - nats map[string]pubsub_datasource.NatsPubSub - kafka map[string]pubsub_datasource.KafkaPubSub - } - // graphServer is the swappable implementation of a Graph instance which is an HTTP mux with middlewares. // Everytime a schema is updated, the old graph server is shutdown and a new graph server is created. // For feature flags, a graphql server has multiple mux and is dynamically switched based on the feature flag header or cookie. @@ -80,7 +71,6 @@ type ( *Config context context.Context cancelFunc context.CancelFunc - pubSubProviders *EnginePubSubProviders storageProviders *config.StorageProviders engineStats statistics.EngineStatistics playgroundHandler func(http.Handler) http.Handler @@ -99,8 +89,8 @@ type ( otlpEngineMetrics *rmetric.EngineMetrics prometheusEngineMetrics *rmetric.EngineMetrics connectionMetrics *rmetric.ConnectionMetrics - hostName string - routerListenAddr string + instanceData InstanceData + pubSubProviders []datasource.Provider traceDialer *TraceDialer pluginHost *routerplugin.Host } @@ -147,11 +137,9 @@ func newGraphServer(ctx context.Context, r *Router, routerConfig *nodev1.RouterC baseRouterConfigVersion: routerConfig.GetVersion(), inFlightRequests: &atomic.Uint64{}, graphMuxList: make([]*graphMux, 0, 1), - routerListenAddr: r.listenAddr, - hostName: r.hostName, - pubSubProviders: &EnginePubSubProviders{ - nats: map[string]pubsub_datasource.NatsPubSub{}, - kafka: map[string]pubsub_datasource.KafkaPubSub{}, + instanceData: InstanceData{ + HostName: r.hostName, + ListenAddress: r.listenAddr, }, storageProviders: &r.storageProviders, } @@ -991,11 +979,6 @@ func (s *graphServer) buildGraphMux(ctx context.Context, SubgraphErrorPropagation: s.subgraphErrorPropagation, } - err = 
s.buildPubSubConfiguration(ctx, engineConfig, routerEngineConfig) - if err != nil { - return nil, fmt.Errorf("failed to build pubsub configuration: %w", err) - } - // map[string]*http.Transport cannot be coerced into map[string]http.RoundTripper, unfortunately subgraphTippers := map[string]http.RoundTripper{} for subgraph, subgraphTransport := range s.subgraphTransports { @@ -1052,24 +1035,29 @@ func (s *graphServer) buildGraphMux(ctx context.Context, }, } - executor, err := ecb.Build( + executor, providers, err := ecb.Build( ctx, &ExecutorBuildOptions{ EngineConfig: engineConfig, Subgraphs: configSubgraphs, RouterEngineConfig: routerEngineConfig, - PubSubProviders: s.pubSubProviders, Reporter: s.engineStats, ApolloCompatibilityFlags: s.apolloCompatibilityFlags, ApolloRouterCompatibilityFlags: s.apolloRouterCompatibilityFlags, HeartbeatInterval: s.multipartHeartbeatInterval, PluginsEnabled: s.plugins.Enabled, + InstanceData: s.instanceData, }, ) if err != nil { return nil, fmt.Errorf("failed to build plan configuration: %w", err) } + s.pubSubProviders = providers + if pubSubStartupErr := s.startupPubSubProviders(ctx); pubSubStartupErr != nil { + return nil, pubSubStartupErr + } + operationProcessor := NewOperationProcessor(OperationProcessorOptions{ Executor: executor, MaxOperationSizeInBytes: int64(s.routerTrafficConfig.MaxRequestBodyBytes), @@ -1383,86 +1371,6 @@ func (s *graphServer) setupPluginHost(ctx context.Context, config *nodev1.Engine return nil } -func (s *graphServer) buildPubSubConfiguration(ctx context.Context, engineConfig *nodev1.EngineConfiguration, routerEngineCfg *RouterEngineConfiguration) error { - datasourceConfigurations := engineConfig.GetDatasourceConfigurations() - for _, datasourceConfiguration := range datasourceConfigurations { - if datasourceConfiguration.CustomEvents == nil { - continue - } - - for _, eventConfiguration := range datasourceConfiguration.GetCustomEvents().GetNats() { - - providerID := 
eventConfiguration.EngineEventConfiguration.GetProviderId() - // if this source name's provider has already been initiated, do not try to initiate again - _, ok := s.pubSubProviders.nats[providerID] - if ok { - continue - } - - for _, eventSource := range routerEngineCfg.Events.Providers.Nats { - if eventSource.ID == eventConfiguration.EngineEventConfiguration.GetProviderId() { - options, err := buildNatsOptions(eventSource, s.logger) - if err != nil { - return fmt.Errorf("failed to build options for Nats provider with ID \"%s\": %w", providerID, err) - } - natsConnection, err := nats.Connect(eventSource.URL, options...) - if err != nil { - return fmt.Errorf("failed to create connection for Nats provider with ID \"%s\": %w", providerID, err) - } - js, err := jetstream.New(natsConnection) - if err != nil { - return err - } - - s.pubSubProviders.nats[providerID] = pubsubNats.NewConnector(s.logger, natsConnection, js, s.hostName, s.routerListenAddr).New(ctx) - - break - } - } - - _, ok = s.pubSubProviders.nats[providerID] - if !ok { - return fmt.Errorf("failed to find Nats provider with ID \"%s\". 
Ensure the provider definition is part of the config", providerID) - } - } - - for _, eventConfiguration := range datasourceConfiguration.GetCustomEvents().GetKafka() { - - providerID := eventConfiguration.EngineEventConfiguration.GetProviderId() - // if this source name's provider has already been initiated, do not try to initiate again - _, ok := s.pubSubProviders.kafka[providerID] - if ok { - continue - } - - for _, eventSource := range routerEngineCfg.Events.Providers.Kafka { - if eventSource.ID == providerID { - options, err := buildKafkaOptions(eventSource) - if err != nil { - return fmt.Errorf("failed to build options for Kafka provider with ID \"%s\": %w", providerID, err) - } - ps, err := kafka.NewConnector(s.logger, options) - if err != nil { - return fmt.Errorf("failed to create connection for Kafka provider with ID \"%s\": %w", providerID, err) - } - - s.pubSubProviders.kafka[providerID] = ps.New(ctx) - - break - } - } - - _, ok = s.pubSubProviders.kafka[providerID] - if !ok { - return fmt.Errorf("failed to find Kafka provider with ID \"%s\". Ensure the provider definition is part of the config", providerID) - } - } - - } - - return nil -} - // wait waits for all in-flight requests to finish. Similar to http.Server.Shutdown we wait in intervals + jitter // to make the shutdown process more efficient. 
func (s *graphServer) wait(ctx context.Context) error { @@ -1542,24 +1450,8 @@ func (s *graphServer) Shutdown(ctx context.Context) error { } } - if s.pubSubProviders != nil { - - s.logger.Debug("Shutting down pubsub providers") - - for _, pubSub := range s.pubSubProviders.nats { - if p, ok := pubSub.(pubsub.Lifecycle); ok { - if err := p.Shutdown(ctx); err != nil { - finalErr = errors.Join(finalErr, err) - } - } - } - for _, pubSub := range s.pubSubProviders.kafka { - if p, ok := pubSub.(pubsub.Lifecycle); ok { - if err := p.Shutdown(ctx); err != nil { - finalErr = errors.Join(finalErr, err) - } - } - } + if err := s.shutdownPubSubProviders(ctx); err != nil { + finalErr = errors.Join(finalErr, err) } // Shutdown all graphs muxes to release resources @@ -1588,6 +1480,56 @@ func (s *graphServer) Shutdown(ctx context.Context) error { return finalErr } +// startupPubSubProviders starts all pubsub providers +// It returns an error if any of the providers fail to start +// or if some providers takes to long to start +func (s *graphServer) startupPubSubProviders(ctx context.Context) error { + // Default timeout for pubsub provider startup + const defaultStartupTimeout = 5 * time.Second + + return s.providersActionWithTimeout(ctx, func(ctx context.Context, provider datasource.Provider) error { + return provider.Startup(ctx) + }, defaultStartupTimeout, "pubsub provider startup timed out") +} + +// shutdownPubSubProviders shuts down all pubsub providers +// It returns an error if any of the providers fail to shutdown +// or if some providers takes to long to shutdown +func (s *graphServer) shutdownPubSubProviders(ctx context.Context) error { + // Default timeout for pubsub provider shutdown + const defaultShutdownTimeout = 5 * time.Second + + return s.providersActionWithTimeout(ctx, func(ctx context.Context, provider datasource.Provider) error { + return provider.Shutdown(ctx) + }, defaultShutdownTimeout, "pubsub provider shutdown timed out") +} + +func (s *graphServer) 
providersActionWithTimeout(ctx context.Context, action func(ctx context.Context, provider datasource.Provider) error, timeout time.Duration, timeoutMessage string) error { + cancellableCtx, cancel := context.WithCancel(ctx) + defer cancel() + + timer := time.NewTimer(timeout) + defer timer.Stop() + + providersGroup := new(errgroup.Group) + for _, provider := range s.pubSubProviders { + providersGroup.Go(func() error { + actionDone := make(chan error, 1) + go func() { + actionDone <- action(cancellableCtx, provider) + }() + select { + case err := <-actionDone: + return err + case <-timer.C: + return errors.New(timeoutMessage) + } + }) + } + + return providersGroup.Wait() +} + func configureSubgraphOverwrites( engineConfig *nodev1.EngineConfiguration, configSubgraphs []*nodev1.Subgraph, diff --git a/router/core/plan_generator.go b/router/core/plan_generator.go index cb72f141b7..01e45ec915 100644 --- a/router/core/plan_generator.go +++ b/router/core/plan_generator.go @@ -9,6 +9,9 @@ import ( log "github.com/jensneuse/abstractlogger" nodev1 "github.com/wundergraph/cosmo/router/gen/proto/wg/cosmo/node/v1" + "github.com/wundergraph/cosmo/router/pkg/config" + "github.com/wundergraph/cosmo/router/pkg/pubsub/kafka" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" "github.com/wundergraph/graphql-go-tools/v2/pkg/astnormalization" "github.com/wundergraph/graphql-go-tools/v2/pkg/astparser" @@ -16,7 +19,6 @@ import ( "github.com/wundergraph/graphql-go-tools/v2/pkg/astvalidation" "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/graphql_datasource" "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/introspection_datasource" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/postprocess" 
"github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" @@ -234,8 +236,9 @@ func (pg *PlanGenerator) buildRouterConfig(configFilePath string) (*nodev1.Route } func (pg *PlanGenerator) loadConfiguration(routerConfig *nodev1.RouterConfig, logger *zap.Logger, maxDataSourceCollectorsConcurrency uint) error { - natSources := map[string]pubsub_datasource.NatsPubSub{} - kafkaSources := map[string]pubsub_datasource.KafkaPubSub{} + routerEngineConfig := RouterEngineConfiguration{} + natSources := map[string]*nats.ProviderAdapter{} + kafkaSources := map[string]*kafka.ProviderAdapter{} for _, ds := range routerConfig.GetEngineConfig().GetDatasourceConfigurations() { if ds.GetKind() != nodev1.DataSourceKind_PUBSUB || ds.GetCustomEvents() == nil { continue @@ -244,16 +247,21 @@ func (pg *PlanGenerator) loadConfiguration(routerConfig *nodev1.RouterConfig, lo providerId := natConfig.GetEngineEventConfiguration().GetProviderId() if _, ok := natSources[providerId]; !ok { natSources[providerId] = nil + routerEngineConfig.Events.Providers.Nats = append(routerEngineConfig.Events.Providers.Nats, config.NatsEventSource{ + ID: providerId, + }) } } for _, kafkaConfig := range ds.GetCustomEvents().GetKafka() { providerId := kafkaConfig.GetEngineEventConfiguration().GetProviderId() if _, ok := kafkaSources[providerId]; !ok { kafkaSources[providerId] = nil + routerEngineConfig.Events.Providers.Kafka = append(routerEngineConfig.Events.Providers.Kafka, config.KafkaEventSource{ + ID: providerId, + }) } } } - pubSubFactory := pubsub_datasource.NewFactory(context.Background(), natSources, kafkaSources) var netPollConfig graphql_datasource.NetPollConfiguration netPollConfig.ApplyDefaults() @@ -266,16 +274,18 @@ func (pg *PlanGenerator) loadConfiguration(routerConfig *nodev1.RouterConfig, lo graphql_datasource.WithNetPollConfiguration(netPollConfig), ) - loader := NewLoader(false, &DefaultFactoryResolver{ - engineCtx: context.Background(), + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + + loader := NewLoader(ctx, false, &DefaultFactoryResolver{ + engineCtx: ctx, httpClient: http.DefaultClient, streamingClient: http.DefaultClient, subscriptionClient: subscriptionClient, - pubsub: pubSubFactory, - }) + }, logger) // this generates the plan configuration using the data source factories from the config package - planConfig, err := loader.Load(routerConfig.GetEngineConfig(), routerConfig.GetSubgraphs(), &RouterEngineConfiguration{}, false) // TODO: configure plugins + planConfig, _, err := loader.Load(routerConfig.GetEngineConfig(), routerConfig.GetSubgraphs(), &routerEngineConfig, false) // TODO: configure plugins if err != nil { return fmt.Errorf("failed to load configuration: %w", err) } diff --git a/router/go.mod b/router/go.mod index c49c462f94..58595de615 100644 --- a/router/go.mod +++ b/router/go.mod @@ -137,6 +137,7 @@ require ( github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/cast v1.7.1 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect diff --git a/router/go.sum b/router/go.sum index 3b0009745c..f0086ba66b 100644 --- a/router/go.sum +++ b/router/go.sum @@ -250,6 +250,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/router/pkg/config/config.go b/router/pkg/config/config.go index f88ad8241e..ddf514dcff 100644 --- a/router/pkg/config/config.go +++ b/router/pkg/config/config.go @@ -515,6 +515,10 @@ type NatsEventSource struct { Authentication *NatsAuthentication `yaml:"authentication,omitempty"` } +func (n NatsEventSource) GetID() string { + return n.ID +} + type KafkaSASLPlainAuthentication struct { Password *string `yaml:"password,omitempty"` Username *string `yaml:"username,omitempty"` @@ -535,6 +539,10 @@ type KafkaEventSource struct { TLS *KafkaTLSConfiguration `yaml:"tls,omitempty"` } +func (k KafkaEventSource) GetID() string { + return k.ID +} + type EventProviders struct { Nats []NatsEventSource `yaml:"nats,omitempty"` Kafka []KafkaEventSource `yaml:"kafka,omitempty"` diff --git a/router/pkg/plan_generator/plan_generator.go b/router/pkg/plan_generator/plan_generator.go index 6ac9861c79..8b14858431 100644 --- a/router/pkg/plan_generator/plan_generator.go +++ b/router/pkg/plan_generator/plan_generator.go @@ -116,7 +116,10 @@ func PlanGenerator(ctx context.Context, cfg QueryPlanConfig) error { defer wg.Done() planner, err := pg.GetPlanner() if err != nil { + // if we fail to get the planner, we need to cancel the context to stop the other goroutines + // and return here to stop the current goroutine cancelError(fmt.Errorf("failed to get planner: %v", err)) + return } for { select { diff --git a/router/pkg/plan_generator/plan_generator_test.go b/router/pkg/plan_generator/plan_generator_test.go index 7bae1f21ce..6de540c7a7 100644 --- a/router/pkg/plan_generator/plan_generator_test.go +++ b/router/pkg/plan_generator/plan_generator_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.uber.org/zap" ) func getTestDataDir() string { @@ -130,6 +131,7 @@ func TestPlanGenerator(t *testing.T) { ExecutionConfig: 
path.Join(getTestDataDir(), "execution_config", "base.json"), Timeout: "30s", OutputFiles: true, + Logger: zap.NewNop(), } err = PlanGenerator(context.Background(), cfg) diff --git a/router/pkg/pubsub/README.md b/router/pkg/pubsub/README.md new file mode 100644 index 0000000000..b3ea6ae408 --- /dev/null +++ b/router/pkg/pubsub/README.md @@ -0,0 +1,137 @@ + +# Adding a PubSub Provider + +This guide outlines the steps required to integrate a new PubSub provider into the router. + +## Modify the Router Proto + +Update the [`router.proto`](../../../proto/wg/cosmo/node/v1/node.proto) file by adding your provider’s configuration. Follow these steps: + +- Define a new configuration message similar to `KafkaEventConfiguration`. +- Add this configuration as a repeated field within the `DataSourceCustomEvents` message. +- Field naming should reflect the provider's message grouping mechanism. For example, use `channels` if the provider groups messages by channel, or `topics` if it uses topics. + +After making these changes, compile the updated proto definitions by running the following command from the root directory: + +```bash +make generate-go +``` + +This will generate the new proto files in the `gen` folder. + + +## Implement the PubSub Provider + +To implement a new PubSub provider, the following components must be created: +- `SubscriptionEventConfiguration` and `PublishEventConfiguration`: Define the data structures used for communication between the adapter and the engine. +- `ProviderAdapter`: Implements the logic that interfaces with the provider’s client or SDK. +- `SubscriptionDataSource` and `PublishDataSource`: Engine components that leverage the configurations to subscribe and publish data. +- `EngineDataSourceFactory`: Bridges the engine and the provider. +- `ProviderBuilder`: Used by the router to instantiate the provider. 
+ +### `SubscriptionEventConfiguration` and `PublishEventConfiguration` + +These structures should be placed at the top of the `engine_datasource.go` file. Their design is specific to each provider. + +Refer to the [kafka implementation](./kafka/engine_datasource.go) for a working example. + +### `ProviderAdapter` + +This component encapsulates the provider-specific logic. Although not required, it’s best practice to implement the following interface to facilitate testing via mocks: + +```go +type Adapter interface { + Subscribe(ctx context.Context, event SubscriptionEventConfiguration, updater resolve.SubscriptionUpdater) error + Publish(ctx context.Context, event PublishEventConfiguration) error + Startup(ctx context.Context) error + Shutdown(ctx context.Context) error +} +``` + +Refer to the [kafka implementation](./kafka/adapter.go) for a working example. + +### `SubscriptionDataSource` and `PublishDataSource` + +These are the core engine interfaces: + +The engine expects two kinds of structures: +- `SubscriptionDataSource`: Implements `resolve.SubscriptionDataSource` +- `PublishDataSource`: Implements `resolve.DataSource` + +The implementation of `SubscriptionDataSource` and `PublishDataSource` should be in the `engine_datasource.go` file. + +They are going to use the `SubscriptionEventConfiguration` and `PublishEventConfiguration` that you have implemented in the first step. + +Implement these in the `engine_datasource.go` file, referencing the [kafka implementation](./kafka/engine_datasource.go) for a working example. + +### `EngineDataSourceFactory` + +This structure connects the engine (resolve.DataSource and resolve.SubscriptionDataSource) with the provider implementation. It must implement the `EngineDataSourceFactory` interface defined in [datasource.go](./datasource/datasource.go). + +Refer to the [kafka implementation](./kafka/pubsub_datasource.go) for a working example. 
+ +### `ProviderBuilder` + +The builder is responsible for instantiating the provider within the router. It must implement the [ProviderBuilder](./datasource/provider.go) interface. + +The interface has two generic types: +- `P`, the generic type of the options that the provider builder will need, as defined in the [config.go](../config/config.go) (NatsEventSource, KafkaEventSource, ...) +- `E`, the generic type of the event configuration that the provider builder will receive, as defined in the [proto/wg/cosmo/node/v1/node.proto](../../../proto/wg/cosmo/node/v1/node.proto) (KafkaEventConfiguration, NatsEventConfiguration, ...) + +Key methods: +- `BuildProvider`: Initializes the provider with its configuration and receives the provider options (defined by the `P` type) +- `BuildEngineDataSourceFactory`: Creates the data source and receives the event configuration (defined by the `E` type) + +Refer to the [kafka implementation](./kafka/provider_builder.go) for a working example. + +### Add tests + +You should also add tests to your provider. + +### Generate mocks +As a first step, you can use the [mockery](https://github.com/vektra/mockery) tool to generate the mocks for the ProviderAdapter interface you have implemented. To do this, add the following to the `.mockery.yml` file: + +```yaml +packages: + github.com/wundergraph/cosmo/router/pkg/pubsub/{your-provider-name}: + interfaces: + Adapter: +``` + +Then run the following command from the router directory: + +```bash +make generate-mocks +``` + +This will generate the mocks in the `{your-provider-name}/mocks.go` file. + +You can then use the mocks in your tests. + +### Tests + +You should add tests as specified in the table below. 
+ +| Implementation File | Test File | Reference File | +|-------------------|-----------|-----------------| +| engine_datasource.go | engine_datasource_test.go | [kafka implementation](./kafka/engine_datasource_test.go) | +| engine_datasource_factory.go | engine_datasource_factory_test.go | [kafka implementation](./kafka/engine_datasource_factory_test.go) | +| provider_builder.go | provider_builder_test.go | [kafka implementation](./kafka/provider_builder_test.go) | +| pubsub.go | pubsub_test.go | TestBuildProvidersAndDataSources_Kafka_OK | + +## Add the provider to the router + +Update the `BuildProvidersAndDataSources` function in the [pubsub.go](./pubsub.go) file to include your new provider. + +## How to use the new PubSub Provider + +After you have implemented all the above, you can use your PubSub Provider by adding the following to your router config: + +```yaml +pubsub: + providers: + - name: provider-name + type: new-provider +``` + +But to use it in the GraphQL schema, you will have to work in the [composition](../../../composition) package. \ No newline at end of file diff --git a/router/pkg/pubsub/datasource/datasource.go b/router/pkg/pubsub/datasource/datasource.go new file mode 100644 index 0000000000..2f08b97074 --- /dev/null +++ b/router/pkg/pubsub/datasource/datasource.go @@ -0,0 +1,31 @@ +package datasource + +import ( + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// EngineDataSourceFactory is the interface that all pubsub data sources must implement. +// It serves three main purposes: +// 1. Resolving the data source and subscription data source +// 2. Generating the appropriate input for these data sources +// 3. 
Providing access to the engine event configuration +// +// For detailed implementation guidelines, see: +// https://github.com/wundergraph/cosmo/blob/main/router/pkg/pubsub/README.md +type EngineDataSourceFactory interface { + // GetFieldName get the field name where the data source is defined + GetFieldName() string + // ResolveDataSource returns the engine DataSource implementation that contains + // methods which will be called by the Planner when resolving a field + ResolveDataSource() (resolve.DataSource, error) + // ResolveDataSourceInput build the input that will be passed to the engine DataSource + ResolveDataSourceInput(event []byte) (string, error) + // ResolveDataSourceSubscription returns the engine SubscriptionDataSource implementation + // that contains methods to start a subscription, which will be called by the Planner + // when a subscription is initiated + ResolveDataSourceSubscription() (resolve.SubscriptionDataSource, error) + // ResolveDataSourceSubscriptionInput build the input that will be passed to the engine SubscriptionDataSource + ResolveDataSourceSubscriptionInput() (string, error) + // TransformEventData allows the data source to transform the event data using the extractFn + TransformEventData(extractFn ArgumentTemplateCallback) error +} diff --git a/router/pkg/pubsub/error.go b/router/pkg/pubsub/datasource/error.go similarity index 93% rename from router/pkg/pubsub/error.go rename to router/pkg/pubsub/datasource/error.go index f6220fb7b1..f09b271688 100644 --- a/router/pkg/pubsub/error.go +++ b/router/pkg/pubsub/datasource/error.go @@ -1,4 +1,4 @@ -package pubsub +package datasource type Error struct { Internal error diff --git a/router/pkg/pubsub/datasource/factory.go b/router/pkg/pubsub/datasource/factory.go new file mode 100644 index 0000000000..5221d22dc9 --- /dev/null +++ b/router/pkg/pubsub/datasource/factory.go @@ -0,0 +1,47 @@ +package datasource + +import ( + "context" + + "github.com/jensneuse/abstractlogger" + 
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" +) + +type PlannerConfig[PB ProviderBuilder[P, E], P any, E any] struct { + ProviderBuilder PB + Event E +} + +func NewPlannerConfig[PB ProviderBuilder[P, E], P any, E any](providerBuilder PB, event E) *PlannerConfig[PB, P, E] { + return &PlannerConfig[PB, P, E]{ + ProviderBuilder: providerBuilder, + Event: event, + } +} + +func NewPlannerFactory[PB ProviderBuilder[P, E], P any, E any](ctx context.Context, config *PlannerConfig[PB, P, E]) *PlannerFactory[PB, P, E] { + return &PlannerFactory[PB, P, E]{ + config: config, + executionContext: ctx, + } +} + +type PlannerFactory[PB ProviderBuilder[P, E], P any, E any] struct { + config *PlannerConfig[PB, P, E] + executionContext context.Context +} + +func (f *PlannerFactory[PB, P, E]) Planner(_ abstractlogger.Logger) plan.DataSourcePlanner[*PlannerConfig[PB, P, E]] { + return &Planner[PB, P, E]{ + config: f.config, + } +} + +func (f *PlannerFactory[PB, P, E]) Context() context.Context { + return f.executionContext +} + +func (f *PlannerFactory[PB, P, E]) UpstreamSchema(dataSourceConfig plan.DataSourceConfiguration[*PlannerConfig[PB, P, E]]) (*ast.Document, bool) { + return nil, false +} diff --git a/router/pkg/pubsub/datasource/mocks.go b/router/pkg/pubsub/datasource/mocks.go new file mode 100644 index 0000000000..067da9c86c --- /dev/null +++ b/router/pkg/pubsub/datasource/mocks.go @@ -0,0 +1,898 @@ +// Code generated by mockery; DO NOT EDIT. +// github.com/vektra/mockery +// template: testify + +package datasource + +import ( + "context" + + mock "github.com/stretchr/testify/mock" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// NewMockEngineDataSourceFactory creates a new instance of MockEngineDataSourceFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockEngineDataSourceFactory(t interface { + mock.TestingT + Cleanup(func()) +}) *MockEngineDataSourceFactory { + mock := &MockEngineDataSourceFactory{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// MockEngineDataSourceFactory is an autogenerated mock type for the EngineDataSourceFactory type +type MockEngineDataSourceFactory struct { + mock.Mock +} + +type MockEngineDataSourceFactory_Expecter struct { + mock *mock.Mock +} + +func (_m *MockEngineDataSourceFactory) EXPECT() *MockEngineDataSourceFactory_Expecter { + return &MockEngineDataSourceFactory_Expecter{mock: &_m.Mock} +} + +// GetFieldName provides a mock function for the type MockEngineDataSourceFactory +func (_mock *MockEngineDataSourceFactory) GetFieldName() string { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for GetFieldName") + } + + var r0 string + if returnFunc, ok := ret.Get(0).(func() string); ok { + r0 = returnFunc() + } else { + r0 = ret.Get(0).(string) + } + return r0 +} + +// MockEngineDataSourceFactory_GetFieldName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFieldName' +type MockEngineDataSourceFactory_GetFieldName_Call struct { + *mock.Call +} + +// GetFieldName is a helper method to define mock.On call +func (_e *MockEngineDataSourceFactory_Expecter) GetFieldName() *MockEngineDataSourceFactory_GetFieldName_Call { + return &MockEngineDataSourceFactory_GetFieldName_Call{Call: _e.mock.On("GetFieldName")} +} + +func (_c *MockEngineDataSourceFactory_GetFieldName_Call) Run(run func()) *MockEngineDataSourceFactory_GetFieldName_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockEngineDataSourceFactory_GetFieldName_Call) Return(s string) *MockEngineDataSourceFactory_GetFieldName_Call { + _c.Call.Return(s) + return _c +} + +func (_c *MockEngineDataSourceFactory_GetFieldName_Call) RunAndReturn(run func() string) 
*MockEngineDataSourceFactory_GetFieldName_Call { + _c.Call.Return(run) + return _c +} + +// ResolveDataSource provides a mock function for the type MockEngineDataSourceFactory +func (_mock *MockEngineDataSourceFactory) ResolveDataSource() (resolve.DataSource, error) { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for ResolveDataSource") + } + + var r0 resolve.DataSource + var r1 error + if returnFunc, ok := ret.Get(0).(func() (resolve.DataSource, error)); ok { + return returnFunc() + } + if returnFunc, ok := ret.Get(0).(func() resolve.DataSource); ok { + r0 = returnFunc() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(resolve.DataSource) + } + } + if returnFunc, ok := ret.Get(1).(func() error); ok { + r1 = returnFunc() + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// MockEngineDataSourceFactory_ResolveDataSource_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ResolveDataSource' +type MockEngineDataSourceFactory_ResolveDataSource_Call struct { + *mock.Call +} + +// ResolveDataSource is a helper method to define mock.On call +func (_e *MockEngineDataSourceFactory_Expecter) ResolveDataSource() *MockEngineDataSourceFactory_ResolveDataSource_Call { + return &MockEngineDataSourceFactory_ResolveDataSource_Call{Call: _e.mock.On("ResolveDataSource")} +} + +func (_c *MockEngineDataSourceFactory_ResolveDataSource_Call) Run(run func()) *MockEngineDataSourceFactory_ResolveDataSource_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockEngineDataSourceFactory_ResolveDataSource_Call) Return(dataSource resolve.DataSource, err error) *MockEngineDataSourceFactory_ResolveDataSource_Call { + _c.Call.Return(dataSource, err) + return _c +} + +func (_c *MockEngineDataSourceFactory_ResolveDataSource_Call) RunAndReturn(run func() (resolve.DataSource, error)) *MockEngineDataSourceFactory_ResolveDataSource_Call { + _c.Call.Return(run) + return _c +} + 
+// ResolveDataSourceInput provides a mock function for the type MockEngineDataSourceFactory +func (_mock *MockEngineDataSourceFactory) ResolveDataSourceInput(event []byte) (string, error) { + ret := _mock.Called(event) + + if len(ret) == 0 { + panic("no return value specified for ResolveDataSourceInput") + } + + var r0 string + var r1 error + if returnFunc, ok := ret.Get(0).(func([]byte) (string, error)); ok { + return returnFunc(event) + } + if returnFunc, ok := ret.Get(0).(func([]byte) string); ok { + r0 = returnFunc(event) + } else { + r0 = ret.Get(0).(string) + } + if returnFunc, ok := ret.Get(1).(func([]byte) error); ok { + r1 = returnFunc(event) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// MockEngineDataSourceFactory_ResolveDataSourceInput_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ResolveDataSourceInput' +type MockEngineDataSourceFactory_ResolveDataSourceInput_Call struct { + *mock.Call +} + +// ResolveDataSourceInput is a helper method to define mock.On call +// - event []byte +func (_e *MockEngineDataSourceFactory_Expecter) ResolveDataSourceInput(event interface{}) *MockEngineDataSourceFactory_ResolveDataSourceInput_Call { + return &MockEngineDataSourceFactory_ResolveDataSourceInput_Call{Call: _e.mock.On("ResolveDataSourceInput", event)} +} + +func (_c *MockEngineDataSourceFactory_ResolveDataSourceInput_Call) Run(run func(event []byte)) *MockEngineDataSourceFactory_ResolveDataSourceInput_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 []byte + if args[0] != nil { + arg0 = args[0].([]byte) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockEngineDataSourceFactory_ResolveDataSourceInput_Call) Return(s string, err error) *MockEngineDataSourceFactory_ResolveDataSourceInput_Call { + _c.Call.Return(s, err) + return _c +} + +func (_c *MockEngineDataSourceFactory_ResolveDataSourceInput_Call) RunAndReturn(run func(event []byte) (string, error)) 
*MockEngineDataSourceFactory_ResolveDataSourceInput_Call { + _c.Call.Return(run) + return _c +} + +// ResolveDataSourceSubscription provides a mock function for the type MockEngineDataSourceFactory +func (_mock *MockEngineDataSourceFactory) ResolveDataSourceSubscription() (resolve.SubscriptionDataSource, error) { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for ResolveDataSourceSubscription") + } + + var r0 resolve.SubscriptionDataSource + var r1 error + if returnFunc, ok := ret.Get(0).(func() (resolve.SubscriptionDataSource, error)); ok { + return returnFunc() + } + if returnFunc, ok := ret.Get(0).(func() resolve.SubscriptionDataSource); ok { + r0 = returnFunc() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(resolve.SubscriptionDataSource) + } + } + if returnFunc, ok := ret.Get(1).(func() error); ok { + r1 = returnFunc() + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// MockEngineDataSourceFactory_ResolveDataSourceSubscription_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ResolveDataSourceSubscription' +type MockEngineDataSourceFactory_ResolveDataSourceSubscription_Call struct { + *mock.Call +} + +// ResolveDataSourceSubscription is a helper method to define mock.On call +func (_e *MockEngineDataSourceFactory_Expecter) ResolveDataSourceSubscription() *MockEngineDataSourceFactory_ResolveDataSourceSubscription_Call { + return &MockEngineDataSourceFactory_ResolveDataSourceSubscription_Call{Call: _e.mock.On("ResolveDataSourceSubscription")} +} + +func (_c *MockEngineDataSourceFactory_ResolveDataSourceSubscription_Call) Run(run func()) *MockEngineDataSourceFactory_ResolveDataSourceSubscription_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockEngineDataSourceFactory_ResolveDataSourceSubscription_Call) Return(subscriptionDataSource resolve.SubscriptionDataSource, err error) 
*MockEngineDataSourceFactory_ResolveDataSourceSubscription_Call { + _c.Call.Return(subscriptionDataSource, err) + return _c +} + +func (_c *MockEngineDataSourceFactory_ResolveDataSourceSubscription_Call) RunAndReturn(run func() (resolve.SubscriptionDataSource, error)) *MockEngineDataSourceFactory_ResolveDataSourceSubscription_Call { + _c.Call.Return(run) + return _c +} + +// ResolveDataSourceSubscriptionInput provides a mock function for the type MockEngineDataSourceFactory +func (_mock *MockEngineDataSourceFactory) ResolveDataSourceSubscriptionInput() (string, error) { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for ResolveDataSourceSubscriptionInput") + } + + var r0 string + var r1 error + if returnFunc, ok := ret.Get(0).(func() (string, error)); ok { + return returnFunc() + } + if returnFunc, ok := ret.Get(0).(func() string); ok { + r0 = returnFunc() + } else { + r0 = ret.Get(0).(string) + } + if returnFunc, ok := ret.Get(1).(func() error); ok { + r1 = returnFunc() + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// MockEngineDataSourceFactory_ResolveDataSourceSubscriptionInput_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ResolveDataSourceSubscriptionInput' +type MockEngineDataSourceFactory_ResolveDataSourceSubscriptionInput_Call struct { + *mock.Call +} + +// ResolveDataSourceSubscriptionInput is a helper method to define mock.On call +func (_e *MockEngineDataSourceFactory_Expecter) ResolveDataSourceSubscriptionInput() *MockEngineDataSourceFactory_ResolveDataSourceSubscriptionInput_Call { + return &MockEngineDataSourceFactory_ResolveDataSourceSubscriptionInput_Call{Call: _e.mock.On("ResolveDataSourceSubscriptionInput")} +} + +func (_c *MockEngineDataSourceFactory_ResolveDataSourceSubscriptionInput_Call) Run(run func()) *MockEngineDataSourceFactory_ResolveDataSourceSubscriptionInput_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c 
*MockEngineDataSourceFactory_ResolveDataSourceSubscriptionInput_Call) Return(s string, err error) *MockEngineDataSourceFactory_ResolveDataSourceSubscriptionInput_Call { + _c.Call.Return(s, err) + return _c +} + +func (_c *MockEngineDataSourceFactory_ResolveDataSourceSubscriptionInput_Call) RunAndReturn(run func() (string, error)) *MockEngineDataSourceFactory_ResolveDataSourceSubscriptionInput_Call { + _c.Call.Return(run) + return _c +} + +// TransformEventData provides a mock function for the type MockEngineDataSourceFactory +func (_mock *MockEngineDataSourceFactory) TransformEventData(extractFn ArgumentTemplateCallback) error { + ret := _mock.Called(extractFn) + + if len(ret) == 0 { + panic("no return value specified for TransformEventData") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(ArgumentTemplateCallback) error); ok { + r0 = returnFunc(extractFn) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockEngineDataSourceFactory_TransformEventData_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TransformEventData' +type MockEngineDataSourceFactory_TransformEventData_Call struct { + *mock.Call +} + +// TransformEventData is a helper method to define mock.On call +// - extractFn ArgumentTemplateCallback +func (_e *MockEngineDataSourceFactory_Expecter) TransformEventData(extractFn interface{}) *MockEngineDataSourceFactory_TransformEventData_Call { + return &MockEngineDataSourceFactory_TransformEventData_Call{Call: _e.mock.On("TransformEventData", extractFn)} +} + +func (_c *MockEngineDataSourceFactory_TransformEventData_Call) Run(run func(extractFn ArgumentTemplateCallback)) *MockEngineDataSourceFactory_TransformEventData_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 ArgumentTemplateCallback + if args[0] != nil { + arg0 = args[0].(ArgumentTemplateCallback) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockEngineDataSourceFactory_TransformEventData_Call) Return(err error) 
*MockEngineDataSourceFactory_TransformEventData_Call { + _c.Call.Return(err) + return _c +} + +func (_c *MockEngineDataSourceFactory_TransformEventData_Call) RunAndReturn(run func(extractFn ArgumentTemplateCallback) error) *MockEngineDataSourceFactory_TransformEventData_Call { + _c.Call.Return(run) + return _c +} + +// NewMockProviderLifecycle creates a new instance of MockProviderLifecycle. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockProviderLifecycle(t interface { + mock.TestingT + Cleanup(func()) +}) *MockProviderLifecycle { + mock := &MockProviderLifecycle{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// MockProviderLifecycle is an autogenerated mock type for the ProviderLifecycle type +type MockProviderLifecycle struct { + mock.Mock +} + +type MockProviderLifecycle_Expecter struct { + mock *mock.Mock +} + +func (_m *MockProviderLifecycle) EXPECT() *MockProviderLifecycle_Expecter { + return &MockProviderLifecycle_Expecter{mock: &_m.Mock} +} + +// Shutdown provides a mock function for the type MockProviderLifecycle +func (_mock *MockProviderLifecycle) Shutdown(ctx context.Context) error { + ret := _mock.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Shutdown") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = returnFunc(ctx) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockProviderLifecycle_Shutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Shutdown' +type MockProviderLifecycle_Shutdown_Call struct { + *mock.Call +} + +// Shutdown is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockProviderLifecycle_Expecter) Shutdown(ctx interface{}) *MockProviderLifecycle_Shutdown_Call { + return 
&MockProviderLifecycle_Shutdown_Call{Call: _e.mock.On("Shutdown", ctx)} +} + +func (_c *MockProviderLifecycle_Shutdown_Call) Run(run func(ctx context.Context)) *MockProviderLifecycle_Shutdown_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockProviderLifecycle_Shutdown_Call) Return(err error) *MockProviderLifecycle_Shutdown_Call { + _c.Call.Return(err) + return _c +} + +func (_c *MockProviderLifecycle_Shutdown_Call) RunAndReturn(run func(ctx context.Context) error) *MockProviderLifecycle_Shutdown_Call { + _c.Call.Return(run) + return _c +} + +// Startup provides a mock function for the type MockProviderLifecycle +func (_mock *MockProviderLifecycle) Startup(ctx context.Context) error { + ret := _mock.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Startup") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = returnFunc(ctx) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockProviderLifecycle_Startup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Startup' +type MockProviderLifecycle_Startup_Call struct { + *mock.Call +} + +// Startup is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockProviderLifecycle_Expecter) Startup(ctx interface{}) *MockProviderLifecycle_Startup_Call { + return &MockProviderLifecycle_Startup_Call{Call: _e.mock.On("Startup", ctx)} +} + +func (_c *MockProviderLifecycle_Startup_Call) Run(run func(ctx context.Context)) *MockProviderLifecycle_Startup_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockProviderLifecycle_Startup_Call) Return(err error) *MockProviderLifecycle_Startup_Call { + _c.Call.Return(err) + return 
_c +} + +func (_c *MockProviderLifecycle_Startup_Call) RunAndReturn(run func(ctx context.Context) error) *MockProviderLifecycle_Startup_Call { + _c.Call.Return(run) + return _c +} + +// NewMockProvider creates a new instance of MockProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *MockProvider { + mock := &MockProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// MockProvider is an autogenerated mock type for the Provider type +type MockProvider struct { + mock.Mock +} + +type MockProvider_Expecter struct { + mock *mock.Mock +} + +func (_m *MockProvider) EXPECT() *MockProvider_Expecter { + return &MockProvider_Expecter{mock: &_m.Mock} +} + +// ID provides a mock function for the type MockProvider +func (_mock *MockProvider) ID() string { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for ID") + } + + var r0 string + if returnFunc, ok := ret.Get(0).(func() string); ok { + r0 = returnFunc() + } else { + r0 = ret.Get(0).(string) + } + return r0 +} + +// MockProvider_ID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ID' +type MockProvider_ID_Call struct { + *mock.Call +} + +// ID is a helper method to define mock.On call +func (_e *MockProvider_Expecter) ID() *MockProvider_ID_Call { + return &MockProvider_ID_Call{Call: _e.mock.On("ID")} +} + +func (_c *MockProvider_ID_Call) Run(run func()) *MockProvider_ID_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockProvider_ID_Call) Return(s string) *MockProvider_ID_Call { + _c.Call.Return(s) + return _c +} + +func (_c *MockProvider_ID_Call) RunAndReturn(run func() string) *MockProvider_ID_Call { + _c.Call.Return(run) + return _c +} + +// Shutdown provides a 
mock function for the type MockProvider +func (_mock *MockProvider) Shutdown(ctx context.Context) error { + ret := _mock.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Shutdown") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = returnFunc(ctx) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockProvider_Shutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Shutdown' +type MockProvider_Shutdown_Call struct { + *mock.Call +} + +// Shutdown is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockProvider_Expecter) Shutdown(ctx interface{}) *MockProvider_Shutdown_Call { + return &MockProvider_Shutdown_Call{Call: _e.mock.On("Shutdown", ctx)} +} + +func (_c *MockProvider_Shutdown_Call) Run(run func(ctx context.Context)) *MockProvider_Shutdown_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockProvider_Shutdown_Call) Return(err error) *MockProvider_Shutdown_Call { + _c.Call.Return(err) + return _c +} + +func (_c *MockProvider_Shutdown_Call) RunAndReturn(run func(ctx context.Context) error) *MockProvider_Shutdown_Call { + _c.Call.Return(run) + return _c +} + +// Startup provides a mock function for the type MockProvider +func (_mock *MockProvider) Startup(ctx context.Context) error { + ret := _mock.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Startup") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = returnFunc(ctx) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockProvider_Startup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Startup' +type MockProvider_Startup_Call struct { + *mock.Call +} + +// Startup is a helper method to define mock.On call 
+// - ctx context.Context +func (_e *MockProvider_Expecter) Startup(ctx interface{}) *MockProvider_Startup_Call { + return &MockProvider_Startup_Call{Call: _e.mock.On("Startup", ctx)} +} + +func (_c *MockProvider_Startup_Call) Run(run func(ctx context.Context)) *MockProvider_Startup_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockProvider_Startup_Call) Return(err error) *MockProvider_Startup_Call { + _c.Call.Return(err) + return _c +} + +func (_c *MockProvider_Startup_Call) RunAndReturn(run func(ctx context.Context) error) *MockProvider_Startup_Call { + _c.Call.Return(run) + return _c +} + +// TypeID provides a mock function for the type MockProvider +func (_mock *MockProvider) TypeID() string { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for TypeID") + } + + var r0 string + if returnFunc, ok := ret.Get(0).(func() string); ok { + r0 = returnFunc() + } else { + r0 = ret.Get(0).(string) + } + return r0 +} + +// MockProvider_TypeID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TypeID' +type MockProvider_TypeID_Call struct { + *mock.Call +} + +// TypeID is a helper method to define mock.On call +func (_e *MockProvider_Expecter) TypeID() *MockProvider_TypeID_Call { + return &MockProvider_TypeID_Call{Call: _e.mock.On("TypeID")} +} + +func (_c *MockProvider_TypeID_Call) Run(run func()) *MockProvider_TypeID_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockProvider_TypeID_Call) Return(s string) *MockProvider_TypeID_Call { + _c.Call.Return(s) + return _c +} + +func (_c *MockProvider_TypeID_Call) RunAndReturn(run func() string) *MockProvider_TypeID_Call { + _c.Call.Return(run) + return _c +} + +// NewMockProviderBuilder creates a new instance of MockProviderBuilder. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockProviderBuilder[P any, E any](t interface { + mock.TestingT + Cleanup(func()) +}) *MockProviderBuilder[P, E] { + mock := &MockProviderBuilder[P, E]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// MockProviderBuilder is an autogenerated mock type for the ProviderBuilder type +type MockProviderBuilder[P any, E any] struct { + mock.Mock +} + +type MockProviderBuilder_Expecter[P any, E any] struct { + mock *mock.Mock +} + +func (_m *MockProviderBuilder[P, E]) EXPECT() *MockProviderBuilder_Expecter[P, E] { + return &MockProviderBuilder_Expecter[P, E]{mock: &_m.Mock} +} + +// BuildEngineDataSourceFactory provides a mock function for the type MockProviderBuilder +func (_mock *MockProviderBuilder[P, E]) BuildEngineDataSourceFactory(data E) (EngineDataSourceFactory, error) { + ret := _mock.Called(data) + + if len(ret) == 0 { + panic("no return value specified for BuildEngineDataSourceFactory") + } + + var r0 EngineDataSourceFactory + var r1 error + if returnFunc, ok := ret.Get(0).(func(E) (EngineDataSourceFactory, error)); ok { + return returnFunc(data) + } + if returnFunc, ok := ret.Get(0).(func(E) EngineDataSourceFactory); ok { + r0 = returnFunc(data) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(EngineDataSourceFactory) + } + } + if returnFunc, ok := ret.Get(1).(func(E) error); ok { + r1 = returnFunc(data) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// MockProviderBuilder_BuildEngineDataSourceFactory_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BuildEngineDataSourceFactory' +type MockProviderBuilder_BuildEngineDataSourceFactory_Call[P any, E any] struct { + *mock.Call +} + +// BuildEngineDataSourceFactory is a helper method to define mock.On call +// - data E +func (_e 
*MockProviderBuilder_Expecter[P, E]) BuildEngineDataSourceFactory(data interface{}) *MockProviderBuilder_BuildEngineDataSourceFactory_Call[P, E] { + return &MockProviderBuilder_BuildEngineDataSourceFactory_Call[P, E]{Call: _e.mock.On("BuildEngineDataSourceFactory", data)} +} + +func (_c *MockProviderBuilder_BuildEngineDataSourceFactory_Call[P, E]) Run(run func(data E)) *MockProviderBuilder_BuildEngineDataSourceFactory_Call[P, E] { + _c.Call.Run(func(args mock.Arguments) { + var arg0 E + if args[0] != nil { + arg0 = args[0].(E) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockProviderBuilder_BuildEngineDataSourceFactory_Call[P, E]) Return(engineDataSourceFactory EngineDataSourceFactory, err error) *MockProviderBuilder_BuildEngineDataSourceFactory_Call[P, E] { + _c.Call.Return(engineDataSourceFactory, err) + return _c +} + +func (_c *MockProviderBuilder_BuildEngineDataSourceFactory_Call[P, E]) RunAndReturn(run func(data E) (EngineDataSourceFactory, error)) *MockProviderBuilder_BuildEngineDataSourceFactory_Call[P, E] { + _c.Call.Return(run) + return _c +} + +// BuildProvider provides a mock function for the type MockProviderBuilder +func (_mock *MockProviderBuilder[P, E]) BuildProvider(options P) (Provider, error) { + ret := _mock.Called(options) + + if len(ret) == 0 { + panic("no return value specified for BuildProvider") + } + + var r0 Provider + var r1 error + if returnFunc, ok := ret.Get(0).(func(P) (Provider, error)); ok { + return returnFunc(options) + } + if returnFunc, ok := ret.Get(0).(func(P) Provider); ok { + r0 = returnFunc(options) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(Provider) + } + } + if returnFunc, ok := ret.Get(1).(func(P) error); ok { + r1 = returnFunc(options) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// MockProviderBuilder_BuildProvider_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BuildProvider' +type MockProviderBuilder_BuildProvider_Call[P any, E any] 
struct { + *mock.Call +} + +// BuildProvider is a helper method to define mock.On call +// - options P +func (_e *MockProviderBuilder_Expecter[P, E]) BuildProvider(options interface{}) *MockProviderBuilder_BuildProvider_Call[P, E] { + return &MockProviderBuilder_BuildProvider_Call[P, E]{Call: _e.mock.On("BuildProvider", options)} +} + +func (_c *MockProviderBuilder_BuildProvider_Call[P, E]) Run(run func(options P)) *MockProviderBuilder_BuildProvider_Call[P, E] { + _c.Call.Run(func(args mock.Arguments) { + var arg0 P + if args[0] != nil { + arg0 = args[0].(P) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockProviderBuilder_BuildProvider_Call[P, E]) Return(provider Provider, err error) *MockProviderBuilder_BuildProvider_Call[P, E] { + _c.Call.Return(provider, err) + return _c +} + +func (_c *MockProviderBuilder_BuildProvider_Call[P, E]) RunAndReturn(run func(options P) (Provider, error)) *MockProviderBuilder_BuildProvider_Call[P, E] { + _c.Call.Return(run) + return _c +} + +// TypeID provides a mock function for the type MockProviderBuilder +func (_mock *MockProviderBuilder[P, E]) TypeID() string { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for TypeID") + } + + var r0 string + if returnFunc, ok := ret.Get(0).(func() string); ok { + r0 = returnFunc() + } else { + r0 = ret.Get(0).(string) + } + return r0 +} + +// MockProviderBuilder_TypeID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TypeID' +type MockProviderBuilder_TypeID_Call[P any, E any] struct { + *mock.Call +} + +// TypeID is a helper method to define mock.On call +func (_e *MockProviderBuilder_Expecter[P, E]) TypeID() *MockProviderBuilder_TypeID_Call[P, E] { + return &MockProviderBuilder_TypeID_Call[P, E]{Call: _e.mock.On("TypeID")} +} + +func (_c *MockProviderBuilder_TypeID_Call[P, E]) Run(run func()) *MockProviderBuilder_TypeID_Call[P, E] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + 
+func (_c *MockProviderBuilder_TypeID_Call[P, E]) Return(s string) *MockProviderBuilder_TypeID_Call[P, E] { + _c.Call.Return(s) + return _c +} + +func (_c *MockProviderBuilder_TypeID_Call[P, E]) RunAndReturn(run func() string) *MockProviderBuilder_TypeID_Call[P, E] { + _c.Call.Return(run) + return _c +} diff --git a/router/pkg/pubsub/datasource/mocks_resolve.go b/router/pkg/pubsub/datasource/mocks_resolve.go new file mode 100644 index 0000000000..1aa4a11643 --- /dev/null +++ b/router/pkg/pubsub/datasource/mocks_resolve.go @@ -0,0 +1,142 @@ +// Code generated by mockery; DO NOT EDIT. +// github.com/vektra/mockery +// template: testify + +package datasource + +import ( + mock "github.com/stretchr/testify/mock" +) + +// NewMockSubscriptionUpdater creates a new instance of MockSubscriptionUpdater. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockSubscriptionUpdater(t interface { + mock.TestingT + Cleanup(func()) +}) *MockSubscriptionUpdater { + mock := &MockSubscriptionUpdater{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// MockSubscriptionUpdater is an autogenerated mock type for the SubscriptionUpdater type +type MockSubscriptionUpdater struct { + mock.Mock +} + +type MockSubscriptionUpdater_Expecter struct { + mock *mock.Mock +} + +func (_m *MockSubscriptionUpdater) EXPECT() *MockSubscriptionUpdater_Expecter { + return &MockSubscriptionUpdater_Expecter{mock: &_m.Mock} +} + +// Close provides a mock function for the type MockSubscriptionUpdater +func (_mock *MockSubscriptionUpdater) Close() { + _mock.Called() + return +} + +// MockSubscriptionUpdater_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type MockSubscriptionUpdater_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e 
*MockSubscriptionUpdater_Expecter) Close() *MockSubscriptionUpdater_Close_Call { + return &MockSubscriptionUpdater_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *MockSubscriptionUpdater_Close_Call) Run(run func()) *MockSubscriptionUpdater_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockSubscriptionUpdater_Close_Call) Return() *MockSubscriptionUpdater_Close_Call { + _c.Call.Return() + return _c +} + +func (_c *MockSubscriptionUpdater_Close_Call) RunAndReturn(run func()) *MockSubscriptionUpdater_Close_Call { + _c.Run(run) + return _c +} + +// Done provides a mock function for the type MockSubscriptionUpdater +func (_mock *MockSubscriptionUpdater) Done() { + _mock.Called() + return +} + +// MockSubscriptionUpdater_Done_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Done' +type MockSubscriptionUpdater_Done_Call struct { + *mock.Call +} + +// Done is a helper method to define mock.On call +func (_e *MockSubscriptionUpdater_Expecter) Done() *MockSubscriptionUpdater_Done_Call { + return &MockSubscriptionUpdater_Done_Call{Call: _e.mock.On("Done")} +} + +func (_c *MockSubscriptionUpdater_Done_Call) Run(run func()) *MockSubscriptionUpdater_Done_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockSubscriptionUpdater_Done_Call) Return() *MockSubscriptionUpdater_Done_Call { + _c.Call.Return() + return _c +} + +func (_c *MockSubscriptionUpdater_Done_Call) RunAndReturn(run func()) *MockSubscriptionUpdater_Done_Call { + _c.Run(run) + return _c +} + +// Update provides a mock function for the type MockSubscriptionUpdater +func (_mock *MockSubscriptionUpdater) Update(data []byte) { + _mock.Called(data) + return +} + +// MockSubscriptionUpdater_Update_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Update' +type MockSubscriptionUpdater_Update_Call struct { + *mock.Call +} + +// Update is a helper 
method to define mock.On call +// - data []byte +func (_e *MockSubscriptionUpdater_Expecter) Update(data interface{}) *MockSubscriptionUpdater_Update_Call { + return &MockSubscriptionUpdater_Update_Call{Call: _e.mock.On("Update", data)} +} + +func (_c *MockSubscriptionUpdater_Update_Call) Run(run func(data []byte)) *MockSubscriptionUpdater_Update_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 []byte + if args[0] != nil { + arg0 = args[0].([]byte) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockSubscriptionUpdater_Update_Call) Return() *MockSubscriptionUpdater_Update_Call { + _c.Call.Return() + return _c +} + +func (_c *MockSubscriptionUpdater_Update_Call) RunAndReturn(run func(data []byte)) *MockSubscriptionUpdater_Update_Call { + _c.Run(run) + return _c +} diff --git a/router/pkg/pubsub/datasource/planner.go b/router/pkg/pubsub/datasource/planner.go new file mode 100644 index 0000000000..cde11b6d42 --- /dev/null +++ b/router/pkg/pubsub/datasource/planner.go @@ -0,0 +1,208 @@ +package datasource + +import ( + "fmt" + "strings" + + "github.com/wundergraph/cosmo/router/pkg/pubsub/eventdata" + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/argument_templates" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +type Planner[PB ProviderBuilder[P, E], P any, E any] struct { + id int + config *PlannerConfig[PB, P, E] + rootFieldRef int + variables resolve.Variables + visitor *plan.Visitor + extractFn func(tpl string) (string, error) +} + +func (p *Planner[PB, P, E]) SetID(id int) { + p.id = id +} + +func (p *Planner[PB, P, E]) ID() (id int) { + return p.id +} + +func (p *Planner[PB, P, E]) DownstreamResponseFieldAlias(downstreamFieldRef int) (alias string, exists bool) { + // skip, not required + return +} + +func (p *Planner[PB, P, E]) DataSourcePlanningBehavior() plan.DataSourcePlanningBehavior { + 
return plan.DataSourcePlanningBehavior{ + MergeAliasedRootNodes: false, + OverrideFieldPathFromAlias: false, + } +} + +func (p *Planner[PB, P, E]) Register(visitor *plan.Visitor, configuration plan.DataSourceConfiguration[*PlannerConfig[PB, P, E]], _ plan.DataSourcePlannerConfiguration) error { + p.visitor = visitor + visitor.Walker.RegisterEnterFieldVisitor(p) + visitor.Walker.RegisterEnterDocumentVisitor(p) + p.config = configuration.CustomConfiguration() + + return nil +} + +func (p *Planner[PB, P, E]) ConfigureFetch() resolve.FetchConfiguration { + if p.config == nil { + p.visitor.Walker.StopWithInternalErr(fmt.Errorf("data source not set")) + return resolve.FetchConfiguration{} + } + + pubSubDataSource, err := p.config.ProviderBuilder.BuildEngineDataSourceFactory(p.config.Event) + if err != nil { + p.visitor.Walker.StopWithInternalErr(fmt.Errorf("failed to build data source: %w", err)) + return resolve.FetchConfiguration{} + } + + err = pubSubDataSource.TransformEventData(p.extractFn) + if err != nil { + p.visitor.Walker.StopWithInternalErr(err) + } + + dataSource, err := pubSubDataSource.ResolveDataSource() + if err != nil { + p.visitor.Walker.StopWithInternalErr(fmt.Errorf("failed to get data source: %w", err)) + return resolve.FetchConfiguration{} + } + + event, err := eventdata.BuildEventDataBytes(p.rootFieldRef, p.visitor.Operation, &p.variables) + if err != nil { + p.visitor.Walker.StopWithInternalErr(fmt.Errorf("failed to get resolve data source input: %w", err)) + return resolve.FetchConfiguration{} + } + + input, err := pubSubDataSource.ResolveDataSourceInput(event) + if err != nil { + p.visitor.Walker.StopWithInternalErr(fmt.Errorf("failed to get resolve data source input: %w", err)) + return resolve.FetchConfiguration{} + } + + return resolve.FetchConfiguration{ + Input: input, + Variables: p.variables, + DataSource: dataSource, + PostProcessing: resolve.PostProcessingConfiguration{ + MergePath: []string{pubSubDataSource.GetFieldName()}, + }, + } +} 
+ +func (p *Planner[PB, P, E]) ConfigureSubscription() plan.SubscriptionConfiguration { + if p.config == nil { + p.visitor.Walker.StopWithInternalErr(fmt.Errorf("data source not set")) + return plan.SubscriptionConfiguration{} + } + + pubSubDataSource, err := p.config.ProviderBuilder.BuildEngineDataSourceFactory(p.config.Event) + if err != nil { + p.visitor.Walker.StopWithInternalErr(fmt.Errorf("failed to get resolve data source subscription: %w", err)) + return plan.SubscriptionConfiguration{} + } + + err = pubSubDataSource.TransformEventData(p.extractFn) + if err != nil { + p.visitor.Walker.StopWithInternalErr(err) + } + + dataSource, err := pubSubDataSource.ResolveDataSourceSubscription() + if err != nil { + p.visitor.Walker.StopWithInternalErr(fmt.Errorf("failed to get resolve data source subscription: %w", err)) + return plan.SubscriptionConfiguration{} + } + + input, err := pubSubDataSource.ResolveDataSourceSubscriptionInput() + if err != nil { + p.visitor.Walker.StopWithInternalErr(fmt.Errorf("failed to get resolve data source subscription input: %w", err)) + return plan.SubscriptionConfiguration{} + } + + return plan.SubscriptionConfiguration{ + Input: input, + Variables: p.variables, + DataSource: dataSource, + PostProcessing: resolve.PostProcessingConfiguration{ + MergePath: []string{pubSubDataSource.GetFieldName()}, + }, + } +} + +func (p *Planner[PB, P, E]) addContextVariableByArgumentRef(argumentRef int, operationTypeRef int, argumentPath []string) (string, error) { + variablePath, err := p.visitor.Operation.VariablePathByArgumentRefAndArgumentPath(argumentRef, argumentPath, operationTypeRef) + if err != nil { + return "", err + } + /* The definition is passed as both definition and operation below because getJSONRootType resolves the type + * from the first argument, but finalInputValueTypeRef comes from the definition + */ + contextVariable := &resolve.ContextVariable{ + Path: variablePath, + Renderer: resolve.NewPlainVariableRenderer(), + } + 
variablePlaceHolder, _ := p.variables.AddVariable(contextVariable) + return variablePlaceHolder, nil +} + +func (p *Planner[PB, P, E]) extractArgumentTemplate(fieldRef int, operationDefinitionRef int, typeDefinitionRef int, template string) (string, error) { + matches := argument_templates.ArgumentTemplateRegex.FindAllStringSubmatch(template, -1) + // If no argument templates are defined, there are only static values + if len(matches) < 1 { + return template, nil + } + fieldNameBytes := p.visitor.Operation.FieldNameBytes(fieldRef) + // TODO: handling for interfaces and unions + fieldDefinitionRef, ok := p.visitor.Definition.ObjectTypeDefinitionFieldWithName(typeDefinitionRef, fieldNameBytes) + if !ok { + return "", fmt.Errorf(`expected field definition to exist for field "%s"`, fieldNameBytes) + } + templateWithVariableTemplateReplacements := template + for templateNumber, groups := range matches { + // The first group is the whole template; the second is the period-delimited argument path + if len(groups) != 2 { + return "", fmt.Errorf(`argument template #%d defined on field "%s" is invalid: expected 2 matching groups but received %d`, templateNumber+1, fieldNameBytes, len(groups)-1) + } + validationResult, err := argument_templates.ValidateArgumentPath(p.visitor.Definition, groups[1], fieldDefinitionRef) + if err != nil { + return "", fmt.Errorf(`argument template #%d defined on field "%s" is invalid: %w`, templateNumber+1, fieldNameBytes, err) + } + argumentNameBytes := []byte(validationResult.ArgumentPath[0]) + argumentRef, ok := p.visitor.Operation.FieldArgument(fieldRef, argumentNameBytes) + if !ok { + return "", fmt.Errorf(`operation field "%s" does not define argument "%s"`, fieldNameBytes, argumentNameBytes) + } + // variablePlaceholder has the form $$0$$, $$1$$, etc. 
+ variablePlaceholder, err := p.addContextVariableByArgumentRef(argumentRef, operationDefinitionRef, validationResult.ArgumentPath) + if err != nil { + return "", fmt.Errorf(`failed to retrieve variable placeholder for argument ""%s" defined on operation field "%s": %w`, argumentNameBytes, fieldNameBytes, err) + } + // Replace the template literal with the variable placeholder (and reuse the variable if it already exists) + templateWithVariableTemplateReplacements = strings.ReplaceAll(templateWithVariableTemplateReplacements, groups[0], variablePlaceholder) + } + + return templateWithVariableTemplateReplacements, nil +} + +func (p *Planner[PB, P, E]) EnterDocument(_, _ *ast.Document) { + p.rootFieldRef = -1 +} + +func (p *Planner[PB, P, E]) EnterField(ref int) { + if p.rootFieldRef != -1 { + // This is a nested field; nothing needs to be done + return + } + p.rootFieldRef = ref + + operationDefinitionRef := p.visitor.Walker.Ancestors[0].Ref + typeDefinitionRef := p.visitor.Walker.EnclosingTypeDefinition.Ref + + p.extractFn = func(tpl string) (string, error) { + return p.extractArgumentTemplate(ref, operationDefinitionRef, typeDefinitionRef, tpl) + } +} diff --git a/router/pkg/pubsub/datasource/provider.go b/router/pkg/pubsub/datasource/provider.go new file mode 100644 index 0000000000..f90446a712 --- /dev/null +++ b/router/pkg/pubsub/datasource/provider.go @@ -0,0 +1,33 @@ +package datasource + +import ( + "context" +) + +type ArgumentTemplateCallback func(tpl string) (string, error) + +type ProviderLifecycle interface { + // Startup is the method called when the provider is started + Startup(ctx context.Context) error + // Shutdown is the method called when the provider is shut down + Shutdown(ctx context.Context) error +} + +// Provider is the interface that the PubSub provider must implement +type Provider interface { + ProviderLifecycle + // ID Get the provider ID as specified in the configuration + ID() string + // TypeID Get the provider type id (e.g. 
"kafka", "nats") + TypeID() string +} + +// ProviderBuilder is the interface that the provider builder must implement. +type ProviderBuilder[P, E any] interface { + // TypeID Get the provider type id (e.g. "kafka", "nats") + TypeID() string + // BuildProvider Build the provider and the adapter + BuildProvider(options P) (Provider, error) + // BuildEngineDataSourceFactory Build the data source for the given provider and event configuration + BuildEngineDataSourceFactory(data E) (EngineDataSourceFactory, error) +} diff --git a/router/pkg/pubsub/datasource/pubsubprovider.go b/router/pkg/pubsub/datasource/pubsubprovider.go new file mode 100644 index 0000000000..9e1223d950 --- /dev/null +++ b/router/pkg/pubsub/datasource/pubsubprovider.go @@ -0,0 +1,45 @@ +package datasource + +import ( + "context" + + "go.uber.org/zap" +) + +type PubSubProvider struct { + id string + typeID string + Adapter ProviderLifecycle + Logger *zap.Logger +} + +func (p *PubSubProvider) ID() string { + return p.id +} + +func (p *PubSubProvider) TypeID() string { + return p.typeID +} + +func (p *PubSubProvider) Startup(ctx context.Context) error { + if err := p.Adapter.Startup(ctx); err != nil { + return err + } + return nil +} + +func (p *PubSubProvider) Shutdown(ctx context.Context) error { + if err := p.Adapter.Shutdown(ctx); err != nil { + return err + } + return nil +} + +func NewPubSubProvider(id string, typeID string, adapter ProviderLifecycle, logger *zap.Logger) *PubSubProvider { + return &PubSubProvider{ + id: id, + typeID: typeID, + Adapter: adapter, + Logger: logger, + } +} diff --git a/router/pkg/pubsub/datasource/pubsubprovider_test.go b/router/pkg/pubsub/datasource/pubsubprovider_test.go new file mode 100644 index 0000000000..6579b62072 --- /dev/null +++ b/router/pkg/pubsub/datasource/pubsubprovider_test.go @@ -0,0 +1,74 @@ +package datasource + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func 
TestProvider_Startup_Success(t *testing.T) { + mockAdapter := NewMockProviderLifecycle(t) + mockAdapter.On("Startup", mock.Anything).Return(nil) + + provider := PubSubProvider{ + Adapter: mockAdapter, + } + err := provider.Startup(context.Background()) + + assert.NoError(t, err) +} + +func TestProvider_Startup_Error(t *testing.T) { + mockAdapter := NewMockProviderLifecycle(t) + mockAdapter.On("Startup", mock.Anything).Return(errors.New("connect error")) + + provider := PubSubProvider{ + Adapter: mockAdapter, + } + err := provider.Startup(context.Background()) + + assert.Error(t, err) +} + +func TestProvider_Shutdown_Success(t *testing.T) { + mockAdapter := NewMockProviderLifecycle(t) + mockAdapter.On("Shutdown", mock.Anything).Return(nil) + + provider := PubSubProvider{ + Adapter: mockAdapter, + } + err := provider.Shutdown(context.Background()) + + assert.NoError(t, err) +} + +func TestProvider_Shutdown_Error(t *testing.T) { + mockAdapter := NewMockProviderLifecycle(t) + mockAdapter.On("Shutdown", mock.Anything).Return(errors.New("close error")) + + provider := PubSubProvider{ + Adapter: mockAdapter, + } + err := provider.Shutdown(context.Background()) + + assert.Error(t, err) +} + +func TestProvider_ID(t *testing.T) { + const testID = "test-id" + provider := PubSubProvider{ + id: testID, + } + assert.Equal(t, testID, provider.ID()) +} + +func TestProvider_TypeID(t *testing.T) { + const providerTypeID = "test-type-id" + provider := PubSubProvider{ + typeID: providerTypeID, + } + assert.Equal(t, providerTypeID, provider.TypeID()) +} diff --git a/router/pkg/pubsub/eventdata/build.go b/router/pkg/pubsub/eventdata/build.go new file mode 100644 index 0000000000..3d7e276c0b --- /dev/null +++ b/router/pkg/pubsub/eventdata/build.go @@ -0,0 +1,38 @@ +package eventdata + +import ( + "bytes" + "encoding/json" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +func BuildEventDataBytes(ref int, 
operation *ast.Document, variables *resolve.Variables) ([]byte, error) { + // Collect the field arguments for fetch-based operations + fieldArgs := operation.FieldArguments(ref) + var dataBuffer bytes.Buffer + dataBuffer.WriteByte('{') + for i, arg := range fieldArgs { + if i > 0 { + dataBuffer.WriteByte(',') + } + argValue := operation.ArgumentValue(arg) + variableName := operation.VariableValueNameBytes(argValue.Ref) + contextVariable := &resolve.ContextVariable{ + Path: []string{string(variableName)}, + Renderer: resolve.NewPlainVariableRenderer(), + } + variablePlaceHolder, _ := variables.AddVariable(contextVariable) + argumentName := operation.ArgumentNameString(arg) + escapedKey, err := json.Marshal(argumentName) + if err != nil { + return nil, err + } + dataBuffer.Write(escapedKey) + dataBuffer.WriteByte(':') + dataBuffer.WriteString(variablePlaceHolder) + } + dataBuffer.WriteByte('}') + return dataBuffer.Bytes(), nil +} diff --git a/router/pkg/pubsub/kafka/kafka.go b/router/pkg/pubsub/kafka/adapter.go similarity index 73% rename from router/pkg/pubsub/kafka/kafka.go rename to router/pkg/pubsub/kafka/adapter.go index b485980ede..503b8f6f37 100644 --- a/router/pkg/pubsub/kafka/kafka.go +++ b/router/pkg/pubsub/kafka/adapter.go @@ -10,65 +10,29 @@ import ( "github.com/twmb/franz-go/pkg/kerr" "github.com/twmb/franz-go/pkg/kgo" - "github.com/wundergraph/cosmo/router/pkg/pubsub" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" + "github.com/wundergraph/cosmo/router/pkg/pubsub/datasource" "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" "go.uber.org/zap" ) var ( - _ pubsub_datasource.KafkaConnector = (*connector)(nil) - _ pubsub_datasource.KafkaPubSub = (*kafkaPubSub)(nil) - _ pubsub.Lifecycle = (*kafkaPubSub)(nil) - errClientClosed = errors.New("client closed") ) -type connector struct { - writeClient *kgo.Client - opts []kgo.Opt - logger *zap.Logger -} - -func NewConnector(logger *zap.Logger, opts []kgo.Opt) 
(pubsub_datasource.KafkaConnector, error) { - - writeClient, err := kgo.NewClient(append(opts, - // For observability, we set the client ID to "router" - kgo.ClientID("cosmo.router.producer"))..., - ) - if err != nil { - return nil, fmt.Errorf("failed to create write client for Kafka: %w", err) - } - - return &connector{ - writeClient: writeClient, - opts: opts, - logger: logger, - }, nil -} - -func (c *connector) New(ctx context.Context) pubsub_datasource.KafkaPubSub { - - ctx, cancel := context.WithCancel(ctx) - - ps := &kafkaPubSub{ - ctx: ctx, - logger: c.logger.With(zap.String("pubsub", "kafka")), - opts: c.opts, - writeClient: c.writeClient, - closeWg: sync.WaitGroup{}, - cancel: cancel, - } - - return ps +// Adapter defines the interface for Kafka adapter operations +type Adapter interface { + Subscribe(ctx context.Context, event SubscriptionEventConfiguration, updater resolve.SubscriptionUpdater) error + Publish(ctx context.Context, event PublishEventConfiguration) error + Startup(ctx context.Context) error + Shutdown(ctx context.Context) error } -// kafkaPubSub is a Kafka pubsub implementation. +// ProviderAdapter is a Kafka pubsub implementation. // It uses the franz-go Kafka client to consume and produce messages. // The pubsub is stateless and does not store any messages. // It uses a single write client to produce messages and a client per topic to consume messages. // Each client polls the Kafka topic for new records and updates the subscriptions with the new data. -type kafkaPubSub struct { +type ProviderAdapter struct { ctx context.Context opts []kgo.Opt logger *zap.Logger @@ -78,7 +42,7 @@ type kafkaPubSub struct { } // topicPoller polls the Kafka topic for new records and calls the updateTriggers function. 
-func (p *kafkaPubSub) topicPoller(ctx context.Context, client *kgo.Client, updater resolve.SubscriptionUpdater) error { +func (p *ProviderAdapter) topicPoller(ctx context.Context, client *kgo.Client, updater resolve.SubscriptionUpdater) error { for { select { case <-p.ctx.Done(): // Close the poller if the application context was canceled @@ -132,7 +96,7 @@ func (p *kafkaPubSub) topicPoller(ctx context.Context, client *kgo.Client, updat // Subscribe subscribes to the given topics and updates the subscription updater. // The engine already deduplicates subscriptions with the same topics, stream configuration, extensions, headers, etc. -func (p *kafkaPubSub) Subscribe(ctx context.Context, event pubsub_datasource.KafkaSubscriptionEventConfiguration, updater resolve.SubscriptionUpdater) error { +func (p *ProviderAdapter) Subscribe(ctx context.Context, event SubscriptionEventConfiguration, updater resolve.SubscriptionUpdater) error { log := p.logger.With( zap.String("provider_id", event.ProviderID), @@ -140,8 +104,6 @@ func (p *kafkaPubSub) Subscribe(ctx context.Context, event pubsub_datasource.Kaf zap.Strings("topics", event.Topics), ) - log.Debug("subscribe") - // Create a new client for the topic client, err := kgo.NewClient(append(p.opts, kgo.ConsumeTopics(event.Topics...), @@ -151,6 +113,9 @@ func (p *kafkaPubSub) Subscribe(ctx context.Context, event pubsub_datasource.Kaf kgo.ConsumeResetOffset(kgo.NewOffset().AfterMilli(time.Now().UnixMilli())), // For observability, we set the client ID to "router" kgo.ClientID(fmt.Sprintf("cosmo.router.consumer.%s", strings.Join(event.Topics, "-"))), + // FIXME: the client id should have some unique identifier, like in nats + // What if we have multiple subscriptions for the same topics? + // What if we have more router instances? )...) 
if err != nil { log.Error("failed to create client", zap.Error(err)) @@ -181,13 +146,17 @@ func (p *kafkaPubSub) Subscribe(ctx context.Context, event pubsub_datasource.Kaf // Publish publishes the given event to the Kafka topic in a non-blocking way. // Publish errors are logged and returned as a pubsub error. // The event is written with a dedicated write client. -func (p *kafkaPubSub) Publish(ctx context.Context, event pubsub_datasource.KafkaPublishEventConfiguration) error { +func (p *ProviderAdapter) Publish(ctx context.Context, event PublishEventConfiguration) error { log := p.logger.With( zap.String("provider_id", event.ProviderID), zap.String("method", "publish"), zap.String("topic", event.Topic), ) + if p.writeClient == nil { + return datasource.NewError("kafka write client not initialized", nil) + } + log.Debug("publish", zap.ByteString("data", event.Data)) var wg sync.WaitGroup @@ -209,13 +178,29 @@ func (p *kafkaPubSub) Publish(ctx context.Context, event pubsub_datasource.Kafka if pErr != nil { log.Error("publish error", zap.Error(pErr)) - return pubsub.NewError(fmt.Sprintf("error publishing to Kafka topic %s", event.Topic), pErr) + return datasource.NewError(fmt.Sprintf("error publishing to Kafka topic %s", event.Topic), pErr) } return nil } -func (p *kafkaPubSub) Shutdown(ctx context.Context) error { +func (p *ProviderAdapter) Startup(ctx context.Context) (err error) { + p.writeClient, err = kgo.NewClient(append(p.opts, + // For observability, we set the client ID to "router" + kgo.ClientID("cosmo.router.producer"))..., + ) + if err != nil { + return err + } + + return +} + +func (p *ProviderAdapter) Shutdown(ctx context.Context) error { + + if p.writeClient == nil { + return nil + } err := p.writeClient.Flush(ctx) if err != nil { @@ -236,3 +221,18 @@ func (p *kafkaPubSub) Shutdown(ctx context.Context) error { return nil } + +func NewProviderAdapter(ctx context.Context, logger *zap.Logger, opts []kgo.Opt) (*ProviderAdapter, error) { + ctx, cancel := 
context.WithCancel(ctx) + if logger == nil { + logger = zap.NewNop() + } + + return &ProviderAdapter{ + ctx: ctx, + logger: logger.With(zap.String("pubsub", "kafka")), + opts: opts, + closeWg: sync.WaitGroup{}, + cancel: cancel, + }, nil +} diff --git a/router/pkg/pubsub/kafka/engine_datasource.go b/router/pkg/pubsub/kafka/engine_datasource.go new file mode 100644 index 0000000000..7b82a766b0 --- /dev/null +++ b/router/pkg/pubsub/kafka/engine_datasource.go @@ -0,0 +1,88 @@ +package kafka + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + + "github.com/buger/jsonparser" + "github.com/cespare/xxhash/v2" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/httpclient" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +type SubscriptionEventConfiguration struct { + ProviderID string `json:"providerId"` + Topics []string `json:"topics"` +} + +type PublishEventConfiguration struct { + ProviderID string `json:"providerId"` + Topic string `json:"topic"` + Data json.RawMessage `json:"data"` +} + +func (s *PublishEventConfiguration) MarshalJSONTemplate() string { + // The content of the data field could be not valid JSON, so we can't use json.Marshal + // e.g. 
{"id":$$0$$,"update":$$1$$} + return fmt.Sprintf(`{"topic":"%s", "data": %s, "providerId":"%s"}`, s.Topic, s.Data, s.ProviderID) +} + +type SubscriptionDataSource struct { + pubSub Adapter +} + +func (s *SubscriptionDataSource) UniqueRequestID(ctx *resolve.Context, input []byte, xxh *xxhash.Digest) error { + val, _, _, err := jsonparser.Get(input, "topics") + if err != nil { + return err + } + + _, err = xxh.Write(val) + if err != nil { + return err + } + + val, _, _, err = jsonparser.Get(input, "providerId") + if err != nil { + return err + } + + _, err = xxh.Write(val) + return err +} + +func (s *SubscriptionDataSource) Start(ctx *resolve.Context, input []byte, updater resolve.SubscriptionUpdater) error { + var subscriptionConfiguration SubscriptionEventConfiguration + err := json.Unmarshal(input, &subscriptionConfiguration) + if err != nil { + return err + } + + return s.pubSub.Subscribe(ctx.Context(), subscriptionConfiguration, updater) +} + +type PublishDataSource struct { + pubSub Adapter +} + +func (s *PublishDataSource) Load(ctx context.Context, input []byte, out *bytes.Buffer) error { + var publishConfiguration PublishEventConfiguration + err := json.Unmarshal(input, &publishConfiguration) + if err != nil { + return err + } + + if err := s.pubSub.Publish(ctx, publishConfiguration); err != nil { + _, err = io.WriteString(out, `{"success": false}`) + return err + } + _, err = io.WriteString(out, `{"success": true}`) + return err +} + +func (s *PublishDataSource) LoadWithFiles(ctx context.Context, input []byte, files []*httpclient.FileUpload, out *bytes.Buffer) (err error) { + panic("not implemented") +} diff --git a/router/pkg/pubsub/kafka/engine_datasource_factory.go b/router/pkg/pubsub/kafka/engine_datasource_factory.go new file mode 100644 index 0000000000..d360f02f26 --- /dev/null +++ b/router/pkg/pubsub/kafka/engine_datasource_factory.go @@ -0,0 +1,80 @@ +package kafka + +import ( + "encoding/json" + "fmt" + + 
"github.com/wundergraph/cosmo/router/pkg/pubsub/datasource" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +type EventType int + +const ( + EventTypePublish EventType = iota + EventTypeSubscribe +) + +type EngineDataSourceFactory struct { + fieldName string + eventType EventType + topics []string + providerId string + + KafkaAdapter Adapter +} + +func (c *EngineDataSourceFactory) GetFieldName() string { + return c.fieldName +} + +func (c *EngineDataSourceFactory) ResolveDataSource() (resolve.DataSource, error) { + var dataSource resolve.DataSource + + switch c.eventType { + case EventTypePublish: + dataSource = &PublishDataSource{ + pubSub: c.KafkaAdapter, + } + default: + return nil, fmt.Errorf("failed to configure fetch: invalid event type \"%d\" for Kafka", c.eventType) + } + + return dataSource, nil +} + +func (c *EngineDataSourceFactory) ResolveDataSourceInput(eventData []byte) (string, error) { + if len(c.topics) != 1 { + return "", fmt.Errorf("publish events should define one topic but received %d", len(c.topics)) + } + + evtCfg := PublishEventConfiguration{ + ProviderID: c.providerId, + Topic: c.topics[0], + Data: eventData, + } + + return evtCfg.MarshalJSONTemplate(), nil +} + +func (c *EngineDataSourceFactory) ResolveDataSourceSubscription() (resolve.SubscriptionDataSource, error) { + return &SubscriptionDataSource{ + pubSub: c.KafkaAdapter, + }, nil +} + +func (c *EngineDataSourceFactory) ResolveDataSourceSubscriptionInput() (string, error) { + evtCfg := SubscriptionEventConfiguration{ + ProviderID: c.providerId, + Topics: c.topics, + } + object, err := json.Marshal(evtCfg) + if err != nil { + return "", fmt.Errorf("failed to marshal event subscription streamConfiguration") + } + return string(object), nil +} + +func (c *EngineDataSourceFactory) TransformEventData(extractFn datasource.ArgumentTemplateCallback) error { + return nil +} diff --git a/router/pkg/pubsub/kafka/engine_datasource_factory_test.go 
b/router/pkg/pubsub/kafka/engine_datasource_factory_test.go new file mode 100644 index 0000000000..254359a4bc --- /dev/null +++ b/router/pkg/pubsub/kafka/engine_datasource_factory_test.go @@ -0,0 +1,139 @@ +package kafka + +import ( + "bytes" + "context" + "encoding/json" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/wundergraph/cosmo/router/pkg/pubsub/pubsubtest" +) + +func TestKafkaEngineDataSourceFactory(t *testing.T) { + // Create the data source to test with a real adapter + adapter := &ProviderAdapter{} + pubsub := &EngineDataSourceFactory{ + KafkaAdapter: adapter, + fieldName: "testField", + eventType: EventTypePublish, + topics: []string{"test-topic"}, + providerId: "test-provider", + } + + // Run the standard test suite + pubsubtest.VerifyEngineDataSourceFactoryImplementation(t, pubsub) +} + +// TestEngineDataSourceFactoryWithMockAdapter tests the EngineDataSourceFactory with a mocked adapter +func TestEngineDataSourceFactoryWithMockAdapter(t *testing.T) { + // Create mock adapter + mockAdapter := NewMockAdapter(t) + + // Configure mock expectations for Publish + mockAdapter.On("Publish", mock.Anything, mock.MatchedBy(func(event PublishEventConfiguration) bool { + return event.ProviderID == "test-provider" && event.Topic == "test-topic" + })).Return(nil) + + // Create the data source with mock adapter + pubsub := &EngineDataSourceFactory{ + KafkaAdapter: mockAdapter, + fieldName: "testField", + eventType: EventTypePublish, + topics: []string{"test-topic"}, + providerId: "test-provider", + } + + // Get the data source + ds, err := pubsub.ResolveDataSource() + require.NoError(t, err) + + // Get the input + input, err := pubsub.ResolveDataSourceInput([]byte(`{"test":"data"}`)) + require.NoError(t, err) + + // Call Load on the data source + out := &bytes.Buffer{} + err = ds.Load(context.Background(), []byte(input), out) + require.NoError(t, err) + require.Equal(t, `{"success": true}`, out.String()) +} 
+ +// TestEngineDataSourceFactory_GetResolveDataSource_WrongType tests the EngineDataSourceFactory with a mocked adapter +func TestEngineDataSourceFactory_GetResolveDataSource_WrongType(t *testing.T) { + // Create mock adapter + mockAdapter := NewMockAdapter(t) + + // Create the data source with mock adapter + pubsub := &EngineDataSourceFactory{ + KafkaAdapter: mockAdapter, + fieldName: "testField", + eventType: EventTypeSubscribe, + topics: []string{"test-topic"}, + providerId: "test-provider", + } + + // Get the data source + ds, err := pubsub.ResolveDataSource() + require.Error(t, err) + require.Nil(t, ds) +} + +// TestEngineDataSourceFactory_GetResolveDataSourceInput_MultipleTopics tests the EngineDataSourceFactory with a mocked adapter +func TestEngineDataSourceFactory_GetResolveDataSourceInput_MultipleTopics(t *testing.T) { + // Create the data source with mock adapter + pubsub := &EngineDataSourceFactory{ + fieldName: "testField", + eventType: EventTypePublish, + topics: []string{"test-topic-1", "test-topic-2"}, + providerId: "test-provider", + } + + // Get the input + input, err := pubsub.ResolveDataSourceInput([]byte(`{"test":"data"}`)) + require.Error(t, err) + require.Empty(t, input) +} + +// TestEngineDataSourceFactory_GetResolveDataSourceInput_NoTopics tests the EngineDataSourceFactory with a mocked adapter +func TestEngineDataSourceFactory_GetResolveDataSourceInput_NoTopics(t *testing.T) { + // Create the data source with mock adapter + pubsub := &EngineDataSourceFactory{ + fieldName: "testField", + eventType: EventTypePublish, + topics: []string{}, + providerId: "test-provider", + } + + // Get the input + input, err := pubsub.ResolveDataSourceInput([]byte(`{"test":"data"}`)) + require.Error(t, err) + require.Empty(t, input) +} + +// TestKafkaEngineDataSourceFactoryMultiTopicSubscription tests only the subscription functionality +// for multiple topics. The publish and resolve datasource tests are skipped since they +// do not support multiple topics. 
+func TestKafkaEngineDataSourceFactoryMultiTopicSubscription(t *testing.T) { + // Create the data source to test with mock adapter + pubsub := &EngineDataSourceFactory{ + fieldName: "testField", + eventType: EventTypePublish, + topics: []string{"test-topic-1", "test-topic-2"}, + providerId: "test-provider", + } + + // Test GetResolveDataSourceSubscriptionInput + subscriptionInput, err := pubsub.ResolveDataSourceSubscriptionInput() + require.NoError(t, err, "Expected no error from GetResolveDataSourceSubscriptionInput") + require.NotEmpty(t, subscriptionInput, "Expected non-empty subscription input") + + // Verify the subscription input contains both topics + var subscriptionConfig SubscriptionEventConfiguration + err = json.Unmarshal([]byte(subscriptionInput), &subscriptionConfig) + require.NoError(t, err, "Expected valid JSON from GetResolveDataSourceSubscriptionInput") + require.Equal(t, 2, len(subscriptionConfig.Topics), "Expected 2 topics in subscription configuration") + require.Equal(t, "test-topic-1", subscriptionConfig.Topics[0], "Expected first topic to be 'test-topic-1'") + require.Equal(t, "test-topic-2", subscriptionConfig.Topics[1], "Expected second topic to be 'test-topic-2'") +} diff --git a/router/pkg/pubsub/kafka/engine_datasource_test.go b/router/pkg/pubsub/kafka/engine_datasource_test.go new file mode 100644 index 0000000000..0ad92aeb20 --- /dev/null +++ b/router/pkg/pubsub/kafka/engine_datasource_test.go @@ -0,0 +1,246 @@ +package kafka + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "testing" + + "github.com/cespare/xxhash/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/wundergraph/cosmo/router/pkg/pubsub/datasource" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +func TestPublishEventConfiguration_MarshalJSONTemplate(t *testing.T) { + tests := []struct { + name string + config PublishEventConfiguration + wantPattern 
string + }{ + { + name: "simple configuration", + config: PublishEventConfiguration{ + ProviderID: "test-provider", + Topic: "test-topic", + Data: json.RawMessage(`{"message":"hello"}`), + }, + wantPattern: `{"topic":"test-topic", "data": {"message":"hello"}, "providerId":"test-provider"}`, + }, + { + name: "with special characters", + config: PublishEventConfiguration{ + ProviderID: "test-provider-id", + Topic: "topic-with-hyphens", + Data: json.RawMessage(`{"message":"special \"quotes\" here"}`), + }, + wantPattern: `{"topic":"topic-with-hyphens", "data": {"message":"special \"quotes\" here"}, "providerId":"test-provider-id"}`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.config.MarshalJSONTemplate() + assert.Equal(t, tt.wantPattern, result) + }) + } +} + +func TestSubscriptionSource_UniqueRequestID(t *testing.T) { + tests := []struct { + name string + input string + expectError bool + expectedError error + }{ + { + name: "valid input", + input: `{"topics":["topic1", "topic2"], "providerId":"test-provider"}`, + expectError: false, + }, + { + name: "missing topics", + input: `{"providerId":"test-provider"}`, + expectError: true, + expectedError: errors.New("Key path not found"), + }, + { + name: "missing providerId", + input: `{"topics":["topic1", "topic2"]}`, + expectError: true, + expectedError: errors.New("Key path not found"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + source := &SubscriptionDataSource{ + pubSub: NewMockAdapter(t), + } + ctx := &resolve.Context{} + input := []byte(tt.input) + xxh := xxhash.New() + + err := source.UniqueRequestID(ctx, input, xxh) + + if tt.expectError { + require.Error(t, err) + if tt.expectedError != nil { + // For jsonparser errors, just check if the error message contains the expected text + assert.Contains(t, err.Error(), tt.expectedError.Error()) + } + } else { + require.NoError(t, err) + // Check that the hash has been updated + 
assert.NotEqual(t, 0, xxh.Sum64()) + } + }) + } +} + +func TestSubscriptionSource_Start(t *testing.T) { + tests := []struct { + name string + input string + mockSetup func(*MockAdapter, *datasource.MockSubscriptionUpdater) + expectError bool + }{ + { + name: "successful subscription", + input: `{"topics":["topic1", "topic2"], "providerId":"test-provider"}`, + mockSetup: func(m *MockAdapter, updater *datasource.MockSubscriptionUpdater) { + m.On("Subscribe", mock.Anything, SubscriptionEventConfiguration{ + ProviderID: "test-provider", + Topics: []string{"topic1", "topic2"}, + }, mock.Anything).Return(nil) + }, + expectError: false, + }, + { + name: "adapter returns error", + input: `{"topics":["topic1"], "providerId":"test-provider"}`, + mockSetup: func(m *MockAdapter, updater *datasource.MockSubscriptionUpdater) { + m.On("Subscribe", mock.Anything, SubscriptionEventConfiguration{ + ProviderID: "test-provider", + Topics: []string{"topic1"}, + }, mock.Anything).Return(errors.New("subscription error")) + }, + expectError: true, + }, + { + name: "invalid input json", + input: `{"invalid json":`, + mockSetup: func(m *MockAdapter, updater *datasource.MockSubscriptionUpdater) {}, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockAdapter := NewMockAdapter(t) + updater := datasource.NewMockSubscriptionUpdater(t) + tt.mockSetup(mockAdapter, updater) + + source := &SubscriptionDataSource{ + pubSub: mockAdapter, + } + + // Set up go context + goCtx := context.Background() + + // Create a resolve.Context with the standard context + resolveCtx := &resolve.Context{} + resolveCtx = resolveCtx.WithContext(goCtx) + + input := []byte(tt.input) + err := source.Start(resolveCtx, input, updater) + + if tt.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestKafkaPublishDataSource_Load(t *testing.T) { + tests := []struct { + name string + input string + mockSetup func(*MockAdapter) + 
expectError bool + expectedOutput string + expectPublished bool + }{ + { + name: "successful publish", + input: `{"topic":"test-topic", "data":{"message":"hello"}, "providerId":"test-provider"}`, + mockSetup: func(m *MockAdapter) { + m.On("Publish", mock.Anything, mock.MatchedBy(func(event PublishEventConfiguration) bool { + return event.ProviderID == "test-provider" && + event.Topic == "test-topic" && + string(event.Data) == `{"message":"hello"}` + })).Return(nil) + }, + expectError: false, + expectedOutput: `{"success": true}`, + expectPublished: true, + }, + { + name: "publish error", + input: `{"topic":"test-topic", "data":{"message":"hello"}, "providerId":"test-provider"}`, + mockSetup: func(m *MockAdapter) { + m.On("Publish", mock.Anything, mock.Anything).Return(errors.New("publish error")) + }, + expectError: false, // The Load method doesn't return the publish error directly + expectedOutput: `{"success": false}`, + expectPublished: true, + }, + { + name: "invalid input json", + input: `{"invalid json":`, + mockSetup: func(m *MockAdapter) {}, + expectError: true, + expectPublished: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockAdapter := NewMockAdapter(t) + tt.mockSetup(mockAdapter) + + dataSource := &PublishDataSource{ + pubSub: mockAdapter, + } + ctx := context.Background() + input := []byte(tt.input) + out := &bytes.Buffer{} + + err := dataSource.Load(ctx, input, out) + + if tt.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tt.expectedOutput, out.String()) + } + }) + } +} + +func TestKafkaPublishDataSource_LoadWithFiles(t *testing.T) { + t.Run("panic on not implemented", func(t *testing.T) { + dataSource := &PublishDataSource{ + pubSub: NewMockAdapter(t), + } + + assert.Panics(t, func() { + dataSource.LoadWithFiles(context.Background(), nil, nil, &bytes.Buffer{}) + }) + }) +} diff --git a/router/pkg/pubsub/kafka/mocks.go b/router/pkg/pubsub/kafka/mocks.go new file 
mode 100644 index 0000000000..f39aee8b4e --- /dev/null +++ b/router/pkg/pubsub/kafka/mocks.go @@ -0,0 +1,261 @@ +// Code generated by mockery; DO NOT EDIT. +// github.com/vektra/mockery +// template: testify + +package kafka + +import ( + "context" + + mock "github.com/stretchr/testify/mock" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// NewMockAdapter creates a new instance of MockAdapter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockAdapter(t interface { + mock.TestingT + Cleanup(func()) +}) *MockAdapter { + mock := &MockAdapter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// MockAdapter is an autogenerated mock type for the Adapter type +type MockAdapter struct { + mock.Mock +} + +type MockAdapter_Expecter struct { + mock *mock.Mock +} + +func (_m *MockAdapter) EXPECT() *MockAdapter_Expecter { + return &MockAdapter_Expecter{mock: &_m.Mock} +} + +// Publish provides a mock function for the type MockAdapter +func (_mock *MockAdapter) Publish(ctx context.Context, event PublishEventConfiguration) error { + ret := _mock.Called(ctx, event) + + if len(ret) == 0 { + panic("no return value specified for Publish") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(context.Context, PublishEventConfiguration) error); ok { + r0 = returnFunc(ctx, event) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockAdapter_Publish_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Publish' +type MockAdapter_Publish_Call struct { + *mock.Call +} + +// Publish is a helper method to define mock.On call +// - ctx context.Context +// - event PublishEventConfiguration +func (_e *MockAdapter_Expecter) Publish(ctx interface{}, event interface{}) *MockAdapter_Publish_Call { + return &MockAdapter_Publish_Call{Call: 
_e.mock.On("Publish", ctx, event)} +} + +func (_c *MockAdapter_Publish_Call) Run(run func(ctx context.Context, event PublishEventConfiguration)) *MockAdapter_Publish_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 PublishEventConfiguration + if args[1] != nil { + arg1 = args[1].(PublishEventConfiguration) + } + run( + arg0, + arg1, + ) + }) + return _c +} + +func (_c *MockAdapter_Publish_Call) Return(err error) *MockAdapter_Publish_Call { + _c.Call.Return(err) + return _c +} + +func (_c *MockAdapter_Publish_Call) RunAndReturn(run func(ctx context.Context, event PublishEventConfiguration) error) *MockAdapter_Publish_Call { + _c.Call.Return(run) + return _c +} + +// Shutdown provides a mock function for the type MockAdapter +func (_mock *MockAdapter) Shutdown(ctx context.Context) error { + ret := _mock.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Shutdown") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = returnFunc(ctx) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockAdapter_Shutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Shutdown' +type MockAdapter_Shutdown_Call struct { + *mock.Call +} + +// Shutdown is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockAdapter_Expecter) Shutdown(ctx interface{}) *MockAdapter_Shutdown_Call { + return &MockAdapter_Shutdown_Call{Call: _e.mock.On("Shutdown", ctx)} +} + +func (_c *MockAdapter_Shutdown_Call) Run(run func(ctx context.Context)) *MockAdapter_Shutdown_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockAdapter_Shutdown_Call) Return(err error) *MockAdapter_Shutdown_Call { + _c.Call.Return(err) + return _c +} + +func 
(_c *MockAdapter_Shutdown_Call) RunAndReturn(run func(ctx context.Context) error) *MockAdapter_Shutdown_Call { + _c.Call.Return(run) + return _c +} + +// Startup provides a mock function for the type MockAdapter +func (_mock *MockAdapter) Startup(ctx context.Context) error { + ret := _mock.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Startup") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = returnFunc(ctx) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockAdapter_Startup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Startup' +type MockAdapter_Startup_Call struct { + *mock.Call +} + +// Startup is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockAdapter_Expecter) Startup(ctx interface{}) *MockAdapter_Startup_Call { + return &MockAdapter_Startup_Call{Call: _e.mock.On("Startup", ctx)} +} + +func (_c *MockAdapter_Startup_Call) Run(run func(ctx context.Context)) *MockAdapter_Startup_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockAdapter_Startup_Call) Return(err error) *MockAdapter_Startup_Call { + _c.Call.Return(err) + return _c +} + +func (_c *MockAdapter_Startup_Call) RunAndReturn(run func(ctx context.Context) error) *MockAdapter_Startup_Call { + _c.Call.Return(run) + return _c +} + +// Subscribe provides a mock function for the type MockAdapter +func (_mock *MockAdapter) Subscribe(ctx context.Context, event SubscriptionEventConfiguration, updater resolve.SubscriptionUpdater) error { + ret := _mock.Called(ctx, event, updater) + + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(context.Context, SubscriptionEventConfiguration, resolve.SubscriptionUpdater) error); ok { + r0 = 
returnFunc(ctx, event, updater) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockAdapter_Subscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Subscribe' +type MockAdapter_Subscribe_Call struct { + *mock.Call +} + +// Subscribe is a helper method to define mock.On call +// - ctx context.Context +// - event SubscriptionEventConfiguration +// - updater resolve.SubscriptionUpdater +func (_e *MockAdapter_Expecter) Subscribe(ctx interface{}, event interface{}, updater interface{}) *MockAdapter_Subscribe_Call { + return &MockAdapter_Subscribe_Call{Call: _e.mock.On("Subscribe", ctx, event, updater)} +} + +func (_c *MockAdapter_Subscribe_Call) Run(run func(ctx context.Context, event SubscriptionEventConfiguration, updater resolve.SubscriptionUpdater)) *MockAdapter_Subscribe_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 SubscriptionEventConfiguration + if args[1] != nil { + arg1 = args[1].(SubscriptionEventConfiguration) + } + var arg2 resolve.SubscriptionUpdater + if args[2] != nil { + arg2 = args[2].(resolve.SubscriptionUpdater) + } + run( + arg0, + arg1, + arg2, + ) + }) + return _c +} + +func (_c *MockAdapter_Subscribe_Call) Return(err error) *MockAdapter_Subscribe_Call { + _c.Call.Return(err) + return _c +} + +func (_c *MockAdapter_Subscribe_Call) RunAndReturn(run func(ctx context.Context, event SubscriptionEventConfiguration, updater resolve.SubscriptionUpdater) error) *MockAdapter_Subscribe_Call { + _c.Call.Return(run) + return _c +} diff --git a/router/pkg/pubsub/kafka/provider_builder.go b/router/pkg/pubsub/kafka/provider_builder.go new file mode 100644 index 0000000000..28fa09a469 --- /dev/null +++ b/router/pkg/pubsub/kafka/provider_builder.go @@ -0,0 +1,123 @@ +package kafka + +import ( + "context" + "crypto/tls" + "fmt" + "time" + + "github.com/twmb/franz-go/pkg/kgo" + 
"github.com/twmb/franz-go/pkg/sasl/plain" + nodev1 "github.com/wundergraph/cosmo/router/gen/proto/wg/cosmo/node/v1" + "github.com/wundergraph/cosmo/router/pkg/config" + "github.com/wundergraph/cosmo/router/pkg/pubsub/datasource" + "go.uber.org/zap" +) + +const providerTypeID = "kafka" + +type ProviderBuilder struct { + ctx context.Context + logger *zap.Logger + hostName string + routerListenAddr string + adapters map[string]Adapter +} + +func (p *ProviderBuilder) TypeID() string { + return providerTypeID +} + +func (p *ProviderBuilder) BuildEngineDataSourceFactory(data *nodev1.KafkaEventConfiguration) (datasource.EngineDataSourceFactory, error) { + providerId := data.GetEngineEventConfiguration().GetProviderId() + adapter, ok := p.adapters[providerId] + if !ok { + return nil, fmt.Errorf("failed to get adapter for provider %s with ID %s", p.TypeID(), providerId) + } + + var eventType EventType + switch data.GetEngineEventConfiguration().GetType() { + case nodev1.EventType_PUBLISH: + eventType = EventTypePublish + case nodev1.EventType_SUBSCRIBE: + eventType = EventTypeSubscribe + default: + return nil, fmt.Errorf("unsupported event type: %s", data.GetEngineEventConfiguration().GetType()) + } + + return &EngineDataSourceFactory{ + fieldName: data.GetEngineEventConfiguration().GetFieldName(), + eventType: eventType, + topics: data.GetTopics(), + providerId: providerId, + KafkaAdapter: adapter, + }, nil +} + +func (p *ProviderBuilder) BuildProvider(provider config.KafkaEventSource) (datasource.Provider, error) { + adapter, pubSubProvider, err := buildProvider(p.ctx, provider, p.logger) + if err != nil { + return nil, err + } + + p.adapters[provider.ID] = adapter + + return pubSubProvider, nil +} + +// buildKafkaOptions creates a list of kgo.Opt options for the given Kafka event source configuration. +// Only general options like TLS, SASL, etc. are configured here. Specific options like topics, etc. are +// configured in the KafkaPubSub implementation. 
+func buildKafkaOptions(eventSource config.KafkaEventSource) ([]kgo.Opt, error) { + opts := []kgo.Opt{ + kgo.SeedBrokers(eventSource.Brokers...), + // Ensure proper timeouts are set + kgo.ProduceRequestTimeout(10 * time.Second), + kgo.ConnIdleTimeout(60 * time.Second), + } + + if eventSource.TLS != nil && eventSource.TLS.Enabled { + opts = append(opts, + // Configure TLS. Uses SystemCertPool for RootCAs by default. + kgo.DialTLSConfig(new(tls.Config)), + ) + } + + if eventSource.Authentication != nil && eventSource.Authentication.SASLPlain.Username != nil && eventSource.Authentication.SASLPlain.Password != nil { + opts = append(opts, kgo.SASL(plain.Auth{ + User: *eventSource.Authentication.SASLPlain.Username, + Pass: *eventSource.Authentication.SASLPlain.Password, + }.AsMechanism())) + } + + return opts, nil +} + +func buildProvider(ctx context.Context, provider config.KafkaEventSource, logger *zap.Logger) (Adapter, datasource.Provider, error) { + options, err := buildKafkaOptions(provider) + if err != nil { + return nil, nil, fmt.Errorf("failed to build options for Kafka provider with ID \"%s\": %w", provider.ID, err) + } + adapter, err := NewProviderAdapter(ctx, logger, options) + if err != nil { + return nil, nil, fmt.Errorf("failed to create adapter for Kafka provider with ID \"%s\": %w", provider.ID, err) + } + pubSubProvider := datasource.NewPubSubProvider(provider.ID, providerTypeID, adapter, logger) + + return adapter, pubSubProvider, nil +} + +func NewProviderBuilder( + ctx context.Context, + logger *zap.Logger, + hostName string, + routerListenAddr string, +) *ProviderBuilder { + return &ProviderBuilder{ + ctx: ctx, + logger: logger, + hostName: hostName, + routerListenAddr: routerListenAddr, + adapters: make(map[string]Adapter), + } +} diff --git a/router/pkg/pubsub/kafka/provider_builder_test.go b/router/pkg/pubsub/kafka/provider_builder_test.go new file mode 100644 index 0000000000..c9cada1253 --- /dev/null +++ 
b/router/pkg/pubsub/kafka/provider_builder_test.go @@ -0,0 +1,85 @@ +package kafka + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/wundergraph/cosmo/router/pkg/config" + "github.com/wundergraph/cosmo/router/pkg/pubsub/datasource" + "go.uber.org/zap/zaptest" +) + +func TestBuildKafkaOptions(t *testing.T) { + t.Run("basic configuration", func(t *testing.T) { + cfg := config.KafkaEventSource{ + Brokers: []string{"localhost:9092"}, + } + + opts, err := buildKafkaOptions(cfg) + require.NoError(t, err) + require.NotEmpty(t, opts) + }) + + t.Run("with TLS", func(t *testing.T) { + cfg := config.KafkaEventSource{ + Brokers: []string{"localhost:9092"}, + TLS: &config.KafkaTLSConfiguration{ + Enabled: true, + }, + } + + opts, err := buildKafkaOptions(cfg) + require.NoError(t, err) + require.NotEmpty(t, opts) + // Can't directly check for TLS options, but we can verify more options are present + require.Equal(t, len(opts), 4) + }) + + t.Run("with auth", func(t *testing.T) { + username := "user" + password := "pass" + cfg := config.KafkaEventSource{ + Brokers: []string{"localhost:9092"}, + Authentication: &config.KafkaAuthentication{ + SASLPlain: config.KafkaSASLPlainAuthentication{ + Username: &username, + Password: &password, + }, + }, + } + + opts, err := buildKafkaOptions(cfg) + require.NoError(t, err) + require.NotEmpty(t, opts) + // Can't directly check for SASL options, but we can verify more options are present + require.Greater(t, len(opts), 1) + }) +} + +func TestPubSubProviderBuilderFactory(t *testing.T) { + t.Run("creates provider with configured adapters", func(t *testing.T) { + providerId := "test-provider" + + cfg := config.KafkaEventSource{ + ID: providerId, + Brokers: []string{"localhost:9092"}, + } + + logger := zaptest.NewLogger(t) + + ctx := context.Background() + + builder := NewProviderBuilder(ctx, logger, "host", "addr") + require.NotNil(t, builder) + provider, err := 
builder.BuildProvider(cfg) + require.NoError(t, err) + + // Check the returned provider + kafkaProvider, ok := provider.(*datasource.PubSubProvider) + require.True(t, ok) + assert.NotNil(t, kafkaProvider.Logger) + assert.NotNil(t, kafkaProvider.Adapter) + }) +} diff --git a/router/pkg/pubsub/lifecycle.go b/router/pkg/pubsub/lifecycle.go deleted file mode 100644 index d68418f9f3..0000000000 --- a/router/pkg/pubsub/lifecycle.go +++ /dev/null @@ -1,8 +0,0 @@ -package pubsub - -import "context" - -type Lifecycle interface { - // Shutdown all the resources used by the pubsub - Shutdown(ctx context.Context) error -} diff --git a/router/pkg/pubsub/nats/nats.go b/router/pkg/pubsub/nats/adapter.go similarity index 60% rename from router/pkg/pubsub/nats/nats.go rename to router/pkg/pubsub/nats/adapter.go index 2e01432d12..a0bef13f45 100644 --- a/router/pkg/pubsub/nats/nats.go +++ b/router/pkg/pubsub/nats/adapter.go @@ -4,69 +4,51 @@ import ( "context" "errors" "fmt" + "io" + "sync" + "time" + "github.com/cespare/xxhash/v2" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" - "github.com/wundergraph/cosmo/router/pkg/pubsub" - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/pubsub_datasource" + "github.com/wundergraph/cosmo/router/pkg/pubsub/datasource" "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" "go.uber.org/zap" - "io" - "sync" - "time" ) -var ( - _ pubsub_datasource.NatsConnector = (*connector)(nil) - _ pubsub_datasource.NatsPubSub = (*natsPubSub)(nil) - _ pubsub.Lifecycle = (*natsPubSub)(nil) -) - -type connector struct { - conn *nats.Conn - logger *zap.Logger - js jetstream.JetStream - hostName string - routerListenAddr string -} - -func NewConnector(logger *zap.Logger, conn *nats.Conn, js jetstream.JetStream, hostName string, routerListenAddr string) pubsub_datasource.NatsConnector { - return &connector{ - conn: conn, - logger: logger, - js: js, - hostName: hostName, - routerListenAddr: routerListenAddr, - } -} - 
-func (c *connector) New(ctx context.Context) pubsub_datasource.NatsPubSub { - return &natsPubSub{ - ctx: ctx, - conn: c.conn, - js: c.js, - logger: c.logger.With(zap.String("pubsub", "nats")), - closeWg: sync.WaitGroup{}, - hostName: c.hostName, - routerListenAddr: c.routerListenAddr, - } +// Adapter defines the methods that a NATS adapter should implement +type Adapter interface { + // Subscribe subscribes to the given events and sends updates to the updater + Subscribe(ctx context.Context, event SubscriptionEventConfiguration, updater resolve.SubscriptionUpdater) error + // Publish publishes the given event to the specified subject + Publish(ctx context.Context, event PublishAndRequestEventConfiguration) error + // Request sends a request to the specified subject and writes the response to the given writer + Request(ctx context.Context, event PublishAndRequestEventConfiguration, w io.Writer) error + // Startup initializes the adapter + Startup(ctx context.Context) error + // Shutdown gracefully shuts down the adapter + Shutdown(ctx context.Context) error } -type natsPubSub struct { +// ProviderAdapter implements the AdapterInterface for NATS pub/sub +type ProviderAdapter struct { ctx context.Context - conn *nats.Conn - logger *zap.Logger + client *nats.Conn js jetstream.JetStream + logger *zap.Logger closeWg sync.WaitGroup hostName string routerListenAddr string + url string + opts []nats.Option + flushTimeout time.Duration } // getInstanceIdentifier returns an identifier for the current instance. // We use the hostname and the address the router is listening on, which should provide a good representation // of what a unique instance is from the perspective of the client that has started a subscription to this instance // and want to restart the subscription after a failure on the client or router side. 
-func (p *natsPubSub) getInstanceIdentifier() string { +func (p *ProviderAdapter) getInstanceIdentifier() string { return fmt.Sprintf("%s-%s", p.hostName, p.routerListenAddr) } @@ -74,7 +56,7 @@ func (p *natsPubSub) getInstanceIdentifier() string { // we need to make sure that the durable consumer name is unique for each instance and subjects to prevent // multiple routers from changing the same consumer, which would lead to message loss and wrong messages delivered // to the subscribers -func (p *natsPubSub) getDurableConsumerName(durableName string, subjects []string) (string, error) { +func (p *ProviderAdapter) getDurableConsumerName(durableName string, subjects []string) (string, error) { subjHash := xxhash.New() _, err := subjHash.WriteString(p.getInstanceIdentifier()) if err != nil { @@ -90,13 +72,21 @@ func (p *natsPubSub) getDurableConsumerName(durableName string, subjects []strin return fmt.Sprintf("%s-%x", durableName, subjHash.Sum64()), nil } -func (p *natsPubSub) Subscribe(ctx context.Context, event pubsub_datasource.NatsSubscriptionEventConfiguration, updater resolve.SubscriptionUpdater) error { +func (p *ProviderAdapter) Subscribe(ctx context.Context, event SubscriptionEventConfiguration, updater resolve.SubscriptionUpdater) error { log := p.logger.With( zap.String("provider_id", event.ProviderID), zap.String("method", "subscribe"), zap.Strings("subjects", event.Subjects), ) + if p.client == nil { + return datasource.NewError("nats client not initialized", nil) + } + + if p.js == nil { + return datasource.NewError("nats jetstream not initialized", nil) + } + if event.StreamConfiguration != nil { durableConsumerName, err := p.getDurableConsumerName(event.StreamConfiguration.Consumer, event.Subjects) if err != nil { @@ -110,10 +100,11 @@ func (p *natsPubSub) Subscribe(ctx context.Context, event pubsub_datasource.Nats if event.StreamConfiguration.ConsumerInactiveThreshold > 0 { consumerConfig.InactiveThreshold = 
time.Duration(event.StreamConfiguration.ConsumerInactiveThreshold) * time.Second } + consumer, err := p.js.CreateOrUpdateConsumer(ctx, event.StreamConfiguration.StreamName, consumerConfig) if err != nil { log.Error("creating or updating consumer", zap.Error(err)) - return pubsub.NewError(fmt.Sprintf(`failed to create or update consumer for stream "%s"`, event.StreamConfiguration.StreamName), err) + return datasource.NewError(fmt.Sprintf(`failed to create or update consumer for stream "%s"`, event.StreamConfiguration.StreamName), err) } p.closeWg.Add(1) @@ -161,10 +152,10 @@ func (p *natsPubSub) Subscribe(ctx context.Context, event pubsub_datasource.Nats msgChan := make(chan *nats.Msg) subscriptions := make([]*nats.Subscription, len(event.Subjects)) for i, subject := range event.Subjects { - subscription, err := p.conn.ChanSubscribe(subject, msgChan) + subscription, err := p.client.ChanSubscribe(subject, msgChan) if err != nil { log.Error("subscribing to NATS subject", zap.Error(err), zap.String("subscription_subject", subject)) - return pubsub.NewError(fmt.Sprintf(`failed to subscribe to NATS subject "%s"`, subject), err) + return datasource.NewError(fmt.Sprintf(`failed to subscribe to NATS subject "%s"`, subject), err) } subscriptions[i] = subscription } @@ -206,37 +197,45 @@ func (p *natsPubSub) Subscribe(ctx context.Context, event pubsub_datasource.Nats return nil } -func (p *natsPubSub) Publish(_ context.Context, event pubsub_datasource.NatsPublishAndRequestEventConfiguration) error { +func (p *ProviderAdapter) Publish(_ context.Context, event PublishAndRequestEventConfiguration) error { log := p.logger.With( zap.String("provider_id", event.ProviderID), zap.String("method", "publish"), zap.String("subject", event.Subject), ) + if p.client == nil { + return datasource.NewError("nats client not initialized", nil) + } + log.Debug("publish", zap.ByteString("data", event.Data)) - err := p.conn.Publish(event.Subject, event.Data) + err := 
p.client.Publish(event.Subject, event.Data) if err != nil { log.Error("publish error", zap.Error(err)) - return pubsub.NewError(fmt.Sprintf("error publishing to NATS subject %s", event.Subject), err) + return datasource.NewError(fmt.Sprintf("error publishing to NATS subject %s", event.Subject), err) } return nil } -func (p *natsPubSub) Request(ctx context.Context, event pubsub_datasource.NatsPublishAndRequestEventConfiguration, w io.Writer) error { +func (p *ProviderAdapter) Request(ctx context.Context, event PublishAndRequestEventConfiguration, w io.Writer) error { log := p.logger.With( zap.String("provider_id", event.ProviderID), zap.String("method", "request"), zap.String("subject", event.Subject), ) + if p.client == nil { + return datasource.NewError("nats client not initialized", nil) + } + log.Debug("request", zap.ByteString("data", event.Data)) - msg, err := p.conn.RequestWithContext(ctx, event.Subject, event.Data) + msg, err := p.client.RequestWithContext(ctx, event.Subject, event.Data) if err != nil { log.Error("request error", zap.Error(err)) - return pubsub.NewError(fmt.Sprintf("error requesting from NATS subject %s", event.Subject), err) + return datasource.NewError(fmt.Sprintf("error requesting from NATS subject %s", event.Subject), err) } _, err = w.Write(msg.Data) @@ -248,34 +247,75 @@ func (p *natsPubSub) Request(ctx context.Context, event pubsub_datasource.NatsPu return err } -func (p *natsPubSub) flush(ctx context.Context) error { - return p.conn.FlushWithContext(ctx) +func (p *ProviderAdapter) flush(ctx context.Context) error { + if p.client == nil { + return nil + } + _, ok := ctx.Deadline() + if !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, p.flushTimeout) + defer cancel() + } + return p.client.FlushWithContext(ctx) } -func (p *natsPubSub) Shutdown(ctx context.Context) error { +func (p *ProviderAdapter) Startup(ctx context.Context) (err error) { + p.client, err = nats.Connect(p.url, p.opts...) 
+ if err != nil { + return err + } + p.js, err = jetstream.New(p.client) + if err != nil { + return err + } + return nil +} - if p.conn.IsClosed() { +func (p *ProviderAdapter) Shutdown(ctx context.Context) error { + if p.client == nil { return nil } - var err error + if p.client.IsClosed() { + return nil // Already disconnected or failed to connect + } + + var shutdownErr error fErr := p.flush(ctx) if fErr != nil { - err = errors.Join(err, fErr) + shutdownErr = errors.Join(shutdownErr, fErr) } - drainErr := p.conn.Drain() + drainErr := p.client.Drain() if drainErr != nil { - err = errors.Join(err, drainErr) + shutdownErr = errors.Join(shutdownErr, drainErr) } // Wait for all subscriptions to be closed p.closeWg.Wait() - if err != nil { - return fmt.Errorf("nats pubsub shutdown: %w", err) + if shutdownErr != nil { + return fmt.Errorf("nats pubsub shutdown: %w", shutdownErr) } return nil } + +func NewAdapter(ctx context.Context, logger *zap.Logger, url string, opts []nats.Option, hostName string, routerListenAddr string) (Adapter, error) { + if logger == nil { + logger = zap.NewNop() + } + + return &ProviderAdapter{ + ctx: ctx, + logger: logger.With(zap.String("pubsub", "nats")), + closeWg: sync.WaitGroup{}, + hostName: hostName, + routerListenAddr: routerListenAddr, + url: url, + opts: opts, + flushTimeout: 10 * time.Second, + }, nil +} diff --git a/router/pkg/pubsub/nats/engine_datasource.go b/router/pkg/pubsub/nats/engine_datasource.go new file mode 100644 index 0000000000..ffc23ca838 --- /dev/null +++ b/router/pkg/pubsub/nats/engine_datasource.go @@ -0,0 +1,114 @@ +package nats + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + + "github.com/buger/jsonparser" + "github.com/cespare/xxhash/v2" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/httpclient" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +type StreamConfiguration struct { + Consumer string `json:"consumer"` + ConsumerInactiveThreshold int32 
`json:"consumerInactiveThreshold"` + StreamName string `json:"streamName"` +} + +type SubscriptionEventConfiguration struct { + ProviderID string `json:"providerId"` + Subjects []string `json:"subjects"` + StreamConfiguration *StreamConfiguration `json:"streamConfiguration,omitempty"` +} + +type PublishAndRequestEventConfiguration struct { + ProviderID string `json:"providerId"` + Subject string `json:"subject"` + Data json.RawMessage `json:"data"` +} + +func (s *PublishAndRequestEventConfiguration) MarshalJSONTemplate() string { + // The content of the data field could be not valid JSON, so we can't use json.Marshal + // e.g. {"id":$$0$$,"update":$$1$$} + return fmt.Sprintf(`{"subject":"%s", "data": %s, "providerId":"%s"}`, s.Subject, s.Data, s.ProviderID) +} + +type SubscriptionSource struct { + pubSub Adapter +} + +func (s *SubscriptionSource) UniqueRequestID(ctx *resolve.Context, input []byte, xxh *xxhash.Digest) error { + + val, _, _, err := jsonparser.Get(input, "subjects") + if err != nil { + return err + } + + _, err = xxh.Write(val) + if err != nil { + return err + } + + val, _, _, err = jsonparser.Get(input, "providerId") + if err != nil { + return err + } + + _, err = xxh.Write(val) + return err +} + +func (s *SubscriptionSource) Start(ctx *resolve.Context, input []byte, updater resolve.SubscriptionUpdater) error { + var subscriptionConfiguration SubscriptionEventConfiguration + err := json.Unmarshal(input, &subscriptionConfiguration) + if err != nil { + return err + } + + return s.pubSub.Subscribe(ctx.Context(), subscriptionConfiguration, updater) +} + +type NatsPublishDataSource struct { + pubSub Adapter +} + +func (s *NatsPublishDataSource) Load(ctx context.Context, input []byte, out *bytes.Buffer) error { + var publishConfiguration PublishAndRequestEventConfiguration + err := json.Unmarshal(input, &publishConfiguration) + if err != nil { + return err + } + + if err := s.pubSub.Publish(ctx, publishConfiguration); err != nil { + _, err = 
io.WriteString(out, `{"success": false}`) + return err + } + _, err = io.WriteString(out, `{"success": true}`) + return err +} + +func (s *NatsPublishDataSource) LoadWithFiles(ctx context.Context, input []byte, files []*httpclient.FileUpload, out *bytes.Buffer) (err error) { + panic("not implemented") +} + +type NatsRequestDataSource struct { + pubSub Adapter +} + +func (s *NatsRequestDataSource) Load(ctx context.Context, input []byte, out *bytes.Buffer) error { + var subscriptionConfiguration PublishAndRequestEventConfiguration + err := json.Unmarshal(input, &subscriptionConfiguration) + if err != nil { + return err + } + + return s.pubSub.Request(ctx, subscriptionConfiguration, out) +} + +func (s *NatsRequestDataSource) LoadWithFiles(ctx context.Context, input []byte, files []*httpclient.FileUpload, out *bytes.Buffer) error { + panic("not implemented") +} diff --git a/router/pkg/pubsub/nats/engine_datasource_factory.go b/router/pkg/pubsub/nats/engine_datasource_factory.go new file mode 100644 index 0000000000..48fd2849f7 --- /dev/null +++ b/router/pkg/pubsub/nats/engine_datasource_factory.go @@ -0,0 +1,127 @@ +package nats + +import ( + "encoding/json" + "fmt" + "slices" + + "github.com/wundergraph/cosmo/router/pkg/pubsub/datasource" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +type EventType int + +const ( + EventTypePublish EventType = iota + EventTypeRequest + EventTypeSubscribe +) + +type EngineDataSourceFactory struct { + NatsAdapter Adapter + + fieldName string + eventType EventType + subjects []string + providerId string + + withStreamConfiguration bool + consumerName string + streamName string + consumerInactiveThreshold int32 +} + +func (c *EngineDataSourceFactory) GetFieldName() string { + return c.fieldName +} + +func (c *EngineDataSourceFactory) ResolveDataSource() (resolve.DataSource, error) { + var dataSource resolve.DataSource + + switch c.eventType { + case EventTypePublish: + dataSource = &NatsPublishDataSource{ + 
pubSub: c.NatsAdapter, + } + case EventTypeRequest: + dataSource = &NatsRequestDataSource{ + pubSub: c.NatsAdapter, + } + default: + return nil, fmt.Errorf("failed to configure fetch: invalid event type \"%d\" for Nats", c.eventType) + } + + return dataSource, nil +} + +func (c *EngineDataSourceFactory) ResolveDataSourceInput(eventData []byte) (string, error) { + if len(c.subjects) != 1 { + return "", fmt.Errorf("publish and request events should define one subject but received %d", len(c.subjects)) + } + + subject := c.subjects[0] + + evtCfg := PublishAndRequestEventConfiguration{ + ProviderID: c.providerId, + Subject: subject, + Data: eventData, + } + + return evtCfg.MarshalJSONTemplate(), nil +} + +func (c *EngineDataSourceFactory) ResolveDataSourceSubscription() (resolve.SubscriptionDataSource, error) { + return &SubscriptionSource{ + pubSub: c.NatsAdapter, + }, nil +} + +func (c *EngineDataSourceFactory) ResolveDataSourceSubscriptionInput() (string, error) { + evtCfg := SubscriptionEventConfiguration{ + ProviderID: c.providerId, + Subjects: c.subjects, + } + if c.withStreamConfiguration { + evtCfg.StreamConfiguration = &StreamConfiguration{ + Consumer: c.consumerName, + StreamName: c.streamName, + ConsumerInactiveThreshold: c.consumerInactiveThreshold, + } + } + object, err := json.Marshal(evtCfg) + if err != nil { + return "", fmt.Errorf("failed to marshal event subscription streamConfiguration") + } + return string(object), nil +} + +func (c *EngineDataSourceFactory) TransformEventData(extractFn datasource.ArgumentTemplateCallback) error { + switch c.eventType { + case EventTypePublish, EventTypeRequest: + extractedSubject, err := extractFn(c.subjects[0]) + if err != nil { + return fmt.Errorf("unable to parse subject with id %s", c.subjects[0]) + } + if !isValidNatsSubject(extractedSubject) { + return fmt.Errorf("invalid subject: %s", extractedSubject) + } + c.subjects = []string{extractedSubject} + case EventTypeSubscribe: + extractedSubjects := 
make([]string, 0, len(c.subjects)) + for _, rawSubject := range c.subjects { + extractedSubject, err := extractFn(rawSubject) + if err != nil { + return fmt.Errorf("unable to parse subject with id %s", rawSubject) + } + if !isValidNatsSubject(extractedSubject) { + return fmt.Errorf("invalid subject: %s", extractedSubject) + } + extractedSubjects = append(extractedSubjects, extractedSubject) + } + slices.Sort(extractedSubjects) + c.subjects = extractedSubjects + } + + return nil +} diff --git a/router/pkg/pubsub/nats/engine_datasource_factory_test.go b/router/pkg/pubsub/nats/engine_datasource_factory_test.go new file mode 100644 index 0000000000..57426ad34c --- /dev/null +++ b/router/pkg/pubsub/nats/engine_datasource_factory_test.go @@ -0,0 +1,255 @@ +package nats + +import ( + "bytes" + "context" + "encoding/json" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/wundergraph/cosmo/router/pkg/pubsub/pubsubtest" +) + +func TestNatsEngineDataSourceFactory(t *testing.T) { + // Create the data source to test with a real adapter + adapter := &ProviderAdapter{} + pubsub := &EngineDataSourceFactory{ + providerId: "test-provider", + eventType: EventTypePublish, + subjects: []string{"test-subject"}, + fieldName: "testField", + NatsAdapter: adapter, + } + + // Run the standard test suite + pubsubtest.VerifyEngineDataSourceFactoryImplementation(t, pubsub) +} + +func TestEngineDataSourceFactoryWithMockAdapter(t *testing.T) { + // Create mock adapter + mockAdapter := NewMockAdapter(t) + + // Configure mock expectations for Publish + mockAdapter.On("Publish", mock.Anything, mock.MatchedBy(func(event PublishAndRequestEventConfiguration) bool { + return event.ProviderID == "test-provider" && event.Subject == "test-subject" + })).Return(nil) + + // Create the data source with mock adapter + pubsub := &EngineDataSourceFactory{ + providerId: "test-provider", + eventType: EventTypePublish, + subjects: []string{"test-subject"}, + fieldName: 
"testField", + NatsAdapter: mockAdapter, + } + + // Get the data source + ds, err := pubsub.ResolveDataSource() + require.NoError(t, err) + + // Get the input + input, err := pubsub.ResolveDataSourceInput([]byte(`{"test":"data"}`)) + require.NoError(t, err) + + // Call Load on the data source + out := &bytes.Buffer{} + err = ds.Load(context.Background(), []byte(input), out) + require.NoError(t, err) + require.Equal(t, `{"success": true}`, out.String()) +} + +func TestEngineDataSourceFactory_GetResolveDataSource_WrongType(t *testing.T) { + // Create mock adapter + mockAdapter := NewMockAdapter(t) + + // Create the data source with mock adapter + pubsub := &EngineDataSourceFactory{ + providerId: "test-provider", + eventType: EventTypeSubscribe, + subjects: []string{"test-subject"}, + fieldName: "testField", + NatsAdapter: mockAdapter, + } + + // Get the data source + ds, err := pubsub.ResolveDataSource() + require.Error(t, err) + require.Nil(t, ds) +} + +func TestEngineDataSourceFactory_GetResolveDataSourceInput_MultipleSubjects(t *testing.T) { + // Create the data source with mock adapter + pubsub := &EngineDataSourceFactory{ + providerId: "test-provider", + eventType: EventTypePublish, + subjects: []string{"test-subject-1", "test-subject-2"}, + fieldName: "testField", + } + + // Get the input + input, err := pubsub.ResolveDataSourceInput([]byte(`{"test":"data"}`)) + require.Error(t, err) + require.Empty(t, input) +} + +func TestEngineDataSourceFactory_GetResolveDataSourceInput_NoSubjects(t *testing.T) { + // Create the data source with mock adapter + pubsub := &EngineDataSourceFactory{ + providerId: "test-provider", + eventType: EventTypePublish, + subjects: []string{}, + fieldName: "testField", + } + + // Get the input + input, err := pubsub.ResolveDataSourceInput([]byte(`{"test":"data"}`)) + require.Error(t, err) + require.Empty(t, input) +} + +func TestNatsEngineDataSourceFactoryMultiSubjectSubscription(t *testing.T) { + // Create the data source to test with 
mock adapter + pubsub := &EngineDataSourceFactory{ + providerId: "test-provider", + eventType: EventTypePublish, + subjects: []string{"test-subject-1", "test-subject-2"}, + fieldName: "testField", + } + + // Test GetResolveDataSourceSubscriptionInput + subscriptionInput, err := pubsub.ResolveDataSourceSubscriptionInput() + require.NoError(t, err, "Expected no error from GetResolveDataSourceSubscriptionInput") + require.NotEmpty(t, subscriptionInput, "Expected non-empty subscription input") + + // Verify the subscription input contains both subjects + var subscriptionConfig SubscriptionEventConfiguration + err = json.Unmarshal([]byte(subscriptionInput), &subscriptionConfig) + require.NoError(t, err, "Expected valid JSON from GetResolveDataSourceSubscriptionInput") + require.Equal(t, 2, len(subscriptionConfig.Subjects), "Expected 2 subjects in subscription configuration") + require.Equal(t, "test-subject-1", subscriptionConfig.Subjects[0], "Expected first subject to be 'test-subject-1'") + require.Equal(t, "test-subject-2", subscriptionConfig.Subjects[1], "Expected second subject to be 'test-subject-2'") +} + +func TestNatsEngineDataSourceFactoryWithStreamConfiguration(t *testing.T) { + // Create the data source to test + pubsub := &EngineDataSourceFactory{ + providerId: "test-provider", + eventType: EventTypePublish, + subjects: []string{"test-subject"}, + fieldName: "testField", + withStreamConfiguration: true, + consumerName: "test-consumer", + streamName: "test-stream", + consumerInactiveThreshold: 30, + } + + // Test GetResolveDataSourceSubscriptionInput with stream configuration + subscriptionInput, err := pubsub.ResolveDataSourceSubscriptionInput() + require.NoError(t, err, "Expected no error from GetResolveDataSourceSubscriptionInput") + require.NotEmpty(t, subscriptionInput, "Expected non-empty subscription input") + + // Verify the subscription input contains stream configuration + var subscriptionConfig SubscriptionEventConfiguration + err = 
json.Unmarshal([]byte(subscriptionInput), &subscriptionConfig) + require.NoError(t, err, "Expected valid JSON from GetResolveDataSourceSubscriptionInput") + require.NotNil(t, subscriptionConfig.StreamConfiguration, "Expected non-nil stream configuration") + require.Equal(t, "test-consumer", subscriptionConfig.StreamConfiguration.Consumer, "Expected consumer to be 'test-consumer'") + require.Equal(t, "test-stream", subscriptionConfig.StreamConfiguration.StreamName, "Expected stream name to be 'test-stream'") + require.Equal(t, int32(30), subscriptionConfig.StreamConfiguration.ConsumerInactiveThreshold, "Expected consumer inactive threshold to be 30") +} + +func TestEngineDataSourceFactory_RequestDataSource(t *testing.T) { + // Create mock adapter + mockAdapter := NewMockAdapter(t) + + // Configure mock expectations for Request + mockAdapter.On("Request", mock.Anything, mock.MatchedBy(func(event PublishAndRequestEventConfiguration) bool { + return event.ProviderID == "test-provider" && event.Subject == "test-subject" + }), mock.Anything).Return(nil).Run(func(args mock.Arguments) { + w := args.Get(2).(io.Writer) + w.Write([]byte(`{"response": "test"}`)) + }) + + // Create the data source with mock adapter + pubsub := &EngineDataSourceFactory{ + providerId: "test-provider", + eventType: EventTypeRequest, + subjects: []string{"test-subject"}, + fieldName: "testField", + NatsAdapter: mockAdapter, + } + + // Get the data source + ds, err := pubsub.ResolveDataSource() + require.NoError(t, err) + require.NotNil(t, ds) + + // Get the input + input, err := pubsub.ResolveDataSourceInput([]byte(`{"test":"data"}`)) + require.NoError(t, err) + + // Call Load on the data source + out := &bytes.Buffer{} + err = ds.Load(context.Background(), []byte(input), out) + require.NoError(t, err) + require.Equal(t, `{"response": "test"}`, out.String()) +} + +func TestTransformEventConfig(t *testing.T) { + t.Run("publish event", func(t *testing.T) { + cfg := &EngineDataSourceFactory{ + 
providerId: "test-provider", + eventType: EventTypePublish, + subjects: []string{"original.subject"}, + fieldName: "testField", + } + + // Simple transform function that adds "transformed." prefix + transformFn := func(s string) (string, error) { + return "transformed." + s, nil + } + + err := cfg.TransformEventData(transformFn) + require.NoError(t, err) + require.Equal(t, []string{"transformed.original.subject"}, cfg.subjects) + }) + + t.Run("subscribe event", func(t *testing.T) { + cfg := &EngineDataSourceFactory{ + providerId: "test-provider", + eventType: EventTypeSubscribe, + subjects: []string{"original.subject1", "original.subject2"}, + fieldName: "testField", + } + + // Simple transform function that adds "transformed." prefix + transformFn := func(s string) (string, error) { + return "transformed." + s, nil + } + + err := cfg.TransformEventData(transformFn) + require.NoError(t, err) + // Since the function sorts the subjects + require.Equal(t, []string{"transformed.original.subject1", "transformed.original.subject2"}, cfg.subjects) + }) + + t.Run("invalid subject", func(t *testing.T) { + cfg := &EngineDataSourceFactory{ + providerId: "test-provider", + eventType: EventTypePublish, + subjects: []string{"invalid subject with spaces"}, + fieldName: "testField", + } + + transformFn := func(s string) (string, error) { + return s, nil + } + + err := cfg.TransformEventData(transformFn) + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid subject") + }) +} diff --git a/router/pkg/pubsub/nats/engine_datasource_test.go b/router/pkg/pubsub/nats/engine_datasource_test.go new file mode 100644 index 0000000000..da21d4de88 --- /dev/null +++ b/router/pkg/pubsub/nats/engine_datasource_test.go @@ -0,0 +1,356 @@ +package nats + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "testing" + + "github.com/cespare/xxhash/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + 
"github.com/wundergraph/cosmo/router/pkg/pubsub/datasource" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +func TestPublishEventConfiguration_MarshalJSONTemplate(t *testing.T) { + tests := []struct { + name string + config PublishAndRequestEventConfiguration + wantPattern string + }{ + { + name: "simple configuration", + config: PublishAndRequestEventConfiguration{ + ProviderID: "test-provider", + Subject: "test-subject", + Data: json.RawMessage(`{"message":"hello"}`), + }, + wantPattern: `{"subject":"test-subject", "data": {"message":"hello"}, "providerId":"test-provider"}`, + }, + { + name: "with special characters", + config: PublishAndRequestEventConfiguration{ + ProviderID: "test-provider-id", + Subject: "subject-with-hyphens", + Data: json.RawMessage(`{"message":"special \"quotes\" here"}`), + }, + wantPattern: `{"subject":"subject-with-hyphens", "data": {"message":"special \"quotes\" here"}, "providerId":"test-provider-id"}`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.config.MarshalJSONTemplate() + assert.Equal(t, tt.wantPattern, result) + }) + } +} + +func TestPublishAndRequestEventConfiguration_MarshalJSONTemplate(t *testing.T) { + tests := []struct { + name string + config PublishAndRequestEventConfiguration + wantPattern string + }{ + { + name: "simple configuration", + config: PublishAndRequestEventConfiguration{ + ProviderID: "test-provider", + Subject: "test-subject", + Data: json.RawMessage(`{"message":"hello"}`), + }, + wantPattern: `{"subject":"test-subject", "data": {"message":"hello"}, "providerId":"test-provider"}`, + }, + { + name: "with special characters", + config: PublishAndRequestEventConfiguration{ + ProviderID: "test-provider-id", + Subject: "subject-with-hyphens", + Data: json.RawMessage(`{"message":"special \"quotes\" here"}`), + }, + wantPattern: `{"subject":"subject-with-hyphens", "data": {"message":"special \"quotes\" here"}, 
"providerId":"test-provider-id"}`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.config.MarshalJSONTemplate() + assert.Equal(t, tt.wantPattern, result) + }) + } +} + +func TestSubscriptionSource_UniqueRequestID(t *testing.T) { + tests := []struct { + name string + input string + expectError bool + expectedError error + }{ + { + name: "valid input", + input: `{"subjects":["subject1", "subject2"], "providerId":"test-provider"}`, + expectError: false, + }, + { + name: "missing subjects", + input: `{"providerId":"test-provider"}`, + expectError: true, + expectedError: errors.New("Key path not found"), + }, + { + name: "missing providerId", + input: `{"subjects":["subject1", "subject2"]}`, + expectError: true, + expectedError: errors.New("Key path not found"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + source := &SubscriptionSource{ + pubSub: NewMockAdapter(t), + } + ctx := &resolve.Context{} + input := []byte(tt.input) + xxh := xxhash.New() + + err := source.UniqueRequestID(ctx, input, xxh) + + if tt.expectError { + require.Error(t, err) + if tt.expectedError != nil { + // For jsonparser errors, just check if the error message contains the expected text + assert.Contains(t, err.Error(), tt.expectedError.Error()) + } + } else { + require.NoError(t, err) + // Check that the hash has been updated + assert.NotEqual(t, 0, xxh.Sum64()) + } + }) + } +} + +func TestSubscriptionSource_Start(t *testing.T) { + tests := []struct { + name string + input string + mockSetup func(*MockAdapter, *datasource.MockSubscriptionUpdater) + expectError bool + }{ + { + name: "successful subscription", + input: `{"subjects":["subject1", "subject2"], "providerId":"test-provider"}`, + mockSetup: func(m *MockAdapter, updater *datasource.MockSubscriptionUpdater) { + m.On("Subscribe", mock.Anything, SubscriptionEventConfiguration{ + ProviderID: "test-provider", + Subjects: []string{"subject1", "subject2"}, + }, 
mock.Anything).Return(nil) + }, + expectError: false, + }, + { + name: "adapter returns error", + input: `{"subjects":["subject1"], "providerId":"test-provider"}`, + mockSetup: func(m *MockAdapter, updater *datasource.MockSubscriptionUpdater) { + m.On("Subscribe", mock.Anything, SubscriptionEventConfiguration{ + ProviderID: "test-provider", + Subjects: []string{"subject1"}, + }, mock.Anything).Return(errors.New("subscription error")) + }, + expectError: true, + }, + { + name: "invalid input json", + input: `{"invalid json":`, + mockSetup: func(m *MockAdapter, updater *datasource.MockSubscriptionUpdater) {}, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockAdapter := NewMockAdapter(t) + updater := datasource.NewMockSubscriptionUpdater(t) + tt.mockSetup(mockAdapter, updater) + + source := &SubscriptionSource{ + pubSub: mockAdapter, + } + + // Set up go context + goCtx := context.Background() + + // Create a resolve.Context with the standard context + resolveCtx := &resolve.Context{} + resolveCtx = resolveCtx.WithContext(goCtx) + + input := []byte(tt.input) + err := source.Start(resolveCtx, input, updater) + + if tt.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestNatsPublishDataSource_Load(t *testing.T) { + tests := []struct { + name string + input string + mockSetup func(*MockAdapter) + expectError bool + expectedOutput string + expectPublished bool + }{ + { + name: "successful publish", + input: `{"subject":"test-subject", "data":{"message":"hello"}, "providerId":"test-provider"}`, + mockSetup: func(m *MockAdapter) { + m.On("Publish", mock.Anything, mock.MatchedBy(func(event PublishAndRequestEventConfiguration) bool { + return event.ProviderID == "test-provider" && + event.Subject == "test-subject" && + string(event.Data) == `{"message":"hello"}` + })).Return(nil) + }, + expectError: false, + expectedOutput: `{"success": true}`, + expectPublished: true, + 
}, + { + name: "publish error", + input: `{"subject":"test-subject", "data":{"message":"hello"}, "providerId":"test-provider"}`, + mockSetup: func(m *MockAdapter) { + m.On("Publish", mock.Anything, mock.Anything).Return(errors.New("publish error")) + }, + expectError: false, // The Load method doesn't return the publish error directly + expectedOutput: `{"success": false}`, + expectPublished: true, + }, + { + name: "invalid input json", + input: `{"invalid json":`, + mockSetup: func(m *MockAdapter) {}, + expectError: true, + expectPublished: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockAdapter := NewMockAdapter(t) + tt.mockSetup(mockAdapter) + + dataSource := &NatsPublishDataSource{ + pubSub: mockAdapter, + } + + ctx := context.Background() + input := []byte(tt.input) + var out bytes.Buffer + + err := dataSource.Load(ctx, input, &out) + + if tt.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + if tt.expectedOutput != "" { + assert.Equal(t, tt.expectedOutput, out.String()) + } + } + }) + } +} + +func TestNatsPublishDataSource_LoadWithFiles(t *testing.T) { + dataSource := &NatsPublishDataSource{} + assert.Panics(t, func() { + dataSource.LoadWithFiles(context.Background(), []byte{}, nil, &bytes.Buffer{}) + }, "Expected LoadWithFiles to panic with 'not implemented'") +} + +func TestNatsRequestDataSource_Load(t *testing.T) { + tests := []struct { + name string + input string + mockSetup func(*MockAdapter) + expectError bool + expectedOutput string + }{ + { + name: "successful request", + input: `{"subject":"test-subject", "data":{"message":"hello"}, "providerId":"test-provider"}`, + mockSetup: func(m *MockAdapter) { + m.On("Request", mock.Anything, mock.MatchedBy(func(event PublishAndRequestEventConfiguration) bool { + return event.ProviderID == "test-provider" && + event.Subject == "test-subject" && + string(event.Data) == `{"message":"hello"}` + }), mock.Anything).Run(func(args mock.Arguments) { 
+ // Write response to the output buffer + w := args.Get(2).(io.Writer) + _, _ = w.Write([]byte(`{"response":"success"}`)) + }).Return(nil) + }, + expectError: false, + expectedOutput: `{"response":"success"}`, + }, + { + name: "request error", + input: `{"subject":"test-subject", "data":{"message":"hello"}, "providerId":"test-provider"}`, + mockSetup: func(m *MockAdapter) { + m.On("Request", mock.Anything, mock.Anything, mock.Anything).Return(errors.New("request error")) + }, + expectError: true, + expectedOutput: "", + }, + { + name: "invalid input json", + input: `{"invalid json":`, + mockSetup: func(m *MockAdapter) {}, + expectError: true, + expectedOutput: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockAdapter := NewMockAdapter(t) + tt.mockSetup(mockAdapter) + + dataSource := &NatsRequestDataSource{ + pubSub: mockAdapter, + } + + ctx := context.Background() + input := []byte(tt.input) + var out bytes.Buffer + + err := dataSource.Load(ctx, input, &out) + + if tt.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + if tt.expectedOutput != "" { + assert.Equal(t, tt.expectedOutput, out.String()) + } + } + }) + } +} + +func TestNatsRequestDataSource_LoadWithFiles(t *testing.T) { + dataSource := &NatsRequestDataSource{} + assert.Panics(t, func() { + dataSource.LoadWithFiles(context.Background(), []byte{}, nil, &bytes.Buffer{}) + }, "Expected LoadWithFiles to panic with 'not implemented'") +} diff --git a/router/pkg/pubsub/nats/mocks.go b/router/pkg/pubsub/nats/mocks.go new file mode 100644 index 0000000000..de49c6ae7e --- /dev/null +++ b/router/pkg/pubsub/nats/mocks.go @@ -0,0 +1,325 @@ +// Code generated by mockery; DO NOT EDIT. +// github.com/vektra/mockery +// template: testify + +package nats + +import ( + "context" + "io" + + mock "github.com/stretchr/testify/mock" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// NewMockAdapter creates a new instance of MockAdapter. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockAdapter(t interface { + mock.TestingT + Cleanup(func()) +}) *MockAdapter { + mock := &MockAdapter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// MockAdapter is an autogenerated mock type for the Adapter type +type MockAdapter struct { + mock.Mock +} + +type MockAdapter_Expecter struct { + mock *mock.Mock +} + +func (_m *MockAdapter) EXPECT() *MockAdapter_Expecter { + return &MockAdapter_Expecter{mock: &_m.Mock} +} + +// Publish provides a mock function for the type MockAdapter +func (_mock *MockAdapter) Publish(ctx context.Context, event PublishAndRequestEventConfiguration) error { + ret := _mock.Called(ctx, event) + + if len(ret) == 0 { + panic("no return value specified for Publish") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(context.Context, PublishAndRequestEventConfiguration) error); ok { + r0 = returnFunc(ctx, event) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockAdapter_Publish_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Publish' +type MockAdapter_Publish_Call struct { + *mock.Call +} + +// Publish is a helper method to define mock.On call +// - ctx context.Context +// - event PublishAndRequestEventConfiguration +func (_e *MockAdapter_Expecter) Publish(ctx interface{}, event interface{}) *MockAdapter_Publish_Call { + return &MockAdapter_Publish_Call{Call: _e.mock.On("Publish", ctx, event)} +} + +func (_c *MockAdapter_Publish_Call) Run(run func(ctx context.Context, event PublishAndRequestEventConfiguration)) *MockAdapter_Publish_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 PublishAndRequestEventConfiguration + if args[1] != nil { + arg1 = 
args[1].(PublishAndRequestEventConfiguration) + } + run( + arg0, + arg1, + ) + }) + return _c +} + +func (_c *MockAdapter_Publish_Call) Return(err error) *MockAdapter_Publish_Call { + _c.Call.Return(err) + return _c +} + +func (_c *MockAdapter_Publish_Call) RunAndReturn(run func(ctx context.Context, event PublishAndRequestEventConfiguration) error) *MockAdapter_Publish_Call { + _c.Call.Return(run) + return _c +} + +// Request provides a mock function for the type MockAdapter +func (_mock *MockAdapter) Request(ctx context.Context, event PublishAndRequestEventConfiguration, w io.Writer) error { + ret := _mock.Called(ctx, event, w) + + if len(ret) == 0 { + panic("no return value specified for Request") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(context.Context, PublishAndRequestEventConfiguration, io.Writer) error); ok { + r0 = returnFunc(ctx, event, w) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockAdapter_Request_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Request' +type MockAdapter_Request_Call struct { + *mock.Call +} + +// Request is a helper method to define mock.On call +// - ctx context.Context +// - event PublishAndRequestEventConfiguration +// - w io.Writer +func (_e *MockAdapter_Expecter) Request(ctx interface{}, event interface{}, w interface{}) *MockAdapter_Request_Call { + return &MockAdapter_Request_Call{Call: _e.mock.On("Request", ctx, event, w)} +} + +func (_c *MockAdapter_Request_Call) Run(run func(ctx context.Context, event PublishAndRequestEventConfiguration, w io.Writer)) *MockAdapter_Request_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 PublishAndRequestEventConfiguration + if args[1] != nil { + arg1 = args[1].(PublishAndRequestEventConfiguration) + } + var arg2 io.Writer + if args[2] != nil { + arg2 = args[2].(io.Writer) + } + run( + arg0, + arg1, + arg2, + ) + }) + 
return _c +} + +func (_c *MockAdapter_Request_Call) Return(err error) *MockAdapter_Request_Call { + _c.Call.Return(err) + return _c +} + +func (_c *MockAdapter_Request_Call) RunAndReturn(run func(ctx context.Context, event PublishAndRequestEventConfiguration, w io.Writer) error) *MockAdapter_Request_Call { + _c.Call.Return(run) + return _c +} + +// Shutdown provides a mock function for the type MockAdapter +func (_mock *MockAdapter) Shutdown(ctx context.Context) error { + ret := _mock.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Shutdown") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = returnFunc(ctx) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockAdapter_Shutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Shutdown' +type MockAdapter_Shutdown_Call struct { + *mock.Call +} + +// Shutdown is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockAdapter_Expecter) Shutdown(ctx interface{}) *MockAdapter_Shutdown_Call { + return &MockAdapter_Shutdown_Call{Call: _e.mock.On("Shutdown", ctx)} +} + +func (_c *MockAdapter_Shutdown_Call) Run(run func(ctx context.Context)) *MockAdapter_Shutdown_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockAdapter_Shutdown_Call) Return(err error) *MockAdapter_Shutdown_Call { + _c.Call.Return(err) + return _c +} + +func (_c *MockAdapter_Shutdown_Call) RunAndReturn(run func(ctx context.Context) error) *MockAdapter_Shutdown_Call { + _c.Call.Return(run) + return _c +} + +// Startup provides a mock function for the type MockAdapter +func (_mock *MockAdapter) Startup(ctx context.Context) error { + ret := _mock.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Startup") + } + + var r0 error + if returnFunc, ok := 
ret.Get(0).(func(context.Context) error); ok { + r0 = returnFunc(ctx) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockAdapter_Startup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Startup' +type MockAdapter_Startup_Call struct { + *mock.Call +} + +// Startup is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockAdapter_Expecter) Startup(ctx interface{}) *MockAdapter_Startup_Call { + return &MockAdapter_Startup_Call{Call: _e.mock.On("Startup", ctx)} +} + +func (_c *MockAdapter_Startup_Call) Run(run func(ctx context.Context)) *MockAdapter_Startup_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockAdapter_Startup_Call) Return(err error) *MockAdapter_Startup_Call { + _c.Call.Return(err) + return _c +} + +func (_c *MockAdapter_Startup_Call) RunAndReturn(run func(ctx context.Context) error) *MockAdapter_Startup_Call { + _c.Call.Return(run) + return _c +} + +// Subscribe provides a mock function for the type MockAdapter +func (_mock *MockAdapter) Subscribe(ctx context.Context, event SubscriptionEventConfiguration, updater resolve.SubscriptionUpdater) error { + ret := _mock.Called(ctx, event, updater) + + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(context.Context, SubscriptionEventConfiguration, resolve.SubscriptionUpdater) error); ok { + r0 = returnFunc(ctx, event, updater) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockAdapter_Subscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Subscribe' +type MockAdapter_Subscribe_Call struct { + *mock.Call +} + +// Subscribe is a helper method to define mock.On call +// - ctx context.Context +// - event SubscriptionEventConfiguration +// - updater 
resolve.SubscriptionUpdater +func (_e *MockAdapter_Expecter) Subscribe(ctx interface{}, event interface{}, updater interface{}) *MockAdapter_Subscribe_Call { + return &MockAdapter_Subscribe_Call{Call: _e.mock.On("Subscribe", ctx, event, updater)} +} + +func (_c *MockAdapter_Subscribe_Call) Run(run func(ctx context.Context, event SubscriptionEventConfiguration, updater resolve.SubscriptionUpdater)) *MockAdapter_Subscribe_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 SubscriptionEventConfiguration + if args[1] != nil { + arg1 = args[1].(SubscriptionEventConfiguration) + } + var arg2 resolve.SubscriptionUpdater + if args[2] != nil { + arg2 = args[2].(resolve.SubscriptionUpdater) + } + run( + arg0, + arg1, + arg2, + ) + }) + return _c +} + +func (_c *MockAdapter_Subscribe_Call) Return(err error) *MockAdapter_Subscribe_Call { + _c.Call.Return(err) + return _c +} + +func (_c *MockAdapter_Subscribe_Call) RunAndReturn(run func(ctx context.Context, event SubscriptionEventConfiguration, updater resolve.SubscriptionUpdater) error) *MockAdapter_Subscribe_Call { + _c.Call.Return(run) + return _c +} diff --git a/router/pkg/pubsub/nats/provider_builder.go b/router/pkg/pubsub/nats/provider_builder.go new file mode 100644 index 0000000000..e1ae0a1e70 --- /dev/null +++ b/router/pkg/pubsub/nats/provider_builder.go @@ -0,0 +1,148 @@ +package nats + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/nats-io/nats.go" + nodev1 "github.com/wundergraph/cosmo/router/gen/proto/wg/cosmo/node/v1" + "github.com/wundergraph/cosmo/router/pkg/config" + "github.com/wundergraph/cosmo/router/pkg/pubsub/datasource" + "go.uber.org/zap" +) + +const providerTypeID = "nats" + +type ProviderBuilder struct { + ctx context.Context + logger *zap.Logger + hostName string + routerListenAddr string + adapters map[string]Adapter +} + +func (p *ProviderBuilder) TypeID() string { + return 
providerTypeID +} + +func (p *ProviderBuilder) BuildEngineDataSourceFactory(data *nodev1.NatsEventConfiguration) (datasource.EngineDataSourceFactory, error) { + providerId := data.GetEngineEventConfiguration().GetProviderId() + adapter, ok := p.adapters[providerId] + if !ok { + return nil, fmt.Errorf("failed to get adapter for provider %s with ID %s", p.TypeID(), providerId) + } + + var eventType EventType + switch data.GetEngineEventConfiguration().GetType() { + case nodev1.EventType_PUBLISH: + eventType = EventTypePublish + case nodev1.EventType_SUBSCRIBE: + eventType = EventTypeSubscribe + case nodev1.EventType_REQUEST: + eventType = EventTypeRequest + default: + return nil, fmt.Errorf("unsupported event type: %s", data.GetEngineEventConfiguration().GetType()) + } + dataSourceFactory := &EngineDataSourceFactory{ + NatsAdapter: adapter, + fieldName: data.GetEngineEventConfiguration().GetFieldName(), + eventType: eventType, + subjects: data.GetSubjects(), + providerId: providerId, + withStreamConfiguration: data.GetStreamConfiguration() != nil, + } + + if data.GetStreamConfiguration() != nil { + dataSourceFactory.withStreamConfiguration = true + dataSourceFactory.consumerName = data.GetStreamConfiguration().GetConsumerName() + dataSourceFactory.streamName = data.GetStreamConfiguration().GetStreamName() + dataSourceFactory.consumerInactiveThreshold = data.GetStreamConfiguration().GetConsumerInactiveThreshold() + } + + return dataSourceFactory, nil +} + +func (p *ProviderBuilder) BuildProvider(provider config.NatsEventSource) (datasource.Provider, error) { + adapter, pubSubProvider, err := buildProvider(p.ctx, provider, p.logger, p.hostName, p.routerListenAddr) + if err != nil { + return nil, err + } + p.adapters[provider.ID] = adapter + + return pubSubProvider, nil +} + +func buildNatsOptions(eventSource config.NatsEventSource, logger *zap.Logger) ([]nats.Option, error) { + opts := []nats.Option{ + nats.Name(fmt.Sprintf("cosmo.router.edfs.nats.%s", 
eventSource.ID)), + nats.ReconnectJitter(500*time.Millisecond, 2*time.Second), + nats.ClosedHandler(func(conn *nats.Conn) { + logger.Info("NATS connection closed", zap.String("provider_id", eventSource.ID), zap.Error(conn.LastError())) + }), + nats.ConnectHandler(func(nc *nats.Conn) { + logger.Info("NATS connection established", zap.String("provider_id", eventSource.ID), zap.String("url", nc.ConnectedUrlRedacted())) + }), + nats.DisconnectErrHandler(func(nc *nats.Conn, err error) { + if err != nil { + logger.Error("NATS disconnected; will attempt to reconnect", zap.Error(err), zap.String("provider_id", eventSource.ID)) + } else { + logger.Info("NATS disconnected", zap.String("provider_id", eventSource.ID)) + } + }), + nats.ErrorHandler(func(conn *nats.Conn, subscription *nats.Subscription, err error) { + if errors.Is(err, nats.ErrSlowConsumer) { + logger.Warn( + "NATS slow consumer detected. Events are being dropped. Please consider increasing the buffer size or reducing the number of messages being sent.", + zap.Error(err), + zap.String("provider_id", eventSource.ID), + ) + } else { + logger.Error("NATS error", zap.Error(err)) + } + }), + nats.ReconnectHandler(func(conn *nats.Conn) { + logger.Info("NATS reconnected", zap.String("provider_id", eventSource.ID), zap.String("url", conn.ConnectedUrlRedacted())) + }), + } + + if eventSource.Authentication != nil { + if eventSource.Authentication.Token != nil { + opts = append(opts, nats.Token(*eventSource.Authentication.Token)) + } else if eventSource.Authentication.UserInfo.Username != nil && eventSource.Authentication.UserInfo.Password != nil { + opts = append(opts, nats.UserInfo(*eventSource.Authentication.UserInfo.Username, *eventSource.Authentication.UserInfo.Password)) + } + } + + return opts, nil +} + +func buildProvider(ctx context.Context, provider config.NatsEventSource, logger *zap.Logger, hostName string, routerListenAddr string) (Adapter, datasource.Provider, error) { + options, err := 
buildNatsOptions(provider, logger) + if err != nil { + return nil, nil, fmt.Errorf("failed to build options for Nats provider with ID \"%s\": %w", provider.ID, err) + } + adapter, err := NewAdapter(ctx, logger, provider.URL, options, hostName, routerListenAddr) + if err != nil { + return nil, nil, fmt.Errorf("failed to create adapter for Nats provider with ID \"%s\": %w", provider.ID, err) + } + pubSubProvider := datasource.NewPubSubProvider(provider.ID, providerTypeID, adapter, logger) + + return adapter, pubSubProvider, nil +} + +func NewProviderBuilder( + ctx context.Context, + logger *zap.Logger, + hostName string, + routerListenAddr string, +) datasource.ProviderBuilder[config.NatsEventSource, *nodev1.NatsEventConfiguration] { + return &ProviderBuilder{ + ctx: ctx, + logger: logger, + hostName: hostName, + routerListenAddr: routerListenAddr, + adapters: make(map[string]Adapter), + } +} diff --git a/router/pkg/pubsub/nats/provider_builder_test.go b/router/pkg/pubsub/nats/provider_builder_test.go new file mode 100644 index 0000000000..d2646b1941 --- /dev/null +++ b/router/pkg/pubsub/nats/provider_builder_test.go @@ -0,0 +1,94 @@ +package nats + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/wundergraph/cosmo/router/pkg/config" + "github.com/wundergraph/cosmo/router/pkg/pubsub/datasource" + "go.uber.org/zap/zaptest" +) + +func TestBuildNatsOptions(t *testing.T) { + t.Run("basic configuration", func(t *testing.T) { + cfg := config.NatsEventSource{ + ID: "test-nats", + URL: "nats://localhost:4222", + } + logger := zaptest.NewLogger(t) + + opts, err := buildNatsOptions(cfg, logger) + require.NoError(t, err) + require.NotEmpty(t, opts) + }) + + t.Run("with token authentication", func(t *testing.T) { + token := "test-token" + cfg := config.NatsEventSource{ + ID: "test-nats", + URL: "nats://localhost:4222", + Authentication: &config.NatsAuthentication{ + NatsTokenBasedAuthentication: 
config.NatsTokenBasedAuthentication{ + Token: &token, + }, + }, + } + logger := zaptest.NewLogger(t) + + opts, err := buildNatsOptions(cfg, logger) + require.NoError(t, err) + require.NotEmpty(t, opts) + // Can't directly check for token options, but we can verify options are present + require.Greater(t, len(opts), 7) // Basic options (7) + token option + }) + + t.Run("with user/password authentication", func(t *testing.T) { + username := "user" + password := "pass" + cfg := config.NatsEventSource{ + ID: "test-nats", + URL: "nats://localhost:4222", + Authentication: &config.NatsAuthentication{ + UserInfo: config.NatsCredentialsAuthentication{ + Username: &username, + Password: &password, + }, + }, + } + logger := zaptest.NewLogger(t) + + opts, err := buildNatsOptions(cfg, logger) + require.NoError(t, err) + require.NotEmpty(t, opts) + // Can't directly check for auth options, but we can verify options are present + require.Greater(t, len(opts), 7) // Basic options (7) + user info option + }) +} + +func TestPubSubProviderBuilderFactory(t *testing.T) { + t.Run("creates provider with configured adapters", func(t *testing.T) { + providerId := "test-provider" + + cfg := config.NatsEventSource{ + ID: providerId, + URL: "nats://localhost:4222", + } + + logger := zaptest.NewLogger(t) + + ctx := context.Background() + + builder := NewProviderBuilder(ctx, logger, "host", "addr") + require.NotNil(t, builder) + provider, err := builder.BuildProvider(cfg) + require.NoError(t, err) + + // Check the returned provider + natsProvider, ok := provider.(*datasource.PubSubProvider) + require.True(t, ok) + assert.NotNil(t, natsProvider.Logger) + assert.NotNil(t, natsProvider.Adapter) + }) +} diff --git a/router/pkg/pubsub/nats/utils.go b/router/pkg/pubsub/nats/utils.go new file mode 100644 index 0000000000..0229f7a41c --- /dev/null +++ b/router/pkg/pubsub/nats/utils.go @@ -0,0 +1,37 @@ +package nats + +import ( + "strings" +) + +const ( + fwc = '>' + tsep = "." 
+) + +func isValidNatsSubject(subject string) bool { + if subject == "" { + return false + } + sfwc := false + tokens := strings.Split(subject, tsep) + for _, t := range tokens { + length := len(t) + if length == 0 || sfwc { + return false + } + if length > 1 { + if strings.ContainsAny(t, "\t\n\f\r ") { + return false + } + continue + } + switch t[0] { + case fwc: + sfwc = true + case ' ', '\t', '\n', '\r', '\f': + return false + } + } + return true +} diff --git a/router/pkg/pubsub/nats/utils_test.go b/router/pkg/pubsub/nats/utils_test.go new file mode 100644 index 0000000000..9ec92c78cc --- /dev/null +++ b/router/pkg/pubsub/nats/utils_test.go @@ -0,0 +1,83 @@ +package nats + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIsValidNatsSubject(t *testing.T) { + tests := []struct { + name string + subject string + want bool + }{ + { + name: "empty string", + subject: "", + want: false, + }, + { + name: "simple valid subject", + subject: "test.subject", + want: true, + }, + { + name: "valid subject with wildcard", + subject: "test.>", + want: true, + }, + { + name: "invalid with space", + subject: "test subject", + want: false, + }, + { + name: "invalid with tab", + subject: "test\tsubject", + want: false, + }, + { + name: "invalid with newline", + subject: "test\nsubject", + want: false, + }, + { + name: "invalid with empty token", + subject: "test..subject", + want: false, + }, + { + name: "wildcard not at end", + subject: "test.>.subject", + want: false, + }, + { + name: "contains a space", + subject: " ", + want: false, + }, + { + name: "contains a tab", + subject: "\t", + want: false, + }, + { + name: "contains a newline", + subject: "\n", + want: false, + }, + { + name: "contains a form feed", + subject: "\f", + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := isValidNatsSubject(tt.subject) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/router/pkg/pubsub/pubsub.go 
b/router/pkg/pubsub/pubsub.go new file mode 100644 index 0000000000..9a9bf58807 --- /dev/null +++ b/router/pkg/pubsub/pubsub.go @@ -0,0 +1,187 @@ +package pubsub + +import ( + "context" + "fmt" + "slices" + "strconv" + + nodev1 "github.com/wundergraph/cosmo/router/gen/proto/wg/cosmo/node/v1" + "github.com/wundergraph/cosmo/router/pkg/config" + pubsub_datasource "github.com/wundergraph/cosmo/router/pkg/pubsub/datasource" + "github.com/wundergraph/cosmo/router/pkg/pubsub/kafka" + "github.com/wundergraph/cosmo/router/pkg/pubsub/nats" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "go.uber.org/zap" +) + +type DataSourceConfigurationWithMetadata struct { + Configuration *nodev1.DataSourceConfiguration + Metadata *plan.DataSourceMetadata +} + +type GetID interface { + GetID() string +} + +type GetEngineEventConfiguration interface { + GetEngineEventConfiguration() *nodev1.EngineEventConfiguration +} + +type EngineEventConfiguration interface { + GetTypeName() string + GetFieldName() string + GetProviderId() string +} + +type ProviderNotDefinedError struct { + ProviderID string + ProviderTypeID string +} + +type dsConfAndEvents[E GetEngineEventConfiguration] struct { + dsConf *DataSourceConfigurationWithMetadata + events []E +} + +func (e *ProviderNotDefinedError) Error() string { + return fmt.Sprintf("%s provider with ID %s is not defined", e.ProviderTypeID, e.ProviderID) +} + +// BuildProvidersAndDataSources is a generic function that builds providers and data sources for the given +// EventsConfiguration and DataSourceConfigurationWithMetadata +func BuildProvidersAndDataSources( + ctx context.Context, + config config.EventsConfiguration, + logger *zap.Logger, + dsConfs []DataSourceConfigurationWithMetadata, + hostName string, + routerListenAddr string, +) ([]pubsub_datasource.Provider, []plan.DataSource, error) { + var pubSubProviders []pubsub_datasource.Provider + var outs []plan.DataSource + + // initialize Kafka providers and data sources + 
kafkaBuilder := kafka.NewProviderBuilder(ctx, logger, hostName, routerListenAddr) + kafkaDsConfsWithEvents := []dsConfAndEvents[*nodev1.KafkaEventConfiguration]{} + for _, dsConf := range dsConfs { + kafkaDsConfsWithEvents = append(kafkaDsConfsWithEvents, dsConfAndEvents[*nodev1.KafkaEventConfiguration]{ + dsConf: &dsConf, + events: dsConf.Configuration.GetCustomEvents().GetKafka(), + }) + } + kafkaPubSubProviders, kafkaOuts, err := build(ctx, kafkaBuilder, config.Providers.Kafka, kafkaDsConfsWithEvents) + if err != nil { + return nil, nil, err + } + pubSubProviders = append(pubSubProviders, kafkaPubSubProviders...) + outs = append(outs, kafkaOuts...) + + // initialize NATS providers and data sources + natsBuilder := nats.NewProviderBuilder(ctx, logger, hostName, routerListenAddr) + natsDsConfsWithEvents := []dsConfAndEvents[*nodev1.NatsEventConfiguration]{} + for _, dsConf := range dsConfs { + natsDsConfsWithEvents = append(natsDsConfsWithEvents, dsConfAndEvents[*nodev1.NatsEventConfiguration]{ + dsConf: &dsConf, + events: dsConf.Configuration.GetCustomEvents().GetNats(), + }) + } + natsPubSubProviders, natsOuts, err := build(ctx, natsBuilder, config.Providers.Nats, natsDsConfsWithEvents) + if err != nil { + return nil, nil, err + } + pubSubProviders = append(pubSubProviders, natsPubSubProviders...) + outs = append(outs, natsOuts...) 
+ + return pubSubProviders, outs, nil +} + +func build[P GetID, E GetEngineEventConfiguration](ctx context.Context, builder pubsub_datasource.ProviderBuilder[P, E], providersData []P, dsConfs []dsConfAndEvents[E]) ([]pubsub_datasource.Provider, []plan.DataSource, error) { + var pubSubProviders []pubsub_datasource.Provider + var outs []plan.DataSource + + // check used providers + usedProviderIds := []string{} + for _, dsConf := range dsConfs { + for _, event := range dsConf.events { + if !slices.Contains(usedProviderIds, event.GetEngineEventConfiguration().GetProviderId()) { + usedProviderIds = append(usedProviderIds, event.GetEngineEventConfiguration().GetProviderId()) + } + } + } + + // initialize providers if used + providerIds := []string{} + for _, providerData := range providersData { + if !slices.Contains(usedProviderIds, providerData.GetID()) { + continue + } + provider, err := builder.BuildProvider(providerData) + if err != nil { + return nil, nil, err + } + pubSubProviders = append(pubSubProviders, provider) + providerIds = append(providerIds, provider.ID()) + } + + // check if all used providers are initialized + for _, providerId := range usedProviderIds { + if !slices.Contains(providerIds, providerId) { + return pubSubProviders, nil, &ProviderNotDefinedError{ + ProviderID: providerId, + ProviderTypeID: builder.TypeID(), + } + } + } + + // build data sources for each event + for _, dsConf := range dsConfs { + for i, event := range dsConf.events { + plannerConfig := pubsub_datasource.NewPlannerConfig(builder, event) + out, err := plan.NewDataSourceConfiguration( + dsConf.dsConf.Configuration.Id+"-"+builder.TypeID()+"-"+strconv.Itoa(i), + pubsub_datasource.NewPlannerFactory(ctx, plannerConfig), + getFilteredDataSourceMetadata(event.GetEngineEventConfiguration(), dsConf.dsConf.Metadata), + plannerConfig, + ) + if err != nil { + return nil, nil, err + } + outs = append(outs, out) + } + } + + return pubSubProviders, outs, nil +} + +func 
getFilteredDataSourceMetadata[E EngineEventConfiguration](event E, dsMeta *plan.DataSourceMetadata) *plan.DataSourceMetadata { + // find used root types and fields + rootFields := make(map[string][]string) + + typeName := event.GetTypeName() + fieldName := event.GetFieldName() + if _, ok := rootFields[typeName]; !ok { + rootFields[typeName] = []string{} + } + rootFields[typeName] = append(rootFields[typeName], fieldName) + + // filter dsMeta.RootNodes + newRootNodes := []plan.TypeField{} + for _, node := range dsMeta.RootNodes { + newRootNode := plan.TypeField{ + TypeName: node.TypeName, + FieldNames: []string{}, + ExternalFieldNames: node.ExternalFieldNames, + } + for _, fieldName := range node.FieldNames { + if slices.Contains(rootFields[node.TypeName], fieldName) { + newRootNode.FieldNames = append(newRootNode.FieldNames, fieldName) + } + } + newRootNodes = append(newRootNodes, newRootNode) + } + newDsMeta := *dsMeta + newDsMeta.RootNodes = newRootNodes + + return &newDsMeta +} diff --git a/router/pkg/pubsub/pubsub_test.go b/router/pkg/pubsub/pubsub_test.go new file mode 100644 index 0000000000..9cf54bc6fa --- /dev/null +++ b/router/pkg/pubsub/pubsub_test.go @@ -0,0 +1,356 @@ +package pubsub + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + nodev1 "github.com/wundergraph/cosmo/router/gen/proto/wg/cosmo/node/v1" + "github.com/wundergraph/cosmo/router/pkg/config" + "github.com/wundergraph/cosmo/router/pkg/pubsub/datasource" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "go.uber.org/zap" +) + +func TestBuild_OK(t *testing.T) { + ctx := context.Background() + mockBuilder := datasource.NewMockProviderBuilder[config.NatsEventSource, *nodev1.NatsEventConfiguration](t) + mockPubSubProvider := datasource.NewMockProvider(t) + + dsMeta := &plan.DataSourceMetadata{ + RootNodes: []plan.TypeField{ + { + TypeName: "Type1", + FieldNames: []string{"Field1", "Field2"}, + }, + }, + } 
+ + // Mock input data + event := &nodev1.EngineEventConfiguration{ + ProviderId: "provider-1", + TypeName: "Type1", + FieldName: "Field1", + Type: nodev1.EventType_PUBLISH, + } + dsConf := DataSourceConfigurationWithMetadata{ + Configuration: &nodev1.DataSourceConfiguration{ + Id: "test-id", + CustomEvents: &nodev1.DataSourceCustomEvents{ + Nats: []*nodev1.NatsEventConfiguration{ + { + EngineEventConfiguration: event, + }, + }, + }, + }, + Metadata: dsMeta, + } + dsConfs := []dsConfAndEvents[*nodev1.NatsEventConfiguration]{ + { + dsConf: &dsConf, + events: dsConf.Configuration.GetCustomEvents().GetNats(), + }, + } + natsEventSources := []config.NatsEventSource{ + {ID: "provider-1"}, + } + + mockPubSubProvider.On("ID").Return("provider-1") + + mockBuilder.On("TypeID").Return("nats") + mockBuilder.On("BuildProvider", natsEventSources[0]).Return(mockPubSubProvider, nil) + + // ctx, kafkaBuilder, config.Providers.Kafka, kafkaDsConfsWithEvents + // Execute the function + providers, dataSources, err := build(ctx, mockBuilder, natsEventSources, dsConfs) + + // Assertions + assert.NoError(t, err) + require.Len(t, providers, 1) + require.Len(t, dataSources, 1) + assert.True(t, dataSources[0].HasRootNode("Type1", "Field1")) + assert.False(t, dataSources[0].HasRootNode("Type1", "Field2")) +} + +func TestBuild_ProviderError(t *testing.T) { + ctx := context.Background() + mockBuilder := datasource.NewMockProviderBuilder[config.NatsEventSource, *nodev1.NatsEventConfiguration](t) + + dsMeta := &plan.DataSourceMetadata{ + RootNodes: []plan.TypeField{ + { + TypeName: "Type1", + FieldNames: []string{"Field1", "Field2"}, + }, + }, + } + + // Mock input data + event := &nodev1.EngineEventConfiguration{ + ProviderId: "provider-1", + TypeName: "Type1", + FieldName: "Field1", + Type: nodev1.EventType_PUBLISH, + } + dsConf := DataSourceConfigurationWithMetadata{ + Configuration: &nodev1.DataSourceConfiguration{ + Id: "test-id", + CustomEvents: &nodev1.DataSourceCustomEvents{ + Nats: 
[]*nodev1.NatsEventConfiguration{ + { + EngineEventConfiguration: event, + }, + }, + }, + }, + Metadata: dsMeta, + } + dsConfs := []dsConfAndEvents[*nodev1.NatsEventConfiguration]{ + { + dsConf: &dsConf, + events: dsConf.Configuration.GetCustomEvents().GetNats(), + }, + } + natsEventSources := []config.NatsEventSource{ + {ID: "provider-1"}, + } + + mockBuilder.On("BuildProvider", natsEventSources[0]).Return(nil, errors.New("provider error")) + + // Execute the function + providers, dataSources, err := build(ctx, mockBuilder, natsEventSources, dsConfs) + + // Assertions + assert.Error(t, err) + require.Len(t, providers, 0) + require.Len(t, dataSources, 0) +} + +func TestBuild_ShouldGetAnErrorIfProviderIsNotDefined(t *testing.T) { + ctx := context.Background() + mockBuilder := datasource.NewMockProviderBuilder[config.NatsEventSource, *nodev1.NatsEventConfiguration](t) + //mockPubSubProvider := datasource.NewMockPubSubProvider(t) + + dsMeta := &plan.DataSourceMetadata{ + RootNodes: []plan.TypeField{ + { + TypeName: "Type1", + FieldNames: []string{"Field1", "Field2"}, + }, + }, + } + + // Mock input data + event := &nodev1.EngineEventConfiguration{ + ProviderId: "provider-2", + TypeName: "Type1", + FieldName: "Field1", + Type: nodev1.EventType_PUBLISH, + } + dsConf := DataSourceConfigurationWithMetadata{ + Configuration: &nodev1.DataSourceConfiguration{ + Id: "test-id", + CustomEvents: &nodev1.DataSourceCustomEvents{ + Nats: []*nodev1.NatsEventConfiguration{ + { + EngineEventConfiguration: event, + }, + }, + }, + }, + Metadata: dsMeta, + } + dsConfs := []dsConfAndEvents[*nodev1.NatsEventConfiguration]{ + { + dsConf: &dsConf, + events: dsConf.Configuration.GetCustomEvents().GetNats(), + }, + } + natsEventSources := []config.NatsEventSource{ + {ID: "provider-1"}, + } + + mockBuilder.On("TypeID").Return("nats") + + // Execute the function + providers, dataSources, err := build(ctx, mockBuilder, natsEventSources, dsConfs) + + // Assertions + assert.Error(t, err) + 
assert.IsType(t, &ProviderNotDefinedError{ + ProviderID: "provider-2", + ProviderTypeID: "nats", + }, err) + require.Len(t, providers, 0) + require.Len(t, dataSources, 0) +} + +func TestBuild_ShouldNotInitializeProviderIfNotUsed(t *testing.T) { + ctx := context.Background() + mockBuilder := datasource.NewMockProviderBuilder[config.NatsEventSource, *nodev1.NatsEventConfiguration](t) + mockPubSubUsedProvider := datasource.NewMockProvider(t) + + dsMeta := &plan.DataSourceMetadata{ + RootNodes: []plan.TypeField{ + { + TypeName: "Type1", + FieldNames: []string{"Field1", "Field2"}, + }, + }, + } + + // Mock input data + event := &nodev1.EngineEventConfiguration{ + ProviderId: "provider-2", + TypeName: "Type1", + FieldName: "Field1", + Type: nodev1.EventType_PUBLISH, + } + dsConf := DataSourceConfigurationWithMetadata{ + Configuration: &nodev1.DataSourceConfiguration{ + Id: "test-id", + CustomEvents: &nodev1.DataSourceCustomEvents{ + Nats: []*nodev1.NatsEventConfiguration{ + { + EngineEventConfiguration: event, + }, + }, + }, + }, + Metadata: dsMeta, + } + dsConfs := []dsConfAndEvents[*nodev1.NatsEventConfiguration]{ + { + dsConf: &dsConf, + events: dsConf.Configuration.GetCustomEvents().GetNats(), + }, + } + natsEventSources := []config.NatsEventSource{ + {ID: "provider-1"}, + {ID: "provider-2"}, + } + + mockPubSubUsedProvider.On("ID").Return("provider-2") + + mockBuilder.On("TypeID").Return("nats") + mockBuilder.On("BuildProvider", natsEventSources[1]).Return(mockPubSubUsedProvider, nil) + + // Execute the function + providers, dataSources, err := build(ctx, mockBuilder, natsEventSources, dsConfs) + + // Assertions + assert.NoError(t, err) + require.Len(t, providers, 1) + require.Len(t, dataSources, 1) + assert.True(t, dataSources[0].HasRootNode("Type1", "Field1")) + assert.False(t, dataSources[0].HasRootNode("Type1", "Field2")) +} + +func TestBuildProvidersAndDataSources_Nats_OK(t *testing.T) { + ctx := context.Background() + + dsMeta := &plan.DataSourceMetadata{ + 
RootNodes: []plan.TypeField{ + { + TypeName: "Type1", + FieldNames: []string{"Field1", "Field2"}, + }, + }, + } + + // Mock input data + event := &nodev1.EngineEventConfiguration{ + ProviderId: "provider-1", + TypeName: "Type1", + FieldName: "Field1", + Type: nodev1.EventType_PUBLISH, + } + dsConf := DataSourceConfigurationWithMetadata{ + Configuration: &nodev1.DataSourceConfiguration{ + Id: "test-id", + CustomEvents: &nodev1.DataSourceCustomEvents{ + Nats: []*nodev1.NatsEventConfiguration{ + { + EngineEventConfiguration: event, + }, + }, + }, + }, + Metadata: dsMeta, + } + dsConfs := []DataSourceConfigurationWithMetadata{dsConf} + + // Execute the function + providers, dataSources, err := BuildProvidersAndDataSources(ctx, config.EventsConfiguration{ + Providers: config.EventProviders{ + Nats: []config.NatsEventSource{ + {ID: "provider-1"}, + }, + }, + }, zap.NewNop(), dsConfs, "host", "addr") + + // Assertions + assert.NoError(t, err) + require.Len(t, providers, 1) + require.Equal(t, providers[0].ID(), "provider-1") + require.Equal(t, providers[0].TypeID(), "nats") + require.Len(t, dataSources, 1) + assert.True(t, dataSources[0].HasRootNode("Type1", "Field1")) + assert.False(t, dataSources[0].HasRootNode("Type1", "Field2")) +} + +func TestBuildProvidersAndDataSources_Kafka_OK(t *testing.T) { + ctx := context.Background() + + dsMeta := &plan.DataSourceMetadata{ + RootNodes: []plan.TypeField{ + { + TypeName: "Type1", + FieldNames: []string{"Field1", "Field2"}, + }, + }, + } + + // Mock input data + event := &nodev1.EngineEventConfiguration{ + ProviderId: "provider-1", + TypeName: "Type1", + FieldName: "Field1", + Type: nodev1.EventType_PUBLISH, + } + dsConf := DataSourceConfigurationWithMetadata{ + Configuration: &nodev1.DataSourceConfiguration{ + Id: "test-id", + CustomEvents: &nodev1.DataSourceCustomEvents{ + Kafka: []*nodev1.KafkaEventConfiguration{ + { + EngineEventConfiguration: event, + }, + }, + }, + }, + Metadata: dsMeta, + } + dsConfs := 
[]DataSourceConfigurationWithMetadata{dsConf} + + // Execute the function + providers, dataSources, err := BuildProvidersAndDataSources(ctx, config.EventsConfiguration{ + Providers: config.EventProviders{ + Kafka: []config.KafkaEventSource{ + {ID: "provider-1"}, + }, + }, + }, zap.NewNop(), dsConfs, "host", "addr") + + // Assertions + assert.NoError(t, err) + require.Len(t, providers, 1) + require.Equal(t, providers[0].ID(), "provider-1") + require.Equal(t, providers[0].TypeID(), "kafka") + require.Len(t, dataSources, 1) + assert.True(t, dataSources[0].HasRootNode("Type1", "Field1")) + assert.False(t, dataSources[0].HasRootNode("Type1", "Field2")) +} diff --git a/router/pkg/pubsub/pubsubtest/pubsubtest.go b/router/pkg/pubsub/pubsubtest/pubsubtest.go new file mode 100644 index 0000000000..254d1e8609 --- /dev/null +++ b/router/pkg/pubsub/pubsubtest/pubsubtest.go @@ -0,0 +1,48 @@ +package pubsubtest + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/wundergraph/cosmo/router/pkg/pubsub/datasource" +) + +// VerifyEngineDataSourceFactoryImplementation is a common test function to verify any EngineDataSourceFactory implementation +// This function can be used by other packages to test their EngineDataSourceFactory implementations +func VerifyEngineDataSourceFactoryImplementation(t *testing.T, pubSub datasource.EngineDataSourceFactory) { + // Test GetFieldName + fieldName := pubSub.GetFieldName() + require.NotEmpty(t, fieldName, "Expected non-empty field name") + + // Test GetResolveDataSource + dataSource, err := pubSub.ResolveDataSource() + require.NoError(t, err, "Expected no error from GetResolveDataSource") + require.NotNil(t, dataSource, "Expected non-nil DataSource") + + // Test GetResolveDataSourceInput with sample event data + testEvent := []byte(`{"test":"data"}`) + input, err := pubSub.ResolveDataSourceInput(testEvent) + require.NoError(t, err, "Expected no error from 
GetResolveDataSourceInput") + assert.NotEmpty(t, input, "Expected non-empty input") + + // Make sure the input is valid JSON + var result interface{} + err = json.Unmarshal([]byte(input), &result) + assert.NoError(t, err, "Expected valid JSON from GetResolveDataSourceInput") + + // Test GetResolveDataSourceSubscription + subscription, err := pubSub.ResolveDataSourceSubscription() + require.NoError(t, err, "Expected no error from GetResolveDataSourceSubscription") + require.NotNil(t, subscription, "Expected non-nil SubscriptionDataSource") + + // Test GetResolveDataSourceSubscriptionInput + subscriptionInput, err := pubSub.ResolveDataSourceSubscriptionInput() + require.NoError(t, err, "Expected no error from GetResolveDataSourceSubscriptionInput") + assert.NotEmpty(t, subscriptionInput, "Expected non-empty subscription input") + + // Make sure the subscription input is valid JSON + err = json.Unmarshal([]byte(subscriptionInput), &result) + assert.NoError(t, err, "Expected valid JSON from GetResolveDataSourceSubscriptionInput") +}