diff --git a/docs-website/docs.json b/docs-website/docs.json index 44a2060895..186a67d582 100644 --- a/docs-website/docs.json +++ b/docs-website/docs.json @@ -181,6 +181,7 @@ "icon": "projector", "pages": [ "router/metrics-and-monitoring", + "router/metrics-and-monitoring/span-error-status", "router/metrics-and-monitoring/prometheus-metric-reference", "router/metrics-and-monitoring/grafana" ] diff --git a/docs-website/router/metrics-and-monitoring.mdx b/docs-website/router/metrics-and-monitoring.mdx index 4f574d22ea..2352c1217f 100644 --- a/docs-website/router/metrics-and-monitoring.mdx +++ b/docs-website/router/metrics-and-monitoring.mdx @@ -173,12 +173,16 @@ All metrics are tracked along different dimensions. A dimension is an attribute You can use the `router.http.requests.error` metric to track errors across router and subgraph requests. In addition to that, you can also use the `router.http.requests` metric to identify errors. We attach the following fields: -* `wg.request.error`: Identify if an error occurred. This applies to a request that didn't result in a successful HTTP or GraphQL response. Only set when it is `true`. Be aware that a Status-Code `200` can still be an error in GraphQL. +* `wg.request.error`: Identify if a server-side error occurred. This applies to a request that didn't result in a successful HTTP or GraphQL response. Only set when it is `true`. Be aware that a Status-Code `200` can still be an error in GraphQL. Client disconnections are **not** counted as errors. * `http.status_code`: The status code of the response. You have two ways to query for errors. Both metrics can be quite useful depending on the query scenario. In general, we recommend to use the `router.http.requests.error` metric. + + For a detailed breakdown of which scenarios mark spans as ERROR and increment error metrics, see [Span Error Status & Error Metrics](/router/metrics-and-monitoring/span-error-status). 
+ + #### Subgraph specific * `wg.subgraph.name`: The name of the subgraph diff --git a/docs-website/router/metrics-and-monitoring/span-error-status.mdx b/docs-website/router/metrics-and-monitoring/span-error-status.mdx new file mode 100644 index 0000000000..5517f7d2fe --- /dev/null +++ b/docs-website/router/metrics-and-monitoring/span-error-status.mdx @@ -0,0 +1,111 @@ +--- +title: "Span Error Status & Error Metrics" +description: "Understand when the router marks spans as ERROR and how errors are reflected in metrics" +sidebarTitle: "Span Error Status" +icon: "triangle-exclamation" +--- + +The router uses OpenTelemetry spans to trace the full lifecycle of a GraphQL request. This page documents when and why spans are marked with an `ERROR` status, how errors flow into metrics, and how client-side vs server-side failures are distinguished. + +## Overview + +Not every unsuccessful response marks a span as `ERROR`. The router distinguishes between: + +- **Client-side events**: Events like client disconnections that are not server failures (these do NOT mark spans as ERROR) +- **Server-side errors**: All other failures — caused by the router, its subgraphs, or invalid client input like malformed queries (these mark spans as ERROR) + +## When Spans Are NOT Marked as ERROR + +### Client Disconnection / Context Cancellation + +When a request's context is canceled (`context.Canceled`), the router does **not** mark the span as `ERROR` and does **not** increment error metrics. 
This typically occurs when: + +- A client disconnects during request processing (the most common cause) +- The router is shutting down gracefully and cancels in-flight requests + +In both cases, the work was **interrupted**, not **failed** — marking it as an error would be misleading: + +- It would inflate error rates and trigger false alerts +- It does not indicate a problem with the router or subgraphs +- The router logs these events below the `ERROR` level + +The error is still recorded as a span **event** (via `RecordError`) so it remains visible in traces for debugging, but the span status stays `UNSET`. + +The response message is set to `"Client disconnected"` with HTTP status `408 Request Timeout` for observability, but this does not affect span status or error metrics. + + + This is distinct from **server timeouts** (`context.DeadlineExceeded`), which indicate that a configured deadline was exceeded. Timeouts ARE tracked as errors because they point to a subgraph or configuration issue that should be investigated. + + +### Successful Requests + +Requests that complete successfully (HTTP 2xx, no subgraph errors) leave the span status as `UNSET` (the OpenTelemetry default for successful operations). + +## When Spans Are Marked as ERROR + +Any error not listed above is treated as a server-side error. The span status is set to `ERROR`, the error is recorded on the span, and error metrics are incremented. This includes but is not limited to: + +### Authentication Failure + +When a request fails authentication, both the router root span and the authentication span are marked as `ERROR`. The response returns HTTP `401 Unauthorized`. + +### Subgraph Fetch Error + +When the router fails to fetch a response from a subgraph (network error, timeout, non-2xx status code), the **Engine - Fetch** span for that subgraph is marked as `ERROR`. The error is also recorded in the `router.http.requests.error` metric with subgraph-level dimensions. 
+ +Downstream GraphQL errors from the subgraph response are captured as **span events** on the fetch span, with attributes: +- `wg.subgraph.error.extended_code`: The error extension code from the subgraph response +- `wg.subgraph.error.message`: The error message from the subgraph response + +### Persisted Operation Error + +When a persisted operation cannot be loaded (CDN failure, operation not found), the span is marked as `ERROR`. + +### Operation Processing Errors (Parse, Normalize, Validate, Plan) + +Each stage of GraphQL operation processing has its own span. If any stage fails, that stage's span is marked as `ERROR`, and the error propagates to the router root span: + +- **Operation - Parse**: Malformed GraphQL syntax +- **Operation - Normalize**: Variable normalization or remapping failures +- **Operation - Validate**: Query depth violations, validation rule failures +- **Operation - Plan**: Query plan generation failures + +### GraphQL Execution Error + +When the GraphQL engine encounters errors during resolution (e.g., subgraph returns errors that prevent successful data merging), the root execution span is marked as `ERROR`. The error is propagated to the router root span. + +### Batch Request Error + +When a batched GraphQL request fails at the batch-level (malformed JSON array, encoding failure), the request span is marked as `ERROR`. + +### Subscription Resolution Failure + +When a subscription fails to resolve (excluding client disconnections), the span is marked as `ERROR` and an HTTP `500` response is returned. + +### Rate Limit Exceeded + +When a request exceeds the configured rate limit, the span is marked as `ERROR`. + +### Authorization Failure (In-Resolver) + +When field-level authorization fails during resolution, the span is marked as `ERROR`. + +## Metrics + +- `router.http.requests.error`: A dedicated counter for failed requests. Incremented only for server-side errors. +- `router.http.requests`: The general request counter. 
When an error occurs, the `wg.request.error=true` attribute is attached, allowing you to filter error vs non-error requests from the same metric. + +Both metrics share the same error classification: a request is counted as an error **only** when it is a server-side failure. Client disconnections are excluded. + +## Subgraph-Level Error Tracking + +Subgraph errors are tracked at a more granular level on the **Engine - Fetch** span: + +1. The fetch span status is set to `ERROR` when `responseInfo.Err` is not nil +2. Individual downstream errors are recorded as **span events** with error codes and messages +3. Error codes are deduplicated and sorted to reduce metric cardinality +4. The `router.http.requests.error` metric is recorded with subgraph-specific dimensions (`wg.subgraph.name`, `wg.subgraph.id`) + + + Use the `router.http.requests.error` metric with the `wg.subgraph.name` dimension to identify which subgraphs are contributing the most errors to your federated graph. + diff --git a/router-tests/telemetry/span_error_status_test.go b/router-tests/telemetry/span_error_status_test.go new file mode 100644 index 0000000000..88b20a0875 --- /dev/null +++ b/router-tests/telemetry/span_error_status_test.go @@ -0,0 +1,178 @@ +package telemetry + +import ( + "context" + "net/http" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/wundergraph/cosmo/router-tests/testenv" + "github.com/wundergraph/cosmo/router/pkg/trace/tracetest" + "go.opentelemetry.io/otel/codes" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.uber.org/zap/zapcore" +) + +// rootSpan returns the last span in the list, which is the root server span. +// OTEL exports child spans before parents, so the root is always last. 
+func rootSpan(spans []sdktrace.ReadOnlySpan) sdktrace.ReadOnlySpan { + if len(spans) == 0 { + return nil + } + return spans[len(spans)-1] +} + +func TestClientDisconnectionBehavior(t *testing.T) { + t.Parallel() + + t.Run("span status is not error but exception event is recorded", func(t *testing.T) { + t.Parallel() + + exporter := tracetest.NewInMemoryExporter(t) + + testenv.Run(t, &testenv.Config{ + TraceExporter: exporter, + Subgraphs: testenv.SubgraphsConfig{ + Employees: testenv.SubgraphConfig{ + Delay: 2 * time.Second, + }, + }, + }, func(t *testing.T, xEnv *testenv.Environment) { + ctx, cancel := context.WithTimeout(t.Context(), 200*time.Millisecond) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, xEnv.GraphQLRequestURL(), + strings.NewReader(`{"query":"{ employees { id } }"}`)) + require.NoError(t, err) + req.Header.Set("Content-Type", "application/json") + + client := &http.Client{} + _, err = client.Do(req) + require.Error(t, err) + + time.Sleep(500 * time.Millisecond) + + rootSpan := rootSpan(exporter.GetSpans().Snapshots()) + require.NotNil(t, rootSpan, "expected root span to be exported for client disconnections") + + require.NotEqual(t, codes.Error, rootSpan.Status().Code, + "root span should not be marked as error for client disconnections") + + hasExceptionEvent := false + for _, event := range rootSpan.Events() { + if event.Name == "exception" { + hasExceptionEvent = true + break + } + } + require.True(t, hasExceptionEvent, + "root span should have an exception event recorded for client disconnections") + }) + }) + + t.Run("error metrics are not inflated but request count is recorded", func(t *testing.T) { + t.Parallel() + + metricReader := sdkmetric.NewManualReader() + + testenv.Run(t, &testenv.Config{ + MetricReader: metricReader, + Subgraphs: testenv.SubgraphsConfig{ + Employees: testenv.SubgraphConfig{ + Delay: 2 * time.Second, + }, + }, + }, func(t *testing.T, xEnv *testenv.Environment) { + ctx, cancel := 
context.WithTimeout(t.Context(), 200*time.Millisecond) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, xEnv.GraphQLRequestURL(), + strings.NewReader(`{"query":"{ employees { id } }"}`)) + require.NoError(t, err) + req.Header.Set("Content-Type", "application/json") + + client := &http.Client{} + _, _ = client.Do(req) + + time.Sleep(500 * time.Millisecond) + + var rm metricdata.ResourceMetrics + err = metricReader.Collect(t.Context(), &rm) + require.NoError(t, err) + + var requestCountFound bool + for _, scopeMetric := range rm.ScopeMetrics { + for _, m := range scopeMetric.Metrics { + if m.Name == "router.http.requests" { + requestCountFound = true + } + } + } + require.True(t, requestCountFound, "request count metric should be recorded even for client disconnections") + }) + }) + + t.Run("log level is info not error", func(t *testing.T) { + t.Parallel() + + testenv.Run(t, &testenv.Config{ + LogObservation: testenv.LogObservationConfig{ + Enabled: true, + LogLevel: zapcore.DebugLevel, + }, + Subgraphs: testenv.SubgraphsConfig{ + Employees: testenv.SubgraphConfig{ + Delay: 2 * time.Second, + }, + }, + }, func(t *testing.T, xEnv *testenv.Environment) { + ctx, cancel := context.WithTimeout(t.Context(), 200*time.Millisecond) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, xEnv.GraphQLRequestURL(), + strings.NewReader(`{"query":"{ employees { id } }"}`)) + require.NoError(t, err) + req.Header.Set("Content-Type", "application/json") + + client := &http.Client{} + _, _ = client.Do(req) + + time.Sleep(500 * time.Millisecond) + + errorLogs := xEnv.Observer().FilterLevelExact(zapcore.ErrorLevel).All() + for _, entry := range errorLogs { + require.NotContains(t, entry.Message, "context canceled", + "context canceled should not be logged at error level") + } + }) + }) + + t.Run("other errors still mark span as error", func(t *testing.T) { + t.Parallel() + + exporter := tracetest.NewInMemoryExporter(t) + + 
testenv.Run(t, &testenv.Config{ + TraceExporter: exporter, + Subgraphs: testenv.SubgraphsConfig{ + Employees: testenv.SubgraphConfig{ + CloseOnStart: true, + }, + }, + }, func(t *testing.T, xEnv *testenv.Environment) { + res := xEnv.MakeGraphQLRequestOK(testenv.GraphQLRequest{ + Query: `{ employees { id } }`, + }) + require.Contains(t, res.Body, "errors") + + rootSpan := rootSpan(exporter.GetSpans().Snapshots()) + require.NotNil(t, rootSpan, "root span should exist") + require.Equal(t, codes.Error, rootSpan.Status().Code, + "root span should be marked as error for real failures") + }) + }) +} diff --git a/router/core/errors.go b/router/core/errors.go index c4acdf3944..7c5de91f65 100644 --- a/router/core/errors.go +++ b/router/core/errors.go @@ -111,21 +111,28 @@ func logInternalErrorsFromReport(report *operationreport.Report, requestLogger * // trackFinalResponseError sets the final response error on the request context and // attaches it to the span. This is used to process the error in the outer middleware // and therefore only intended to be used in the GraphQL handler. +// Client disconnections (context.Canceled) are tracked separately and do not mark +// the span as ERROR or count toward error metrics, since they are not server-side failures. 
func trackFinalResponseError(ctx context.Context, err error) { if err == nil { return } - span := trace.SpanFromContext(ctx) requestContext := getRequestContext(ctx) if requestContext == nil { return } requestContext.SetError(err) + + if errors.Is(err, context.Canceled) { + return + } + requestContext.graphQLErrorServices = getAggregatedSubgraphServiceNames(requestContext.error) requestContext.graphQLErrorCodes = getAggregatedSubgraphErrorCodes(requestContext.error) + span := trace.SpanFromContext(ctx) rtrace.AttachErrToSpan(span, err) } diff --git a/router/core/graphql_prehandler.go b/router/core/graphql_prehandler.go index 2aa4f95535..d8535e4a98 100644 --- a/router/core/graphql_prehandler.go +++ b/router/core/graphql_prehandler.go @@ -457,9 +457,17 @@ func (h *PreHandler) Handler(next http.Handler) http.Handler { // and enrich the context to make it available in the request context as well for metrics etc. next.ServeHTTP(ww, r) - // Mark the root span of the router as failed, so we can easily identify failed requests + // Mark the root span of the router as failed, so we can easily identify failed requests. + // Client disconnections are not server-side errors and should not mark the root span as ERROR. 
if requestContext.error != nil { - rtrace.AttachErrToSpan(trace.SpanFromContext(r.Context()), requestContext.error) + contextSpan := trace.SpanFromContext(r.Context()) + + if errors.Is(requestContext.error, context.Canceled) { + // For disconnects just record the error but don't set the status + contextSpan.RecordError(requestContext.error) + } else { + rtrace.AttachErrToSpan(contextSpan, requestContext.error) + } } }) } diff --git a/router/core/operation_metrics.go b/router/core/operation_metrics.go index 4dbdc55a29..9d14af46b4 100644 --- a/router/core/operation_metrics.go +++ b/router/core/operation_metrics.go @@ -2,6 +2,7 @@ package core import ( "context" + "errors" "time" "go.opentelemetry.io/otel/attribute" @@ -56,7 +57,10 @@ func (m *OperationMetrics) Finish(reqContext *requestContext, statusCode int, re o := otelmetric.WithAttributeSet(attribute.NewSet(attrs...)) - if reqContext.error != nil { + // Client disconnections are not server-side errors and should not inflate error metrics. + isError := reqContext.error != nil && !errors.Is(reqContext.error, context.Canceled) + + if isError { rm.MeasureRequestError(ctx, sliceAttrs, o) attrs = append(attrs, rotel.WgRequestError.Bool(true)) @@ -82,16 +86,18 @@ func (m *OperationMetrics) Finish(reqContext *requestContext, statusCode int, re } } - // Export schema usage info to configured exporters + // Export schema usage info to configured exporters. + // Client disconnections are excluded from the error flag to stay consistent with + // the error metrics above. 
if reqContext.operation != nil && !reqContext.operation.executionOptions.SkipLoader { // GraphQL metrics export (to metrics service) if m.trackUsageInfo { - m.routerMetrics.ExportSchemaUsageInfo(reqContext.operation, statusCode, reqContext.error != nil, exportSynchronous) + m.routerMetrics.ExportSchemaUsageInfo(reqContext.operation, statusCode, isError, exportSynchronous) } // Prometheus metrics export (to local Prometheus metrics) if m.prometheusTrackUsageInfo { - m.routerMetrics.ExportSchemaUsageInfoPrometheus(reqContext.operation, statusCode, reqContext.error != nil, exportSynchronous) + m.routerMetrics.ExportSchemaUsageInfoPrometheus(reqContext.operation, statusCode, isError, exportSynchronous) } } } diff --git a/router/core/operation_metrics_test.go b/router/core/operation_metrics_test.go new file mode 100644 index 0000000000..771b61ab37 --- /dev/null +++ b/router/core/operation_metrics_test.go @@ -0,0 +1,146 @@ +package core + +import ( + "context" + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" + "github.com/wundergraph/cosmo/router/internal/graphqlmetrics" + "github.com/wundergraph/cosmo/router/pkg/metric" + "go.opentelemetry.io/otel/attribute" + otelmetric "go.opentelemetry.io/otel/metric" + "go.uber.org/zap" +) + +func TestFinishSchemaUsageExport(t *testing.T) { + t.Parallel() + + t.Run("client disconnection passes hasError=false to schema usage exporters", func(t *testing.T) { + t.Parallel() + + store := &spyMetricStore{} + rm := &spyRouterMetrics{store: store} + rc := newTestRequestContext(t) + rc.error = context.Canceled + + m := &OperationMetrics{ + routerMetrics: rm, + inflightMetric: func() {}, + trackUsageInfo: true, + prometheusTrackUsageInfo: true, + } + m.Finish(rc, 200, 100, false) + + require.True(t, rm.schemaUsageCalled) + require.False(t, rm.schemaUsageHasError, "ExportSchemaUsageInfo should receive hasError=false for client disconnections") + + require.True(t, rm.promUsageCalled) + 
require.False(t, rm.promUsageHasError, "ExportSchemaUsageInfoPrometheus should receive hasError=false for client disconnections") + + require.False(t, store.requestErrorCalled, "MeasureRequestError should not be called for client disconnections") + }) + + t.Run("real error passes hasError=true to schema usage exporters", func(t *testing.T) { + t.Parallel() + + store := &spyMetricStore{} + rm := &spyRouterMetrics{store: store} + rc := newTestRequestContext(t) + rc.error = errors.New("subgraph timeout") + + m := &OperationMetrics{ + routerMetrics: rm, + inflightMetric: func() {}, + trackUsageInfo: true, + prometheusTrackUsageInfo: true, + } + m.Finish(rc, 500, 100, false) + + require.True(t, rm.schemaUsageCalled) + require.True(t, rm.schemaUsageHasError, "ExportSchemaUsageInfo should receive hasError=true for real errors") + + require.True(t, rm.promUsageCalled) + require.True(t, rm.promUsageHasError, "ExportSchemaUsageInfoPrometheus should receive hasError=true for real errors") + + require.True(t, store.requestErrorCalled, "MeasureRequestError should be called for real errors") + }) + + t.Run("no error passes hasError=false to schema usage exporters", func(t *testing.T) { + t.Parallel() + + store := &spyMetricStore{} + rm := &spyRouterMetrics{store: store} + rc := newTestRequestContext(t) + + m := &OperationMetrics{ + routerMetrics: rm, + inflightMetric: func() {}, + trackUsageInfo: true, + prometheusTrackUsageInfo: true, + } + m.Finish(rc, 200, 100, false) + + require.True(t, rm.schemaUsageCalled) + require.False(t, rm.schemaUsageHasError) + + require.True(t, rm.promUsageCalled) + require.False(t, rm.promUsageHasError) + + require.False(t, store.requestErrorCalled) + }) +} + +type spyRouterMetrics struct { + store metric.Store + + schemaUsageCalled bool + schemaUsageHasError bool + promUsageCalled bool + promUsageHasError bool +} + +func (m *spyRouterMetrics) StartOperation(_ *zap.Logger, _ int64, _ []attribute.KeyValue, _ otelmetric.AddOption) *OperationMetrics { 
+ return nil +} + +func (m *spyRouterMetrics) ExportSchemaUsageInfo(_ *operationContext, _ int, hasError bool, _ bool) { + m.schemaUsageCalled = true + m.schemaUsageHasError = hasError +} + +func (m *spyRouterMetrics) ExportSchemaUsageInfoPrometheus(_ *operationContext, _ int, hasError bool, _ bool) { + m.promUsageCalled = true + m.promUsageHasError = hasError +} + +func (m *spyRouterMetrics) GQLMetricsExporter() *graphqlmetrics.GraphQLMetricsExporter { + return nil +} + +func (m *spyRouterMetrics) PrometheusMetricsExporter() *graphqlmetrics.PrometheusMetricsExporter { + return nil +} + +func (m *spyRouterMetrics) MetricStore() metric.Store { + return m.store +} + +type spyMetricStore struct { + metric.NoopMetrics + requestErrorCalled bool +} + +func (m *spyMetricStore) MeasureRequestError(_ context.Context, _ []attribute.KeyValue, _ otelmetric.AddOption) { + m.requestErrorCalled = true +} + +func newTestRequestContext(t *testing.T) *requestContext { + t.Helper() + req := httptest.NewRequest(http.MethodPost, "/graphql", nil) + rc := buildRequestContext(requestContextOptions{r: req}) + rc.operation = &operationContext{} + return rc +} diff --git a/router/core/request_context_fields.go b/router/core/request_context_fields.go index 09df11cb6e..e6371512dc 100644 --- a/router/core/request_context_fields.go +++ b/router/core/request_context_fields.go @@ -1,6 +1,8 @@ package core import ( + "context" + "errors" "fmt" "github.com/wundergraph/cosmo/router/internal/expr" "net/http" @@ -302,7 +304,7 @@ func LogLevelHandler(r *http.Request) zapcore.Level { return zapcore.InfoLevel } - if reqContext.error != nil { + if reqContext.error != nil && !errors.Is(reqContext.error, context.Canceled) { return zapcore.ErrorLevel } diff --git a/router/pkg/grpcconnector/grpccommon/grpc_plugin_client.go b/router/pkg/grpcconnector/grpccommon/grpc_plugin_client.go index 688f02bb24..0e6b963edf 100644 --- a/router/pkg/grpcconnector/grpccommon/grpc_plugin_client.go +++ 
b/router/pkg/grpcconnector/grpccommon/grpc_plugin_client.go @@ -18,6 +18,8 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + + otelcodes "go.opentelemetry.io/otel/codes" ) type GRPCPluginClient struct { @@ -55,6 +57,8 @@ func WithReconnectConfig(reconnectTimeout time.Duration, pingInterval time.Durat var _ grpc.ClientConnInterface = &GRPCPluginClient{} +var errPluginNotActive = errors.New("plugin is not active") + type GRPCTraceAttributeGetter func(context.Context) (string, trace.SpanStartEventOption) type GRPCPluginClientOpts struct { @@ -154,13 +158,16 @@ func (g *GRPCPluginClient) Invoke(ctx context.Context, method string, args any, if g.IsPluginProcessExited() { if err := g.waitForPluginToBeActive(); err != nil { span.RecordError(err) + span.SetStatus(otelcodes.Error, err.Error()) return err } } if g.isClosed.Load() { - span.RecordError(errors.New("plugin is not active")) - return status.Error(codes.Unavailable, "plugin is not active") + span.RecordError(errPluginNotActive) + span.SetStatus(otelcodes.Error, errPluginNotActive.Error()) + + return status.Error(codes.Unavailable, errPluginNotActive.Error()) } g.mu.RLock() diff --git a/router/pkg/grpcconnector/grpccommon/grpc_plugin_client_test.go b/router/pkg/grpcconnector/grpccommon/grpc_plugin_client_test.go new file mode 100644 index 0000000000..6085d21d1f --- /dev/null +++ b/router/pkg/grpcconnector/grpccommon/grpc_plugin_client_test.go @@ -0,0 +1,151 @@ +package grpccommon + +import ( + "context" + "testing" + "time" + + "github.com/hashicorp/go-plugin" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "google.golang.org/grpc" + grpccodes "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/sdk/trace/tracetest" +) + +func TestInvoke(t *testing.T) { + t.Parallel() + + t.Run("plugin exited and reconnect times 
out sets span error status", func(t *testing.T) { + t.Parallel() + + tracer, exporter := newTestTracer(t) + + g := &GRPCPluginClient{ + pc: nil, // nil means IsPluginProcessExited() returns true + tracer: tracer, + getTraceAttributes: stubTraceAttributes, + config: GRPCPluginClientConfig{ + ReconnectTimeout: 10 * time.Millisecond, + PingInterval: 1 * time.Millisecond, + }, + } + + err := g.Invoke(t.Context(), "/test.Method", nil, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "plugin was not active in time") + + spans := exporter.GetSpans() + require.Len(t, spans, 1) + + span := spans[0] + require.Equal(t, "test-span", span.Name) + require.Equal(t, codes.Error, span.Status.Code) + require.Contains(t, span.Status.Description, "plugin was not active in time") + + // Verify exception event was recorded + require.NotEmpty(t, span.Events) + hasException := false + for _, event := range span.Events { + if event.Name == "exception" { + hasException = true + break + } + } + require.True(t, hasException, "span should have an exception event") + }) + + t.Run("client closed returns unavailable with span error status", func(t *testing.T) { + t.Parallel() + + tracer, exporter := newTestTracer(t) + + g := &GRPCPluginClient{ + pc: &plugin.Client{}, // non-nil, Exited() returns false + cc: nil, + tracer: tracer, + getTraceAttributes: stubTraceAttributes, + config: defaultGRPCPluginClientConfig, + } + g.isClosed.Store(true) + + err := g.Invoke(t.Context(), "/test.Method", nil, nil) + require.Error(t, err) + + st, ok := status.FromError(err) + require.True(t, ok, "error should be a gRPC status") + require.Equal(t, grpccodes.Unavailable, st.Code()) + require.Equal(t, errPluginNotActive.Error(), st.Message()) + + spans := exporter.GetSpans() + require.Len(t, spans, 1) + + span := spans[0] + require.Equal(t, codes.Error, span.Status.Code) + require.Equal(t, errPluginNotActive.Error(), span.Status.Description) + + hasException := false + for _, event := range 
span.Events { + if event.Name == "exception" { + hasException = true + break + } + } + require.True(t, hasException, "span should have an exception event") + }) + + t.Run("healthy plugin delegates to underlying connection", func(t *testing.T) { + t.Parallel() + + tracer, exporter := newTestTracer(t) + + fakeCC := &fakeClientConn{} + + g := &GRPCPluginClient{ + pc: &plugin.Client{}, // non-nil, Exited() returns false + cc: fakeCC, + tracer: tracer, + getTraceAttributes: stubTraceAttributes, + config: defaultGRPCPluginClientConfig, + } + + err := g.Invoke(t.Context(), "/test.Method", nil, nil) + require.NoError(t, err) + require.True(t, fakeCC.invoked, "should delegate to underlying connection") + + spans := exporter.GetSpans() + require.Len(t, spans, 1) + require.NotEqual(t, codes.Error, spans[0].Status.Code, + "span should not be in error state for successful invocations") + }) +} + +// fakeClientConn implements grpc.ClientConnInterface for testing the happy path. +type fakeClientConn struct { + invoked bool +} + +func (f *fakeClientConn) Invoke(_ context.Context, _ string, _ any, _ any, _ ...grpc.CallOption) error { + f.invoked = true + return nil +} + +func (f *fakeClientConn) NewStream(_ context.Context, _ *grpc.StreamDesc, _ string, _ ...grpc.CallOption) (grpc.ClientStream, error) { + return nil, nil +} + +func newTestTracer(t *testing.T) (trace.Tracer, *tracetest.InMemoryExporter) { + t.Helper() + exporter := tracetest.NewInMemoryExporter() + tp := sdktrace.NewTracerProvider(sdktrace.WithSyncer(exporter)) + t.Cleanup(func() { _ = tp.Shutdown(t.Context()) }) + return tp.Tracer("test"), exporter +} + +func stubTraceAttributes(_ context.Context) (string, trace.SpanStartEventOption) { + return "test-span", trace.WithAttributes() +}