From 918089bc8a74e15f0c13feada8a53ef7e2cd5b6d Mon Sep 17 00:00:00 2001 From: Jens Neuse Date: Wed, 4 Mar 2026 00:51:28 +0100 Subject: [PATCH 01/11] feat: enrich FetchTimingEvent with HTTP status, response size, and TTFB for subgraph SLO data collection Add HTTPStatusCode, ResponseBytes, and TTFBMs fields to FetchTimingEvent so the schema registry can compute subgraph-level SLOs (latency percentiles, error rates, availability) from per-request telemetry. Add SubgraphRequestMetrics type and SubgraphMetrics() query method on CacheAnalyticsSnapshot for per-subgraph aggregate metrics export. Co-Authored-By: Claude Opus 4.6 --- v2/pkg/engine/resolve/cache_analytics.go | 78 ++++++++- v2/pkg/engine/resolve/loader.go | 14 +- .../engine/resolve/subgraph_metrics_test.go | 165 ++++++++++++++++++ 3 files changed, 245 insertions(+), 12 deletions(-) create mode 100644 v2/pkg/engine/resolve/subgraph_metrics_test.go diff --git a/v2/pkg/engine/resolve/cache_analytics.go b/v2/pkg/engine/resolve/cache_analytics.go index 52e0f3c6f8..6866ac43e2 100644 --- a/v2/pkg/engine/resolve/cache_analytics.go +++ b/v2/pkg/engine/resolve/cache_analytics.go @@ -60,12 +60,15 @@ type CacheWriteEvent struct { // FetchTimingEvent records the duration of a subgraph fetch or cache lookup. 
type FetchTimingEvent struct { - DataSource string // subgraph name - EntityType string // entity type (empty for root fetches) - DurationMs int64 // time spent on this operation in milliseconds - Source FieldSource // what handled this: Subgraph (fetch), L2 (cache GET) - ItemCount int // number of entities in this fetch/lookup - IsEntityFetch bool // true for _entities, false for root field + DataSource string // subgraph name + EntityType string // entity type (empty for root fetches) + DurationMs int64 // time spent on this operation in milliseconds + Source FieldSource // what handled this: Subgraph (fetch), L2 (cache GET) + ItemCount int // number of entities in this fetch/lookup + IsEntityFetch bool // true for _entities, false for root field + HTTPStatusCode int // HTTP status code from subgraph response (0 for cache hits) + ResponseBytes int // response body size in bytes (0 for cache hits) + TTFBMs int64 // time to first byte in milliseconds (0 when unavailable) } // SubgraphErrorEvent records a subgraph error for analytics. @@ -824,6 +827,69 @@ func (s *CacheAnalyticsSnapshot) ShadowFreshnessRateByEntityType() map[string]fl return result } +// SubgraphRequestMetrics holds per-subgraph aggregate metrics for a single request. +// Designed for export to external SLO systems (e.g., schema registry). +type SubgraphRequestMetrics struct { + SubgraphName string + RequestCount int // number of fetches to this subgraph + ErrorCount int // number of errors from this subgraph + TotalDurationMs int64 // sum of fetch durations + MaxDurationMs int64 // max single-fetch duration + TotalResponseBytes int64 // sum of response body sizes +} + +// SubgraphMetrics returns per-subgraph aggregate metrics for this request. +// Only considers actual subgraph fetches (not cache hits). +// Returns nil if there are no subgraph fetches or errors. 
+func (s *CacheAnalyticsSnapshot) SubgraphMetrics() []SubgraphRequestMetrics { + // Collect metrics by subgraph name, preserving insertion order + type entry struct { + metrics SubgraphRequestMetrics + index int + } + byName := make(map[string]*entry) + var order []string + + for _, ft := range s.FetchTimings { + if ft.Source != FieldSourceSubgraph { + continue + } + e, ok := byName[ft.DataSource] + if !ok { + e = &entry{metrics: SubgraphRequestMetrics{SubgraphName: ft.DataSource}, index: len(order)} + byName[ft.DataSource] = e + order = append(order, ft.DataSource) + } + e.metrics.RequestCount++ + e.metrics.TotalDurationMs += ft.DurationMs + if ft.DurationMs > e.metrics.MaxDurationMs { + e.metrics.MaxDurationMs = ft.DurationMs + } + e.metrics.TotalResponseBytes += int64(ft.ResponseBytes) + } + + for _, ev := range s.ErrorEvents { + e, ok := byName[ev.DataSource] + if !ok { + e = &entry{metrics: SubgraphRequestMetrics{SubgraphName: ev.DataSource}, index: len(order)} + byName[ev.DataSource] = e + order = append(order, ev.DataSource) + } + e.metrics.ErrorCount++ + } + + if len(order) == 0 { + return nil + } + + results := make([]SubgraphRequestMetrics, len(order)) + for _, name := range order { + e := byName[name] + results[e.index] = e.metrics + } + return results +} + // computeCacheAgeMs computes cache age in milliseconds from remaining TTL and original TTL. // Returns 0 if either value is zero or if the computed age would be negative. 
func computeCacheAgeMs(remainingTTL, originalTTL time.Duration) int64 { diff --git a/v2/pkg/engine/resolve/loader.go b/v2/pkg/engine/resolve/loader.go index e552e9e476..2ffb446c2d 100644 --- a/v2/pkg/engine/resolve/loader.go +++ b/v2/pkg/engine/resolve/loader.go @@ -2260,12 +2260,14 @@ func (l *Loader) executeSourceLoad(ctx context.Context, fetchItem *FetchItem, so isEntityFetch = info.OperationType == ast.OperationTypeQuery && (entityType != "Query" && entityType != "Mutation" && entityType != "Subscription") } res.l2FetchTimings = append(res.l2FetchTimings, FetchTimingEvent{ - DataSource: res.ds.Name, - EntityType: entityType, - DurationMs: time.Since(fetchStart).Milliseconds(), - Source: FieldSourceSubgraph, - ItemCount: 1, - IsEntityFetch: isEntityFetch, + DataSource: res.ds.Name, + EntityType: entityType, + DurationMs: time.Since(fetchStart).Milliseconds(), + Source: FieldSourceSubgraph, + ItemCount: 1, + IsEntityFetch: isEntityFetch, + HTTPStatusCode: res.statusCode, + ResponseBytes: len(res.out), }) } diff --git a/v2/pkg/engine/resolve/subgraph_metrics_test.go b/v2/pkg/engine/resolve/subgraph_metrics_test.go new file mode 100644 index 0000000000..90d71bf519 --- /dev/null +++ b/v2/pkg/engine/resolve/subgraph_metrics_test.go @@ -0,0 +1,165 @@ +package resolve + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCacheAnalyticsSnapshot_SubgraphMetrics(t *testing.T) { + t.Run("returns nil when no subgraph fetches or errors", func(t *testing.T) { + snap := CacheAnalyticsSnapshot{ + FetchTimings: []FetchTimingEvent{ + {DataSource: "accounts", DurationMs: 10, Source: FieldSourceL1}, // cache hit, not subgraph + {DataSource: "accounts", DurationMs: 5, Source: FieldSourceL2}, // cache hit, not subgraph + }, + } + assert.Equal(t, []SubgraphRequestMetrics(nil), snap.SubgraphMetrics()) + }) + + t.Run("single subgraph with one fetch", func(t *testing.T) { + snap := CacheAnalyticsSnapshot{ + FetchTimings: []FetchTimingEvent{ + {DataSource: 
"accounts", DurationMs: 42, Source: FieldSourceSubgraph, ResponseBytes: 256, HTTPStatusCode: 200}, + }, + } + result := snap.SubgraphMetrics() + assert.Equal(t, 1, len(result), "should have exactly 1 subgraph") + assert.Equal(t, SubgraphRequestMetrics{ + SubgraphName: "accounts", + RequestCount: 1, + ErrorCount: 0, + TotalDurationMs: 42, + MaxDurationMs: 42, + TotalResponseBytes: 256, + }, result[0]) + }) + + t.Run("single subgraph with multiple fetches picks max duration", func(t *testing.T) { + snap := CacheAnalyticsSnapshot{ + FetchTimings: []FetchTimingEvent{ + {DataSource: "accounts", DurationMs: 10, Source: FieldSourceSubgraph, ResponseBytes: 100, HTTPStatusCode: 200}, + {DataSource: "accounts", DurationMs: 50, Source: FieldSourceSubgraph, ResponseBytes: 200, HTTPStatusCode: 200}, + {DataSource: "accounts", DurationMs: 30, Source: FieldSourceSubgraph, ResponseBytes: 150, HTTPStatusCode: 200}, + }, + } + result := snap.SubgraphMetrics() + assert.Equal(t, 1, len(result), "should have exactly 1 subgraph") + assert.Equal(t, SubgraphRequestMetrics{ + SubgraphName: "accounts", + RequestCount: 3, + ErrorCount: 0, + TotalDurationMs: 90, + MaxDurationMs: 50, + TotalResponseBytes: 450, + }, result[0]) + }) + + t.Run("multiple subgraphs with mixed success and errors", func(t *testing.T) { + snap := CacheAnalyticsSnapshot{ + FetchTimings: []FetchTimingEvent{ + {DataSource: "accounts", DurationMs: 20, Source: FieldSourceSubgraph, ResponseBytes: 100}, + {DataSource: "products", DurationMs: 80, Source: FieldSourceSubgraph, ResponseBytes: 500}, + {DataSource: "accounts", DurationMs: 15, Source: FieldSourceSubgraph, ResponseBytes: 90}, + {DataSource: "products", DurationMs: 120, Source: FieldSourceSubgraph, ResponseBytes: 600}, + }, + ErrorEvents: []SubgraphErrorEvent{ + {DataSource: "products", EntityType: "Product", Message: "timeout", Code: "TIMEOUT"}, + {DataSource: "reviews", EntityType: "Review", Message: "not found", Code: "NOT_FOUND"}, + }, + } + result := 
snap.SubgraphMetrics() + assert.Equal(t, 3, len(result), "should have exactly 3 subgraphs") + + // accounts: 2 fetches, 0 errors + assert.Equal(t, SubgraphRequestMetrics{ + SubgraphName: "accounts", + RequestCount: 2, + ErrorCount: 0, + TotalDurationMs: 35, + MaxDurationMs: 20, + TotalResponseBytes: 190, + }, result[0]) + + // products: 2 fetches, 1 error + assert.Equal(t, SubgraphRequestMetrics{ + SubgraphName: "products", + RequestCount: 2, + ErrorCount: 1, + TotalDurationMs: 200, + MaxDurationMs: 120, + TotalResponseBytes: 1100, + }, result[1]) + + // reviews: 0 fetches, 1 error (error-only subgraph) + assert.Equal(t, SubgraphRequestMetrics{ + SubgraphName: "reviews", + RequestCount: 0, + ErrorCount: 1, + TotalDurationMs: 0, + MaxDurationMs: 0, + }, result[2]) + }) + + t.Run("cache hits are excluded from subgraph metrics", func(t *testing.T) { + snap := CacheAnalyticsSnapshot{ + FetchTimings: []FetchTimingEvent{ + {DataSource: "accounts", DurationMs: 0, Source: FieldSourceL1}, // L1 cache hit + {DataSource: "accounts", DurationMs: 5, Source: FieldSourceL2}, // L2 cache hit + {DataSource: "accounts", DurationMs: 30, Source: FieldSourceSubgraph}, // actual fetch + }, + } + result := snap.SubgraphMetrics() + assert.Equal(t, 1, len(result), "should have exactly 1 subgraph") + assert.Equal(t, 1, result[0].RequestCount, "should count only the subgraph fetch") + assert.Equal(t, int64(30), result[0].TotalDurationMs, "should only sum subgraph fetch duration") + }) + + t.Run("empty snapshot returns nil", func(t *testing.T) { + snap := CacheAnalyticsSnapshot{} + assert.Equal(t, []SubgraphRequestMetrics(nil), snap.SubgraphMetrics()) + }) + + t.Run("errors-only subgraph has zero request count", func(t *testing.T) { + snap := CacheAnalyticsSnapshot{ + ErrorEvents: []SubgraphErrorEvent{ + {DataSource: "accounts", Message: "connection refused"}, + {DataSource: "accounts", Message: "connection refused"}, + }, + } + result := snap.SubgraphMetrics() + assert.Equal(t, 1, 
len(result), "should have exactly 1 subgraph") + assert.Equal(t, SubgraphRequestMetrics{ + SubgraphName: "accounts", + RequestCount: 0, + ErrorCount: 2, + }, result[0]) + }) +} + +func TestFetchTimingEvent_NewFields(t *testing.T) { + t.Run("subgraph fetch carries HTTP status and response size", func(t *testing.T) { + event := FetchTimingEvent{ + DataSource: "accounts", + DurationMs: 42, + Source: FieldSourceSubgraph, + HTTPStatusCode: 200, + ResponseBytes: 1024, + TTFBMs: 0, // not yet instrumented + } + assert.Equal(t, 200, event.HTTPStatusCode) + assert.Equal(t, 1024, event.ResponseBytes) + assert.Equal(t, int64(0), event.TTFBMs) + }) + + t.Run("cache hit has zero values for HTTP fields", func(t *testing.T) { + event := FetchTimingEvent{ + DataSource: "accounts", + DurationMs: 1, + Source: FieldSourceL1, + } + assert.Equal(t, 0, event.HTTPStatusCode, "cache hits should have zero status code") + assert.Equal(t, 0, event.ResponseBytes, "cache hits should have zero response bytes") + assert.Equal(t, int64(0), event.TTFBMs, "cache hits should have zero TTFB") + }) +} From 4af7b835b3806c811e5a43a651910be92bc08022 Mon Sep 17 00:00:00 2001 From: Jens Neuse Date: Wed, 4 Mar 2026 08:24:42 +0100 Subject: [PATCH 02/11] fix: resolve pre-existing linter failures (gci, staticcheck) Remove extra blank line in resolve_test.go (gci) and lowercase error string in schema.go (staticcheck ST1005). Update test assertions to match. 
Co-Authored-By: Claude Opus 4.6 --- v2/pkg/engine/datasource/service_datasource/schema.go | 2 +- v2/pkg/engine/datasource/service_datasource/schema_test.go | 4 ++-- v2/pkg/engine/resolve/resolve_test.go | 1 - 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/v2/pkg/engine/datasource/service_datasource/schema.go b/v2/pkg/engine/datasource/service_datasource/schema.go index 86b1d5f74d..6dcaf1ddd0 100644 --- a/v2/pkg/engine/datasource/service_datasource/schema.go +++ b/v2/pkg/engine/datasource/service_datasource/schema.go @@ -56,7 +56,7 @@ func ExtendSchemaWithServiceTypes(schema *ast.Document) error { // 1. Find Query type first to fail fast queryNode, found := findQueryType(schema) if !found { - return fmt.Errorf("Query type not found in schema") + return fmt.Errorf("query type not found in schema") } // 2. Add _Capability type (must be added before _Service since _Service references it) diff --git a/v2/pkg/engine/datasource/service_datasource/schema_test.go b/v2/pkg/engine/datasource/service_datasource/schema_test.go index 8081818481..0402362989 100644 --- a/v2/pkg/engine/datasource/service_datasource/schema_test.go +++ b/v2/pkg/engine/datasource/service_datasource/schema_test.go @@ -118,7 +118,7 @@ func TestExtendSchemaWithServiceTypes(t *testing.T) { err := ExtendSchemaWithServiceTypes(&schema) assert.Error(t, err) - assert.Contains(t, err.Error(), "Query type not found") + assert.Contains(t, err.Error(), "query type not found") }) t.Run("works with custom query type name", func(t *testing.T) { @@ -264,6 +264,6 @@ func TestNewServiceConfigFactoryWithSchema(t *testing.T) { factory, err := NewServiceConfigFactoryWithSchema(&schema, ServiceOptions{}) assert.Error(t, err) assert.Nil(t, factory) - assert.Contains(t, err.Error(), "Query type not found") + assert.Contains(t, err.Error(), "query type not found") }) } diff --git a/v2/pkg/engine/resolve/resolve_test.go b/v2/pkg/engine/resolve/resolve_test.go index 72e29fecf6..82a8e1e635 100644 --- 
a/v2/pkg/engine/resolve/resolve_test.go +++ b/v2/pkg/engine/resolve/resolve_test.go @@ -183,7 +183,6 @@ func waitForFollowerCount(t *testing.T, r *Resolver, count int32) { } } - type TestErrorWriter struct { } From 3729588ac1c6153952dbe54c63bf4e823fbda99e Mon Sep 17 00:00:00 2001 From: Jens Neuse Date: Wed, 4 Mar 2026 08:29:04 +0100 Subject: [PATCH 03/11] fix: update test expectation for CacheAnalytics on union types The planner now populates CacheAnalytics with per-concrete-type key fields for union objects (Cat/Dog in CatOrDog). Update the test to match the actual planner output. Co-Authored-By: Claude Opus 4.6 --- .../graphql_datasource_test.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_test.go b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_test.go index ea4fb52cd9..1b984cf897 100644 --- a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_test.go +++ b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_test.go @@ -7859,6 +7859,23 @@ func TestGraphQLDataSource(t *testing.T) { "Dog": {}, }, TypeName: "CatOrDog", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Cat": { + KeyFields: []resolve.KeyField{ + {Name: "id"}, + }, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, + "Dog": { + KeyFields: []resolve.KeyField{ + {Name: "id"}, + }, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, + }, + }, Fields: []*resolve.Field{ { Name: []byte("name"), From 8a60b31b54d22b46cee17568931fc4b20e1b372c Mon Sep 17 00:00:00 2001 From: Jens Neuse Date: Wed, 4 Mar 2026 10:40:37 +0100 Subject: [PATCH 04/11] fix: update planner test expectations for CacheAnalytics and alias normalization Update test expectations across graphql_datasource tests to match current planner output: CacheAnalytics populated on entity/union objects, OriginalName 
and HasAliases set on aliased fields. Co-Authored-By: Claude Opus 4.6 --- ...ource_federation_entity_interfaces_test.go | 198 ++ ...hql_datasource_federation_provides_test.go | 16 + .../graphql_datasource_federation_test.go | 1905 +++++++++++++---- .../graphql_datasource_test.go | 75 + 4 files changed, 1768 insertions(+), 426 deletions(-) diff --git a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_entity_interfaces_test.go b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_entity_interfaces_test.go index ad3df31e65..768c606108 100644 --- a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_entity_interfaces_test.go +++ b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_entity_interfaces_test.go @@ -110,6 +110,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, }, }, }, @@ -231,6 +240,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, 
ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -511,6 +529,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -724,6 +751,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -845,6 +881,10 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t 
*testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -990,6 +1030,10 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -1115,6 +1159,10 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Admin": {}, }, TypeName: "Admin", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -1260,6 +1308,10 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Admin": {}, }, TypeName: "Admin", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -1451,6 +1503,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, 
Fields: []*resolve.Field{ { Name: []byte("id"), @@ -1649,6 +1710,14 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "User": {}, }, TypeName: "Accounts", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -1988,6 +2057,14 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "User": {}, }, TypeName: "Accounts", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -2270,6 +2347,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: 
map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -2465,6 +2551,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -2753,6 +2848,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -3010,6 +3114,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: 
[]resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -3350,6 +3463,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -3597,6 +3719,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: 
map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("age"), @@ -3802,6 +3933,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("age"), @@ -3957,6 +4097,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("title"), @@ -4167,6 +4316,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: 
[]resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -4488,6 +4646,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("age"), @@ -4858,6 +5025,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: 
map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("age"), @@ -5067,6 +5243,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -5177,6 +5362,15 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Account": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "Moderator": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -5286,6 +5480,10 @@ func TestGraphQLDataSourceFederationEntityInterfaces(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: 
[]resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), diff --git a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_provides_test.go b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_provides_test.go index 1b4947969f..0f2ffabef1 100644 --- a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_provides_test.go +++ b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_provides_test.go @@ -306,6 +306,10 @@ func TestGraphQLDataSourceFederation_NestedRequiresProvides(t *testing.T) { Path: []string{"order"}, Nullable: true, TypeName: "Order", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, PossibleTypes: map[string]struct{}{"Order": {}}, Fields: []*resolve.Field{ { @@ -314,6 +318,10 @@ func TestGraphQLDataSourceFederation_NestedRequiresProvides(t *testing.T) { Path: []string{"shippingInfo"}, Nullable: true, TypeName: "ShippingInfo", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, PossibleTypes: map[string]struct{}{"ShippingInfo": {}}, Fields: []*resolve.Field{ { @@ -387,6 +395,10 @@ func TestGraphQLDataSourceFederation_NestedRequiresProvides(t *testing.T) { Path: []string{"order"}, Nullable: true, TypeName: "Order", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, PossibleTypes: map[string]struct{}{"Order": {}}, Fields: []*resolve.Field{ { @@ -395,6 +407,10 @@ func TestGraphQLDataSourceFederation_NestedRequiresProvides(t *testing.T) { Path: []string{"shippingInfo"}, Nullable: true, TypeName: "ShippingInfo", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + 
KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, PossibleTypes: map[string]struct{}{"ShippingInfo": {}}, Fields: []*resolve.Field{ { diff --git a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_test.go b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_test.go index 1d218d076b..2a3dd48016 100644 --- a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_test.go +++ b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_test.go @@ -416,6 +416,10 @@ func TestGraphQLDataSourceFederation_Typenames(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("__typename"), @@ -500,6 +504,10 @@ func TestGraphQLDataSourceFederation_Typenames(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("__typename"), @@ -805,6 +813,10 @@ func TestGraphQLDataSourceFederation_Mutations(t *testing.T) { "Object": {}, }, TypeName: "Object", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("__typename"), @@ -848,6 +860,10 @@ func TestGraphQLDataSourceFederation_Mutations(t *testing.T) { "Object": {}, }, TypeName: "Object", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("__typename"), @@ -955,6 +971,10 @@ func TestGraphQLDataSourceFederation_Mutations(t 
*testing.T) { "Object": {}, }, TypeName: "Object", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("field"), @@ -1500,6 +1520,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("account"), @@ -1510,6 +1534,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}, {Name: "info"}, {Name: "{a"}, {Name: "b}"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("name"), @@ -1784,6 +1812,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, OperationType: ast.OperationTypeQuery, ProvidesData: &resolve.Object{ + HasAliases: true, Fields: []*resolve.Field{ { Name: []byte("name"), @@ -1793,14 +1822,16 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, { - Name: []byte("shippingInfo"), - OnTypeNames: [][]byte{[]byte("Account")}, + Name: []byte("shippingInfo"), + OnTypeNames: [][]byte{[]byte("Account")}, Value: &resolve.Object{ - Path: []string{"shippingInfo"}, - Nullable: true, + Path: []string{"shippingInfo"}, + Nullable: true, + HasAliases: true, Fields: []*resolve.Field{ { - Name: []byte("z"), + Name: []byte("z"), + OriginalName: []byte("zip"), Value: &resolve.Scalar{ Path: []string{"z"}, }, @@ -1912,6 +1943,13 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }), }, + KeyFields: []resolve.KeyField{ + {Name: "id"}, + {Name: "info", Children: []resolve.KeyField{ + {Name: "a"}, + {Name: "b"}, + }}, + }, }, }, }, "user.account", resolve.ObjectPath("user"), 
resolve.ObjectPath("account")), @@ -1940,6 +1978,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, SourceName: "user.service", Fields: []*resolve.Field{ { @@ -1953,6 +1995,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { Names: []string{"user.service"}, }, ExactParentTypeName: "User", + CacheAnalyticsHash: true, }, Value: &resolve.Object{ Path: []string{"account"}, @@ -1961,6 +2004,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}, {Name: "info"}, {Name: "{a"}, {Name: "b}"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, SourceName: "user.service", Fields: []*resolve.Field{ { @@ -1974,6 +2021,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { Names: []string{"user.service"}, }, ExactParentTypeName: "Account", + CacheAnalyticsHash: true, }, Value: &resolve.String{ Path: []string{"__typename"}, @@ -1991,6 +2039,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { Names: []string{"account.service"}, }, ExactParentTypeName: "Account", + CacheAnalyticsHash: true, }, Value: &resolve.String{ Path: []string{"name"}, @@ -2008,6 +2057,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, ExactParentTypeName: "Account", HasAuthorizationRule: true, + CacheAnalyticsHash: true, }, Value: &resolve.Object{ Path: []string{"shippingInfo"}, @@ -2341,6 +2391,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "account", Children: []resolve.KeyField{{Name: "id"}}}, {Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: 
[]byte("foo"), @@ -2359,6 +2413,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "account", Children: []resolve.KeyField{{Name: "id"}}}, {Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("foo"), @@ -2523,6 +2581,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "account", Children: []resolve.KeyField{{Name: "id"}}}, {Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Path: []string{"user"}, Fields: []*resolve.Field{ { @@ -2541,6 +2603,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "account", Children: []resolve.KeyField{{Name: "id"}}}, {Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Path: []string{"otherUser"}, Fields: []*resolve.Field{ { @@ -2711,6 +2777,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "account", Children: []resolve.KeyField{{Name: "id"}}}, {Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Path: []string{"user"}, Fields: []*resolve.Field{ { @@ -2729,6 +2799,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "account", Children: []resolve.KeyField{{Name: "id"}}}, {Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Path: []string{"otherUser"}, Fields: []*resolve.Field{ { @@ -2849,6 +2923,10 @@ func TestGraphQLDataSourceFederation(t 
*testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "account", Children: []resolve.KeyField{{Name: "id"}}}, {Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -2972,6 +3050,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "account", Children: []resolve.KeyField{{Name: "id"}}}, {Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -2999,6 +3081,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("type"), @@ -3306,6 +3392,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "account", Children: []resolve.KeyField{{Name: "id"}}}, {Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Path: []string{"user"}, Fields: []*resolve.Field{ { @@ -3324,6 +3414,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "account", Children: []resolve.KeyField{{Name: "id"}}}, {Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Path: []string{"otherUser"}, Fields: []*resolve.Field{ { @@ -3902,6 +3996,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, SourceName: "user.service", TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: 
[]resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("account"), @@ -3914,6 +4012,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { IDs: []string{"user.service"}, Names: []string{"user.service"}, }, + CacheAnalyticsHash: true, }, Value: &resolve.Object{ Path: []string{"account"}, @@ -3923,6 +4022,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, SourceName: "user.service", TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}, {Name: "info"}, {Name: "{a"}, {Name: "b}"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("address"), @@ -3935,6 +4038,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { IDs: []string{"user.service"}, Names: []string{"user.service"}, }, + CacheAnalyticsHash: true, }, Value: &resolve.Object{ Path: []string{"address"}, @@ -3944,6 +4048,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, SourceName: "user.service", TypeName: "Address", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("fullAddress"), @@ -3959,6 +4067,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { IDs: []string{"account.service"}, Names: []string{"account.service"}, }, + CacheAnalyticsHash: true, }, }, }, @@ -4251,6 +4360,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("account"), @@ -4261,6 +4374,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + 
KeyFields: []resolve.KeyField{{Name: "id"}, {Name: "info"}, {Name: "{a"}, {Name: "b}"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("secretAddress"), @@ -4271,6 +4388,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Address": {}, }, TypeName: "Address", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("secretLine"), @@ -4499,6 +4620,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("account"), @@ -4509,6 +4634,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}, {Name: "info"}, {Name: "{a"}, {Name: "b}"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("deliveryAddress"), @@ -4519,6 +4648,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Address": {}, }, TypeName: "Address", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("line1"), @@ -4614,6 +4747,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("oldAccount"), @@ -4624,6 +4761,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Account": {}, }, TypeName: 
"Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}, {Name: "info"}, {Name: "{a"}, {Name: "b}"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("deliveryAddress"), @@ -4634,6 +4775,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Address": {}, }, TypeName: "Address", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("line1"), @@ -4789,6 +4934,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("account"), @@ -4799,6 +4948,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}, {Name: "info"}, {Name: "{a"}, {Name: "b}"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("providedAddress"), @@ -4809,6 +4962,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Address": {}, }, TypeName: "Address", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("secretLine"), @@ -5119,6 +5276,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("fullName"), @@ -5266,6 +5427,10 @@ func 
TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("firstName"), @@ -5423,6 +5588,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("fullName"), @@ -5580,6 +5749,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("firstName"), @@ -5747,17 +5920,8 @@ func TestGraphQLDataSourceFederation(t *testing.T) { } t.Run("select field with requires directive", func(t *testing.T) { - RunWithPermutations( - t, - definition, - ` - query Entities { - entities { - property - } - }`, - "Entities", - &plan.SynchronousResponsePlan{ + expectedPlan := func(keyFields []resolve.KeyField) *plan.SynchronousResponsePlan { + return &plan.SynchronousResponsePlan{ Response: &resolve.GraphQLResponse{ Fetches: resolve.Sequence( resolve.Single(&resolve.SingleFetch{ @@ -5876,6 +6040,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Entity": {}, }, TypeName: "Entity", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: keyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("property"), @@ -5890,6 +6058,22 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, }, + } + } + + RunWithPermutationsVariants( + t, + definition, + ` + query Entities { + entities { + property + } + }`, + "Entities", + []plan.Plan{ + 
expectedPlan([]resolve.KeyField{}), // permutation [0_1]: first subgraph (resolvable: false) processed first + expectedPlan([]resolve.KeyField{{Name: "id"}, {Name: "otherID"}}), // permutation [1_0]: second subgraph (resolvable: true) processed first }, planConfiguration, WithDefaultPostProcessor(), @@ -6188,6 +6372,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("fullName"), @@ -6333,6 +6521,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("firstName"), @@ -6695,6 +6887,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "UserList": {}, }, TypeName: "UserList", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("users"), @@ -6714,6 +6910,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -6729,6 +6929,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "FullName": {}, }, TypeName: "FullName", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -6899,6 +7103,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { 
"UserList": {}, }, TypeName: "UserList", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("users"), @@ -6918,6 +7126,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -6933,6 +7145,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "FullName": {}, }, TypeName: "FullName", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -7028,6 +7244,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("oldAccount"), @@ -7038,6 +7258,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}, {Name: "info"}, {Name: "{a"}, {Name: "b}"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("name"), @@ -7187,6 +7411,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("account"), @@ -7197,6 +7425,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Account": {}, }, 
TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}, {Name: "info"}, {Name: "{a"}, {Name: "b}"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("name"), @@ -7235,6 +7467,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Account": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}, {Name: "info"}, {Name: "{a"}, {Name: "b}"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("name"), @@ -7565,6 +7801,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -7575,6 +7815,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -7584,6 +7828,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: nil, Fields: []*resolve.Field{ { Name: []byte("url"), @@ -7687,6 +7932,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -7697,6 +7946,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: 
[]resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -7706,6 +7959,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: nil, Fields: []*resolve.Field{ { Name: []byte("__typename"), @@ -7819,6 +8073,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -7829,6 +8087,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -7838,6 +8100,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: nil, Fields: []*resolve.Field{ { Name: []byte("__typename"), @@ -8178,6 +8441,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -8188,6 +8455,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -8197,6 +8468,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: nil, Fields: 
[]*resolve.Field{ { Name: []byte("url"), @@ -8300,6 +8572,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -8310,6 +8586,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -8319,6 +8599,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: nil, Fields: []*resolve.Field{ { Name: []byte("__typename"), @@ -8469,6 +8750,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -8479,6 +8764,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -8488,6 +8777,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: nil, Fields: []*resolve.Field{ { Name: []byte("__typename"), @@ -8800,6 +9090,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + 
}, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -8810,6 +9104,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -8819,6 +9117,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: nil, Fields: []*resolve.Field{ { Name: []byte("url"), @@ -8885,6 +9184,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -8895,6 +9198,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -8904,6 +9211,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: nil, Fields: []*resolve.Field{ { Name: []byte("__typename"), @@ -8980,6 +9288,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -8990,6 +9302,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: 
map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -8999,6 +9315,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: nil, Fields: []*resolve.Field{ { Name: []byte("__typename"), @@ -9325,6 +9642,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -9335,6 +9656,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -9344,6 +9669,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: nil, Fields: []*resolve.Field{ { Name: []byte("url"), @@ -9410,6 +9736,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -9420,6 +9750,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -9429,6 +9763,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: nil, Fields: []*resolve.Field{ { Name: []byte("__typename"), @@ -9542,6 +9877,10 @@ 
func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -9552,6 +9891,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -9561,6 +9904,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: nil, Fields: []*resolve.Field{ { Name: []byte("__typename"), @@ -9740,6 +10084,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -9750,6 +10098,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -9759,6 +10111,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: nil, Fields: []*resolve.Field{ { Name: []byte("__typename"), @@ -9802,6 +10155,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Hosting": {}, }, TypeName: "Hosting", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}, {Name: "category"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { 
Name: []byte("category"), @@ -10023,6 +10380,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "details"}, {Name: "{forename"}, {Name: "surname}"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("details"), @@ -10032,6 +10393,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Details": {}, }, TypeName: "Details", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "forename"}, {Name: "surname"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("__typename"), @@ -10097,6 +10462,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "details"}, {Name: "{forename"}, {Name: "surname}"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("details"), @@ -10106,6 +10475,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Details": {}, }, TypeName: "Details", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "forename"}, {Name: "surname"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("forename"), @@ -10175,6 +10548,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "details"}, {Name: "{forename"}, {Name: "surname}"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("details"), @@ -10184,6 +10561,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Details": {}, }, TypeName: "Details", + CacheAnalytics: 
&resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "forename"}, {Name: "surname"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("middlename"), @@ -10250,6 +10631,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "details"}, {Name: "{forename"}, {Name: "surname}"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("details"), @@ -10259,6 +10644,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Details": {}, }, TypeName: "Details", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "forename"}, {Name: "surname"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("__typename"), @@ -10550,6 +10939,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("details"), @@ -10641,6 +11034,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("details"), @@ -10743,6 +11140,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("details"), @@ -10884,6 +11285,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { 
"User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("details"), @@ -11021,6 +11426,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("details"), @@ -11178,6 +11587,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("details"), @@ -11549,6 +11962,13 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "Node", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("name"), @@ -11651,6 +12071,13 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "Node", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("some"), @@ -11660,6 +12087,10 @@ func 
TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("name"), @@ -11769,6 +12200,13 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "Node", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("title"), @@ -11919,6 +12357,13 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "Node", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("some"), @@ -11928,6 +12373,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("title"), @@ -11943,6 +12392,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: 
[]byte("title"), @@ -11965,6 +12418,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("title"), @@ -12072,6 +12529,13 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "Node", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("some"), @@ -12081,6 +12545,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("title"), @@ -12100,6 +12568,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -12324,6 +12796,13 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + 
}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -12750,6 +13229,23 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Moderator": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Admin": { + KeyFields: []resolve.KeyField{{Name: "adminID"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, + "Moderator": { + KeyFields: []resolve.KeyField{{Name: "moderatorID"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, + "User": { + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, + }, + }, Fields: []*resolve.Field{ { Name: []byte("name"), @@ -12963,6 +13459,23 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "Account", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Admin": { + KeyFields: []resolve.KeyField{{Name: "adminID"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, + "Moderator": { + KeyFields: []resolve.KeyField{{Name: "moderatorID"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, + "User": { + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, + }, + }, Fields: []*resolve.Field{ { Name: []byte("name"), @@ -12993,6 +13506,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Address": {}, }, TypeName: "Address", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("zip"), @@ -13136,6 +13653,23 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Moderator": {}, }, TypeName: "Node", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: 
map[string]*resolve.ObjectCacheAnalytics{ + "Admin": { + KeyFields: []resolve.KeyField{{Name: "adminID"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, + "Moderator": { + KeyFields: []resolve.KeyField{{Name: "moderatorID"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, + "User": { + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, + }, + }, Fields: []*resolve.Field{ { Name: []byte("title"), @@ -13700,6 +14234,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "NodesWrapper": {}, }, TypeName: "NodesWrapper", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("nodes"), @@ -13714,6 +14252,13 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "Node", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "Admin": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("title"), @@ -13746,6 +14291,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "AdditionalInfo": {}, }, TypeName: "AdditionalInfo", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, }, OnTypeNames: [][]byte{[]byte("Admin")}, }, @@ -13780,6 +14329,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "AdditionalInfo": {}, }, TypeName: "AdditionalInfo", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, }, 
OnTypeNames: [][]byte{[]byte("User")}, }, @@ -14032,22 +14585,8 @@ func TestGraphQLDataSourceFederation(t *testing.T) { DisableResolveFieldPositions: true, } - RunWithPermutations( - t, - definition, - ` - query User { - user { - id - name - title - address { - country - } - } - }`, - "User", - &plan.SynchronousResponsePlan{ + expectedPlanFn := func(keyFields []resolve.KeyField) *plan.SynchronousResponsePlan { + return &plan.SynchronousResponsePlan{ Response: &resolve.GraphQLResponse{ Fetches: resolve.Sequence( resolve.Single(&resolve.SingleFetch{ @@ -14181,6 +14720,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: keyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -14224,7 +14767,43 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, }, - }, + } + } + + // CacheAnalytics KeyFields depend on which datasource is processed first. 
+ // Lexicographic order: 0-5 start with idx 0, 6-11 idx 1, 12-17 idx 2, 18-23 idx 3 + // idx 0 (first-service, @key "id"): {id} + // idx 1 (second-service, @key "id" + @key "uuid"): {id, uuid} + // idx 2 (third-service, @key "uuid"): {uuid} + // idx 3 (fourth-service, @key "id"): {id} + keyVariants := [][]resolve.KeyField{ + {{Name: "id"}}, // idx 0 first + {{Name: "id"}, {Name: "uuid"}}, // idx 1 first + {{Name: "uuid"}}, // idx 2 first + {{Name: "id"}}, // idx 3 first + } + + variants := make([]plan.Plan, 24) + for i := range variants { + variants[i] = expectedPlanFn(keyVariants[i/6]) + } + + RunWithPermutationsVariants( + t, + definition, + ` + query User { + user { + id + name + title + address { + country + } + } + }`, + "User", + variants, planConfiguration, WithDefaultPostProcessor(), ) @@ -14452,20 +15031,8 @@ func TestGraphQLDataSourceFederation(t *testing.T) { } t.Run("only fields", func(t *testing.T) { - RunWithPermutations( - t, - definition, - ` - query User { - user { - field1 - field2 - field3 - field4 - } - }`, - "User", - &plan.SynchronousResponsePlan{ + expectedPlanFn := func(keyFields []resolve.KeyField) plan.Plan { + return &plan.SynchronousResponsePlan{ Response: &resolve.GraphQLResponse{ Fetches: resolve.Sequence( resolve.Single(&resolve.SingleFetch{ @@ -14599,6 +15166,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: keyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("field1"), @@ -14630,24 +15201,49 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, }, - }, - planConfiguration, - WithDefaultPostProcessor(), - ) - }) + } + } - t.Run("field from the last subgraph in a chain", func(t *testing.T) { - RunWithPermutations( + // CacheAnalytics KeyFields depend on which datasource is processed first. 
+ // Lexicographic order: 0-5 start with idx 0, 6-11 idx 1, 12-17 idx 2, 18-23 idx 3 + // idx 0 (first-service, @key "key1"): {key1} + // idx 1 (second-service, @key "key1" + @key "key2"): {key1, key2} + // idx 2 (third-service, @key "key2" + @key "key3"): {key2, key3} + // idx 3 (fourth-service, @key "key3"): {key3} + keyVariants := [][]resolve.KeyField{ + {{Name: "key1"}}, // idx 0 first + {{Name: "key1"}, {Name: "key2"}}, // idx 1 first + {{Name: "key2"}, {Name: "key3"}}, // idx 2 first + {{Name: "key3"}}, // idx 3 first + } + + variants := make([]plan.Plan, 24) + for i := range variants { + variants[i] = expectedPlanFn(keyVariants[i/6]) + } + + RunWithPermutationsVariants( t, definition, ` query User { user { + field1 + field2 + field3 field4 } }`, "User", - &plan.SynchronousResponsePlan{ + variants, + planConfiguration, + WithDefaultPostProcessor(), + ) + }) + + t.Run("field from the last subgraph in a chain", func(t *testing.T) { + expectedPlanFn := func(keyFields []resolve.KeyField) plan.Plan { + return &plan.SynchronousResponsePlan{ Response: &resolve.GraphQLResponse{ Fetches: resolve.Sequence( resolve.Single(&resolve.SingleFetch{ @@ -14781,6 +15377,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: keyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("field4"), @@ -14794,30 +15394,40 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, }, - }, - planConfiguration, - WithDefaultPostProcessor(), - ) - }) + } + } - t.Run("fields and keys", func(t *testing.T) { - RunWithPermutations( - t, - definition, - ` - query User { + keyVariants := [][]resolve.KeyField{ + {{Name: "key1"}}, // idx 0 first + {{Name: "key1"}, {Name: "key2"}}, // idx 1 first + {{Name: "key2"}, {Name: "key3"}}, // idx 2 first + {{Name: "key3"}}, // idx 3 first + } + + variants := make([]plan.Plan, 24) + for i := range 
variants { + variants[i] = expectedPlanFn(keyVariants[i/6]) + } + + RunWithPermutationsVariants( + t, + definition, + ` + query User { user { - key1 - key2 - key3 - field1 - field2 - field3 field4 } }`, "User", - &plan.SynchronousResponsePlan{ + variants, + planConfiguration, + WithDefaultPostProcessor(), + ) + }) + + t.Run("fields and keys", func(t *testing.T) { + expectedPlanFn := func(keyFields []resolve.KeyField) plan.Plan { + return &plan.SynchronousResponsePlan{ Response: &resolve.GraphQLResponse{ Fetches: resolve.Sequence( resolve.Single(&resolve.SingleFetch{ @@ -14951,6 +15561,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: keyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("key1"), @@ -15000,7 +15614,38 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, }, - }, + } + } + + keyVariants := [][]resolve.KeyField{ + {{Name: "key1"}}, // idx 0 first + {{Name: "key1"}, {Name: "key2"}}, // idx 1 first + {{Name: "key2"}, {Name: "key3"}}, // idx 2 first + {{Name: "key3"}}, // idx 3 first + } + + variants := make([]plan.Plan, 24) + for i := range variants { + variants[i] = expectedPlanFn(keyVariants[i/6]) + } + + RunWithPermutationsVariants( + t, + definition, + ` + query User { + user { + key1 + key2 + key3 + field1 + field2 + field3 + field4 + } + }`, + "User", + variants, planConfiguration, WithDefaultPostProcessor(), ) @@ -15179,18 +15824,8 @@ func TestGraphQLDataSourceFederation(t *testing.T) { } t.Run("do not jump to resolvable false", func(t *testing.T) { - RunWithPermutations( - t, - definition, - ` - query User { - user { - id - name - } - }`, - "User", - &plan.SynchronousResponsePlan{ + expectedPlanFn := func(keyFields []resolve.KeyField) plan.Plan { + return &plan.SynchronousResponsePlan{ Response: &resolve.GraphQLResponse{ Fetches: resolve.Sequence( 
resolve.Single(&resolve.SingleFetch{ @@ -15250,6 +15885,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: keyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -15269,25 +15908,44 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, }, - }, - planConfiguration, - WithDefaultPostProcessor(), - ) - }) + } + } - t.Run("jump from resolvable false", func(t *testing.T) { - RunWithPermutations( + // CacheAnalytics KeyFields depend on which datasource is processed first. + // idx 0 (first-service, @key "id" resolvable): {id} + // idx 1 (second-service, @key "id" resolvable: false): {} (empty - non-resolvable key) + // idx 2 (third-service, @key "id" resolvable): {id} + keyVariants := [][]resolve.KeyField{ + {{Name: "id"}}, // idx 0 first + {}, // idx 1 first (resolvable: false) + {{Name: "id"}}, // idx 2 first + } + + variants := make([]plan.Plan, 6) + for i := range variants { + variants[i] = expectedPlanFn(keyVariants[i/2]) + } + + RunWithPermutationsVariants( t, definition, ` query User { - userWithName { + user { + id name - title } }`, "User", - &plan.SynchronousResponsePlan{ + variants, + planConfiguration, + WithDefaultPostProcessor(), + ) + }) + + t.Run("jump from resolvable false", func(t *testing.T) { + expectedPlanFn := func(keyFields []resolve.KeyField) plan.Plan { + return &plan.SynchronousResponsePlan{ Response: &resolve.GraphQLResponse{ Fetches: resolve.Sequence( resolve.Single(&resolve.SingleFetch{ @@ -15347,6 +16005,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: keyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("name"), @@ -15366,7 +16028,32 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, }, - 
}, + } + } + + keyVariants := [][]resolve.KeyField{ + {{Name: "id"}}, // idx 0 first + {}, // idx 1 first (resolvable: false) + {{Name: "id"}}, // idx 2 first + } + + variants := make([]plan.Plan, 6) + for i := range variants { + variants[i] = expectedPlanFn(keyVariants[i/2]) + } + + RunWithPermutationsVariants( + t, + definition, + ` + query User { + userWithName { + name + title + } + }`, + "User", + variants, planConfiguration, WithDefaultPostProcessor(), ) @@ -15491,20 +16178,8 @@ func TestGraphQLDataSourceFederation(t *testing.T) { } t.Run("query", func(t *testing.T) { - RunWithPermutations( - t, - definition, - ` - query Query { - entity { - id - name - age - } - } - `, - "Query", - &plan.SynchronousResponsePlan{ + expectedPlanFn := func(keyFields []resolve.KeyField) plan.Plan { + return &plan.SynchronousResponsePlan{ Response: &resolve.GraphQLResponse{ Fetches: resolve.Sequence( resolve.Single(&resolve.SingleFetch{ @@ -15564,6 +16239,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Entity": {}, }, TypeName: "Entity", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: keyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -15589,7 +16268,35 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, }, - }, + } + } + + // idx 0 (first-service, DisableEntityResolver): {} (empty) + // idx 1 (second-service, @key "id"): {id} + keyVariants := [][]resolve.KeyField{ + {}, // idx 0 first (disabled entity resolver) + {{Name: "id"}}, // idx 1 first + } + + variants := make([]plan.Plan, 2) + for i := range variants { + variants[i] = expectedPlanFn(keyVariants[i]) + } + + RunWithPermutationsVariants( + t, + definition, + ` + query Query { + entity { + id + name + age + } + } + `, + "Query", + variants, planConfiguration, WithDefaultPostProcessor(), ) @@ -15726,21 +16433,8 @@ func TestGraphQLDataSourceFederation(t *testing.T) { } t.Run("query", func(t *testing.T) { - 
RunWithPermutations( - t, - definition, - ` - query Query { - entity { - id - name - isEntity - age - } - } - `, - "Query", - &plan.SynchronousResponsePlan{ + expectedPlanFn := func(keyFields []resolve.KeyField) plan.Plan { + return &plan.SynchronousResponsePlan{ Response: &resolve.GraphQLResponse{ Fetches: resolve.Sequence( resolve.Single(&resolve.SingleFetch{ @@ -15800,6 +16494,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Entity": {}, }, TypeName: "Entity", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: keyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -15831,7 +16529,36 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, }, - }, + } + } + + // idx 0 (first-service, DisableEntityResolver on both keys): {} (empty) + // idx 1 (second-service, @key "id" + @key "name"): {id, name} + keyVariants := [][]resolve.KeyField{ + {}, // idx 0 first + {{Name: "id"}, {Name: "name"}}, // idx 1 first + } + + variants := make([]plan.Plan, 2) + for i := range variants { + variants[i] = expectedPlanFn(keyVariants[i]) + } + + RunWithPermutationsVariants( + t, + definition, + ` + query Query { + entity { + id + name + isEntity + age + } + } + `, + "Query", + variants, planConfiguration, WithDefaultPostProcessor(), ) @@ -16297,6 +17024,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { entityTwoFetchTwo resolve.Fetch, entityThreeFetchOne resolve.Fetch, entityThreeFetchTwo resolve.Fetch, + keyFields []resolve.KeyField, ) plan.Plan { return &plan.SynchronousResponsePlan{ Response: &resolve.GraphQLResponse{ @@ -16358,6 +17086,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Entity": {}, }, TypeName: "Entity", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: keyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -16413,6 +17145,10 @@ func TestGraphQLDataSourceFederation(t 
*testing.T) { "Entity": {}, }, TypeName: "Entity", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: keyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -16468,6 +17204,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Entity": {}, }, TypeName: "Entity", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: keyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -16520,25 +17260,57 @@ func TestGraphQLDataSourceFederation(t *testing.T) { } } - variant1 := expectedPlan( - entityOneNestedFetch2Second(1, true), entityOneNestedFetch2Third(2, true), - entityTwoNestedFetch2First(4), entityTwoNestedFetch2Third(5), - entityThreeNestedFetch2Second(7), entityThreeNestedFetch2First(8), - ) - - variant2 := expectedPlan( - entityOneNestedFetch2Second(1, false), entityOneNestedFetch2Third(2, false), - entityTwoNestedFetch2First(4), entityTwoNestedFetch2Third(5), - entityThreeNestedFetch2Second(7), entityThreeNestedFetch2First(8), - ) + // CacheAnalytics KeyFields depend on which datasource is processed first. 
+ // idx 0 (first-service, @key "id"): {id} + // idx 1 (second-service, @key "id" + @key "uuid"): {id, uuid} + // idx 2 (third-service, @key "name"): {name} + keyIdx0 := []resolve.KeyField{{Name: "id"}} + keyIdx1 := []resolve.KeyField{{Name: "id"}, {Name: "uuid"}} + keyIdx2 := []resolve.KeyField{{Name: "name"}} expectedPlans := []plan.Plan{ - variant1, - variant2, - variant1, - variant1, - variant2, - variant2, + // perm [0_1_2]: idx 0 first, fetch variant1 + expectedPlan( + entityOneNestedFetch2Second(1, true), entityOneNestedFetch2Third(2, true), + entityTwoNestedFetch2First(4), entityTwoNestedFetch2Third(5), + entityThreeNestedFetch2Second(7), entityThreeNestedFetch2First(8), + keyIdx0, + ), + // perm [0_2_1]: idx 0 first, fetch variant2 + expectedPlan( + entityOneNestedFetch2Second(1, false), entityOneNestedFetch2Third(2, false), + entityTwoNestedFetch2First(4), entityTwoNestedFetch2Third(5), + entityThreeNestedFetch2Second(7), entityThreeNestedFetch2First(8), + keyIdx0, + ), + // perm [1_0_2]: idx 1 first, fetch variant1 + expectedPlan( + entityOneNestedFetch2Second(1, true), entityOneNestedFetch2Third(2, true), + entityTwoNestedFetch2First(4), entityTwoNestedFetch2Third(5), + entityThreeNestedFetch2Second(7), entityThreeNestedFetch2First(8), + keyIdx1, + ), + // perm [1_2_0]: idx 1 first, fetch variant1 + expectedPlan( + entityOneNestedFetch2Second(1, true), entityOneNestedFetch2Third(2, true), + entityTwoNestedFetch2First(4), entityTwoNestedFetch2Third(5), + entityThreeNestedFetch2Second(7), entityThreeNestedFetch2First(8), + keyIdx1, + ), + // perm [2_0_1]: idx 2 first, fetch variant2 + expectedPlan( + entityOneNestedFetch2Second(1, false), entityOneNestedFetch2Third(2, false), + entityTwoNestedFetch2First(4), entityTwoNestedFetch2Third(5), + entityThreeNestedFetch2Second(7), entityThreeNestedFetch2First(8), + keyIdx2, + ), + // perm [2_1_0]: idx 2 first, fetch variant2 + expectedPlan( + entityOneNestedFetch2Second(1, false), entityOneNestedFetch2Third(2, 
false), + entityTwoNestedFetch2First(4), entityTwoNestedFetch2Third(5), + entityThreeNestedFetch2Second(7), entityThreeNestedFetch2First(8), + keyIdx2, + ), } RunWithPermutationsVariants( @@ -16583,112 +17355,118 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }) t.Run("query last field in a chain first-second-third", func(t *testing.T) { - expectedPlan := &plan.SynchronousResponsePlan{ - Response: &resolve.GraphQLResponse{ - Fetches: resolve.Sequence( - resolve.Single(&resolve.SingleFetch{ - FetchDependencies: resolve.FetchDependencies{ - FetchID: 0, - }, - FetchConfiguration: resolve.FetchConfiguration{ - Input: `{"method":"POST","url":"http://first.service","body":{"query":"{entityOne {__typename id}}"}}`, - PostProcessing: DefaultPostProcessingConfiguration, - DataSource: &Source{}, - }, - DataSourceIdentifier: []byte("graphql_datasource.Source"), - }), + expectedPlanFn := func(keyFields []resolve.KeyField) plan.Plan { + return &plan.SynchronousResponsePlan{ + Response: &resolve.GraphQLResponse{ + Fetches: resolve.Sequence( + resolve.Single(&resolve.SingleFetch{ + FetchDependencies: resolve.FetchDependencies{ + FetchID: 0, + }, + FetchConfiguration: resolve.FetchConfiguration{ + Input: `{"method":"POST","url":"http://first.service","body":{"query":"{entityOne {__typename id}}"}}`, + PostProcessing: DefaultPostProcessingConfiguration, + DataSource: &Source{}, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }), - resolve.SingleWithPath(&resolve.SingleFetch{ - FetchDependencies: resolve.FetchDependencies{ - FetchID: 1, - DependsOnFetchIDs: []int{0}, - }, FetchConfiguration: resolve.FetchConfiguration{ - RequiresEntityBatchFetch: false, - RequiresEntityFetch: true, - Input: `{"method":"POST","url":"http://second.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... 
on Entity {__typename name}}}","variables":{"representations":[$$0$$]}}}`, - DataSource: &Source{}, - SetTemplateOutputToNullOnVariableNull: true, - Variables: []resolve.Variable{ - &resolve.ResolvableObjectVariable{ - Renderer: resolve.NewGraphQLVariableResolveRenderer(&resolve.Object{ - Nullable: true, - Fields: []*resolve.Field{ - { - Name: []byte("__typename"), - Value: &resolve.String{ - Path: []string{"__typename"}, + resolve.SingleWithPath(&resolve.SingleFetch{ + FetchDependencies: resolve.FetchDependencies{ + FetchID: 1, + DependsOnFetchIDs: []int{0}, + }, FetchConfiguration: resolve.FetchConfiguration{ + RequiresEntityBatchFetch: false, + RequiresEntityFetch: true, + Input: `{"method":"POST","url":"http://second.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Entity {__typename name}}}","variables":{"representations":[$$0$$]}}}`, + DataSource: &Source{}, + SetTemplateOutputToNullOnVariableNull: true, + Variables: []resolve.Variable{ + &resolve.ResolvableObjectVariable{ + Renderer: resolve.NewGraphQLVariableResolveRenderer(&resolve.Object{ + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{ + Path: []string{"__typename"}, + }, + OnTypeNames: [][]byte{[]byte("Entity")}, }, - OnTypeNames: [][]byte{[]byte("Entity")}, - }, - { - Name: []byte("id"), - Value: &resolve.Scalar{ - Path: []string{"id"}, + { + Name: []byte("id"), + Value: &resolve.Scalar{ + Path: []string{"id"}, + }, + OnTypeNames: [][]byte{[]byte("Entity")}, }, - OnTypeNames: [][]byte{[]byte("Entity")}, }, - }, - }), + }), + }, }, + PostProcessing: SingleEntityPostProcessingConfiguration, }, - PostProcessing: SingleEntityPostProcessingConfiguration, - }, - DataSourceIdentifier: []byte("graphql_datasource.Source"), - }, "entityOne", resolve.ObjectPath("entityOne")), - resolve.SingleWithPath(&resolve.SingleFetch{ - FetchDependencies: resolve.FetchDependencies{ - FetchID: 2, - 
DependsOnFetchIDs: []int{1}, - }, FetchConfiguration: resolve.FetchConfiguration{ - RequiresEntityBatchFetch: false, - RequiresEntityFetch: true, - Input: `{"method":"POST","url":"http://third.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Entity {__typename isImportant}}}","variables":{"representations":[$$0$$]}}}`, - DataSource: &Source{}, - SetTemplateOutputToNullOnVariableNull: true, - Variables: []resolve.Variable{ - &resolve.ResolvableObjectVariable{ - Renderer: resolve.NewGraphQLVariableResolveRenderer(&resolve.Object{ - Nullable: true, - Fields: []*resolve.Field{ - { - Name: []byte("__typename"), - Value: &resolve.String{ - Path: []string{"__typename"}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "entityOne", resolve.ObjectPath("entityOne")), + resolve.SingleWithPath(&resolve.SingleFetch{ + FetchDependencies: resolve.FetchDependencies{ + FetchID: 2, + DependsOnFetchIDs: []int{1}, + }, FetchConfiguration: resolve.FetchConfiguration{ + RequiresEntityBatchFetch: false, + RequiresEntityFetch: true, + Input: `{"method":"POST","url":"http://third.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... 
on Entity {__typename isImportant}}}","variables":{"representations":[$$0$$]}}}`, + DataSource: &Source{}, + SetTemplateOutputToNullOnVariableNull: true, + Variables: []resolve.Variable{ + &resolve.ResolvableObjectVariable{ + Renderer: resolve.NewGraphQLVariableResolveRenderer(&resolve.Object{ + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{ + Path: []string{"__typename"}, + }, + OnTypeNames: [][]byte{[]byte("Entity")}, }, - OnTypeNames: [][]byte{[]byte("Entity")}, - }, - { - Name: []byte("name"), - Value: &resolve.String{ - Path: []string{"name"}, + { + Name: []byte("name"), + Value: &resolve.String{ + Path: []string{"name"}, + }, + OnTypeNames: [][]byte{[]byte("Entity")}, }, - OnTypeNames: [][]byte{[]byte("Entity")}, }, - }, - }), + }), + }, }, + PostProcessing: SingleEntityPostProcessingConfiguration, }, - PostProcessing: SingleEntityPostProcessingConfiguration, - }, - DataSourceIdentifier: []byte("graphql_datasource.Source"), - }, "entityOne", resolve.ObjectPath("entityOne")), - ), - Data: &resolve.Object{ - Fields: []*resolve.Field{ - { - Name: []byte("entityOne"), - Value: &resolve.Object{ - Path: []string{"entityOne"}, - Nullable: false, - PossibleTypes: map[string]struct{}{ - "Entity": {}, - }, - TypeName: "Entity", - Fields: []*resolve.Field{ - { - Name: []byte("isImportant"), - Value: &resolve.Boolean{ - Path: []string{"isImportant"}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "entityOne", resolve.ObjectPath("entityOne")), + ), + Data: &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("entityOne"), + Value: &resolve.Object{ + Path: []string{"entityOne"}, + Nullable: false, + PossibleTypes: map[string]struct{}{ + "Entity": {}, + }, + TypeName: "Entity", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: keyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, + Fields: []*resolve.Field{ + { + Name: []byte("isImportant"), + Value: 
&resolve.Boolean{ + Path: []string{"isImportant"}, + }, }, }, }, @@ -16696,10 +17474,24 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, }, - }, + } } - RunWithPermutations( + // idx 0 (first-service, @key "id"): {id} + // idx 1 (second-service, @key "id" + @key "uuid"): {id, uuid} + // idx 2 (third-service, @key "name"): {name} + keyVariants := [][]resolve.KeyField{ + {{Name: "id"}}, // idx 0 first + {{Name: "id"}, {Name: "uuid"}}, // idx 1 first + {{Name: "name"}}, // idx 2 first + } + + variants := make([]plan.Plan, 6) + for i := range variants { + variants[i] = expectedPlanFn(keyVariants[i/2]) + } + + RunWithPermutationsVariants( t, definition, ` @@ -16710,118 +17502,124 @@ func TestGraphQLDataSourceFederation(t *testing.T) { } `, "Query", - expectedPlan, + variants, planConfiguration, WithDefaultCustomPostProcessor(postprocess.DisableResolveInputTemplates(), postprocess.DisableCreateConcreteSingleFetchTypes(), postprocess.DisableOrderSequenceByDependencies(), postprocess.DisableMergeFields()), ) }) t.Run("query last field in a chain third-second-first", func(t *testing.T) { - expectedPlan := &plan.SynchronousResponsePlan{ - Response: &resolve.GraphQLResponse{ - Fetches: resolve.Sequence( - resolve.Single(&resolve.SingleFetch{ - FetchDependencies: resolve.FetchDependencies{ - FetchID: 0, - }, - FetchConfiguration: resolve.FetchConfiguration{ - Input: `{"method":"POST","url":"http://third.service","body":{"query":"{entityThree {__typename uuid}}"}}`, - PostProcessing: DefaultPostProcessingConfiguration, - DataSource: &Source{}, - }, - DataSourceIdentifier: []byte("graphql_datasource.Source"), - }), - resolve.SingleWithPath(&resolve.SingleFetch{ - FetchDependencies: resolve.FetchDependencies{ - FetchID: 1, - DependsOnFetchIDs: []int{0}, - }, FetchConfiguration: resolve.FetchConfiguration{ - RequiresEntityBatchFetch: false, - RequiresEntityFetch: true, - Input: `{"method":"POST","url":"http://second.service","body":{"query":"query($representations: 
[_Any!]!){_entities(representations: $representations){... on Entity {__typename id}}}","variables":{"representations":[$$0$$]}}}`, - DataSource: &Source{}, - SetTemplateOutputToNullOnVariableNull: true, - Variables: []resolve.Variable{ - &resolve.ResolvableObjectVariable{ - Renderer: resolve.NewGraphQLVariableResolveRenderer(&resolve.Object{ - Nullable: true, - Fields: []*resolve.Field{ - { - Name: []byte("__typename"), - Value: &resolve.String{ - Path: []string{"__typename"}, + expectedPlanFn := func(keyFields []resolve.KeyField) plan.Plan { + return &plan.SynchronousResponsePlan{ + Response: &resolve.GraphQLResponse{ + Fetches: resolve.Sequence( + resolve.Single(&resolve.SingleFetch{ + FetchDependencies: resolve.FetchDependencies{ + FetchID: 0, + }, + FetchConfiguration: resolve.FetchConfiguration{ + Input: `{"method":"POST","url":"http://third.service","body":{"query":"{entityThree {__typename uuid}}"}}`, + PostProcessing: DefaultPostProcessingConfiguration, + DataSource: &Source{}, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }), + resolve.SingleWithPath(&resolve.SingleFetch{ + FetchDependencies: resolve.FetchDependencies{ + FetchID: 1, + DependsOnFetchIDs: []int{0}, + }, FetchConfiguration: resolve.FetchConfiguration{ + RequiresEntityBatchFetch: false, + RequiresEntityFetch: true, + Input: `{"method":"POST","url":"http://second.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... 
on Entity {__typename id}}}","variables":{"representations":[$$0$$]}}}`, + DataSource: &Source{}, + SetTemplateOutputToNullOnVariableNull: true, + Variables: []resolve.Variable{ + &resolve.ResolvableObjectVariable{ + Renderer: resolve.NewGraphQLVariableResolveRenderer(&resolve.Object{ + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{ + Path: []string{"__typename"}, + }, + OnTypeNames: [][]byte{[]byte("Entity")}, }, - OnTypeNames: [][]byte{[]byte("Entity")}, - }, - { - Name: []byte("uuid"), - Value: &resolve.Scalar{ - Path: []string{"uuid"}, + { + Name: []byte("uuid"), + Value: &resolve.Scalar{ + Path: []string{"uuid"}, + }, + OnTypeNames: [][]byte{[]byte("Entity")}, }, - OnTypeNames: [][]byte{[]byte("Entity")}, }, - }, - }), + }), + }, }, + PostProcessing: SingleEntityPostProcessingConfiguration, }, - PostProcessing: SingleEntityPostProcessingConfiguration, - }, - DataSourceIdentifier: []byte("graphql_datasource.Source"), - }, "entityThree", resolve.ObjectPath("entityThree")), - resolve.SingleWithPath(&resolve.SingleFetch{ - FetchDependencies: resolve.FetchDependencies{ - FetchID: 2, - DependsOnFetchIDs: []int{1}, - }, FetchConfiguration: resolve.FetchConfiguration{ - RequiresEntityBatchFetch: false, - RequiresEntityFetch: true, - Input: `{"method":"POST","url":"http://first.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... 
on Entity {__typename isEntity}}}","variables":{"representations":[$$0$$]}}}`, - DataSource: &Source{}, - SetTemplateOutputToNullOnVariableNull: true, - Variables: []resolve.Variable{ - &resolve.ResolvableObjectVariable{ - Renderer: resolve.NewGraphQLVariableResolveRenderer(&resolve.Object{ - Nullable: true, - Fields: []*resolve.Field{ - { - Name: []byte("__typename"), - Value: &resolve.String{ - Path: []string{"__typename"}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "entityThree", resolve.ObjectPath("entityThree")), + resolve.SingleWithPath(&resolve.SingleFetch{ + FetchDependencies: resolve.FetchDependencies{ + FetchID: 2, + DependsOnFetchIDs: []int{1}, + }, FetchConfiguration: resolve.FetchConfiguration{ + RequiresEntityBatchFetch: false, + RequiresEntityFetch: true, + Input: `{"method":"POST","url":"http://first.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Entity {__typename isEntity}}}","variables":{"representations":[$$0$$]}}}`, + DataSource: &Source{}, + SetTemplateOutputToNullOnVariableNull: true, + Variables: []resolve.Variable{ + &resolve.ResolvableObjectVariable{ + Renderer: resolve.NewGraphQLVariableResolveRenderer(&resolve.Object{ + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{ + Path: []string{"__typename"}, + }, + OnTypeNames: [][]byte{[]byte("Entity")}, }, - OnTypeNames: [][]byte{[]byte("Entity")}, - }, - { - Name: []byte("id"), - Value: &resolve.Scalar{ - Path: []string{"id"}, + { + Name: []byte("id"), + Value: &resolve.Scalar{ + Path: []string{"id"}, + }, + OnTypeNames: [][]byte{[]byte("Entity")}, }, - OnTypeNames: [][]byte{[]byte("Entity")}, }, - }, - }), + }), + }, }, + PostProcessing: SingleEntityPostProcessingConfiguration, }, - PostProcessing: SingleEntityPostProcessingConfiguration, - }, - DataSourceIdentifier: []byte("graphql_datasource.Source"), - }, "entityThree", 
resolve.ObjectPath("entityThree")), - ), - Data: &resolve.Object{ - Fields: []*resolve.Field{ - { - Name: []byte("entityThree"), - Value: &resolve.Object{ - Path: []string{"entityThree"}, - Nullable: false, - PossibleTypes: map[string]struct{}{ - "Entity": {}, - }, - TypeName: "Entity", - Fields: []*resolve.Field{ - { - Name: []byte("isEntity"), - Value: &resolve.Boolean{ - Path: []string{"isEntity"}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "entityThree", resolve.ObjectPath("entityThree")), + ), + Data: &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("entityThree"), + Value: &resolve.Object{ + Path: []string{"entityThree"}, + Nullable: false, + PossibleTypes: map[string]struct{}{ + "Entity": {}, + }, + TypeName: "Entity", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: keyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, + Fields: []*resolve.Field{ + { + Name: []byte("isEntity"), + Value: &resolve.Boolean{ + Path: []string{"isEntity"}, + }, }, }, }, @@ -16829,10 +17627,21 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, }, - }, + } } - RunWithPermutations( + keyVariants := [][]resolve.KeyField{ + {{Name: "id"}}, // idx 0 first + {{Name: "id"}, {Name: "uuid"}}, // idx 1 first + {{Name: "name"}}, // idx 2 first + } + + variants := make([]plan.Plan, 6) + for i := range variants { + variants[i] = expectedPlanFn(keyVariants[i/2]) + } + + RunWithPermutationsVariants( t, definition, ` @@ -16843,7 +17652,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { } `, "Query", - expectedPlan, + variants, planConfiguration, WithDefaultCustomPostProcessor(postprocess.DisableResolveInputTemplates(), postprocess.DisableCreateConcreteSingleFetchTypes(), postprocess.DisableOrderSequenceByDependencies(), postprocess.DisableMergeFields()), ) @@ -17038,6 +17847,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: 
&resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -17176,6 +17989,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -17201,6 +18018,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -17530,21 +18351,8 @@ func TestGraphQLDataSourceFederation(t *testing.T) { } t.Run("query provided external fields and use them as a conditional implicit key", func(t *testing.T) { - RunWithPermutations( - t, - definition, - ` - query User { - user { - hostedImageWithProvides { - image { - cdnUrl - } - } - } - }`, - "User", - &plan.SynchronousResponsePlan{ + expectedPlanFn := func(imageKeyFields []resolve.KeyField) plan.Plan { + return &plan.SynchronousResponsePlan{ Response: &resolve.GraphQLResponse{ Fetches: resolve.Sequence( resolve.Single(&resolve.SingleFetch{ @@ -17641,6 +18449,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImageWithProvides"), @@ -17651,6 +18463,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: 
map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -17660,6 +18476,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: imageKeyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("cdnUrl"), @@ -17679,29 +18499,63 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, }, - }, - planConfiguration, - WithDefaultPostProcessor(), - ) - }) + } + } - t.Run("do not query external conditional fields - Image.id key field is present in a query", func(t *testing.T) { - RunWithPermutations( + // Image has keys on ds idx 1 (disabled), idx 2 (disabled), idx 3 (resolvable). + // Image gets {id} when idx 3 is the first Image-key-contributing ds in the permutation. + imageID := []resolve.KeyField{{Name: "id"}} + imageEmpty := []resolve.KeyField{} + variants := []plan.Plan{ + expectedPlanFn(imageEmpty), // [0_1_2_3]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [0_1_3_2]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [0_2_1_3]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageEmpty), // [0_2_3_1]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageID), // [0_3_1_2]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageID), // [0_3_2_1]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageEmpty), // [1_0_2_3]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [1_0_3_2]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [1_2_0_3]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [1_2_3_0]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [1_3_0_2]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [1_3_2_0]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [2_0_1_3]: first Image ds = idx 2 
(disabled) + expectedPlanFn(imageEmpty), // [2_0_3_1]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageEmpty), // [2_1_0_3]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageEmpty), // [2_1_3_0]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageEmpty), // [2_3_0_1]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageEmpty), // [2_3_1_0]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageID), // [3_0_1_2]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageID), // [3_0_2_1]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageID), // [3_1_0_2]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageID), // [3_1_2_0]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageID), // [3_2_0_1]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageID), // [3_2_1_0]: first Image ds = idx 3 (resolvable) + } + + RunWithPermutationsVariants( t, definition, ` query User { user { - hostedImage { + hostedImageWithProvides { image { - id cdnUrl } } } }`, "User", - &plan.SynchronousResponsePlan{ + variants, + planConfiguration, + WithDefaultPostProcessor(), + ) + }) + + t.Run("do not query external conditional fields - Image.id key field is present in a query", func(t *testing.T) { + expectedPlanFn := func(imageKeyFields []resolve.KeyField) plan.Plan { + return &plan.SynchronousResponsePlan{ Response: &resolve.GraphQLResponse{ Fetches: resolve.Sequence( resolve.Single(&resolve.SingleFetch{ @@ -17798,6 +18652,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -17808,6 +18666,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: 
"id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -17817,6 +18679,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: imageKeyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -17842,7 +18708,54 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, }, - }, + } + } + + imageID := []resolve.KeyField{{Name: "id"}} + imageEmpty := []resolve.KeyField{} + variants := []plan.Plan{ + expectedPlanFn(imageEmpty), // [0_1_2_3]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [0_1_3_2]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [0_2_1_3]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageEmpty), // [0_2_3_1]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageID), // [0_3_1_2]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageID), // [0_3_2_1]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageEmpty), // [1_0_2_3]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [1_0_3_2]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [1_2_0_3]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [1_2_3_0]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [1_3_0_2]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [1_3_2_0]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [2_0_1_3]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageEmpty), // [2_0_3_1]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageEmpty), // [2_1_0_3]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageEmpty), // [2_1_3_0]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageEmpty), // [2_3_0_1]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageEmpty), // 
[2_3_1_0]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageID), // [3_0_1_2]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageID), // [3_0_2_1]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageID), // [3_1_0_2]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageID), // [3_1_2_0]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageID), // [3_2_0_1]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageID), // [3_2_1_0]: first Image ds = idx 3 (resolvable) + } + + RunWithPermutationsVariants( + t, + definition, + ` + query User { + user { + hostedImage { + image { + id + cdnUrl + } + } + } + }`, + "User", + variants, planConfiguration, WithDefaultPostProcessor(), ) @@ -17862,21 +18775,8 @@ func TestGraphQLDataSourceFederation(t *testing.T) { // TODO: implement same kind of test but with HostedImage type as union and interface // TODO: add test when parent nodes are shareable and should be selected basic on keys to child - RunWithPermutations( - t, - definition, - ` - query User { - user { - hostedImage { - image { - cdnUrl - } - } - } - }`, - "User", - &plan.SynchronousResponsePlan{ + expectedPlanFn := func(imageKeyFields []resolve.KeyField) plan.Plan { + return &plan.SynchronousResponsePlan{ Response: &resolve.GraphQLResponse{ Fetches: resolve.Sequence( resolve.Single(&resolve.SingleFetch{ @@ -17973,6 +18873,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -17983,6 +18887,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { 
Name: []byte("image"), @@ -17992,6 +18900,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: imageKeyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("cdnUrl"), @@ -18011,14 +18923,60 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, }, - }, + } + } + + imageID := []resolve.KeyField{{Name: "id"}} + imageEmpty := []resolve.KeyField{} + variants := []plan.Plan{ + expectedPlanFn(imageEmpty), // [0_1_2_3]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [0_1_3_2]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [0_2_1_3]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageEmpty), // [0_2_3_1]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageID), // [0_3_1_2]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageID), // [0_3_2_1]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageEmpty), // [1_0_2_3]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [1_0_3_2]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [1_2_0_3]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [1_2_3_0]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [1_3_0_2]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [1_3_2_0]: first Image ds = idx 1 (disabled) + expectedPlanFn(imageEmpty), // [2_0_1_3]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageEmpty), // [2_0_3_1]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageEmpty), // [2_1_0_3]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageEmpty), // [2_1_3_0]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageEmpty), // [2_3_0_1]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageEmpty), // [2_3_1_0]: first Image ds = idx 2 (disabled) + expectedPlanFn(imageID), // [3_0_1_2]: first Image ds 
= idx 3 (resolvable) + expectedPlanFn(imageID), // [3_0_2_1]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageID), // [3_1_0_2]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageID), // [3_1_2_0]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageID), // [3_2_0_1]: first Image ds = idx 3 (resolvable) + expectedPlanFn(imageID), // [3_2_1_0]: first Image ds = idx 3 (resolvable) + } + + RunWithPermutationsVariants( + t, + definition, + ` + query User { + user { + hostedImage { + image { + cdnUrl + } + } + } + }`, + "User", + variants, planConfiguration, WithDefaultPostProcessor(), ) }) t.Run("it is allowed to query a typename even if other fields are external", func(t *testing.T) { - expectedPlan := func(service string) plan.Plan { + expectedPlan := func(service string, imageKeyFields []resolve.KeyField) plan.Plan { return &plan.SynchronousResponsePlan{ Response: &resolve.GraphQLResponse{ Fetches: resolve.Sequence( @@ -18079,6 +19037,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -18089,6 +19051,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -18098,6 +19064,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: imageKeyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("__typename"), @@ -18121,8 +19091,8 @@ func TestGraphQLDataSourceFederation(t *testing.T) 
{ } } - variant1 := expectedPlan("http://third.service") - variant2 := expectedPlan("http://second.service") + imageID := []resolve.KeyField{{Name: "id"}} + imageEmpty := []resolve.KeyField{} RunWithPermutationsVariants( t, @@ -18139,30 +19109,30 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }`, "User", []plan.Plan{ - variant2, - variant2, - variant1, - variant1, - variant2, - variant1, - variant2, - variant2, - variant2, - variant2, - variant2, - variant2, - variant1, - variant1, - variant1, - variant1, - variant1, - variant1, - variant2, - variant1, - variant2, - variant2, - variant1, - variant1, + expectedPlan("http://second.service", imageEmpty), // [0_1_2_3]: second, idx 1 first Image ds + expectedPlan("http://second.service", imageEmpty), // [0_1_3_2]: second, idx 1 first Image ds + expectedPlan("http://third.service", imageEmpty), // [0_2_1_3]: third, idx 2 first Image ds + expectedPlan("http://third.service", imageEmpty), // [0_2_3_1]: third, idx 2 first Image ds + expectedPlan("http://second.service", imageID), // [0_3_1_2]: second, idx 3 first Image ds + expectedPlan("http://third.service", imageID), // [0_3_2_1]: third, idx 3 first Image ds + expectedPlan("http://second.service", imageEmpty), // [1_0_2_3]: second, idx 1 first Image ds + expectedPlan("http://second.service", imageEmpty), // [1_0_3_2]: second, idx 1 first Image ds + expectedPlan("http://second.service", imageEmpty), // [1_2_0_3]: second, idx 1 first Image ds + expectedPlan("http://second.service", imageEmpty), // [1_2_3_0]: second, idx 1 first Image ds + expectedPlan("http://second.service", imageEmpty), // [1_3_0_2]: second, idx 1 first Image ds + expectedPlan("http://second.service", imageEmpty), // [1_3_2_0]: second, idx 1 first Image ds + expectedPlan("http://third.service", imageEmpty), // [2_0_1_3]: third, idx 2 first Image ds + expectedPlan("http://third.service", imageEmpty), // [2_0_3_1]: third, idx 2 first Image ds + expectedPlan("http://third.service", imageEmpty), // 
[2_1_0_3]: third, idx 2 first Image ds + expectedPlan("http://third.service", imageEmpty), // [2_1_3_0]: third, idx 2 first Image ds + expectedPlan("http://third.service", imageEmpty), // [2_3_0_1]: third, idx 2 first Image ds + expectedPlan("http://third.service", imageEmpty), // [2_3_1_0]: third, idx 2 first Image ds + expectedPlan("http://second.service", imageID), // [3_0_1_2]: second, idx 3 first Image ds + expectedPlan("http://third.service", imageID), // [3_0_2_1]: third, idx 3 first Image ds + expectedPlan("http://second.service", imageID), // [3_1_0_2]: second, idx 3 first Image ds + expectedPlan("http://second.service", imageID), // [3_1_2_0]: second, idx 3 first Image ds + expectedPlan("http://third.service", imageID), // [3_2_0_1]: third, idx 3 first Image ds + expectedPlan("http://third.service", imageID), // [3_2_1_0]: third, idx 3 first Image ds }, planConfiguration, WithDefaultPostProcessor(), @@ -18589,6 +19559,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -18599,6 +19573,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -18608,6 +19586,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -18738,6 +19720,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, 
TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("hostedImage"), @@ -18748,6 +19734,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "HostedImage": {}, }, TypeName: "HostedImage", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("image"), @@ -18757,6 +19747,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("__typename"), @@ -18972,6 +19966,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -19195,21 +20193,8 @@ func TestGraphQLDataSourceFederation(t *testing.T) { } t.Run("run", func(t *testing.T) { - RunWithPermutations( - t, - definition, - ` - query User { - user { - hostedImage { - image { - url - } - } - } - }`, - "User", - &plan.SynchronousResponsePlan{ + expectedPlanFn := func(imageKeyFields []resolve.KeyField) plan.Plan { + return &plan.SynchronousResponsePlan{ Response: &resolve.GraphQLResponse{ Fetches: resolve.Sequence( resolve.Single(&resolve.SingleFetch{ @@ -19306,6 +20291,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ 
{ Name: []byte("hostedImage"), @@ -19325,6 +20314,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Image": {}, }, TypeName: "Image", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: imageKeyFields, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("url"), @@ -19344,7 +20337,45 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, }, - }, + } + } + + // Image KeyFields depend on which datasource first contributes Image keys: + // idx 0 (first-service): no Image keys + // idx 1 (second-service): Image @key "id" resolvable: false -> empty + // idx 2 (third-service): Image @key "id" resolvable -> {id} + // perm [0_1_2]: first Image ds = idx 1 -> empty + // perm [0_2_1]: first Image ds = idx 2 -> {id} + // perm [1_0_2]: first Image ds = idx 1 -> empty + // perm [1_2_0]: first Image ds = idx 1 -> empty + // perm [2_0_1]: first Image ds = idx 2 -> {id} + // perm [2_1_0]: first Image ds = idx 2 -> {id} + imageID := []resolve.KeyField{{Name: "id"}} + imageEmpty := []resolve.KeyField{} + variants := []plan.Plan{ + expectedPlanFn(imageEmpty), // [0_1_2] + expectedPlanFn(imageID), // [0_2_1] + expectedPlanFn(imageEmpty), // [1_0_2] + expectedPlanFn(imageEmpty), // [1_2_0] + expectedPlanFn(imageID), // [2_0_1] + expectedPlanFn(imageID), // [2_1_0] + } + + RunWithPermutationsVariants( + t, + definition, + ` + query User { + user { + hostedImage { + image { + url + } + } + } + }`, + "User", + variants, planConfiguration, WithDefaultPostProcessor(), ) @@ -19656,6 +20687,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Entity": {}, }, TypeName: "Entity", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("nested"), @@ -19783,6 +20818,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Entity": {}, }, TypeName: "Entity", + 
CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("nested"), @@ -20272,6 +21311,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Entity": {}, }, TypeName: "Entity", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("nested"), @@ -20282,6 +21325,12 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "NestedEntity": {}, }, TypeName: "Node", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "NestedEntity": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -20446,6 +21495,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { "Entity": {}, }, TypeName: "Entity", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("nested"), diff --git a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_test.go b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_test.go index 1b984cf897..3f86f221ef 100644 --- a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_test.go +++ b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_test.go @@ -464,12 +464,14 @@ func TestGraphQLDataSource(t *testing.T) { ProvidesData: &resolve.Object{ Nullable: false, Path: []string{}, + HasAliases: true, Fields: []*resolve.Field{ { Name: []byte("droid"), Value: &resolve.Object{ Nullable: true, Path: []string{"droid"}, + HasAliases: true, Fields: []*resolve.Field{ { Name: []byte("name"), @@ -480,6 +482,7 
@@ func TestGraphQLDataSource(t *testing.T) { }, { Name: []byte("aliased"), + OriginalName: []byte("name"), Value: &resolve.Scalar{ Path: []string{"aliased"}, Nullable: false, @@ -4378,6 +4381,10 @@ func TestGraphQLDataSource(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -4417,6 +4424,10 @@ func TestGraphQLDataSource(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -4441,6 +4452,10 @@ func TestGraphQLDataSource(t *testing.T) { "Product": {}, }, TypeName: "Product", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "upc"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("name"), @@ -4480,6 +4495,10 @@ func TestGraphQLDataSource(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -4964,6 +4983,10 @@ func TestGraphQLDataSource(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -5348,6 +5371,10 @@ func TestGraphQLDataSource(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Path: []string{"user"}, 
Nullable: true, Fields: []*resolve.Field{ @@ -5713,6 +5740,10 @@ func TestGraphQLDataSource(t *testing.T) { "ServiceOneType": {}, }, TypeName: "ServiceOneType", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("serviceTwoFieldOne"), @@ -6423,6 +6454,10 @@ func TestGraphQLDataSource(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -6461,6 +6496,10 @@ func TestGraphQLDataSource(t *testing.T) { "Review": {}, }, TypeName: "Review", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("body"), @@ -6559,6 +6598,12 @@ func TestGraphQLDataSource(t *testing.T) { "User": {}, }, TypeName: "Identity", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "User": {KeyFields: []resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("id"), @@ -6599,6 +6644,10 @@ func TestGraphQLDataSource(t *testing.T) { "Review": {}, }, TypeName: "Review", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("body"), @@ -6702,6 +6751,12 @@ func TestGraphQLDataSource(t *testing.T) { "User": {}, }, TypeName: "Identity", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{ + "User": {KeyFields: 
[]resolve.KeyField{{Name: "id"}}, ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("reviews"), @@ -6714,6 +6769,10 @@ func TestGraphQLDataSource(t *testing.T) { "Review": {}, }, TypeName: "Review", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("body"), @@ -6984,6 +7043,10 @@ func TestGraphQLDataSource(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("username"), @@ -7167,6 +7230,10 @@ func TestGraphQLDataSource(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("username"), @@ -7354,6 +7421,10 @@ func TestGraphQLDataSource(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("username"), @@ -7550,6 +7621,10 @@ func TestGraphQLDataSource(t *testing.T) { "User": {}, }, TypeName: "User", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{{Name: "id"}}, + ByTypeName: map[string]*resolve.ObjectCacheAnalytics{}, + }, Fields: []*resolve.Field{ { Name: []byte("username"), From 7801a45342fe6b0d1f814f957e2cf43459796caf Mon Sep 17 00:00:00 2001 From: Jens Neuse Date: Wed, 4 Mar 2026 21:40:24 +0100 Subject: [PATCH 05/11] refactor: return individual per-fetch metrics instead of aggregated Replace SubgraphRequestMetrics (aggregated by subgraph) with 
SubgraphFetchMetrics (one entry per fetch). This gives the schema registry raw per-fetch data needed for percentile computation (p50/p95/p99). Co-Authored-By: Claude Opus 4.6 --- v2/pkg/engine/resolve/cache_analytics.go | 81 ++++------- .../engine/resolve/subgraph_metrics_test.go | 134 ++++-------------- 2 files changed, 56 insertions(+), 159 deletions(-) diff --git a/v2/pkg/engine/resolve/cache_analytics.go b/v2/pkg/engine/resolve/cache_analytics.go index 6866ac43e2..89194fe6c0 100644 --- a/v2/pkg/engine/resolve/cache_analytics.go +++ b/v2/pkg/engine/resolve/cache_analytics.go @@ -827,67 +827,36 @@ func (s *CacheAnalyticsSnapshot) ShadowFreshnessRateByEntityType() map[string]fl return result } -// SubgraphRequestMetrics holds per-subgraph aggregate metrics for a single request. -// Designed for export to external SLO systems (e.g., schema registry). -type SubgraphRequestMetrics struct { - SubgraphName string - RequestCount int // number of fetches to this subgraph - ErrorCount int // number of errors from this subgraph - TotalDurationMs int64 // sum of fetch durations - MaxDurationMs int64 // max single-fetch duration - TotalResponseBytes int64 // sum of response body sizes -} - -// SubgraphMetrics returns per-subgraph aggregate metrics for this request. -// Only considers actual subgraph fetches (not cache hits). -// Returns nil if there are no subgraph fetches or errors. -func (s *CacheAnalyticsSnapshot) SubgraphMetrics() []SubgraphRequestMetrics { - // Collect metrics by subgraph name, preserving insertion order - type entry struct { - metrics SubgraphRequestMetrics - index int - } - byName := make(map[string]*entry) - var order []string - +// SubgraphFetchMetrics holds metrics for a single subgraph fetch. +// Designed for export to external SLO systems (e.g., schema registry) +// where per-fetch granularity is needed for percentile computation. 
+type SubgraphFetchMetrics struct { + SubgraphName string + EntityType string + DurationMs int64 + HTTPStatusCode int + ResponseBytes int + IsEntityFetch bool +} + +// SubgraphFetches returns one entry per actual subgraph fetch for this request. +// Cache hits (L1/L2) are excluded. Returns nil if there are no subgraph fetches. +func (s *CacheAnalyticsSnapshot) SubgraphFetches() []SubgraphFetchMetrics { + var result []SubgraphFetchMetrics for _, ft := range s.FetchTimings { if ft.Source != FieldSourceSubgraph { continue } - e, ok := byName[ft.DataSource] - if !ok { - e = &entry{metrics: SubgraphRequestMetrics{SubgraphName: ft.DataSource}, index: len(order)} - byName[ft.DataSource] = e - order = append(order, ft.DataSource) - } - e.metrics.RequestCount++ - e.metrics.TotalDurationMs += ft.DurationMs - if ft.DurationMs > e.metrics.MaxDurationMs { - e.metrics.MaxDurationMs = ft.DurationMs - } - e.metrics.TotalResponseBytes += int64(ft.ResponseBytes) + result = append(result, SubgraphFetchMetrics{ + SubgraphName: ft.DataSource, + EntityType: ft.EntityType, + DurationMs: ft.DurationMs, + HTTPStatusCode: ft.HTTPStatusCode, + ResponseBytes: ft.ResponseBytes, + IsEntityFetch: ft.IsEntityFetch, + }) } - - for _, ev := range s.ErrorEvents { - e, ok := byName[ev.DataSource] - if !ok { - e = &entry{metrics: SubgraphRequestMetrics{SubgraphName: ev.DataSource}, index: len(order)} - byName[ev.DataSource] = e - order = append(order, ev.DataSource) - } - e.metrics.ErrorCount++ - } - - if len(order) == 0 { - return nil - } - - results := make([]SubgraphRequestMetrics, len(order)) - for _, name := range order { - e := byName[name] - results[e.index] = e.metrics - } - return results + return result } // computeCacheAgeMs computes cache age in milliseconds from remaining TTL and original TTL. 
diff --git a/v2/pkg/engine/resolve/subgraph_metrics_test.go b/v2/pkg/engine/resolve/subgraph_metrics_test.go index 90d71bf519..d1aa9daa07 100644 --- a/v2/pkg/engine/resolve/subgraph_metrics_test.go +++ b/v2/pkg/engine/resolve/subgraph_metrics_test.go @@ -6,134 +6,62 @@ import ( "github.com/stretchr/testify/assert" ) -func TestCacheAnalyticsSnapshot_SubgraphMetrics(t *testing.T) { - t.Run("returns nil when no subgraph fetches or errors", func(t *testing.T) { +func TestCacheAnalyticsSnapshot_SubgraphFetches(t *testing.T) { + t.Run("returns nil when no subgraph fetches", func(t *testing.T) { snap := CacheAnalyticsSnapshot{ FetchTimings: []FetchTimingEvent{ - {DataSource: "accounts", DurationMs: 10, Source: FieldSourceL1}, // cache hit, not subgraph - {DataSource: "accounts", DurationMs: 5, Source: FieldSourceL2}, // cache hit, not subgraph + {DataSource: "accounts", DurationMs: 10, Source: FieldSourceL1}, // L1 cache hit + {DataSource: "accounts", DurationMs: 5, Source: FieldSourceL2}, // L2 cache hit }, } - assert.Equal(t, []SubgraphRequestMetrics(nil), snap.SubgraphMetrics()) + assert.Equal(t, []SubgraphFetchMetrics(nil), snap.SubgraphFetches()) }) - t.Run("single subgraph with one fetch", func(t *testing.T) { + t.Run("returns one entry per subgraph fetch", func(t *testing.T) { snap := CacheAnalyticsSnapshot{ FetchTimings: []FetchTimingEvent{ - {DataSource: "accounts", DurationMs: 42, Source: FieldSourceSubgraph, ResponseBytes: 256, HTTPStatusCode: 200}, + {DataSource: "accounts", EntityType: "User", DurationMs: 42, Source: FieldSourceSubgraph, ResponseBytes: 256, HTTPStatusCode: 200, IsEntityFetch: true}, + {DataSource: "products", EntityType: "", DurationMs: 80, Source: FieldSourceSubgraph, ResponseBytes: 500, HTTPStatusCode: 200, IsEntityFetch: false}, + {DataSource: "accounts", EntityType: "User", DurationMs: 15, Source: FieldSourceSubgraph, ResponseBytes: 90, HTTPStatusCode: 200, IsEntityFetch: true}, }, } - result := snap.SubgraphMetrics() - assert.Equal(t, 1, 
len(result), "should have exactly 1 subgraph") - assert.Equal(t, SubgraphRequestMetrics{ - SubgraphName: "accounts", - RequestCount: 1, - ErrorCount: 0, - TotalDurationMs: 42, - MaxDurationMs: 42, - TotalResponseBytes: 256, - }, result[0]) + result := snap.SubgraphFetches() + assert.Equal(t, []SubgraphFetchMetrics{ + {SubgraphName: "accounts", EntityType: "User", DurationMs: 42, HTTPStatusCode: 200, ResponseBytes: 256, IsEntityFetch: true}, + {SubgraphName: "products", EntityType: "", DurationMs: 80, HTTPStatusCode: 200, ResponseBytes: 500, IsEntityFetch: false}, + {SubgraphName: "accounts", EntityType: "User", DurationMs: 15, HTTPStatusCode: 200, ResponseBytes: 90, IsEntityFetch: true}, + }, result) }) - t.Run("single subgraph with multiple fetches picks max duration", func(t *testing.T) { + t.Run("excludes cache hits", func(t *testing.T) { snap := CacheAnalyticsSnapshot{ FetchTimings: []FetchTimingEvent{ - {DataSource: "accounts", DurationMs: 10, Source: FieldSourceSubgraph, ResponseBytes: 100, HTTPStatusCode: 200}, - {DataSource: "accounts", DurationMs: 50, Source: FieldSourceSubgraph, ResponseBytes: 200, HTTPStatusCode: 200}, - {DataSource: "accounts", DurationMs: 30, Source: FieldSourceSubgraph, ResponseBytes: 150, HTTPStatusCode: 200}, + {DataSource: "accounts", DurationMs: 0, Source: FieldSourceL1}, // L1 cache hit + {DataSource: "accounts", DurationMs: 5, Source: FieldSourceL2}, // L2 cache hit + {DataSource: "accounts", EntityType: "User", DurationMs: 30, Source: FieldSourceSubgraph, HTTPStatusCode: 200, ResponseBytes: 128}, // actual fetch }, } - result := snap.SubgraphMetrics() - assert.Equal(t, 1, len(result), "should have exactly 1 subgraph") - assert.Equal(t, SubgraphRequestMetrics{ - SubgraphName: "accounts", - RequestCount: 3, - ErrorCount: 0, - TotalDurationMs: 90, - MaxDurationMs: 50, - TotalResponseBytes: 450, - }, result[0]) - }) - - t.Run("multiple subgraphs with mixed success and errors", func(t *testing.T) { - snap := CacheAnalyticsSnapshot{ 
- FetchTimings: []FetchTimingEvent{ - {DataSource: "accounts", DurationMs: 20, Source: FieldSourceSubgraph, ResponseBytes: 100}, - {DataSource: "products", DurationMs: 80, Source: FieldSourceSubgraph, ResponseBytes: 500}, - {DataSource: "accounts", DurationMs: 15, Source: FieldSourceSubgraph, ResponseBytes: 90}, - {DataSource: "products", DurationMs: 120, Source: FieldSourceSubgraph, ResponseBytes: 600}, - }, - ErrorEvents: []SubgraphErrorEvent{ - {DataSource: "products", EntityType: "Product", Message: "timeout", Code: "TIMEOUT"}, - {DataSource: "reviews", EntityType: "Review", Message: "not found", Code: "NOT_FOUND"}, - }, - } - result := snap.SubgraphMetrics() - assert.Equal(t, 3, len(result), "should have exactly 3 subgraphs") - - // accounts: 2 fetches, 0 errors - assert.Equal(t, SubgraphRequestMetrics{ - SubgraphName: "accounts", - RequestCount: 2, - ErrorCount: 0, - TotalDurationMs: 35, - MaxDurationMs: 20, - TotalResponseBytes: 190, - }, result[0]) - - // products: 2 fetches, 1 error - assert.Equal(t, SubgraphRequestMetrics{ - SubgraphName: "products", - RequestCount: 2, - ErrorCount: 1, - TotalDurationMs: 200, - MaxDurationMs: 120, - TotalResponseBytes: 1100, - }, result[1]) - - // reviews: 0 fetches, 1 error (error-only subgraph) - assert.Equal(t, SubgraphRequestMetrics{ - SubgraphName: "reviews", - RequestCount: 0, - ErrorCount: 1, - TotalDurationMs: 0, - MaxDurationMs: 0, - }, result[2]) - }) - - t.Run("cache hits are excluded from subgraph metrics", func(t *testing.T) { - snap := CacheAnalyticsSnapshot{ - FetchTimings: []FetchTimingEvent{ - {DataSource: "accounts", DurationMs: 0, Source: FieldSourceL1}, // L1 cache hit - {DataSource: "accounts", DurationMs: 5, Source: FieldSourceL2}, // L2 cache hit - {DataSource: "accounts", DurationMs: 30, Source: FieldSourceSubgraph}, // actual fetch - }, - } - result := snap.SubgraphMetrics() - assert.Equal(t, 1, len(result), "should have exactly 1 subgraph") - assert.Equal(t, 1, result[0].RequestCount, "should 
count only the subgraph fetch") - assert.Equal(t, int64(30), result[0].TotalDurationMs, "should only sum subgraph fetch duration") + result := snap.SubgraphFetches() + assert.Equal(t, []SubgraphFetchMetrics{ + {SubgraphName: "accounts", EntityType: "User", DurationMs: 30, HTTPStatusCode: 200, ResponseBytes: 128}, + }, result) }) t.Run("empty snapshot returns nil", func(t *testing.T) { snap := CacheAnalyticsSnapshot{} - assert.Equal(t, []SubgraphRequestMetrics(nil), snap.SubgraphMetrics()) + assert.Equal(t, []SubgraphFetchMetrics(nil), snap.SubgraphFetches()) }) - t.Run("errors-only subgraph has zero request count", func(t *testing.T) { + t.Run("preserves error status codes", func(t *testing.T) { snap := CacheAnalyticsSnapshot{ - ErrorEvents: []SubgraphErrorEvent{ - {DataSource: "accounts", Message: "connection refused"}, - {DataSource: "accounts", Message: "connection refused"}, + FetchTimings: []FetchTimingEvent{ + {DataSource: "accounts", DurationMs: 100, Source: FieldSourceSubgraph, HTTPStatusCode: 500, ResponseBytes: 50}, + {DataSource: "accounts", DurationMs: 20, Source: FieldSourceSubgraph, HTTPStatusCode: 200, ResponseBytes: 256}, }, } - result := snap.SubgraphMetrics() - assert.Equal(t, 1, len(result), "should have exactly 1 subgraph") - assert.Equal(t, SubgraphRequestMetrics{ - SubgraphName: "accounts", - RequestCount: 0, - ErrorCount: 2, - }, result[0]) + result := snap.SubgraphFetches() + assert.Equal(t, 500, result[0].HTTPStatusCode) + assert.Equal(t, 200, result[1].HTTPStatusCode) }) } From 342b4ade221583b0acf8913f69160e1d40e5de40 Mon Sep 17 00:00:00 2001 From: Jens Neuse Date: Wed, 4 Mar 2026 21:45:02 +0100 Subject: [PATCH 06/11] refactor: remove redundant SubgraphFetchMetrics type and method The SubgraphFetches() method was a trivial projection of FetchTimingEvent fields. Callers can filter FetchTimings directly since each entry already represents one individual fetch. 
Co-Authored-By: Claude Opus 4.6 --- v2/pkg/engine/resolve/cache_analytics.go | 32 ---------- .../engine/resolve/subgraph_metrics_test.go | 59 ------------------- 2 files changed, 91 deletions(-) diff --git a/v2/pkg/engine/resolve/cache_analytics.go b/v2/pkg/engine/resolve/cache_analytics.go index 89194fe6c0..ccf0e8171d 100644 --- a/v2/pkg/engine/resolve/cache_analytics.go +++ b/v2/pkg/engine/resolve/cache_analytics.go @@ -827,38 +827,6 @@ func (s *CacheAnalyticsSnapshot) ShadowFreshnessRateByEntityType() map[string]fl return result } -// SubgraphFetchMetrics holds metrics for a single subgraph fetch. -// Designed for export to external SLO systems (e.g., schema registry) -// where per-fetch granularity is needed for percentile computation. -type SubgraphFetchMetrics struct { - SubgraphName string - EntityType string - DurationMs int64 - HTTPStatusCode int - ResponseBytes int - IsEntityFetch bool -} - -// SubgraphFetches returns one entry per actual subgraph fetch for this request. -// Cache hits (L1/L2) are excluded. Returns nil if there are no subgraph fetches. -func (s *CacheAnalyticsSnapshot) SubgraphFetches() []SubgraphFetchMetrics { - var result []SubgraphFetchMetrics - for _, ft := range s.FetchTimings { - if ft.Source != FieldSourceSubgraph { - continue - } - result = append(result, SubgraphFetchMetrics{ - SubgraphName: ft.DataSource, - EntityType: ft.EntityType, - DurationMs: ft.DurationMs, - HTTPStatusCode: ft.HTTPStatusCode, - ResponseBytes: ft.ResponseBytes, - IsEntityFetch: ft.IsEntityFetch, - }) - } - return result -} - // computeCacheAgeMs computes cache age in milliseconds from remaining TTL and original TTL. // Returns 0 if either value is zero or if the computed age would be negative. 
func computeCacheAgeMs(remainingTTL, originalTTL time.Duration) int64 { diff --git a/v2/pkg/engine/resolve/subgraph_metrics_test.go b/v2/pkg/engine/resolve/subgraph_metrics_test.go index d1aa9daa07..922c7783b0 100644 --- a/v2/pkg/engine/resolve/subgraph_metrics_test.go +++ b/v2/pkg/engine/resolve/subgraph_metrics_test.go @@ -6,65 +6,6 @@ import ( "github.com/stretchr/testify/assert" ) -func TestCacheAnalyticsSnapshot_SubgraphFetches(t *testing.T) { - t.Run("returns nil when no subgraph fetches", func(t *testing.T) { - snap := CacheAnalyticsSnapshot{ - FetchTimings: []FetchTimingEvent{ - {DataSource: "accounts", DurationMs: 10, Source: FieldSourceL1}, // L1 cache hit - {DataSource: "accounts", DurationMs: 5, Source: FieldSourceL2}, // L2 cache hit - }, - } - assert.Equal(t, []SubgraphFetchMetrics(nil), snap.SubgraphFetches()) - }) - - t.Run("returns one entry per subgraph fetch", func(t *testing.T) { - snap := CacheAnalyticsSnapshot{ - FetchTimings: []FetchTimingEvent{ - {DataSource: "accounts", EntityType: "User", DurationMs: 42, Source: FieldSourceSubgraph, ResponseBytes: 256, HTTPStatusCode: 200, IsEntityFetch: true}, - {DataSource: "products", EntityType: "", DurationMs: 80, Source: FieldSourceSubgraph, ResponseBytes: 500, HTTPStatusCode: 200, IsEntityFetch: false}, - {DataSource: "accounts", EntityType: "User", DurationMs: 15, Source: FieldSourceSubgraph, ResponseBytes: 90, HTTPStatusCode: 200, IsEntityFetch: true}, - }, - } - result := snap.SubgraphFetches() - assert.Equal(t, []SubgraphFetchMetrics{ - {SubgraphName: "accounts", EntityType: "User", DurationMs: 42, HTTPStatusCode: 200, ResponseBytes: 256, IsEntityFetch: true}, - {SubgraphName: "products", EntityType: "", DurationMs: 80, HTTPStatusCode: 200, ResponseBytes: 500, IsEntityFetch: false}, - {SubgraphName: "accounts", EntityType: "User", DurationMs: 15, HTTPStatusCode: 200, ResponseBytes: 90, IsEntityFetch: true}, - }, result) - }) - - t.Run("excludes cache hits", func(t *testing.T) { - snap := 
CacheAnalyticsSnapshot{ - FetchTimings: []FetchTimingEvent{ - {DataSource: "accounts", DurationMs: 0, Source: FieldSourceL1}, // L1 cache hit - {DataSource: "accounts", DurationMs: 5, Source: FieldSourceL2}, // L2 cache hit - {DataSource: "accounts", EntityType: "User", DurationMs: 30, Source: FieldSourceSubgraph, HTTPStatusCode: 200, ResponseBytes: 128}, // actual fetch - }, - } - result := snap.SubgraphFetches() - assert.Equal(t, []SubgraphFetchMetrics{ - {SubgraphName: "accounts", EntityType: "User", DurationMs: 30, HTTPStatusCode: 200, ResponseBytes: 128}, - }, result) - }) - - t.Run("empty snapshot returns nil", func(t *testing.T) { - snap := CacheAnalyticsSnapshot{} - assert.Equal(t, []SubgraphFetchMetrics(nil), snap.SubgraphFetches()) - }) - - t.Run("preserves error status codes", func(t *testing.T) { - snap := CacheAnalyticsSnapshot{ - FetchTimings: []FetchTimingEvent{ - {DataSource: "accounts", DurationMs: 100, Source: FieldSourceSubgraph, HTTPStatusCode: 500, ResponseBytes: 50}, - {DataSource: "accounts", DurationMs: 20, Source: FieldSourceSubgraph, HTTPStatusCode: 200, ResponseBytes: 256}, - }, - } - result := snap.SubgraphFetches() - assert.Equal(t, 500, result[0].HTTPStatusCode) - assert.Equal(t, 200, result[1].HTTPStatusCode) - }) -} - func TestFetchTimingEvent_NewFields(t *testing.T) { t.Run("subgraph fetch carries HTTP status and response size", func(t *testing.T) { event := FetchTimingEvent{ From 4a361460f7eb3d1b6a0876da59374c77a1f58208 Mon Sep 17 00:00:00 2001 From: Jens Neuse Date: Wed, 4 Mar 2026 21:46:31 +0100 Subject: [PATCH 07/11] chore: remove trivial FetchTimingEvent field tests These tests only verified struct field initialization, not behavior. 
Co-Authored-By: Claude Opus 4.6 --- .../engine/resolve/subgraph_metrics_test.go | 34 ------------------- 1 file changed, 34 deletions(-) delete mode 100644 v2/pkg/engine/resolve/subgraph_metrics_test.go diff --git a/v2/pkg/engine/resolve/subgraph_metrics_test.go b/v2/pkg/engine/resolve/subgraph_metrics_test.go deleted file mode 100644 index 922c7783b0..0000000000 --- a/v2/pkg/engine/resolve/subgraph_metrics_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package resolve - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestFetchTimingEvent_NewFields(t *testing.T) { - t.Run("subgraph fetch carries HTTP status and response size", func(t *testing.T) { - event := FetchTimingEvent{ - DataSource: "accounts", - DurationMs: 42, - Source: FieldSourceSubgraph, - HTTPStatusCode: 200, - ResponseBytes: 1024, - TTFBMs: 0, // not yet instrumented - } - assert.Equal(t, 200, event.HTTPStatusCode) - assert.Equal(t, 1024, event.ResponseBytes) - assert.Equal(t, int64(0), event.TTFBMs) - }) - - t.Run("cache hit has zero values for HTTP fields", func(t *testing.T) { - event := FetchTimingEvent{ - DataSource: "accounts", - DurationMs: 1, - Source: FieldSourceL1, - } - assert.Equal(t, 0, event.HTTPStatusCode, "cache hits should have zero status code") - assert.Equal(t, 0, event.ResponseBytes, "cache hits should have zero response bytes") - assert.Equal(t, int64(0), event.TTFBMs, "cache hits should have zero TTFB") - }) -} From e40c1c0621cd45bfb67aeeeb3adff48f8d30cf6a Mon Sep 17 00:00:00 2001 From: Jens Neuse Date: Wed, 4 Mar 2026 22:08:21 +0100 Subject: [PATCH 08/11] feat: add e2e tests for FetchTimingEvent enriched fields Add two e2e tests in TestCacheAnalyticsE2E verifying: - Subgraph fetches record HTTPStatusCode (200) and ResponseBytes - L2 cache hits have zero HTTPStatusCode and ResponseBytes Also fix a bug where resolveSingle (sequential fetch path) did not merge FetchTimingEvent data into the analytics collector. Only resolveParallel had the merge logic. 
Added mergeResultAnalytics helper called from all three fetch type cases in resolveSingle. Co-Authored-By: Claude Opus 4.6 --- execution/engine/federation_caching_test.go | 115 ++++++++++++++++++++ v2/pkg/engine/resolve/loader.go | 18 +++ 2 files changed, 133 insertions(+) diff --git a/execution/engine/federation_caching_test.go b/execution/engine/federation_caching_test.go index 57ea5572f5..b19bd213b1 100644 --- a/execution/engine/federation_caching_test.go +++ b/execution/engine/federation_caching_test.go @@ -5381,6 +5381,7 @@ func normalizeSnapshot(snap resolve.CacheAnalyticsSnapshot) resolve.CacheAnalyti } // Zero out non-deterministic FetchTimings (DurationMs varies between runs) + // Use normalizeFetchTimings() when you need to assert FetchTimings fields. snap.FetchTimings = nil // Normalize empty slices to nil for consistent comparison @@ -5416,6 +5417,24 @@ func normalizeSnapshot(snap resolve.CacheAnalyticsSnapshot) resolve.CacheAnalyti return snap } +// normalizeFetchTimings sorts FetchTimings deterministically and zeros DurationMs +// (the only non-deterministic field). Unlike normalizeSnapshot, this preserves +// all other fields (HTTPStatusCode, ResponseBytes, etc.) for assertion. 
+func normalizeFetchTimings(timings []resolve.FetchTimingEvent) []resolve.FetchTimingEvent { + sorted := make([]resolve.FetchTimingEvent, len(timings)) + copy(sorted, timings) + for i := range sorted { + sorted[i].DurationMs = 0 + } + sort.Slice(sorted, func(i, j int) bool { + if sorted[i].DataSource != sorted[j].DataSource { + return sorted[i].DataSource < sorted[j].DataSource + } + return sorted[i].Source < sorted[j].Source + }) + return sorted +} + func TestCacheAnalyticsE2E(t *testing.T) { // Common cache key constants used across subtests const ( @@ -5900,6 +5919,102 @@ func TestCacheAnalyticsE2E(t *testing.T) { }) assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers))) }) + + t.Run("subgraph fetch records HTTPStatusCode and ResponseBytes", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First request — all L2 misses, subgraph fetches happen + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + snap := parseCacheAnalytics(t, headers) + + // Filter to subgraph fetch events only (exclude L2 read events) + var subgraphTimings []resolve.FetchTimingEvent + for _, ft := range snap.FetchTimings { + if ft.Source == resolve.FieldSourceSubgraph { + subgraphTimings = append(subgraphTimings, ft) + } + } + timings := 
normalizeFetchTimings(subgraphTimings) + + assert.Equal(t, 3, len(timings), "should have exactly 3 fetch timing events (one per subgraph)") + for i, ft := range timings { + assert.Equal(t, resolve.FieldSourceSubgraph, ft.Source, "entry %d should be a subgraph fetch", i) + assert.Equal(t, 200, ft.HTTPStatusCode, "entry %d should have HTTP 200", i) + assert.Equal(t, int64(0), ft.TTFBMs, "entry %d TTFB not yet instrumented", i) + } + + // Sorted by DataSource: accounts, products, reviews + assert.Equal(t, dsAccounts, timings[0].DataSource) + assert.Equal(t, "User", timings[0].EntityType) + assert.Equal(t, true, timings[0].IsEntityFetch) + + assert.Equal(t, dsProducts, timings[1].DataSource) + assert.Equal(t, "Query", timings[1].EntityType) + assert.Equal(t, false, timings[1].IsEntityFetch) + + assert.Equal(t, dsReviews, timings[2].DataSource) + assert.Equal(t, "Product", timings[2].EntityType) + assert.Equal(t, true, timings[2].IsEntityFetch) + + // ResponseBytes = full GraphQL response body from each subgraph + assert.Equal(t, 62, timings[0].ResponseBytes, "accounts subgraph response size") + assert.Equal(t, 136, timings[1].ResponseBytes, "products subgraph response size") + assert.Equal(t, 376, timings[2].ResponseBytes, "reviews subgraph response size") + }) + + t.Run("cache hit has zero HTTPStatusCode and ResponseBytes", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First request — populates L2 cache + resp, _ := 
gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + // Second request — all L2 hits + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + snap := parseCacheAnalytics(t, headers) + timings := normalizeFetchTimings(snap.FetchTimings) + + // All entries should be L2 cache hits with zero HTTP fields + for i, ft := range timings { + assert.Equal(t, resolve.FieldSourceL2, ft.Source, "entry %d should be an L2 cache hit", i) + assert.Equal(t, 0, ft.HTTPStatusCode, "entry %d cache hit should have zero HTTPStatusCode", i) + assert.Equal(t, 0, ft.ResponseBytes, "entry %d cache hit should have zero ResponseBytes", i) + } + }) } func TestShadowCacheE2E(t *testing.T) { diff --git a/v2/pkg/engine/resolve/loader.go b/v2/pkg/engine/resolve/loader.go index f10f423de0..563188cd77 100644 --- a/v2/pkg/engine/resolve/loader.go +++ b/v2/pkg/engine/resolve/loader.go @@ -444,6 +444,7 @@ func (l *Loader) resolveSingle(item *FetchItem) error { return err } } + l.mergeResultAnalytics(res) err = l.mergeResult(item, res, items) l.callOnFinished(res) return err @@ -460,6 +461,7 @@ func (l *Loader) resolveSingle(item *FetchItem) error { return errors.WithStack(err) } } + l.mergeResultAnalytics(res) err = l.mergeResult(item, res, items) l.callOnFinished(res) return err @@ -475,6 +477,7 @@ func (l *Loader) resolveSingle(item *FetchItem) error { return errors.WithStack(err) } } + l.mergeResultAnalytics(res) err = l.mergeResult(item, res, items) l.callOnFinished(res) return err @@ -483,6 +486,21 @@ func (l *Loader) resolveSingle(item *FetchItem) error { } } +// mergeResultAnalytics merges analytics events accumulated on a result into the collector. 
+// In resolveParallel, this happens in bulk after all goroutines complete. +// In resolveSingle, we must call this per-result since there's no bulk merge phase. +func (l *Loader) mergeResultAnalytics(res *result) { + if !l.ctx.cacheAnalyticsEnabled() { + return + } + if len(res.l2FetchTimings) > 0 { + l.ctx.cacheAnalytics.MergeL2FetchTimings(res.l2FetchTimings) + } + if len(res.l2ErrorEvents) > 0 { + l.ctx.cacheAnalytics.MergeL2Errors(res.l2ErrorEvents) + } +} + func (l *Loader) callOnFinished(res *result) { if l.ctx.LoaderHooks != nil && res.loaderHookContext != nil { l.ctx.LoaderHooks.OnFinished(res.loaderHookContext, res.ds, newResponseInfo(res, l.ctx.subgraphErrors)) From a4ab567467671de3ef981917fbcc490aae974c95 Mon Sep 17 00:00:00 2001 From: Jens Neuse Date: Wed, 4 Mar 2026 22:41:02 +0100 Subject: [PATCH 09/11] refactor: split federation_caching_test.go into themed files Split the 7,234-line test file into 5 focused files for maintainability: - federation_caching_helpers_test.go: shared infrastructure - federation_caching_test.go: core L2 foundation tests - federation_caching_l1_test.go: L1 cache behavior tests - federation_caching_l2_test.go: L2/combined/error tests - federation_caching_analytics_test.go: analytics, shadow, mutation, alias tests Also use complete FetchTimingEvent struct assertions in the new e2e subtests. 
Co-Authored-By: Claude Opus 4.6 --- .../federation_caching_analytics_test.go | 1788 ++++++ .../engine/federation_caching_helpers_test.go | 866 +++ .../engine/federation_caching_l1_test.go | 1061 ++++ .../engine/federation_caching_l2_test.go | 1124 ++++ execution/engine/federation_caching_test.go | 4794 ----------------- 5 files changed, 4839 insertions(+), 4794 deletions(-) create mode 100644 execution/engine/federation_caching_analytics_test.go create mode 100644 execution/engine/federation_caching_helpers_test.go create mode 100644 execution/engine/federation_caching_l1_test.go create mode 100644 execution/engine/federation_caching_l2_test.go diff --git a/execution/engine/federation_caching_analytics_test.go b/execution/engine/federation_caching_analytics_test.go new file mode 100644 index 0000000000..e8272ec8b1 --- /dev/null +++ b/execution/engine/federation_caching_analytics_test.go @@ -0,0 +1,1788 @@ +package engine_test + +import ( + "context" + "net/http" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + accounts "github.com/wundergraph/graphql-go-tools/execution/federationtesting/accounts/graph" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +func TestCacheAnalyticsE2E(t *testing.T) { + // Common cache key constants used across subtests + const ( + keyProductTop1 = `{"__typename":"Product","key":{"upc":"top-1"}}` + keyProductTop2 = `{"__typename":"Product","key":{"upc":"top-2"}}` + keyTopProducts = `{"__typename":"Query","field":"topProducts"}` + keyUser1234 = `{"__typename":"User","key":{"id":"1234"}}` + keyMe = `{"__typename":"Query","field":"me"}` + dsAccounts = "accounts" + dsProducts = "products" + dsReviews = "reviews" + ) + + // Field hash constants — xxhash of the 
rendered scalar field values. + // These are deterministic because xxhash is seeded identically each time. + const ( + hashProductNameTrilby uint64 = 1032923585965781586 // xxhash("Trilby") + hashProductNameFedora uint64 = 2432227032303632641 // xxhash("Fedora") + hashUserUsernameMe uint64 = 4957449860898447395 // xxhash("Me") + ) + + // Entity key constants for field hash assertions + const ( + entityKeyProductTop1 = `{"upc":"top-1"}` + entityKeyProductTop2 = `{"upc":"top-2"}` + entityKeyUser1234 = `{"id":"1234"}` + ) + + // Byte sizes of cached entities (measured from actual JSON marshalling) + const ( + byteSizeProductTop1 = 177 // Product top-1 entity (reviews subgraph response) + byteSizeProductTop2 = 233 // Product top-2 entity (reviews subgraph response) + byteSizeTopProducts = 127 // Query.topProducts root field (products subgraph response) + byteSizeUser1234 = 49 // User 1234 entity (accounts subgraph response) + byteSizeUser1234Full = 105 // User 1234 entity from L1 (includes sameUserReviewers data) + byteSizeQueryMe = 56 // Query.me root field (accounts subgraph response) + ) + + // Shared field hashes for the multi-upstream query (topProducts with reviews). 
+ // Product.name: 2 products (Trilby, Fedora) → 2 distinct hashes + // User.username: 2 reviews both by "Me" → 2 identical hashes + // All FieldSourceSubgraph by default (overridden in specific tests) + multiUpstreamFieldHashes := []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceSubgraph}, + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph}, + } + + // L2 hit field hashes — same data but all sourced from L2 cache + multiUpstreamFieldHashesL2 := []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2}, + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + } + + multiUpstreamEntityTypes := []resolve.EntityTypeInfo{ + {TypeName: "Product", Count: 2, UniqueKeys: 2}, + {TypeName: "User", Count: 2, UniqueKeys: 1}, + } + + // Standard subgraph caching configs used by L2 and L1+L2 tests + multiUpstreamCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, 
+ }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + expectedResponseBody := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}` + + t.Run("L2 miss then hit with analytics", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First query — all L2 misses, populates L2 cache + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first 
request, cache empty + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first request, cache empty + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // L2 miss: root field not yet cached + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts}, // L2 miss: User entity not yet cached (second review's User 1234 deduplicated in batch) + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written after subgraph fetch on miss + {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written after subgraph fetch on miss + {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written to L2 after fetch + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // User entity written after accounts fetch + }, + FieldHashes: multiUpstreamFieldHashes, + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Second query — all L2 hits from populated cache + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: 
dsReviews, ByteSize: byteSizeProductTop1}, // L2 hit: populated by Request 1 + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // L2 hit: populated by Request 1 + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // L2 hit: root field cached by Request 1 + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234}, // L2 hit: User entity cached by Request 1 (second review's User 1234 deduplicated) + }, + // No L2Writes: all served from cache, no fetches needed + FieldHashes: multiUpstreamFieldHashesL2, + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("L1 cache analytics with entity reuse", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + EnableCacheAnalytics: true, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Query that triggers L1 entity reuse: + // 1. Query.me -> accounts subgraph -> returns User 1234 -> populates L1 + // 2. User.sameUserReviewers -> reviews subgraph -> returns [User 1234] + // 3. 
Entity fetch for User 1234 -> L1 HIT (no subgraph call) + query := `query { + me { + id + username + sameUserReviewers { + id + username + } + } + }` + + tracker.Reset() + resp, headers := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}}`, string(resp)) + + expected := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L1Reads: []resolve.CacheKeyEvent{ + // L1 hit: User 1234 was populated by Query.me root fetch, reused for sameUserReviewers + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234Full}, + }, + L1Writes: []resolve.CacheWriteEvent{ + // Query.me root field written to L1 after accounts subgraph fetch + {CacheKey: keyMe, EntityType: "Query", ByteSize: byteSizeQueryMe, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL1}, + }, + FieldHashes: []resolve.EntityFieldHash{ + // Both username entries show L1 source because the entity key resolves to + // the L1 source recorded during the entity fetch L1 HIT + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL1}, // me.username: entity came from L1 + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL1}, // sameUserReviewers[0].username: same L1 entity + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 2, UniqueKeys: 1}, // 2 User instances, but only 1 unique key (1234) + }, + }) + assert.Equal(t, expected, normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("L1+L2 combined analytics", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := 
&http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + EnableCacheAnalytics: true, + }), + withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First query — L2 misses (L1 is per-request, always fresh) + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first request, cache empty + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first request, cache empty + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // L2 miss: root field not yet cached + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts}, // L2 miss: User entity not yet cached (second review's User 1234 hits L1 after this fetch) + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written after reviews subgraph fetch + {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written 
after reviews subgraph fetch + {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written after products fetch + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // User entity written after accounts fetch + }, + FieldHashes: multiUpstreamFieldHashes, + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Second query — L2 hits (L1 is per-request, reset between requests) + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // L2 hit: populated by Request 1 + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // L2 hit: populated by Request 1 + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // L2 hit: root field cached by Request 1 + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234}, // L2 hit: User entity cached by Request 1 (second review's User 1234 hits L1) + }, + // No L2Writes: all entities served from L2 cache + FieldHashes: multiUpstreamFieldHashesL2, + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("root field with args - L2 
analytics", func(t *testing.T) { + // Tests that root field caching with arguments properly records L2 analytics events. + // This covers the root field path in tryL2CacheLoad (no L1 keys branch). + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + rootFieldArgsCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "user", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(rootFieldArgsCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + const ( + keyUserById1234 = `{"__typename":"Query","field":"user","args":{"id":"1234"}}` + keyUserById5678 = `{"__typename":"Query","field":"user","args":{"id":"5678"}}` + dsAccountsLocal = "accounts" + byteSizeUser1234 = 38 // {"user":{"id":"1234","username":"Me"}} + byteSizeUser5678 = 45 // {"user":{"id":"5678","username":"User 5678"}} + + hashUsernameMeLocal uint64 = 4957449860898447395 // xxhash("Me") + hashUsername5678Local uint64 = 15512417390573333165 // xxhash("User 5678") + entityKeyUser1234Local = `{"id":"1234"}` + entityKeyUser5678Local = `{"id":"5678"}` + ) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query (id=1234) — L2 miss, populates cache + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, 
setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts subgraph") + + expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyUserById1234, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsAccountsLocal}, // L2 miss: first request, cache empty + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyUserById1234, EntityType: "Query", ByteSize: byteSizeUser1234, DataSource: dsAccountsLocal, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written after accounts fetch + }, + FieldHashes: []resolve.EntityFieldHash{ + {EntityType: "User", FieldName: "username", FieldHash: hashUsernameMeLocal, KeyRaw: entityKeyUser1234Local, Source: resolve.FieldSourceSubgraph}, // User returned by root field, data from subgraph + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, // 1 User entity from root field response + }, + }) + assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Second query (same id=1234) — L2 hit + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts (cache hit)") + + expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyUserById1234, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsAccountsLocal, ByteSize: byteSizeUser1234}, // L2 hit: populated by first request + }, + // No L2Writes: data served from cache + 
FieldHashes: []resolve.EntityFieldHash{ + // Source is FieldSourceSubgraph (default) because entity source tracking operates at + // entity cache level, not root field cache level — no entity caching configured for User + {EntityType: "User", FieldName: "username", FieldHash: hashUsernameMeLocal, KeyRaw: entityKeyUser1234Local, Source: resolve.FieldSourceSubgraph}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, + }, + }) + assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Third query (different id=5678) — L2 miss (different args = different cache key) + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "5678"}, t) + assert.Equal(t, `{"data":{"user":{"id":"5678","username":"User 5678"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Third query should call accounts (different args)") + + expected3 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyUserById5678, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsAccountsLocal}, // L2 miss: different args, not cached + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyUserById5678, EntityType: "Query", ByteSize: byteSizeUser5678, DataSource: dsAccountsLocal, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // New args written to L2 + }, + FieldHashes: []resolve.EntityFieldHash{ + {EntityType: "User", FieldName: "username", FieldHash: hashUsername5678Local, KeyRaw: entityKeyUser5678Local, Source: resolve.FieldSourceSubgraph}, // User 5678 data from subgraph + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, + }, + }) + assert.Equal(t, expected3, normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("root field only - L2 analytics without entity caching", func(t 
*testing.T) { + // Tests root field caching analytics in isolation — only root field caching configured, + // no entity caching. Verifies that only root field events appear in analytics. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Only configure root field caching for products — no entity caching at all + rootOnlyConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(rootOnlyConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + reviewsHost := reviewsURLParsed.Host + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + const ( + keyTopProductsLocal = `{"__typename":"Query","field":"topProducts"}` + dsProductsLocal = "products" + byteSizeTP = 127 // Query.topProducts root field response + ) + + // First query — L2 miss for root field, no events for entities (not configured) + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + 
assert.Equal(t, expectedResponseBody, string(resp)) + + // Products subgraph called (root field miss), reviews + accounts always called (no entity caching) + assert.Equal(t, 1, tracker.GetCount(productsHost), "First query should call products subgraph") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "First query should call reviews subgraph") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts subgraph") + + expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyTopProductsLocal, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProductsLocal}, // L2 miss: first request, cache empty + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyTopProductsLocal, EntityType: "Query", ByteSize: byteSizeTP, DataSource: dsProductsLocal, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written after products fetch + }, + // Only entity types tracked during resolution (not caching-dependent) + FieldHashes: multiUpstreamFieldHashes, + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Second query — L2 hit for root field, entities still fetched (not cached) + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + // Products subgraph skipped (root field cache hit), reviews + accounts still called + assert.Equal(t, 0, tracker.GetCount(productsHost), "Second query should skip products (root field cache hit)") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "Second query should call reviews (no entity caching)") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Second query should call accounts (no entity caching)") + + expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + 
L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyTopProductsLocal, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProductsLocal, ByteSize: byteSizeTP}, // L2 hit: root field cached by first request + }, + // No L2Writes: root field served from cache, entities have no caching configured + FieldHashes: multiUpstreamFieldHashes, // Entity field hashes still tracked (resolution, not caching) + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("subgraph fetch records HTTPStatusCode and ResponseBytes", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First request — all L2 misses, subgraph fetches happen + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + snap := parseCacheAnalytics(t, headers) + + // Filter to subgraph fetch events only (exclude L2 read events) + var subgraphTimings []resolve.FetchTimingEvent + for _, ft := range snap.FetchTimings { + if ft.Source == resolve.FieldSourceSubgraph { + subgraphTimings = append(subgraphTimings, ft) + } + } + timings := normalizeFetchTimings(subgraphTimings) + + assert.Equal(t, []resolve.FetchTimingEvent{ + {DataSource: dsAccounts, EntityType: "User", Source: resolve.FieldSourceSubgraph, ItemCount: 1, IsEntityFetch: 
true, HTTPStatusCode: 200, ResponseBytes: 62}, // _entities fetch for User 1234 + {DataSource: dsProducts, EntityType: "Query", Source: resolve.FieldSourceSubgraph, ItemCount: 1, IsEntityFetch: false, HTTPStatusCode: 200, ResponseBytes: 136}, // topProducts root field fetch + {DataSource: dsReviews, EntityType: "Product", Source: resolve.FieldSourceSubgraph, ItemCount: 1, IsEntityFetch: true, HTTPStatusCode: 200, ResponseBytes: 376}, // _entities fetch for Product top-1 and top-2 + }, timings) + }) + + t.Run("cache hit has zero HTTPStatusCode and ResponseBytes", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First request — populates L2 cache + resp, _ := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + // Second request — all L2 hits + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + snap := parseCacheAnalytics(t, headers) + timings := normalizeFetchTimings(snap.FetchTimings) + + assert.Equal(t, []resolve.FetchTimingEvent{ + {DataSource: dsAccounts, EntityType: "User", Source: resolve.FieldSourceL2, ItemCount: 1, IsEntityFetch: true}, // L2 hit for User 1234 entity + {DataSource: dsProducts, EntityType: "Query", 
Source: resolve.FieldSourceL2, ItemCount: 1, IsEntityFetch: true}, // L2 hit for topProducts root field + {DataSource: dsReviews, EntityType: "Product", Source: resolve.FieldSourceL2, ItemCount: 2, IsEntityFetch: true}, // L2 hit for Product top-1 and top-2 entities + }, timings) + }) +} + +func TestShadowCacheE2E(t *testing.T) { + // Cache key constants (same as TestCacheAnalyticsE2E — same federation setup) + const ( + keyProductTop1 = `{"__typename":"Product","key":{"upc":"top-1"}}` + keyProductTop2 = `{"__typename":"Product","key":{"upc":"top-2"}}` + keyTopProducts = `{"__typename":"Query","field":"topProducts"}` + keyUser1234 = `{"__typename":"User","key":{"id":"1234"}}` + dsAccounts = "accounts" + dsProducts = "products" + dsReviews = "reviews" + ) + + // Field hash constants + const ( + hashProductNameTrilby uint64 = 1032923585965781586 + hashProductNameFedora uint64 = 2432227032303632641 + hashUserUsernameMe uint64 = 4957449860898447395 + ) + + // Entity key constants + const ( + entityKeyProductTop1 = `{"upc":"top-1"}` + entityKeyProductTop2 = `{"upc":"top-2"}` + entityKeyUser1234 = `{"id":"1234"}` + ) + + // Byte sizes + const ( + byteSizeProductTop1 = 177 + byteSizeProductTop2 = 233 + byteSizeTopProducts = 127 + byteSizeUser1234 = 49 + ) + + // Shadow comparison hash constants + const ( + shadowHashProductTop1 uint64 = 8656108128396512717 + shadowHashProductTop2 uint64 = 4671066427758823003 + shadowHashUser1234 uint64 = 188937276969638005 + shadowBytesProductTop1 = 124 + shadowBytesProductTop2 = 180 + shadowBytesUser1234 = 17 + ) + + // Shadow cached field hash constants (ProvidesData fields hashed from cached value during shadow comparison) + const ( + shadowFieldHashProductReviewsTop1 uint64 = 13894521258004960943 // xxhash of Product reviews field for top-1 + shadowFieldHashProductReviewsTop2 uint64 = 3182276346310063647 // xxhash of Product reviews field for top-2 + ) + + // Field hashes when all data comes from subgraph (first request, all misses) + 
fieldHashesSubgraph := []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceSubgraph}, + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph}, + } + + // Field hashes when all data comes from L2 (second request, all hits — no shadow entities) + fieldHashesL2 := []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2}, + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + } + + // Field hashes when all entities are in shadow mode (second request): + // L2 source hashes from resolution + ShadowCached hashes from compareShadowValues + fieldHashesL2AllShadow := []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2}, + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2}, + {EntityType: "Product", FieldName: "reviews", FieldHash: shadowFieldHashProductReviewsTop1, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceShadowCached}, // 
Cached Product reviews field for per-field staleness detection + {EntityType: "Product", FieldName: "reviews", FieldHash: shadowFieldHashProductReviewsTop2, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceShadowCached}, // Cached Product reviews field for per-field staleness detection + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username for per-field staleness detection + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username (second review) + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + } + + // Field hashes when only User is in shadow mode (mixed mode, second request): + // Product/root L2 source hashes + User L2 + User ShadowCached hashes + fieldHashesL2MixedShadow := []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2}, + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username for per-field staleness detection + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username (second review) + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + {EntityType: 
"User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + } + + entityTypes := []resolve.EntityTypeInfo{ + {TypeName: "Product", Count: 2, UniqueKeys: 2}, + {TypeName: "User", Count: 2, UniqueKeys: 1}, + } + + expectedResponseBody := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}` + + t.Run("shadow all entities - always fetches", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Shadow mode for all entity types, real caching for root fields + shadowConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(shadowConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + 
ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) + productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) + reviewsHost := mustParseHost(setup.ReviewsUpstreamServer.URL) + + // Request 1: All L2 misses → all 3 subgraphs called + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + assert.Equal(t, 1, tracker.GetCount(productsHost), "request 1: should call products exactly once") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "request 1: should call reviews exactly once") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 1: should call accounts exactly once") + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews, Shadow: true}, // Shadow L2 miss: cache empty, subgraph fetched + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews, Shadow: true}, // Shadow L2 miss: cache empty, subgraph fetched + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // Real L2 miss: root field not shadow, fetched normally + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts, Shadow: true}, // Shadow L2 miss: User not yet cached + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written to L2 even in shadow (populates for comparison) + {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, 
CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written to L2 even in shadow + {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written normally (not shadow) + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // User entity written for future shadow comparison + }, + // No ShadowComparisons: nothing cached yet to compare against + FieldHashes: fieldHashesSubgraph, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Request 2: Entity L2 hits (shadow) → entity subgraphs STILL called + // Root field L2 hit → products NOT called (real caching) + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + assert.Equal(t, 0, tracker.GetCount(productsHost), "request 2: products should NOT be called (root field real cache hit)") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "request 2: reviews should be called (Product entity shadow)") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 2: accounts should be called (User entity shadow)") + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1, Shadow: true}, // Shadow L2 hit: cached by Req 1, but subgraph still called + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2, Shadow: true}, // Shadow L2 hit: cached by Req 1, but subgraph still called + {CacheKey: keyTopProducts, EntityType: "Query", Kind: 
resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // Real L2 hit: root field served from cache (not shadow) + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234, Shadow: true}, // Shadow L2 hit: accounts still called for comparison + }, + L2Writes: []resolve.CacheWriteEvent{ + // Only shadow entities re-written (refreshed from subgraph); root field NOT re-written (real cache hit) + {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write: fresh data from subgraph + {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write: fresh data from subgraph + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write: fresh User from accounts + }, + ShadowComparisons: []resolve.ShadowComparisonEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", IsFresh: true, CachedHash: shadowHashProductTop1, FreshHash: shadowHashProductTop1, CachedBytes: shadowBytesProductTop1, FreshBytes: shadowBytesProductTop1, DataSource: dsReviews, ConfiguredTTL: 30 * time.Second}, // Fresh: cached matches subgraph (data unchanged) + {CacheKey: keyProductTop2, EntityType: "Product", IsFresh: true, CachedHash: shadowHashProductTop2, FreshHash: shadowHashProductTop2, CachedBytes: shadowBytesProductTop2, FreshBytes: shadowBytesProductTop2, DataSource: dsReviews, ConfiguredTTL: 30 * time.Second}, // Fresh: cached matches subgraph (data unchanged) + {CacheKey: keyUser1234, EntityType: "User", IsFresh: true, CachedHash: shadowHashUser1234, FreshHash: shadowHashUser1234, CachedBytes: shadowBytesUser1234, FreshBytes: shadowBytesUser1234, DataSource: 
dsAccounts, ConfiguredTTL: 30 * time.Second}, // Fresh: cached User matches subgraph (no mutation) + }, + FieldHashes: fieldHashesL2AllShadow, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("mixed mode - shadow User, real cache Product", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Shadow mode for User only, real caching for Product and root fields + mixedConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, // real caching + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, // shadow + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(mixedConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) + productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) + reviewsHost := mustParseHost(setup.ReviewsUpstreamServer.URL) + + // Request 1: All L2 misses → all 3 subgraphs called + tracker.Reset() + resp, headers := 
gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + assert.Equal(t, 1, tracker.GetCount(productsHost), "request 1: should call products exactly once") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "request 1: should call reviews exactly once") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 1: should call accounts exactly once") + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // Real L2 miss: Product entity not yet cached + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // Real L2 miss: Product entity not yet cached + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // Real L2 miss: root field not yet cached + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts, Shadow: true}, // Shadow L2 miss: User entity not yet cached + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Product written for real caching + {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Product written for real caching + {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written for real caching + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * 
time.Second}, // User written (shadow still populates L2) + }, + FieldHashes: fieldHashesSubgraph, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Request 2: Product real cache hit, User shadow → still fetched + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + assert.Equal(t, 0, tracker.GetCount(productsHost), "request 2: products should NOT be called (root field real cache hit)") + assert.Equal(t, 0, tracker.GetCount(reviewsHost), "request 2: reviews should NOT be called (Product entity real cache hit)") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 2: accounts SHOULD be called (User entity shadow)") + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // Real L2 hit: Product served from cache (no subgraph call) + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // Real L2 hit: Product served from cache (no subgraph call) + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // Real L2 hit: root field served from cache + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234, Shadow: true}, // Shadow L2 hit: accounts still called for comparison + }, + L2Writes: []resolve.CacheWriteEvent{ + // Only User re-written (shadow always fetches fresh); Product/root NOT re-written (real hit) + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 
30 * time.Second}, // Shadow re-write: fresh data from accounts + }, + ShadowComparisons: []resolve.ShadowComparisonEvent{ + // Only User has shadow comparisons; Product uses real caching + {CacheKey: keyUser1234, EntityType: "User", IsFresh: true, CachedHash: shadowHashUser1234, FreshHash: shadowHashUser1234, CachedBytes: shadowBytesUser1234, FreshBytes: shadowBytesUser1234, DataSource: dsAccounts, ConfiguredTTL: 30 * time.Second}, // Fresh: cached User matches subgraph + }, + FieldHashes: fieldHashesL2MixedShadow, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("shadow mode without analytics - safety only", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + shadowConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), // analytics NOT enabled + withSubgraphEntityCachingConfigs(shadowConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) + + // Request 1: Populate cache + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, 
setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + // No stats when analytics is disabled + assert.Empty(t, headers.Get("X-Cache-Analytics"), "analytics header should not be set when analytics disabled") + + // Request 2: Shadow mode — accounts still fetched (data not served from cache) + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 2: accounts should be called (shadow mode)") + // No stats when analytics is disabled + assert.Empty(t, headers.Get("X-Cache-Analytics"), "analytics header should not be set when analytics disabled") + }) + + t.Run("graduation - shadow to real", func(t *testing.T) { + // Same FakeLoaderCache shared across both engine setups + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Phase 1: Shadow mode for User + shadowConfigs := engine.SubgraphCachingConfigs{ + {SubgraphName: "products", RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }}, + {SubgraphName: "reviews", EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }}, + {SubgraphName: "accounts", EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, + }}, + } + + setup1 := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + 
withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(shadowConfigs), + )) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost1 := mustParseHost(setup1.AccountsUpstreamServer.URL) + + // Phase 1, Request 1: Populate L2 cache + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, setup1.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // Real L2 miss: first request, cache empty + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // Real L2 miss: first request, cache empty + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // Real L2 miss: root field not yet cached + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts, Shadow: true}, // Shadow L2 miss: User not yet cached + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Product written for real caching + {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Product written for real caching + {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written for real caching + {CacheKey: 
keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // User written (shadow still populates L2) + }, + FieldHashes: fieldHashesSubgraph, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Phase 1, Request 2: Shadow — accounts still called + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup1.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost1), "phase 1 request 2: accounts should be called (shadow mode)") + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // Real L2 hit: Product served from cache + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // Real L2 hit: Product served from cache + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // Real L2 hit: root field from cache + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234, Shadow: true}, // Shadow L2 hit: cached but accounts still called + }, + L2Writes: []resolve.CacheWriteEvent{ + // Only shadow User re-written; Product/root use real caching (no re-write on hit) + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write with fresh data from accounts + }, + ShadowComparisons: []resolve.ShadowComparisonEvent{ + {CacheKey: keyUser1234, EntityType: "User", IsFresh: true, 
CachedHash: shadowHashUser1234, FreshHash: shadowHashUser1234, CachedBytes: shadowBytesUser1234, FreshBytes: shadowBytesUser1234, DataSource: dsAccounts, ConfiguredTTL: 30 * time.Second}, // Fresh: cached User matches subgraph (safe to graduate) + }, + FieldHashes: fieldHashesL2MixedShadow, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + + setup1.Close() + + // Phase 2: Graduated to real caching (same cache, new engine) + realConfigs := engine.SubgraphCachingConfigs{ + {SubgraphName: "products", RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }}, + {SubgraphName: "reviews", EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }}, + {SubgraphName: "accounts", EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, // No ShadowMode! 
+ }}, + } + + tracker2 := newSubgraphCallTracker(http.DefaultTransport) + trackingClient2 := &http.Client{Transport: tracker2} + + setup2 := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), // SAME cache + withHTTPClient(trackingClient2), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(realConfigs), + )) + t.Cleanup(setup2.Close) + + accountsHost2 := mustParseHost(setup2.AccountsUpstreamServer.URL) + + // Phase 2, Request 3: Real L2 hit — accounts NOT called + tracker2.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup2.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + assert.Equal(t, 0, tracker2.GetCount(accountsHost2), "phase 2: accounts should NOT be called (real L2 hit)") + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // Real L2 hit: cached by Phase 1 + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // Real L2 hit: cached by Phase 1 + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // Real L2 hit: root field cached by Phase 1 + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234}, // Real L2 hit: graduated from shadow, no longer calls accounts + }, + // No L2Writes: all real cache hits, no fetches needed + // No ShadowComparisons: User is no longer in shadow mode + FieldHashes: fieldHashesL2, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, 
headers))) + }) +} + +func TestMutationImpactE2E(t *testing.T) { + accounts.ResetUsers() + t.Cleanup(accounts.ResetUsers) + + // Configure entity caching for User on accounts subgraph + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` + + // Uses a simple query that causes an entity fetch for User 1234 + // me { id username } triggers: accounts root fetch for Query.me, no entity fetch + // We need a query that triggers entity caching for User - topProducts with reviews + authorWithoutProvides + entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + + t.Run("mutation with prior cache shows stale entity", func(t *testing.T) { + accounts.ResetUsers() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Request 1: Query to populate L2 cache with User entity + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) + assert.Contains(t, string(resp), `"username":"Me"`) + + // Request 2: Mutation — should detect stale cached entity + tracker.Reset() + respMut, 
headersMut := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) + assert.Contains(t, string(respMut), `"UpdatedMe"`) + + snap := normalizeSnapshot(parseCacheAnalytics(t, headersMut)) + require.NotNil(t, snap.MutationEvents, "should have mutation impact events") + require.Equal(t, 1, len(snap.MutationEvents), "should have exactly 1 mutation impact event") + + event := snap.MutationEvents[0] + assert.Equal(t, "updateUsername", event.MutationRootField) + assert.Equal(t, "User", event.EntityType) + assert.Equal(t, `{"__typename":"User","key":{"id":"1234"}}`, event.EntityCacheKey) + assert.Equal(t, true, event.HadCachedValue, "should have found cached value") + assert.Equal(t, true, event.IsStale, "cached value should be stale (username changed)") + + // Record discovered values for exact assertion + t.Logf("MutationImpact event: %+v", event) + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + FieldHashes: []resolve.EntityFieldHash{ + // Hash of "UpdatedMe" (post-mutation username) + {EntityType: "User", FieldName: "username", FieldHash: 16932466035575627600, KeyRaw: `{"id":"1234"}`}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, // Mutation returned 1 User entity + }, + MutationEvents: []resolve.MutationEvent{ + { + MutationRootField: "updateUsername", + EntityType: "User", + EntityCacheKey: `{"__typename":"User","key":{"id":"1234"}}`, + HadCachedValue: true, // L2 had cached value from Request 1 query + IsStale: true, // Cached "Me" differs from fresh "UpdatedMe" + CachedHash: event.CachedHash, + FreshHash: event.FreshHash, + CachedBytes: event.CachedBytes, + FreshBytes: event.FreshBytes, + }, + }, + }), snap) + }) + + t.Run("mutation without prior cache shows no-cache event", func(t *testing.T) { + accounts.ResetUsers() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + tracker := 
newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // NO prior query — L2 cache is empty + // Send mutation directly + tracker.Reset() + respMut, headersMut := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) + assert.Contains(t, string(respMut), `"UpdatedMe"`) + + snap := normalizeSnapshot(parseCacheAnalytics(t, headersMut)) + require.NotNil(t, snap.MutationEvents, "should have mutation impact events") + require.Equal(t, 1, len(snap.MutationEvents), "should have exactly 1 mutation impact event") + + event := snap.MutationEvents[0] + assert.Equal(t, "updateUsername", event.MutationRootField) + assert.Equal(t, "User", event.EntityType) + assert.Equal(t, `{"__typename":"User","key":{"id":"1234"}}`, event.EntityCacheKey) + assert.Equal(t, false, event.HadCachedValue, "should NOT have found cached value") + assert.Equal(t, false, event.IsStale, "cannot be stale without cached value") + assert.Equal(t, uint64(0), event.CachedHash, "no cached value = no hash") + assert.Equal(t, 0, event.CachedBytes, "no cached value = no bytes") + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + FieldHashes: []resolve.EntityFieldHash{ + // Hash of "UpdatedMe" (post-mutation username) + {EntityType: "User", FieldName: "username", FieldHash: 16932466035575627600, KeyRaw: `{"id":"1234"}`}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, // Mutation returned 1 
User entity + }, + MutationEvents: []resolve.MutationEvent{ + { + MutationRootField: "updateUsername", + EntityType: "User", + EntityCacheKey: `{"__typename":"User","key":{"id":"1234"}}`, + HadCachedValue: false, // No prior query, L2 cache was empty + IsStale: false, // Cannot be stale without a cached value to compare + FreshHash: event.FreshHash, + FreshBytes: event.FreshBytes, + }, + }, + }), snap) + }) +} + +func TestFederationCachingAliases(t *testing.T) { + // Helper to create a standard setup for alias caching tests + setupAliasCachingTest := func(t *testing.T) ( + *federationtesting.FederationSetup, + *GraphqlClient, + context.Context, + context.CancelFunc, + *subgraphCallTracker, + *FakeLoaderCache, + string, // accountsHost + ) { + t.Helper() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := 
context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + return setup, gqlClient, ctx, cancel, tracker, defaultCache, accountsHost + } + + t.Run("L2 hit - alias then no alias", func(t *testing.T) { + setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) + + // Request 1: Use alias userName for username + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { topProducts { name reviews { body authorWithoutProvides { userName: username } } } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"userName":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"userName":"Me"}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") + + // Request 2: No alias (original field name) + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit from normalized cache)") + }) + + 
t.Run("L2 hit - two different aliases for same field", func(t *testing.T) { + setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) + + // Request 1: alias u1 for username + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { topProducts { name reviews { body authorWithoutProvides { u1: username } } } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"u1":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"u1":"Me"}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") + + // Request 2: alias u2 for username + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name reviews { body authorWithoutProvides { u2: username } } } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"u2":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"u2":"Me"}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying field)") + }) + + t.Run("no collision - alias matches another field name", func(t *testing.T) { + setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) + + // Request 1: alias realName for username (realName is another real field on User) + // This triggers an 
accounts entity fetch for username, stores normalized {"username":"Me"} in L2 + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { topProducts { name reviews { body authorWithoutProvides { realName: username } } } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"realName":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"realName":"Me"}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once for username") + + // Request 2: actual username field (no alias) - same underlying field + // Should be an L2 hit because both resolve username from accounts + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying field username)") + }) + + t.Run("no collision - field name used as alias for another field", func(t *testing.T) { + setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) + + // Request 1: username field (no alias) - triggers accounts entity fetch for 
username + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") + + // Request 2: different alias (u1) for same field (username) + // Should be an L2 hit because the underlying field is the same + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name reviews { body authorWithoutProvides { u1: username } } } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"u1":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"u1":"Me"}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying field)") + }) + + t.Run("L2 hit - multiple fields some aliased some not", func(t *testing.T) { + setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) + + // Request 1: alias username and include realName (realName comes from reviews, not accounts) + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { topProducts { name reviews { body 
authorWithoutProvides { userName: username realName } } } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"userName":"Me","realName":"User Usington"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"userName":"Me","realName":"User Usington"}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") + + // Request 2: no alias on username, different alias on realName + // accounts entity cache should be L2 hit (same username field) + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name reviews { body authorWithoutProvides { username name: realName } } } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","name":"User Usington"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","name":"User Usington"}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying username field)") + }) + + t.Run("L1 hit within single request with aliases", func(t *testing.T) { + // Tests L1 cache with aliased fields across entity fetches within the same request. + // Flow: + // 1. topProducts -> products + // 2. reviews -> reviews (entity fetch for Products) + // 3. 
authorWithoutProvides -> accounts (entity fetch for User 1234, aliased userName: username) + // -> User 1234 stored in L1 with normalized field names + // 4. sameUserReviewers -> reviews (returns [User 1234] reference) + // 5. Entity resolution for sameUserReviewers -> accounts + // -> User 1234 is L1 HIT (already fetched in step 3), entire accounts call skipped + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: false}), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Query with alias on username - sameUserReviewers returns same user, + // should be L1 hit from the first entity fetch + tracker.Reset() + query := `query { + topProducts { + reviews { + authorWithoutProvides { + id + userName: username + sameUserReviewers { + id + userName: username + } + } + } + } + }` + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]}]}}`, + string(resp)) + + // With L1 enabled: first accounts call fetches User 1234 for authorWithoutProvides + // sameUserReviewers entity resolution hits L1 -> accounts call skipped + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, "Should call accounts subgraph once (sameUserReviewers 
skipped via L1)") + }) + + t.Run("L1 hit within single request with mixed alias and no alias", func(t *testing.T) { + // Same as above, but the nested sameUserReviewers uses the original field name (no alias) + // while the outer authorWithoutProvides uses an alias. L1 cache stores normalized data, + // so the nested fetch should still hit L1 despite the different field naming. + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: false}), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Outer authorWithoutProvides uses alias "userName: username" + // Nested sameUserReviewers uses plain "username" (no alias) + // L1 should still hit because cache stores normalized (original) field names + tracker.Reset() + query := `query { + topProducts { + reviews { + authorWithoutProvides { + id + userName: username + sameUserReviewers { + id + username + } + } + } + } + }` + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}`, + string(resp)) + + // With L1 enabled: first accounts call fetches User 1234 for authorWithoutProvides + // sameUserReviewers entity resolution hits L1 -> accounts call skipped + accountsCalls := tracker.GetCount(accountsHost) + 
assert.Equal(t, 1, accountsCalls, "Should call accounts subgraph once (sameUserReviewers skipped via L1)") + }) + + t.Run("L2 hit - aliased root field then original root field", func(t *testing.T) { + setup, gqlClient, ctx, _, tracker, defaultCache, _ := setupAliasCachingTest(t) + productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) + + // Request 1: alias the root field topProducts as tp + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { tp: topProducts { name } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"tp":[{"name":"Trilby"},{"name":"Fedora"}]}}`, + string(resp)) + + productsCalls1 := tracker.GetCount(productsHost) + assert.Equal(t, 1, productsCalls1, "Request 1 should call products subgraph once") + + // Request 2: same root field without alias — should L2 hit (same cache key) + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby"},{"name":"Fedora"}]}}`, + string(resp)) + + productsCalls2 := tracker.GetCount(productsHost) + assert.Equal(t, 0, productsCalls2, "Request 2 should skip products (L2 hit from aliased root field)") + }) + + t.Run("L2 hit - two different root field aliases", func(t *testing.T) { + setup, gqlClient, ctx, _, tracker, defaultCache, _ := setupAliasCachingTest(t) + productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) + + // Request 1: alias p1 for topProducts + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { p1: topProducts { name } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"p1":[{"name":"Trilby"},{"name":"Fedora"}]}}`, + string(resp)) + + productsCalls1 := tracker.GetCount(productsHost) + assert.Equal(t, 1, productsCalls1, "Request 1 should call products subgraph once") + + // Request 
2: different alias p2 for same root field + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { p2: topProducts { name } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"p2":[{"name":"Trilby"},{"name":"Fedora"}]}}`, + string(resp)) + + productsCalls2 := tracker.GetCount(productsHost) + assert.Equal(t, 0, productsCalls2, "Request 2 should skip products (L2 hit - same underlying root field)") + }) + + t.Run("L1+L2 combined - alias entity caching across both layers", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) + + // Request 1: alias on username, sameUserReviewers triggers L1 hit within request + // L2 is also populated on the first entity fetch + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { + topProducts { + reviews { + authorWithoutProvides { + id + userName: username + sameUserReviewers { + id + userName: username + } + } + } + } + }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + 
assert.Equal(t, + `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1: accounts called once (sameUserReviewers skipped via L1)") + + // Request 2: same query without alias — L2 hit for User entity, no accounts calls + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { + topProducts { + reviews { + authorWithoutProvides { + id + username + sameUserReviewers { + id + username + } + } + } + } + }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2: accounts skipped (L2 hit from normalized cache)") + }) + + t.Run("L2 analytics - aliased root field", func(t *testing.T) { + const ( + keyTopProducts = `{"__typename":"Query","field":"topProducts"}` + dsProducts = "products" + byteSizeTopProducts = 53 + hashProductNameTrilby = uint64(1032923585965781586) + hashProductNameFedora = uint64(2432227032303632641) + ) + + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", 
FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Shared field hashes: Product.name for Trilby and Fedora from root field response + // Products are not entity-resolved (no @key fetch), so KeyRaw is empty + fieldHashes := []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: "{}"}, // xxhash("Trilby"), no entity key (root field) + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: "{}"}, // xxhash("Fedora"), no entity key (root field) + } + entityTypes := []resolve.EntityTypeInfo{ + {TypeName: "Product", Count: 2, UniqueKeys: 1}, // 2 products from root field, no entity keys + } + + // Request 1: aliased root field — L2 miss, populates cache + tracker.Reset() + query1 := `query { tp: topProducts { name } }` + resp, headers := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, `{"data":{"tp":[{"name":"Trilby"},{"name":"Fedora"}]}}`, string(resp)) + + // Cache key must use original field name "topProducts", NOT the alias "tp" + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // L2 miss: first request, cache empty + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: 
dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written after products fetch + }, + FieldHashes: fieldHashes, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Request 2: original root field (no alias) — L2 hit from Request 1 + tracker.Reset() + query2 := `query { topProducts { name } }` + resp, headers = gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby"},{"name":"Fedora"}]}}`, string(resp)) + + // Same cache key hit regardless of alias difference + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // L2 hit: populated by aliased Request 1 + }, + // No L2Writes: served from cache + FieldHashes: fieldHashes, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("L1 dedup - two aliases for same entity field in single request", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: false}), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) + + // Two aliases (a1, a2) for the same entity field (authorWithoutProvides) + // Both resolve the same User 1234 — second should be L1 hit + tracker.Reset() + query := `query { + topProducts { + reviews { + a1: authorWithoutProvides { + id + username + } + a2: 
authorWithoutProvides { + id + username + } + } + } + }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"reviews":[{"a1":{"id":"1234","username":"Me"},"a2":{"id":"1234","username":"Me"}}]},{"reviews":[{"a1":{"id":"1234","username":"Me"},"a2":{"id":"1234","username":"Me"}}]}]}}`, + string(resp)) + + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, "Should call accounts once (second alias L1 hit for same User entity)") + }) +} diff --git a/execution/engine/federation_caching_helpers_test.go b/execution/engine/federation_caching_helpers_test.go new file mode 100644 index 0000000000..0a922e5b2d --- /dev/null +++ b/execution/engine/federation_caching_helpers_test.go @@ -0,0 +1,866 @@ +package engine_test + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "path" + "sort" + "strings" + "sync" + "testing" + "time" + + "github.com/jensneuse/abstractlogger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting/gateway" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// subgraphCallTracker tracks HTTP requests made to subgraph servers +type subgraphCallTracker struct { + mu sync.RWMutex + counts map[string]int // Maps subgraph URL to call count + original http.RoundTripper +} + +func newSubgraphCallTracker(original http.RoundTripper) *subgraphCallTracker { + return &subgraphCallTracker{ + counts: make(map[string]int), + original: original, + } +} + +func (t *subgraphCallTracker) RoundTrip(req *http.Request) (*http.Response, error) { + t.mu.Lock() + host := req.URL.Host + t.counts[host]++ + t.mu.Unlock() + return t.original.RoundTrip(req) +} + +func (t 
*subgraphCallTracker) GetCount(url string) int { + t.mu.RLock() + defer t.mu.RUnlock() + return t.counts[url] +} + +func (t *subgraphCallTracker) Reset() { + t.mu.Lock() + defer t.mu.Unlock() + t.counts = make(map[string]int) +} + +func (t *subgraphCallTracker) GetCounts() map[string]int { + t.mu.RLock() + defer t.mu.RUnlock() + result := make(map[string]int) + for k, v := range t.counts { + result[k] = v + } + return result +} + +func (t *subgraphCallTracker) DebugPrint() string { + t.mu.RLock() + defer t.mu.RUnlock() + return fmt.Sprintf("%v", t.counts) +} + +// Helper functions for gateway setup with HTTP client support +type cachingGatewayOptions struct { + enableART bool + withLoaderCache map[string]resolve.LoaderCache + httpClient *http.Client + subgraphHeadersBuilder resolve.SubgraphHeadersBuilder + cachingOptions resolve.CachingOptions + subgraphEntityCachingConfigs engine.SubgraphCachingConfigs + debugMode bool +} + +func withCachingEnableART(enableART bool) func(*cachingGatewayOptions) { + return func(opts *cachingGatewayOptions) { + opts.enableART = enableART + } +} + +func withCachingLoaderCache(loaderCache map[string]resolve.LoaderCache) func(*cachingGatewayOptions) { + return func(opts *cachingGatewayOptions) { + opts.withLoaderCache = loaderCache + } +} + +func withHTTPClient(client *http.Client) func(*cachingGatewayOptions) { + return func(opts *cachingGatewayOptions) { + opts.httpClient = client + } +} + +func withSubgraphHeadersBuilder(builder resolve.SubgraphHeadersBuilder) func(*cachingGatewayOptions) { + return func(opts *cachingGatewayOptions) { + opts.subgraphHeadersBuilder = builder + } +} + +func withCachingOptionsFunc(cachingOpts resolve.CachingOptions) func(*cachingGatewayOptions) { + return func(opts *cachingGatewayOptions) { + opts.cachingOptions = cachingOpts + } +} + +func withSubgraphEntityCachingConfigs(configs engine.SubgraphCachingConfigs) func(*cachingGatewayOptions) { + return func(opts *cachingGatewayOptions) { + 
opts.subgraphEntityCachingConfigs = configs + } +} + +func withDebugMode(enabled bool) func(*cachingGatewayOptions) { + return func(opts *cachingGatewayOptions) { + opts.debugMode = enabled + } +} + +type cachingGatewayOptionsToFunc func(opts *cachingGatewayOptions) + +func addCachingGateway(options ...cachingGatewayOptionsToFunc) func(setup *federationtesting.FederationSetup) *httptest.Server { + opts := &cachingGatewayOptions{} + for _, option := range options { + option(opts) + } + return func(setup *federationtesting.FederationSetup) *httptest.Server { + httpClient := opts.httpClient + if httpClient == nil { + httpClient = http.DefaultClient + } + + poller := gateway.NewDatasource([]gateway.ServiceConfig{ + {Name: "accounts", URL: setup.AccountsUpstreamServer.URL}, + {Name: "products", URL: setup.ProductsUpstreamServer.URL, WS: strings.ReplaceAll(setup.ProductsUpstreamServer.URL, "http:", "ws:")}, + {Name: "reviews", URL: setup.ReviewsUpstreamServer.URL}, + }, httpClient) + + gtw := gateway.HandlerWithCaching(abstractlogger.NoopLogger, poller, httpClient, opts.enableART, opts.withLoaderCache, opts.subgraphHeadersBuilder, opts.cachingOptions, opts.subgraphEntityCachingConfigs, opts.debugMode) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + poller.Run(ctx) + return httptest.NewServer(gtw) + } +} + +// mockSubgraphHeadersBuilder is a mock implementation of SubgraphHeadersBuilder +type mockSubgraphHeadersBuilder struct { + hashes map[string]uint64 +} + +func (m *mockSubgraphHeadersBuilder) HeadersForSubgraph(subgraphName string) (http.Header, uint64) { + hash := m.hashes[subgraphName] + if hash == 0 { + // Return default hash if not found + return nil, 99999 + } + return nil, hash +} + +func (m *mockSubgraphHeadersBuilder) HashAll() uint64 { + // Return a simple hash of all subgraph hashes combined + var result uint64 + for _, hash := range m.hashes { + result ^= hash + } + return result +} + +func 
cachingTestQueryPath(name string) string { + return path.Join("..", "federationtesting", "testdata", name) +} + +type CacheLogEntry struct { + Operation string // "get", "set", "delete" + Keys []string // Keys involved in the operation + Hits []bool // For Get: whether each key was a hit (true) or miss (false) + Caller string // Fetch identity when debug enabled: "accounts: entity(User)" or "products: rootField(Query.topProducts)" +} + +// sortCacheLogKeys sorts the keys (and corresponding hits) in each cache log entry. +// This makes comparisons order-independent when multiple keys are present. +// Caller is intentionally stripped — it's for debug logging, not assertions. +func sortCacheLogKeys(log []CacheLogEntry) []CacheLogEntry { + sorted := make([]CacheLogEntry, len(log)) + for i, entry := range log { + // Only sort if there are multiple keys + if len(entry.Keys) <= 1 { + sorted[i] = CacheLogEntry{ + Operation: entry.Operation, + Keys: entry.Keys, + Hits: entry.Hits, + } + continue + } + + // Create pairs of (key, hit) to sort together + pairs := make([]struct { + key string + hit bool + }, len(entry.Keys)) + for j := range entry.Keys { + pairs[j].key = entry.Keys[j] + if entry.Hits != nil && j < len(entry.Hits) { + pairs[j].hit = entry.Hits[j] + } + } + + // Sort pairs by key + sort.Slice(pairs, func(a, b int) bool { + return pairs[a].key < pairs[b].key + }) + + // Extract sorted keys and hits + sorted[i] = CacheLogEntry{ + Operation: entry.Operation, + Keys: make([]string, len(pairs)), + Hits: nil, + } + if len(entry.Hits) > 0 { + sorted[i].Hits = make([]bool, len(pairs)) + } + for j := range pairs { + sorted[i].Keys[j] = pairs[j].key + if sorted[i].Hits != nil { + sorted[i].Hits[j] = pairs[j].hit + } + } + } + return sorted +} + +// sortCacheLogKeysWithCaller is like sortCacheLogKeys but preserves the Caller field. +// Use this when you want assertions to verify which Loader method chain triggered each cache event. 
func sortCacheLogKeysWithCaller(log []CacheLogEntry) []CacheLogEntry {
	sorted := make([]CacheLogEntry, len(log))
	for i, entry := range log {
		// Single-key entries need no sorting; copy through (Caller preserved).
		if len(entry.Keys) <= 1 {
			sorted[i] = CacheLogEntry{
				Operation: entry.Operation,
				Keys:      entry.Keys,
				Hits:      entry.Hits,
				Caller:    entry.Caller,
			}
			continue
		}

		// Pair each key with its hit flag so both reorder together.
		pairs := make([]struct {
			key string
			hit bool
		}, len(entry.Keys))
		for j := range entry.Keys {
			pairs[j].key = entry.Keys[j]
			if entry.Hits != nil && j < len(entry.Hits) {
				pairs[j].hit = entry.Hits[j]
			}
		}
		sort.Slice(pairs, func(a, b int) bool {
			return pairs[a].key < pairs[b].key
		})
		sorted[i] = CacheLogEntry{
			Operation: entry.Operation,
			Keys:      make([]string, len(pairs)),
			Hits:      nil,
			Caller:    entry.Caller,
		}
		// Hits slice is rebuilt only when the original entry had one.
		if len(entry.Hits) > 0 {
			sorted[i].Hits = make([]bool, len(pairs))
		}
		for j := range pairs {
			sorted[i].Keys[j] = pairs[j].key
			if sorted[i].Hits != nil {
				sorted[i].Hits[j] = pairs[j].hit
			}
		}
	}
	return sorted
}

// cacheEntry is one stored value plus its optional expiry.
// A nil expiresAt means the entry never expires.
type cacheEntry struct {
	data      []byte
	expiresAt *time.Time
}

// FakeLoaderCache is an in-memory, mutex-guarded test double for
// resolve.LoaderCache. Every Get/Set/Delete is appended to an operation
// log so tests can assert on the exact sequence of cache interactions.
type FakeLoaderCache struct {
	mu      sync.RWMutex
	storage map[string]cacheEntry
	log     []CacheLogEntry
}

// NewFakeLoaderCache returns an empty cache with an initialized storage map and log.
func NewFakeLoaderCache() *FakeLoaderCache {
	return &FakeLoaderCache{
		storage: make(map[string]cacheEntry),
		log:     make([]CacheLogEntry, 0),
	}
}

// cleanupExpired removes entries whose TTL has elapsed.
// The caller must already hold f.mu for writing.
func (f *FakeLoaderCache) cleanupExpired() {
	now := time.Now()
	for key, entry := range f.storage {
		if entry.expiresAt != nil && now.After(*entry.expiresAt) {
			delete(f.storage, key)
		}
	}
}

// Get returns one *resolve.CacheEntry per requested key, positionally aligned
// with keys (nil marks a miss). Stored bytes are copied before being returned,
// and the lookup is recorded in the operation log with per-key hit flags.
func (f *FakeLoaderCache) Get(ctx context.Context, keys []string) ([]*resolve.CacheEntry, error) {
	f.mu.Lock()
	defer f.mu.Unlock()

	// Clean up expired entries before executing command
	f.cleanupExpired()

	hits := make([]bool, len(keys))
	result := make([]*resolve.CacheEntry, len(keys))
	for i, key := range keys {
		if entry, exists := f.storage[key]; exists {
			// Make a copy of the data to prevent external modifications
			dataCopy := make([]byte, len(entry.data))
			copy(dataCopy, entry.data)
			ce := &resolve.CacheEntry{
				Key:   key,
				Value: dataCopy,
			}
			// Populate RemainingTTL from expiresAt for cache age analytics
			if entry.expiresAt != nil {
				remaining := time.Until(*entry.expiresAt)
				if remaining > 0 {
					ce.RemainingTTL = remaining
				}
			}
			result[i] = ce
			hits[i] = true
		} else {
			result[i] = nil
			hits[i] = false
		}
	}

	// Log the operation
	caller := ""
	if cfi := resolve.GetCacheFetchInfo(ctx); cfi != nil {
		caller = cfi.String()
	}
	f.log = append(f.log, CacheLogEntry{
		Operation: "get",
		Keys:      keys,
		Hits:      hits,
		Caller:    caller,
	})

	return result, nil
}

// Set stores a copy of each non-nil entry's value under its key. A ttl of 0
// stores without expiration. The write is recorded in the operation log; a
// Set with zero entries is a no-op and is not logged.
func (f *FakeLoaderCache) Set(ctx context.Context, entries []*resolve.CacheEntry, ttl time.Duration) error {
	if len(entries) == 0 {
		return nil
	}

	f.mu.Lock()
	defer f.mu.Unlock()

	// Clean up expired entries before executing command
	f.cleanupExpired()

	keys := make([]string, 0, len(entries))
	for _, entry := range entries {
		if entry == nil {
			continue
		}
		// NOTE: local variable intentionally shadows the cacheEntry type name.
		cacheEntry := cacheEntry{
			// Make a copy of the data to prevent external modifications
			data: make([]byte, len(entry.Value)),
		}
		copy(cacheEntry.data, entry.Value)

		// If ttl is 0, store without expiration
		if ttl > 0 {
			expiresAt := time.Now().Add(ttl)
			cacheEntry.expiresAt = &expiresAt
		}

		f.storage[entry.Key] = cacheEntry
		keys = append(keys, entry.Key)
	}

	// Log the operation
	caller := ""
	if cfi := resolve.GetCacheFetchInfo(ctx); cfi != nil {
		caller = cfi.String()
	}
	f.log = append(f.log, CacheLogEntry{
		Operation: "set",
		Keys:      keys,
		Hits:      nil, // Set operations don't have hits/misses
		Caller:    caller,
	})

	return nil
}

// Delete removes the given keys (missing keys are ignored) and records the
// operation in the log.
func (f *FakeLoaderCache) Delete(ctx context.Context, keys []string) error {
	f.mu.Lock()
	defer f.mu.Unlock()

	// Clean up expired entries before executing command
	f.cleanupExpired()

	for _, key := range keys {
		delete(f.storage, key)
	}

	// Log the operation
	caller := ""
	if cfi := resolve.GetCacheFetchInfo(ctx); cfi != nil {
		caller = cfi.String()
	}
	f.log = append(f.log, CacheLogEntry{
		Operation: "delete",
		Keys:      keys,
		Hits:      nil, // Delete operations don't have hits/misses
		Caller:    caller,
	})

	return nil
}

// GetLog returns a copy of the cache operation log
func (f *FakeLoaderCache) GetLog() []CacheLogEntry {
	f.mu.RLock()
	defer f.mu.RUnlock()
	logCopy := make([]CacheLogEntry, len(f.log))
	copy(logCopy, f.log)
	return logCopy
}

// GetLogWithCaller returns a copy of the cache operation log with Caller populated.
// Use this with sortCacheLogKeysWithCaller to assert on both operation details and
// the Loader method chain that triggered each cache event.
// NOTE(review): the body is currently identical to GetLog (the log always carries
// Caller); the distinction is applied by the sortCacheLogKeys* helpers, which
// either strip or preserve Caller.
func (f *FakeLoaderCache) GetLogWithCaller() []CacheLogEntry {
	f.mu.RLock()
	defer f.mu.RUnlock()
	logCopy := make([]CacheLogEntry, len(f.log))
	copy(logCopy, f.log)
	return logCopy
}

// ClearLog clears the cache operation log
func (f *FakeLoaderCache) ClearLog() {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.log = make([]CacheLogEntry, 0)
}

// TestFakeLoaderCache tests the cache implementation itself
func TestFakeLoaderCache(t *testing.T) {
	ctx := context.Background()
	cache := NewFakeLoaderCache()

	t.Run("SetAndGet", func(t *testing.T) {
		// Test basic set and get
		keys := []string{"key1", "key2", "key3"}
		entries := []*resolve.CacheEntry{
			{Key: "key1", Value: []byte("value1")},
			{Key: "key2", Value: []byte("value2")},
			{Key: "key3", Value: []byte("value3")},
		}

		err := cache.Set(ctx, entries, 0) // No TTL
		require.NoError(t, err)

		// Get all keys
		result, err := cache.Get(ctx, keys)
		require.NoError(t, err)
		require.Len(t, result, 3)
		assert.NotNil(t, result[0])
		assert.Equal(t, "value1", string(result[0].Value))
		assert.NotNil(t, result[1])
		assert.Equal(t, "value2", string(result[1].Value))
		assert.NotNil(t, result[2])
		assert.Equal(t, "value3", string(result[2].Value))

		// Get partial keys
		result, err = cache.Get(ctx, []string{"key2", "key4", "key1"})
		require.NoError(t, err)
		require.Len(t, result, 3)
		assert.NotNil(t, result[0])
		assert.Equal(t, "value2", string(result[0].Value))
		assert.Nil(t, result[1]) // key4 doesn't exist
		assert.NotNil(t, result[2])
		assert.Equal(t, "value1", string(result[2].Value))
	})

	t.Run("Delete", func(t *testing.T) {
		// Set some keys
		entries := []*resolve.CacheEntry{
			{Key: "del1", Value: []byte("v1")},
			{Key: "del2", Value: []byte("v2")},
			{Key: "del3", Value: []byte("v3")},
		}
		err := cache.Set(ctx, entries, 0)
		require.NoError(t, err)

		// Delete some keys
		err = cache.Delete(ctx, []string{"del1", "del3"})
		require.NoError(t, err)

		// Check remaining keys
		result, err := cache.Get(ctx, []string{"del1", "del2", "del3"})
		require.NoError(t, err)
		assert.Nil(t, result[0])    // del1 was deleted
		assert.NotNil(t, result[1]) // del2 still exists
		assert.Equal(t, "v2", string(result[1].Value))
		assert.Nil(t, result[2]) // del3 was deleted
	})

	t.Run("TTL", func(t *testing.T) {
		// Set with 50ms TTL
		entries := []*resolve.CacheEntry{
			{Key: "ttl1", Value: []byte("expire1")},
			{Key: "ttl2", Value: []byte("expire2")},
		}
		err := cache.Set(ctx, entries, 50*time.Millisecond)
		require.NoError(t, err)

		// Immediately get - should exist
		result, err := cache.Get(ctx, []string{"ttl1", "ttl2"})
		require.NoError(t, err)
		assert.NotNil(t, result[0])
		assert.Equal(t, "expire1", string(result[0].Value))
		assert.NotNil(t, result[1])
		assert.Equal(t, "expire2", string(result[1].Value))

		// Wait for expiration
		time.Sleep(60 * time.Millisecond)

		// Get again - should be nil
		result, err = cache.Get(ctx, []string{"ttl1", "ttl2"})
		require.NoError(t, err)
		assert.Nil(t, result[0])
		assert.Nil(t, result[1])
	})

	t.Run("MixedTTL", func(t *testing.T) {
		// Set some with TTL, some without
		err := cache.Set(ctx, []*resolve.CacheEntry{{Key: "perm1", Value: []byte("permanent")}}, 0)
		require.NoError(t, err)

		err = cache.Set(ctx, []*resolve.CacheEntry{{Key: "temp1", Value: []byte("temporary")}}, 50*time.Millisecond)
		require.NoError(t, err)

		// Wait for temporary to expire
		time.Sleep(60 * time.Millisecond)

		// Check both
		result, err := cache.Get(ctx, []string{"perm1", "temp1"})
		require.NoError(t, err)
		assert.NotNil(t, result[0])
		assert.Equal(t, "permanent", string(result[0].Value)) // Still exists
		assert.Nil(t, result[1])                              // Expired
	})

	t.Run("ThreadSafety", func(t *testing.T) {
		// Test concurrent access: one writer, one reader, one deleter,
		// each signalling completion on the shared channel.
		done := make(chan bool)

		// Writer goroutine
		go func() {
			for i := 0; i < 100; i++ {
				key := fmt.Sprintf("concurrent_%d", i)
				value := fmt.Sprintf("value_%d", i)
				err := cache.Set(ctx, []*resolve.CacheEntry{{Key: key, Value: []byte(value)}}, 0)
				assert.NoError(t, err)
			}
			done <- true
		}()

		// Reader goroutine
		go func() {
			for i := 0; i < 100; i++ {
				key := fmt.Sprintf("concurrent_%d", i%50)
				_, err := cache.Get(ctx, []string{key})
				assert.NoError(t, err)
			}
			done <- true
		}()

		// Deleter goroutine
		go func() {
			for i := 0; i < 50; i++ {
				key := fmt.Sprintf("concurrent_%d", i*2)
				err := cache.Delete(ctx, []string{key})
				assert.NoError(t, err)
			}
			done <- true
		}()

		// Wait for all goroutines
		<-done
		<-done
		<-done
	})

	t.Run("ResultLengthMatchesKeysLength", func(t *testing.T) {
		// Test that result length always matches input keys length

		// Set some data
		err := cache.Set(ctx, []*resolve.CacheEntry{
			{Key: "exist1", Value: []byte("data1")},
			{Key: "exist3", Value: []byte("data3")},
		}, 0)
		require.NoError(t, err)

		// Request mix of existing and non-existing keys
		keys := []string{"exist1", "missing1", "exist3", "missing2", "missing3"}
		result, err := cache.Get(ctx, keys)
		require.NoError(t, err)

		// Verify length matches exactly
		assert.Len(t, result, len(keys), "Result length must match keys length")
		assert.Len(t, result, 5, "Should return exactly 5 results")

		// Verify correct values
		assert.NotNil(t, result[0])
		assert.Equal(t, "data1", string(result[0].Value)) // exist1
		assert.Nil(t, result[1])                          // missing1
		assert.NotNil(t, result[2])
		assert.Equal(t, "data3", string(result[2].Value)) // exist3
		assert.Nil(t, result[3])                          // missing2
		assert.Nil(t, result[4])                          // missing3

		// Test with all missing keys
		allMissingKeys := []string{"missing4", "missing5", "missing6"}
		result, err = cache.Get(ctx, allMissingKeys)
		require.NoError(t, err)
		assert.Len(t, result, 3, "Should return 3 results for 3 keys")
		assert.Nil(t, result[0])
		assert.Nil(t, result[1])
		assert.Nil(t, result[2])

		// Test with empty keys
		result, err = cache.Get(ctx, []string{})
		require.NoError(t, err)
		assert.Len(t, result, 0, "Should return empty slice for empty keys")
	})
}

// =============================================================================
// L1/L2 CACHE END-TO-END TESTS
// =============================================================================
//
// These tests verify the L1 (per-request in-memory) and L2 (external cross-request)
// caching behavior in a federated GraphQL setup.
//
// L1 Cache: Prevents redundant fetches for the same entity within a single request
// L2 Cache: Shares entity data across requests via external cache (e.g., Redis)
//
// Lookup Order (entity fetches): L1 -> L2 -> Subgraph Fetch
// Lookup Order (root fetches): L2 -> Subgraph Fetch (no L1)

// parseCacheAnalytics decodes the X-Cache-Analytics response header into a
// CacheAnalyticsSnapshot, failing the test immediately if the header is
// missing or is not valid JSON.
func parseCacheAnalytics(t *testing.T, headers http.Header) resolve.CacheAnalyticsSnapshot {
	t.Helper()
	raw := headers.Get("X-Cache-Analytics")
	require.NotEmpty(t, raw, "X-Cache-Analytics header should be present")
	var snap resolve.CacheAnalyticsSnapshot
	err := json.Unmarshal([]byte(raw), &snap)
	require.NoError(t, err, "X-Cache-Analytics header should be valid JSON")
	return snap
}

// normalizeSnapshot makes a CacheAnalyticsSnapshot deterministically comparable by
// sorting EntityTypes, L1Reads, L2Reads, L1Writes, L2Writes, and FieldHashes.
// It also zeroes timing-derived fields (CacheAgeMs), drops FetchTimings entirely,
// and collapses empty slices to nil so JSON round-trips compare equal to literals.
func normalizeSnapshot(snap resolve.CacheAnalyticsSnapshot) resolve.CacheAnalyticsSnapshot {
	// Sort EntityTypes by TypeName
	if snap.EntityTypes != nil {
		sorted := make([]resolve.EntityTypeInfo, len(snap.EntityTypes))
		copy(sorted, snap.EntityTypes)
		sort.Slice(sorted, func(i, j int) bool {
			return sorted[i].TypeName < sorted[j].TypeName
		})
		snap.EntityTypes = sorted
	}

	// Sort L1Reads and zero out non-deterministic CacheAgeMs
	if snap.L1Reads != nil {
		sorted := make([]resolve.CacheKeyEvent, len(snap.L1Reads))
		copy(sorted, snap.L1Reads)
		for i := range sorted {
			sorted[i].CacheAgeMs = 0
		}
		sort.Slice(sorted, func(i, j int) bool {
			if sorted[i].CacheKey != sorted[j].CacheKey {
				return sorted[i].CacheKey < sorted[j].CacheKey
			}
			return sorted[i].Kind < sorted[j].Kind
		})
		snap.L1Reads = sorted
	}

	// Sort L2Reads and zero out non-deterministic CacheAgeMs
	if snap.L2Reads != nil {
		sorted := make([]resolve.CacheKeyEvent, len(snap.L2Reads))
		copy(sorted, snap.L2Reads)
		for i := range sorted {
			sorted[i].CacheAgeMs = 0
		}
		sort.Slice(sorted, func(i, j int) bool {
			if sorted[i].CacheKey != sorted[j].CacheKey {
				return sorted[i].CacheKey < sorted[j].CacheKey
			}
			return sorted[i].Kind < sorted[j].Kind
		})
		snap.L2Reads = sorted
	}

	// Sort L1Writes
	if snap.L1Writes != nil {
		sorted := make([]resolve.CacheWriteEvent, len(snap.L1Writes))
		copy(sorted, snap.L1Writes)
		sort.Slice(sorted, func(i, j int) bool {
			if sorted[i].CacheKey != sorted[j].CacheKey {
				return sorted[i].CacheKey < sorted[j].CacheKey
			}
			return sorted[i].CacheLevel < sorted[j].CacheLevel
		})
		snap.L1Writes = sorted
	}

	// Sort L2Writes
	if snap.L2Writes != nil {
		sorted := make([]resolve.CacheWriteEvent, len(snap.L2Writes))
		copy(sorted, snap.L2Writes)
		sort.Slice(sorted, func(i, j int) bool {
			if sorted[i].CacheKey != sorted[j].CacheKey {
				return sorted[i].CacheKey < sorted[j].CacheKey
			}
			return sorted[i].CacheLevel < sorted[j].CacheLevel
		})
		snap.L2Writes = sorted
	}

	// Sort FieldHashes for deterministic comparison
	if snap.FieldHashes != nil {
		sorted := make([]resolve.EntityFieldHash, len(snap.FieldHashes))
		copy(sorted, snap.FieldHashes)
		sort.Slice(sorted, func(i, j int) bool {
			if sorted[i].EntityType != sorted[j].EntityType {
				return sorted[i].EntityType < sorted[j].EntityType
			}
			if sorted[i].FieldName != sorted[j].FieldName {
				return sorted[i].FieldName < sorted[j].FieldName
			}
			if sorted[i].KeyRaw != sorted[j].KeyRaw {
				return sorted[i].KeyRaw < sorted[j].KeyRaw
			}
			if sorted[i].KeyHash != sorted[j].KeyHash {
				return sorted[i].KeyHash < sorted[j].KeyHash
			}
			return sorted[i].FieldHash < sorted[j].FieldHash
		})
		snap.FieldHashes = sorted
	}

	// Sort ShadowComparisons by CacheKey and zero out non-deterministic CacheAgeMs
	if snap.ShadowComparisons != nil {
		sorted := make([]resolve.ShadowComparisonEvent, len(snap.ShadowComparisons))
		copy(sorted, snap.ShadowComparisons)
		for i := range sorted {
			sorted[i].CacheAgeMs = 0
		}
		sort.Slice(sorted, func(i, j int) bool {
			if sorted[i].CacheKey != sorted[j].CacheKey {
				return sorted[i].CacheKey < sorted[j].CacheKey
			}
			return sorted[i].EntityType < sorted[j].EntityType
		})
		snap.ShadowComparisons = sorted
	}

	// Sort MutationEvents for deterministic comparison
	if snap.MutationEvents != nil {
		sorted := make([]resolve.MutationEvent, len(snap.MutationEvents))
		copy(sorted, snap.MutationEvents)
		sort.Slice(sorted, func(i, j int) bool {
			if sorted[i].MutationRootField != sorted[j].MutationRootField {
				return sorted[i].MutationRootField < sorted[j].MutationRootField
			}
			return sorted[i].EntityCacheKey < sorted[j].EntityCacheKey
		})
		snap.MutationEvents = sorted
	}

	// Zero out non-deterministic FetchTimings (DurationMs varies between runs)
	// Use normalizeFetchTimings() when you need to assert FetchTimings fields.
	snap.FetchTimings = nil

	// Normalize empty slices to nil for consistent comparison
	// (JSON unmarshalling produces empty slices, expected literals produce nil)
	if len(snap.L1Reads) == 0 {
		snap.L1Reads = nil
	}
	if len(snap.L2Reads) == 0 {
		snap.L2Reads = nil
	}
	if len(snap.L1Writes) == 0 {
		snap.L1Writes = nil
	}
	if len(snap.L2Writes) == 0 {
		snap.L2Writes = nil
	}
	if len(snap.EntityTypes) == 0 {
		snap.EntityTypes = nil
	}
	if len(snap.FieldHashes) == 0 {
		snap.FieldHashes = nil
	}
	if len(snap.ErrorEvents) == 0 {
		snap.ErrorEvents = nil
	}
	if len(snap.ShadowComparisons) == 0 {
		snap.ShadowComparisons = nil
	}
	if len(snap.MutationEvents) == 0 {
		snap.MutationEvents = nil
	}

	return snap
}

// normalizeFetchTimings sorts FetchTimings deterministically and zeros DurationMs
// (the only field it zeroes). Unlike normalizeSnapshot, this preserves
// all other fields (HTTPStatusCode, ResponseBytes, etc.) for assertion.
// NOTE(review): the newly added TTFBMs field is also timing-derived and may vary
// between runs — confirm whether it should be zeroed here as well.
+func normalizeFetchTimings(timings []resolve.FetchTimingEvent) []resolve.FetchTimingEvent { + sorted := make([]resolve.FetchTimingEvent, len(timings)) + copy(sorted, timings) + for i := range sorted { + sorted[i].DurationMs = 0 + } + sort.Slice(sorted, func(i, j int) bool { + if sorted[i].DataSource != sorted[j].DataSource { + return sorted[i].DataSource < sorted[j].DataSource + } + return sorted[i].Source < sorted[j].Source + }) + return sorted +} + +func mustParseHost(rawURL string) string { + parsed, err := url.Parse(rawURL) + if err != nil { + panic(fmt.Sprintf("failed to parse URL %q: %v", rawURL, err)) + } + return parsed.Host +} diff --git a/execution/engine/federation_caching_l1_test.go b/execution/engine/federation_caching_l1_test.go new file mode 100644 index 0000000000..fc8aac2b11 --- /dev/null +++ b/execution/engine/federation_caching_l1_test.go @@ -0,0 +1,1061 @@ +package engine_test + +import ( + "context" + "net/http" + "net/url" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +func TestL1CacheReducesHTTPCalls(t *testing.T) { + // This test demonstrates L1 cache behavior with entity fetches. + // + // Query structure: + // - me: root query to accounts service → returns User 1234 {id, username} + // - me.reviews: entity fetch from reviews service → returns reviews + // - me.reviews.product: entity fetch from products service → returns products + // - me.reviews.product.reviews: entity fetch from reviews service → returns reviews + // - me.reviews.product.reviews.authorWithoutProvides: entity fetch from accounts → returns User 1234 + // + // Note: The `me` root query does NOT populate L1 cache because L1 cache only works + // for entity fetches (RequiresEntityFetch=true). Root queries don't qualify. + // + // With L1 enabled: Both `me` (root) and `authorWithoutProvides` (entity) make calls. 
+ // L1 cache doesn't help here because `me` is a root query, not an entity fetch. + // With L1 disabled: Same behavior - 2 accounts calls. + // + // L1 cache DOES help when the same entity is fetched multiple times through + // entity fetches within a single request (e.g., self-referential entities). + + query := `query { + me { + id + username + reviews { + body + product { + upc + reviews { + authorWithoutProvides { + id + username + } + } + } + } + } + }` + + expectedResponse := `{"data":{"me":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control.","product":{"upc":"top-1","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","product":{"upc":"top-2","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}}]}}}` + + t.Run("L1 enabled - entity fetches use L1 cache", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Both `me` (root query) and `authorWithoutProvides` (entity fetch) call accounts. + // L1 cache doesn't help because `me` is a root query, not an entity fetch. 
+ // Root queries don't populate L1 cache (RequiresEntityFetch=false). + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, + "Both me (root query) and authorWithoutProvides (entity fetch) call accounts") + }) + + t.Run("L1 disabled - more accounts calls without cache", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // KEY ASSERTION: With L1 disabled, 2 accounts calls! + // The authorWithoutProvides.username requires another fetch since L1 is disabled. + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 2, accountsCalls, + "With L1 disabled, should make 2 accounts calls (no cache reuse)") + }) +} + +func TestL1CacheReducesHTTPCallsInterface(t *testing.T) { + // This test demonstrates L1 cache behavior with interface return types. 
+ // + // Query structure: + // - meInterface: root query to accounts service → returns User 1234 via Identifiable interface + // - meInterface.reviews: entity fetch from reviews service → returns reviews + // - meInterface.reviews.product: entity fetch from products service → returns products + // - meInterface.reviews.product.reviews: entity fetch from reviews service → returns reviews + // - meInterface.reviews.product.reviews.authorWithoutProvides: entity fetch from accounts → returns User 1234 + // + // This tests that interface return types properly build cache key templates + // for all entity types that implement the interface. + + query := `query { + meInterface { + ... on User { + id + username + reviews { + body + product { + upc + reviews { + authorWithoutProvides { + id + username + } + } + } + } + } + } + }` + + expectedResponse := `{"data":{"meInterface":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control.","product":{"upc":"top-1","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","product":{"upc":"top-2","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}}]}}}` + + t.Run("L1 enabled - interface entity fetches use L1 cache", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + 
accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Same behavior as non-interface: root query + entity fetch both call accounts + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, + "Interface field should behave same as object field for L1 caching") + }) + + t.Run("L1 disabled - more accounts calls without cache", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // KEY ASSERTION: With L1 disabled, 2 accounts calls! + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 2, accountsCalls, + "With L1 disabled, should make 2 accounts calls (no cache reuse)") + }) +} + +func TestL1CacheReducesHTTPCallsUnion(t *testing.T) { + // This test demonstrates L1 cache behavior with union return types. 
+ // + // Query structure: + // - meUnion: root query to accounts service → returns User 1234 via MeUnion union + // - meUnion.reviews: entity fetch from reviews service → returns reviews + // - meUnion.reviews.product: entity fetch from products service → returns products + // - meUnion.reviews.product.reviews: entity fetch from reviews service → returns reviews + // - meUnion.reviews.product.reviews.authorWithoutProvides: entity fetch from accounts → returns User 1234 + // + // This tests that union return types properly build cache key templates + // for all entity types that are members of the union. + + query := `query { + meUnion { + ... on User { + id + username + reviews { + body + product { + upc + reviews { + authorWithoutProvides { + id + username + } + } + } + } + } + } + }` + + expectedResponse := `{"data":{"meUnion":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control.","product":{"upc":"top-1","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","product":{"upc":"top-2","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}}]}}}` + + t.Run("L1 enabled - union entity fetches use L1 cache", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + 
tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Same behavior as non-union: root query + entity fetch both call accounts + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, + "Union field should behave same as object field for L1 caching") + }) + + t.Run("L1 disabled - more accounts calls without cache", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // KEY ASSERTION: With L1 disabled, 2 accounts calls! + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 2, accountsCalls, + "With L1 disabled, should make 2 accounts calls (no cache reuse)") + }) +} + +func TestL1CacheSelfReferentialEntity(t *testing.T) { + // This test verifies that self-referential entities don't cause + // stack overflow when L1 cache is enabled. + // + // Background: When an entity type has a field that returns the same type + // (e.g., User.sameUserReviewers returning [User]), and L1 cache stores + // a pointer to the entity, both key.Item and key.FromCache can point to + // the same memory location. 
Without a fix, calling MergeValues(ptr, ptr) + // causes infinite recursion and stack overflow. + // + // The sameUserReviewers field has @requires(fields: "username") which forces + // sequential execution: the User entity is first fetched from accounts + // (populating L1), then sameUserReviewers is resolved, returning the same + // User entity that's already in L1 cache. + + query := `query { + topProducts { + reviews { + authorWithoutProvides { + id + username + sameUserReviewers { + id + username + } + } + } + } + }` + + // This response shows User 1234 appearing both at authorWithoutProvides level + // and inside sameUserReviewers (which returns the same user for testing) + expectedResponse := `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}` + + t.Run("self-referential entity should not cause stack overflow", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // This should complete without stack overflow + // Before the fix, this would crash with "fatal error: stack overflow" + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + }) +} + +func TestL1CacheChildFieldEntityList(t *testing.T) { + // This test verifies L1 cache behavior for 
User.sameUserReviewers: [User!]! + // which returns only the same user (self-reference). + // + // sameUserReviewers is defined in the reviews subgraph with @requires(fields: "username"), + // which means: + // 1. The gateway first resolves username from accounts (entity fetch) + // 2. Then calls reviews to get sameUserReviewers + // 3. sameUserReviewers returns User references (just IDs) - only the same user + // 4. The gateway must make entity fetches to accounts to resolve those users + // + // Query flow: + // 1. topProducts -> products subgraph (root query) + // 2. reviews -> reviews subgraph (entity fetch for Products) + // 3. authorWithoutProvides -> accounts subgraph (entity fetch for User 1234) + // - User 1234 is fetched and stored in L1 + // 4. sameUserReviewers -> reviews subgraph (after username resolved) + // - Returns [User 1234] as reference (same user only) + // 5. Entity resolution for sameUserReviewers -> accounts subgraph + // - User 1234 is 100% L1 HIT (already fetched in step 3) + // - THE ENTIRE ACCOUNTS CALL IS SKIPPED! + // + // With L1 enabled: The sameUserReviewers entity fetch is completely skipped + // because all entities are already in L1 cache. 
+ + query := `query { + topProducts { + reviews { + authorWithoutProvides { + id + username + sameUserReviewers { + id + username + } + } + } + } + }` + + // User 1234's sameUserReviewers returns [User 1234] (only self) + expectedResponse := `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}` + + t.Run("L1 enabled - sameUserReviewers fetch entirely skipped via L1 cache", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, // Isolate L1 behavior + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // With L1 enabled: + // - First accounts call fetches User 1234 for authorWithoutProvides (L1 miss, stored) + // - Reviews called for sameUserReviewers (returns [User 1234] reference) + // - sameUserReviewers entity resolution: User 1234 is 100% L1 HIT + // → accounts call is COMPLETELY SKIPPED! 
+ accountsCalls := tracker.GetCount(accountsHost) + reviewsCalls := tracker.GetCount(reviewsHost) + + // Reviews should be called twice: once for Product entity (reviews field), + // once for sameUserReviewers (after username is resolved from accounts) + assert.Equal(t, 2, reviewsCalls, "Reviews subgraph called for Product.reviews and User.sameUserReviewers") + + // KEY ASSERTION: Only 1 accounts call! The sameUserReviewers entity resolution + // is completely skipped because User 1234 is already in L1 cache. + assert.Equal(t, 1, accountsCalls, + "With L1 enabled: only 1 accounts call (sameUserReviewers entity fetch skipped via L1)") + + }) + + t.Run("L1 disabled - accounts called for sameUserReviewers", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // With L1 disabled: + // - First accounts call fetches User 1234 for authorWithoutProvides + // - Second accounts call for sameUserReviewers: User 1234 fetched again (no L1) + // Total: 2 accounts calls + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 2, accountsCalls, + "With L1 disabled: 2 accounts calls (sameUserReviewers requires separate fetch)") + + }) +} + +func 
TestL1CacheNestedEntityListDeduplication(t *testing.T) { + // This test verifies L1 deduplication when the same entity appears + // at multiple levels in nested list queries using coReviewers. + // + // coReviewers is defined in the reviews subgraph with @requires(fields: "username"), + // so it triggers cross-subgraph entity resolution. + // + // Query flow: + // 1. topProducts -> products subgraph + // 2. reviews -> reviews subgraph (Product entity fetch) + // 3. authorWithoutProvides -> accounts (User 1234 fetched, stored in L1) + // 4. coReviewers -> reviews subgraph (after username resolved) + // - Returns [User 1234, User 7777] as references + // 5. Entity resolution for coReviewers -> accounts + // - User 1234 should be L1 HIT (already fetched in step 3) + // - User 7777 is L1 MISS (stored in L1) + // 6. coReviewers for User 1234 and User 7777 -> reviews subgraph + // 7. Entity resolution for nested coReviewers -> accounts + // - All users (1234, 7777) are already in L1! + // + // With L1 enabled: The nested coReviewers level should have 100% L1 hits, + // potentially skipping the accounts call entirely for that level. 
+ + query := `query { + topProducts { + reviews { + authorWithoutProvides { + id + username + coReviewers { + id + username + coReviewers { + id + username + } + } + } + } + } + }` + + // User 1234's coReviewers: [User 1234, User 7777] + // User 7777's coReviewers: [User 7777, User 1234] + // Nested level repeats these patterns + expectedResponse := `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me"},{"id":"7777","username":"User 7777"}]},{"id":"7777","username":"User 7777","coReviewers":[{"id":"7777","username":"User 7777"},{"id":"1234","username":"Me"}]}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me"},{"id":"7777","username":"User 7777"}]},{"id":"7777","username":"User 7777","coReviewers":[{"id":"7777","username":"User 7777"},{"id":"1234","username":"Me"}]}]}}]}]}}` + + t.Run("L1 enabled - nested coReviewers benefits from L1 hits", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // With L1 enabled: + // - Call 1: 
authorWithoutProvides fetches User 1234 (miss, stored) + // - Call 2: coReviewers entity resolution [User 1234 (hit), User 7777 (miss, stored)] + // - Call 3: nested coReviewers entity resolution - all users are in L1! + // This call should be fully served from L1 cache. + accountsCalls := tracker.GetCount(accountsHost) + // With L1 enabled, the nested coReviewers should be served from L1 + // Only 2 accounts calls needed because nested coReviewers is fully served from L1 + assert.Equal(t, 2, accountsCalls, + "With L1 enabled: exactly 2 accounts calls (nested coReviewers served entirely from L1)") + }) + + t.Run("L1 disabled - more accounts calls without deduplication", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // With L1 disabled: + // - Call 1: authorWithoutProvides fetches User 1234 + // - Call 2: coReviewers entity resolution for User 1234 and User 7777 (no L1 dedup) + // - Call 3: nested coReviewers entity resolution (no L1 dedup) + accountsCalls := tracker.GetCount(accountsHost) + // Without L1 cache, we need 3 accounts calls (no deduplication at nested level) + assert.Equal(t, 3, accountsCalls, + "With L1 disabled: exactly 3 accounts calls (no 
deduplication)") + }) +} + +func TestL1CacheRootFieldEntityListPopulation(t *testing.T) { + // This test verifies L1 cache behavior with a complex nested query starting + // from a root field that returns a list of entities. + // + // Query flow: + // 1. topProducts -> products subgraph (root query, returns list) + // 2. reviews -> reviews subgraph (entity fetch for Products) + // 3. authorWithoutProvides -> accounts subgraph (entity fetch for User 1234) + // - User 1234 is fetched and stored in L1 + // 4. sameUserReviewers -> reviews subgraph (after username resolved) + // - Returns [User 1234] as reference (same user only) + // 5. Entity resolution for sameUserReviewers -> accounts subgraph + // - User 1234 is 100% L1 HIT (already fetched in step 3) + // - THE ENTIRE ACCOUNTS CALL IS SKIPPED! + // + // With L1 enabled: The sameUserReviewers entity fetch is completely skipped. + // With L1 disabled: accounts is called twice (no deduplication). + + query := `query { + topProducts { + upc + name + reviews { + body + authorWithoutProvides { + id + username + sameUserReviewers { + id + username + } + } + } + } + }` + + expectedResponse := `{"data":{"topProducts":[{"upc":"top-1","name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"upc":"top-2","name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}` + + t.Run("L1 enabled - sameUserReviewers fetch skipped via L1 cache", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := 
federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow with L1 enabled: + // 1. products subgraph: topProducts root query + // 2. reviews subgraph: Product entity fetch for reviews + // 3. accounts subgraph: User entity fetch for authorWithoutProvides (User 1234 stored in L1) + // 4. reviews subgraph: sameUserReviewers (returns [User 1234]) + // 5. sameUserReviewers entity resolution: User 1234 is 100% L1 HIT → accounts call SKIPPED! + productsCalls := tracker.GetCount(productsHost) + reviewsCalls := tracker.GetCount(reviewsHost) + accountsCalls := tracker.GetCount(accountsHost) + + assert.Equal(t, 1, productsCalls, "Should call products subgraph once for topProducts") + assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice (Product.reviews + User.sameUserReviewers)") + // KEY ASSERTION: Only 1 accounts call! sameUserReviewers entity resolution skipped via L1. 
+ assert.Equal(t, 1, accountsCalls, + "With L1 enabled: only 1 accounts call (sameUserReviewers entity fetch skipped via L1)") + + }) + + t.Run("L1 disabled - more accounts calls without L1 optimization", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow with L1 disabled: + // 1. products subgraph: topProducts root query + // 2. reviews subgraph: Product entity fetch for reviews + // 3. accounts subgraph: User entity fetch for authorWithoutProvides + // 4. reviews subgraph: sameUserReviewers + // 5. accounts subgraph: User entity fetch for sameUserReviewers (no L1 → must fetch again!) 
+ productsCalls := tracker.GetCount(productsHost) + reviewsCalls := tracker.GetCount(reviewsHost) + accountsCalls := tracker.GetCount(accountsHost) + + assert.Equal(t, 1, productsCalls, "Should call products subgraph once") + assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice") + // KEY ASSERTION: 2 accounts calls without L1 optimization + assert.Equal(t, 2, accountsCalls, + "With L1 disabled: 2 accounts calls (sameUserReviewers requires separate fetch)") + + }) +} + +func TestL1CacheRootFieldNonEntityWithNestedEntities(t *testing.T) { + // This test verifies L1 cache behavior when a root field returns a NON-entity type + // (Review) that contains nested entities (User via authorWithoutProvides). + // + // Key difference from TestL1CacheRootFieldEntityListPopulation: + // - That test starts with topProducts -> [Product] where Product IS an entity (@key(fields: "upc")) + // - This test starts with topReviews -> [Review] where Review is NOT an entity (no @key) + // - Both prove L1 entity caching works for nested User entities + // + // Query flow: + // 1. topReviews -> reviews subgraph (root query, returns [Review] — NOT an entity) + // 2. authorWithoutProvides -> accounts subgraph (entity fetch for Users, stored in L1) + // 3. sameUserReviewers -> reviews subgraph (after username resolved via @requires) + // 4. Entity resolution for sameUserReviewers -> accounts subgraph + // - All Users are 100% L1 HITs (already fetched in step 2) + // - THE ENTIRE ACCOUNTS CALL IS SKIPPED! 
+ + query := `query { + topReviews { + body + authorWithoutProvides { + id + username + sameUserReviewers { + id + username + } + } + } + }` + + expectedResponse := `{"data":{"topReviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}},{"body":"This is the last straw. Hat you will wear. 11/10","authorWithoutProvides":{"id":"7777","username":"User 7777","sameUserReviewers":[{"id":"7777","username":"User 7777"}]}},{"body":"Perfect summer hat.","authorWithoutProvides":{"id":"5678","username":"User 5678","sameUserReviewers":[{"id":"5678","username":"User 5678"}]}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"id":"8888","username":"User 8888","sameUserReviewers":[{"id":"8888","username":"User 8888"}]}}]}}` + + t.Run("L1 enabled - sameUserReviewers fetch skipped via L1 cache", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsHost := reviewsURLParsed.Host + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := 
gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow with L1 enabled: + // 1. reviews subgraph: topReviews root query (Review is NOT an entity) + // 2. accounts subgraph: User entity fetch for authorWithoutProvides (Users stored in L1) + // 3. reviews subgraph: sameUserReviewers (returns [User] references) + // 4. sameUserReviewers entity resolution: all Users are L1 HITs → accounts call SKIPPED! + reviewsCalls := tracker.GetCount(reviewsHost) + accountsCalls := tracker.GetCount(accountsHost) + + assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice (topReviews + sameUserReviewers)") + // KEY ASSERTION: Only 1 accounts call! sameUserReviewers entity resolution skipped via L1. + assert.Equal(t, 1, accountsCalls, + "With L1 enabled: only 1 accounts call (sameUserReviewers entity fetch skipped via L1)") + }) + + t.Run("L1 disabled - more accounts calls without L1 optimization", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsHost := reviewsURLParsed.Host + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow with L1 disabled: 
+ // 1. reviews subgraph: topReviews root query + // 2. accounts subgraph: User entity fetch for authorWithoutProvides + // 3. reviews subgraph: sameUserReviewers + // 4. accounts subgraph: User entity fetch for sameUserReviewers (no L1 → must fetch again!) + reviewsCalls := tracker.GetCount(reviewsHost) + accountsCalls := tracker.GetCount(accountsHost) + + assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice") + // KEY ASSERTION: 2 accounts calls without L1 optimization + assert.Equal(t, 2, accountsCalls, + "With L1 disabled: 2 accounts calls (sameUserReviewers requires separate fetch)") + }) +} + +// ============================================================================= +// CACHE ERROR HANDLING TESTS +// ============================================================================= +// +// These tests verify that caches are NOT populated when subgraphs return errors. +// The cache should only store successful responses to prevent caching error states. + +func TestL1CacheOptimizationReducesSubgraphCalls(t *testing.T) { + // This query demonstrates L1 optimization: + // - Query.me returns User entity + // - User.sameUserReviewers returns [User] entities + // When L1 is enabled and optimized correctly: + // - First User fetch (me) populates L1 cache + // - Second User fetch (sameUserReviewers) hits L1 cache, SKIPS subgraph call + // + // The optimizeL1Cache postprocessor: + // - Sets UseL1Cache=true on User fetches (they share the same entity type) + // - Sets UseL1Cache=false on fetches with no matching entity types + + query := `query { + me { + id + username + sameUserReviewers { + id + username + } + } + }` + + expectedResponse := `{"data":{"me":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}}` + + t.Run("L1 optimization enables cache hit between same entity type fetches", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + 
+ cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow with L1 optimization: + // 1. accounts subgraph: Query.me (root query, returns User 1234) + // - L1 cache populated with User 1234 + // 2. reviews subgraph: User.sameUserReviewers (returns [User 1234]) + // 3. accounts subgraph: User entity fetch for sameUserReviewers + // - User 1234 is 100% L1 HIT! This call is SKIPPED! + accountsCalls := tracker.GetCount(accountsHost) + reviewsCalls := tracker.GetCount(reviewsHost) + + // KEY ASSERTION: Only 1 accounts call! + // Without L1 optimization, there would be 2 calls: + // - First: Query.me + // - Second: User entity resolution for sameUserReviewers + // With L1 optimization, the second call is skipped because User 1234 is in L1 cache. 
+ assert.Equal(t, 1, accountsCalls, + "L1 optimization: only 1 accounts call (sameUserReviewers resolved from L1 cache)") + assert.Equal(t, 1, reviewsCalls, + "Should call reviews subgraph once for User.sameUserReviewers") + }) + + t.Run("Without L1, same query requires more subgraph calls", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, // L1 disabled + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow WITHOUT L1: + // 1. accounts subgraph: Query.me (root query) + // 2. reviews subgraph: User.sameUserReviewers + // 3. accounts subgraph: User entity fetch (NO L1 cache → must fetch!) + accountsCalls := tracker.GetCount(accountsHost) + reviewsCalls := tracker.GetCount(reviewsHost) + + // KEY ASSERTION: 2 accounts calls without L1! + // This proves L1 optimization saves a subgraph call. 
+ assert.Equal(t, 2, accountsCalls, + "Without L1: 2 accounts calls (sameUserReviewers requires separate fetch)") + assert.Equal(t, 1, reviewsCalls, + "Should call reviews subgraph once for User.sameUserReviewers") + }) +} + diff --git a/execution/engine/federation_caching_l2_test.go b/execution/engine/federation_caching_l2_test.go new file mode 100644 index 0000000000..25a29787bf --- /dev/null +++ b/execution/engine/federation_caching_l2_test.go @@ -0,0 +1,1124 @@ +package engine_test + +import ( + "context" + "net/http" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +func TestL2CacheOnly(t *testing.T) { + t.Run("L2 enabled - miss then hit across requests", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Enable L2 cache only + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + } + + // Enable entity caching for L2 tests (opt-in per-subgraph caching) + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: 
plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + // First query - should miss cache + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + // Cache operations: get/set for Query.topProducts, Product entities, User entities = 6 operations + assert.Equal(t, 6, len(logAfterFirst), "Should have exactly 6 cache operations (get/set for Query, Products, Users)") + + // Verify the exact cache access log (order may vary for keys within each operation) + wantLogFirst := []CacheLogEntry{ + // Root field Query.topProducts + { + Operation: "get", + Keys: 
[]string{`{"__typename":"Query","field":"topProducts"}`}, + Hits: []bool{false}, + }, + { + Operation: "set", + Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, + }, + // Product entity fetches (reviews data for each product) + { + Operation: "get", + Keys: []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + }, + Hits: []bool{false, false}, + }, + { + Operation: "set", + Keys: []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + }, + }, + // User entity fetches (author data) + { + Operation: "get", + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + }, + Hits: []bool{false}, + }, + { + Operation: "set", + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + }, + }, + } + assert.Equal(t, sortCacheLogKeys(wantLogFirst), sortCacheLogKeys(logAfterFirst), "First query cache log should match expected") + + // Verify subgraph calls for first query + productsCallsFirst := tracker.GetCount(productsHost) + reviewsCallsFirst := tracker.GetCount(reviewsHost) + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, productsCallsFirst, "First query should call products subgraph exactly once") + assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph exactly once") + assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph for User entity resolution") + + // Second query - all fetches should hit cache + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look 
great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + // Verify L2 cache hits + logAfterSecond := defaultCache.GetLog() + // All cache operations should be gets with hits: Query.topProducts, Product entities, User entities + assert.Equal(t, 3, len(logAfterSecond), "Second query should have 3 cache get operations (all hits)") + + // Verify the exact cache access log for second query (all hits) + wantLogSecond := []CacheLogEntry{ + // Root field Query.topProducts - HIT + { + Operation: "get", + Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, + Hits: []bool{true}, + }, + // Product entity fetches - HITS + { + Operation: "get", + Keys: []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + }, + Hits: []bool{true, true}, + }, + // User entity fetches - HITS + { + Operation: "get", + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + }, + Hits: []bool{true}, + }, + } + assert.Equal(t, sortCacheLogKeys(wantLogSecond), sortCacheLogKeys(logAfterSecond), "Second query cache log should match expected (all hits)") + + // Verify subgraph calls for second query - all should be cached + productsCallsSecond := tracker.GetCount(productsHost) + reviewsCallsSecond := tracker.GetCount(reviewsHost) + accountsCallsSecond := tracker.GetCount(accountsHost) + assert.Equal(t, 0, productsCallsSecond, "Second query should not call products subgraph (root field cache hit)") + assert.Equal(t, 0, reviewsCallsSecond, "Second query should not call reviews subgraph (entity cache hit)") + assert.Equal(t, 0, accountsCallsSecond, "Second query should not call accounts subgraph (entity cache hit)") + }) + + t.Run("L2 disabled - no external cache operations", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := 
newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Disable L2 cache + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First query + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + // Verify no cache operations + log := defaultCache.GetLog() + assert.Empty(t, log, "No L2 cache operations should occur when L2 is disabled") + }) +} + +func TestL1L2CacheCombined(t *testing.T) { + t.Run("L1+L2 enabled - L1 within request, L2 across requests", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Enable both L1 and L2 cache + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + } + + // Enable entity caching for L2 tests (opt-in per-entity caching) + // Configure caching per-subgraph with explicit subgraph names + 
subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + // First query - L1 helps within request, L2 populates for later + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great 
with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + // Cache operations: get/set for Query.topProducts, Product entities, User entities = 6 operations + assert.Equal(t, 6, len(logAfterFirst), "Should have exactly 6 cache operations (get/set for Query, Products, Users)") + + // Verify the exact cache access log (order may vary for keys within each operation) + wantLogFirst := []CacheLogEntry{ + // Root field Query.topProducts + { + Operation: "get", + Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, + Hits: []bool{false}, + }, + { + Operation: "set", + Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, + }, + // Product entity fetches (reviews data for each product) + { + Operation: "get", + Keys: []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + }, + Hits: []bool{false, false}, + }, + { + Operation: "set", + Keys: []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + }, + }, + // User entity fetches (author data) + { + Operation: "get", + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + }, + Hits: []bool{false}, + }, + { + Operation: "set", + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + }, + }, + } + assert.Equal(t, sortCacheLogKeys(wantLogFirst), sortCacheLogKeys(logAfterFirst), "First query cache log should match expected") + + // Verify subgraph calls for first query + productsCallsFirst := tracker.GetCount(productsHost) + reviewsCallsFirst := tracker.GetCount(reviewsHost) + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, productsCallsFirst, "First query should call products subgraph exactly once") + assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph exactly once") + assert.Equal(t, 1, accountsCallsFirst, "First query should call 
accounts subgraph for User entity resolution") + + // Second query - new request means fresh L1, but L2 should hit + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + // All cache operations should be gets with hits: Query.topProducts, Product entities, User entities + assert.Equal(t, 3, len(logAfterSecond), "Second query should have 3 cache get operations (all hits)") + + // Verify the exact cache access log for second query (all hits) + wantLogSecond := []CacheLogEntry{ + // Root field Query.topProducts - HIT + { + Operation: "get", + Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, + Hits: []bool{true}, + }, + // Product entity fetches - HITS + { + Operation: "get", + Keys: []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + }, + Hits: []bool{true, true}, + }, + // User entity fetches - HITS + { + Operation: "get", + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + }, + Hits: []bool{true}, + }, + } + assert.Equal(t, sortCacheLogKeys(wantLogSecond), sortCacheLogKeys(logAfterSecond), "Second query cache log should match expected (all hits)") + + // Verify no subgraph calls for second query (L2 cache hits) + productsCallsSecond := tracker.GetCount(productsHost) + reviewsCallsSecond := tracker.GetCount(reviewsHost) + accountsCallsSecond := tracker.GetCount(accountsHost) + assert.Equal(t, 0, productsCallsSecond, "Second query should not call 
products subgraph (L2 hit)") + assert.Equal(t, 0, reviewsCallsSecond, "Second query should not call reviews subgraph (L2 hit)") + assert.Equal(t, 0, accountsCallsSecond, "Second query should not call accounts subgraph (L2 hit)") + }) + + t.Run("L1+L2 - cross-request isolation: L1 per-request, L2 shared", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Enable both L1 and L2 + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + } + + // Enable entity caching for L2 tests (opt-in per-entity caching) + // Configure caching per-subgraph with explicit subgraph names + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First request - populates L2 cache + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, 
`{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + productKeys := []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + } + userKeys := []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + } + wantFirstLog := []CacheLogEntry{ + // reviews subgraph _entities(Product) — L2 miss, first time seeing these products + {Operation: "get", Keys: productKeys, Hits: []bool{false, false}}, + // reviews subgraph _entities(Product) — store fetched product data in L2 + {Operation: "set", Keys: productKeys}, + // accounts subgraph _entities(User) — L2 miss, first time seeing this user + {Operation: "get", Keys: userKeys, Hits: []bool{false}}, + // accounts subgraph _entities(User) — store fetched user data in L2 + {Operation: "set", Keys: userKeys}, + } + assert.Equal(t, sortCacheLogKeys(wantFirstLog), sortCacheLogKeys(logAfterFirst), "First request: L2 miss + set for Product and User") + + // Second request - L1 is fresh (new request), but L2 should provide data + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + wantSecondLog := 
[]CacheLogEntry{ + // reviews subgraph _entities(Product) — L2 hit, both products cached from first request + {Operation: "get", Keys: productKeys, Hits: []bool{true, true}}, + // accounts subgraph _entities(User) — L2 hit, user cached from first request (deduplicated: 1 unique user) + {Operation: "get", Keys: userKeys, Hits: []bool{true}}, + // No set operations — all data served from cache + } + assert.Equal(t, sortCacheLogKeys(wantSecondLog), sortCacheLogKeys(logAfterSecond), "Second request: all L2 cache hits, no sets") + + // No subgraph calls on second request — all entity data served from L2 cache + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + assert.Equal(t, 0, tracker.GetCount(reviewsURLParsed.Host), "Second request should skip reviews subgraph (Product L2 cache hit)") + assert.Equal(t, 0, tracker.GetCount(accountsURLParsed.Host), "Second request should skip accounts subgraph (User L2 cache hit)") + }) +} + +// TestPartialEntityCaching demonstrates that only explicitly configured entity types +// are cached. This test configures caching for Product but NOT for User, verifying +// the opt-in nature of the per-entity caching configuration. 
+func TestPartialEntityCaching(t *testing.T) { + t.Run("only configured entities are cached", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Enable L2 cache + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + } + + // PARTIAL CACHING: Only configure caching for Product in reviews subgraph, NOT for User in accounts + // This demonstrates the opt-in per-entity caching behavior + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + // Note: accounts subgraph is intentionally NOT configured - User entities should NOT be cached + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + // First query - Product entities should be cached, User entities should NOT + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, 
`{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + // Only Product has L2 caching configured (reviews subgraph); User (accounts) does NOT. + // So we expect cache operations for Product only — no User cache activity at all. + productKeys := []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + } + logAfterFirst := defaultCache.GetLog() + wantFirstLog := []CacheLogEntry{ + // reviews subgraph _entities(Product) — L2 miss, first time seeing these products + {Operation: "get", Keys: productKeys, Hits: []bool{false, false}}, + // reviews subgraph _entities(Product) — store fetched product data in L2 + {Operation: "set", Keys: productKeys}, + // No User operations — accounts subgraph has no caching configured + } + assert.Equal(t, sortCacheLogKeys(wantFirstLog), sortCacheLogKeys(logAfterFirst), "First request: only Product entities have cache operations") + + // Both subgraphs called on first request (no cache to serve from) + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "First query should call reviews subgraph") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts subgraph") + + // Second query - Product should hit cache, User should still be fetched from subgraph + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most 
fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + wantSecondLog := []CacheLogEntry{ + // reviews subgraph _entities(Product) — L2 hit, both products cached from first request + {Operation: "get", Keys: productKeys, Hits: []bool{true, true}}, + // No User operations — accounts subgraph still has no caching configured + // No set operations — Product data served from cache + } + assert.Equal(t, sortCacheLogKeys(wantSecondLog), sortCacheLogKeys(logAfterSecond), "Second request: Product cache hits only") + + // Reviews subgraph skipped (Product served from cache), accounts still called (User not cached) + assert.Equal(t, 0, tracker.GetCount(reviewsHost), "Second query should skip reviews subgraph (Product cache hit)") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Second query should still call accounts subgraph (User NOT cached)") + }) +} + +// TestRootFieldCaching tests that root fields (like Query.topProducts) can be cached +// when explicitly configured with RootFieldCaching configuration. 
+func TestRootFieldCaching(t *testing.T) { + t.Run("root field caching enabled", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Enable L2 cache + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + } + + // Configure root field caching for Query.topProducts on products subgraph + // Also configure entity caching to compare behavior + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsHost := 
productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + accountsHost := accountsURLParsed.Host + + // First query - should miss cache for all: root field, entity types + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + // Should have cache operations for: + // 1. Root field Query.topProducts (get + set = 2 operations) + // 2. Product entities (get + set = 2 operations) + // 3. User entities (get + set = 2 operations) + // Total: 6 operations + assert.Equal(t, 6, len(logAfterFirst), "First query should have 6 cache operations (get+set for root field, Product, User)") + + // Verify first query calls all subgraphs + productsCallsFirst := tracker.GetCount(productsHost) + reviewsCallsFirst := tracker.GetCount(reviewsHost) + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, productsCallsFirst, "First query should call products subgraph") + assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph") + assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph") + + // Second query - should hit cache for root field and entities + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth 
control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + wantSecondLog := []CacheLogEntry{ + // products subgraph Query.topProducts — root field L2 hit, cached from first request + {Operation: "get", Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, Hits: []bool{true}}, + // reviews subgraph _entities(Product) — L2 hit, both products cached from first request + {Operation: "get", Keys: []string{`{"__typename":"Product","key":{"upc":"top-1"}}`, `{"__typename":"Product","key":{"upc":"top-2"}}`}, Hits: []bool{true, true}}, + // accounts subgraph _entities(User) — L2 hit, user cached from first request (1 unique user) + {Operation: "get", Keys: []string{`{"__typename":"User","key":{"id":"1234"}}`}, Hits: []bool{true}}, + // No set operations — all data served from cache + } + assert.Equal(t, sortCacheLogKeys(wantSecondLog), sortCacheLogKeys(logAfterSecond), "Second query: all cache hits, no sets") + + // All subgraphs skipped on second query (everything served from cache) + assert.Equal(t, 0, tracker.GetCount(productsHost), "Second query should skip products subgraph (root field cache hit)") + assert.Equal(t, 0, tracker.GetCount(reviewsHost), "Second query should skip reviews subgraph (entity cache hit)") + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts subgraph (entity cache hit)") + }) + + t.Run("root field caching NOT enabled - subgraph still called", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Enable L2 cache + 
cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + } + + // Only configure entity caching, NOT root field caching + // This demonstrates opt-in behavior: root fields are NOT cached unless configured + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + // Note: products subgraph has NO caching config for Query.topProducts + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + + // First query + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + productsCallsFirst := tracker.GetCount(productsHost) + assert.Equal(t, 1, productsCallsFirst, "First query should 
call products subgraph") + + // Second query - products subgraph should still be called because root field is NOT cached + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + // KEY ASSERTION: Products subgraph IS called on second query because root field is NOT cached + productsCallsSecond := tracker.GetCount(productsHost) + assert.Equal(t, 1, productsCallsSecond, "Second query SHOULD call products subgraph (root field NOT cached)") + }) +} + +// ============================================================================= +// L1 CACHE TESTS FOR LIST FIELDS +// ============================================================================= +// +// These tests verify L1 caching behavior when root fields or child fields +// return lists of entities. 
+ +func TestCacheNotPopulatedOnErrors(t *testing.T) { + // Query that triggers an error in accounts subgraph via error-user + // The reviewWithError field returns a review with author ID "error-user" + // which causes FindUserByID to return an error + errorQuery := `query { + reviewWithError { + body + authorWithoutProvides { + id + username + } + } + }` + + // Expected error response - data is null due to non-nullable username field error propagation + expectedErrorResponse := `{"errors":[{"message":"Failed to fetch from Subgraph 'accounts' at Path 'reviewWithError.authorWithoutProvides'."},{"message":"Cannot return null for non-nullable field 'User.username'.","path":["reviewWithError","authorWithoutProvides","username"]}],"data":{"reviewWithError":null}}` + + t.Run("L1 only - error response prevents cache population", func(t *testing.T) { + // This test verifies that L1 cache is NOT populated when an error occurs. + // If L1 was erroneously populated, the second query would not call accounts. 
+ tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + // First query - should get error from accounts + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Verify exact error response + assert.Equal(t, expectedErrorResponse, string(resp)) + + reviewsCallsFirst := tracker.GetCount(reviewsHost) + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph once") + assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph once") + + // Second query - L1 should NOT have cached the error, so accounts should be called again + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Same error should be returned + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsSecond := tracker.GetCount(accountsHost) + // KEY ASSERTION: If L1 incorrectly cached the error, this would be 0 + assert.Equal(t, 1, accountsCallsSecond, "Second query should call accounts again (L1 should NOT cache errors)") + }) + + t.Run("L2 only - error response prevents cache population", func(t *testing.T) { + // This test verifies that L2 cache is NOT populated when an 
error occurs. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Configure L2 caching for User entities + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query - should get error from accounts + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Verify exact error response + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph once") + + // Verify exact cache log: only "get" with miss, NO "set" + // Since the fetch had an error, cache population should be skipped entirely + wantCacheLog := []CacheLogEntry{ + { + Operation: "get", + Keys: []string{`{"__typename":"User","key":{"id":"error-user"}}`}, + Hits: []bool{false}, + }, + // NO "set" entry - this is the key assertion + } + assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Cache log 
should only have 'get' miss, no 'set'") + + // Second query - L2 should NOT have cached the error, so accounts should be called again + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Same error should be returned + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsSecond := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsSecond, "Second query should call accounts again (L2 should NOT cache errors)") + + // Second query should also have same cache log pattern (get miss, no set) + assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Second query cache log should also have 'get' miss, no 'set'") + }) + + t.Run("L1 and L2 - error response prevents both caches", func(t *testing.T) { + // This test verifies that both L1 and L2 caches are NOT populated when an error occurs. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Configure L2 caching for User entities + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := 
url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query - should get error from accounts + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Verify exact error response + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph once") + + // Verify exact cache log: only "get" with miss, NO "set" + wantCacheLog := []CacheLogEntry{ + { + Operation: "get", + Keys: []string{`{"__typename":"User","key":{"id":"error-user"}}`}, + Hits: []bool{false}, + }, + } + assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Cache log should only have 'get' miss, no 'set'") + + // Second query - neither L1 nor L2 should have cached the error + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Same error should be returned + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsSecond := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsSecond, "Second query should call accounts again (neither L1 nor L2 should cache errors)") + + // Second query should also have same cache log pattern + assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Second query cache log should also have 'get' miss, no 'set'") + }) + + t.Run("error does not pollute cache for subsequent success queries", func(t *testing.T) { + // This test verifies that an error query doesn't pollute the cache + // and that subsequent successful queries still work correctly. 
+ defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Configure L2 caching for User entities + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First: Query that triggers an error + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Verify exact error response + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsError := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsError, "Error query should call accounts") + + // Verify error-user was NOT cached (only get, no set) + wantErrorCacheLog := []CacheLogEntry{ + { + Operation: "get", + Keys: []string{`{"__typename":"User","key":{"id":"error-user"}}`}, + Hits: []bool{false}, + }, + } + assert.Equal(t, wantErrorCacheLog, defaultCache.GetLog(), "Error query cache log should only have 'get' miss, no 'set'") + + // Second: Query a successful user (User 1234 via me query) + // Note: "me" is a root query, not an 
entity fetch, so it doesn't use L2 entity caching + successQuery := `query { + me { + id + username + } + }` + expectedSuccessResponse := `{"data":{"me":{"id":"1234","username":"Me"}}}` + + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, successQuery, nil, t) + + // Should succeed with exact expected response + assert.Equal(t, expectedSuccessResponse, string(resp)) + + // Note: Root queries (me) don't use L2 entity caching by default, + // so the cache log should be empty for this query. + // The important thing is that the previous error didn't pollute the cache. + assert.Equal(t, 0, len(defaultCache.GetLog()), "Root query should not use L2 entity cache") + + // Third: Query the error user again - should still fail (not cached) + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + assert.Equal(t, expectedErrorResponse, string(resp)) + accountsCallsErrorAgain := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsErrorAgain, "Error query should call accounts again (error was not cached)") + + // Verify cache log still shows only get miss, no set + assert.Equal(t, wantErrorCacheLog, defaultCache.GetLog(), "Third query cache log should still have 'get' miss, no 'set'") + }) +} + +// TestL1CacheOptimizationReducesSubgraphCalls tests that the L1 cache optimization +// postprocessor (optimizeL1Cache) correctly identifies which fetches can benefit +// from L1 caching and sets UseL1Cache appropriately. +// +// The key insight is that L1 is only useful when: +// 1. A prior fetch can provide cached data (READ benefit) +// 2. A later fetch can consume cached data (WRITE benefit) +// +// This test verifies the end-to-end effect: when L1 optimization identifies +// matching entity types between fetches, it enables L1 caching, resulting in +// fewer subgraph calls. 
diff --git a/execution/engine/federation_caching_test.go b/execution/engine/federation_caching_test.go index b19bd213b1..8721e87ce3 100644 --- a/execution/engine/federation_caching_test.go +++ b/execution/engine/federation_caching_test.go @@ -2,27 +2,19 @@ package engine_test import ( "context" - "encoding/json" "fmt" "net/http" - "net/http/httptest" "net/url" - "path" - "sort" "strconv" - "strings" "sync" "testing" "time" - "github.com/jensneuse/abstractlogger" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/wundergraph/graphql-go-tools/execution/engine" "github.com/wundergraph/graphql-go-tools/execution/federationtesting" - accounts "github.com/wundergraph/graphql-go-tools/execution/federationtesting/accounts/graph" - "github.com/wundergraph/graphql-go-tools/execution/federationtesting/gateway" "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" ) @@ -2446,4789 +2438,3 @@ func TestFederationCaching_MutationSkipsL2Read(t *testing.T) { }) } -// subgraphCallTracker tracks HTTP requests made to subgraph servers -type subgraphCallTracker struct { - mu sync.RWMutex - counts map[string]int // Maps subgraph URL to call count - original http.RoundTripper -} - -func newSubgraphCallTracker(original http.RoundTripper) *subgraphCallTracker { - return &subgraphCallTracker{ - counts: make(map[string]int), - original: original, - } -} - -func (t *subgraphCallTracker) RoundTrip(req *http.Request) (*http.Response, error) { - t.mu.Lock() - host := req.URL.Host - t.counts[host]++ - t.mu.Unlock() - return t.original.RoundTrip(req) -} - -func (t *subgraphCallTracker) GetCount(url string) int { - t.mu.RLock() - defer t.mu.RUnlock() - return t.counts[url] -} - -func (t *subgraphCallTracker) Reset() { - t.mu.Lock() - defer t.mu.Unlock() - t.counts = make(map[string]int) -} - -func (t *subgraphCallTracker) GetCounts() map[string]int { - t.mu.RLock() - defer t.mu.RUnlock() - 
result := make(map[string]int) - for k, v := range t.counts { - result[k] = v - } - return result -} - -func (t *subgraphCallTracker) DebugPrint() string { - t.mu.RLock() - defer t.mu.RUnlock() - return fmt.Sprintf("%v", t.counts) -} - -// Helper functions for gateway setup with HTTP client support -type cachingGatewayOptions struct { - enableART bool - withLoaderCache map[string]resolve.LoaderCache - httpClient *http.Client - subgraphHeadersBuilder resolve.SubgraphHeadersBuilder - cachingOptions resolve.CachingOptions - subgraphEntityCachingConfigs engine.SubgraphCachingConfigs - debugMode bool -} - -func withCachingEnableART(enableART bool) func(*cachingGatewayOptions) { - return func(opts *cachingGatewayOptions) { - opts.enableART = enableART - } -} - -func withCachingLoaderCache(loaderCache map[string]resolve.LoaderCache) func(*cachingGatewayOptions) { - return func(opts *cachingGatewayOptions) { - opts.withLoaderCache = loaderCache - } -} - -func withHTTPClient(client *http.Client) func(*cachingGatewayOptions) { - return func(opts *cachingGatewayOptions) { - opts.httpClient = client - } -} - -func withSubgraphHeadersBuilder(builder resolve.SubgraphHeadersBuilder) func(*cachingGatewayOptions) { - return func(opts *cachingGatewayOptions) { - opts.subgraphHeadersBuilder = builder - } -} - -func withCachingOptionsFunc(cachingOpts resolve.CachingOptions) func(*cachingGatewayOptions) { - return func(opts *cachingGatewayOptions) { - opts.cachingOptions = cachingOpts - } -} - -func withSubgraphEntityCachingConfigs(configs engine.SubgraphCachingConfigs) func(*cachingGatewayOptions) { - return func(opts *cachingGatewayOptions) { - opts.subgraphEntityCachingConfigs = configs - } -} - -func withDebugMode(enabled bool) func(*cachingGatewayOptions) { - return func(opts *cachingGatewayOptions) { - opts.debugMode = enabled - } -} - -type cachingGatewayOptionsToFunc func(opts *cachingGatewayOptions) - -func addCachingGateway(options ...cachingGatewayOptionsToFunc) func(setup 
*federationtesting.FederationSetup) *httptest.Server { - opts := &cachingGatewayOptions{} - for _, option := range options { - option(opts) - } - return func(setup *federationtesting.FederationSetup) *httptest.Server { - httpClient := opts.httpClient - if httpClient == nil { - httpClient = http.DefaultClient - } - - poller := gateway.NewDatasource([]gateway.ServiceConfig{ - {Name: "accounts", URL: setup.AccountsUpstreamServer.URL}, - {Name: "products", URL: setup.ProductsUpstreamServer.URL, WS: strings.ReplaceAll(setup.ProductsUpstreamServer.URL, "http:", "ws:")}, - {Name: "reviews", URL: setup.ReviewsUpstreamServer.URL}, - }, httpClient) - - gtw := gateway.HandlerWithCaching(abstractlogger.NoopLogger, poller, httpClient, opts.enableART, opts.withLoaderCache, opts.subgraphHeadersBuilder, opts.cachingOptions, opts.subgraphEntityCachingConfigs, opts.debugMode) - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - - poller.Run(ctx) - return httptest.NewServer(gtw) - } -} - -// mockSubgraphHeadersBuilder is a mock implementation of SubgraphHeadersBuilder -type mockSubgraphHeadersBuilder struct { - hashes map[string]uint64 -} - -func (m *mockSubgraphHeadersBuilder) HeadersForSubgraph(subgraphName string) (http.Header, uint64) { - hash := m.hashes[subgraphName] - if hash == 0 { - // Return default hash if not found - return nil, 99999 - } - return nil, hash -} - -func (m *mockSubgraphHeadersBuilder) HashAll() uint64 { - // Return a simple hash of all subgraph hashes combined - var result uint64 - for _, hash := range m.hashes { - result ^= hash - } - return result -} - -func cachingTestQueryPath(name string) string { - return path.Join("..", "federationtesting", "testdata", name) -} - -type CacheLogEntry struct { - Operation string // "get", "set", "delete" - Keys []string // Keys involved in the operation - Hits []bool // For Get: whether each key was a hit (true) or miss (false) - Caller string // Fetch identity when debug 
enabled: "accounts: entity(User)" or "products: rootField(Query.topProducts)" -} - -// sortCacheLogKeys sorts the keys (and corresponding hits) in each cache log entry. -// This makes comparisons order-independent when multiple keys are present. -// Caller is intentionally stripped — it's for debug logging, not assertions. -func sortCacheLogKeys(log []CacheLogEntry) []CacheLogEntry { - sorted := make([]CacheLogEntry, len(log)) - for i, entry := range log { - // Only sort if there are multiple keys - if len(entry.Keys) <= 1 { - sorted[i] = CacheLogEntry{ - Operation: entry.Operation, - Keys: entry.Keys, - Hits: entry.Hits, - } - continue - } - - // Create pairs of (key, hit) to sort together - pairs := make([]struct { - key string - hit bool - }, len(entry.Keys)) - for j := range entry.Keys { - pairs[j].key = entry.Keys[j] - if entry.Hits != nil && j < len(entry.Hits) { - pairs[j].hit = entry.Hits[j] - } - } - - // Sort pairs by key - sort.Slice(pairs, func(a, b int) bool { - return pairs[a].key < pairs[b].key - }) - - // Extract sorted keys and hits - sorted[i] = CacheLogEntry{ - Operation: entry.Operation, - Keys: make([]string, len(pairs)), - Hits: nil, - } - if len(entry.Hits) > 0 { - sorted[i].Hits = make([]bool, len(pairs)) - } - for j := range pairs { - sorted[i].Keys[j] = pairs[j].key - if sorted[i].Hits != nil { - sorted[i].Hits[j] = pairs[j].hit - } - } - } - return sorted -} - -// sortCacheLogKeysWithCaller is like sortCacheLogKeys but preserves the Caller field. -// Use this when you want assertions to verify which Loader method chain triggered each cache event. 
-func sortCacheLogKeysWithCaller(log []CacheLogEntry) []CacheLogEntry { - sorted := make([]CacheLogEntry, len(log)) - for i, entry := range log { - if len(entry.Keys) <= 1 { - sorted[i] = CacheLogEntry{ - Operation: entry.Operation, - Keys: entry.Keys, - Hits: entry.Hits, - Caller: entry.Caller, - } - continue - } - - pairs := make([]struct { - key string - hit bool - }, len(entry.Keys)) - for j := range entry.Keys { - pairs[j].key = entry.Keys[j] - if entry.Hits != nil && j < len(entry.Hits) { - pairs[j].hit = entry.Hits[j] - } - } - sort.Slice(pairs, func(a, b int) bool { - return pairs[a].key < pairs[b].key - }) - sorted[i] = CacheLogEntry{ - Operation: entry.Operation, - Keys: make([]string, len(pairs)), - Hits: nil, - Caller: entry.Caller, - } - if len(entry.Hits) > 0 { - sorted[i].Hits = make([]bool, len(pairs)) - } - for j := range pairs { - sorted[i].Keys[j] = pairs[j].key - if sorted[i].Hits != nil { - sorted[i].Hits[j] = pairs[j].hit - } - } - } - return sorted -} - -type cacheEntry struct { - data []byte - expiresAt *time.Time -} - -type FakeLoaderCache struct { - mu sync.RWMutex - storage map[string]cacheEntry - log []CacheLogEntry -} - -func NewFakeLoaderCache() *FakeLoaderCache { - return &FakeLoaderCache{ - storage: make(map[string]cacheEntry), - log: make([]CacheLogEntry, 0), - } -} - -func (f *FakeLoaderCache) cleanupExpired() { - now := time.Now() - for key, entry := range f.storage { - if entry.expiresAt != nil && now.After(*entry.expiresAt) { - delete(f.storage, key) - } - } -} - -func (f *FakeLoaderCache) Get(ctx context.Context, keys []string) ([]*resolve.CacheEntry, error) { - f.mu.Lock() - defer f.mu.Unlock() - - // Clean up expired entries before executing command - f.cleanupExpired() - - hits := make([]bool, len(keys)) - result := make([]*resolve.CacheEntry, len(keys)) - for i, key := range keys { - if entry, exists := f.storage[key]; exists { - // Make a copy of the data to prevent external modifications - dataCopy := make([]byte, 
len(entry.data)) - copy(dataCopy, entry.data) - ce := &resolve.CacheEntry{ - Key: key, - Value: dataCopy, - } - // Populate RemainingTTL from expiresAt for cache age analytics - if entry.expiresAt != nil { - remaining := time.Until(*entry.expiresAt) - if remaining > 0 { - ce.RemainingTTL = remaining - } - } - result[i] = ce - hits[i] = true - } else { - result[i] = nil - hits[i] = false - } - } - - // Log the operation - caller := "" - if cfi := resolve.GetCacheFetchInfo(ctx); cfi != nil { - caller = cfi.String() - } - f.log = append(f.log, CacheLogEntry{ - Operation: "get", - Keys: keys, - Hits: hits, - Caller: caller, - }) - - return result, nil -} - -func (f *FakeLoaderCache) Set(ctx context.Context, entries []*resolve.CacheEntry, ttl time.Duration) error { - if len(entries) == 0 { - return nil - } - - f.mu.Lock() - defer f.mu.Unlock() - - // Clean up expired entries before executing command - f.cleanupExpired() - - keys := make([]string, 0, len(entries)) - for _, entry := range entries { - if entry == nil { - continue - } - cacheEntry := cacheEntry{ - // Make a copy of the data to prevent external modifications - data: make([]byte, len(entry.Value)), - } - copy(cacheEntry.data, entry.Value) - - // If ttl is 0, store without expiration - if ttl > 0 { - expiresAt := time.Now().Add(ttl) - cacheEntry.expiresAt = &expiresAt - } - - f.storage[entry.Key] = cacheEntry - keys = append(keys, entry.Key) - } - - // Log the operation - caller := "" - if cfi := resolve.GetCacheFetchInfo(ctx); cfi != nil { - caller = cfi.String() - } - f.log = append(f.log, CacheLogEntry{ - Operation: "set", - Keys: keys, - Hits: nil, // Set operations don't have hits/misses - Caller: caller, - }) - - return nil -} - -func (f *FakeLoaderCache) Delete(ctx context.Context, keys []string) error { - f.mu.Lock() - defer f.mu.Unlock() - - // Clean up expired entries before executing command - f.cleanupExpired() - - for _, key := range keys { - delete(f.storage, key) - } - - // Log the operation - 
caller := "" - if cfi := resolve.GetCacheFetchInfo(ctx); cfi != nil { - caller = cfi.String() - } - f.log = append(f.log, CacheLogEntry{ - Operation: "delete", - Keys: keys, - Hits: nil, // Delete operations don't have hits/misses - Caller: caller, - }) - - return nil -} - -// GetLog returns a copy of the cache operation log -func (f *FakeLoaderCache) GetLog() []CacheLogEntry { - f.mu.RLock() - defer f.mu.RUnlock() - logCopy := make([]CacheLogEntry, len(f.log)) - copy(logCopy, f.log) - return logCopy -} - -// GetLogWithCaller returns a copy of the cache operation log with Caller populated. -// Use this with sortCacheLogKeysWithCaller to assert on both operation details and -// the Loader method chain that triggered each cache event. -func (f *FakeLoaderCache) GetLogWithCaller() []CacheLogEntry { - f.mu.RLock() - defer f.mu.RUnlock() - logCopy := make([]CacheLogEntry, len(f.log)) - copy(logCopy, f.log) - return logCopy -} - -// ClearLog clears the cache operation log -func (f *FakeLoaderCache) ClearLog() { - f.mu.Lock() - defer f.mu.Unlock() - f.log = make([]CacheLogEntry, 0) -} - -// TestFakeLoaderCache tests the cache implementation itself -func TestFakeLoaderCache(t *testing.T) { - ctx := context.Background() - cache := NewFakeLoaderCache() - - t.Run("SetAndGet", func(t *testing.T) { - // Test basic set and get - keys := []string{"key1", "key2", "key3"} - entries := []*resolve.CacheEntry{ - {Key: "key1", Value: []byte("value1")}, - {Key: "key2", Value: []byte("value2")}, - {Key: "key3", Value: []byte("value3")}, - } - - err := cache.Set(ctx, entries, 0) // No TTL - require.NoError(t, err) - - // Get all keys - result, err := cache.Get(ctx, keys) - require.NoError(t, err) - require.Len(t, result, 3) - assert.NotNil(t, result[0]) - assert.Equal(t, "value1", string(result[0].Value)) - assert.NotNil(t, result[1]) - assert.Equal(t, "value2", string(result[1].Value)) - assert.NotNil(t, result[2]) - assert.Equal(t, "value3", string(result[2].Value)) - - // Get partial 
keys - result, err = cache.Get(ctx, []string{"key2", "key4", "key1"}) - require.NoError(t, err) - require.Len(t, result, 3) - assert.NotNil(t, result[0]) - assert.Equal(t, "value2", string(result[0].Value)) - assert.Nil(t, result[1]) // key4 doesn't exist - assert.NotNil(t, result[2]) - assert.Equal(t, "value1", string(result[2].Value)) - }) - - t.Run("Delete", func(t *testing.T) { - // Set some keys - entries := []*resolve.CacheEntry{ - {Key: "del1", Value: []byte("v1")}, - {Key: "del2", Value: []byte("v2")}, - {Key: "del3", Value: []byte("v3")}, - } - err := cache.Set(ctx, entries, 0) - require.NoError(t, err) - - // Delete some keys - err = cache.Delete(ctx, []string{"del1", "del3"}) - require.NoError(t, err) - - // Check remaining keys - result, err := cache.Get(ctx, []string{"del1", "del2", "del3"}) - require.NoError(t, err) - assert.Nil(t, result[0]) // del1 was deleted - assert.NotNil(t, result[1]) // del2 still exists - assert.Equal(t, "v2", string(result[1].Value)) - assert.Nil(t, result[2]) // del3 was deleted - }) - - t.Run("TTL", func(t *testing.T) { - // Set with 50ms TTL - entries := []*resolve.CacheEntry{ - {Key: "ttl1", Value: []byte("expire1")}, - {Key: "ttl2", Value: []byte("expire2")}, - } - err := cache.Set(ctx, entries, 50*time.Millisecond) - require.NoError(t, err) - - // Immediately get - should exist - result, err := cache.Get(ctx, []string{"ttl1", "ttl2"}) - require.NoError(t, err) - assert.NotNil(t, result[0]) - assert.Equal(t, "expire1", string(result[0].Value)) - assert.NotNil(t, result[1]) - assert.Equal(t, "expire2", string(result[1].Value)) - - // Wait for expiration - time.Sleep(60 * time.Millisecond) - - // Get again - should be nil - result, err = cache.Get(ctx, []string{"ttl1", "ttl2"}) - require.NoError(t, err) - assert.Nil(t, result[0]) - assert.Nil(t, result[1]) - }) - - t.Run("MixedTTL", func(t *testing.T) { - // Set some with TTL, some without - err := cache.Set(ctx, []*resolve.CacheEntry{{Key: "perm1", Value: 
[]byte("permanent")}}, 0) - require.NoError(t, err) - - err = cache.Set(ctx, []*resolve.CacheEntry{{Key: "temp1", Value: []byte("temporary")}}, 50*time.Millisecond) - require.NoError(t, err) - - // Wait for temporary to expire - time.Sleep(60 * time.Millisecond) - - // Check both - result, err := cache.Get(ctx, []string{"perm1", "temp1"}) - require.NoError(t, err) - assert.NotNil(t, result[0]) - assert.Equal(t, "permanent", string(result[0].Value)) // Still exists - assert.Nil(t, result[1]) // Expired - }) - - t.Run("ThreadSafety", func(t *testing.T) { - // Test concurrent access - done := make(chan bool) - - // Writer goroutine - go func() { - for i := 0; i < 100; i++ { - key := fmt.Sprintf("concurrent_%d", i) - value := fmt.Sprintf("value_%d", i) - err := cache.Set(ctx, []*resolve.CacheEntry{{Key: key, Value: []byte(value)}}, 0) - assert.NoError(t, err) - } - done <- true - }() - - // Reader goroutine - go func() { - for i := 0; i < 100; i++ { - key := fmt.Sprintf("concurrent_%d", i%50) - _, err := cache.Get(ctx, []string{key}) - assert.NoError(t, err) - } - done <- true - }() - - // Deleter goroutine - go func() { - for i := 0; i < 50; i++ { - key := fmt.Sprintf("concurrent_%d", i*2) - err := cache.Delete(ctx, []string{key}) - assert.NoError(t, err) - } - done <- true - }() - - // Wait for all goroutines - <-done - <-done - <-done - }) - - t.Run("ResultLengthMatchesKeysLength", func(t *testing.T) { - // Test that result length always matches input keys length - - // Set some data - err := cache.Set(ctx, []*resolve.CacheEntry{ - {Key: "exist1", Value: []byte("data1")}, - {Key: "exist3", Value: []byte("data3")}, - }, 0) - require.NoError(t, err) - - // Request mix of existing and non-existing keys - keys := []string{"exist1", "missing1", "exist3", "missing2", "missing3"} - result, err := cache.Get(ctx, keys) - require.NoError(t, err) - - // Verify length matches exactly - assert.Len(t, result, len(keys), "Result length must match keys length") - assert.Len(t, 
result, 5, "Should return exactly 5 results") - - // Verify correct values - assert.NotNil(t, result[0]) - assert.Equal(t, "data1", string(result[0].Value)) // exist1 - assert.Nil(t, result[1]) // missing1 - assert.NotNil(t, result[2]) - assert.Equal(t, "data3", string(result[2].Value)) // exist3 - assert.Nil(t, result[3]) // missing2 - assert.Nil(t, result[4]) // missing3 - - // Test with all missing keys - allMissingKeys := []string{"missing4", "missing5", "missing6"} - result, err = cache.Get(ctx, allMissingKeys) - require.NoError(t, err) - assert.Len(t, result, 3, "Should return 3 results for 3 keys") - assert.Nil(t, result[0]) - assert.Nil(t, result[1]) - assert.Nil(t, result[2]) - - // Test with empty keys - result, err = cache.Get(ctx, []string{}) - require.NoError(t, err) - assert.Len(t, result, 0, "Should return empty slice for empty keys") - }) -} - -// ============================================================================= -// L1/L2 CACHE END-TO-END TESTS -// ============================================================================= -// -// These tests verify the L1 (per-request in-memory) and L2 (external cross-request) -// caching behavior in a federated GraphQL setup. -// -// L1 Cache: Prevents redundant fetches for the same entity within a single request -// L2 Cache: Shares entity data across requests via external cache (e.g., Redis) -// -// Lookup Order (entity fetches): L1 -> L2 -> Subgraph Fetch -// Lookup Order (root fetches): L2 -> Subgraph Fetch (no L1) - -func TestL1CacheReducesHTTPCalls(t *testing.T) { - // This test demonstrates L1 cache behavior with entity fetches. 
- // - // Query structure: - // - me: root query to accounts service → returns User 1234 {id, username} - // - me.reviews: entity fetch from reviews service → returns reviews - // - me.reviews.product: entity fetch from products service → returns products - // - me.reviews.product.reviews: entity fetch from reviews service → returns reviews - // - me.reviews.product.reviews.authorWithoutProvides: entity fetch from accounts → returns User 1234 - // - // Note: The `me` root query does NOT populate L1 cache because L1 cache only works - // for entity fetches (RequiresEntityFetch=true). Root queries don't qualify. - // - // With L1 enabled: Both `me` (root) and `authorWithoutProvides` (entity) make calls. - // L1 cache doesn't help here because `me` is a root query, not an entity fetch. - // With L1 disabled: Same behavior - 2 accounts calls. - // - // L1 cache DOES help when the same entity is fetched multiple times through - // entity fetches within a single request (e.g., self-referential entities). 
- - query := `query { - me { - id - username - reviews { - body - product { - upc - reviews { - authorWithoutProvides { - id - username - } - } - } - } - } - }` - - expectedResponse := `{"data":{"me":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control.","product":{"upc":"top-1","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","product":{"upc":"top-2","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}}]}}}` - - t.Run("L1 enabled - entity fetches use L1 cache", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // Both `me` (root query) and `authorWithoutProvides` (entity fetch) call accounts. - // L1 cache doesn't help because `me` is a root query, not an entity fetch. - // Root queries don't populate L1 cache (RequiresEntityFetch=false). 
- accountsCalls := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls, - "Both me (root query) and authorWithoutProvides (entity fetch) call accounts") - }) - - t.Run("L1 disabled - more accounts calls without cache", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // KEY ASSERTION: With L1 disabled, 2 accounts calls! - // The authorWithoutProvides.username requires another fetch since L1 is disabled. - accountsCalls := tracker.GetCount(accountsHost) - assert.Equal(t, 2, accountsCalls, - "With L1 disabled, should make 2 accounts calls (no cache reuse)") - }) -} - -func TestL1CacheReducesHTTPCallsInterface(t *testing.T) { - // This test demonstrates L1 cache behavior with interface return types. 
- // - // Query structure: - // - meInterface: root query to accounts service → returns User 1234 via Identifiable interface - // - meInterface.reviews: entity fetch from reviews service → returns reviews - // - meInterface.reviews.product: entity fetch from products service → returns products - // - meInterface.reviews.product.reviews: entity fetch from reviews service → returns reviews - // - meInterface.reviews.product.reviews.authorWithoutProvides: entity fetch from accounts → returns User 1234 - // - // This tests that interface return types properly build cache key templates - // for all entity types that implement the interface. - - query := `query { - meInterface { - ... on User { - id - username - reviews { - body - product { - upc - reviews { - authorWithoutProvides { - id - username - } - } - } - } - } - } - }` - - expectedResponse := `{"data":{"meInterface":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control.","product":{"upc":"top-1","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","product":{"upc":"top-2","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}}]}}}` - - t.Run("L1 enabled - interface entity fetches use L1 cache", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - 
accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // Same behavior as non-interface: root query + entity fetch both call accounts - accountsCalls := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls, - "Interface field should behave same as object field for L1 caching") - }) - - t.Run("L1 disabled - more accounts calls without cache", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // KEY ASSERTION: With L1 disabled, 2 accounts calls! - accountsCalls := tracker.GetCount(accountsHost) - assert.Equal(t, 2, accountsCalls, - "With L1 disabled, should make 2 accounts calls (no cache reuse)") - }) -} - -func TestL1CacheReducesHTTPCallsUnion(t *testing.T) { - // This test demonstrates L1 cache behavior with union return types. 
- // - // Query structure: - // - meUnion: root query to accounts service → returns User 1234 via MeUnion union - // - meUnion.reviews: entity fetch from reviews service → returns reviews - // - meUnion.reviews.product: entity fetch from products service → returns products - // - meUnion.reviews.product.reviews: entity fetch from reviews service → returns reviews - // - meUnion.reviews.product.reviews.authorWithoutProvides: entity fetch from accounts → returns User 1234 - // - // This tests that union return types properly build cache key templates - // for all entity types that are members of the union. - - query := `query { - meUnion { - ... on User { - id - username - reviews { - body - product { - upc - reviews { - authorWithoutProvides { - id - username - } - } - } - } - } - } - }` - - expectedResponse := `{"data":{"meUnion":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control.","product":{"upc":"top-1","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","product":{"upc":"top-2","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}}]}}}` - - t.Run("L1 enabled - union entity fetches use L1 cache", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - 
tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // Same behavior as non-union: root query + entity fetch both call accounts - accountsCalls := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls, - "Union field should behave same as object field for L1 caching") - }) - - t.Run("L1 disabled - more accounts calls without cache", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // KEY ASSERTION: With L1 disabled, 2 accounts calls! - accountsCalls := tracker.GetCount(accountsHost) - assert.Equal(t, 2, accountsCalls, - "With L1 disabled, should make 2 accounts calls (no cache reuse)") - }) -} - -func TestL1CacheSelfReferentialEntity(t *testing.T) { - // This test verifies that self-referential entities don't cause - // stack overflow when L1 cache is enabled. - // - // Background: When an entity type has a field that returns the same type - // (e.g., User.sameUserReviewers returning [User]), and L1 cache stores - // a pointer to the entity, both key.Item and key.FromCache can point to - // the same memory location. 
Without a fix, calling MergeValues(ptr, ptr) - // causes infinite recursion and stack overflow. - // - // The sameUserReviewers field has @requires(fields: "username") which forces - // sequential execution: the User entity is first fetched from accounts - // (populating L1), then sameUserReviewers is resolved, returning the same - // User entity that's already in L1 cache. - - query := `query { - topProducts { - reviews { - authorWithoutProvides { - id - username - sameUserReviewers { - id - username - } - } - } - } - }` - - // This response shows User 1234 appearing both at authorWithoutProvides level - // and inside sameUserReviewers (which returns the same user for testing) - expectedResponse := `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}` - - t.Run("self-referential entity should not cause stack overflow", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // This should complete without stack overflow - // Before the fix, this would crash with "fatal error: stack overflow" - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - }) -} - -func TestL2CacheOnly(t *testing.T) { - t.Run("L2 enabled - miss then hit across requests", func(t *testing.T) 
{ - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - // Create HTTP client with tracking - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{ - Transport: tracker, - } - - // Enable L2 cache only - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: true, - } - - // Enable entity caching for L2 tests (opt-in per-subgraph caching) - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "products", - RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - { - SubgraphName: "reviews", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames for tracking - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - productsHost := productsURLParsed.Host - reviewsHost := reviewsURLParsed.Host - - // First query - should miss cache - 
defaultCache.ClearLog() - tracker.Reset() - resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - logAfterFirst := defaultCache.GetLog() - // Cache operations: get/set for Query.topProducts, Product entities, User entities = 6 operations - assert.Equal(t, 6, len(logAfterFirst), "Should have exactly 6 cache operations (get/set for Query, Products, Users)") - - // Verify the exact cache access log (order may vary for keys within each operation) - wantLogFirst := []CacheLogEntry{ - // Root field Query.topProducts - { - Operation: "get", - Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, - Hits: []bool{false}, - }, - { - Operation: "set", - Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, - }, - // Product entity fetches (reviews data for each product) - { - Operation: "get", - Keys: []string{ - `{"__typename":"Product","key":{"upc":"top-1"}}`, - `{"__typename":"Product","key":{"upc":"top-2"}}`, - }, - Hits: []bool{false, false}, - }, - { - Operation: "set", - Keys: []string{ - `{"__typename":"Product","key":{"upc":"top-1"}}`, - `{"__typename":"Product","key":{"upc":"top-2"}}`, - }, - }, - // User entity fetches (author data) - { - Operation: "get", - Keys: []string{ - `{"__typename":"User","key":{"id":"1234"}}`, - }, - Hits: []bool{false}, - }, - { - Operation: "set", - Keys: []string{ - `{"__typename":"User","key":{"id":"1234"}}`, - }, - }, - } - assert.Equal(t, sortCacheLogKeys(wantLogFirst), sortCacheLogKeys(logAfterFirst), "First query cache log should match expected") - - // Verify 
subgraph calls for first query - productsCallsFirst := tracker.GetCount(productsHost) - reviewsCallsFirst := tracker.GetCount(reviewsHost) - accountsCallsFirst := tracker.GetCount(accountsHost) - assert.Equal(t, 1, productsCallsFirst, "First query should call products subgraph exactly once") - assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph exactly once") - assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph for User entity resolution") - - // Second query - all fetches should hit cache - defaultCache.ClearLog() - tracker.Reset() - resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - // Verify L2 cache hits - logAfterSecond := defaultCache.GetLog() - // All cache operations should be gets with hits: Query.topProducts, Product entities, User entities - assert.Equal(t, 3, len(logAfterSecond), "Second query should have 3 cache get operations (all hits)") - - // Verify the exact cache access log for second query (all hits) - wantLogSecond := []CacheLogEntry{ - // Root field Query.topProducts - HIT - { - Operation: "get", - Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, - Hits: []bool{true}, - }, - // Product entity fetches - HITS - { - Operation: "get", - Keys: []string{ - `{"__typename":"Product","key":{"upc":"top-1"}}`, - `{"__typename":"Product","key":{"upc":"top-2"}}`, - }, - Hits: []bool{true, true}, - }, - // User entity fetches - HITS - { - Operation: "get", - Keys: []string{ - `{"__typename":"User","key":{"id":"1234"}}`, - }, - Hits: 
[]bool{true}, - }, - } - assert.Equal(t, sortCacheLogKeys(wantLogSecond), sortCacheLogKeys(logAfterSecond), "Second query cache log should match expected (all hits)") - - // Verify subgraph calls for second query - all should be cached - productsCallsSecond := tracker.GetCount(productsHost) - reviewsCallsSecond := tracker.GetCount(reviewsHost) - accountsCallsSecond := tracker.GetCount(accountsHost) - assert.Equal(t, 0, productsCallsSecond, "Second query should not call products subgraph (root field cache hit)") - assert.Equal(t, 0, reviewsCallsSecond, "Second query should not call reviews subgraph (entity cache hit)") - assert.Equal(t, 0, accountsCallsSecond, "Second query should not call accounts subgraph (entity cache hit)") - }) - - t.Run("L2 disabled - no external cache operations", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - // Create HTTP client with tracking - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{ - Transport: tracker, - } - - // Disable L2 cache - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // First query - defaultCache.ClearLog() - tracker.Reset() - resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one 
of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - // Verify no cache operations - log := defaultCache.GetLog() - assert.Empty(t, log, "No L2 cache operations should occur when L2 is disabled") - }) -} - -func TestL1L2CacheCombined(t *testing.T) { - t.Run("L1+L2 enabled - L1 within request, L2 across requests", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - // Create HTTP client with tracking - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{ - Transport: tracker, - } - - // Enable both L1 and L2 cache - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: true, - } - - // Enable entity caching for L2 tests (opt-in per-entity caching) - // Configure caching per-subgraph with explicit subgraph names - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "products", - RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - { - SubgraphName: "reviews", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - 
ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames for tracking - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - productsHost := productsURLParsed.Host - reviewsHost := reviewsURLParsed.Host - - // First query - L1 helps within request, L2 populates for later - defaultCache.ClearLog() - tracker.Reset() - resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - logAfterFirst := defaultCache.GetLog() - // Cache operations: get/set for Query.topProducts, Product entities, User entities = 6 operations - assert.Equal(t, 6, len(logAfterFirst), "Should have exactly 6 cache operations (get/set for Query, Products, Users)") - - // Verify the exact cache access log (order may vary for keys within each operation) - wantLogFirst := []CacheLogEntry{ - // Root field Query.topProducts - { - Operation: "get", - Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, - Hits: []bool{false}, - }, - { - Operation: "set", - Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, - }, - // Product entity fetches (reviews data for each product) - { - Operation: "get", - Keys: []string{ - `{"__typename":"Product","key":{"upc":"top-1"}}`, - `{"__typename":"Product","key":{"upc":"top-2"}}`, - }, - Hits: []bool{false, false}, - }, - { - Operation: "set", - Keys: []string{ - 
`{"__typename":"Product","key":{"upc":"top-1"}}`, - `{"__typename":"Product","key":{"upc":"top-2"}}`, - }, - }, - // User entity fetches (author data) - { - Operation: "get", - Keys: []string{ - `{"__typename":"User","key":{"id":"1234"}}`, - }, - Hits: []bool{false}, - }, - { - Operation: "set", - Keys: []string{ - `{"__typename":"User","key":{"id":"1234"}}`, - }, - }, - } - assert.Equal(t, sortCacheLogKeys(wantLogFirst), sortCacheLogKeys(logAfterFirst), "First query cache log should match expected") - - // Verify subgraph calls for first query - productsCallsFirst := tracker.GetCount(productsHost) - reviewsCallsFirst := tracker.GetCount(reviewsHost) - accountsCallsFirst := tracker.GetCount(accountsHost) - assert.Equal(t, 1, productsCallsFirst, "First query should call products subgraph exactly once") - assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph exactly once") - assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph for User entity resolution") - - // Second query - new request means fresh L1, but L2 should hit - defaultCache.ClearLog() - tracker.Reset() - resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - logAfterSecond := defaultCache.GetLog() - // All cache operations should be gets with hits: Query.topProducts, Product entities, User entities - assert.Equal(t, 3, len(logAfterSecond), "Second query should have 3 cache get operations (all hits)") - - // Verify the exact cache access log for second query (all hits) - wantLogSecond := []CacheLogEntry{ - 
// Root field Query.topProducts - HIT - { - Operation: "get", - Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, - Hits: []bool{true}, - }, - // Product entity fetches - HITS - { - Operation: "get", - Keys: []string{ - `{"__typename":"Product","key":{"upc":"top-1"}}`, - `{"__typename":"Product","key":{"upc":"top-2"}}`, - }, - Hits: []bool{true, true}, - }, - // User entity fetches - HITS - { - Operation: "get", - Keys: []string{ - `{"__typename":"User","key":{"id":"1234"}}`, - }, - Hits: []bool{true}, - }, - } - assert.Equal(t, sortCacheLogKeys(wantLogSecond), sortCacheLogKeys(logAfterSecond), "Second query cache log should match expected (all hits)") - - // Verify no subgraph calls for second query (L2 cache hits) - productsCallsSecond := tracker.GetCount(productsHost) - reviewsCallsSecond := tracker.GetCount(reviewsHost) - accountsCallsSecond := tracker.GetCount(accountsHost) - assert.Equal(t, 0, productsCallsSecond, "Second query should not call products subgraph (L2 hit)") - assert.Equal(t, 0, reviewsCallsSecond, "Second query should not call reviews subgraph (L2 hit)") - assert.Equal(t, 0, accountsCallsSecond, "Second query should not call accounts subgraph (L2 hit)") - }) - - t.Run("L1+L2 - cross-request isolation: L1 per-request, L2 shared", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - // Create HTTP client with tracking - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{ - Transport: tracker, - } - - // Enable both L1 and L2 - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: true, - } - - // Enable entity caching for L2 tests (opt-in per-entity caching) - // Configure caching per-subgraph with explicit subgraph names - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "reviews", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", 
CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // First request - populates L2 cache - defaultCache.ClearLog() - tracker.Reset() - resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - logAfterFirst := defaultCache.GetLog() - productKeys := []string{ - `{"__typename":"Product","key":{"upc":"top-1"}}`, - `{"__typename":"Product","key":{"upc":"top-2"}}`, - } - userKeys := []string{ - `{"__typename":"User","key":{"id":"1234"}}`, - } - wantFirstLog := []CacheLogEntry{ - // reviews subgraph _entities(Product) — L2 miss, first time seeing these products - {Operation: "get", Keys: productKeys, Hits: []bool{false, false}}, - // reviews subgraph _entities(Product) — store fetched product data in L2 - {Operation: "set", Keys: productKeys}, - // accounts subgraph _entities(User) — L2 miss, first time seeing this user - {Operation: "get", Keys: userKeys, Hits: 
[]bool{false}}, - // accounts subgraph _entities(User) — store fetched user data in L2 - {Operation: "set", Keys: userKeys}, - } - assert.Equal(t, sortCacheLogKeys(wantFirstLog), sortCacheLogKeys(logAfterFirst), "First request: L2 miss + set for Product and User") - - // Second request - L1 is fresh (new request), but L2 should provide data - defaultCache.ClearLog() - tracker.Reset() - resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - logAfterSecond := defaultCache.GetLog() - wantSecondLog := []CacheLogEntry{ - // reviews subgraph _entities(Product) — L2 hit, both products cached from first request - {Operation: "get", Keys: productKeys, Hits: []bool{true, true}}, - // accounts subgraph _entities(User) — L2 hit, user cached from first request (deduplicated: 1 unique user) - {Operation: "get", Keys: userKeys, Hits: []bool{true}}, - // No set operations — all data served from cache - } - assert.Equal(t, sortCacheLogKeys(wantSecondLog), sortCacheLogKeys(logAfterSecond), "Second request: all L2 cache hits, no sets") - - // No subgraph calls on second request — all entity data served from L2 cache - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - assert.Equal(t, 0, tracker.GetCount(reviewsURLParsed.Host), "Second request should skip reviews subgraph (Product L2 cache hit)") - assert.Equal(t, 0, tracker.GetCount(accountsURLParsed.Host), "Second request should skip accounts subgraph (User L2 cache hit)") - }) -} - -// 
TestPartialEntityCaching demonstrates that only explicitly configured entity types -// are cached. This test configures caching for Product but NOT for User, verifying -// the opt-in nature of the per-entity caching configuration. -func TestPartialEntityCaching(t *testing.T) { - t.Run("only configured entities are cached", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - // Create HTTP client with tracking - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{ - Transport: tracker, - } - - // Enable L2 cache - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: true, - } - - // PARTIAL CACHING: Only configure caching for Product in reviews subgraph, NOT for User in accounts - // This demonstrates the opt-in per-entity caching behavior - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "reviews", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - // Note: accounts subgraph is intentionally NOT configured - User entities should NOT be cached - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames for tracking - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - reviewsHost := reviewsURLParsed.Host - - // First query - Product entities should be cached, 
User entities should NOT - defaultCache.ClearLog() - tracker.Reset() - resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - // Only Product has L2 caching configured (reviews subgraph); User (accounts) does NOT. - // So we expect cache operations for Product only — no User cache activity at all. - productKeys := []string{ - `{"__typename":"Product","key":{"upc":"top-1"}}`, - `{"__typename":"Product","key":{"upc":"top-2"}}`, - } - logAfterFirst := defaultCache.GetLog() - wantFirstLog := []CacheLogEntry{ - // reviews subgraph _entities(Product) — L2 miss, first time seeing these products - {Operation: "get", Keys: productKeys, Hits: []bool{false, false}}, - // reviews subgraph _entities(Product) — store fetched product data in L2 - {Operation: "set", Keys: productKeys}, - // No User operations — accounts subgraph has no caching configured - } - assert.Equal(t, sortCacheLogKeys(wantFirstLog), sortCacheLogKeys(logAfterFirst), "First request: only Product entities have cache operations") - - // Both subgraphs called on first request (no cache to serve from) - assert.Equal(t, 1, tracker.GetCount(reviewsHost), "First query should call reviews subgraph") - assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts subgraph") - - // Second query - Product should hit cache, User should still be fetched from subgraph - defaultCache.ClearLog() - tracker.Reset() - resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - 
assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - logAfterSecond := defaultCache.GetLog() - wantSecondLog := []CacheLogEntry{ - // reviews subgraph _entities(Product) — L2 hit, both products cached from first request - {Operation: "get", Keys: productKeys, Hits: []bool{true, true}}, - // No User operations — accounts subgraph still has no caching configured - // No set operations — Product data served from cache - } - assert.Equal(t, sortCacheLogKeys(wantSecondLog), sortCacheLogKeys(logAfterSecond), "Second request: Product cache hits only") - - // Reviews subgraph skipped (Product served from cache), accounts still called (User not cached) - assert.Equal(t, 0, tracker.GetCount(reviewsHost), "Second query should skip reviews subgraph (Product cache hit)") - assert.Equal(t, 1, tracker.GetCount(accountsHost), "Second query should still call accounts subgraph (User NOT cached)") - }) -} - -// TestRootFieldCaching tests that root fields (like Query.topProducts) can be cached -// when explicitly configured with RootFieldCaching configuration. 
-func TestRootFieldCaching(t *testing.T) { - t.Run("root field caching enabled", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - // Create HTTP client with tracking - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{ - Transport: tracker, - } - - // Enable L2 cache - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: true, - } - - // Configure root field caching for Query.topProducts on products subgraph - // Also configure entity caching to compare behavior - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "products", - RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - { - SubgraphName: "reviews", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames for tracking - productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - productsHost := 
productsURLParsed.Host - reviewsHost := reviewsURLParsed.Host - accountsHost := accountsURLParsed.Host - - // First query - should miss cache for all: root field, entity types - defaultCache.ClearLog() - tracker.Reset() - resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - logAfterFirst := defaultCache.GetLog() - // Should have cache operations for: - // 1. Root field Query.topProducts (get + set = 2 operations) - // 2. Product entities (get + set = 2 operations) - // 3. User entities (get + set = 2 operations) - // Total: 6 operations - assert.Equal(t, 6, len(logAfterFirst), "First query should have 6 cache operations (get+set for root field, Product, User)") - - // Verify first query calls all subgraphs - productsCallsFirst := tracker.GetCount(productsHost) - reviewsCallsFirst := tracker.GetCount(reviewsHost) - accountsCallsFirst := tracker.GetCount(accountsHost) - assert.Equal(t, 1, productsCallsFirst, "First query should call products subgraph") - assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph") - assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph") - - // Second query - should hit cache for root field and entities - defaultCache.ClearLog() - tracker.Reset() - resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth 
control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - logAfterSecond := defaultCache.GetLog() - wantSecondLog := []CacheLogEntry{ - // products subgraph Query.topProducts — root field L2 hit, cached from first request - {Operation: "get", Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, Hits: []bool{true}}, - // reviews subgraph _entities(Product) — L2 hit, both products cached from first request - {Operation: "get", Keys: []string{`{"__typename":"Product","key":{"upc":"top-1"}}`, `{"__typename":"Product","key":{"upc":"top-2"}}`}, Hits: []bool{true, true}}, - // accounts subgraph _entities(User) — L2 hit, user cached from first request (1 unique user) - {Operation: "get", Keys: []string{`{"__typename":"User","key":{"id":"1234"}}`}, Hits: []bool{true}}, - // No set operations — all data served from cache - } - assert.Equal(t, sortCacheLogKeys(wantSecondLog), sortCacheLogKeys(logAfterSecond), "Second query: all cache hits, no sets") - - // All subgraphs skipped on second query (everything served from cache) - assert.Equal(t, 0, tracker.GetCount(productsHost), "Second query should skip products subgraph (root field cache hit)") - assert.Equal(t, 0, tracker.GetCount(reviewsHost), "Second query should skip reviews subgraph (entity cache hit)") - assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts subgraph (entity cache hit)") - }) - - t.Run("root field caching NOT enabled - subgraph still called", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - // Create HTTP client with tracking - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{ - Transport: tracker, - } - - // Enable L2 cache - 
cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: true, - } - - // Only configure entity caching, NOT root field caching - // This demonstrates opt-in behavior: root fields are NOT cached unless configured - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "reviews", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - // Note: products subgraph has NO caching config for Query.topProducts - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames for tracking - productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) - productsHost := productsURLParsed.Host - - // First query - tracker.Reset() - resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - productsCallsFirst := tracker.GetCount(productsHost) - assert.Equal(t, 1, productsCallsFirst, "First query should 
call products subgraph") - - // Second query - products subgraph should still be called because root field is NOT cached - tracker.Reset() - resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - // KEY ASSERTION: Products subgraph IS called on second query because root field is NOT cached - productsCallsSecond := tracker.GetCount(productsHost) - assert.Equal(t, 1, productsCallsSecond, "Second query SHOULD call products subgraph (root field NOT cached)") - }) -} - -// ============================================================================= -// L1 CACHE TESTS FOR LIST FIELDS -// ============================================================================= -// -// These tests verify L1 caching behavior when root fields or child fields -// return lists of entities. - -func TestL1CacheChildFieldEntityList(t *testing.T) { - // This test verifies L1 cache behavior for User.sameUserReviewers: [User!]! - // which returns only the same user (self-reference). - // - // sameUserReviewers is defined in the reviews subgraph with @requires(fields: "username"), - // which means: - // 1. The gateway first resolves username from accounts (entity fetch) - // 2. Then calls reviews to get sameUserReviewers - // 3. sameUserReviewers returns User references (just IDs) - only the same user - // 4. The gateway must make entity fetches to accounts to resolve those users - // - // Query flow: - // 1. topProducts -> products subgraph (root query) - // 2. reviews -> reviews subgraph (entity fetch for Products) - // 3. 
authorWithoutProvides -> accounts subgraph (entity fetch for User 1234) - // - User 1234 is fetched and stored in L1 - // 4. sameUserReviewers -> reviews subgraph (after username resolved) - // - Returns [User 1234] as reference (same user only) - // 5. Entity resolution for sameUserReviewers -> accounts subgraph - // - User 1234 is 100% L1 HIT (already fetched in step 3) - // - THE ENTIRE ACCOUNTS CALL IS SKIPPED! - // - // With L1 enabled: The sameUserReviewers entity fetch is completely skipped - // because all entities are already in L1 cache. - - query := `query { - topProducts { - reviews { - authorWithoutProvides { - id - username - sameUserReviewers { - id - username - } - } - } - } - }` - - // User 1234's sameUserReviewers returns [User 1234] (only self) - expectedResponse := `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}` - - t.Run("L1 enabled - sameUserReviewers fetch entirely skipped via L1 cache", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: false, // Isolate L1 behavior - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - reviewsHost := reviewsURLParsed.Host - - tracker.Reset() - 
out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // With L1 enabled: - // - First accounts call fetches User 1234 for authorWithoutProvides (L1 miss, stored) - // - Reviews called for sameUserReviewers (returns [User 1234] reference) - // - sameUserReviewers entity resolution: User 1234 is 100% L1 HIT - // → accounts call is COMPLETELY SKIPPED! - accountsCalls := tracker.GetCount(accountsHost) - reviewsCalls := tracker.GetCount(reviewsHost) - - // Reviews should be called twice: once for Product entity (reviews field), - // once for sameUserReviewers (after username is resolved from accounts) - assert.Equal(t, 2, reviewsCalls, "Reviews subgraph called for Product.reviews and User.sameUserReviewers") - - // KEY ASSERTION: Only 1 accounts call! The sameUserReviewers entity resolution - // is completely skipped because User 1234 is already in L1 cache. - assert.Equal(t, 1, accountsCalls, - "With L1 enabled: only 1 accounts call (sameUserReviewers entity fetch skipped via L1)") - - }) - - t.Run("L1 disabled - accounts called for sameUserReviewers", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, 
expectedResponse, string(out)) - - // With L1 disabled: - // - First accounts call fetches User 1234 for authorWithoutProvides - // - Second accounts call for sameUserReviewers: User 1234 fetched again (no L1) - // Total: 2 accounts calls - accountsCalls := tracker.GetCount(accountsHost) - assert.Equal(t, 2, accountsCalls, - "With L1 disabled: 2 accounts calls (sameUserReviewers requires separate fetch)") - - }) -} - -func TestL1CacheNestedEntityListDeduplication(t *testing.T) { - // This test verifies L1 deduplication when the same entity appears - // at multiple levels in nested list queries using coReviewers. - // - // coReviewers is defined in the reviews subgraph with @requires(fields: "username"), - // so it triggers cross-subgraph entity resolution. - // - // Query flow: - // 1. topProducts -> products subgraph - // 2. reviews -> reviews subgraph (Product entity fetch) - // 3. authorWithoutProvides -> accounts (User 1234 fetched, stored in L1) - // 4. coReviewers -> reviews subgraph (after username resolved) - // - Returns [User 1234, User 7777] as references - // 5. Entity resolution for coReviewers -> accounts - // - User 1234 should be L1 HIT (already fetched in step 3) - // - User 7777 is L1 MISS (stored in L1) - // 6. coReviewers for User 1234 and User 7777 -> reviews subgraph - // 7. Entity resolution for nested coReviewers -> accounts - // - All users (1234, 7777) are already in L1! - // - // With L1 enabled: The nested coReviewers level should have 100% L1 hits, - // potentially skipping the accounts call entirely for that level. 
- - query := `query { - topProducts { - reviews { - authorWithoutProvides { - id - username - coReviewers { - id - username - coReviewers { - id - username - } - } - } - } - } - }` - - // User 1234's coReviewers: [User 1234, User 7777] - // User 7777's coReviewers: [User 7777, User 1234] - // Nested level repeats these patterns - expectedResponse := `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me"},{"id":"7777","username":"User 7777"}]},{"id":"7777","username":"User 7777","coReviewers":[{"id":"7777","username":"User 7777"},{"id":"1234","username":"Me"}]}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me"},{"id":"7777","username":"User 7777"}]},{"id":"7777","username":"User 7777","coReviewers":[{"id":"7777","username":"User 7777"},{"id":"1234","username":"Me"}]}]}}]}]}}` - - t.Run("L1 enabled - nested coReviewers benefits from L1 hits", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // With L1 enabled: - // - Call 1: 
authorWithoutProvides fetches User 1234 (miss, stored) - // - Call 2: coReviewers entity resolution [User 1234 (hit), User 7777 (miss, stored)] - // - Call 3: nested coReviewers entity resolution - all users are in L1! - // This call should be fully served from L1 cache. - accountsCalls := tracker.GetCount(accountsHost) - // With L1 enabled, the nested coReviewers should be served from L1 - // Only 2 accounts calls needed because nested coReviewers is fully served from L1 - assert.Equal(t, 2, accountsCalls, - "With L1 enabled: exactly 2 accounts calls (nested coReviewers served entirely from L1)") - }) - - t.Run("L1 disabled - more accounts calls without deduplication", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // With L1 disabled: - // - Call 1: authorWithoutProvides fetches User 1234 - // - Call 2: coReviewers entity resolution for User 1234 and User 7777 (no L1 dedup) - // - Call 3: nested coReviewers entity resolution (no L1 dedup) - accountsCalls := tracker.GetCount(accountsHost) - // Without L1 cache, we need 3 accounts calls (no deduplication at nested level) - assert.Equal(t, 3, accountsCalls, - "With L1 disabled: exactly 3 accounts calls (no 
deduplication)") - }) -} - -func TestL1CacheRootFieldEntityListPopulation(t *testing.T) { - // This test verifies L1 cache behavior with a complex nested query starting - // from a root field that returns a list of entities. - // - // Query flow: - // 1. topProducts -> products subgraph (root query, returns list) - // 2. reviews -> reviews subgraph (entity fetch for Products) - // 3. authorWithoutProvides -> accounts subgraph (entity fetch for User 1234) - // - User 1234 is fetched and stored in L1 - // 4. sameUserReviewers -> reviews subgraph (after username resolved) - // - Returns [User 1234] as reference (same user only) - // 5. Entity resolution for sameUserReviewers -> accounts subgraph - // - User 1234 is 100% L1 HIT (already fetched in step 3) - // - THE ENTIRE ACCOUNTS CALL IS SKIPPED! - // - // With L1 enabled: The sameUserReviewers entity fetch is completely skipped. - // With L1 disabled: accounts is called twice (no deduplication). - - query := `query { - topProducts { - upc - name - reviews { - body - authorWithoutProvides { - id - username - sameUserReviewers { - id - username - } - } - } - } - }` - - expectedResponse := `{"data":{"topProducts":[{"upc":"top-1","name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"upc":"top-2","name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}` - - t.Run("L1 enabled - sameUserReviewers fetch skipped via L1 cache", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: false, - } - - setup := 
federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - productsHost := productsURLParsed.Host - reviewsHost := reviewsURLParsed.Host - accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // Query flow with L1 enabled: - // 1. products subgraph: topProducts root query - // 2. reviews subgraph: Product entity fetch for reviews - // 3. accounts subgraph: User entity fetch for authorWithoutProvides (User 1234 stored in L1) - // 4. reviews subgraph: sameUserReviewers (returns [User 1234]) - // 5. sameUserReviewers entity resolution: User 1234 is 100% L1 HIT → accounts call SKIPPED! - productsCalls := tracker.GetCount(productsHost) - reviewsCalls := tracker.GetCount(reviewsHost) - accountsCalls := tracker.GetCount(accountsHost) - - assert.Equal(t, 1, productsCalls, "Should call products subgraph once for topProducts") - assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice (Product.reviews + User.sameUserReviewers)") - // KEY ASSERTION: Only 1 accounts call! sameUserReviewers entity resolution skipped via L1. 
- assert.Equal(t, 1, accountsCalls, - "With L1 enabled: only 1 accounts call (sameUserReviewers entity fetch skipped via L1)") - - }) - - t.Run("L1 disabled - more accounts calls without L1 optimization", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - productsHost := productsURLParsed.Host - reviewsHost := reviewsURLParsed.Host - accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // Query flow with L1 disabled: - // 1. products subgraph: topProducts root query - // 2. reviews subgraph: Product entity fetch for reviews - // 3. accounts subgraph: User entity fetch for authorWithoutProvides - // 4. reviews subgraph: sameUserReviewers - // 5. accounts subgraph: User entity fetch for sameUserReviewers (no L1 → must fetch again!) 
- productsCalls := tracker.GetCount(productsHost) - reviewsCalls := tracker.GetCount(reviewsHost) - accountsCalls := tracker.GetCount(accountsHost) - - assert.Equal(t, 1, productsCalls, "Should call products subgraph once") - assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice") - // KEY ASSERTION: 2 accounts calls without L1 optimization - assert.Equal(t, 2, accountsCalls, - "With L1 disabled: 2 accounts calls (sameUserReviewers requires separate fetch)") - - }) -} - -func TestL1CacheRootFieldNonEntityWithNestedEntities(t *testing.T) { - // This test verifies L1 cache behavior when a root field returns a NON-entity type - // (Review) that contains nested entities (User via authorWithoutProvides). - // - // Key difference from TestL1CacheRootFieldEntityListPopulation: - // - That test starts with topProducts -> [Product] where Product IS an entity (@key(fields: "upc")) - // - This test starts with topReviews -> [Review] where Review is NOT an entity (no @key) - // - Both prove L1 entity caching works for nested User entities - // - // Query flow: - // 1. topReviews -> reviews subgraph (root query, returns [Review] — NOT an entity) - // 2. authorWithoutProvides -> accounts subgraph (entity fetch for Users, stored in L1) - // 3. sameUserReviewers -> reviews subgraph (after username resolved via @requires) - // 4. Entity resolution for sameUserReviewers -> accounts subgraph - // - All Users are 100% L1 HITs (already fetched in step 2) - // - THE ENTIRE ACCOUNTS CALL IS SKIPPED! 
- - query := `query { - topReviews { - body - authorWithoutProvides { - id - username - sameUserReviewers { - id - username - } - } - } - }` - - expectedResponse := `{"data":{"topReviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}},{"body":"This is the last straw. Hat you will wear. 11/10","authorWithoutProvides":{"id":"7777","username":"User 7777","sameUserReviewers":[{"id":"7777","username":"User 7777"}]}},{"body":"Perfect summer hat.","authorWithoutProvides":{"id":"5678","username":"User 5678","sameUserReviewers":[{"id":"5678","username":"User 5678"}]}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"id":"8888","username":"User 8888","sameUserReviewers":[{"id":"8888","username":"User 8888"}]}}]}}` - - t.Run("L1 enabled - sameUserReviewers fetch skipped via L1 cache", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - reviewsHost := reviewsURLParsed.Host - accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := 
gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // Query flow with L1 enabled: - // 1. reviews subgraph: topReviews root query (Review is NOT an entity) - // 2. accounts subgraph: User entity fetch for authorWithoutProvides (Users stored in L1) - // 3. reviews subgraph: sameUserReviewers (returns [User] references) - // 4. sameUserReviewers entity resolution: all Users are L1 HITs → accounts call SKIPPED! - reviewsCalls := tracker.GetCount(reviewsHost) - accountsCalls := tracker.GetCount(accountsHost) - - assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice (topReviews + sameUserReviewers)") - // KEY ASSERTION: Only 1 accounts call! sameUserReviewers entity resolution skipped via L1. - assert.Equal(t, 1, accountsCalls, - "With L1 enabled: only 1 accounts call (sameUserReviewers entity fetch skipped via L1)") - }) - - t.Run("L1 disabled - more accounts calls without L1 optimization", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - reviewsHost := reviewsURLParsed.Host - accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // Query flow with L1 disabled: 
- // 1. reviews subgraph: topReviews root query - // 2. accounts subgraph: User entity fetch for authorWithoutProvides - // 3. reviews subgraph: sameUserReviewers - // 4. accounts subgraph: User entity fetch for sameUserReviewers (no L1 → must fetch again!) - reviewsCalls := tracker.GetCount(reviewsHost) - accountsCalls := tracker.GetCount(accountsHost) - - assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice") - // KEY ASSERTION: 2 accounts calls without L1 optimization - assert.Equal(t, 2, accountsCalls, - "With L1 disabled: 2 accounts calls (sameUserReviewers requires separate fetch)") - }) -} - -// ============================================================================= -// CACHE ERROR HANDLING TESTS -// ============================================================================= -// -// These tests verify that caches are NOT populated when subgraphs return errors. -// The cache should only store successful responses to prevent caching error states. - -func TestCacheNotPopulatedOnErrors(t *testing.T) { - // Query that triggers an error in accounts subgraph via error-user - // The reviewWithError field returns a review with author ID "error-user" - // which causes FindUserByID to return an error - errorQuery := `query { - reviewWithError { - body - authorWithoutProvides { - id - username - } - } - }` - - // Expected error response - data is null due to non-nullable username field error propagation - expectedErrorResponse := `{"errors":[{"message":"Failed to fetch from Subgraph 'accounts' at Path 'reviewWithError.authorWithoutProvides'."},{"message":"Cannot return null for non-nullable field 'User.username'.","path":["reviewWithError","authorWithoutProvides","username"]}],"data":{"reviewWithError":null}}` - - t.Run("L1 only - error response prevents cache population", func(t *testing.T) { - // This test verifies that L1 cache is NOT populated when an error occurs. 
- // If L1 was erroneously populated, the second query would not call accounts. - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - reviewsHost := reviewsURLParsed.Host - - // First query - should get error from accounts - tracker.Reset() - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) - - // Verify exact error response - assert.Equal(t, expectedErrorResponse, string(resp)) - - reviewsCallsFirst := tracker.GetCount(reviewsHost) - accountsCallsFirst := tracker.GetCount(accountsHost) - assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph once") - assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph once") - - // Second query - L1 should NOT have cached the error, so accounts should be called again - tracker.Reset() - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) - - // Same error should be returned - assert.Equal(t, expectedErrorResponse, string(resp)) - - accountsCallsSecond := tracker.GetCount(accountsHost) - // KEY ASSERTION: If L1 incorrectly cached the error, this would be 0 - assert.Equal(t, 1, accountsCallsSecond, "Second query should call accounts again (L1 should NOT cache errors)") - }) - - t.Run("L2 only - error response prevents cache population", 
func(t *testing.T) { - // This test verifies that L2 cache is NOT populated when an error occurs. - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - // Configure L2 caching for User entities - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - } - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: true, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - // First query - should get error from accounts - defaultCache.ClearLog() - tracker.Reset() - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) - - // Verify exact error response - assert.Equal(t, expectedErrorResponse, string(resp)) - - accountsCallsFirst := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph once") - - // Verify exact cache log: only "get" with miss, NO "set" - // Since the fetch had an error, cache population should be skipped entirely - wantCacheLog := []CacheLogEntry{ - { - Operation: "get", - Keys: []string{`{"__typename":"User","key":{"id":"error-user"}}`}, - Hits: []bool{false}, - }, - // NO "set" entry - this is the 
key assertion - } - assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Cache log should only have 'get' miss, no 'set'") - - // Second query - L2 should NOT have cached the error, so accounts should be called again - defaultCache.ClearLog() - tracker.Reset() - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) - - // Same error should be returned - assert.Equal(t, expectedErrorResponse, string(resp)) - - accountsCallsSecond := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCallsSecond, "Second query should call accounts again (L2 should NOT cache errors)") - - // Second query should also have same cache log pattern (get miss, no set) - assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Second query cache log should also have 'get' miss, no 'set'") - }) - - t.Run("L1 and L2 - error response prevents both caches", func(t *testing.T) { - // This test verifies that both L1 and L2 caches are NOT populated when an error occurs. - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - // Configure L2 caching for User entities - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - } - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: true, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) 
- - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - // First query - should get error from accounts - defaultCache.ClearLog() - tracker.Reset() - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) - - // Verify exact error response - assert.Equal(t, expectedErrorResponse, string(resp)) - - accountsCallsFirst := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph once") - - // Verify exact cache log: only "get" with miss, NO "set" - wantCacheLog := []CacheLogEntry{ - { - Operation: "get", - Keys: []string{`{"__typename":"User","key":{"id":"error-user"}}`}, - Hits: []bool{false}, - }, - } - assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Cache log should only have 'get' miss, no 'set'") - - // Second query - neither L1 nor L2 should have cached the error - defaultCache.ClearLog() - tracker.Reset() - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) - - // Same error should be returned - assert.Equal(t, expectedErrorResponse, string(resp)) - - accountsCallsSecond := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCallsSecond, "Second query should call accounts again (neither L1 nor L2 should cache errors)") - - // Second query should also have same cache log pattern - assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Second query cache log should also have 'get' miss, no 'set'") - }) - - t.Run("error does not pollute cache for subsequent success queries", func(t *testing.T) { - // This test verifies that an error query doesn't pollute the cache - // and that subsequent successful queries still work correctly. 
- defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - // Configure L2 caching for User entities - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - } - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: true, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - // First: Query that triggers an error - defaultCache.ClearLog() - tracker.Reset() - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) - - // Verify exact error response - assert.Equal(t, expectedErrorResponse, string(resp)) - - accountsCallsError := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCallsError, "Error query should call accounts") - - // Verify error-user was NOT cached (only get, no set) - wantErrorCacheLog := []CacheLogEntry{ - { - Operation: "get", - Keys: []string{`{"__typename":"User","key":{"id":"error-user"}}`}, - Hits: []bool{false}, - }, - } - assert.Equal(t, wantErrorCacheLog, defaultCache.GetLog(), "Error query cache log should only have 'get' miss, no 'set'") - - // Second: Query a successful user (User 1234 via me query) - // Note: "me" is a root query, not an 
entity fetch, so it doesn't use L2 entity caching - successQuery := `query { - me { - id - username - } - }` - expectedSuccessResponse := `{"data":{"me":{"id":"1234","username":"Me"}}}` - - defaultCache.ClearLog() - tracker.Reset() - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, successQuery, nil, t) - - // Should succeed with exact expected response - assert.Equal(t, expectedSuccessResponse, string(resp)) - - // Note: Root queries (me) don't use L2 entity caching by default, - // so the cache log should be empty for this query. - // The important thing is that the previous error didn't pollute the cache. - assert.Equal(t, 0, len(defaultCache.GetLog()), "Root query should not use L2 entity cache") - - // Third: Query the error user again - should still fail (not cached) - defaultCache.ClearLog() - tracker.Reset() - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) - - assert.Equal(t, expectedErrorResponse, string(resp)) - accountsCallsErrorAgain := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCallsErrorAgain, "Error query should call accounts again (error was not cached)") - - // Verify cache log still shows only get miss, no set - assert.Equal(t, wantErrorCacheLog, defaultCache.GetLog(), "Third query cache log should still have 'get' miss, no 'set'") - }) -} - -// TestL1CacheOptimizationReducesSubgraphCalls tests that the L1 cache optimization -// postprocessor (optimizeL1Cache) correctly identifies which fetches can benefit -// from L1 caching and sets UseL1Cache appropriately. -// -// The key insight is that L1 is only useful when: -// 1. A prior fetch can provide cached data (READ benefit) -// 2. A later fetch can consume cached data (WRITE benefit) -// -// This test verifies the end-to-end effect: when L1 optimization identifies -// matching entity types between fetches, it enables L1 caching, resulting in -// fewer subgraph calls. 
-func TestL1CacheOptimizationReducesSubgraphCalls(t *testing.T) { - // This query demonstrates L1 optimization: - // - Query.me returns User entity - // - User.sameUserReviewers returns [User] entities - // When L1 is enabled and optimized correctly: - // - First User fetch (me) populates L1 cache - // - Second User fetch (sameUserReviewers) hits L1 cache, SKIPS subgraph call - // - // The optimizeL1Cache postprocessor: - // - Sets UseL1Cache=true on User fetches (they share the same entity type) - // - Sets UseL1Cache=false on fetches with no matching entity types - - query := `query { - me { - id - username - sameUserReviewers { - id - username - } - } - }` - - expectedResponse := `{"data":{"me":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}}` - - t.Run("L1 optimization enables cache hit between same entity type fetches", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - reviewsHost := reviewsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // Query flow with L1 optimization: - // 1. accounts subgraph: Query.me (root query, returns User 1234) - // - L1 cache populated with User 1234 - // 2. 
reviews subgraph: User.sameUserReviewers (returns [User 1234]) - // 3. accounts subgraph: User entity fetch for sameUserReviewers - // - User 1234 is 100% L1 HIT! This call is SKIPPED! - accountsCalls := tracker.GetCount(accountsHost) - reviewsCalls := tracker.GetCount(reviewsHost) - - // KEY ASSERTION: Only 1 accounts call! - // Without L1 optimization, there would be 2 calls: - // - First: Query.me - // - Second: User entity resolution for sameUserReviewers - // With L1 optimization, the second call is skipped because User 1234 is in L1 cache. - assert.Equal(t, 1, accountsCalls, - "L1 optimization: only 1 accounts call (sameUserReviewers resolved from L1 cache)") - assert.Equal(t, 1, reviewsCalls, - "Should call reviews subgraph once for User.sameUserReviewers") - }) - - t.Run("Without L1, same query requires more subgraph calls", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, // L1 disabled - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - reviewsHost := reviewsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // Query flow WITHOUT L1: - // 1. accounts subgraph: Query.me (root query) - // 2. reviews subgraph: User.sameUserReviewers - // 3. 
accounts subgraph: User entity fetch (NO L1 cache → must fetch!) - accountsCalls := tracker.GetCount(accountsHost) - reviewsCalls := tracker.GetCount(reviewsHost) - - // KEY ASSERTION: 2 accounts calls without L1! - // This proves L1 optimization saves a subgraph call. - assert.Equal(t, 2, accountsCalls, - "Without L1: 2 accounts calls (sameUserReviewers requires separate fetch)") - assert.Equal(t, 1, reviewsCalls, - "Should call reviews subgraph once for User.sameUserReviewers") - }) -} - -// withCacheAnalytics returns an option that enables cache analytics collection. -// parseCacheAnalytics extracts and parses the X-Cache-Analytics JSON header. -func parseCacheAnalytics(t *testing.T, headers http.Header) resolve.CacheAnalyticsSnapshot { - t.Helper() - raw := headers.Get("X-Cache-Analytics") - require.NotEmpty(t, raw, "X-Cache-Analytics header should be present") - var snap resolve.CacheAnalyticsSnapshot - err := json.Unmarshal([]byte(raw), &snap) - require.NoError(t, err, "X-Cache-Analytics header should be valid JSON") - return snap -} - -// normalizeSnapshot makes a CacheAnalyticsSnapshot deterministically comparable by -// sorting EntityTypes, L1Reads, L2Reads, L1Writes, L2Writes, and FieldHashes. 
-func normalizeSnapshot(snap resolve.CacheAnalyticsSnapshot) resolve.CacheAnalyticsSnapshot { - // Sort EntityTypes by TypeName - if snap.EntityTypes != nil { - sorted := make([]resolve.EntityTypeInfo, len(snap.EntityTypes)) - copy(sorted, snap.EntityTypes) - sort.Slice(sorted, func(i, j int) bool { - return sorted[i].TypeName < sorted[j].TypeName - }) - snap.EntityTypes = sorted - } - - // Sort L1Reads and zero out non-deterministic CacheAgeMs - if snap.L1Reads != nil { - sorted := make([]resolve.CacheKeyEvent, len(snap.L1Reads)) - copy(sorted, snap.L1Reads) - for i := range sorted { - sorted[i].CacheAgeMs = 0 - } - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].CacheKey != sorted[j].CacheKey { - return sorted[i].CacheKey < sorted[j].CacheKey - } - return sorted[i].Kind < sorted[j].Kind - }) - snap.L1Reads = sorted - } - - // Sort L2Reads and zero out non-deterministic CacheAgeMs - if snap.L2Reads != nil { - sorted := make([]resolve.CacheKeyEvent, len(snap.L2Reads)) - copy(sorted, snap.L2Reads) - for i := range sorted { - sorted[i].CacheAgeMs = 0 - } - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].CacheKey != sorted[j].CacheKey { - return sorted[i].CacheKey < sorted[j].CacheKey - } - return sorted[i].Kind < sorted[j].Kind - }) - snap.L2Reads = sorted - } - - // Sort L1Writes - if snap.L1Writes != nil { - sorted := make([]resolve.CacheWriteEvent, len(snap.L1Writes)) - copy(sorted, snap.L1Writes) - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].CacheKey != sorted[j].CacheKey { - return sorted[i].CacheKey < sorted[j].CacheKey - } - return sorted[i].CacheLevel < sorted[j].CacheLevel - }) - snap.L1Writes = sorted - } - - // Sort L2Writes - if snap.L2Writes != nil { - sorted := make([]resolve.CacheWriteEvent, len(snap.L2Writes)) - copy(sorted, snap.L2Writes) - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].CacheKey != sorted[j].CacheKey { - return sorted[i].CacheKey < sorted[j].CacheKey - } - return sorted[i].CacheLevel < 
sorted[j].CacheLevel - }) - snap.L2Writes = sorted - } - - // Sort FieldHashes for deterministic comparison - if snap.FieldHashes != nil { - sorted := make([]resolve.EntityFieldHash, len(snap.FieldHashes)) - copy(sorted, snap.FieldHashes) - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].EntityType != sorted[j].EntityType { - return sorted[i].EntityType < sorted[j].EntityType - } - if sorted[i].FieldName != sorted[j].FieldName { - return sorted[i].FieldName < sorted[j].FieldName - } - if sorted[i].KeyRaw != sorted[j].KeyRaw { - return sorted[i].KeyRaw < sorted[j].KeyRaw - } - if sorted[i].KeyHash != sorted[j].KeyHash { - return sorted[i].KeyHash < sorted[j].KeyHash - } - return sorted[i].FieldHash < sorted[j].FieldHash - }) - snap.FieldHashes = sorted - } - - // Sort ShadowComparisons by CacheKey and zero out non-deterministic CacheAgeMs - if snap.ShadowComparisons != nil { - sorted := make([]resolve.ShadowComparisonEvent, len(snap.ShadowComparisons)) - copy(sorted, snap.ShadowComparisons) - for i := range sorted { - sorted[i].CacheAgeMs = 0 - } - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].CacheKey != sorted[j].CacheKey { - return sorted[i].CacheKey < sorted[j].CacheKey - } - return sorted[i].EntityType < sorted[j].EntityType - }) - snap.ShadowComparisons = sorted - } - - // Sort MutationEvents for deterministic comparison - if snap.MutationEvents != nil { - sorted := make([]resolve.MutationEvent, len(snap.MutationEvents)) - copy(sorted, snap.MutationEvents) - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].MutationRootField != sorted[j].MutationRootField { - return sorted[i].MutationRootField < sorted[j].MutationRootField - } - return sorted[i].EntityCacheKey < sorted[j].EntityCacheKey - }) - snap.MutationEvents = sorted - } - - // Zero out non-deterministic FetchTimings (DurationMs varies between runs) - // Use normalizeFetchTimings() when you need to assert FetchTimings fields. 
- snap.FetchTimings = nil - - // Normalize empty slices to nil for consistent comparison - // (JSON unmarshalling produces empty slices, expected literals produce nil) - if len(snap.L1Reads) == 0 { - snap.L1Reads = nil - } - if len(snap.L2Reads) == 0 { - snap.L2Reads = nil - } - if len(snap.L1Writes) == 0 { - snap.L1Writes = nil - } - if len(snap.L2Writes) == 0 { - snap.L2Writes = nil - } - if len(snap.EntityTypes) == 0 { - snap.EntityTypes = nil - } - if len(snap.FieldHashes) == 0 { - snap.FieldHashes = nil - } - if len(snap.ErrorEvents) == 0 { - snap.ErrorEvents = nil - } - if len(snap.ShadowComparisons) == 0 { - snap.ShadowComparisons = nil - } - if len(snap.MutationEvents) == 0 { - snap.MutationEvents = nil - } - - return snap -} - -// normalizeFetchTimings sorts FetchTimings deterministically and zeros DurationMs -// (the only non-deterministic field). Unlike normalizeSnapshot, this preserves -// all other fields (HTTPStatusCode, ResponseBytes, etc.) for assertion. -func normalizeFetchTimings(timings []resolve.FetchTimingEvent) []resolve.FetchTimingEvent { - sorted := make([]resolve.FetchTimingEvent, len(timings)) - copy(sorted, timings) - for i := range sorted { - sorted[i].DurationMs = 0 - } - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].DataSource != sorted[j].DataSource { - return sorted[i].DataSource < sorted[j].DataSource - } - return sorted[i].Source < sorted[j].Source - }) - return sorted -} - -func TestCacheAnalyticsE2E(t *testing.T) { - // Common cache key constants used across subtests - const ( - keyProductTop1 = `{"__typename":"Product","key":{"upc":"top-1"}}` - keyProductTop2 = `{"__typename":"Product","key":{"upc":"top-2"}}` - keyTopProducts = `{"__typename":"Query","field":"topProducts"}` - keyUser1234 = `{"__typename":"User","key":{"id":"1234"}}` - keyMe = `{"__typename":"Query","field":"me"}` - dsAccounts = "accounts" - dsProducts = "products" - dsReviews = "reviews" - ) - - // Field hash constants — xxhash of the rendered scalar 
field values. - // These are deterministic because xxhash is seeded identically each time. - const ( - hashProductNameTrilby uint64 = 1032923585965781586 // xxhash("Trilby") - hashProductNameFedora uint64 = 2432227032303632641 // xxhash("Fedora") - hashUserUsernameMe uint64 = 4957449860898447395 // xxhash("Me") - ) - - // Entity key constants for field hash assertions - const ( - entityKeyProductTop1 = `{"upc":"top-1"}` - entityKeyProductTop2 = `{"upc":"top-2"}` - entityKeyUser1234 = `{"id":"1234"}` - ) - - // Byte sizes of cached entities (measured from actual JSON marshalling) - const ( - byteSizeProductTop1 = 177 // Product top-1 entity (reviews subgraph response) - byteSizeProductTop2 = 233 // Product top-2 entity (reviews subgraph response) - byteSizeTopProducts = 127 // Query.topProducts root field (products subgraph response) - byteSizeUser1234 = 49 // User 1234 entity (accounts subgraph response) - byteSizeUser1234Full = 105 // User 1234 entity from L1 (includes sameUserReviewers data) - byteSizeQueryMe = 56 // Query.me root field (accounts subgraph response) - ) - - // Shared field hashes for the multi-upstream query (topProducts with reviews). 
- // Product.name: 2 products (Trilby, Fedora) → 2 distinct hashes - // User.username: 2 reviews both by "Me" → 2 identical hashes - // All FieldSourceSubgraph by default (overridden in specific tests) - multiUpstreamFieldHashes := []resolve.EntityFieldHash{ - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceSubgraph}, - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceSubgraph}, - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph}, - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph}, - } - - // L2 hit field hashes — same data but all sourced from L2 cache - multiUpstreamFieldHashesL2 := []resolve.EntityFieldHash{ - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2}, - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2}, - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, - } - - multiUpstreamEntityTypes := []resolve.EntityTypeInfo{ - {TypeName: "Product", Count: 2, UniqueKeys: 2}, - {TypeName: "User", Count: 2, UniqueKeys: 1}, - } - - // Standard subgraph caching configs used by L2 and L1+L2 tests - multiUpstreamCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "products", - RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, 
- }, - }, - { - SubgraphName: "reviews", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - } - - expectedResponseBody := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}` - - t.Run("L2 miss then hit with analytics", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // First query — all L2 misses, populates L2 cache - tracker.Reset() - resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - - expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first 
request, cache empty - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first request, cache empty - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // L2 miss: root field not yet cached - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts}, // L2 miss: User entity not yet cached (second review's User 1234 deduplicated in batch) - }, - L2Writes: []resolve.CacheWriteEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written after subgraph fetch on miss - {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written after subgraph fetch on miss - {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written to L2 after fetch - {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // User entity written after accounts fetch - }, - FieldHashes: multiUpstreamFieldHashes, - EntityTypes: multiUpstreamEntityTypes, - }) - assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers))) - - // Second query — all L2 hits from populated cache - tracker.Reset() - resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - - expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: 
dsReviews, ByteSize: byteSizeProductTop1}, // L2 hit: populated by Request 1 - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // L2 hit: populated by Request 1 - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // L2 hit: root field cached by Request 1 - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234}, // L2 hit: User entity cached by Request 1 (second review's User 1234 deduplicated) - }, - // No L2Writes: all served from cache, no fetches needed - FieldHashes: multiUpstreamFieldHashesL2, - EntityTypes: multiUpstreamEntityTypes, - }) - assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers))) - }) - - t.Run("L1 cache analytics with entity reuse", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: false, - EnableCacheAnalytics: true, - }), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Query that triggers L1 entity reuse: - // 1. Query.me -> accounts subgraph -> returns User 1234 -> populates L1 - // 2. User.sameUserReviewers -> reviews subgraph -> returns [User 1234] - // 3. 
Entity fetch for User 1234 -> L1 HIT (no subgraph call) - query := `query { - me { - id - username - sameUserReviewers { - id - username - } - } - }` - - tracker.Reset() - resp, headers := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}}`, string(resp)) - - expected := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L1Reads: []resolve.CacheKeyEvent{ - // L1 hit: User 1234 was populated by Query.me root fetch, reused for sameUserReviewers - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234Full}, - }, - L1Writes: []resolve.CacheWriteEvent{ - // Query.me root field written to L1 after accounts subgraph fetch - {CacheKey: keyMe, EntityType: "Query", ByteSize: byteSizeQueryMe, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL1}, - }, - FieldHashes: []resolve.EntityFieldHash{ - // Both username entries show L1 source because the entity key resolves to - // the L1 source recorded during the entity fetch L1 HIT - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL1}, // me.username: entity came from L1 - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL1}, // sameUserReviewers[0].username: same L1 entity - }, - EntityTypes: []resolve.EntityTypeInfo{ - {TypeName: "User", Count: 2, UniqueKeys: 1}, // 2 User instances, but only 1 unique key (1234) - }, - }) - assert.Equal(t, expected, normalizeSnapshot(parseCacheAnalytics(t, headers))) - }) - - t.Run("L1+L2 combined analytics", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := 
&http.Client{Transport: tracker} - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: true, - EnableCacheAnalytics: true, - }), - withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // First query — L2 misses (L1 is per-request, always fresh) - tracker.Reset() - resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - - expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first request, cache empty - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first request, cache empty - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // L2 miss: root field not yet cached - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts}, // L2 miss: User entity not yet cached (second review's User 1234 hits L1 after this fetch) - }, - L2Writes: []resolve.CacheWriteEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written after reviews subgraph fetch - {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written 
after reviews subgraph fetch - {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written after products fetch - {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // User entity written after accounts fetch - }, - FieldHashes: multiUpstreamFieldHashes, - EntityTypes: multiUpstreamEntityTypes, - }) - assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers))) - - // Second query — L2 hits (L1 is per-request, reset between requests) - tracker.Reset() - resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - - expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // L2 hit: populated by Request 1 - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // L2 hit: populated by Request 1 - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // L2 hit: root field cached by Request 1 - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234}, // L2 hit: User entity cached by Request 1 (second review's User 1234 hits L1) - }, - // No L2Writes: all entities served from L2 cache - FieldHashes: multiUpstreamFieldHashesL2, - EntityTypes: multiUpstreamEntityTypes, - }) - assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers))) - }) - - t.Run("root field with args - L2 
analytics", func(t *testing.T) { - // Tests that root field caching with arguments properly records L2 analytics events. - // This covers the root field path in tryL2CacheLoad (no L1 keys branch). - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - rootFieldArgsCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "accounts", - RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "user", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(rootFieldArgsCachingConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - const ( - keyUserById1234 = `{"__typename":"Query","field":"user","args":{"id":"1234"}}` - keyUserById5678 = `{"__typename":"Query","field":"user","args":{"id":"5678"}}` - dsAccountsLocal = "accounts" - byteSizeUser1234 = 38 // {"user":{"id":"1234","username":"Me"}} - byteSizeUser5678 = 45 // {"user":{"id":"5678","username":"User 5678"}} - - hashUsernameMeLocal uint64 = 4957449860898447395 // xxhash("Me") - hashUsername5678Local uint64 = 15512417390573333165 // xxhash("User 5678") - entityKeyUser1234Local = `{"id":"1234"}` - entityKeyUser5678Local = `{"id":"5678"}` - ) - - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - // First query (id=1234) — L2 miss, populates cache - tracker.Reset() - resp, headers := gqlClient.QueryWithHeaders(ctx, 
setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) - assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) - assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts subgraph") - - expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyUserById1234, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsAccountsLocal}, // L2 miss: first request, cache empty - }, - L2Writes: []resolve.CacheWriteEvent{ - {CacheKey: keyUserById1234, EntityType: "Query", ByteSize: byteSizeUser1234, DataSource: dsAccountsLocal, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written after accounts fetch - }, - FieldHashes: []resolve.EntityFieldHash{ - {EntityType: "User", FieldName: "username", FieldHash: hashUsernameMeLocal, KeyRaw: entityKeyUser1234Local, Source: resolve.FieldSourceSubgraph}, // User returned by root field, data from subgraph - }, - EntityTypes: []resolve.EntityTypeInfo{ - {TypeName: "User", Count: 1, UniqueKeys: 1}, // 1 User entity from root field response - }, - }) - assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers))) - - // Second query (same id=1234) — L2 hit - tracker.Reset() - resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) - assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) - assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts (cache hit)") - - expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyUserById1234, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsAccountsLocal, ByteSize: byteSizeUser1234}, // L2 hit: populated by first request - }, - // No L2Writes: data served from cache - 
FieldHashes: []resolve.EntityFieldHash{ - // Source is FieldSourceSubgraph (default) because entity source tracking operates at - // entity cache level, not root field cache level — no entity caching configured for User - {EntityType: "User", FieldName: "username", FieldHash: hashUsernameMeLocal, KeyRaw: entityKeyUser1234Local, Source: resolve.FieldSourceSubgraph}, - }, - EntityTypes: []resolve.EntityTypeInfo{ - {TypeName: "User", Count: 1, UniqueKeys: 1}, - }, - }) - assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers))) - - // Third query (different id=5678) — L2 miss (different args = different cache key) - tracker.Reset() - resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "5678"}, t) - assert.Equal(t, `{"data":{"user":{"id":"5678","username":"User 5678"}}}`, string(resp)) - assert.Equal(t, 1, tracker.GetCount(accountsHost), "Third query should call accounts (different args)") - - expected3 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyUserById5678, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsAccountsLocal}, // L2 miss: different args, not cached - }, - L2Writes: []resolve.CacheWriteEvent{ - {CacheKey: keyUserById5678, EntityType: "Query", ByteSize: byteSizeUser5678, DataSource: dsAccountsLocal, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // New args written to L2 - }, - FieldHashes: []resolve.EntityFieldHash{ - {EntityType: "User", FieldName: "username", FieldHash: hashUsername5678Local, KeyRaw: entityKeyUser5678Local, Source: resolve.FieldSourceSubgraph}, // User 5678 data from subgraph - }, - EntityTypes: []resolve.EntityTypeInfo{ - {TypeName: "User", Count: 1, UniqueKeys: 1}, - }, - }) - assert.Equal(t, expected3, normalizeSnapshot(parseCacheAnalytics(t, headers))) - }) - - t.Run("root field only - L2 analytics without entity caching", func(t 
*testing.T) { - // Tests root field caching analytics in isolation — only root field caching configured, - // no entity caching. Verifies that only root field events appear in analytics. - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - // Only configure root field caching for products — no entity caching at all - rootOnlyConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "products", - RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(rootOnlyConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) - productsHost := productsURLParsed.Host - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - reviewsHost := reviewsURLParsed.Host - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - const ( - keyTopProductsLocal = `{"__typename":"Query","field":"topProducts"}` - dsProductsLocal = "products" - byteSizeTP = 127 // Query.topProducts root field response - ) - - // First query — L2 miss for root field, no events for entities (not configured) - tracker.Reset() - resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - 
assert.Equal(t, expectedResponseBody, string(resp)) - - // Products subgraph called (root field miss), reviews + accounts always called (no entity caching) - assert.Equal(t, 1, tracker.GetCount(productsHost), "First query should call products subgraph") - assert.Equal(t, 1, tracker.GetCount(reviewsHost), "First query should call reviews subgraph") - assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts subgraph") - - expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyTopProductsLocal, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProductsLocal}, // L2 miss: first request, cache empty - }, - L2Writes: []resolve.CacheWriteEvent{ - {CacheKey: keyTopProductsLocal, EntityType: "Query", ByteSize: byteSizeTP, DataSource: dsProductsLocal, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written after products fetch - }, - // Only entity types tracked during resolution (not caching-dependent) - FieldHashes: multiUpstreamFieldHashes, - EntityTypes: multiUpstreamEntityTypes, - }) - assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers))) - - // Second query — L2 hit for root field, entities still fetched (not cached) - tracker.Reset() - resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - - // Products subgraph skipped (root field cache hit), reviews + accounts still called - assert.Equal(t, 0, tracker.GetCount(productsHost), "Second query should skip products (root field cache hit)") - assert.Equal(t, 1, tracker.GetCount(reviewsHost), "Second query should call reviews (no entity caching)") - assert.Equal(t, 1, tracker.GetCount(accountsHost), "Second query should call accounts (no entity caching)") - - expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - 
L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyTopProductsLocal, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProductsLocal, ByteSize: byteSizeTP}, // L2 hit: root field cached by first request - }, - // No L2Writes: root field served from cache, entities have no caching configured - FieldHashes: multiUpstreamFieldHashes, // Entity field hashes still tracked (resolution, not caching) - EntityTypes: multiUpstreamEntityTypes, - }) - assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers))) - }) - - t.Run("subgraph fetch records HTTPStatusCode and ResponseBytes", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // First request — all L2 misses, subgraph fetches happen - resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - - snap := parseCacheAnalytics(t, headers) - - // Filter to subgraph fetch events only (exclude L2 read events) - var subgraphTimings []resolve.FetchTimingEvent - for _, ft := range snap.FetchTimings { - if ft.Source == resolve.FieldSourceSubgraph { - subgraphTimings = append(subgraphTimings, ft) - } - } - timings := normalizeFetchTimings(subgraphTimings) - - assert.Equal(t, 3, len(timings), "should have exactly 3 fetch timing events (one per subgraph)") - for i, ft := range timings { - assert.Equal(t, 
resolve.FieldSourceSubgraph, ft.Source, "entry %d should be a subgraph fetch", i) - assert.Equal(t, 200, ft.HTTPStatusCode, "entry %d should have HTTP 200", i) - assert.Equal(t, int64(0), ft.TTFBMs, "entry %d TTFB not yet instrumented", i) - } - - // Sorted by DataSource: accounts, products, reviews - assert.Equal(t, dsAccounts, timings[0].DataSource) - assert.Equal(t, "User", timings[0].EntityType) - assert.Equal(t, true, timings[0].IsEntityFetch) - - assert.Equal(t, dsProducts, timings[1].DataSource) - assert.Equal(t, "Query", timings[1].EntityType) - assert.Equal(t, false, timings[1].IsEntityFetch) - - assert.Equal(t, dsReviews, timings[2].DataSource) - assert.Equal(t, "Product", timings[2].EntityType) - assert.Equal(t, true, timings[2].IsEntityFetch) - - // ResponseBytes = full GraphQL response body from each subgraph - assert.Equal(t, 62, timings[0].ResponseBytes, "accounts subgraph response size") - assert.Equal(t, 136, timings[1].ResponseBytes, "products subgraph response size") - assert.Equal(t, 376, timings[2].ResponseBytes, "reviews subgraph response size") - }) - - t.Run("cache hit has zero HTTPStatusCode and ResponseBytes", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // First request — populates L2 cache - resp, _ := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - 
- // Second request — all L2 hits - resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - - snap := parseCacheAnalytics(t, headers) - timings := normalizeFetchTimings(snap.FetchTimings) - - // All entries should be L2 cache hits with zero HTTP fields - for i, ft := range timings { - assert.Equal(t, resolve.FieldSourceL2, ft.Source, "entry %d should be an L2 cache hit", i) - assert.Equal(t, 0, ft.HTTPStatusCode, "entry %d cache hit should have zero HTTPStatusCode", i) - assert.Equal(t, 0, ft.ResponseBytes, "entry %d cache hit should have zero ResponseBytes", i) - } - }) -} - -func TestShadowCacheE2E(t *testing.T) { - // Cache key constants (same as TestCacheAnalyticsE2E — same federation setup) - const ( - keyProductTop1 = `{"__typename":"Product","key":{"upc":"top-1"}}` - keyProductTop2 = `{"__typename":"Product","key":{"upc":"top-2"}}` - keyTopProducts = `{"__typename":"Query","field":"topProducts"}` - keyUser1234 = `{"__typename":"User","key":{"id":"1234"}}` - dsAccounts = "accounts" - dsProducts = "products" - dsReviews = "reviews" - ) - - // Field hash constants - const ( - hashProductNameTrilby uint64 = 1032923585965781586 - hashProductNameFedora uint64 = 2432227032303632641 - hashUserUsernameMe uint64 = 4957449860898447395 - ) - - // Entity key constants - const ( - entityKeyProductTop1 = `{"upc":"top-1"}` - entityKeyProductTop2 = `{"upc":"top-2"}` - entityKeyUser1234 = `{"id":"1234"}` - ) - - // Byte sizes - const ( - byteSizeProductTop1 = 177 - byteSizeProductTop2 = 233 - byteSizeTopProducts = 127 - byteSizeUser1234 = 49 - ) - - // Shadow comparison hash constants - const ( - shadowHashProductTop1 uint64 = 8656108128396512717 - shadowHashProductTop2 uint64 = 4671066427758823003 - shadowHashUser1234 uint64 = 188937276969638005 - shadowBytesProductTop1 = 124 - shadowBytesProductTop2 = 180 - 
shadowBytesUser1234 = 17 - ) - - // Shadow cached field hash constants (ProvidesData fields hashed from cached value during shadow comparison) - const ( - shadowFieldHashProductReviewsTop1 uint64 = 13894521258004960943 // xxhash of Product reviews field for top-1 - shadowFieldHashProductReviewsTop2 uint64 = 3182276346310063647 // xxhash of Product reviews field for top-2 - ) - - // Field hashes when all data comes from subgraph (first request, all misses) - fieldHashesSubgraph := []resolve.EntityFieldHash{ - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceSubgraph}, - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceSubgraph}, - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph}, - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph}, - } - - // Field hashes when all data comes from L2 (second request, all hits — no shadow entities) - fieldHashesL2 := []resolve.EntityFieldHash{ - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2}, - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2}, - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, - } - - // Field hashes when all entities are in shadow mode (second request): - // L2 source hashes from resolution + ShadowCached hashes from compareShadowValues - fieldHashesL2AllShadow := 
[]resolve.EntityFieldHash{ - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2}, - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2}, - {EntityType: "Product", FieldName: "reviews", FieldHash: shadowFieldHashProductReviewsTop1, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceShadowCached}, // Cached Product reviews field for per-field staleness detection - {EntityType: "Product", FieldName: "reviews", FieldHash: shadowFieldHashProductReviewsTop2, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceShadowCached}, // Cached Product reviews field for per-field staleness detection - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username for per-field staleness detection - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username (second review) - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, - } - - // Field hashes when only User is in shadow mode (mixed mode, second request): - // Product/root L2 source hashes + User L2 + User ShadowCached hashes - fieldHashesL2MixedShadow := []resolve.EntityFieldHash{ - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2}, - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2}, - {EntityType: "User", FieldName: "username", FieldHash: 
hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username for per-field staleness detection - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username (second review) - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, - } - - entityTypes := []resolve.EntityTypeInfo{ - {TypeName: "Product", Count: 2, UniqueKeys: 2}, - {TypeName: "User", Count: 2, UniqueKeys: 1}, - } - - expectedResponseBody := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}` - - t.Run("shadow all entities - always fetches", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{"default": defaultCache} - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - // Shadow mode for all entity types, real caching for root fields - shadowConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "products", - RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - { - SubgraphName: "reviews", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, - }, - }, - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: 
"User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, - }, - }, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(shadowConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) - productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) - reviewsHost := mustParseHost(setup.ReviewsUpstreamServer.URL) - - // Request 1: All L2 misses → all 3 subgraphs called - tracker.Reset() - resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - - assert.Equal(t, 1, tracker.GetCount(productsHost), "request 1: should call products exactly once") - assert.Equal(t, 1, tracker.GetCount(reviewsHost), "request 1: should call reviews exactly once") - assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 1: should call accounts exactly once") - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews, Shadow: true}, // Shadow L2 miss: cache empty, subgraph fetched - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews, Shadow: true}, // Shadow L2 miss: cache empty, subgraph fetched - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // Real L2 miss: root field not shadow, fetched normally - {CacheKey: keyUser1234, EntityType: "User", 
Kind: resolve.CacheKeyMiss, DataSource: dsAccounts, Shadow: true}, // Shadow L2 miss: User not yet cached - }, - L2Writes: []resolve.CacheWriteEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written to L2 even in shadow (populates for comparison) - {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written to L2 even in shadow - {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written normally (not shadow) - {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // User entity written for future shadow comparison - }, - // No ShadowComparisons: nothing cached yet to compare against - FieldHashes: fieldHashesSubgraph, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, headers))) - - // Request 2: Entity L2 hits (shadow) → entity subgraphs STILL called - // Root field L2 hit → products NOT called (real caching) - tracker.Reset() - resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - - assert.Equal(t, 0, tracker.GetCount(productsHost), "request 2: products should NOT be called (root field real cache hit)") - assert.Equal(t, 1, tracker.GetCount(reviewsHost), "request 2: reviews should be called (Product entity shadow)") - assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 2: accounts should be called (User entity shadow)") - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: 
[]resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1, Shadow: true}, // Shadow L2 hit: cached by Req 1, but subgraph still called - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2, Shadow: true}, // Shadow L2 hit: cached by Req 1, but subgraph still called - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // Real L2 hit: root field served from cache (not shadow) - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234, Shadow: true}, // Shadow L2 hit: accounts still called for comparison - }, - L2Writes: []resolve.CacheWriteEvent{ - // Only shadow entities re-written (refreshed from subgraph); root field NOT re-written (real cache hit) - {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write: fresh data from subgraph - {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write: fresh data from subgraph - {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write: fresh User from accounts - }, - ShadowComparisons: []resolve.ShadowComparisonEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", IsFresh: true, CachedHash: shadowHashProductTop1, FreshHash: shadowHashProductTop1, CachedBytes: shadowBytesProductTop1, FreshBytes: shadowBytesProductTop1, DataSource: dsReviews, ConfiguredTTL: 30 * time.Second}, // Fresh: cached matches subgraph (data unchanged) - {CacheKey: 
keyProductTop2, EntityType: "Product", IsFresh: true, CachedHash: shadowHashProductTop2, FreshHash: shadowHashProductTop2, CachedBytes: shadowBytesProductTop2, FreshBytes: shadowBytesProductTop2, DataSource: dsReviews, ConfiguredTTL: 30 * time.Second}, // Fresh: cached matches subgraph (data unchanged) - {CacheKey: keyUser1234, EntityType: "User", IsFresh: true, CachedHash: shadowHashUser1234, FreshHash: shadowHashUser1234, CachedBytes: shadowBytesUser1234, FreshBytes: shadowBytesUser1234, DataSource: dsAccounts, ConfiguredTTL: 30 * time.Second}, // Fresh: cached User matches subgraph (no mutation) - }, - FieldHashes: fieldHashesL2AllShadow, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, headers))) - }) - - t.Run("mixed mode - shadow User, real cache Product", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{"default": defaultCache} - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - // Shadow mode for User only, real caching for Product and root fields - mixedConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "products", - RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - { - SubgraphName: "reviews", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, // real caching - }, - }, - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, // shadow - }, - }, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - 
withSubgraphEntityCachingConfigs(mixedConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) - productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) - reviewsHost := mustParseHost(setup.ReviewsUpstreamServer.URL) - - // Request 1: All L2 misses → all 3 subgraphs called - tracker.Reset() - resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - - assert.Equal(t, 1, tracker.GetCount(productsHost), "request 1: should call products exactly once") - assert.Equal(t, 1, tracker.GetCount(reviewsHost), "request 1: should call reviews exactly once") - assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 1: should call accounts exactly once") - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // Real L2 miss: Product entity not yet cached - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // Real L2 miss: Product entity not yet cached - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // Real L2 miss: root field not yet cached - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts, Shadow: true}, // Shadow L2 miss: User entity not yet cached - }, - L2Writes: []resolve.CacheWriteEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Product written for real caching - {CacheKey: keyProductTop2, EntityType: 
"Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Product written for real caching - {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written for real caching - {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // User written (shadow still populates L2) - }, - FieldHashes: fieldHashesSubgraph, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, headers))) - - // Request 2: Product real cache hit, User shadow → still fetched - tracker.Reset() - resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - - assert.Equal(t, 0, tracker.GetCount(productsHost), "request 2: products should NOT be called (root field real cache hit)") - assert.Equal(t, 0, tracker.GetCount(reviewsHost), "request 2: reviews should NOT be called (Product entity real cache hit)") - assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 2: accounts SHOULD be called (User entity shadow)") - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // Real L2 hit: Product served from cache (no subgraph call) - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // Real L2 hit: Product served from cache (no subgraph call) - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // Real L2 hit: 
root field served from cache - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234, Shadow: true}, // Shadow L2 hit: accounts still called for comparison - }, - L2Writes: []resolve.CacheWriteEvent{ - // Only User re-written (shadow always fetches fresh); Product/root NOT re-written (real hit) - {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write: fresh data from accounts - }, - ShadowComparisons: []resolve.ShadowComparisonEvent{ - // Only User has shadow comparisons; Product uses real caching - {CacheKey: keyUser1234, EntityType: "User", IsFresh: true, CachedHash: shadowHashUser1234, FreshHash: shadowHashUser1234, CachedBytes: shadowBytesUser1234, FreshBytes: shadowBytesUser1234, DataSource: dsAccounts, ConfiguredTTL: 30 * time.Second}, // Fresh: cached User matches subgraph - }, - FieldHashes: fieldHashesL2MixedShadow, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, headers))) - }) - - t.Run("shadow mode without analytics - safety only", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{"default": defaultCache} - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - shadowConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "products", - RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, - }, - }, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - 
withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), // analytics NOT enabled - withSubgraphEntityCachingConfigs(shadowConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) - - // Request 1: Populate cache - tracker.Reset() - resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - // No stats when analytics is disabled - assert.Empty(t, headers.Get("X-Cache-Analytics"), "analytics header should not be set when analytics disabled") - - // Request 2: Shadow mode — accounts still fetched (data not served from cache) - tracker.Reset() - resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 2: accounts should be called (shadow mode)") - // No stats when analytics is disabled - assert.Empty(t, headers.Get("X-Cache-Analytics"), "analytics header should not be set when analytics disabled") - }) - - t.Run("graduation - shadow to real", func(t *testing.T) { - // Same FakeLoaderCache shared across both engine setups - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{"default": defaultCache} - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - // Phase 1: Shadow mode for User - shadowConfigs := engine.SubgraphCachingConfigs{ - {SubgraphName: "products", RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * 
time.Second}, - }}, - {SubgraphName: "reviews", EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, - }}, - {SubgraphName: "accounts", EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, - }}, - } - - setup1 := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(shadowConfigs), - )) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsHost1 := mustParseHost(setup1.AccountsUpstreamServer.URL) - - // Phase 1, Request 1: Populate L2 cache - tracker.Reset() - resp, headers := gqlClient.QueryWithHeaders(ctx, setup1.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // Real L2 miss: first request, cache empty - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // Real L2 miss: first request, cache empty - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // Real L2 miss: root field not yet cached - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts, Shadow: true}, // Shadow L2 miss: User not yet cached - }, - L2Writes: []resolve.CacheWriteEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, 
CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Product written for real caching - {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Product written for real caching - {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written for real caching - {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // User written (shadow still populates L2) - }, - FieldHashes: fieldHashesSubgraph, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, headers))) - - // Phase 1, Request 2: Shadow — accounts still called - tracker.Reset() - resp, headers = gqlClient.QueryWithHeaders(ctx, setup1.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - assert.Equal(t, 1, tracker.GetCount(accountsHost1), "phase 1 request 2: accounts should be called (shadow mode)") - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // Real L2 hit: Product served from cache - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // Real L2 hit: Product served from cache - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // Real L2 hit: root field from cache - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234, Shadow: true}, // 
Shadow L2 hit: cached but accounts still called - }, - L2Writes: []resolve.CacheWriteEvent{ - // Only shadow User re-written; Product/root use real caching (no re-write on hit) - {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write with fresh data from accounts - }, - ShadowComparisons: []resolve.ShadowComparisonEvent{ - {CacheKey: keyUser1234, EntityType: "User", IsFresh: true, CachedHash: shadowHashUser1234, FreshHash: shadowHashUser1234, CachedBytes: shadowBytesUser1234, FreshBytes: shadowBytesUser1234, DataSource: dsAccounts, ConfiguredTTL: 30 * time.Second}, // Fresh: cached User matches subgraph (safe to graduate) - }, - FieldHashes: fieldHashesL2MixedShadow, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, headers))) - - setup1.Close() - - // Phase 2: Graduated to real caching (same cache, new engine) - realConfigs := engine.SubgraphCachingConfigs{ - {SubgraphName: "products", RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, - }}, - {SubgraphName: "reviews", EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, - }}, - {SubgraphName: "accounts", EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, // No ShadowMode! 
- }}, - } - - tracker2 := newSubgraphCallTracker(http.DefaultTransport) - trackingClient2 := &http.Client{Transport: tracker2} - - setup2 := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), // SAME cache - withHTTPClient(trackingClient2), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(realConfigs), - )) - t.Cleanup(setup2.Close) - - accountsHost2 := mustParseHost(setup2.AccountsUpstreamServer.URL) - - // Phase 2, Request 3: Real L2 hit — accounts NOT called - tracker2.Reset() - resp, headers = gqlClient.QueryWithHeaders(ctx, setup2.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - assert.Equal(t, 0, tracker2.GetCount(accountsHost2), "phase 2: accounts should NOT be called (real L2 hit)") - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // Real L2 hit: cached by Phase 1 - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // Real L2 hit: cached by Phase 1 - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // Real L2 hit: root field cached by Phase 1 - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234}, // Real L2 hit: graduated from shadow, no longer calls accounts - }, - // No L2Writes: all real cache hits, no fetches needed - // No ShadowComparisons: User is no longer in shadow mode - FieldHashes: fieldHashesL2, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, 
headers))) - }) -} - -func TestMutationImpactE2E(t *testing.T) { - accounts.ResetUsers() - t.Cleanup(accounts.ResetUsers) - - // Configure entity caching for User on accounts subgraph - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - } - - mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` - - // Uses a simple query that causes an entity fetch for User 1234 - // me { id username } triggers: accounts root fetch for Query.me, no entity fetch - // We need a query that triggers entity caching for User - topProducts with reviews + authorWithoutProvides - entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` - - t.Run("mutation with prior cache shows stale entity", func(t *testing.T) { - accounts.ResetUsers() - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{"default": defaultCache} - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Request 1: Query to populate L2 cache with User entity - tracker.Reset() - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) - assert.Contains(t, string(resp), `"username":"Me"`) - - // Request 2: Mutation — should detect stale cached entity - tracker.Reset() - respMut, 
headersMut := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) - assert.Contains(t, string(respMut), `"UpdatedMe"`) - - snap := normalizeSnapshot(parseCacheAnalytics(t, headersMut)) - require.NotNil(t, snap.MutationEvents, "should have mutation impact events") - require.Equal(t, 1, len(snap.MutationEvents), "should have exactly 1 mutation impact event") - - event := snap.MutationEvents[0] - assert.Equal(t, "updateUsername", event.MutationRootField) - assert.Equal(t, "User", event.EntityType) - assert.Equal(t, `{"__typename":"User","key":{"id":"1234"}}`, event.EntityCacheKey) - assert.Equal(t, true, event.HadCachedValue, "should have found cached value") - assert.Equal(t, true, event.IsStale, "cached value should be stale (username changed)") - - // Record discovered values for exact assertion - t.Logf("MutationImpact event: %+v", event) - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - FieldHashes: []resolve.EntityFieldHash{ - // Hash of "UpdatedMe" (post-mutation username) - {EntityType: "User", FieldName: "username", FieldHash: 16932466035575627600, KeyRaw: `{"id":"1234"}`}, - }, - EntityTypes: []resolve.EntityTypeInfo{ - {TypeName: "User", Count: 1, UniqueKeys: 1}, // Mutation returned 1 User entity - }, - MutationEvents: []resolve.MutationEvent{ - { - MutationRootField: "updateUsername", - EntityType: "User", - EntityCacheKey: `{"__typename":"User","key":{"id":"1234"}}`, - HadCachedValue: true, // L2 had cached value from Request 1 query - IsStale: true, // Cached "Me" differs from fresh "UpdatedMe" - CachedHash: event.CachedHash, - FreshHash: event.FreshHash, - CachedBytes: event.CachedBytes, - FreshBytes: event.FreshBytes, - }, - }, - }), snap) - }) - - t.Run("mutation without prior cache shows no-cache event", func(t *testing.T) { - accounts.ResetUsers() - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{"default": defaultCache} - - tracker := 
newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // NO prior query — L2 cache is empty - // Send mutation directly - tracker.Reset() - respMut, headersMut := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) - assert.Contains(t, string(respMut), `"UpdatedMe"`) - - snap := normalizeSnapshot(parseCacheAnalytics(t, headersMut)) - require.NotNil(t, snap.MutationEvents, "should have mutation impact events") - require.Equal(t, 1, len(snap.MutationEvents), "should have exactly 1 mutation impact event") - - event := snap.MutationEvents[0] - assert.Equal(t, "updateUsername", event.MutationRootField) - assert.Equal(t, "User", event.EntityType) - assert.Equal(t, `{"__typename":"User","key":{"id":"1234"}}`, event.EntityCacheKey) - assert.Equal(t, false, event.HadCachedValue, "should NOT have found cached value") - assert.Equal(t, false, event.IsStale, "cannot be stale without cached value") - assert.Equal(t, uint64(0), event.CachedHash, "no cached value = no hash") - assert.Equal(t, 0, event.CachedBytes, "no cached value = no bytes") - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - FieldHashes: []resolve.EntityFieldHash{ - // Hash of "UpdatedMe" (post-mutation username) - {EntityType: "User", FieldName: "username", FieldHash: 16932466035575627600, KeyRaw: `{"id":"1234"}`}, - }, - EntityTypes: []resolve.EntityTypeInfo{ - {TypeName: "User", Count: 1, UniqueKeys: 1}, // Mutation returned 1 
User entity - }, - MutationEvents: []resolve.MutationEvent{ - { - MutationRootField: "updateUsername", - EntityType: "User", - EntityCacheKey: `{"__typename":"User","key":{"id":"1234"}}`, - HadCachedValue: false, // No prior query, L2 cache was empty - IsStale: false, // Cannot be stale without a cached value to compare - FreshHash: event.FreshHash, - FreshBytes: event.FreshBytes, - }, - }, - }), snap) - }) -} - -func mustParseHost(rawURL string) string { - parsed, err := url.Parse(rawURL) - if err != nil { - panic(fmt.Sprintf("failed to parse URL %q: %v", rawURL, err)) - } - return parsed.Host -} - -func TestFederationCachingAliases(t *testing.T) { - // Helper to create a standard setup for alias caching tests - setupAliasCachingTest := func(t *testing.T) ( - *federationtesting.FederationSetup, - *GraphqlClient, - context.Context, - context.CancelFunc, - *subgraphCallTracker, - *FakeLoaderCache, - string, // accountsHost - ) { - t.Helper() - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{ - Transport: tracker, - } - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "products", - RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - { - SubgraphName: "reviews", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - } - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), - 
withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - return setup, gqlClient, ctx, cancel, tracker, defaultCache, accountsHost - } - - t.Run("L2 hit - alias then no alias", func(t *testing.T) { - setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) - - // Request 1: Use alias userName for username - defaultCache.ClearLog() - tracker.Reset() - query1 := `query { topProducts { name reviews { body authorWithoutProvides { userName: username } } } }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"userName":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"userName":"Me"}}]}]}}`, - string(resp)) - - accountsCalls1 := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") - - // Request 2: No alias (original field name) - defaultCache.ClearLog() - tracker.Reset() - query2 := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, - string(resp)) - - 
accountsCalls2 := tracker.GetCount(accountsHost) - assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit from normalized cache)") - }) - - t.Run("L2 hit - two different aliases for same field", func(t *testing.T) { - setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) - - // Request 1: alias u1 for username - defaultCache.ClearLog() - tracker.Reset() - query1 := `query { topProducts { name reviews { body authorWithoutProvides { u1: username } } } }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"u1":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"u1":"Me"}}]}]}}`, - string(resp)) - - accountsCalls1 := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") - - // Request 2: alias u2 for username - defaultCache.ClearLog() - tracker.Reset() - query2 := `query { topProducts { name reviews { body authorWithoutProvides { u2: username } } } }` - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"u2":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"u2":"Me"}}]}]}}`, - string(resp)) - - accountsCalls2 := tracker.GetCount(accountsHost) - assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying field)") - }) - - t.Run("no collision - alias matches another field name", func(t *testing.T) { - setup, gqlClient, ctx, _, tracker, 
defaultCache, accountsHost := setupAliasCachingTest(t) - - // Request 1: alias realName for username (realName is another real field on User) - // This triggers an accounts entity fetch for username, stores normalized {"username":"Me"} in L2 - defaultCache.ClearLog() - tracker.Reset() - query1 := `query { topProducts { name reviews { body authorWithoutProvides { realName: username } } } }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"realName":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"realName":"Me"}}]}]}}`, - string(resp)) - - accountsCalls1 := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once for username") - - // Request 2: actual username field (no alias) - same underlying field - // Should be an L2 hit because both resolve username from accounts - defaultCache.ClearLog() - tracker.Reset() - query2 := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, - string(resp)) - - accountsCalls2 := tracker.GetCount(accountsHost) - assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying field username)") - }) - - t.Run("no collision - field name used as alias for another field", func(t *testing.T) { - setup, 
gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) - - // Request 1: username field (no alias) - triggers accounts entity fetch for username - defaultCache.ClearLog() - tracker.Reset() - query1 := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, - string(resp)) - - accountsCalls1 := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") - - // Request 2: different alias (u1) for same field (username) - // Should be an L2 hit because the underlying field is the same - defaultCache.ClearLog() - tracker.Reset() - query2 := `query { topProducts { name reviews { body authorWithoutProvides { u1: username } } } }` - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"u1":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"u1":"Me"}}]}]}}`, - string(resp)) - - accountsCalls2 := tracker.GetCount(accountsHost) - assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying field)") - }) - - t.Run("L2 hit - multiple fields some aliased some not", func(t *testing.T) { - setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) - - // Request 1: alias username and include 
realName (realName comes from reviews, not accounts) - defaultCache.ClearLog() - tracker.Reset() - query1 := `query { topProducts { name reviews { body authorWithoutProvides { userName: username realName } } } }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"userName":"Me","realName":"User Usington"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"userName":"Me","realName":"User Usington"}}]}]}}`, - string(resp)) - - accountsCalls1 := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") - - // Request 2: no alias on username, different alias on realName - // accounts entity cache should be L2 hit (same username field) - defaultCache.ClearLog() - tracker.Reset() - query2 := `query { topProducts { name reviews { body authorWithoutProvides { username name: realName } } } }` - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","name":"User Usington"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","name":"User Usington"}}]}]}}`, - string(resp)) - - accountsCalls2 := tracker.GetCount(accountsHost) - assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying username field)") - }) - - t.Run("L1 hit within single request with aliases", func(t *testing.T) { - // Tests L1 cache with aliased fields across entity fetches within the same request. - // Flow: - // 1. 
topProducts -> products - // 2. reviews -> reviews (entity fetch for Products) - // 3. authorWithoutProvides -> accounts (entity fetch for User 1234, aliased userName: username) - // -> User 1234 stored in L1 with normalized field names - // 4. sameUserReviewers -> reviews (returns [User 1234] reference) - // 5. Entity resolution for sameUserReviewers -> accounts - // -> User 1234 is L1 HIT (already fetched in step 3), entire accounts call skipped - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: false}), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - // Query with alias on username - sameUserReviewers returns same user, - // should be L1 hit from the first entity fetch - tracker.Reset() - query := `query { - topProducts { - reviews { - authorWithoutProvides { - id - userName: username - sameUserReviewers { - id - userName: username - } - } - } - } - }` - resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]}]}}`, - string(resp)) - - // With L1 enabled: first accounts call fetches User 1234 for authorWithoutProvides - // sameUserReviewers entity resolution hits L1 -> accounts call skipped - accountsCalls := tracker.GetCount(accountsHost) - 
assert.Equal(t, 1, accountsCalls, "Should call accounts subgraph once (sameUserReviewers skipped via L1)") - }) - - t.Run("L1 hit within single request with mixed alias and no alias", func(t *testing.T) { - // Same as above, but the nested sameUserReviewers uses the original field name (no alias) - // while the outer authorWithoutProvides uses an alias. L1 cache stores normalized data, - // so the nested fetch should still hit L1 despite the different field naming. - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: false}), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - // Outer authorWithoutProvides uses alias "userName: username" - // Nested sameUserReviewers uses plain "username" (no alias) - // L1 should still hit because cache stores normalized (original) field names - tracker.Reset() - query := `query { - topProducts { - reviews { - authorWithoutProvides { - id - userName: username - sameUserReviewers { - id - username - } - } - } - } - }` - resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}`, - string(resp)) - - // With L1 enabled: first accounts call fetches User 1234 for authorWithoutProvides - // sameUserReviewers entity resolution 
hits L1 -> accounts call skipped - accountsCalls := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls, "Should call accounts subgraph once (sameUserReviewers skipped via L1)") - }) - - t.Run("L2 hit - aliased root field then original root field", func(t *testing.T) { - setup, gqlClient, ctx, _, tracker, defaultCache, _ := setupAliasCachingTest(t) - productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) - - // Request 1: alias the root field topProducts as tp - defaultCache.ClearLog() - tracker.Reset() - query1 := `query { tp: topProducts { name } }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, - `{"data":{"tp":[{"name":"Trilby"},{"name":"Fedora"}]}}`, - string(resp)) - - productsCalls1 := tracker.GetCount(productsHost) - assert.Equal(t, 1, productsCalls1, "Request 1 should call products subgraph once") - - // Request 2: same root field without alias — should L2 hit (same cache key) - defaultCache.ClearLog() - tracker.Reset() - query2 := `query { topProducts { name } }` - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby"},{"name":"Fedora"}]}}`, - string(resp)) - - productsCalls2 := tracker.GetCount(productsHost) - assert.Equal(t, 0, productsCalls2, "Request 2 should skip products (L2 hit from aliased root field)") - }) - - t.Run("L2 hit - two different root field aliases", func(t *testing.T) { - setup, gqlClient, ctx, _, tracker, defaultCache, _ := setupAliasCachingTest(t) - productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) - - // Request 1: alias p1 for topProducts - defaultCache.ClearLog() - tracker.Reset() - query1 := `query { p1: topProducts { name } }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, - `{"data":{"p1":[{"name":"Trilby"},{"name":"Fedora"}]}}`, - string(resp)) - - productsCalls1 := tracker.GetCount(productsHost) - 
assert.Equal(t, 1, productsCalls1, "Request 1 should call products subgraph once") - - // Request 2: different alias p2 for same root field - defaultCache.ClearLog() - tracker.Reset() - query2 := `query { p2: topProducts { name } }` - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) - assert.Equal(t, - `{"data":{"p2":[{"name":"Trilby"},{"name":"Fedora"}]}}`, - string(resp)) - - productsCalls2 := tracker.GetCount(productsHost) - assert.Equal(t, 0, productsCalls2, "Request 2 should skip products (L2 hit - same underlying root field)") - }) - - t.Run("L1+L2 combined - alias entity caching across both layers", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - } - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: true}), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) - - // Request 1: alias on username, sameUserReviewers triggers L1 hit within request - // L2 is also populated on the first entity fetch - defaultCache.ClearLog() - tracker.Reset() - query1 := `query { - topProducts { - reviews { - authorWithoutProvides { - id - userName: username - sameUserReviewers { - id - userName: username - } - } - } 
- } - }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]}]}}`, - string(resp)) - - accountsCalls1 := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls1, "Request 1: accounts called once (sameUserReviewers skipped via L1)") - - // Request 2: same query without alias — L2 hit for User entity, no accounts calls - defaultCache.ClearLog() - tracker.Reset() - query2 := `query { - topProducts { - reviews { - authorWithoutProvides { - id - username - sameUserReviewers { - id - username - } - } - } - } - }` - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}`, - string(resp)) - - accountsCalls2 := tracker.GetCount(accountsHost) - assert.Equal(t, 0, accountsCalls2, "Request 2: accounts skipped (L2 hit from normalized cache)") - }) - - t.Run("L2 analytics - aliased root field", func(t *testing.T) { - const ( - keyTopProducts = `{"__typename":"Query","field":"topProducts"}` - dsProducts = "products" - byteSizeTopProducts = 53 - hashProductNameTrilby = uint64(1032923585965781586) - hashProductNameFedora = uint64(2432227032303632641) - ) - - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: 
"products", - RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - } - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Shared field hashes: Product.name for Trilby and Fedora from root field response - // Products are not entity-resolved (no @key fetch), so KeyRaw is empty - fieldHashes := []resolve.EntityFieldHash{ - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: "{}"}, // xxhash("Trilby"), no entity key (root field) - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: "{}"}, // xxhash("Fedora"), no entity key (root field) - } - entityTypes := []resolve.EntityTypeInfo{ - {TypeName: "Product", Count: 2, UniqueKeys: 1}, // 2 products from root field, no entity keys - } - - // Request 1: aliased root field — L2 miss, populates cache - tracker.Reset() - query1 := `query { tp: topProducts { name } }` - resp, headers := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, `{"data":{"tp":[{"name":"Trilby"},{"name":"Fedora"}]}}`, string(resp)) - - // Cache key must use original field name "topProducts", NOT the alias "tp" - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // L2 miss: first request, cache empty - }, - L2Writes: []resolve.CacheWriteEvent{ - {CacheKey: 
keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written after products fetch - }, - FieldHashes: fieldHashes, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, headers))) - - // Request 2: original root field (no alias) — L2 hit from Request 1 - tracker.Reset() - query2 := `query { topProducts { name } }` - resp, headers = gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query2, nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby"},{"name":"Fedora"}]}}`, string(resp)) - - // Same cache key hit regardless of alias difference - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // L2 hit: populated by aliased Request 1 - }, - // No L2Writes: served from cache - FieldHashes: fieldHashes, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, headers))) - }) - - t.Run("L1 dedup - two aliases for same entity field in single request", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: false}), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) - - // Two aliases (a1, a2) for the same entity field (authorWithoutProvides) - // Both resolve the same User 1234 — second should be L1 hit - tracker.Reset() - query := `query { - topProducts { - 
reviews { - a1: authorWithoutProvides { - id - username - } - a2: authorWithoutProvides { - id - username - } - } - } - }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"reviews":[{"a1":{"id":"1234","username":"Me"},"a2":{"id":"1234","username":"Me"}}]},{"reviews":[{"a1":{"id":"1234","username":"Me"},"a2":{"id":"1234","username":"Me"}}]}]}}`, - string(resp)) - - accountsCalls := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls, "Should call accounts once (second alias L1 hit for same User entity)") - }) -} From 436c708866a5e84cfe4641475397d208d60895cf Mon Sep 17 00:00:00 2001 From: Jens Neuse Date: Wed, 4 Mar 2026 22:50:10 +0100 Subject: [PATCH 10/11] refactor: move TestMutationCacheInvalidationE2E to L2 test file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Tests L2 cache deletion behavior, not analytics — belongs with other L2 tests. Also remove stale trailing comment from L2 file. 
Co-Authored-By: Claude Opus 4.6 --- .../federation_caching_analytics_test.go | 142 ---------------- .../engine/federation_caching_l2_test.go | 154 ++++++++++++++++-- 2 files changed, 143 insertions(+), 153 deletions(-) diff --git a/execution/engine/federation_caching_analytics_test.go b/execution/engine/federation_caching_analytics_test.go index e41e9dc7bb..e9940ec323 100644 --- a/execution/engine/federation_caching_analytics_test.go +++ b/execution/engine/federation_caching_analytics_test.go @@ -1220,148 +1220,6 @@ func TestMutationImpactE2E(t *testing.T) { }) } -func TestMutationCacheInvalidationE2E(t *testing.T) { - accounts.ResetUsers() - t.Cleanup(accounts.ResetUsers) - - // Configure entity caching for User AND mutation invalidation for updateUsername - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, - }, - MutationCacheInvalidation: plan.MutationCacheInvalidationConfigurations{ - {FieldName: "updateUsername"}, - }, - }, - } - - // Query that triggers entity caching for User via authorWithoutProvides (no @provides) - entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` - mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` - - t.Run("mutation deletes L2 cache entry", func(t *testing.T) { - accounts.ResetUsers() - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{"default": defaultCache} - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), - 
withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) - - // Request 1: Query to populate L2 cache with User entity - tracker.Reset() - defaultCache.ClearLog() - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) - assert.Contains(t, string(resp), `"username":"Me"`) - assert.Equal(t, 1, tracker.GetCount(accountsHost), "should call accounts subgraph once to populate cache") - - // Request 2: Same query — should hit L2 cache, no accounts call - tracker.Reset() - defaultCache.ClearLog() - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) - assert.Contains(t, string(resp), `"username":"Me"`) - assert.Equal(t, 0, tracker.GetCount(accountsHost), "should NOT call accounts subgraph (L2 hit)") - - // Request 3: Mutation — should delete the L2 cache entry - tracker.Reset() - defaultCache.ClearLog() - respMut := gqlClient.QueryString(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) - assert.Contains(t, string(respMut), `"UpdatedMe"`) - - // Verify the cache log contains a delete operation - mutationLog := defaultCache.GetLog() - hasDelete := false - for _, entry := range mutationLog { - if entry.Operation == "delete" { - hasDelete = true - assert.Equal(t, 1, len(entry.Keys), "delete should have exactly 1 key") - assert.Contains(t, entry.Keys[0], `"__typename":"User"`) - assert.Contains(t, entry.Keys[0], `"id":"1234"`) - } - } - assert.True(t, hasDelete, "mutation should trigger a cache delete operation") - - // Request 4: Same query again — should miss L2 (entry deleted), re-fetch from subgraph - tracker.Reset() - defaultCache.ClearLog() - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) - assert.Contains(t, string(resp), `"username":"UpdatedMe"`) - 
assert.Equal(t, 1, tracker.GetCount(accountsHost), "should call accounts subgraph again (L2 entry was deleted)") - }) - - t.Run("mutation without invalidation config does not delete", func(t *testing.T) { - accounts.ResetUsers() - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{"default": defaultCache} - - // Config WITHOUT MutationCacheInvalidation - noInvalidationConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, - }, - // No MutationCacheInvalidation — mutation should NOT delete cache - }, - } - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), - withSubgraphEntityCachingConfigs(noInvalidationConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) - - // Request 1: Query to populate L2 cache - tracker.Reset() - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) - assert.Contains(t, string(resp), `"username":"Me"`) - - // Request 2: Mutation — should NOT delete L2 cache entry - tracker.Reset() - defaultCache.ClearLog() - respMut := gqlClient.QueryString(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) - assert.Contains(t, string(respMut), `"UpdatedMe"`) - - // Verify no delete operation in cache log - mutationLog := defaultCache.GetLog() - for _, entry := range mutationLog { - assert.NotEqual(t, "delete", entry.Operation, "should not have any delete operations without invalidation 
config") - } - - // Request 3: Same query — should still hit L2 cache (stale but not deleted) - tracker.Reset() - _ = gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) - assert.Equal(t, 0, tracker.GetCount(accountsHost), "should NOT call accounts subgraph (L2 entry still present)") - }) -} func TestFederationCachingAliases(t *testing.T) { // Helper to create a standard setup for alias caching tests diff --git a/execution/engine/federation_caching_l2_test.go b/execution/engine/federation_caching_l2_test.go index 25a29787bf..bf988e86d8 100644 --- a/execution/engine/federation_caching_l2_test.go +++ b/execution/engine/federation_caching_l2_test.go @@ -11,6 +11,7 @@ import ( "github.com/wundergraph/graphql-go-tools/execution/engine" "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + accounts "github.com/wundergraph/graphql-go-tools/execution/federationtesting/accounts/graph" "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" ) @@ -1111,14 +1112,145 @@ func TestCacheNotPopulatedOnErrors(t *testing.T) { }) } -// TestL1CacheOptimizationReducesSubgraphCalls tests that the L1 cache optimization -// postprocessor (optimizeL1Cache) correctly identifies which fetches can benefit -// from L1 caching and sets UseL1Cache appropriately. -// -// The key insight is that L1 is only useful when: -// 1. A prior fetch can provide cached data (READ benefit) -// 2. A later fetch can consume cached data (WRITE benefit) -// -// This test verifies the end-to-end effect: when L1 optimization identifies -// matching entity types between fetches, it enables L1 caching, resulting in -// fewer subgraph calls. 
+func TestMutationCacheInvalidationE2E(t *testing.T) { + accounts.ResetUsers() + t.Cleanup(accounts.ResetUsers) + + // Configure entity caching for User AND mutation invalidation for updateUsername + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + MutationCacheInvalidation: plan.MutationCacheInvalidationConfigurations{ + {FieldName: "updateUsername"}, + }, + }, + } + + // Query that triggers entity caching for User via authorWithoutProvides (no @provides) + entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` + + t.Run("mutation deletes L2 cache entry", func(t *testing.T) { + accounts.ResetUsers() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) + + // Request 1: Query to populate L2 cache with User entity + tracker.Reset() + defaultCache.ClearLog() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) + assert.Contains(t, string(resp), `"username":"Me"`) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "should call accounts subgraph 
once to populate cache") + + // Request 2: Same query — should hit L2 cache, no accounts call + tracker.Reset() + defaultCache.ClearLog() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) + assert.Contains(t, string(resp), `"username":"Me"`) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "should NOT call accounts subgraph (L2 hit)") + + // Request 3: Mutation — should delete the L2 cache entry + tracker.Reset() + defaultCache.ClearLog() + respMut := gqlClient.QueryString(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) + assert.Contains(t, string(respMut), `"UpdatedMe"`) + + // Verify the cache log contains a delete operation + mutationLog := defaultCache.GetLog() + hasDelete := false + for _, entry := range mutationLog { + if entry.Operation == "delete" { + hasDelete = true + assert.Equal(t, 1, len(entry.Keys), "delete should have exactly 1 key") + assert.Contains(t, entry.Keys[0], `"__typename":"User"`) + assert.Contains(t, entry.Keys[0], `"id":"1234"`) + } + } + assert.True(t, hasDelete, "mutation should trigger a cache delete operation") + + // Request 4: Same query again — should miss L2 (entry deleted), re-fetch from subgraph + tracker.Reset() + defaultCache.ClearLog() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) + assert.Contains(t, string(resp), `"username":"UpdatedMe"`) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "should call accounts subgraph again (L2 entry was deleted)") + }) + + t.Run("mutation without invalidation config does not delete", func(t *testing.T) { + accounts.ResetUsers() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + // Config WITHOUT MutationCacheInvalidation + noInvalidationConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + // No 
MutationCacheInvalidation — mutation should NOT delete cache + }, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(noInvalidationConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) + + // Request 1: Query to populate L2 cache + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) + assert.Contains(t, string(resp), `"username":"Me"`) + + // Request 2: Mutation — should NOT delete L2 cache entry + tracker.Reset() + defaultCache.ClearLog() + respMut := gqlClient.QueryString(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) + assert.Contains(t, string(respMut), `"UpdatedMe"`) + + // Verify no delete operation in cache log + mutationLog := defaultCache.GetLog() + for _, entry := range mutationLog { + assert.NotEqual(t, "delete", entry.Operation, "should not have any delete operations without invalidation config") + } + + // Request 3: Same query — should still hit L2 cache (stale but not deleted) + tracker.Reset() + _ = gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "should NOT call accounts subgraph (L2 entry still present)") + }) +} From 1b7c1d48a8c73b2069643bd21b597539bdc8e354 Mon Sep 17 00:00:00 2001 From: Jens Neuse Date: Wed, 4 Mar 2026 23:01:35 +0100 Subject: [PATCH 11/11] fix: gci import ordering and trailing whitespace in split test files Co-Authored-By: Claude Opus 4.6 --- 
execution/engine/federation_caching_analytics_test.go | 9 ++++----- execution/engine/federation_caching_l1_test.go | 1 - execution/engine/federation_caching_test.go | 1 - 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/execution/engine/federation_caching_analytics_test.go b/execution/engine/federation_caching_analytics_test.go index e9940ec323..347696fa10 100644 --- a/execution/engine/federation_caching_analytics_test.go +++ b/execution/engine/federation_caching_analytics_test.go @@ -536,7 +536,7 @@ func TestCacheAnalyticsE2E(t *testing.T) { timings := normalizeFetchTimings(subgraphTimings) assert.Equal(t, []resolve.FetchTimingEvent{ - {DataSource: dsAccounts, EntityType: "User", Source: resolve.FieldSourceSubgraph, ItemCount: 1, IsEntityFetch: true, HTTPStatusCode: 200, ResponseBytes: 62}, // _entities fetch for User 1234 + {DataSource: dsAccounts, EntityType: "User", Source: resolve.FieldSourceSubgraph, ItemCount: 1, IsEntityFetch: true, HTTPStatusCode: 200, ResponseBytes: 62}, // _entities fetch for User 1234 {DataSource: dsProducts, EntityType: "Query", Source: resolve.FieldSourceSubgraph, ItemCount: 1, IsEntityFetch: false, HTTPStatusCode: 200, ResponseBytes: 136}, // topProducts root field fetch {DataSource: dsReviews, EntityType: "Product", Source: resolve.FieldSourceSubgraph, ItemCount: 1, IsEntityFetch: true, HTTPStatusCode: 200, ResponseBytes: 376}, // _entities fetch for Product top-1 and top-2 }, timings) @@ -572,9 +572,9 @@ func TestCacheAnalyticsE2E(t *testing.T) { timings := normalizeFetchTimings(snap.FetchTimings) assert.Equal(t, []resolve.FetchTimingEvent{ - {DataSource: dsAccounts, EntityType: "User", Source: resolve.FieldSourceL2, ItemCount: 1, IsEntityFetch: true}, // L2 hit for User 1234 entity - {DataSource: dsProducts, EntityType: "Query", Source: resolve.FieldSourceL2, ItemCount: 1, IsEntityFetch: true}, // L2 hit for topProducts root field - {DataSource: dsReviews, EntityType: "Product", Source: resolve.FieldSourceL2, 
ItemCount: 2, IsEntityFetch: true}, // L2 hit for Product top-1 and top-2 entities + {DataSource: dsAccounts, EntityType: "User", Source: resolve.FieldSourceL2, ItemCount: 1, IsEntityFetch: true}, // L2 hit for User 1234 entity + {DataSource: dsProducts, EntityType: "Query", Source: resolve.FieldSourceL2, ItemCount: 1, IsEntityFetch: true}, // L2 hit for topProducts root field + {DataSource: dsReviews, EntityType: "Product", Source: resolve.FieldSourceL2, ItemCount: 2, IsEntityFetch: true}, // L2 hit for Product top-1 and top-2 entities }, timings) }) } @@ -1220,7 +1220,6 @@ func TestMutationImpactE2E(t *testing.T) { }) } - func TestFederationCachingAliases(t *testing.T) { // Helper to create a standard setup for alias caching tests setupAliasCachingTest := func(t *testing.T) ( diff --git a/execution/engine/federation_caching_l1_test.go b/execution/engine/federation_caching_l1_test.go index fc8aac2b11..5b11cdacb4 100644 --- a/execution/engine/federation_caching_l1_test.go +++ b/execution/engine/federation_caching_l1_test.go @@ -1058,4 +1058,3 @@ func TestL1CacheOptimizationReducesSubgraphCalls(t *testing.T) { "Should call reviews subgraph once for User.sameUserReviewers") }) } - diff --git a/execution/engine/federation_caching_test.go b/execution/engine/federation_caching_test.go index 8721e87ce3..2c3c3ed46a 100644 --- a/execution/engine/federation_caching_test.go +++ b/execution/engine/federation_caching_test.go @@ -2437,4 +2437,3 @@ func TestFederationCaching_MutationSkipsL2Read(t *testing.T) { assert.Equal(t, 1, tracker.GetCount(accountsHost), "Step 2: accounts called once for me root query, entity resolution served from L2 cache") }) } -