diff --git a/execution/engine/federation_caching_analytics_test.go b/execution/engine/federation_caching_analytics_test.go new file mode 100644 index 0000000000..347696fa10 --- /dev/null +++ b/execution/engine/federation_caching_analytics_test.go @@ -0,0 +1,1788 @@ +package engine_test + +import ( + "context" + "net/http" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + accounts "github.com/wundergraph/graphql-go-tools/execution/federationtesting/accounts/graph" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +func TestCacheAnalyticsE2E(t *testing.T) { + // Common cache key constants used across subtests + const ( + keyProductTop1 = `{"__typename":"Product","key":{"upc":"top-1"}}` + keyProductTop2 = `{"__typename":"Product","key":{"upc":"top-2"}}` + keyTopProducts = `{"__typename":"Query","field":"topProducts"}` + keyUser1234 = `{"__typename":"User","key":{"id":"1234"}}` + keyMe = `{"__typename":"Query","field":"me"}` + dsAccounts = "accounts" + dsProducts = "products" + dsReviews = "reviews" + ) + + // Field hash constants — xxhash of the rendered scalar field values. + // These are deterministic because xxhash is seeded identically each time. 
+ const ( + hashProductNameTrilby uint64 = 1032923585965781586 // xxhash("Trilby") + hashProductNameFedora uint64 = 2432227032303632641 // xxhash("Fedora") + hashUserUsernameMe uint64 = 4957449860898447395 // xxhash("Me") + ) + + // Entity key constants for field hash assertions + const ( + entityKeyProductTop1 = `{"upc":"top-1"}` + entityKeyProductTop2 = `{"upc":"top-2"}` + entityKeyUser1234 = `{"id":"1234"}` + ) + + // Byte sizes of cached entities (measured from actual JSON marshalling) + const ( + byteSizeProductTop1 = 177 // Product top-1 entity (reviews subgraph response) + byteSizeProductTop2 = 233 // Product top-2 entity (reviews subgraph response) + byteSizeTopProducts = 127 // Query.topProducts root field (products subgraph response) + byteSizeUser1234 = 49 // User 1234 entity (accounts subgraph response) + byteSizeUser1234Full = 105 // User 1234 entity from L1 (includes sameUserReviewers data) + byteSizeQueryMe = 56 // Query.me root field (accounts subgraph response) + ) + + // Shared field hashes for the multi-upstream query (topProducts with reviews). 
+ // Product.name: 2 products (Trilby, Fedora) → 2 distinct hashes + // User.username: 2 reviews both by "Me" → 2 identical hashes + // All FieldSourceSubgraph by default (overridden in specific tests) + multiUpstreamFieldHashes := []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceSubgraph}, + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph}, + } + + // L2 hit field hashes — same data but all sourced from L2 cache + multiUpstreamFieldHashesL2 := []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2}, + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + } + + multiUpstreamEntityTypes := []resolve.EntityTypeInfo{ + {TypeName: "Product", Count: 2, UniqueKeys: 2}, + {TypeName: "User", Count: 2, UniqueKeys: 1}, + } + + // Standard subgraph caching configs used by L2 and L1+L2 tests + multiUpstreamCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, 
+ }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + expectedResponseBody := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}` + + t.Run("L2 miss then hit with analytics", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First query — all L2 misses, populates L2 cache + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first 
request, cache empty + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first request, cache empty + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // L2 miss: root field not yet cached + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts}, // L2 miss: User entity not yet cached (second review's User 1234 deduplicated in batch) + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written after subgraph fetch on miss + {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written after subgraph fetch on miss + {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written to L2 after fetch + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // User entity written after accounts fetch + }, + FieldHashes: multiUpstreamFieldHashes, + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Second query — all L2 hits from populated cache + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: 
dsReviews, ByteSize: byteSizeProductTop1}, // L2 hit: populated by Request 1 + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // L2 hit: populated by Request 1 + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // L2 hit: root field cached by Request 1 + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234}, // L2 hit: User entity cached by Request 1 (second review's User 1234 deduplicated) + }, + // No L2Writes: all served from cache, no fetches needed + FieldHashes: multiUpstreamFieldHashesL2, + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("L1 cache analytics with entity reuse", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + EnableCacheAnalytics: true, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Query that triggers L1 entity reuse: + // 1. Query.me -> accounts subgraph -> returns User 1234 -> populates L1 + // 2. User.sameUserReviewers -> reviews subgraph -> returns [User 1234] + // 3. 
Entity fetch for User 1234 -> L1 HIT (no subgraph call) + query := `query { + me { + id + username + sameUserReviewers { + id + username + } + } + }` + + tracker.Reset() + resp, headers := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}}`, string(resp)) + + expected := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L1Reads: []resolve.CacheKeyEvent{ + // L1 hit: User 1234 was populated by Query.me root fetch, reused for sameUserReviewers + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234Full}, + }, + L1Writes: []resolve.CacheWriteEvent{ + // Query.me root field written to L1 after accounts subgraph fetch + {CacheKey: keyMe, EntityType: "Query", ByteSize: byteSizeQueryMe, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL1}, + }, + FieldHashes: []resolve.EntityFieldHash{ + // Both username entries show L1 source because the entity key resolves to + // the L1 source recorded during the entity fetch L1 HIT + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL1}, // me.username: entity came from L1 + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL1}, // sameUserReviewers[0].username: same L1 entity + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 2, UniqueKeys: 1}, // 2 User instances, but only 1 unique key (1234) + }, + }) + assert.Equal(t, expected, normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("L1+L2 combined analytics", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := 
&http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + EnableCacheAnalytics: true, + }), + withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First query — L2 misses (L1 is per-request, always fresh) + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first request, cache empty + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first request, cache empty + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // L2 miss: root field not yet cached + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts}, // L2 miss: User entity not yet cached (second review's User 1234 hits L1 after this fetch) + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written after reviews subgraph fetch + {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written 
after reviews subgraph fetch + {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written after products fetch + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // User entity written after accounts fetch + }, + FieldHashes: multiUpstreamFieldHashes, + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Second query — L2 hits (L1 is per-request, reset between requests) + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // L2 hit: populated by Request 1 + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // L2 hit: populated by Request 1 + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // L2 hit: root field cached by Request 1 + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234}, // L2 hit: User entity cached by Request 1 (second review's User 1234 hits L1) + }, + // No L2Writes: all entities served from L2 cache + FieldHashes: multiUpstreamFieldHashesL2, + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("root field with args - L2 
analytics", func(t *testing.T) { + // Tests that root field caching with arguments properly records L2 analytics events. + // This covers the root field path in tryL2CacheLoad (no L1 keys branch). + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + rootFieldArgsCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "user", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(rootFieldArgsCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + const ( + keyUserById1234 = `{"__typename":"Query","field":"user","args":{"id":"1234"}}` + keyUserById5678 = `{"__typename":"Query","field":"user","args":{"id":"5678"}}` + dsAccountsLocal = "accounts" + byteSizeUser1234 = 38 // {"user":{"id":"1234","username":"Me"}} + byteSizeUser5678 = 45 // {"user":{"id":"5678","username":"User 5678"}} + + hashUsernameMeLocal uint64 = 4957449860898447395 // xxhash("Me") + hashUsername5678Local uint64 = 15512417390573333165 // xxhash("User 5678") + entityKeyUser1234Local = `{"id":"1234"}` + entityKeyUser5678Local = `{"id":"5678"}` + ) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query (id=1234) — L2 miss, populates cache + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, 
setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts subgraph") + + expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyUserById1234, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsAccountsLocal}, // L2 miss: first request, cache empty + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyUserById1234, EntityType: "Query", ByteSize: byteSizeUser1234, DataSource: dsAccountsLocal, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written after accounts fetch + }, + FieldHashes: []resolve.EntityFieldHash{ + {EntityType: "User", FieldName: "username", FieldHash: hashUsernameMeLocal, KeyRaw: entityKeyUser1234Local, Source: resolve.FieldSourceSubgraph}, // User returned by root field, data from subgraph + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, // 1 User entity from root field response + }, + }) + assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Second query (same id=1234) — L2 hit + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts (cache hit)") + + expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyUserById1234, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsAccountsLocal, ByteSize: byteSizeUser1234}, // L2 hit: populated by first request + }, + // No L2Writes: data served from cache + 
FieldHashes: []resolve.EntityFieldHash{ + // Source is FieldSourceSubgraph (default) because entity source tracking operates at + // entity cache level, not root field cache level — no entity caching configured for User + {EntityType: "User", FieldName: "username", FieldHash: hashUsernameMeLocal, KeyRaw: entityKeyUser1234Local, Source: resolve.FieldSourceSubgraph}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, + }, + }) + assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Third query (different id=5678) — L2 miss (different args = different cache key) + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "5678"}, t) + assert.Equal(t, `{"data":{"user":{"id":"5678","username":"User 5678"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Third query should call accounts (different args)") + + expected3 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyUserById5678, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsAccountsLocal}, // L2 miss: different args, not cached + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyUserById5678, EntityType: "Query", ByteSize: byteSizeUser5678, DataSource: dsAccountsLocal, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // New args written to L2 + }, + FieldHashes: []resolve.EntityFieldHash{ + {EntityType: "User", FieldName: "username", FieldHash: hashUsername5678Local, KeyRaw: entityKeyUser5678Local, Source: resolve.FieldSourceSubgraph}, // User 5678 data from subgraph + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, + }, + }) + assert.Equal(t, expected3, normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("root field only - L2 analytics without entity caching", func(t 
*testing.T) { + // Tests root field caching analytics in isolation — only root field caching configured, + // no entity caching. Verifies that only root field events appear in analytics. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Only configure root field caching for products — no entity caching at all + rootOnlyConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(rootOnlyConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + reviewsHost := reviewsURLParsed.Host + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + const ( + keyTopProductsLocal = `{"__typename":"Query","field":"topProducts"}` + dsProductsLocal = "products" + byteSizeTP = 127 // Query.topProducts root field response + ) + + // First query — L2 miss for root field, no events for entities (not configured) + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + 
assert.Equal(t, expectedResponseBody, string(resp)) + + // Products subgraph called (root field miss), reviews + accounts always called (no entity caching) + assert.Equal(t, 1, tracker.GetCount(productsHost), "First query should call products subgraph") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "First query should call reviews subgraph") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts subgraph") + + expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyTopProductsLocal, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProductsLocal}, // L2 miss: first request, cache empty + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyTopProductsLocal, EntityType: "Query", ByteSize: byteSizeTP, DataSource: dsProductsLocal, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written after products fetch + }, + // Only entity types tracked during resolution (not caching-dependent) + FieldHashes: multiUpstreamFieldHashes, + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Second query — L2 hit for root field, entities still fetched (not cached) + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + // Products subgraph skipped (root field cache hit), reviews + accounts still called + assert.Equal(t, 0, tracker.GetCount(productsHost), "Second query should skip products (root field cache hit)") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "Second query should call reviews (no entity caching)") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Second query should call accounts (no entity caching)") + + expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + 
L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyTopProductsLocal, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProductsLocal, ByteSize: byteSizeTP}, // L2 hit: root field cached by first request + }, + // No L2Writes: root field served from cache, entities have no caching configured + FieldHashes: multiUpstreamFieldHashes, // Entity field hashes still tracked (resolution, not caching) + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("subgraph fetch records HTTPStatusCode and ResponseBytes", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First request — all L2 misses, subgraph fetches happen + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + snap := parseCacheAnalytics(t, headers) + + // Filter to subgraph fetch events only (exclude L2 read events) + var subgraphTimings []resolve.FetchTimingEvent + for _, ft := range snap.FetchTimings { + if ft.Source == resolve.FieldSourceSubgraph { + subgraphTimings = append(subgraphTimings, ft) + } + } + timings := normalizeFetchTimings(subgraphTimings) + + assert.Equal(t, []resolve.FetchTimingEvent{ + {DataSource: dsAccounts, EntityType: "User", Source: resolve.FieldSourceSubgraph, ItemCount: 1, IsEntityFetch: 
true, HTTPStatusCode: 200, ResponseBytes: 62}, // _entities fetch for User 1234 + {DataSource: dsProducts, EntityType: "Query", Source: resolve.FieldSourceSubgraph, ItemCount: 1, IsEntityFetch: false, HTTPStatusCode: 200, ResponseBytes: 136}, // topProducts root field fetch + {DataSource: dsReviews, EntityType: "Product", Source: resolve.FieldSourceSubgraph, ItemCount: 1, IsEntityFetch: true, HTTPStatusCode: 200, ResponseBytes: 376}, // _entities fetch for Product top-1 and top-2 + }, timings) + }) + + t.Run("cache hit has zero HTTPStatusCode and ResponseBytes", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First request — populates L2 cache + resp, _ := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + // Second request — all L2 hits + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + snap := parseCacheAnalytics(t, headers) + timings := normalizeFetchTimings(snap.FetchTimings) + + assert.Equal(t, []resolve.FetchTimingEvent{ + {DataSource: dsAccounts, EntityType: "User", Source: resolve.FieldSourceL2, ItemCount: 1, IsEntityFetch: true}, // L2 hit for User 1234 entity + {DataSource: dsProducts, EntityType: "Query", 
Source: resolve.FieldSourceL2, ItemCount: 1, IsEntityFetch: true}, // L2 hit for topProducts root field
			{DataSource: dsReviews, EntityType: "Product", Source: resolve.FieldSourceL2, ItemCount: 2, IsEntityFetch: true}, // L2 hit for Product top-1 and top-2 entities
		}, timings)
	})
}

// TestShadowCacheE2E exercises shadow-mode entity caching end to end: entities
// configured with ShadowMode still trigger subgraph fetches and L2 writes, while
// the analytics snapshot records shadow comparison events instead of the cached
// value being served. It also covers mixed shadow/real configs, shadow mode with
// analytics disabled, and "graduation" from shadow to real caching over a shared
// cache instance.
func TestShadowCacheE2E(t *testing.T) {
	// Cache key constants (same as TestCacheAnalyticsE2E — same federation setup)
	const (
		keyProductTop1 = `{"__typename":"Product","key":{"upc":"top-1"}}`
		keyProductTop2 = `{"__typename":"Product","key":{"upc":"top-2"}}`
		keyTopProducts = `{"__typename":"Query","field":"topProducts"}`
		keyUser1234    = `{"__typename":"User","key":{"id":"1234"}}`
		dsAccounts     = "accounts"
		dsProducts     = "products"
		dsReviews      = "reviews"
	)

	// Field hash constants (xxhash of the rendered scalar field values)
	const (
		hashProductNameTrilby uint64 = 1032923585965781586
		hashProductNameFedora uint64 = 2432227032303632641
		hashUserUsernameMe    uint64 = 4957449860898447395
	)

	// Entity key constants
	const (
		entityKeyProductTop1 = `{"upc":"top-1"}`
		entityKeyProductTop2 = `{"upc":"top-2"}`
		entityKeyUser1234    = `{"id":"1234"}`
	)

	// Byte sizes
	const (
		byteSizeProductTop1 = 177
		byteSizeProductTop2 = 233
		byteSizeTopProducts = 127
		byteSizeUser1234    = 49
	)

	// Shadow comparison hash constants
	const (
		shadowHashProductTop1  uint64 = 8656108128396512717
		shadowHashProductTop2  uint64 = 4671066427758823003
		shadowHashUser1234     uint64 = 188937276969638005
		shadowBytesProductTop1        = 124
		shadowBytesProductTop2        = 180
		shadowBytesUser1234           = 17
	)

	// Shadow cached field hash constants (ProvidesData fields hashed from cached value during shadow comparison)
	const (
		shadowFieldHashProductReviewsTop1 uint64 = 13894521258004960943 // xxhash of Product reviews field for top-1
		shadowFieldHashProductReviewsTop2 uint64 = 3182276346310063647  // xxhash of Product reviews field for top-2
	)

	// Field hashes when all data comes from subgraph (first request, all misses)
	fieldHashesSubgraph := []resolve.EntityFieldHash{
		{EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceSubgraph},
		{EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceSubgraph},
		{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph},
		{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph},
	}

	// Field hashes when all data comes from L2 (second request, all hits — no shadow entities).
	// Consumed by the graduation subtest's phase-2 request, once User has graduated to real caching.
	fieldHashesL2 := []resolve.EntityFieldHash{
		{EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2},
		{EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2},
		{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2},
		{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2},
	}

	// Field hashes when all entities are in shadow mode (second request):
	// L2 source hashes from resolution + ShadowCached hashes from compareShadowValues
	fieldHashesL2AllShadow := []resolve.EntityFieldHash{
		{EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2},
		{EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2},
		{EntityType: "Product", FieldName: "reviews", FieldHash: shadowFieldHashProductReviewsTop1, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceShadowCached}, // Cached Product reviews field for per-field staleness detection
		{EntityType: "Product", FieldName: "reviews", FieldHash: shadowFieldHashProductReviewsTop2, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceShadowCached}, // Cached Product reviews field for per-field staleness detection
		{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username for per-field staleness detection
		{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username (second review)
		{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2},
		{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2},
	}

	// Field hashes when only User is in shadow mode (mixed mode, second request):
	// Product/root L2 source hashes + User L2 + User ShadowCached hashes
	fieldHashesL2MixedShadow := []resolve.EntityFieldHash{
		{EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2},
		{EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2},
		{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username for per-field staleness detection
		{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username (second review)
		{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2},
		{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2},
	}

	entityTypes := []resolve.EntityTypeInfo{
		{TypeName: "Product", Count: 2, UniqueKeys: 2},
		{TypeName: "User", Count: 2, UniqueKeys: 1},
	}

	expectedResponseBody := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`

	t.Run("shadow all entities - always fetches", func(t *testing.T) {
		defaultCache := NewFakeLoaderCache()
		caches := map[string]resolve.LoaderCache{"default": defaultCache}

		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		// Shadow mode for all entity types, real caching for root fields
		shadowConfigs := engine.SubgraphCachingConfigs{
			{
				SubgraphName: "products",
				RootFieldCaching: plan.RootFieldCacheConfigurations{
					{TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second},
				},
			},
			{
				SubgraphName: "reviews",
				EntityCaching: plan.EntityCacheConfigurations{
					{TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true},
				},
			},
			{
				SubgraphName: "accounts",
				EntityCaching: plan.EntityCacheConfigurations{
					{TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true},
				},
			},
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withCachingLoaderCache(caches),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}),
			withSubgraphEntityCachingConfigs(shadowConfigs),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL)
		productsHost := mustParseHost(setup.ProductsUpstreamServer.URL)
		reviewsHost := mustParseHost(setup.ReviewsUpstreamServer.URL)

		// Request 1: All L2 misses → all 3 subgraphs called
		tracker.Reset()
		resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, expectedResponseBody, string(resp))

		assert.Equal(t, 1, tracker.GetCount(productsHost), "request 1: should call products exactly once")
		assert.Equal(t, 1, tracker.GetCount(reviewsHost), "request 1: should call reviews exactly once")
		assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 1: should call accounts exactly once")

		assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{
			L2Reads: []resolve.CacheKeyEvent{
				{CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews, Shadow: true}, // Shadow L2 miss: cache empty, subgraph fetched
				{CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews, Shadow: true}, // Shadow L2 miss: cache empty, subgraph fetched
				{CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts},                // Real L2 miss: root field not shadow, fetched normally
				{CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts, Shadow: true},      // Shadow L2 miss: User not yet cached
			},
			L2Writes: []resolve.CacheWriteEvent{
				{CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written to L2 even in shadow (populates for comparison)
				{CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written to L2 even in shadow
				{CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second},  // Root field written normally (not shadow)
				{CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second},         // User entity written for future shadow comparison
			},
			// No ShadowComparisons: nothing cached yet to compare against
			FieldHashes: fieldHashesSubgraph,
			EntityTypes: entityTypes,
		}), normalizeSnapshot(parseCacheAnalytics(t, headers)))

		// Request 2: Entity L2 hits (shadow) → entity subgraphs STILL called
		// Root field L2 hit → products NOT called (real caching)
		tracker.Reset()
		resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, expectedResponseBody, string(resp))

		assert.Equal(t, 0, tracker.GetCount(productsHost), "request 2: products should NOT be called (root field real cache hit)")
		assert.Equal(t, 1, tracker.GetCount(reviewsHost), "request 2: reviews should be called (Product entity shadow)")
		assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 2: accounts should be called (User entity shadow)")

		assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{
			L2Reads: []resolve.CacheKeyEvent{
				{CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1, Shadow: true}, // Shadow L2 hit: cached by Req 1, but subgraph still called
				{CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2, Shadow: true}, // Shadow L2 hit: cached by Req 1, but subgraph still called
				{CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts},                // Real L2 hit: root field served from cache (not shadow)
				{CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234, Shadow: true},         // Shadow L2 hit: accounts still called for comparison
			},
			L2Writes: []resolve.CacheWriteEvent{
				// Only shadow entities re-written (refreshed from subgraph); root field NOT re-written (real cache hit)
				{CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write: fresh data from subgraph
				{CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write: fresh data from subgraph
				{CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second},         // Shadow re-write: fresh User from accounts
			},
			ShadowComparisons: []resolve.ShadowComparisonEvent{
				{CacheKey: keyProductTop1, EntityType: "Product", IsFresh: true, CachedHash: shadowHashProductTop1, FreshHash: shadowHashProductTop1, CachedBytes: shadowBytesProductTop1, FreshBytes: shadowBytesProductTop1, DataSource: dsReviews, ConfiguredTTL: 30 * time.Second}, // Fresh: cached matches subgraph (data unchanged)
				{CacheKey: keyProductTop2, EntityType: "Product", IsFresh: true, CachedHash: shadowHashProductTop2, FreshHash: shadowHashProductTop2, CachedBytes: shadowBytesProductTop2, FreshBytes: shadowBytesProductTop2, DataSource: dsReviews, ConfiguredTTL: 30 * time.Second}, // Fresh: cached matches subgraph (data unchanged)
				{CacheKey: keyUser1234, EntityType: "User", IsFresh: true, CachedHash: shadowHashUser1234, FreshHash: shadowHashUser1234, CachedBytes: shadowBytesUser1234, FreshBytes: shadowBytesUser1234, DataSource: dsAccounts, ConfiguredTTL: 30 * time.Second},                  // Fresh: cached User matches subgraph (no mutation)
			},
			FieldHashes: fieldHashesL2AllShadow,
			EntityTypes: entityTypes,
		}), normalizeSnapshot(parseCacheAnalytics(t, headers)))
	})

	t.Run("mixed mode - shadow User, real cache Product", func(t *testing.T) {
		defaultCache := NewFakeLoaderCache()
		caches := map[string]resolve.LoaderCache{"default": defaultCache}

		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		// Shadow mode for User only, real caching for Product and root fields
		mixedConfigs := engine.SubgraphCachingConfigs{
			{
				SubgraphName: "products",
				RootFieldCaching: plan.RootFieldCacheConfigurations{
					{TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second},
				},
			},
			{
				SubgraphName: "reviews",
				EntityCaching: plan.EntityCacheConfigurations{
					{TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, // real caching
				},
			},
			{
				SubgraphName: "accounts",
				EntityCaching: plan.EntityCacheConfigurations{
					{TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, // shadow
				},
			},
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withCachingLoaderCache(caches),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}),
			withSubgraphEntityCachingConfigs(mixedConfigs),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL)
		productsHost := mustParseHost(setup.ProductsUpstreamServer.URL)
		reviewsHost := mustParseHost(setup.ReviewsUpstreamServer.URL)

		// Request 1: All L2 misses → all 3 subgraphs called
		tracker.Reset()
		resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, expectedResponseBody, string(resp))

		assert.Equal(t, 1, tracker.GetCount(productsHost), "request 1: should call products exactly once")
		assert.Equal(t, 1, tracker.GetCount(reviewsHost), "request 1: should call reviews exactly once")
		assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 1: should call accounts exactly once")

		assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{
			L2Reads: []resolve.CacheKeyEvent{
				{CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews},          // Real L2 miss: Product entity not yet cached
				{CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews},          // Real L2 miss: Product entity not yet cached
				{CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts},           // Real L2 miss: root field not yet cached
				{CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts, Shadow: true}, // Shadow L2 miss: User entity not yet cached
			},
			L2Writes: []resolve.CacheWriteEvent{
				{CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Product written for real caching
				{CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Product written for real caching
				{CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second},  // Root field written for real caching
				{CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second},         // User written (shadow still populates L2)
			},
			FieldHashes: fieldHashesSubgraph,
			EntityTypes: entityTypes,
		}), normalizeSnapshot(parseCacheAnalytics(t, headers)))

		// Request 2: Product real cache hit, User shadow → still fetched
		tracker.Reset()
		resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, expectedResponseBody, string(resp))

		assert.Equal(t, 0, tracker.GetCount(productsHost), "request 2: products should NOT be called (root field real cache hit)")
		assert.Equal(t, 0, tracker.GetCount(reviewsHost), "request 2: reviews should NOT be called (Product entity real cache hit)")
		assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 2: accounts SHOULD be called (User entity shadow)")

		assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{
			L2Reads: []resolve.CacheKeyEvent{
				{CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1},       // Real L2 hit: Product served from cache (no subgraph call)
				{CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2},       // Real L2 hit: Product served from cache (no subgraph call)
				{CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts},        // Real L2 hit: root field served from cache
				{CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234, Shadow: true}, // Shadow L2 hit: accounts still called for comparison
			},
			L2Writes: []resolve.CacheWriteEvent{
				// Only User re-written (shadow always fetches fresh); Product/root NOT re-written (real hit)
				{CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write: fresh data from accounts
			},
			ShadowComparisons: []resolve.ShadowComparisonEvent{
				// Only User has shadow comparisons; Product uses real caching
				{CacheKey: keyUser1234, EntityType: "User", IsFresh: true, CachedHash: shadowHashUser1234, FreshHash: shadowHashUser1234, CachedBytes: shadowBytesUser1234, FreshBytes: shadowBytesUser1234, DataSource: dsAccounts, ConfiguredTTL: 30 * time.Second}, // Fresh: cached User matches subgraph
			},
			FieldHashes: fieldHashesL2MixedShadow,
			EntityTypes: entityTypes,
		}), normalizeSnapshot(parseCacheAnalytics(t, headers)))
	})

	t.Run("shadow mode without analytics - safety only", func(t *testing.T) {
		defaultCache := NewFakeLoaderCache()
		caches := map[string]resolve.LoaderCache{"default": defaultCache}

		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		shadowConfigs := engine.SubgraphCachingConfigs{
			{
				SubgraphName: "products",
				RootFieldCaching: plan.RootFieldCacheConfigurations{
					{TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second},
				},
			},
			{
				SubgraphName: "accounts",
				EntityCaching: plan.EntityCacheConfigurations{
					{TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true},
				},
			},
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withCachingLoaderCache(caches),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), // analytics NOT enabled
			withSubgraphEntityCachingConfigs(shadowConfigs),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL)

		// Request 1: Populate cache
		tracker.Reset()
		resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, expectedResponseBody, string(resp))
		// No stats when analytics is disabled
		assert.Empty(t, headers.Get("X-Cache-Analytics"), "analytics header should not be set when analytics disabled")

		// Request 2: Shadow mode — accounts still fetched (data not served from cache)
		tracker.Reset()
		resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, expectedResponseBody, string(resp))
		assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 2: accounts should be called (shadow mode)")
		// No stats when analytics is disabled
		assert.Empty(t, headers.Get("X-Cache-Analytics"), "analytics header should not be set when analytics disabled")
	})

	t.Run("graduation - shadow to real", func(t *testing.T) {
		// Same FakeLoaderCache shared across both engine setups
		defaultCache := NewFakeLoaderCache()
		caches := map[string]resolve.LoaderCache{"default": defaultCache}

		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		// Phase 1: Shadow mode for User
		shadowConfigs := engine.SubgraphCachingConfigs{
			{SubgraphName: "products", RootFieldCaching: plan.RootFieldCacheConfigurations{
				{TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second},
			}},
			{SubgraphName: "reviews", EntityCaching: plan.EntityCacheConfigurations{
				{TypeName: "Product", CacheName: "default", TTL: 30 * time.Second},
			}},
			{SubgraphName: "accounts", EntityCaching: plan.EntityCacheConfigurations{
				{TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true},
			}},
		}

		setup1 := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withCachingLoaderCache(caches),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}),
			withSubgraphEntityCachingConfigs(shadowConfigs),
		))

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		accountsHost1 := mustParseHost(setup1.AccountsUpstreamServer.URL)

		// Phase 1, Request 1: Populate L2 cache
		tracker.Reset()
		resp, headers := gqlClient.QueryWithHeaders(ctx, setup1.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, expectedResponseBody, string(resp))

		assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{
			L2Reads: []resolve.CacheKeyEvent{
				{CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews},          // Real L2 miss: first request, cache empty
				{CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews},          // Real L2 miss: first request, cache empty
				{CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts},           // Real L2 miss: root field not yet cached
				{CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts, Shadow: true}, // Shadow L2 miss: User not yet cached
			},
			L2Writes: []resolve.CacheWriteEvent{
				{CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Product written for real caching
				{CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Product written for real caching
				{CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second},  // Root field written for real caching
				{CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second},         // User written (shadow still populates L2)
			},
			FieldHashes: fieldHashesSubgraph,
			EntityTypes: entityTypes,
		}), normalizeSnapshot(parseCacheAnalytics(t, headers)))

		// Phase 1, Request 2: Shadow — accounts still called
		tracker.Reset()
		resp, headers = gqlClient.QueryWithHeaders(ctx, setup1.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, expectedResponseBody, string(resp))
		assert.Equal(t, 1, tracker.GetCount(accountsHost1), "phase 1 request 2: accounts should be called (shadow mode)")

		assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{
			L2Reads: []resolve.CacheKeyEvent{
				{CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1},       // Real L2 hit: Product served from cache
				{CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2},       // Real L2 hit: Product served from cache
				{CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts},        // Real L2 hit: root field from cache
				{CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234, Shadow: true}, // Shadow L2 hit: cached but accounts still called
			},
			L2Writes: []resolve.CacheWriteEvent{
				// Only shadow User re-written; Product/root use real caching (no re-write on hit)
				{CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write with fresh data from accounts
			},
			ShadowComparisons: []resolve.ShadowComparisonEvent{
				{CacheKey: keyUser1234, EntityType: "User", IsFresh: true, CachedHash: shadowHashUser1234, FreshHash: shadowHashUser1234, CachedBytes: shadowBytesUser1234, FreshBytes: shadowBytesUser1234, DataSource: dsAccounts, ConfiguredTTL: 30 * time.Second}, // Fresh: cached User matches subgraph (safe to graduate)
			},
			FieldHashes: fieldHashesL2MixedShadow,
			EntityTypes: entityTypes,
		}), normalizeSnapshot(parseCacheAnalytics(t, headers)))

		setup1.Close()

		// Phase 2: Graduated to real caching (same cache, new engine)
		realConfigs := engine.SubgraphCachingConfigs{
			{SubgraphName: "products", RootFieldCaching: plan.RootFieldCacheConfigurations{
				{TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second},
			}},
			{SubgraphName: "reviews", EntityCaching: plan.EntityCacheConfigurations{
				{TypeName: "Product", CacheName: "default", TTL: 30 * time.Second},
			}},
			{SubgraphName: "accounts", EntityCaching: plan.EntityCacheConfigurations{
				{TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, // No ShadowMode!
			}},
		}

		tracker2 := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient2 := &http.Client{Transport: tracker2}

		setup2 := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withCachingLoaderCache(caches), // SAME cache
			withHTTPClient(trackingClient2),
			withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}),
			withSubgraphEntityCachingConfigs(realConfigs),
		))
		t.Cleanup(setup2.Close)

		accountsHost2 := mustParseHost(setup2.AccountsUpstreamServer.URL)

		// Phase 2, Request 3: Real L2 hit — accounts NOT called
		tracker2.Reset()
		resp, headers = gqlClient.QueryWithHeaders(ctx, setup2.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, expectedResponseBody, string(resp))
		assert.Equal(t, 0, tracker2.GetCount(accountsHost2), "phase 2: accounts should NOT be called (real L2 hit)")

		assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{
			L2Reads: []resolve.CacheKeyEvent{
				{CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // Real L2 hit: cached by Phase 1
				{CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // Real L2 hit: cached by Phase 1
				{CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts},  // Real L2 hit: root field cached by Phase 1
				{CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234},         // Real L2 hit: graduated from shadow, no longer calls accounts
			},
			// No L2Writes: all real cache hits, no fetches needed
			// No ShadowComparisons: User is no longer in shadow mode
			FieldHashes: fieldHashesL2,
			EntityTypes: entityTypes,
		}), normalizeSnapshot(parseCacheAnalytics(t, headers)))
	})
}

// TestMutationImpactE2E verifies mutation impact analytics for cached entities:
// a mutation touching a cached User reports whether a cached value existed
// (HadCachedValue) and whether the cached data is now stale (IsStale).
func TestMutationImpactE2E(t *testing.T) {
	accounts.ResetUsers()
	t.Cleanup(accounts.ResetUsers)

	// Configure entity caching for User on accounts subgraph
	subgraphCachingConfigs := engine.SubgraphCachingConfigs{
		{
			SubgraphName: "accounts",
			EntityCaching: plan.EntityCacheConfigurations{
				{TypeName: "User", CacheName: "default", TTL: 30 * time.Second},
			},
		},
	}

	mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }`

	// Uses a simple query that causes an entity fetch for User 1234
	// me { id username } triggers: accounts root fetch for Query.me, no entity fetch
	// We need a query that triggers entity caching for User - topProducts with reviews + authorWithoutProvides
	entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }`

	t.Run("mutation with prior cache shows stale entity", func(t *testing.T) {
		accounts.ResetUsers()
		defaultCache := NewFakeLoaderCache()
		caches := map[string]resolve.LoaderCache{"default": defaultCache}

		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withCachingLoaderCache(caches),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}),
			withSubgraphEntityCachingConfigs(subgraphCachingConfigs),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Request 1: Query to populate L2 cache with User entity
		tracker.Reset()
		resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t)
		assert.Contains(t, string(resp), `"username":"Me"`)

		// Request 2: Mutation — should detect stale cached entity
		tracker.Reset()
		respMut,
headersMut := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) + assert.Contains(t, string(respMut), `"UpdatedMe"`) + + snap := normalizeSnapshot(parseCacheAnalytics(t, headersMut)) + require.NotNil(t, snap.MutationEvents, "should have mutation impact events") + require.Equal(t, 1, len(snap.MutationEvents), "should have exactly 1 mutation impact event") + + event := snap.MutationEvents[0] + assert.Equal(t, "updateUsername", event.MutationRootField) + assert.Equal(t, "User", event.EntityType) + assert.Equal(t, `{"__typename":"User","key":{"id":"1234"}}`, event.EntityCacheKey) + assert.Equal(t, true, event.HadCachedValue, "should have found cached value") + assert.Equal(t, true, event.IsStale, "cached value should be stale (username changed)") + + // Record discovered values for exact assertion + t.Logf("MutationImpact event: %+v", event) + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + FieldHashes: []resolve.EntityFieldHash{ + // Hash of "UpdatedMe" (post-mutation username) + {EntityType: "User", FieldName: "username", FieldHash: 16932466035575627600, KeyRaw: `{"id":"1234"}`}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, // Mutation returned 1 User entity + }, + MutationEvents: []resolve.MutationEvent{ + { + MutationRootField: "updateUsername", + EntityType: "User", + EntityCacheKey: `{"__typename":"User","key":{"id":"1234"}}`, + HadCachedValue: true, // L2 had cached value from Request 1 query + IsStale: true, // Cached "Me" differs from fresh "UpdatedMe" + CachedHash: event.CachedHash, + FreshHash: event.FreshHash, + CachedBytes: event.CachedBytes, + FreshBytes: event.FreshBytes, + }, + }, + }), snap) + }) + + t.Run("mutation without prior cache shows no-cache event", func(t *testing.T) { + accounts.ResetUsers() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + tracker := 
newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // NO prior query — L2 cache is empty + // Send mutation directly + tracker.Reset() + respMut, headersMut := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) + assert.Contains(t, string(respMut), `"UpdatedMe"`) + + snap := normalizeSnapshot(parseCacheAnalytics(t, headersMut)) + require.NotNil(t, snap.MutationEvents, "should have mutation impact events") + require.Equal(t, 1, len(snap.MutationEvents), "should have exactly 1 mutation impact event") + + event := snap.MutationEvents[0] + assert.Equal(t, "updateUsername", event.MutationRootField) + assert.Equal(t, "User", event.EntityType) + assert.Equal(t, `{"__typename":"User","key":{"id":"1234"}}`, event.EntityCacheKey) + assert.Equal(t, false, event.HadCachedValue, "should NOT have found cached value") + assert.Equal(t, false, event.IsStale, "cannot be stale without cached value") + assert.Equal(t, uint64(0), event.CachedHash, "no cached value = no hash") + assert.Equal(t, 0, event.CachedBytes, "no cached value = no bytes") + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + FieldHashes: []resolve.EntityFieldHash{ + // Hash of "UpdatedMe" (post-mutation username) + {EntityType: "User", FieldName: "username", FieldHash: 16932466035575627600, KeyRaw: `{"id":"1234"}`}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, // Mutation returned 1 
User entity + }, + MutationEvents: []resolve.MutationEvent{ + { + MutationRootField: "updateUsername", + EntityType: "User", + EntityCacheKey: `{"__typename":"User","key":{"id":"1234"}}`, + HadCachedValue: false, // No prior query, L2 cache was empty + IsStale: false, // Cannot be stale without a cached value to compare + FreshHash: event.FreshHash, + FreshBytes: event.FreshBytes, + }, + }, + }), snap) + }) +} + +func TestFederationCachingAliases(t *testing.T) { + // Helper to create a standard setup for alias caching tests + setupAliasCachingTest := func(t *testing.T) ( + *federationtesting.FederationSetup, + *GraphqlClient, + context.Context, + context.CancelFunc, + *subgraphCallTracker, + *FakeLoaderCache, + string, // accountsHost + ) { + t.Helper() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := 
context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + return setup, gqlClient, ctx, cancel, tracker, defaultCache, accountsHost + } + + t.Run("L2 hit - alias then no alias", func(t *testing.T) { + setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) + + // Request 1: Use alias userName for username + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { topProducts { name reviews { body authorWithoutProvides { userName: username } } } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"userName":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"userName":"Me"}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") + + // Request 2: No alias (original field name) + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit from normalized cache)") + }) + + 
t.Run("L2 hit - two different aliases for same field", func(t *testing.T) { + setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) + + // Request 1: alias u1 for username + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { topProducts { name reviews { body authorWithoutProvides { u1: username } } } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"u1":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"u1":"Me"}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") + + // Request 2: alias u2 for username + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name reviews { body authorWithoutProvides { u2: username } } } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"u2":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"u2":"Me"}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying field)") + }) + + t.Run("no collision - alias matches another field name", func(t *testing.T) { + setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) + + // Request 1: alias realName for username (realName is another real field on User) + // This triggers an 
accounts entity fetch for username, stores normalized {"username":"Me"} in L2 + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { topProducts { name reviews { body authorWithoutProvides { realName: username } } } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"realName":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"realName":"Me"}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once for username") + + // Request 2: actual username field (no alias) - same underlying field + // Should be an L2 hit because both resolve username from accounts + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying field username)") + }) + + t.Run("no collision - field name used as alias for another field", func(t *testing.T) { + setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) + + // Request 1: username field (no alias) - triggers accounts entity fetch for 
username + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") + + // Request 2: different alias (u1) for same field (username) + // Should be an L2 hit because the underlying field is the same + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name reviews { body authorWithoutProvides { u1: username } } } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"u1":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"u1":"Me"}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying field)") + }) + + t.Run("L2 hit - multiple fields some aliased some not", func(t *testing.T) { + setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) + + // Request 1: alias username and include realName (realName comes from reviews, not accounts) + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { topProducts { name reviews { body 
authorWithoutProvides { userName: username realName } } } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"userName":"Me","realName":"User Usington"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"userName":"Me","realName":"User Usington"}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") + + // Request 2: no alias on username, different alias on realName + // accounts entity cache should be L2 hit (same username field) + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name reviews { body authorWithoutProvides { username name: realName } } } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","name":"User Usington"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","name":"User Usington"}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying username field)") + }) + + t.Run("L1 hit within single request with aliases", func(t *testing.T) { + // Tests L1 cache with aliased fields across entity fetches within the same request. + // Flow: + // 1. topProducts -> products + // 2. reviews -> reviews (entity fetch for Products) + // 3. 
authorWithoutProvides -> accounts (entity fetch for User 1234, aliased userName: username) + // -> User 1234 stored in L1 with normalized field names + // 4. sameUserReviewers -> reviews (returns [User 1234] reference) + // 5. Entity resolution for sameUserReviewers -> accounts + // -> User 1234 is L1 HIT (already fetched in step 3), entire accounts call skipped + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: false}), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Query with alias on username - sameUserReviewers returns same user, + // should be L1 hit from the first entity fetch + tracker.Reset() + query := `query { + topProducts { + reviews { + authorWithoutProvides { + id + userName: username + sameUserReviewers { + id + userName: username + } + } + } + } + }` + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]}]}}`, + string(resp)) + + // With L1 enabled: first accounts call fetches User 1234 for authorWithoutProvides + // sameUserReviewers entity resolution hits L1 -> accounts call skipped + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, "Should call accounts subgraph once (sameUserReviewers 
skipped via L1)") + }) + + t.Run("L1 hit within single request with mixed alias and no alias", func(t *testing.T) { + // Same as above, but the nested sameUserReviewers uses the original field name (no alias) + // while the outer authorWithoutProvides uses an alias. L1 cache stores normalized data, + // so the nested fetch should still hit L1 despite the different field naming. + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: false}), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Outer authorWithoutProvides uses alias "userName: username" + // Nested sameUserReviewers uses plain "username" (no alias) + // L1 should still hit because cache stores normalized (original) field names + tracker.Reset() + query := `query { + topProducts { + reviews { + authorWithoutProvides { + id + userName: username + sameUserReviewers { + id + username + } + } + } + } + }` + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}`, + string(resp)) + + // With L1 enabled: first accounts call fetches User 1234 for authorWithoutProvides + // sameUserReviewers entity resolution hits L1 -> accounts call skipped + accountsCalls := tracker.GetCount(accountsHost) + 
assert.Equal(t, 1, accountsCalls, "Should call accounts subgraph once (sameUserReviewers skipped via L1)") + }) + + t.Run("L2 hit - aliased root field then original root field", func(t *testing.T) { + setup, gqlClient, ctx, _, tracker, defaultCache, _ := setupAliasCachingTest(t) + productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) + + // Request 1: alias the root field topProducts as tp + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { tp: topProducts { name } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"tp":[{"name":"Trilby"},{"name":"Fedora"}]}}`, + string(resp)) + + productsCalls1 := tracker.GetCount(productsHost) + assert.Equal(t, 1, productsCalls1, "Request 1 should call products subgraph once") + + // Request 2: same root field without alias — should L2 hit (same cache key) + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby"},{"name":"Fedora"}]}}`, + string(resp)) + + productsCalls2 := tracker.GetCount(productsHost) + assert.Equal(t, 0, productsCalls2, "Request 2 should skip products (L2 hit from aliased root field)") + }) + + t.Run("L2 hit - two different root field aliases", func(t *testing.T) { + setup, gqlClient, ctx, _, tracker, defaultCache, _ := setupAliasCachingTest(t) + productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) + + // Request 1: alias p1 for topProducts + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { p1: topProducts { name } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"p1":[{"name":"Trilby"},{"name":"Fedora"}]}}`, + string(resp)) + + productsCalls1 := tracker.GetCount(productsHost) + assert.Equal(t, 1, productsCalls1, "Request 1 should call products subgraph once") + + // Request 
2: different alias p2 for same root field + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { p2: topProducts { name } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"p2":[{"name":"Trilby"},{"name":"Fedora"}]}}`, + string(resp)) + + productsCalls2 := tracker.GetCount(productsHost) + assert.Equal(t, 0, productsCalls2, "Request 2 should skip products (L2 hit - same underlying root field)") + }) + + t.Run("L1+L2 combined - alias entity caching across both layers", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) + + // Request 1: alias on username, sameUserReviewers triggers L1 hit within request + // L2 is also populated on the first entity fetch + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { + topProducts { + reviews { + authorWithoutProvides { + id + userName: username + sameUserReviewers { + id + userName: username + } + } + } + } + }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + 
assert.Equal(t, + `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1: accounts called once (sameUserReviewers skipped via L1)") + + // Request 2: same query without alias — L2 hit for User entity, no accounts calls + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { + topProducts { + reviews { + authorWithoutProvides { + id + username + sameUserReviewers { + id + username + } + } + } + } + }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2: accounts skipped (L2 hit from normalized cache)") + }) + + t.Run("L2 analytics - aliased root field", func(t *testing.T) { + const ( + keyTopProducts = `{"__typename":"Query","field":"topProducts"}` + dsProducts = "products" + byteSizeTopProducts = 53 + hashProductNameTrilby = uint64(1032923585965781586) + hashProductNameFedora = uint64(2432227032303632641) + ) + + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", 
FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Shared field hashes: Product.name for Trilby and Fedora from root field response + // Products are not entity-resolved (no @key fetch), so KeyRaw is empty + fieldHashes := []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: "{}"}, // xxhash("Trilby"), no entity key (root field) + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: "{}"}, // xxhash("Fedora"), no entity key (root field) + } + entityTypes := []resolve.EntityTypeInfo{ + {TypeName: "Product", Count: 2, UniqueKeys: 1}, // 2 products from root field, no entity keys + } + + // Request 1: aliased root field — L2 miss, populates cache + tracker.Reset() + query1 := `query { tp: topProducts { name } }` + resp, headers := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, `{"data":{"tp":[{"name":"Trilby"},{"name":"Fedora"}]}}`, string(resp)) + + // Cache key must use original field name "topProducts", NOT the alias "tp" + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // L2 miss: first request, cache empty + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: 
dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written after products fetch + }, + FieldHashes: fieldHashes, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Request 2: original root field (no alias) — L2 hit from Request 1 + tracker.Reset() + query2 := `query { topProducts { name } }` + resp, headers = gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby"},{"name":"Fedora"}]}}`, string(resp)) + + // Same cache key hit regardless of alias difference + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // L2 hit: populated by aliased Request 1 + }, + // No L2Writes: served from cache + FieldHashes: fieldHashes, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("L1 dedup - two aliases for same entity field in single request", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: false}), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) + + // Two aliases (a1, a2) for the same entity field (authorWithoutProvides) + // Both resolve the same User 1234 — second should be L1 hit + tracker.Reset() + query := `query { + topProducts { + reviews { + a1: authorWithoutProvides { + id + username + } + a2: 
authorWithoutProvides { + id + username + } + } + } + }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"reviews":[{"a1":{"id":"1234","username":"Me"},"a2":{"id":"1234","username":"Me"}}]},{"reviews":[{"a1":{"id":"1234","username":"Me"},"a2":{"id":"1234","username":"Me"}}]}]}}`, + string(resp)) + + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, "Should call accounts once (second alias L1 hit for same User entity)") + }) +} diff --git a/execution/engine/federation_caching_helpers_test.go b/execution/engine/federation_caching_helpers_test.go new file mode 100644 index 0000000000..0a922e5b2d --- /dev/null +++ b/execution/engine/federation_caching_helpers_test.go @@ -0,0 +1,866 @@ +package engine_test + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "path" + "sort" + "strings" + "sync" + "testing" + "time" + + "github.com/jensneuse/abstractlogger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting/gateway" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// subgraphCallTracker tracks HTTP requests made to subgraph servers +type subgraphCallTracker struct { + mu sync.RWMutex + counts map[string]int // Maps subgraph URL to call count + original http.RoundTripper +} + +func newSubgraphCallTracker(original http.RoundTripper) *subgraphCallTracker { + return &subgraphCallTracker{ + counts: make(map[string]int), + original: original, + } +} + +func (t *subgraphCallTracker) RoundTrip(req *http.Request) (*http.Response, error) { + t.mu.Lock() + host := req.URL.Host + t.counts[host]++ + t.mu.Unlock() + return t.original.RoundTrip(req) +} + +func (t 
*subgraphCallTracker) GetCount(url string) int {
	t.mu.RLock()
	defer t.mu.RUnlock()
	return t.counts[url]
}

// Reset clears all recorded per-host call counts.
func (t *subgraphCallTracker) Reset() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.counts = make(map[string]int)
}

// GetCounts returns a copy of the per-host call counts so callers can
// inspect them without racing against concurrent requests.
func (t *subgraphCallTracker) GetCounts() map[string]int {
	t.mu.RLock()
	defer t.mu.RUnlock()
	result := make(map[string]int)
	for k, v := range t.counts {
		result[k] = v
	}
	return result
}

// DebugPrint renders the current call counts for debug output.
func (t *subgraphCallTracker) DebugPrint() string {
	t.mu.RLock()
	defer t.mu.RUnlock()
	return fmt.Sprintf("%v", t.counts)
}

// Helper functions for gateway setup with HTTP client support.
// cachingGatewayOptions collects the knobs the caching test gateway accepts;
// it is populated via the functional options below.
type cachingGatewayOptions struct {
	enableART                    bool                           // enable Advanced Request Tracing
	withLoaderCache              map[string]resolve.LoaderCache // per-datasource L2 cache implementations
	httpClient                   *http.Client                   // client used for subgraph calls (lets tests inject a tracking transport)
	subgraphHeadersBuilder       resolve.SubgraphHeadersBuilder
	cachingOptions               resolve.CachingOptions
	subgraphEntityCachingConfigs engine.SubgraphCachingConfigs
	debugMode                    bool // when enabled, cache events carry Caller attribution
}

// withCachingEnableART toggles Advanced Request Tracing on the gateway.
func withCachingEnableART(enableART bool) func(*cachingGatewayOptions) {
	return func(opts *cachingGatewayOptions) {
		opts.enableART = enableART
	}
}

// withCachingLoaderCache installs per-datasource L2 loader caches.
func withCachingLoaderCache(loaderCache map[string]resolve.LoaderCache) func(*cachingGatewayOptions) {
	return func(opts *cachingGatewayOptions) {
		opts.withLoaderCache = loaderCache
	}
}

// withHTTPClient sets the HTTP client used for subgraph fetches; tests pass a
// client wrapping subgraphCallTracker to count upstream calls.
func withHTTPClient(client *http.Client) func(*cachingGatewayOptions) {
	return func(opts *cachingGatewayOptions) {
		opts.httpClient = client
	}
}

// withSubgraphHeadersBuilder sets the builder that derives per-subgraph
// headers and header hashes.
func withSubgraphHeadersBuilder(builder resolve.SubgraphHeadersBuilder) func(*cachingGatewayOptions) {
	return func(opts *cachingGatewayOptions) {
		opts.subgraphHeadersBuilder = builder
	}
}

// withCachingOptionsFunc sets the resolver-level caching options (L1/L2 toggles).
func withCachingOptionsFunc(cachingOpts resolve.CachingOptions) func(*cachingGatewayOptions) {
	return func(opts *cachingGatewayOptions) {
		opts.cachingOptions = cachingOpts
	}
}

// withSubgraphEntityCachingConfigs sets per-subgraph entity caching configuration.
func withSubgraphEntityCachingConfigs(configs engine.SubgraphCachingConfigs) func(*cachingGatewayOptions) {
	return func(opts *cachingGatewayOptions) {
		opts.subgraphEntityCachingConfigs = configs
	}
}

// withDebugMode toggles debug mode on the caching gateway handler.
func withDebugMode(enabled bool) func(*cachingGatewayOptions) {
	return func(opts *cachingGatewayOptions) {
		opts.debugMode = enabled
	}
}

type cachingGatewayOptionsToFunc func(opts *cachingGatewayOptions)

// addCachingGateway builds the gateway-constructor hook consumed by
// federationtesting.NewFederationSetup: it wires the three upstream subgraphs
// (accounts, products, reviews) into a caching-enabled gateway handler and
// serves it via httptest.
func addCachingGateway(options ...cachingGatewayOptionsToFunc) func(setup *federationtesting.FederationSetup) *httptest.Server {
	opts := &cachingGatewayOptions{}
	for _, option := range options {
		option(opts)
	}
	return func(setup *federationtesting.FederationSetup) *httptest.Server {
		httpClient := opts.httpClient
		if httpClient == nil {
			httpClient = http.DefaultClient
		}

		poller := gateway.NewDatasource([]gateway.ServiceConfig{
			{Name: "accounts", URL: setup.AccountsUpstreamServer.URL},
			// Products additionally exposes a websocket endpoint (http: -> ws:).
			{Name: "products", URL: setup.ProductsUpstreamServer.URL, WS: strings.ReplaceAll(setup.ProductsUpstreamServer.URL, "http:", "ws:")},
			{Name: "reviews", URL: setup.ReviewsUpstreamServer.URL},
		}, httpClient)

		gtw := gateway.HandlerWithCaching(abstractlogger.NoopLogger, poller, httpClient, opts.enableART, opts.withLoaderCache, opts.subgraphHeadersBuilder, opts.cachingOptions, opts.subgraphEntityCachingConfigs, opts.debugMode)

		// Give the poller up to one second to fetch upstream schemas before serving.
		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
		defer cancel()

		poller.Run(ctx)
		return httptest.NewServer(gtw)
	}
}

// mockSubgraphHeadersBuilder is a mock implementation of SubgraphHeadersBuilder
type mockSubgraphHeadersBuilder struct {
	hashes map[string]uint64
}

// HeadersForSubgraph returns no headers plus the configured hash for the
// subgraph; unknown subgraphs fall back to the fixed sentinel hash 99999.
func (m *mockSubgraphHeadersBuilder) HeadersForSubgraph(subgraphName string) (http.Header, uint64) {
	hash := m.hashes[subgraphName]
	if hash == 0 {
		// Return default hash if not found
		return nil, 99999
	}
	return nil, hash
}

// HashAll XORs all per-subgraph hashes into one combined hash.
// XOR is order-independent, so random map iteration order does not matter.
func (m *mockSubgraphHeadersBuilder) HashAll() uint64 {
	// Return a simple hash of all subgraph hashes combined
	var result uint64
	for _, hash := range m.hashes {
		result ^= hash
	}
	return result
}

// cachingTestQueryPath resolves a testdata file name relative to this package.
func cachingTestQueryPath(name string) string {
	return path.Join("..", "federationtesting", "testdata", name)
}

// CacheLogEntry records one operation observed by FakeLoaderCache.
type CacheLogEntry struct {
	Operation string   // "get", "set", "delete"
	Keys      []string // Keys involved in the operation
	Hits      []bool   // For Get: whether each key was a hit (true) or miss (false)
	Caller    string   // Fetch identity when debug enabled: "accounts: entity(User)" or "products: rootField(Query.topProducts)"
}

// sortCacheLogKeys sorts the keys (and corresponding hits) in each cache log entry.
// This makes comparisons order-independent when multiple keys are present.
// Caller is intentionally stripped — it's for debug logging, not assertions.
func sortCacheLogKeys(log []CacheLogEntry) []CacheLogEntry {
	sorted := make([]CacheLogEntry, len(log))
	for i, entry := range log {
		// Only sort if there are multiple keys
		if len(entry.Keys) <= 1 {
			sorted[i] = CacheLogEntry{
				Operation: entry.Operation,
				Keys:      entry.Keys,
				Hits:      entry.Hits,
			}
			continue
		}

		// Create pairs of (key, hit) to sort together
		pairs := make([]struct {
			key string
			hit bool
		}, len(entry.Keys))
		for j := range entry.Keys {
			pairs[j].key = entry.Keys[j]
			if entry.Hits != nil && j < len(entry.Hits) {
				pairs[j].hit = entry.Hits[j]
			}
		}

		// Sort pairs by key
		sort.Slice(pairs, func(a, b int) bool {
			return pairs[a].key < pairs[b].key
		})

		// Extract sorted keys and hits
		sorted[i] = CacheLogEntry{
			Operation: entry.Operation,
			Keys:      make([]string, len(pairs)),
			Hits:      nil,
		}
		if len(entry.Hits) > 0 {
			sorted[i].Hits = make([]bool, len(pairs))
		}
		for j := range pairs {
			sorted[i].Keys[j] = pairs[j].key
			if sorted[i].Hits != nil {
				sorted[i].Hits[j] = pairs[j].hit
			}
		}
	}
	return sorted
}

// sortCacheLogKeysWithCaller is like sortCacheLogKeys but preserves the Caller field.
// Use this when you want assertions to verify which Loader method chain triggered each cache event.
func sortCacheLogKeysWithCaller(log []CacheLogEntry) []CacheLogEntry {
	sorted := make([]CacheLogEntry, len(log))
	for i, entry := range log {
		if len(entry.Keys) <= 1 {
			sorted[i] = CacheLogEntry{
				Operation: entry.Operation,
				Keys:      entry.Keys,
				Hits:      entry.Hits,
				Caller:    entry.Caller,
			}
			continue
		}

		// Pair keys with their hit flags so they sort together.
		pairs := make([]struct {
			key string
			hit bool
		}, len(entry.Keys))
		for j := range entry.Keys {
			pairs[j].key = entry.Keys[j]
			if entry.Hits != nil && j < len(entry.Hits) {
				pairs[j].hit = entry.Hits[j]
			}
		}
		sort.Slice(pairs, func(a, b int) bool {
			return pairs[a].key < pairs[b].key
		})
		sorted[i] = CacheLogEntry{
			Operation: entry.Operation,
			Keys:      make([]string, len(pairs)),
			Hits:      nil,
			Caller:    entry.Caller,
		}
		if len(entry.Hits) > 0 {
			sorted[i].Hits = make([]bool, len(pairs))
		}
		for j := range pairs {
			sorted[i].Keys[j] = pairs[j].key
			if sorted[i].Hits != nil {
				sorted[i].Hits[j] = pairs[j].hit
			}
		}
	}
	return sorted
}

// cacheEntry is one stored value plus optional expiry.
type cacheEntry struct {
	data      []byte
	expiresAt *time.Time // nil means no expiration
}

// FakeLoaderCache is an in-memory LoaderCache used as the L2 cache in tests.
// It records every Get/Set/Delete in a log so tests can assert on the exact
// sequence of cache operations.
type FakeLoaderCache struct {
	mu      sync.RWMutex
	storage map[string]cacheEntry
	log     []CacheLogEntry
}

// NewFakeLoaderCache returns an empty, ready-to-use FakeLoaderCache.
func NewFakeLoaderCache() *FakeLoaderCache {
	return &FakeLoaderCache{
		storage: make(map[string]cacheEntry),
		log:     make([]CacheLogEntry, 0),
	}
}

// cleanupExpired drops entries whose TTL has elapsed.
// Callers must hold f.mu for writing.
func (f *FakeLoaderCache) cleanupExpired() {
	now := time.Now()
	for key, entry := range f.storage {
		if entry.expiresAt != nil && now.After(*entry.expiresAt) {
			delete(f.storage, key)
		}
	}
}

// Get looks up each key and returns a slice of the same length as keys, with
// nil at positions that missed. The operation (with per-key hit flags and the
// fetch identity from ctx, when present) is appended to the log.
// Note: takes the write lock (not RLock) because cleanupExpired mutates storage.
func (f *FakeLoaderCache) Get(ctx context.Context, keys []string) ([]*resolve.CacheEntry, error) {
	f.mu.Lock()
	defer f.mu.Unlock()

	// Clean up expired entries before executing command
	f.cleanupExpired()

	hits := make([]bool, len(keys))
	result := make([]*resolve.CacheEntry, len(keys))
	for i, key := range keys {
		if entry, exists := f.storage[key]; exists {
			// Make a copy of the data to prevent external modifications
			dataCopy := make([]byte, len(entry.data))
			copy(dataCopy, entry.data)
			ce := &resolve.CacheEntry{
				Key:   key,
				Value: dataCopy,
			}
			// Populate RemainingTTL from expiresAt for cache age analytics
			if entry.expiresAt != nil {
				remaining := time.Until(*entry.expiresAt)
				if remaining > 0 {
					ce.RemainingTTL = remaining
				}
			}
			result[i] = ce
			hits[i] = true
		} else {
			result[i] = nil
			hits[i] = false
		}
	}

	// Log the operation
	caller := ""
	if cfi := resolve.GetCacheFetchInfo(ctx); cfi != nil {
		caller = cfi.String()
	}
	f.log = append(f.log, CacheLogEntry{
		Operation: "get",
		Keys:      keys,
		Hits:      hits,
		Caller:    caller,
	})

	return result, nil
}

// Set stores the given entries (nil entries are skipped) with an optional TTL
// — ttl == 0 means no expiration — and logs the operation.
func (f *FakeLoaderCache) Set(ctx context.Context, entries []*resolve.CacheEntry, ttl time.Duration) error {
	if len(entries) == 0 {
		return nil
	}

	f.mu.Lock()
	defer f.mu.Unlock()

	// Clean up expired entries before executing command
	f.cleanupExpired()

	keys := make([]string, 0, len(entries))
	for _, entry := range entries {
		if entry == nil {
			continue
		}
		// NOTE(review): this variable shadows the cacheEntry *type* for the
		// rest of the loop body; it compiles, but a rename (e.g. ce) would be clearer.
		cacheEntry := cacheEntry{
			// Make a copy of the data to prevent external modifications
			data: make([]byte, len(entry.Value)),
		}
		copy(cacheEntry.data, entry.Value)

		// If ttl is 0, store without expiration
		if ttl > 0 {
			expiresAt := time.Now().Add(ttl)
			cacheEntry.expiresAt = &expiresAt
		}

		f.storage[entry.Key] = cacheEntry
		keys = append(keys, entry.Key)
	}

	// Log the operation
	caller := ""
	if cfi := resolve.GetCacheFetchInfo(ctx); cfi != nil {
		caller = cfi.String()
	}
	f.log = append(f.log, CacheLogEntry{
		Operation: "set",
		Keys:      keys,
		Hits:      nil, // Set operations don't have hits/misses
		Caller:    caller,
	})

	return nil
}

// Delete removes the given keys (missing keys are a no-op) and logs the operation.
func (f *FakeLoaderCache) Delete(ctx context.Context, keys []string) error {
	f.mu.Lock()
	defer f.mu.Unlock()

	// Clean up expired entries before executing command
	f.cleanupExpired()

	for _, key := range keys {
		delete(f.storage, key)
	}

	// Log the operation
	caller := ""
	if cfi := resolve.GetCacheFetchInfo(ctx); cfi != nil {
		caller = cfi.String()
	}
	f.log = append(f.log, CacheLogEntry{
		Operation: "delete",
		Keys:      keys,
		Hits:      nil, // Delete operations don't have hits/misses
		Caller:    caller,
	})

	return nil
}

// GetLog returns a copy of the cache operation log
func (f *FakeLoaderCache) GetLog() []CacheLogEntry {
	f.mu.RLock()
	defer f.mu.RUnlock()
	logCopy := make([]CacheLogEntry, len(f.log))
	copy(logCopy, f.log)
	return logCopy
}

// GetLogWithCaller returns a copy of the cache operation log with Caller populated.
// Use this with sortCacheLogKeysWithCaller to assert on both operation details and
// the Loader method chain that triggered each cache event.
// NOTE(review): currently identical to GetLog — the shallow copy carries Caller
// either way; the two names only signal intent at call sites.
func (f *FakeLoaderCache) GetLogWithCaller() []CacheLogEntry {
	f.mu.RLock()
	defer f.mu.RUnlock()
	logCopy := make([]CacheLogEntry, len(f.log))
	copy(logCopy, f.log)
	return logCopy
}

// ClearLog clears the cache operation log
func (f *FakeLoaderCache) ClearLog() {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.log = make([]CacheLogEntry, 0)
}

// TestFakeLoaderCache tests the cache implementation itself
func TestFakeLoaderCache(t *testing.T) {
	ctx := context.Background()
	cache := NewFakeLoaderCache()

	t.Run("SetAndGet", func(t *testing.T) {
		// Test basic set and get
		keys := []string{"key1", "key2", "key3"}
		entries := []*resolve.CacheEntry{
			{Key: "key1", Value: []byte("value1")},
			{Key: "key2", Value: []byte("value2")},
			{Key: "key3", Value: []byte("value3")},
		}

		err := cache.Set(ctx, entries, 0) // No TTL
		require.NoError(t, err)

		// Get all keys
		result, err := cache.Get(ctx, keys)
		require.NoError(t, err)
		require.Len(t, result, 3)
		assert.NotNil(t, result[0])
		assert.Equal(t, "value1", string(result[0].Value))
		assert.NotNil(t, result[1])
		assert.Equal(t, "value2", string(result[1].Value))
		assert.NotNil(t, result[2])
		assert.Equal(t, "value3", string(result[2].Value))

		// Get partial keys
		result, err = cache.Get(ctx, []string{"key2", "key4", "key1"})
		require.NoError(t, err)
		require.Len(t, result, 3)
		assert.NotNil(t, result[0])
		assert.Equal(t, "value2", string(result[0].Value))
		assert.Nil(t, result[1]) // key4 doesn't exist
		assert.NotNil(t, result[2])
		assert.Equal(t, "value1", string(result[2].Value))
	})

	t.Run("Delete", func(t *testing.T) {
		// Set some keys
		entries := []*resolve.CacheEntry{
			{Key: "del1", Value: []byte("v1")},
			{Key: "del2", Value: []byte("v2")},
			{Key: "del3", Value: []byte("v3")},
		}
		err := cache.Set(ctx, entries, 0)
		require.NoError(t, err)

		// Delete some keys
		err = cache.Delete(ctx, []string{"del1", "del3"})
		require.NoError(t, err)

		// Check remaining keys
		result, err := cache.Get(ctx, []string{"del1", "del2", "del3"})
		require.NoError(t, err)
		assert.Nil(t, result[0])    // del1 was deleted
		assert.NotNil(t, result[1]) // del2 still exists
		assert.Equal(t, "v2", string(result[1].Value))
		assert.Nil(t, result[2]) // del3 was deleted
	})

	t.Run("TTL", func(t *testing.T) {
		// Set with 50ms TTL
		entries := []*resolve.CacheEntry{
			{Key: "ttl1", Value: []byte("expire1")},
			{Key: "ttl2", Value: []byte("expire2")},
		}
		err := cache.Set(ctx, entries, 50*time.Millisecond)
		require.NoError(t, err)

		// Immediately get - should exist
		result, err := cache.Get(ctx, []string{"ttl1", "ttl2"})
		require.NoError(t, err)
		assert.NotNil(t, result[0])
		assert.Equal(t, "expire1", string(result[0].Value))
		assert.NotNil(t, result[1])
		assert.Equal(t, "expire2", string(result[1].Value))

		// Wait for expiration
		time.Sleep(60 * time.Millisecond)

		// Get again - should be nil
		result, err = cache.Get(ctx, []string{"ttl1", "ttl2"})
		require.NoError(t, err)
		assert.Nil(t, result[0])
		assert.Nil(t, result[1])
	})

	t.Run("MixedTTL", func(t *testing.T) {
		// Set some with TTL, some without
		err := cache.Set(ctx, []*resolve.CacheEntry{{Key: "perm1", Value: []byte("permanent")}}, 0)
		require.NoError(t, err)

		err = cache.Set(ctx, []*resolve.CacheEntry{{Key: "temp1", Value: []byte("temporary")}}, 50*time.Millisecond)
		require.NoError(t, err)

		// Wait for temporary to expire
		time.Sleep(60 * time.Millisecond)

		// Check both
		result, err := cache.Get(ctx, []string{"perm1", "temp1"})
		require.NoError(t, err)
		assert.NotNil(t, result[0])
		assert.Equal(t, "permanent", string(result[0].Value)) // Still exists
		assert.Nil(t, result[1])                              // Expired
	})

	t.Run("ThreadSafety", func(t *testing.T) {
		// Test concurrent access; run the test binary with -race to benefit.
		done := make(chan bool)

		// Writer goroutine
		go func() {
			for i := 0; i < 100; i++ {
				key := fmt.Sprintf("concurrent_%d", i)
				value := fmt.Sprintf("value_%d", i)
				err := cache.Set(ctx, []*resolve.CacheEntry{{Key: key, Value: []byte(value)}}, 0)
				assert.NoError(t, err)
			}
			done <- true
		}()

		// Reader goroutine
		go func() {
			for i := 0; i < 100; i++ {
				key := fmt.Sprintf("concurrent_%d", i%50)
				_, err := cache.Get(ctx, []string{key})
				assert.NoError(t, err)
			}
			done <- true
		}()

		// Deleter goroutine
		go func() {
			for i := 0; i < 50; i++ {
				key := fmt.Sprintf("concurrent_%d", i*2)
				err := cache.Delete(ctx, []string{key})
				assert.NoError(t, err)
			}
			done <- true
		}()

		// Wait for all goroutines
		<-done
		<-done
		<-done
	})

	t.Run("ResultLengthMatchesKeysLength", func(t *testing.T) {
		// Test that result length always matches input keys length

		// Set some data
		err := cache.Set(ctx, []*resolve.CacheEntry{
			{Key: "exist1", Value: []byte("data1")},
			{Key: "exist3", Value: []byte("data3")},
		}, 0)
		require.NoError(t, err)

		// Request mix of existing and non-existing keys
		keys := []string{"exist1", "missing1", "exist3", "missing2", "missing3"}
		result, err := cache.Get(ctx, keys)
		require.NoError(t, err)

		// Verify length matches exactly
		assert.Len(t, result, len(keys), "Result length must match keys length")
		assert.Len(t, result, 5, "Should return exactly 5 results")

		// Verify correct values
		assert.NotNil(t, result[0])
		assert.Equal(t, "data1", string(result[0].Value)) // exist1
		assert.Nil(t, result[1])                          // missing1
		assert.NotNil(t, result[2])
		assert.Equal(t, "data3", string(result[2].Value)) // exist3
		assert.Nil(t, result[3])                          // missing2
		assert.Nil(t, result[4])                          // missing3

		// Test with all missing keys
		allMissingKeys := []string{"missing4", "missing5", "missing6"}
		result, err = cache.Get(ctx, allMissingKeys)
		require.NoError(t, err)
		assert.Len(t, result, 3, "Should return 3 results for 3 keys")
		assert.Nil(t, result[0])
		assert.Nil(t, result[1])
		assert.Nil(t, result[2])

		// Test with empty keys
		result, err = cache.Get(ctx, []string{})
		require.NoError(t, err)
		assert.Len(t, result, 0, "Should return empty slice for empty keys")
	})
}

// =============================================================================
// L1/L2 CACHE END-TO-END TESTS
// =============================================================================
//
// These tests verify the L1 (per-request in-memory) and L2 (external cross-request)
// caching behavior in a federated GraphQL setup.
//
// L1 Cache: Prevents redundant fetches for the same entity within a single request
// L2 Cache: Shares entity data across requests via external cache (e.g., Redis)
//
// Lookup Order (entity fetches): L1 -> L2 -> Subgraph Fetch
// Lookup Order (root fetches): L2 -> Subgraph Fetch (no L1)

// parseCacheAnalytics decodes the X-Cache-Analytics response header into a
// CacheAnalyticsSnapshot, failing the test if the header is absent or not valid JSON.
func parseCacheAnalytics(t *testing.T, headers http.Header) resolve.CacheAnalyticsSnapshot {
	t.Helper()
	raw := headers.Get("X-Cache-Analytics")
	require.NotEmpty(t, raw, "X-Cache-Analytics header should be present")
	var snap resolve.CacheAnalyticsSnapshot
	err := json.Unmarshal([]byte(raw), &snap)
	require.NoError(t, err, "X-Cache-Analytics header should be valid JSON")
	return snap
}

// normalizeSnapshot makes a CacheAnalyticsSnapshot deterministically comparable by
// sorting EntityTypes, L1Reads, L2Reads, L1Writes, L2Writes, and FieldHashes.
// It also zeroes timing fields that vary between runs (CacheAgeMs, FetchTimings)
// and collapses empty slices to nil so literals compare equal to decoded JSON.
// snap is received by value, but each sorted field is re-sliced into a fresh
// copy so the caller's slices are never reordered in place.
func normalizeSnapshot(snap resolve.CacheAnalyticsSnapshot) resolve.CacheAnalyticsSnapshot {
	// Sort EntityTypes by TypeName
	if snap.EntityTypes != nil {
		sorted := make([]resolve.EntityTypeInfo, len(snap.EntityTypes))
		copy(sorted, snap.EntityTypes)
		sort.Slice(sorted, func(i, j int) bool {
			return sorted[i].TypeName < sorted[j].TypeName
		})
		snap.EntityTypes = sorted
	}

	// Sort L1Reads and zero out non-deterministic CacheAgeMs
	if snap.L1Reads != nil {
		sorted := make([]resolve.CacheKeyEvent, len(snap.L1Reads))
		copy(sorted, snap.L1Reads)
		for i := range sorted {
			sorted[i].CacheAgeMs = 0
		}
		sort.Slice(sorted, func(i, j int) bool {
			if sorted[i].CacheKey != sorted[j].CacheKey {
				return sorted[i].CacheKey < sorted[j].CacheKey
			}
			return sorted[i].Kind < sorted[j].Kind
		})
		snap.L1Reads = sorted
	}

	// Sort L2Reads and zero out non-deterministic CacheAgeMs
	if snap.L2Reads != nil {
		sorted := make([]resolve.CacheKeyEvent, len(snap.L2Reads))
		copy(sorted, snap.L2Reads)
		for i := range sorted {
			sorted[i].CacheAgeMs = 0
		}
		sort.Slice(sorted, func(i, j int) bool {
			if sorted[i].CacheKey != sorted[j].CacheKey {
				return sorted[i].CacheKey < sorted[j].CacheKey
			}
			return sorted[i].Kind < sorted[j].Kind
		})
		snap.L2Reads = sorted
	}

	// Sort L1Writes
	if snap.L1Writes != nil {
		sorted := make([]resolve.CacheWriteEvent, len(snap.L1Writes))
		copy(sorted, snap.L1Writes)
		sort.Slice(sorted, func(i, j int) bool {
			if sorted[i].CacheKey != sorted[j].CacheKey {
				return sorted[i].CacheKey < sorted[j].CacheKey
			}
			return sorted[i].CacheLevel < sorted[j].CacheLevel
		})
		snap.L1Writes = sorted
	}

	// Sort L2Writes
	if snap.L2Writes != nil {
		sorted := make([]resolve.CacheWriteEvent, len(snap.L2Writes))
		copy(sorted, snap.L2Writes)
		sort.Slice(sorted, func(i, j int) bool {
			if sorted[i].CacheKey != sorted[j].CacheKey {
				return sorted[i].CacheKey < sorted[j].CacheKey
			}
			return sorted[i].CacheLevel < sorted[j].CacheLevel
		})
		snap.L2Writes = sorted
	}

	// Sort FieldHashes for deterministic comparison
	if snap.FieldHashes != nil {
		sorted := make([]resolve.EntityFieldHash, len(snap.FieldHashes))
		copy(sorted, snap.FieldHashes)
		sort.Slice(sorted, func(i, j int) bool {
			if sorted[i].EntityType != sorted[j].EntityType {
				return sorted[i].EntityType < sorted[j].EntityType
			}
			if sorted[i].FieldName != sorted[j].FieldName {
				return sorted[i].FieldName < sorted[j].FieldName
			}
			if sorted[i].KeyRaw != sorted[j].KeyRaw {
				return sorted[i].KeyRaw < sorted[j].KeyRaw
			}
			if sorted[i].KeyHash != sorted[j].KeyHash {
				return sorted[i].KeyHash < sorted[j].KeyHash
			}
			return sorted[i].FieldHash < sorted[j].FieldHash
		})
		snap.FieldHashes = sorted
	}

	// Sort ShadowComparisons by CacheKey and zero out non-deterministic CacheAgeMs
	if snap.ShadowComparisons != nil {
		sorted := make([]resolve.ShadowComparisonEvent, len(snap.ShadowComparisons))
		copy(sorted, snap.ShadowComparisons)
		for i := range sorted {
			sorted[i].CacheAgeMs = 0
		}
		sort.Slice(sorted, func(i, j int) bool {
			if sorted[i].CacheKey != sorted[j].CacheKey {
				return sorted[i].CacheKey < sorted[j].CacheKey
			}
			return sorted[i].EntityType < sorted[j].EntityType
		})
		snap.ShadowComparisons = sorted
	}

	// Sort MutationEvents for deterministic comparison
	if snap.MutationEvents != nil {
		sorted := make([]resolve.MutationEvent, len(snap.MutationEvents))
		copy(sorted, snap.MutationEvents)
		sort.Slice(sorted, func(i, j int) bool {
			if sorted[i].MutationRootField != sorted[j].MutationRootField {
				return sorted[i].MutationRootField < sorted[j].MutationRootField
			}
			return sorted[i].EntityCacheKey < sorted[j].EntityCacheKey
		})
		snap.MutationEvents = sorted
	}

	// Zero out non-deterministic FetchTimings (DurationMs varies between runs)
	// Use normalizeFetchTimings() when you need to assert FetchTimings fields.
	snap.FetchTimings = nil

	// Normalize empty slices to nil for consistent comparison
	// (JSON unmarshalling produces empty slices, expected literals produce nil)
	if len(snap.L1Reads) == 0 {
		snap.L1Reads = nil
	}
	if len(snap.L2Reads) == 0 {
		snap.L2Reads = nil
	}
	if len(snap.L1Writes) == 0 {
		snap.L1Writes = nil
	}
	if len(snap.L2Writes) == 0 {
		snap.L2Writes = nil
	}
	if len(snap.EntityTypes) == 0 {
		snap.EntityTypes = nil
	}
	if len(snap.FieldHashes) == 0 {
		snap.FieldHashes = nil
	}
	if len(snap.ErrorEvents) == 0 {
		snap.ErrorEvents = nil
	}
	if len(snap.ShadowComparisons) == 0 {
		snap.ShadowComparisons = nil
	}
	if len(snap.MutationEvents) == 0 {
		snap.MutationEvents = nil
	}

	return snap
}

// normalizeFetchTimings sorts FetchTimings deterministically and zeros DurationMs
// (the only non-deterministic field). Unlike normalizeSnapshot, this preserves
// all other fields (HTTPStatusCode, ResponseBytes, etc.) for assertion.
func normalizeFetchTimings(timings []resolve.FetchTimingEvent) []resolve.FetchTimingEvent {
	sorted := make([]resolve.FetchTimingEvent, len(timings))
	copy(sorted, timings)
	for i := range sorted {
		sorted[i].DurationMs = 0
	}
	sort.Slice(sorted, func(i, j int) bool {
		if sorted[i].DataSource != sorted[j].DataSource {
			return sorted[i].DataSource < sorted[j].DataSource
		}
		return sorted[i].Source < sorted[j].Source
	})
	return sorted
}

// mustParseHost extracts the host:port of a raw URL, panicking on a parse
// failure. Panic is acceptable here: tests always pass valid httptest URLs.
func mustParseHost(rawURL string) string {
	parsed, err := url.Parse(rawURL)
	if err != nil {
		panic(fmt.Sprintf("failed to parse URL %q: %v", rawURL, err))
	}
	return parsed.Host
}
diff --git a/execution/engine/federation_caching_l1_test.go b/execution/engine/federation_caching_l1_test.go
new file mode 100644
index 0000000000..5b11cdacb4
--- /dev/null
+++ b/execution/engine/federation_caching_l1_test.go
@@ -0,0 +1,1060 @@
package engine_test

import (
	"context"
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/wundergraph/graphql-go-tools/execution/federationtesting"
	"github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve"
)

func TestL1CacheReducesHTTPCalls(t *testing.T) {
	// This test demonstrates L1 cache behavior with entity fetches.
	//
	// Query structure:
	// - me: root query to accounts service → returns User 1234 {id, username}
	// - me.reviews: entity fetch from reviews service → returns reviews
	// - me.reviews.product: entity fetch from products service → returns products
	// - me.reviews.product.reviews: entity fetch from reviews service → returns reviews
	// - me.reviews.product.reviews.authorWithoutProvides: entity fetch from accounts → returns User 1234
	//
	// Note: The `me` root query does NOT populate L1 cache because L1 cache only works
	// for entity fetches (RequiresEntityFetch=true). Root queries don't qualify.
	//
	// NOTE(review): earlier prose here claimed that with L1 enabled both `me`
	// and `authorWithoutProvides` still hit accounts (2 calls), but the subtest
	// below asserts exactly 1 accounts call with L1 enabled vs 2 with it
	// disabled — presumably the entity fetch for authorWithoutProvides is
	// served from cache. Confirm which description is current.
	//
	// L1 cache DOES help when the same entity is fetched multiple times through
	// entity fetches within a single request (e.g., self-referential entities).

	query := `query {
		me {
			id
			username
			reviews {
				body
				product {
					upc
					reviews {
						authorWithoutProvides {
							id
							username
						}
					}
				}
			}
		}
	}`

	expectedResponse := `{"data":{"me":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control.","product":{"upc":"top-1","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","product":{"upc":"top-2","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}}]}}}`

	t.Run("L1 enabled - entity fetches use L1 cache", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: true,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		accountsHost := accountsURLParsed.Host

		tracker.Reset()
		out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)

		assert.Equal(t, expectedResponse, string(out))

		// NOTE(review): the comment that accompanied this assertion claimed
		// "both fetches call accounts", yet the expected count is 1 — the
		// assertion (and the L1-disabled subtest expecting 2) suggests the
		// entity fetch is served from cache here. Verify and fix the message.
		accountsCalls := tracker.GetCount(accountsHost)
		assert.Equal(t, 1, accountsCalls,
			"Both me (root query) and authorWithoutProvides (entity fetch) call accounts")
	})

	t.Run("L1 disabled - more accounts calls without cache", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: false,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		accountsHost := accountsURLParsed.Host

		tracker.Reset()
		out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)

		assert.Equal(t, expectedResponse, string(out))

		// KEY ASSERTION: With L1 disabled, 2 accounts calls!
		// The authorWithoutProvides.username requires another fetch since L1 is disabled.
		accountsCalls := tracker.GetCount(accountsHost)
		assert.Equal(t, 2, accountsCalls,
			"With L1 disabled, should make 2 accounts calls (no cache reuse)")
	})
}

func TestL1CacheReducesHTTPCallsInterface(t *testing.T) {
	// This test demonstrates L1 cache behavior with interface return types.
	//
	// Query structure:
	// - meInterface: root query to accounts service → returns User 1234 via Identifiable interface
	// - meInterface.reviews: entity fetch from reviews service → returns reviews
	// - meInterface.reviews.product: entity fetch from products service → returns products
	// - meInterface.reviews.product.reviews: entity fetch from reviews service → returns reviews
	// - meInterface.reviews.product.reviews.authorWithoutProvides: entity fetch from accounts → returns User 1234
	//
	// This tests that interface return types properly build cache key templates
	// for all entity types that implement the interface.

	query := `query {
		meInterface {
			... on User {
				id
				username
				reviews {
					body
					product {
						upc
						reviews {
							authorWithoutProvides {
								id
								username
							}
						}
					}
				}
			}
		}
	}`

	expectedResponse := `{"data":{"meInterface":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control.","product":{"upc":"top-1","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","product":{"upc":"top-2","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}}]}}}`

	t.Run("L1 enabled - interface entity fetches use L1 cache", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: true,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		accountsHost := accountsURLParsed.Host

		tracker.Reset()
		out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)

		assert.Equal(t, expectedResponse, string(out))

		// Same count as the object-typed test above (see TestL1CacheReducesHTTPCalls).
		accountsCalls := tracker.GetCount(accountsHost)
		assert.Equal(t, 1, accountsCalls,
			"Interface field should behave same as object field for L1 caching")
	})

	t.Run("L1 disabled - more accounts calls without cache", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: false,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		accountsHost := accountsURLParsed.Host

		tracker.Reset()
		out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)

		assert.Equal(t, expectedResponse, string(out))

		// KEY ASSERTION: With L1 disabled, 2 accounts calls!
		accountsCalls := tracker.GetCount(accountsHost)
		assert.Equal(t, 2, accountsCalls,
			"With L1 disabled, should make 2 accounts calls (no cache reuse)")
	})
}

func TestL1CacheReducesHTTPCallsUnion(t *testing.T) {
	// This test demonstrates L1 cache behavior with union return types.
	//
	// Query structure:
	// - meUnion: root query to accounts service → returns User 1234 via MeUnion union
	// - meUnion.reviews: entity fetch from reviews service → returns reviews
	// - meUnion.reviews.product: entity fetch from products service → returns products
	// - meUnion.reviews.product.reviews: entity fetch from reviews service → returns reviews
	// - meUnion.reviews.product.reviews.authorWithoutProvides: entity fetch from accounts → returns User 1234
	//
	// This tests that union return types properly build cache key templates
	// for all entity types that are members of the union.

	query := `query {
		meUnion {
			... on User {
				id
				username
				reviews {
					body
					product {
						upc
						reviews {
							authorWithoutProvides {
								id
								username
							}
						}
					}
				}
			}
		}
	}`

	expectedResponse := `{"data":{"meUnion":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control.","product":{"upc":"top-1","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","product":{"upc":"top-2","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}}]}}}`

	t.Run("L1 enabled - union entity fetches use L1 cache", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: true,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		accountsHost := accountsURLParsed.Host

		tracker.Reset()
		out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)

		assert.Equal(t, expectedResponse, string(out))

		// Same count as the object-typed test (see TestL1CacheReducesHTTPCalls).
		accountsCalls := tracker.GetCount(accountsHost)
		assert.Equal(t, 1, accountsCalls,
			"Union field should behave same as object field for L1 caching")
	})

	t.Run("L1 disabled - more accounts calls without cache", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: false,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		accountsHost := accountsURLParsed.Host

		tracker.Reset()
		out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)

		assert.Equal(t, expectedResponse, string(out))

		// KEY ASSERTION: With L1 disabled, 2 accounts calls!
		accountsCalls := tracker.GetCount(accountsHost)
		assert.Equal(t, 2, accountsCalls,
			"With L1 disabled, should make 2 accounts calls (no cache reuse)")
	})
}

func TestL1CacheSelfReferentialEntity(t *testing.T) {
	// This test verifies that self-referential entities don't cause
	// stack overflow when L1 cache is enabled.
	//
	// Background: When an entity type has a field that returns the same type
	// (e.g., User.sameUserReviewers returning [User]), and L1 cache stores
	// a pointer to the entity, both key.Item and key.FromCache can point to
	// the same memory location. Without a fix, calling MergeValues(ptr, ptr)
	// causes infinite recursion and stack overflow.
	//
	// The sameUserReviewers field has @requires(fields: "username") which forces
	// sequential execution: the User entity is first fetched from accounts
	// (populating L1), then sameUserReviewers is resolved, returning the same
	// User entity that's already in L1 cache.

	query := `query {
		topProducts {
			reviews {
				authorWithoutProvides {
					id
					username
					sameUserReviewers {
						id
						username
					}
				}
			}
		}
	}`

	// This response shows User 1234 appearing both at authorWithoutProvides level
	// and inside sameUserReviewers (which returns the same user for testing)
	expectedResponse := `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}`

	t.Run("self-referential entity should not cause stack overflow", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: true,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// This should complete without stack overflow
		// Before the fix, this would crash with "fatal error: stack overflow"
		out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)

		assert.Equal(t, expectedResponse, string(out))
	})
}

func TestL1CacheChildFieldEntityList(t *testing.T) {
	// This test verifies L1 cache behavior for
User.sameUserReviewers: [User!]! + // which returns only the same user (self-reference). + // + // sameUserReviewers is defined in the reviews subgraph with @requires(fields: "username"), + // which means: + // 1. The gateway first resolves username from accounts (entity fetch) + // 2. Then calls reviews to get sameUserReviewers + // 3. sameUserReviewers returns User references (just IDs) - only the same user + // 4. The gateway must make entity fetches to accounts to resolve those users + // + // Query flow: + // 1. topProducts -> products subgraph (root query) + // 2. reviews -> reviews subgraph (entity fetch for Products) + // 3. authorWithoutProvides -> accounts subgraph (entity fetch for User 1234) + // - User 1234 is fetched and stored in L1 + // 4. sameUserReviewers -> reviews subgraph (after username resolved) + // - Returns [User 1234] as reference (same user only) + // 5. Entity resolution for sameUserReviewers -> accounts subgraph + // - User 1234 is 100% L1 HIT (already fetched in step 3) + // - THE ENTIRE ACCOUNTS CALL IS SKIPPED! + // + // With L1 enabled: The sameUserReviewers entity fetch is completely skipped + // because all entities are already in L1 cache. 
+ + query := `query { + topProducts { + reviews { + authorWithoutProvides { + id + username + sameUserReviewers { + id + username + } + } + } + } + }` + + // User 1234's sameUserReviewers returns [User 1234] (only self) + expectedResponse := `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}` + + t.Run("L1 enabled - sameUserReviewers fetch entirely skipped via L1 cache", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, // Isolate L1 behavior + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // With L1 enabled: + // - First accounts call fetches User 1234 for authorWithoutProvides (L1 miss, stored) + // - Reviews called for sameUserReviewers (returns [User 1234] reference) + // - sameUserReviewers entity resolution: User 1234 is 100% L1 HIT + // → accounts call is COMPLETELY SKIPPED! 
+ accountsCalls := tracker.GetCount(accountsHost) + reviewsCalls := tracker.GetCount(reviewsHost) + + // Reviews should be called twice: once for Product entity (reviews field), + // once for sameUserReviewers (after username is resolved from accounts) + assert.Equal(t, 2, reviewsCalls, "Reviews subgraph called for Product.reviews and User.sameUserReviewers") + + // KEY ASSERTION: Only 1 accounts call! The sameUserReviewers entity resolution + // is completely skipped because User 1234 is already in L1 cache. + assert.Equal(t, 1, accountsCalls, + "With L1 enabled: only 1 accounts call (sameUserReviewers entity fetch skipped via L1)") + + }) + + t.Run("L1 disabled - accounts called for sameUserReviewers", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // With L1 disabled: + // - First accounts call fetches User 1234 for authorWithoutProvides + // - Second accounts call for sameUserReviewers: User 1234 fetched again (no L1) + // Total: 2 accounts calls + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 2, accountsCalls, + "With L1 disabled: 2 accounts calls (sameUserReviewers requires separate fetch)") + + }) +} + +func 
TestL1CacheNestedEntityListDeduplication(t *testing.T) { + // This test verifies L1 deduplication when the same entity appears + // at multiple levels in nested list queries using coReviewers. + // + // coReviewers is defined in the reviews subgraph with @requires(fields: "username"), + // so it triggers cross-subgraph entity resolution. + // + // Query flow: + // 1. topProducts -> products subgraph + // 2. reviews -> reviews subgraph (Product entity fetch) + // 3. authorWithoutProvides -> accounts (User 1234 fetched, stored in L1) + // 4. coReviewers -> reviews subgraph (after username resolved) + // - Returns [User 1234, User 7777] as references + // 5. Entity resolution for coReviewers -> accounts + // - User 1234 should be L1 HIT (already fetched in step 3) + // - User 7777 is L1 MISS (stored in L1) + // 6. coReviewers for User 1234 and User 7777 -> reviews subgraph + // 7. Entity resolution for nested coReviewers -> accounts + // - All users (1234, 7777) are already in L1! + // + // With L1 enabled: The nested coReviewers level should have 100% L1 hits, + // potentially skipping the accounts call entirely for that level. 
+ + query := `query { + topProducts { + reviews { + authorWithoutProvides { + id + username + coReviewers { + id + username + coReviewers { + id + username + } + } + } + } + } + }` + + // User 1234's coReviewers: [User 1234, User 7777] + // User 7777's coReviewers: [User 7777, User 1234] + // Nested level repeats these patterns + expectedResponse := `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me"},{"id":"7777","username":"User 7777"}]},{"id":"7777","username":"User 7777","coReviewers":[{"id":"7777","username":"User 7777"},{"id":"1234","username":"Me"}]}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me"},{"id":"7777","username":"User 7777"}]},{"id":"7777","username":"User 7777","coReviewers":[{"id":"7777","username":"User 7777"},{"id":"1234","username":"Me"}]}]}}]}]}}` + + t.Run("L1 enabled - nested coReviewers benefits from L1 hits", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // With L1 enabled: + // - Call 1: 
authorWithoutProvides fetches User 1234 (miss, stored) + // - Call 2: coReviewers entity resolution [User 1234 (hit), User 7777 (miss, stored)] + // - Call 3: nested coReviewers entity resolution - all users are in L1! + // This call should be fully served from L1 cache. + accountsCalls := tracker.GetCount(accountsHost) + // With L1 enabled, the nested coReviewers should be served from L1 + // Only 2 accounts calls needed because nested coReviewers is fully served from L1 + assert.Equal(t, 2, accountsCalls, + "With L1 enabled: exactly 2 accounts calls (nested coReviewers served entirely from L1)") + }) + + t.Run("L1 disabled - more accounts calls without deduplication", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // With L1 disabled: + // - Call 1: authorWithoutProvides fetches User 1234 + // - Call 2: coReviewers entity resolution for User 1234 and User 7777 (no L1 dedup) + // - Call 3: nested coReviewers entity resolution (no L1 dedup) + accountsCalls := tracker.GetCount(accountsHost) + // Without L1 cache, we need 3 accounts calls (no deduplication at nested level) + assert.Equal(t, 3, accountsCalls, + "With L1 disabled: exactly 3 accounts calls (no 
deduplication)") + }) +} + +func TestL1CacheRootFieldEntityListPopulation(t *testing.T) { + // This test verifies L1 cache behavior with a complex nested query starting + // from a root field that returns a list of entities. + // + // Query flow: + // 1. topProducts -> products subgraph (root query, returns list) + // 2. reviews -> reviews subgraph (entity fetch for Products) + // 3. authorWithoutProvides -> accounts subgraph (entity fetch for User 1234) + // - User 1234 is fetched and stored in L1 + // 4. sameUserReviewers -> reviews subgraph (after username resolved) + // - Returns [User 1234] as reference (same user only) + // 5. Entity resolution for sameUserReviewers -> accounts subgraph + // - User 1234 is 100% L1 HIT (already fetched in step 3) + // - THE ENTIRE ACCOUNTS CALL IS SKIPPED! + // + // With L1 enabled: The sameUserReviewers entity fetch is completely skipped. + // With L1 disabled: accounts is called twice (no deduplication). + + query := `query { + topProducts { + upc + name + reviews { + body + authorWithoutProvides { + id + username + sameUserReviewers { + id + username + } + } + } + } + }` + + expectedResponse := `{"data":{"topProducts":[{"upc":"top-1","name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"upc":"top-2","name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}` + + t.Run("L1 enabled - sameUserReviewers fetch skipped via L1 cache", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := 
federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow with L1 enabled: + // 1. products subgraph: topProducts root query + // 2. reviews subgraph: Product entity fetch for reviews + // 3. accounts subgraph: User entity fetch for authorWithoutProvides (User 1234 stored in L1) + // 4. reviews subgraph: sameUserReviewers (returns [User 1234]) + // 5. sameUserReviewers entity resolution: User 1234 is 100% L1 HIT → accounts call SKIPPED! + productsCalls := tracker.GetCount(productsHost) + reviewsCalls := tracker.GetCount(reviewsHost) + accountsCalls := tracker.GetCount(accountsHost) + + assert.Equal(t, 1, productsCalls, "Should call products subgraph once for topProducts") + assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice (Product.reviews + User.sameUserReviewers)") + // KEY ASSERTION: Only 1 accounts call! sameUserReviewers entity resolution skipped via L1. 
+ assert.Equal(t, 1, accountsCalls, + "With L1 enabled: only 1 accounts call (sameUserReviewers entity fetch skipped via L1)") + + }) + + t.Run("L1 disabled - more accounts calls without L1 optimization", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow with L1 disabled: + // 1. products subgraph: topProducts root query + // 2. reviews subgraph: Product entity fetch for reviews + // 3. accounts subgraph: User entity fetch for authorWithoutProvides + // 4. reviews subgraph: sameUserReviewers + // 5. accounts subgraph: User entity fetch for sameUserReviewers (no L1 → must fetch again!) 
+ productsCalls := tracker.GetCount(productsHost) + reviewsCalls := tracker.GetCount(reviewsHost) + accountsCalls := tracker.GetCount(accountsHost) + + assert.Equal(t, 1, productsCalls, "Should call products subgraph once") + assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice") + // KEY ASSERTION: 2 accounts calls without L1 optimization + assert.Equal(t, 2, accountsCalls, + "With L1 disabled: 2 accounts calls (sameUserReviewers requires separate fetch)") + + }) +} + +func TestL1CacheRootFieldNonEntityWithNestedEntities(t *testing.T) { + // This test verifies L1 cache behavior when a root field returns a NON-entity type + // (Review) that contains nested entities (User via authorWithoutProvides). + // + // Key difference from TestL1CacheRootFieldEntityListPopulation: + // - That test starts with topProducts -> [Product] where Product IS an entity (@key(fields: "upc")) + // - This test starts with topReviews -> [Review] where Review is NOT an entity (no @key) + // - Both prove L1 entity caching works for nested User entities + // + // Query flow: + // 1. topReviews -> reviews subgraph (root query, returns [Review] — NOT an entity) + // 2. authorWithoutProvides -> accounts subgraph (entity fetch for Users, stored in L1) + // 3. sameUserReviewers -> reviews subgraph (after username resolved via @requires) + // 4. Entity resolution for sameUserReviewers -> accounts subgraph + // - All Users are 100% L1 HITs (already fetched in step 2) + // - THE ENTIRE ACCOUNTS CALL IS SKIPPED! 
+ + query := `query { + topReviews { + body + authorWithoutProvides { + id + username + sameUserReviewers { + id + username + } + } + } + }` + + expectedResponse := `{"data":{"topReviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}},{"body":"This is the last straw. Hat you will wear. 11/10","authorWithoutProvides":{"id":"7777","username":"User 7777","sameUserReviewers":[{"id":"7777","username":"User 7777"}]}},{"body":"Perfect summer hat.","authorWithoutProvides":{"id":"5678","username":"User 5678","sameUserReviewers":[{"id":"5678","username":"User 5678"}]}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"id":"8888","username":"User 8888","sameUserReviewers":[{"id":"8888","username":"User 8888"}]}}]}}` + + t.Run("L1 enabled - sameUserReviewers fetch skipped via L1 cache", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsHost := reviewsURLParsed.Host + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := 
gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow with L1 enabled: + // 1. reviews subgraph: topReviews root query (Review is NOT an entity) + // 2. accounts subgraph: User entity fetch for authorWithoutProvides (Users stored in L1) + // 3. reviews subgraph: sameUserReviewers (returns [User] references) + // 4. sameUserReviewers entity resolution: all Users are L1 HITs → accounts call SKIPPED! + reviewsCalls := tracker.GetCount(reviewsHost) + accountsCalls := tracker.GetCount(accountsHost) + + assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice (topReviews + sameUserReviewers)") + // KEY ASSERTION: Only 1 accounts call! sameUserReviewers entity resolution skipped via L1. + assert.Equal(t, 1, accountsCalls, + "With L1 enabled: only 1 accounts call (sameUserReviewers entity fetch skipped via L1)") + }) + + t.Run("L1 disabled - more accounts calls without L1 optimization", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsHost := reviewsURLParsed.Host + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow with L1 disabled: 
+ // 1. reviews subgraph: topReviews root query + // 2. accounts subgraph: User entity fetch for authorWithoutProvides + // 3. reviews subgraph: sameUserReviewers + // 4. accounts subgraph: User entity fetch for sameUserReviewers (no L1 → must fetch again!) + reviewsCalls := tracker.GetCount(reviewsHost) + accountsCalls := tracker.GetCount(accountsHost) + + assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice") + // KEY ASSERTION: 2 accounts calls without L1 optimization + assert.Equal(t, 2, accountsCalls, + "With L1 disabled: 2 accounts calls (sameUserReviewers requires separate fetch)") + }) +} + +// ============================================================================= +// CACHE ERROR HANDLING TESTS +// ============================================================================= +// +// These tests verify that caches are NOT populated when subgraphs return errors. +// The cache should only store successful responses to prevent caching error states. + +func TestL1CacheOptimizationReducesSubgraphCalls(t *testing.T) { + // This query demonstrates L1 optimization: + // - Query.me returns User entity + // - User.sameUserReviewers returns [User] entities + // When L1 is enabled and optimized correctly: + // - First User fetch (me) populates L1 cache + // - Second User fetch (sameUserReviewers) hits L1 cache, SKIPS subgraph call + // + // The optimizeL1Cache postprocessor: + // - Sets UseL1Cache=true on User fetches (they share the same entity type) + // - Sets UseL1Cache=false on fetches with no matching entity types + + query := `query { + me { + id + username + sameUserReviewers { + id + username + } + } + }` + + expectedResponse := `{"data":{"me":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}}` + + t.Run("L1 optimization enables cache hit between same entity type fetches", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + 
+ cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow with L1 optimization: + // 1. accounts subgraph: Query.me (root query, returns User 1234) + // - L1 cache populated with User 1234 + // 2. reviews subgraph: User.sameUserReviewers (returns [User 1234]) + // 3. accounts subgraph: User entity fetch for sameUserReviewers + // - User 1234 is 100% L1 HIT! This call is SKIPPED! + accountsCalls := tracker.GetCount(accountsHost) + reviewsCalls := tracker.GetCount(reviewsHost) + + // KEY ASSERTION: Only 1 accounts call! + // Without L1 optimization, there would be 2 calls: + // - First: Query.me + // - Second: User entity resolution for sameUserReviewers + // With L1 optimization, the second call is skipped because User 1234 is in L1 cache. 
+ assert.Equal(t, 1, accountsCalls, + "L1 optimization: only 1 accounts call (sameUserReviewers resolved from L1 cache)") + assert.Equal(t, 1, reviewsCalls, + "Should call reviews subgraph once for User.sameUserReviewers") + }) + + t.Run("Without L1, same query requires more subgraph calls", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, // L1 disabled + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow WITHOUT L1: + // 1. accounts subgraph: Query.me (root query) + // 2. reviews subgraph: User.sameUserReviewers + // 3. accounts subgraph: User entity fetch (NO L1 cache → must fetch!) + accountsCalls := tracker.GetCount(accountsHost) + reviewsCalls := tracker.GetCount(reviewsHost) + + // KEY ASSERTION: 2 accounts calls without L1! + // This proves L1 optimization saves a subgraph call. 
+ assert.Equal(t, 2, accountsCalls, + "Without L1: 2 accounts calls (sameUserReviewers requires separate fetch)") + assert.Equal(t, 1, reviewsCalls, + "Should call reviews subgraph once for User.sameUserReviewers") + }) +} diff --git a/execution/engine/federation_caching_l2_test.go b/execution/engine/federation_caching_l2_test.go new file mode 100644 index 0000000000..bf988e86d8 --- /dev/null +++ b/execution/engine/federation_caching_l2_test.go @@ -0,0 +1,1256 @@ +package engine_test + +import ( + "context" + "net/http" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + accounts "github.com/wundergraph/graphql-go-tools/execution/federationtesting/accounts/graph" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +func TestL2CacheOnly(t *testing.T) { + t.Run("L2 enabled - miss then hit across requests", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Enable L2 cache only + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + } + + // Enable entity caching for L2 tests (opt-in per-subgraph caching) + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, 
IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + // First query - should miss cache + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + // Cache operations: get/set for Query.topProducts, Product entities, User entities = 6 operations + assert.Equal(t, 6, len(logAfterFirst), "Should have exactly 6 cache operations (get/set for Query, Products, Users)") + + // Verify the exact cache access log (order may vary for keys within each operation) + wantLogFirst := 
[]CacheLogEntry{ + // Root field Query.topProducts + { + Operation: "get", + Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, + Hits: []bool{false}, + }, + { + Operation: "set", + Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, + }, + // Product entity fetches (reviews data for each product) + { + Operation: "get", + Keys: []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + }, + Hits: []bool{false, false}, + }, + { + Operation: "set", + Keys: []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + }, + }, + // User entity fetches (author data) + { + Operation: "get", + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + }, + Hits: []bool{false}, + }, + { + Operation: "set", + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + }, + }, + } + assert.Equal(t, sortCacheLogKeys(wantLogFirst), sortCacheLogKeys(logAfterFirst), "First query cache log should match expected") + + // Verify subgraph calls for first query + productsCallsFirst := tracker.GetCount(productsHost) + reviewsCallsFirst := tracker.GetCount(reviewsHost) + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, productsCallsFirst, "First query should call products subgraph exactly once") + assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph exactly once") + assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph for User entity resolution") + + // Second query - all fetches should hit cache + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth 
control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + // Verify L2 cache hits + logAfterSecond := defaultCache.GetLog() + // All cache operations should be gets with hits: Query.topProducts, Product entities, User entities + assert.Equal(t, 3, len(logAfterSecond), "Second query should have 3 cache get operations (all hits)") + + // Verify the exact cache access log for second query (all hits) + wantLogSecond := []CacheLogEntry{ + // Root field Query.topProducts - HIT + { + Operation: "get", + Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, + Hits: []bool{true}, + }, + // Product entity fetches - HITS + { + Operation: "get", + Keys: []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + }, + Hits: []bool{true, true}, + }, + // User entity fetches - HITS + { + Operation: "get", + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + }, + Hits: []bool{true}, + }, + } + assert.Equal(t, sortCacheLogKeys(wantLogSecond), sortCacheLogKeys(logAfterSecond), "Second query cache log should match expected (all hits)") + + // Verify subgraph calls for second query - all should be cached + productsCallsSecond := tracker.GetCount(productsHost) + reviewsCallsSecond := tracker.GetCount(reviewsHost) + accountsCallsSecond := tracker.GetCount(accountsHost) + assert.Equal(t, 0, productsCallsSecond, "Second query should not call products subgraph (root field cache hit)") + assert.Equal(t, 0, reviewsCallsSecond, "Second query should not call reviews subgraph (entity cache hit)") + assert.Equal(t, 0, accountsCallsSecond, "Second query should not call accounts subgraph (entity cache hit)") + }) + + t.Run("L2 disabled - no external cache operations", func(t *testing.T) { + defaultCache := 
NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Disable L2 cache + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First query + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + // Verify no cache operations + log := defaultCache.GetLog() + assert.Empty(t, log, "No L2 cache operations should occur when L2 is disabled") + }) +} + +func TestL1L2CacheCombined(t *testing.T) { + t.Run("L1+L2 enabled - L1 within request, L2 across requests", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Enable both L1 and L2 cache + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + } + + 
// Enable entity caching for L2 tests (opt-in per-entity caching) + // Configure caching per-subgraph with explicit subgraph names + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + // First query - L1 helps within request, L2 populates for later + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth 
control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + // Cache operations: get/set for Query.topProducts, Product entities, User entities = 6 operations + assert.Equal(t, 6, len(logAfterFirst), "Should have exactly 6 cache operations (get/set for Query, Products, Users)") + + // Verify the exact cache access log (order may vary for keys within each operation) + wantLogFirst := []CacheLogEntry{ + // Root field Query.topProducts + { + Operation: "get", + Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, + Hits: []bool{false}, + }, + { + Operation: "set", + Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, + }, + // Product entity fetches (reviews data for each product) + { + Operation: "get", + Keys: []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + }, + Hits: []bool{false, false}, + }, + { + Operation: "set", + Keys: []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + }, + }, + // User entity fetches (author data) + { + Operation: "get", + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + }, + Hits: []bool{false}, + }, + { + Operation: "set", + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + }, + }, + } + assert.Equal(t, sortCacheLogKeys(wantLogFirst), sortCacheLogKeys(logAfterFirst), "First query cache log should match expected") + + // Verify subgraph calls for first query + productsCallsFirst := tracker.GetCount(productsHost) + reviewsCallsFirst := tracker.GetCount(reviewsHost) + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, productsCallsFirst, "First query should call products subgraph exactly once") + 
assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph exactly once") + assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph for User entity resolution") + + // Second query - new request means fresh L1, but L2 should hit + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + // All cache operations should be gets with hits: Query.topProducts, Product entities, User entities + assert.Equal(t, 3, len(logAfterSecond), "Second query should have 3 cache get operations (all hits)") + + // Verify the exact cache access log for second query (all hits) + wantLogSecond := []CacheLogEntry{ + // Root field Query.topProducts - HIT + { + Operation: "get", + Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, + Hits: []bool{true}, + }, + // Product entity fetches - HITS + { + Operation: "get", + Keys: []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + }, + Hits: []bool{true, true}, + }, + // User entity fetches - HITS + { + Operation: "get", + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + }, + Hits: []bool{true}, + }, + } + assert.Equal(t, sortCacheLogKeys(wantLogSecond), sortCacheLogKeys(logAfterSecond), "Second query cache log should match expected (all hits)") + + // Verify no subgraph calls for second query (L2 cache hits) + productsCallsSecond := tracker.GetCount(productsHost) + reviewsCallsSecond 
:= tracker.GetCount(reviewsHost) + accountsCallsSecond := tracker.GetCount(accountsHost) + assert.Equal(t, 0, productsCallsSecond, "Second query should not call products subgraph (L2 hit)") + assert.Equal(t, 0, reviewsCallsSecond, "Second query should not call reviews subgraph (L2 hit)") + assert.Equal(t, 0, accountsCallsSecond, "Second query should not call accounts subgraph (L2 hit)") + }) + + t.Run("L1+L2 - cross-request isolation: L1 per-request, L2 shared", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Enable both L1 and L2 + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + } + + // Enable entity caching for L2 tests (opt-in per-entity caching) + // Configure caching per-subgraph with explicit subgraph names + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First request - populates L2 cache + defaultCache.ClearLog() + tracker.Reset() + resp := 
gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + productKeys := []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + } + userKeys := []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + } + wantFirstLog := []CacheLogEntry{ + // reviews subgraph _entities(Product) — L2 miss, first time seeing these products + {Operation: "get", Keys: productKeys, Hits: []bool{false, false}}, + // reviews subgraph _entities(Product) — store fetched product data in L2 + {Operation: "set", Keys: productKeys}, + // accounts subgraph _entities(User) — L2 miss, first time seeing this user + {Operation: "get", Keys: userKeys, Hits: []bool{false}}, + // accounts subgraph _entities(User) — store fetched user data in L2 + {Operation: "set", Keys: userKeys}, + } + assert.Equal(t, sortCacheLogKeys(wantFirstLog), sortCacheLogKeys(logAfterFirst), "First request: L2 miss + set for Product and User") + + // Second request - L1 is fresh (new request), but L2 should provide data + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of 
outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + wantSecondLog := []CacheLogEntry{ + // reviews subgraph _entities(Product) — L2 hit, both products cached from first request + {Operation: "get", Keys: productKeys, Hits: []bool{true, true}}, + // accounts subgraph _entities(User) — L2 hit, user cached from first request (deduplicated: 1 unique user) + {Operation: "get", Keys: userKeys, Hits: []bool{true}}, + // No set operations — all data served from cache + } + assert.Equal(t, sortCacheLogKeys(wantSecondLog), sortCacheLogKeys(logAfterSecond), "Second request: all L2 cache hits, no sets") + + // No subgraph calls on second request — all entity data served from L2 cache + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + assert.Equal(t, 0, tracker.GetCount(reviewsURLParsed.Host), "Second request should skip reviews subgraph (Product L2 cache hit)") + assert.Equal(t, 0, tracker.GetCount(accountsURLParsed.Host), "Second request should skip accounts subgraph (User L2 cache hit)") + }) +} + +// TestPartialEntityCaching demonstrates that only explicitly configured entity types +// are cached. This test configures caching for Product but NOT for User, verifying +// the opt-in nature of the per-entity caching configuration. 
+
+func TestPartialEntityCaching(t *testing.T) {
+	t.Run("only configured entities are cached", func(t *testing.T) {
+		// Fake L2 cache that records every get/set operation (see ClearLog/GetLog
+		// below) so the test can assert exactly which keys were touched.
+		defaultCache := NewFakeLoaderCache()
+		caches := map[string]resolve.LoaderCache{
+			"default": defaultCache,
+		}
+
+		// Create HTTP client with tracking
+		tracker := newSubgraphCallTracker(http.DefaultTransport)
+		trackingClient := &http.Client{
+			Transport: tracker,
+		}
+
+		// Enable L2 cache
+		cachingOpts := resolve.CachingOptions{
+			EnableL1Cache: false,
+			EnableL2Cache: true,
+		}
+
+		// PARTIAL CACHING: Only configure caching for Product in reviews subgraph, NOT for User in accounts
+		// This demonstrates the opt-in per-entity caching behavior
+		subgraphCachingConfigs := engine.SubgraphCachingConfigs{
+			{
+				SubgraphName: "reviews",
+				EntityCaching: plan.EntityCacheConfigurations{
+					{TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false},
+				},
+			},
+			// Note: accounts subgraph is intentionally NOT configured - User entities should NOT be cached
+		}
+
+		setup := federationtesting.NewFederationSetup(addCachingGateway(
+			withCachingEnableART(false),
+			withCachingLoaderCache(caches),
+			withHTTPClient(trackingClient),
+			withCachingOptionsFunc(cachingOpts),
+			withSubgraphEntityCachingConfigs(subgraphCachingConfigs),
+		))
+		t.Cleanup(setup.Close)
+		gqlClient := NewGraphqlClient(http.DefaultClient)
+		ctx, cancel := context.WithCancel(context.Background())
+		t.Cleanup(cancel)
+
+		// Extract hostnames for tracking
+		// NOTE(review): url.Parse errors are deliberately ignored — assumes the
+		// test upstream server URLs are always well-formed; confirm if setup changes.
+		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
+		reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL)
+		accountsHost := accountsURLParsed.Host
+		reviewsHost := reviewsURLParsed.Host
+
+		// First query - Product entities should be cached, User entities should NOT
+		defaultCache.ClearLog()
+		tracker.Reset()
+		resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
+		assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp))
+
+		// Only Product has L2 caching configured (reviews subgraph); User (accounts) does NOT.
+		// So we expect cache operations for Product only — no User cache activity at all.
+		productKeys := []string{
+			`{"__typename":"Product","key":{"upc":"top-1"}}`,
+			`{"__typename":"Product","key":{"upc":"top-2"}}`,
+		}
+		logAfterFirst := defaultCache.GetLog()
+		wantFirstLog := []CacheLogEntry{
+			// reviews subgraph _entities(Product) — L2 miss, first time seeing these products
+			{Operation: "get", Keys: productKeys, Hits: []bool{false, false}},
+			// reviews subgraph _entities(Product) — store fetched product data in L2
+			{Operation: "set", Keys: productKeys},
+			// No User operations — accounts subgraph has no caching configured
+		}
+		assert.Equal(t, sortCacheLogKeys(wantFirstLog), sortCacheLogKeys(logAfterFirst), "First request: only Product entities have cache operations")
+
+		// Both subgraphs called on first request (no cache to serve from)
+		assert.Equal(t, 1, tracker.GetCount(reviewsHost), "First query should call reviews subgraph")
+		assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts subgraph")
+
+		// Second query - Product should hit cache, User should still be fetched from subgraph
+		defaultCache.ClearLog()
+		tracker.Reset()
+		resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
+		assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp))
+
+		logAfterSecond := defaultCache.GetLog()
+		wantSecondLog := []CacheLogEntry{
+			// reviews subgraph _entities(Product) — L2 hit, both products cached from first request
+			{Operation: "get", Keys: productKeys, Hits: []bool{true, true}},
+			// No User operations — accounts subgraph still has no caching configured
+			// No set operations — Product data served from cache
+		}
+		assert.Equal(t, sortCacheLogKeys(wantSecondLog), sortCacheLogKeys(logAfterSecond), "Second request: Product cache hits only")
+
+		// Reviews subgraph skipped (Product served from cache), accounts still called (User not cached)
+		assert.Equal(t, 0, tracker.GetCount(reviewsHost), "Second query should skip reviews subgraph (Product cache hit)")
+		assert.Equal(t, 1, tracker.GetCount(accountsHost), "Second query should still call accounts subgraph (User NOT cached)")
+	})
+}
+
+// TestRootFieldCaching tests that root fields (like Query.topProducts) can be cached
+// when explicitly configured with RootFieldCaching configuration.
+func TestRootFieldCaching(t *testing.T) {
+	t.Run("root field caching enabled", func(t *testing.T) {
+		// Fake L2 cache that records every get/set operation so the test can
+		// assert exactly which keys were touched.
+		defaultCache := NewFakeLoaderCache()
+		caches := map[string]resolve.LoaderCache{
+			"default": defaultCache,
+		}
+
+		// Create HTTP client with tracking
+		tracker := newSubgraphCallTracker(http.DefaultTransport)
+		trackingClient := &http.Client{
+			Transport: tracker,
+		}
+
+		// Enable L2 cache
+		cachingOpts := resolve.CachingOptions{
+			EnableL1Cache: false,
+			EnableL2Cache: true,
+		}
+
+		// Configure root field caching for Query.topProducts on products subgraph
+		// Also configure entity caching to compare behavior
+		subgraphCachingConfigs := engine.SubgraphCachingConfigs{
+			{
+				SubgraphName: "products",
+				RootFieldCaching: plan.RootFieldCacheConfigurations{
+					{TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false},
+				},
+			},
+			{
+				SubgraphName: "reviews",
+				EntityCaching: plan.EntityCacheConfigurations{
+					{TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false},
+				},
+			},
+			{
+				SubgraphName: "accounts",
+				EntityCaching: plan.EntityCacheConfigurations{
+					{TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false},
+				},
+			},
+		}
+
+		setup := federationtesting.NewFederationSetup(addCachingGateway(
+			withCachingEnableART(false),
+			withCachingLoaderCache(caches),
+			withHTTPClient(trackingClient),
+			withCachingOptionsFunc(cachingOpts),
+			withSubgraphEntityCachingConfigs(subgraphCachingConfigs),
+		))
+		t.Cleanup(setup.Close)
+		gqlClient := NewGraphqlClient(http.DefaultClient)
+		ctx, cancel := context.WithCancel(context.Background())
+		t.Cleanup(cancel)
+
+		// Extract hostnames for tracking
+		// NOTE(review): url.Parse errors are deliberately ignored — assumes the
+		// test upstream server URLs are always well-formed; confirm if setup changes.
+		productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL)
+		reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL)
+		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
+		productsHost := productsURLParsed.Host
+		reviewsHost := reviewsURLParsed.Host
+		accountsHost := accountsURLParsed.Host
+
+		// First query - should miss cache for all: root field, entity types
+		defaultCache.ClearLog()
+		tracker.Reset()
+		resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
+		assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp))
+
+		logAfterFirst := defaultCache.GetLog()
+		// Should have cache operations for:
+		// 1. Root field Query.topProducts (get + set = 2 operations)
+		// 2. Product entities (get + set = 2 operations)
+		// 3. User entities (get + set = 2 operations)
+		// Total: 6 operations
+		assert.Equal(t, 6, len(logAfterFirst), "First query should have 6 cache operations (get+set for root field, Product, User)")
+
+		// Verify first query calls all subgraphs
+		productsCallsFirst := tracker.GetCount(productsHost)
+		reviewsCallsFirst := tracker.GetCount(reviewsHost)
+		accountsCallsFirst := tracker.GetCount(accountsHost)
+		assert.Equal(t, 1, productsCallsFirst, "First query should call products subgraph")
+		assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph")
+		assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph")
+
+		// Second query - should hit cache for root field and entities
+		defaultCache.ClearLog()
+		tracker.Reset()
+		resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
+		assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp))
+
+		logAfterSecond := defaultCache.GetLog()
+		wantSecondLog := []CacheLogEntry{
+			// products subgraph Query.topProducts — root field L2 hit, cached from first request
+			{Operation: "get", Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, Hits: []bool{true}},
+			// reviews subgraph _entities(Product) — L2 hit, both products cached from first request
+			{Operation: "get", Keys: []string{`{"__typename":"Product","key":{"upc":"top-1"}}`, `{"__typename":"Product","key":{"upc":"top-2"}}`}, Hits: []bool{true, true}},
+			// accounts subgraph _entities(User) — L2 hit, user cached from first request (1 unique user)
+			{Operation: "get", Keys: []string{`{"__typename":"User","key":{"id":"1234"}}`}, Hits: []bool{true}},
+			// No set operations — all data served from cache
+		}
+		assert.Equal(t, sortCacheLogKeys(wantSecondLog), sortCacheLogKeys(logAfterSecond), "Second query: all cache hits, no sets")
+
+		// All subgraphs skipped on second query (everything served from cache)
+		assert.Equal(t, 0, tracker.GetCount(productsHost), "Second query should skip products subgraph (root field cache hit)")
+		assert.Equal(t, 0, tracker.GetCount(reviewsHost), "Second query should skip reviews subgraph (entity cache hit)")
+		assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts subgraph (entity cache hit)")
+	})
+
+	t.Run("root field caching NOT enabled - subgraph still called", func(t *testing.T) {
+		// NOTE(review): this subtest only asserts subgraph call counts; the fake
+		// cache log is intentionally never inspected here.
+		defaultCache := NewFakeLoaderCache()
+		caches := map[string]resolve.LoaderCache{
+			"default": defaultCache,
+		}
+
+		// Create HTTP client with tracking
+		tracker := newSubgraphCallTracker(http.DefaultTransport)
+		trackingClient := &http.Client{
+			Transport: tracker,
+		}
+
+		// Enable L2 cache
+		cachingOpts := resolve.CachingOptions{
+			EnableL1Cache: false,
+			EnableL2Cache: true,
+		}
+
+		// Only configure entity caching, NOT root field caching
+		// This demonstrates opt-in behavior: root fields are NOT cached unless configured
+		subgraphCachingConfigs := engine.SubgraphCachingConfigs{
+			{
+				SubgraphName: "reviews",
+				EntityCaching: plan.EntityCacheConfigurations{
+					{TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false},
+				},
+			},
+			{
+				SubgraphName: "accounts",
+				EntityCaching: plan.EntityCacheConfigurations{
+					{TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false},
+				},
+			},
+			// Note: products subgraph has NO caching config for Query.topProducts
+		}
+
+		setup := federationtesting.NewFederationSetup(addCachingGateway(
+			withCachingEnableART(false),
+			withCachingLoaderCache(caches),
+			withHTTPClient(trackingClient),
+			withCachingOptionsFunc(cachingOpts),
+			withSubgraphEntityCachingConfigs(subgraphCachingConfigs),
+		))
+		t.Cleanup(setup.Close)
+		gqlClient := NewGraphqlClient(http.DefaultClient)
+		ctx, cancel := context.WithCancel(context.Background())
+		t.Cleanup(cancel)
+
+		// Extract hostnames for tracking
+		productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL)
+		productsHost := productsURLParsed.Host
+
+		// First query
+		tracker.Reset()
+		resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
+		assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp))
+
+		productsCallsFirst := tracker.GetCount(productsHost)
+		assert.Equal(t, 1, productsCallsFirst, "First query should call products subgraph")
+
+		// Second query - products subgraph should still be called because root field is NOT cached
+		tracker.Reset()
+		resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
+		assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp))
+
+		// KEY ASSERTION: Products subgraph IS called on second query because root field is NOT cached
+		productsCallsSecond := tracker.GetCount(productsHost)
+		assert.Equal(t, 1, productsCallsSecond, "Second query SHOULD call products subgraph (root field NOT cached)")
+	})
+}
+
+// =============================================================================
+// L1 CACHE TESTS FOR LIST FIELDS
+// =============================================================================
+//
+// These tests verify L1 caching behavior when root fields or child fields
+// return lists of entities.
+ +func TestCacheNotPopulatedOnErrors(t *testing.T) { + // Query that triggers an error in accounts subgraph via error-user + // The reviewWithError field returns a review with author ID "error-user" + // which causes FindUserByID to return an error + errorQuery := `query { + reviewWithError { + body + authorWithoutProvides { + id + username + } + } + }` + + // Expected error response - data is null due to non-nullable username field error propagation + expectedErrorResponse := `{"errors":[{"message":"Failed to fetch from Subgraph 'accounts' at Path 'reviewWithError.authorWithoutProvides'."},{"message":"Cannot return null for non-nullable field 'User.username'.","path":["reviewWithError","authorWithoutProvides","username"]}],"data":{"reviewWithError":null}}` + + t.Run("L1 only - error response prevents cache population", func(t *testing.T) { + // This test verifies that L1 cache is NOT populated when an error occurs. + // If L1 was erroneously populated, the second query would not call accounts. 
+ tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + // First query - should get error from accounts + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Verify exact error response + assert.Equal(t, expectedErrorResponse, string(resp)) + + reviewsCallsFirst := tracker.GetCount(reviewsHost) + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph once") + assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph once") + + // Second query - L1 should NOT have cached the error, so accounts should be called again + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Same error should be returned + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsSecond := tracker.GetCount(accountsHost) + // KEY ASSERTION: If L1 incorrectly cached the error, this would be 0 + assert.Equal(t, 1, accountsCallsSecond, "Second query should call accounts again (L1 should NOT cache errors)") + }) + + t.Run("L2 only - error response prevents cache population", func(t *testing.T) { + // This test verifies that L2 cache is NOT populated when an 
error occurs. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Configure L2 caching for User entities + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query - should get error from accounts + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Verify exact error response + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph once") + + // Verify exact cache log: only "get" with miss, NO "set" + // Since the fetch had an error, cache population should be skipped entirely + wantCacheLog := []CacheLogEntry{ + { + Operation: "get", + Keys: []string{`{"__typename":"User","key":{"id":"error-user"}}`}, + Hits: []bool{false}, + }, + // NO "set" entry - this is the key assertion + } + assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Cache log 
should only have 'get' miss, no 'set'") + + // Second query - L2 should NOT have cached the error, so accounts should be called again + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Same error should be returned + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsSecond := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsSecond, "Second query should call accounts again (L2 should NOT cache errors)") + + // Second query should also have same cache log pattern (get miss, no set) + assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Second query cache log should also have 'get' miss, no 'set'") + }) + + t.Run("L1 and L2 - error response prevents both caches", func(t *testing.T) { + // This test verifies that both L1 and L2 caches are NOT populated when an error occurs. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Configure L2 caching for User entities + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := 
url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query - should get error from accounts + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Verify exact error response + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph once") + + // Verify exact cache log: only "get" with miss, NO "set" + wantCacheLog := []CacheLogEntry{ + { + Operation: "get", + Keys: []string{`{"__typename":"User","key":{"id":"error-user"}}`}, + Hits: []bool{false}, + }, + } + assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Cache log should only have 'get' miss, no 'set'") + + // Second query - neither L1 nor L2 should have cached the error + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Same error should be returned + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsSecond := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsSecond, "Second query should call accounts again (neither L1 nor L2 should cache errors)") + + // Second query should also have same cache log pattern + assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Second query cache log should also have 'get' miss, no 'set'") + }) + + t.Run("error does not pollute cache for subsequent success queries", func(t *testing.T) { + // This test verifies that an error query doesn't pollute the cache + // and that subsequent successful queries still work correctly. 
+ defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Configure L2 caching for User entities + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First: Query that triggers an error + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Verify exact error response + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsError := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsError, "Error query should call accounts") + + // Verify error-user was NOT cached (only get, no set) + wantErrorCacheLog := []CacheLogEntry{ + { + Operation: "get", + Keys: []string{`{"__typename":"User","key":{"id":"error-user"}}`}, + Hits: []bool{false}, + }, + } + assert.Equal(t, wantErrorCacheLog, defaultCache.GetLog(), "Error query cache log should only have 'get' miss, no 'set'") + + // Second: Query a successful user (User 1234 via me query) + // Note: "me" is a root query, not an 
entity fetch, so it doesn't use L2 entity caching + successQuery := `query { + me { + id + username + } + }` + expectedSuccessResponse := `{"data":{"me":{"id":"1234","username":"Me"}}}` + + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, successQuery, nil, t) + + // Should succeed with exact expected response + assert.Equal(t, expectedSuccessResponse, string(resp)) + + // Note: Root queries (me) don't use L2 entity caching by default, + // so the cache log should be empty for this query. + // The important thing is that the previous error didn't pollute the cache. + assert.Equal(t, 0, len(defaultCache.GetLog()), "Root query should not use L2 entity cache") + + // Third: Query the error user again - should still fail (not cached) + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + assert.Equal(t, expectedErrorResponse, string(resp)) + accountsCallsErrorAgain := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsErrorAgain, "Error query should call accounts again (error was not cached)") + + // Verify cache log still shows only get miss, no set + assert.Equal(t, wantErrorCacheLog, defaultCache.GetLog(), "Third query cache log should still have 'get' miss, no 'set'") + }) +} + +func TestMutationCacheInvalidationE2E(t *testing.T) { + accounts.ResetUsers() + t.Cleanup(accounts.ResetUsers) + + // Configure entity caching for User AND mutation invalidation for updateUsername + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + MutationCacheInvalidation: plan.MutationCacheInvalidationConfigurations{ + {FieldName: "updateUsername"}, + }, + }, + } + + // Query that triggers entity caching for User via authorWithoutProvides (no @provides) + entityQuery := `query { 
topProducts { name reviews { body authorWithoutProvides { username } } } }` + mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` + + t.Run("mutation deletes L2 cache entry", func(t *testing.T) { + accounts.ResetUsers() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) + + // Request 1: Query to populate L2 cache with User entity + tracker.Reset() + defaultCache.ClearLog() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) + assert.Contains(t, string(resp), `"username":"Me"`) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "should call accounts subgraph once to populate cache") + + // Request 2: Same query — should hit L2 cache, no accounts call + tracker.Reset() + defaultCache.ClearLog() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) + assert.Contains(t, string(resp), `"username":"Me"`) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "should NOT call accounts subgraph (L2 hit)") + + // Request 3: Mutation — should delete the L2 cache entry + tracker.Reset() + defaultCache.ClearLog() + respMut := gqlClient.QueryString(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) + assert.Contains(t, string(respMut), `"UpdatedMe"`) + + // Verify the cache log 
contains a delete operation + mutationLog := defaultCache.GetLog() + hasDelete := false + for _, entry := range mutationLog { + if entry.Operation == "delete" { + hasDelete = true + assert.Equal(t, 1, len(entry.Keys), "delete should have exactly 1 key") + assert.Contains(t, entry.Keys[0], `"__typename":"User"`) + assert.Contains(t, entry.Keys[0], `"id":"1234"`) + } + } + assert.True(t, hasDelete, "mutation should trigger a cache delete operation") + + // Request 4: Same query again — should miss L2 (entry deleted), re-fetch from subgraph + tracker.Reset() + defaultCache.ClearLog() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) + assert.Contains(t, string(resp), `"username":"UpdatedMe"`) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "should call accounts subgraph again (L2 entry was deleted)") + }) + + t.Run("mutation without invalidation config does not delete", func(t *testing.T) { + accounts.ResetUsers() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + // Config WITHOUT MutationCacheInvalidation + noInvalidationConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + // No MutationCacheInvalidation — mutation should NOT delete cache + }, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(noInvalidationConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := 
mustParseHost(setup.AccountsUpstreamServer.URL) + + // Request 1: Query to populate L2 cache + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) + assert.Contains(t, string(resp), `"username":"Me"`) + + // Request 2: Mutation — should NOT delete L2 cache entry + tracker.Reset() + defaultCache.ClearLog() + respMut := gqlClient.QueryString(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) + assert.Contains(t, string(respMut), `"UpdatedMe"`) + + // Verify no delete operation in cache log + mutationLog := defaultCache.GetLog() + for _, entry := range mutationLog { + assert.NotEqual(t, "delete", entry.Operation, "should not have any delete operations without invalidation config") + } + + // Request 3: Same query — should still hit L2 cache (stale but not deleted) + tracker.Reset() + _ = gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "should NOT call accounts subgraph (L2 entry still present)") + }) +} diff --git a/execution/engine/federation_caching_test.go b/execution/engine/federation_caching_test.go index 5386193e50..2c3c3ed46a 100644 --- a/execution/engine/federation_caching_test.go +++ b/execution/engine/federation_caching_test.go @@ -2,27 +2,19 @@ package engine_test import ( "context" - "encoding/json" "fmt" "net/http" - "net/http/httptest" "net/url" - "path" - "sort" "strconv" - "strings" "sync" "testing" "time" - "github.com/jensneuse/abstractlogger" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/wundergraph/graphql-go-tools/execution/engine" "github.com/wundergraph/graphql-go-tools/execution/federationtesting" - accounts "github.com/wundergraph/graphql-go-tools/execution/federationtesting/accounts/graph" - "github.com/wundergraph/graphql-go-tools/execution/federationtesting/gateway" "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" 
"github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" ) @@ -2445,4818 +2437,3 @@ func TestFederationCaching_MutationSkipsL2Read(t *testing.T) { assert.Equal(t, 1, tracker.GetCount(accountsHost), "Step 2: accounts called once for me root query, entity resolution served from L2 cache") }) } - -// subgraphCallTracker tracks HTTP requests made to subgraph servers -type subgraphCallTracker struct { - mu sync.RWMutex - counts map[string]int // Maps subgraph URL to call count - original http.RoundTripper -} - -func newSubgraphCallTracker(original http.RoundTripper) *subgraphCallTracker { - return &subgraphCallTracker{ - counts: make(map[string]int), - original: original, - } -} - -func (t *subgraphCallTracker) RoundTrip(req *http.Request) (*http.Response, error) { - t.mu.Lock() - host := req.URL.Host - t.counts[host]++ - t.mu.Unlock() - return t.original.RoundTrip(req) -} - -func (t *subgraphCallTracker) GetCount(url string) int { - t.mu.RLock() - defer t.mu.RUnlock() - return t.counts[url] -} - -func (t *subgraphCallTracker) Reset() { - t.mu.Lock() - defer t.mu.Unlock() - t.counts = make(map[string]int) -} - -func (t *subgraphCallTracker) GetCounts() map[string]int { - t.mu.RLock() - defer t.mu.RUnlock() - result := make(map[string]int) - for k, v := range t.counts { - result[k] = v - } - return result -} - -func (t *subgraphCallTracker) DebugPrint() string { - t.mu.RLock() - defer t.mu.RUnlock() - return fmt.Sprintf("%v", t.counts) -} - -// Helper functions for gateway setup with HTTP client support -type cachingGatewayOptions struct { - enableART bool - withLoaderCache map[string]resolve.LoaderCache - httpClient *http.Client - subgraphHeadersBuilder resolve.SubgraphHeadersBuilder - cachingOptions resolve.CachingOptions - subgraphEntityCachingConfigs engine.SubgraphCachingConfigs - debugMode bool -} - -func withCachingEnableART(enableART bool) func(*cachingGatewayOptions) { - return func(opts *cachingGatewayOptions) { - opts.enableART = enableART - } -} - 
-func withCachingLoaderCache(loaderCache map[string]resolve.LoaderCache) func(*cachingGatewayOptions) { - return func(opts *cachingGatewayOptions) { - opts.withLoaderCache = loaderCache - } -} - -func withHTTPClient(client *http.Client) func(*cachingGatewayOptions) { - return func(opts *cachingGatewayOptions) { - opts.httpClient = client - } -} - -func withSubgraphHeadersBuilder(builder resolve.SubgraphHeadersBuilder) func(*cachingGatewayOptions) { - return func(opts *cachingGatewayOptions) { - opts.subgraphHeadersBuilder = builder - } -} - -func withCachingOptionsFunc(cachingOpts resolve.CachingOptions) func(*cachingGatewayOptions) { - return func(opts *cachingGatewayOptions) { - opts.cachingOptions = cachingOpts - } -} - -func withSubgraphEntityCachingConfigs(configs engine.SubgraphCachingConfigs) func(*cachingGatewayOptions) { - return func(opts *cachingGatewayOptions) { - opts.subgraphEntityCachingConfigs = configs - } -} - -func withDebugMode(enabled bool) func(*cachingGatewayOptions) { - return func(opts *cachingGatewayOptions) { - opts.debugMode = enabled - } -} - -type cachingGatewayOptionsToFunc func(opts *cachingGatewayOptions) - -func addCachingGateway(options ...cachingGatewayOptionsToFunc) func(setup *federationtesting.FederationSetup) *httptest.Server { - opts := &cachingGatewayOptions{} - for _, option := range options { - option(opts) - } - return func(setup *federationtesting.FederationSetup) *httptest.Server { - httpClient := opts.httpClient - if httpClient == nil { - httpClient = http.DefaultClient - } - - poller := gateway.NewDatasource([]gateway.ServiceConfig{ - {Name: "accounts", URL: setup.AccountsUpstreamServer.URL}, - {Name: "products", URL: setup.ProductsUpstreamServer.URL, WS: strings.ReplaceAll(setup.ProductsUpstreamServer.URL, "http:", "ws:")}, - {Name: "reviews", URL: setup.ReviewsUpstreamServer.URL}, - }, httpClient) - - gtw := gateway.HandlerWithCaching(abstractlogger.NoopLogger, poller, httpClient, opts.enableART, 
opts.withLoaderCache, opts.subgraphHeadersBuilder, opts.cachingOptions, opts.subgraphEntityCachingConfigs, opts.debugMode) - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - - poller.Run(ctx) - return httptest.NewServer(gtw) - } -} - -// mockSubgraphHeadersBuilder is a mock implementation of SubgraphHeadersBuilder -type mockSubgraphHeadersBuilder struct { - hashes map[string]uint64 -} - -func (m *mockSubgraphHeadersBuilder) HeadersForSubgraph(subgraphName string) (http.Header, uint64) { - hash := m.hashes[subgraphName] - if hash == 0 { - // Return default hash if not found - return nil, 99999 - } - return nil, hash -} - -func (m *mockSubgraphHeadersBuilder) HashAll() uint64 { - // Return a simple hash of all subgraph hashes combined - var result uint64 - for _, hash := range m.hashes { - result ^= hash - } - return result -} - -func cachingTestQueryPath(name string) string { - return path.Join("..", "federationtesting", "testdata", name) -} - -type CacheLogEntry struct { - Operation string // "get", "set", "delete" - Keys []string // Keys involved in the operation - Hits []bool // For Get: whether each key was a hit (true) or miss (false) - Caller string // Fetch identity when debug enabled: "accounts: entity(User)" or "products: rootField(Query.topProducts)" -} - -// sortCacheLogKeys sorts the keys (and corresponding hits) in each cache log entry. -// This makes comparisons order-independent when multiple keys are present. -// Caller is intentionally stripped — it's for debug logging, not assertions. 
-func sortCacheLogKeys(log []CacheLogEntry) []CacheLogEntry { - sorted := make([]CacheLogEntry, len(log)) - for i, entry := range log { - // Only sort if there are multiple keys - if len(entry.Keys) <= 1 { - sorted[i] = CacheLogEntry{ - Operation: entry.Operation, - Keys: entry.Keys, - Hits: entry.Hits, - } - continue - } - - // Create pairs of (key, hit) to sort together - pairs := make([]struct { - key string - hit bool - }, len(entry.Keys)) - for j := range entry.Keys { - pairs[j].key = entry.Keys[j] - if entry.Hits != nil && j < len(entry.Hits) { - pairs[j].hit = entry.Hits[j] - } - } - - // Sort pairs by key - sort.Slice(pairs, func(a, b int) bool { - return pairs[a].key < pairs[b].key - }) - - // Extract sorted keys and hits - sorted[i] = CacheLogEntry{ - Operation: entry.Operation, - Keys: make([]string, len(pairs)), - Hits: nil, - } - if len(entry.Hits) > 0 { - sorted[i].Hits = make([]bool, len(pairs)) - } - for j := range pairs { - sorted[i].Keys[j] = pairs[j].key - if sorted[i].Hits != nil { - sorted[i].Hits[j] = pairs[j].hit - } - } - } - return sorted -} - -// sortCacheLogKeysWithCaller is like sortCacheLogKeys but preserves the Caller field. -// Use this when you want assertions to verify which Loader method chain triggered each cache event. 
-func sortCacheLogKeysWithCaller(log []CacheLogEntry) []CacheLogEntry { - sorted := make([]CacheLogEntry, len(log)) - for i, entry := range log { - if len(entry.Keys) <= 1 { - sorted[i] = CacheLogEntry{ - Operation: entry.Operation, - Keys: entry.Keys, - Hits: entry.Hits, - Caller: entry.Caller, - } - continue - } - - pairs := make([]struct { - key string - hit bool - }, len(entry.Keys)) - for j := range entry.Keys { - pairs[j].key = entry.Keys[j] - if entry.Hits != nil && j < len(entry.Hits) { - pairs[j].hit = entry.Hits[j] - } - } - sort.Slice(pairs, func(a, b int) bool { - return pairs[a].key < pairs[b].key - }) - sorted[i] = CacheLogEntry{ - Operation: entry.Operation, - Keys: make([]string, len(pairs)), - Hits: nil, - Caller: entry.Caller, - } - if len(entry.Hits) > 0 { - sorted[i].Hits = make([]bool, len(pairs)) - } - for j := range pairs { - sorted[i].Keys[j] = pairs[j].key - if sorted[i].Hits != nil { - sorted[i].Hits[j] = pairs[j].hit - } - } - } - return sorted -} - -type cacheEntry struct { - data []byte - expiresAt *time.Time -} - -type FakeLoaderCache struct { - mu sync.RWMutex - storage map[string]cacheEntry - log []CacheLogEntry -} - -func NewFakeLoaderCache() *FakeLoaderCache { - return &FakeLoaderCache{ - storage: make(map[string]cacheEntry), - log: make([]CacheLogEntry, 0), - } -} - -func (f *FakeLoaderCache) cleanupExpired() { - now := time.Now() - for key, entry := range f.storage { - if entry.expiresAt != nil && now.After(*entry.expiresAt) { - delete(f.storage, key) - } - } -} - -func (f *FakeLoaderCache) Get(ctx context.Context, keys []string) ([]*resolve.CacheEntry, error) { - f.mu.Lock() - defer f.mu.Unlock() - - // Clean up expired entries before executing command - f.cleanupExpired() - - hits := make([]bool, len(keys)) - result := make([]*resolve.CacheEntry, len(keys)) - for i, key := range keys { - if entry, exists := f.storage[key]; exists { - // Make a copy of the data to prevent external modifications - dataCopy := make([]byte, 
len(entry.data)) - copy(dataCopy, entry.data) - ce := &resolve.CacheEntry{ - Key: key, - Value: dataCopy, - } - // Populate RemainingTTL from expiresAt for cache age analytics - if entry.expiresAt != nil { - remaining := time.Until(*entry.expiresAt) - if remaining > 0 { - ce.RemainingTTL = remaining - } - } - result[i] = ce - hits[i] = true - } else { - result[i] = nil - hits[i] = false - } - } - - // Log the operation - caller := "" - if cfi := resolve.GetCacheFetchInfo(ctx); cfi != nil { - caller = cfi.String() - } - f.log = append(f.log, CacheLogEntry{ - Operation: "get", - Keys: keys, - Hits: hits, - Caller: caller, - }) - - return result, nil -} - -func (f *FakeLoaderCache) Set(ctx context.Context, entries []*resolve.CacheEntry, ttl time.Duration) error { - if len(entries) == 0 { - return nil - } - - f.mu.Lock() - defer f.mu.Unlock() - - // Clean up expired entries before executing command - f.cleanupExpired() - - keys := make([]string, 0, len(entries)) - for _, entry := range entries { - if entry == nil { - continue - } - cacheEntry := cacheEntry{ - // Make a copy of the data to prevent external modifications - data: make([]byte, len(entry.Value)), - } - copy(cacheEntry.data, entry.Value) - - // If ttl is 0, store without expiration - if ttl > 0 { - expiresAt := time.Now().Add(ttl) - cacheEntry.expiresAt = &expiresAt - } - - f.storage[entry.Key] = cacheEntry - keys = append(keys, entry.Key) - } - - // Log the operation - caller := "" - if cfi := resolve.GetCacheFetchInfo(ctx); cfi != nil { - caller = cfi.String() - } - f.log = append(f.log, CacheLogEntry{ - Operation: "set", - Keys: keys, - Hits: nil, // Set operations don't have hits/misses - Caller: caller, - }) - - return nil -} - -func (f *FakeLoaderCache) Delete(ctx context.Context, keys []string) error { - f.mu.Lock() - defer f.mu.Unlock() - - // Clean up expired entries before executing command - f.cleanupExpired() - - for _, key := range keys { - delete(f.storage, key) - } - - // Log the operation - 
caller := "" - if cfi := resolve.GetCacheFetchInfo(ctx); cfi != nil { - caller = cfi.String() - } - f.log = append(f.log, CacheLogEntry{ - Operation: "delete", - Keys: keys, - Hits: nil, // Delete operations don't have hits/misses - Caller: caller, - }) - - return nil -} - -// GetLog returns a copy of the cache operation log -func (f *FakeLoaderCache) GetLog() []CacheLogEntry { - f.mu.RLock() - defer f.mu.RUnlock() - logCopy := make([]CacheLogEntry, len(f.log)) - copy(logCopy, f.log) - return logCopy -} - -// GetLogWithCaller returns a copy of the cache operation log with Caller populated. -// Use this with sortCacheLogKeysWithCaller to assert on both operation details and -// the Loader method chain that triggered each cache event. -func (f *FakeLoaderCache) GetLogWithCaller() []CacheLogEntry { - f.mu.RLock() - defer f.mu.RUnlock() - logCopy := make([]CacheLogEntry, len(f.log)) - copy(logCopy, f.log) - return logCopy -} - -// ClearLog clears the cache operation log -func (f *FakeLoaderCache) ClearLog() { - f.mu.Lock() - defer f.mu.Unlock() - f.log = make([]CacheLogEntry, 0) -} - -// TestFakeLoaderCache tests the cache implementation itself -func TestFakeLoaderCache(t *testing.T) { - ctx := context.Background() - cache := NewFakeLoaderCache() - - t.Run("SetAndGet", func(t *testing.T) { - // Test basic set and get - keys := []string{"key1", "key2", "key3"} - entries := []*resolve.CacheEntry{ - {Key: "key1", Value: []byte("value1")}, - {Key: "key2", Value: []byte("value2")}, - {Key: "key3", Value: []byte("value3")}, - } - - err := cache.Set(ctx, entries, 0) // No TTL - require.NoError(t, err) - - // Get all keys - result, err := cache.Get(ctx, keys) - require.NoError(t, err) - require.Len(t, result, 3) - assert.NotNil(t, result[0]) - assert.Equal(t, "value1", string(result[0].Value)) - assert.NotNil(t, result[1]) - assert.Equal(t, "value2", string(result[1].Value)) - assert.NotNil(t, result[2]) - assert.Equal(t, "value3", string(result[2].Value)) - - // Get partial 
keys - result, err = cache.Get(ctx, []string{"key2", "key4", "key1"}) - require.NoError(t, err) - require.Len(t, result, 3) - assert.NotNil(t, result[0]) - assert.Equal(t, "value2", string(result[0].Value)) - assert.Nil(t, result[1]) // key4 doesn't exist - assert.NotNil(t, result[2]) - assert.Equal(t, "value1", string(result[2].Value)) - }) - - t.Run("Delete", func(t *testing.T) { - // Set some keys - entries := []*resolve.CacheEntry{ - {Key: "del1", Value: []byte("v1")}, - {Key: "del2", Value: []byte("v2")}, - {Key: "del3", Value: []byte("v3")}, - } - err := cache.Set(ctx, entries, 0) - require.NoError(t, err) - - // Delete some keys - err = cache.Delete(ctx, []string{"del1", "del3"}) - require.NoError(t, err) - - // Check remaining keys - result, err := cache.Get(ctx, []string{"del1", "del2", "del3"}) - require.NoError(t, err) - assert.Nil(t, result[0]) // del1 was deleted - assert.NotNil(t, result[1]) // del2 still exists - assert.Equal(t, "v2", string(result[1].Value)) - assert.Nil(t, result[2]) // del3 was deleted - }) - - t.Run("TTL", func(t *testing.T) { - // Set with 50ms TTL - entries := []*resolve.CacheEntry{ - {Key: "ttl1", Value: []byte("expire1")}, - {Key: "ttl2", Value: []byte("expire2")}, - } - err := cache.Set(ctx, entries, 50*time.Millisecond) - require.NoError(t, err) - - // Immediately get - should exist - result, err := cache.Get(ctx, []string{"ttl1", "ttl2"}) - require.NoError(t, err) - assert.NotNil(t, result[0]) - assert.Equal(t, "expire1", string(result[0].Value)) - assert.NotNil(t, result[1]) - assert.Equal(t, "expire2", string(result[1].Value)) - - // Wait for expiration - time.Sleep(60 * time.Millisecond) - - // Get again - should be nil - result, err = cache.Get(ctx, []string{"ttl1", "ttl2"}) - require.NoError(t, err) - assert.Nil(t, result[0]) - assert.Nil(t, result[1]) - }) - - t.Run("MixedTTL", func(t *testing.T) { - // Set some with TTL, some without - err := cache.Set(ctx, []*resolve.CacheEntry{{Key: "perm1", Value: 
[]byte("permanent")}}, 0) - require.NoError(t, err) - - err = cache.Set(ctx, []*resolve.CacheEntry{{Key: "temp1", Value: []byte("temporary")}}, 50*time.Millisecond) - require.NoError(t, err) - - // Wait for temporary to expire - time.Sleep(60 * time.Millisecond) - - // Check both - result, err := cache.Get(ctx, []string{"perm1", "temp1"}) - require.NoError(t, err) - assert.NotNil(t, result[0]) - assert.Equal(t, "permanent", string(result[0].Value)) // Still exists - assert.Nil(t, result[1]) // Expired - }) - - t.Run("ThreadSafety", func(t *testing.T) { - // Test concurrent access - done := make(chan bool) - - // Writer goroutine - go func() { - for i := 0; i < 100; i++ { - key := fmt.Sprintf("concurrent_%d", i) - value := fmt.Sprintf("value_%d", i) - err := cache.Set(ctx, []*resolve.CacheEntry{{Key: key, Value: []byte(value)}}, 0) - assert.NoError(t, err) - } - done <- true - }() - - // Reader goroutine - go func() { - for i := 0; i < 100; i++ { - key := fmt.Sprintf("concurrent_%d", i%50) - _, err := cache.Get(ctx, []string{key}) - assert.NoError(t, err) - } - done <- true - }() - - // Deleter goroutine - go func() { - for i := 0; i < 50; i++ { - key := fmt.Sprintf("concurrent_%d", i*2) - err := cache.Delete(ctx, []string{key}) - assert.NoError(t, err) - } - done <- true - }() - - // Wait for all goroutines - <-done - <-done - <-done - }) - - t.Run("ResultLengthMatchesKeysLength", func(t *testing.T) { - // Test that result length always matches input keys length - - // Set some data - err := cache.Set(ctx, []*resolve.CacheEntry{ - {Key: "exist1", Value: []byte("data1")}, - {Key: "exist3", Value: []byte("data3")}, - }, 0) - require.NoError(t, err) - - // Request mix of existing and non-existing keys - keys := []string{"exist1", "missing1", "exist3", "missing2", "missing3"} - result, err := cache.Get(ctx, keys) - require.NoError(t, err) - - // Verify length matches exactly - assert.Len(t, result, len(keys), "Result length must match keys length") - assert.Len(t, 
result, 5, "Should return exactly 5 results") - - // Verify correct values - assert.NotNil(t, result[0]) - assert.Equal(t, "data1", string(result[0].Value)) // exist1 - assert.Nil(t, result[1]) // missing1 - assert.NotNil(t, result[2]) - assert.Equal(t, "data3", string(result[2].Value)) // exist3 - assert.Nil(t, result[3]) // missing2 - assert.Nil(t, result[4]) // missing3 - - // Test with all missing keys - allMissingKeys := []string{"missing4", "missing5", "missing6"} - result, err = cache.Get(ctx, allMissingKeys) - require.NoError(t, err) - assert.Len(t, result, 3, "Should return 3 results for 3 keys") - assert.Nil(t, result[0]) - assert.Nil(t, result[1]) - assert.Nil(t, result[2]) - - // Test with empty keys - result, err = cache.Get(ctx, []string{}) - require.NoError(t, err) - assert.Len(t, result, 0, "Should return empty slice for empty keys") - }) -} - -// ============================================================================= -// L1/L2 CACHE END-TO-END TESTS -// ============================================================================= -// -// These tests verify the L1 (per-request in-memory) and L2 (external cross-request) -// caching behavior in a federated GraphQL setup. -// -// L1 Cache: Prevents redundant fetches for the same entity within a single request -// L2 Cache: Shares entity data across requests via external cache (e.g., Redis) -// -// Lookup Order (entity fetches): L1 -> L2 -> Subgraph Fetch -// Lookup Order (root fetches): L2 -> Subgraph Fetch (no L1) - -func TestL1CacheReducesHTTPCalls(t *testing.T) { - // This test demonstrates L1 cache behavior with entity fetches. 
- // - // Query structure: - // - me: root query to accounts service → returns User 1234 {id, username} - // - me.reviews: entity fetch from reviews service → returns reviews - // - me.reviews.product: entity fetch from products service → returns products - // - me.reviews.product.reviews: entity fetch from reviews service → returns reviews - // - me.reviews.product.reviews.authorWithoutProvides: entity fetch from accounts → returns User 1234 - // - // Note: The `me` root query does NOT populate L1 cache because L1 cache only works - // for entity fetches (RequiresEntityFetch=true). Root queries don't qualify. - // - // With L1 enabled: Both `me` (root) and `authorWithoutProvides` (entity) make calls. - // L1 cache doesn't help here because `me` is a root query, not an entity fetch. - // With L1 disabled: Same behavior - 2 accounts calls. - // - // L1 cache DOES help when the same entity is fetched multiple times through - // entity fetches within a single request (e.g., self-referential entities). 
- - query := `query { - me { - id - username - reviews { - body - product { - upc - reviews { - authorWithoutProvides { - id - username - } - } - } - } - } - }` - - expectedResponse := `{"data":{"me":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control.","product":{"upc":"top-1","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","product":{"upc":"top-2","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}}]}}}` - - t.Run("L1 enabled - entity fetches use L1 cache", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // Both `me` (root query) and `authorWithoutProvides` (entity fetch) call accounts. - // L1 cache doesn't help because `me` is a root query, not an entity fetch. - // Root queries don't populate L1 cache (RequiresEntityFetch=false). 
- accountsCalls := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls, - "Both me (root query) and authorWithoutProvides (entity fetch) call accounts") - }) - - t.Run("L1 disabled - more accounts calls without cache", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // KEY ASSERTION: With L1 disabled, 2 accounts calls! - // The authorWithoutProvides.username requires another fetch since L1 is disabled. - accountsCalls := tracker.GetCount(accountsHost) - assert.Equal(t, 2, accountsCalls, - "With L1 disabled, should make 2 accounts calls (no cache reuse)") - }) -} - -func TestL1CacheReducesHTTPCallsInterface(t *testing.T) { - // This test demonstrates L1 cache behavior with interface return types. 
- // - // Query structure: - // - meInterface: root query to accounts service → returns User 1234 via Identifiable interface - // - meInterface.reviews: entity fetch from reviews service → returns reviews - // - meInterface.reviews.product: entity fetch from products service → returns products - // - meInterface.reviews.product.reviews: entity fetch from reviews service → returns reviews - // - meInterface.reviews.product.reviews.authorWithoutProvides: entity fetch from accounts → returns User 1234 - // - // This tests that interface return types properly build cache key templates - // for all entity types that implement the interface. - - query := `query { - meInterface { - ... on User { - id - username - reviews { - body - product { - upc - reviews { - authorWithoutProvides { - id - username - } - } - } - } - } - } - }` - - expectedResponse := `{"data":{"meInterface":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control.","product":{"upc":"top-1","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","product":{"upc":"top-2","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}}]}}}` - - t.Run("L1 enabled - interface entity fetches use L1 cache", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - 
accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // Same behavior as non-interface: root query + entity fetch both call accounts - accountsCalls := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls, - "Interface field should behave same as object field for L1 caching") - }) - - t.Run("L1 disabled - more accounts calls without cache", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // KEY ASSERTION: With L1 disabled, 2 accounts calls! - accountsCalls := tracker.GetCount(accountsHost) - assert.Equal(t, 2, accountsCalls, - "With L1 disabled, should make 2 accounts calls (no cache reuse)") - }) -} - -func TestL1CacheReducesHTTPCallsUnion(t *testing.T) { - // This test demonstrates L1 cache behavior with union return types. 
	//
	// Query structure:
	// - meUnion: root query to accounts service → returns User 1234 via MeUnion union
	// - meUnion.reviews: entity fetch from reviews service → returns reviews
	// - meUnion.reviews.product: entity fetch from products service → returns products
	// - meUnion.reviews.product.reviews: entity fetch from reviews service → returns reviews
	// - meUnion.reviews.product.reviews.authorWithoutProvides: entity fetch from accounts → returns User 1234
	//
	// This tests that union return types properly build cache key templates
	// for all entity types that are members of the union.

	query := `query {
		meUnion {
			... on User {
				id
				username
				reviews {
					body
					product {
						upc
						reviews {
							authorWithoutProvides {
								id
								username
							}
						}
					}
				}
			}
		}
	}`

	expectedResponse := `{"data":{"meUnion":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control.","product":{"upc":"top-1","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","product":{"upc":"top-2","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}}]}}}`

	t.Run("L1 enabled - union entity fetches use L1 cache", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: true,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		accountsHost := accountsURLParsed.Host

		tracker.Reset()
		out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)

		assert.Equal(t, expectedResponse, string(out))

		// Same behavior as the object-field test: only 1 accounts call with L1
		// enabled (the entity fetch is served from the per-request cache).
		accountsCalls := tracker.GetCount(accountsHost)
		assert.Equal(t, 1, accountsCalls,
			"Union field should behave same as object field for L1 caching")
	})

	t.Run("L1 disabled - more accounts calls without cache", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: false,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		accountsHost := accountsURLParsed.Host

		tracker.Reset()
		out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)

		assert.Equal(t, expectedResponse, string(out))

		// KEY ASSERTION: With L1 disabled, 2 accounts calls!
		accountsCalls := tracker.GetCount(accountsHost)
		assert.Equal(t, 2, accountsCalls,
			"With L1 disabled, should make 2 accounts calls (no cache reuse)")
	})
}

func TestL1CacheSelfReferentialEntity(t *testing.T) {
	// This test verifies that self-referential entities don't cause
	// stack overflow when L1 cache is enabled.
	//
	// Background: When an entity type has a field that returns the same type
	// (e.g., User.sameUserReviewers returning [User]), and L1 cache stores
	// a pointer to the entity, both key.Item and key.FromCache can point to
	// the same memory location. Without a fix, calling MergeValues(ptr, ptr)
	// causes infinite recursion and stack overflow.
	//
	// The sameUserReviewers field has @requires(fields: "username") which forces
	// sequential execution: the User entity is first fetched from accounts
	// (populating L1), then sameUserReviewers is resolved, returning the same
	// User entity that's already in L1 cache.

	query := `query {
		topProducts {
			reviews {
				authorWithoutProvides {
					id
					username
					sameUserReviewers {
						id
						username
					}
				}
			}
		}
	}`

	// This response shows User 1234 appearing both at authorWithoutProvides level
	// and inside sameUserReviewers (which returns the same user for testing)
	expectedResponse := `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}`

	t.Run("self-referential entity should not cause stack overflow", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: true,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// This should complete without stack overflow
		// Before the fix, this would crash with "fatal error: stack overflow"
		out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)

		assert.Equal(t, expectedResponse, string(out))
	})
}

func TestL2CacheOnly(t *testing.T) {
	t.Run("L2 enabled - miss then hit across requests", func(t *testing.T) {
		defaultCache := NewFakeLoaderCache()
		caches := map[string]resolve.LoaderCache{
			"default": defaultCache,
		}

		// Create HTTP client with tracking
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{
			Transport: tracker,
		}

		// Enable L2 cache only
		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: false,
			EnableL2Cache: true,
		}

		// Enable entity caching for L2 tests (opt-in per-subgraph caching)
		subgraphCachingConfigs := engine.SubgraphCachingConfigs{
			{
				SubgraphName: "products",
				RootFieldCaching: plan.RootFieldCacheConfigurations{
					{TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false},
				},
			},
			{
				SubgraphName: "reviews",
				EntityCaching: plan.EntityCacheConfigurations{
					{TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false},
				},
			},
			{
				SubgraphName: "accounts",
				EntityCaching: plan.EntityCacheConfigurations{
					{TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false},
				},
			},
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withCachingLoaderCache(caches),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
			withSubgraphEntityCachingConfigs(subgraphCachingConfigs),
		))
		t.Cleanup(setup.Close)
		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames for tracking
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL)
		reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL)
		accountsHost := accountsURLParsed.Host
		productsHost := productsURLParsed.Host
		reviewsHost := reviewsURLParsed.Host

		// First query - should miss cache
		defaultCache.ClearLog()
		tracker.Reset()
		resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp))

		logAfterFirst := defaultCache.GetLog()
		// Cache operations: get/set for Query.topProducts, Product entities, User entities = 6 operations
		assert.Equal(t, 6, len(logAfterFirst), "Should have exactly 6 cache operations (get/set for Query, Products, Users)")

		// Verify the exact cache access log (order may vary for keys within each operation)
		wantLogFirst := []CacheLogEntry{
			// Root field Query.topProducts
			{
				Operation: "get",
				Keys:      []string{`{"__typename":"Query","field":"topProducts"}`},
				Hits:      []bool{false},
			},
			{
				Operation: "set",
				Keys:      []string{`{"__typename":"Query","field":"topProducts"}`},
			},
			// Product entity fetches (reviews data for each product)
			{
				Operation: "get",
				Keys: []string{
					`{"__typename":"Product","key":{"upc":"top-1"}}`,
					`{"__typename":"Product","key":{"upc":"top-2"}}`,
				},
				Hits: []bool{false, false},
			},
			{
				Operation: "set",
				Keys: []string{
					`{"__typename":"Product","key":{"upc":"top-1"}}`,
					`{"__typename":"Product","key":{"upc":"top-2"}}`,
				},
			},
			// User entity fetches (author data)
			{
				Operation: "get",
				Keys: []string{
					`{"__typename":"User","key":{"id":"1234"}}`,
				},
				Hits: []bool{false},
			},
			{
				Operation: "set",
				Keys: []string{
					`{"__typename":"User","key":{"id":"1234"}}`,
				},
			},
		}
		assert.Equal(t, sortCacheLogKeys(wantLogFirst), sortCacheLogKeys(logAfterFirst), "First query cache log should match expected")

		// Verify subgraph calls for first query
		productsCallsFirst := tracker.GetCount(productsHost)
		reviewsCallsFirst := tracker.GetCount(reviewsHost)
		accountsCallsFirst := tracker.GetCount(accountsHost)
		assert.Equal(t, 1, productsCallsFirst, "First query should call products subgraph exactly once")
		assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph exactly once")
		assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph for User entity resolution")

		// Second query - all fetches should hit cache
		defaultCache.ClearLog()
		tracker.Reset()
		resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp))

		// Verify L2 cache hits
		logAfterSecond := defaultCache.GetLog()
		// All cache operations should be gets with hits: Query.topProducts, Product entities, User entities
		assert.Equal(t, 3, len(logAfterSecond), "Second query should have 3 cache get operations (all hits)")

		// Verify the exact cache access log for second query (all hits)
		wantLogSecond := []CacheLogEntry{
			// Root field Query.topProducts - HIT
			{
				Operation: "get",
				Keys:      []string{`{"__typename":"Query","field":"topProducts"}`},
				Hits:      []bool{true},
			},
			// Product entity fetches - HITS
			{
				Operation: "get",
				Keys: []string{
					`{"__typename":"Product","key":{"upc":"top-1"}}`,
					`{"__typename":"Product","key":{"upc":"top-2"}}`,
				},
				Hits: []bool{true, true},
			},
			// User entity fetches - HITS
			{
				Operation: "get",
				Keys: []string{
					`{"__typename":"User","key":{"id":"1234"}}`,
				},
				Hits: []bool{true},
			},
		}
		assert.Equal(t, sortCacheLogKeys(wantLogSecond), sortCacheLogKeys(logAfterSecond), "Second query cache log should match expected (all hits)")

		// Verify subgraph calls for second query - all should be cached
		productsCallsSecond := tracker.GetCount(productsHost)
		reviewsCallsSecond := tracker.GetCount(reviewsHost)
		accountsCallsSecond := tracker.GetCount(accountsHost)
		assert.Equal(t, 0, productsCallsSecond, "Second query should not call products subgraph (root field cache hit)")
		assert.Equal(t, 0, reviewsCallsSecond, "Second query should not call reviews subgraph (entity cache hit)")
		assert.Equal(t, 0, accountsCallsSecond, "Second query should not call accounts subgraph (entity cache hit)")
	})

	t.Run("L2 disabled - no external cache operations", func(t *testing.T) {
		defaultCache := NewFakeLoaderCache()
		caches := map[string]resolve.LoaderCache{
			"default": defaultCache,
		}

		// Create HTTP client with tracking
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{
			Transport: tracker,
		}

		// Disable L2 cache
		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: false,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withCachingLoaderCache(caches),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)
		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// First query
		defaultCache.ClearLog()
		tracker.Reset()
		resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp))

		// Verify no cache operations
		log := defaultCache.GetLog()
		assert.Empty(t, log, "No L2 cache operations should occur when L2 is disabled")
	})
}

func TestL1L2CacheCombined(t *testing.T) {
	t.Run("L1+L2 enabled - L1 within request, L2 across requests", func(t *testing.T) {
		defaultCache := NewFakeLoaderCache()
		caches := map[string]resolve.LoaderCache{
			"default": defaultCache,
		}

		// Create HTTP client with tracking
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{
			Transport: tracker,
		}

		// Enable both L1 and L2 cache
		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: true,
			EnableL2Cache: true,
		}

		// Enable entity caching for L2 tests (opt-in per-entity caching)
		// Configure caching per-subgraph with explicit subgraph names
		subgraphCachingConfigs := engine.SubgraphCachingConfigs{
			{
				SubgraphName: "products",
				RootFieldCaching: plan.RootFieldCacheConfigurations{
					{TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false},
				},
			},
			{
				SubgraphName: "reviews",
				EntityCaching: plan.EntityCacheConfigurations{
					{TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false},
				},
			},
			{
				SubgraphName: "accounts",
				EntityCaching: plan.EntityCacheConfigurations{
					{TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false},
				},
			},
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withCachingLoaderCache(caches),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
			withSubgraphEntityCachingConfigs(subgraphCachingConfigs),
		))
		t.Cleanup(setup.Close)
		gqlClient := NewGraphqlClient(http.DefaultClient)
ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames for tracking - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - productsHost := productsURLParsed.Host - reviewsHost := reviewsURLParsed.Host - - // First query - L1 helps within request, L2 populates for later - defaultCache.ClearLog() - tracker.Reset() - resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - logAfterFirst := defaultCache.GetLog() - // Cache operations: get/set for Query.topProducts, Product entities, User entities = 6 operations - assert.Equal(t, 6, len(logAfterFirst), "Should have exactly 6 cache operations (get/set for Query, Products, Users)") - - // Verify the exact cache access log (order may vary for keys within each operation) - wantLogFirst := []CacheLogEntry{ - // Root field Query.topProducts - { - Operation: "get", - Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, - Hits: []bool{false}, - }, - { - Operation: "set", - Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, - }, - // Product entity fetches (reviews data for each product) - { - Operation: "get", - Keys: []string{ - `{"__typename":"Product","key":{"upc":"top-1"}}`, - `{"__typename":"Product","key":{"upc":"top-2"}}`, - }, - Hits: []bool{false, false}, - }, - { - Operation: "set", - Keys: []string{ - 
`{"__typename":"Product","key":{"upc":"top-1"}}`, - `{"__typename":"Product","key":{"upc":"top-2"}}`, - }, - }, - // User entity fetches (author data) - { - Operation: "get", - Keys: []string{ - `{"__typename":"User","key":{"id":"1234"}}`, - }, - Hits: []bool{false}, - }, - { - Operation: "set", - Keys: []string{ - `{"__typename":"User","key":{"id":"1234"}}`, - }, - }, - } - assert.Equal(t, sortCacheLogKeys(wantLogFirst), sortCacheLogKeys(logAfterFirst), "First query cache log should match expected") - - // Verify subgraph calls for first query - productsCallsFirst := tracker.GetCount(productsHost) - reviewsCallsFirst := tracker.GetCount(reviewsHost) - accountsCallsFirst := tracker.GetCount(accountsHost) - assert.Equal(t, 1, productsCallsFirst, "First query should call products subgraph exactly once") - assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph exactly once") - assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph for User entity resolution") - - // Second query - new request means fresh L1, but L2 should hit - defaultCache.ClearLog() - tracker.Reset() - resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - logAfterSecond := defaultCache.GetLog() - // All cache operations should be gets with hits: Query.topProducts, Product entities, User entities - assert.Equal(t, 3, len(logAfterSecond), "Second query should have 3 cache get operations (all hits)") - - // Verify the exact cache access log for second query (all hits) - wantLogSecond := []CacheLogEntry{ - 
// Root field Query.topProducts - HIT - { - Operation: "get", - Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, - Hits: []bool{true}, - }, - // Product entity fetches - HITS - { - Operation: "get", - Keys: []string{ - `{"__typename":"Product","key":{"upc":"top-1"}}`, - `{"__typename":"Product","key":{"upc":"top-2"}}`, - }, - Hits: []bool{true, true}, - }, - // User entity fetches - HITS - { - Operation: "get", - Keys: []string{ - `{"__typename":"User","key":{"id":"1234"}}`, - }, - Hits: []bool{true}, - }, - } - assert.Equal(t, sortCacheLogKeys(wantLogSecond), sortCacheLogKeys(logAfterSecond), "Second query cache log should match expected (all hits)") - - // Verify no subgraph calls for second query (L2 cache hits) - productsCallsSecond := tracker.GetCount(productsHost) - reviewsCallsSecond := tracker.GetCount(reviewsHost) - accountsCallsSecond := tracker.GetCount(accountsHost) - assert.Equal(t, 0, productsCallsSecond, "Second query should not call products subgraph (L2 hit)") - assert.Equal(t, 0, reviewsCallsSecond, "Second query should not call reviews subgraph (L2 hit)") - assert.Equal(t, 0, accountsCallsSecond, "Second query should not call accounts subgraph (L2 hit)") - }) - - t.Run("L1+L2 - cross-request isolation: L1 per-request, L2 shared", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - // Create HTTP client with tracking - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{ - Transport: tracker, - } - - // Enable both L1 and L2 - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: true, - } - - // Enable entity caching for L2 tests (opt-in per-entity caching) - // Configure caching per-subgraph with explicit subgraph names - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "reviews", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", 
CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // First request - populates L2 cache - defaultCache.ClearLog() - tracker.Reset() - resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - logAfterFirst := defaultCache.GetLog() - productKeys := []string{ - `{"__typename":"Product","key":{"upc":"top-1"}}`, - `{"__typename":"Product","key":{"upc":"top-2"}}`, - } - userKeys := []string{ - `{"__typename":"User","key":{"id":"1234"}}`, - } - wantFirstLog := []CacheLogEntry{ - // reviews subgraph _entities(Product) — L2 miss, first time seeing these products - {Operation: "get", Keys: productKeys, Hits: []bool{false, false}}, - // reviews subgraph _entities(Product) — store fetched product data in L2 - {Operation: "set", Keys: productKeys}, - // accounts subgraph _entities(User) — L2 miss, first time seeing this user - {Operation: "get", Keys: userKeys, Hits: 
[]bool{false}}, - // accounts subgraph _entities(User) — store fetched user data in L2 - {Operation: "set", Keys: userKeys}, - } - assert.Equal(t, sortCacheLogKeys(wantFirstLog), sortCacheLogKeys(logAfterFirst), "First request: L2 miss + set for Product and User") - - // Second request - L1 is fresh (new request), but L2 should provide data - defaultCache.ClearLog() - tracker.Reset() - resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - logAfterSecond := defaultCache.GetLog() - wantSecondLog := []CacheLogEntry{ - // reviews subgraph _entities(Product) — L2 hit, both products cached from first request - {Operation: "get", Keys: productKeys, Hits: []bool{true, true}}, - // accounts subgraph _entities(User) — L2 hit, user cached from first request (deduplicated: 1 unique user) - {Operation: "get", Keys: userKeys, Hits: []bool{true}}, - // No set operations — all data served from cache - } - assert.Equal(t, sortCacheLogKeys(wantSecondLog), sortCacheLogKeys(logAfterSecond), "Second request: all L2 cache hits, no sets") - - // No subgraph calls on second request — all entity data served from L2 cache - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - assert.Equal(t, 0, tracker.GetCount(reviewsURLParsed.Host), "Second request should skip reviews subgraph (Product L2 cache hit)") - assert.Equal(t, 0, tracker.GetCount(accountsURLParsed.Host), "Second request should skip accounts subgraph (User L2 cache hit)") - }) -} - -// 
TestPartialEntityCaching demonstrates that only explicitly configured entity types -// are cached. This test configures caching for Product but NOT for User, verifying -// the opt-in nature of the per-entity caching configuration. -func TestPartialEntityCaching(t *testing.T) { - t.Run("only configured entities are cached", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - // Create HTTP client with tracking - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{ - Transport: tracker, - } - - // Enable L2 cache - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: true, - } - - // PARTIAL CACHING: Only configure caching for Product in reviews subgraph, NOT for User in accounts - // This demonstrates the opt-in per-entity caching behavior - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "reviews", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - // Note: accounts subgraph is intentionally NOT configured - User entities should NOT be cached - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames for tracking - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - reviewsHost := reviewsURLParsed.Host - - // First query - Product entities should be cached, 
User entities should NOT - defaultCache.ClearLog() - tracker.Reset() - resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - // Only Product has L2 caching configured (reviews subgraph); User (accounts) does NOT. - // So we expect cache operations for Product only — no User cache activity at all. - productKeys := []string{ - `{"__typename":"Product","key":{"upc":"top-1"}}`, - `{"__typename":"Product","key":{"upc":"top-2"}}`, - } - logAfterFirst := defaultCache.GetLog() - wantFirstLog := []CacheLogEntry{ - // reviews subgraph _entities(Product) — L2 miss, first time seeing these products - {Operation: "get", Keys: productKeys, Hits: []bool{false, false}}, - // reviews subgraph _entities(Product) — store fetched product data in L2 - {Operation: "set", Keys: productKeys}, - // No User operations — accounts subgraph has no caching configured - } - assert.Equal(t, sortCacheLogKeys(wantFirstLog), sortCacheLogKeys(logAfterFirst), "First request: only Product entities have cache operations") - - // Both subgraphs called on first request (no cache to serve from) - assert.Equal(t, 1, tracker.GetCount(reviewsHost), "First query should call reviews subgraph") - assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts subgraph") - - // Second query - Product should hit cache, User should still be fetched from subgraph - defaultCache.ClearLog() - tracker.Reset() - resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - 
assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - logAfterSecond := defaultCache.GetLog() - wantSecondLog := []CacheLogEntry{ - // reviews subgraph _entities(Product) — L2 hit, both products cached from first request - {Operation: "get", Keys: productKeys, Hits: []bool{true, true}}, - // No User operations — accounts subgraph still has no caching configured - // No set operations — Product data served from cache - } - assert.Equal(t, sortCacheLogKeys(wantSecondLog), sortCacheLogKeys(logAfterSecond), "Second request: Product cache hits only") - - // Reviews subgraph skipped (Product served from cache), accounts still called (User not cached) - assert.Equal(t, 0, tracker.GetCount(reviewsHost), "Second query should skip reviews subgraph (Product cache hit)") - assert.Equal(t, 1, tracker.GetCount(accountsHost), "Second query should still call accounts subgraph (User NOT cached)") - }) -} - -// TestRootFieldCaching tests that root fields (like Query.topProducts) can be cached -// when explicitly configured with RootFieldCaching configuration. 
-func TestRootFieldCaching(t *testing.T) { - t.Run("root field caching enabled", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - // Create HTTP client with tracking - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{ - Transport: tracker, - } - - // Enable L2 cache - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: true, - } - - // Configure root field caching for Query.topProducts on products subgraph - // Also configure entity caching to compare behavior - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "products", - RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - { - SubgraphName: "reviews", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames for tracking - productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - productsHost := 
productsURLParsed.Host - reviewsHost := reviewsURLParsed.Host - accountsHost := accountsURLParsed.Host - - // First query - should miss cache for all: root field, entity types - defaultCache.ClearLog() - tracker.Reset() - resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - logAfterFirst := defaultCache.GetLog() - // Should have cache operations for: - // 1. Root field Query.topProducts (get + set = 2 operations) - // 2. Product entities (get + set = 2 operations) - // 3. User entities (get + set = 2 operations) - // Total: 6 operations - assert.Equal(t, 6, len(logAfterFirst), "First query should have 6 cache operations (get+set for root field, Product, User)") - - // Verify first query calls all subgraphs - productsCallsFirst := tracker.GetCount(productsHost) - reviewsCallsFirst := tracker.GetCount(reviewsHost) - accountsCallsFirst := tracker.GetCount(accountsHost) - assert.Equal(t, 1, productsCallsFirst, "First query should call products subgraph") - assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph") - assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph") - - // Second query - should hit cache for root field and entities - defaultCache.ClearLog() - tracker.Reset() - resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth 
control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - logAfterSecond := defaultCache.GetLog() - wantSecondLog := []CacheLogEntry{ - // products subgraph Query.topProducts — root field L2 hit, cached from first request - {Operation: "get", Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, Hits: []bool{true}}, - // reviews subgraph _entities(Product) — L2 hit, both products cached from first request - {Operation: "get", Keys: []string{`{"__typename":"Product","key":{"upc":"top-1"}}`, `{"__typename":"Product","key":{"upc":"top-2"}}`}, Hits: []bool{true, true}}, - // accounts subgraph _entities(User) — L2 hit, user cached from first request (1 unique user) - {Operation: "get", Keys: []string{`{"__typename":"User","key":{"id":"1234"}}`}, Hits: []bool{true}}, - // No set operations — all data served from cache - } - assert.Equal(t, sortCacheLogKeys(wantSecondLog), sortCacheLogKeys(logAfterSecond), "Second query: all cache hits, no sets") - - // All subgraphs skipped on second query (everything served from cache) - assert.Equal(t, 0, tracker.GetCount(productsHost), "Second query should skip products subgraph (root field cache hit)") - assert.Equal(t, 0, tracker.GetCount(reviewsHost), "Second query should skip reviews subgraph (entity cache hit)") - assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts subgraph (entity cache hit)") - }) - - t.Run("root field caching NOT enabled - subgraph still called", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - - // Create HTTP client with tracking - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{ - Transport: tracker, - } - - // Enable L2 cache - 
cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: true, - } - - // Only configure entity caching, NOT root field caching - // This demonstrates opt-in behavior: root fields are NOT cached unless configured - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "reviews", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, - }, - }, - // Note: products subgraph has NO caching config for Query.topProducts - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames for tracking - productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) - productsHost := productsURLParsed.Host - - // First query - tracker.Reset() - resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - productsCallsFirst := tracker.GetCount(productsHost) - assert.Equal(t, 1, productsCallsFirst, "First query should 
call products subgraph") - - // Second query - products subgraph should still be called because root field is NOT cached - tracker.Reset() - resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) - - // KEY ASSERTION: Products subgraph IS called on second query because root field is NOT cached - productsCallsSecond := tracker.GetCount(productsHost) - assert.Equal(t, 1, productsCallsSecond, "Second query SHOULD call products subgraph (root field NOT cached)") - }) -} - -// ============================================================================= -// L1 CACHE TESTS FOR LIST FIELDS -// ============================================================================= -// -// These tests verify L1 caching behavior when root fields or child fields -// return lists of entities. - -func TestL1CacheChildFieldEntityList(t *testing.T) { - // This test verifies L1 cache behavior for User.sameUserReviewers: [User!]! - // which returns only the same user (self-reference). - // - // sameUserReviewers is defined in the reviews subgraph with @requires(fields: "username"), - // which means: - // 1. The gateway first resolves username from accounts (entity fetch) - // 2. Then calls reviews to get sameUserReviewers - // 3. sameUserReviewers returns User references (just IDs) - only the same user - // 4. The gateway must make entity fetches to accounts to resolve those users - // - // Query flow: - // 1. topProducts -> products subgraph (root query) - // 2. reviews -> reviews subgraph (entity fetch for Products) - // 3. 
authorWithoutProvides -> accounts subgraph (entity fetch for User 1234) - // - User 1234 is fetched and stored in L1 - // 4. sameUserReviewers -> reviews subgraph (after username resolved) - // - Returns [User 1234] as reference (same user only) - // 5. Entity resolution for sameUserReviewers -> accounts subgraph - // - User 1234 is 100% L1 HIT (already fetched in step 3) - // - THE ENTIRE ACCOUNTS CALL IS SKIPPED! - // - // With L1 enabled: The sameUserReviewers entity fetch is completely skipped - // because all entities are already in L1 cache. - - query := `query { - topProducts { - reviews { - authorWithoutProvides { - id - username - sameUserReviewers { - id - username - } - } - } - } - }` - - // User 1234's sameUserReviewers returns [User 1234] (only self) - expectedResponse := `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}` - - t.Run("L1 enabled - sameUserReviewers fetch entirely skipped via L1 cache", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: false, // Isolate L1 behavior - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - reviewsHost := reviewsURLParsed.Host - - tracker.Reset() - 
out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // With L1 enabled: - // - First accounts call fetches User 1234 for authorWithoutProvides (L1 miss, stored) - // - Reviews called for sameUserReviewers (returns [User 1234] reference) - // - sameUserReviewers entity resolution: User 1234 is 100% L1 HIT - // → accounts call is COMPLETELY SKIPPED! - accountsCalls := tracker.GetCount(accountsHost) - reviewsCalls := tracker.GetCount(reviewsHost) - - // Reviews should be called twice: once for Product entity (reviews field), - // once for sameUserReviewers (after username is resolved from accounts) - assert.Equal(t, 2, reviewsCalls, "Reviews subgraph called for Product.reviews and User.sameUserReviewers") - - // KEY ASSERTION: Only 1 accounts call! The sameUserReviewers entity resolution - // is completely skipped because User 1234 is already in L1 cache. - assert.Equal(t, 1, accountsCalls, - "With L1 enabled: only 1 accounts call (sameUserReviewers entity fetch skipped via L1)") - - }) - - t.Run("L1 disabled - accounts called for sameUserReviewers", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: false, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, 
expectedResponse, string(out)) - - // With L1 disabled: - // - First accounts call fetches User 1234 for authorWithoutProvides - // - Second accounts call for sameUserReviewers: User 1234 fetched again (no L1) - // Total: 2 accounts calls - accountsCalls := tracker.GetCount(accountsHost) - assert.Equal(t, 2, accountsCalls, - "With L1 disabled: 2 accounts calls (sameUserReviewers requires separate fetch)") - - }) -} - -func TestL1CacheNestedEntityListDeduplication(t *testing.T) { - // This test verifies L1 deduplication when the same entity appears - // at multiple levels in nested list queries using coReviewers. - // - // coReviewers is defined in the reviews subgraph with @requires(fields: "username"), - // so it triggers cross-subgraph entity resolution. - // - // Query flow: - // 1. topProducts -> products subgraph - // 2. reviews -> reviews subgraph (Product entity fetch) - // 3. authorWithoutProvides -> accounts (User 1234 fetched, stored in L1) - // 4. coReviewers -> reviews subgraph (after username resolved) - // - Returns [User 1234, User 7777] as references - // 5. Entity resolution for coReviewers -> accounts - // - User 1234 should be L1 HIT (already fetched in step 3) - // - User 7777 is L1 MISS (stored in L1) - // 6. coReviewers for User 1234 and User 7777 -> reviews subgraph - // 7. Entity resolution for nested coReviewers -> accounts - // - All users (1234, 7777) are already in L1! - // - // With L1 enabled: The nested coReviewers level should have 100% L1 hits, - // potentially skipping the accounts call entirely for that level. 
- - query := `query { - topProducts { - reviews { - authorWithoutProvides { - id - username - coReviewers { - id - username - coReviewers { - id - username - } - } - } - } - } - }` - - // User 1234's coReviewers: [User 1234, User 7777] - // User 7777's coReviewers: [User 7777, User 1234] - // Nested level repeats these patterns - expectedResponse := `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me"},{"id":"7777","username":"User 7777"}]},{"id":"7777","username":"User 7777","coReviewers":[{"id":"7777","username":"User 7777"},{"id":"1234","username":"Me"}]}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me"},{"id":"7777","username":"User 7777"}]},{"id":"7777","username":"User 7777","coReviewers":[{"id":"7777","username":"User 7777"},{"id":"1234","username":"Me"}]}]}}]}]}}` - - t.Run("L1 enabled - nested coReviewers benefits from L1 hits", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - cachingOpts := resolve.CachingOptions{ - EnableL1Cache: true, - EnableL2Cache: false, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(cachingOpts), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Extract hostnames - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - tracker.Reset() - out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - - assert.Equal(t, expectedResponse, string(out)) - - // With L1 enabled: - // - Call 1: 
authorWithoutProvides fetches User 1234 (miss, stored)
		// - Call 2: coReviewers entity resolution [User 1234 (hit), User 7777 (miss, stored)]
		// - Call 3: nested coReviewers entity resolution - all users are in L1!
		// This call should be fully served from L1 cache.
		accountsCalls := tracker.GetCount(accountsHost)
		// With L1 enabled, the nested coReviewers should be served from L1
		// Only 2 accounts calls needed because nested coReviewers is fully served from L1
		assert.Equal(t, 2, accountsCalls,
			"With L1 enabled: exactly 2 accounts calls (nested coReviewers served entirely from L1)")
	})

	t.Run("L1 disabled - more accounts calls without deduplication", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: false,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames so the tracker can attribute calls per subgraph.
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		accountsHost := accountsURLParsed.Host

		tracker.Reset()
		out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)

		assert.Equal(t, expectedResponse, string(out))

		// With L1 disabled:
		// - Call 1: authorWithoutProvides fetches User 1234
		// - Call 2: coReviewers entity resolution for User 1234 and User 7777 (no L1 dedup)
		// - Call 3: nested coReviewers entity resolution (no L1 dedup)
		accountsCalls := tracker.GetCount(accountsHost)
		// Without L1 cache, we need 3 accounts calls (no deduplication at nested level)
		assert.Equal(t, 3, accountsCalls,
			"With L1 disabled: exactly 3 accounts calls (no deduplication)")
	})
}

func TestL1CacheRootFieldEntityListPopulation(t *testing.T) {
	// This test verifies L1 cache behavior with a complex nested query starting
	// from a root field that returns a list of entities.
	//
	// Query flow:
	// 1. topProducts -> products subgraph (root query, returns list)
	// 2. reviews -> reviews subgraph (entity fetch for Products)
	// 3. authorWithoutProvides -> accounts subgraph (entity fetch for User 1234)
	//    - User 1234 is fetched and stored in L1
	// 4. sameUserReviewers -> reviews subgraph (after username resolved)
	//    - Returns [User 1234] as reference (same user only)
	// 5. Entity resolution for sameUserReviewers -> accounts subgraph
	//    - User 1234 is 100% L1 HIT (already fetched in step 3)
	//    - THE ENTIRE ACCOUNTS CALL IS SKIPPED!
	//
	// With L1 enabled: The sameUserReviewers entity fetch is completely skipped.
	// With L1 disabled: accounts is called twice (no deduplication).

	query := `query {
		topProducts {
			upc
			name
			reviews {
				body
				authorWithoutProvides {
					id
					username
					sameUserReviewers {
						id
						username
					}
				}
			}
		}
	}`

	expectedResponse := `{"data":{"topProducts":[{"upc":"top-1","name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"upc":"top-2","name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}`

	t.Run("L1 enabled - sameUserReviewers fetch skipped via L1 cache", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: true,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames so the tracker can attribute calls per subgraph.
		productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL)
		reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL)
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		productsHost := productsURLParsed.Host
		reviewsHost := reviewsURLParsed.Host
		accountsHost := accountsURLParsed.Host

		tracker.Reset()
		out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)

		assert.Equal(t, expectedResponse, string(out))

		// Query flow with L1 enabled:
		// 1. products subgraph: topProducts root query
		// 2. reviews subgraph: Product entity fetch for reviews
		// 3. accounts subgraph: User entity fetch for authorWithoutProvides (User 1234 stored in L1)
		// 4. reviews subgraph: sameUserReviewers (returns [User 1234])
		// 5. sameUserReviewers entity resolution: User 1234 is 100% L1 HIT → accounts call SKIPPED!
		productsCalls := tracker.GetCount(productsHost)
		reviewsCalls := tracker.GetCount(reviewsHost)
		accountsCalls := tracker.GetCount(accountsHost)

		assert.Equal(t, 1, productsCalls, "Should call products subgraph once for topProducts")
		assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice (Product.reviews + User.sameUserReviewers)")
		// KEY ASSERTION: Only 1 accounts call! sameUserReviewers entity resolution skipped via L1.
		assert.Equal(t, 1, accountsCalls,
			"With L1 enabled: only 1 accounts call (sameUserReviewers entity fetch skipped via L1)")

	})

	t.Run("L1 disabled - more accounts calls without L1 optimization", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: false,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames so the tracker can attribute calls per subgraph.
		productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL)
		reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL)
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		productsHost := productsURLParsed.Host
		reviewsHost := reviewsURLParsed.Host
		accountsHost := accountsURLParsed.Host

		tracker.Reset()
		out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)

		assert.Equal(t, expectedResponse, string(out))

		// Query flow with L1 disabled:
		// 1. products subgraph: topProducts root query
		// 2. reviews subgraph: Product entity fetch for reviews
		// 3. accounts subgraph: User entity fetch for authorWithoutProvides
		// 4. reviews subgraph: sameUserReviewers
		// 5. accounts subgraph: User entity fetch for sameUserReviewers (no L1 → must fetch again!)
		productsCalls := tracker.GetCount(productsHost)
		reviewsCalls := tracker.GetCount(reviewsHost)
		accountsCalls := tracker.GetCount(accountsHost)

		assert.Equal(t, 1, productsCalls, "Should call products subgraph once")
		assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice")
		// KEY ASSERTION: 2 accounts calls without L1 optimization
		assert.Equal(t, 2, accountsCalls,
			"With L1 disabled: 2 accounts calls (sameUserReviewers requires separate fetch)")

	})
}

func TestL1CacheRootFieldNonEntityWithNestedEntities(t *testing.T) {
	// This test verifies L1 cache behavior when a root field returns a NON-entity type
	// (Review) that contains nested entities (User via authorWithoutProvides).
	//
	// Key difference from TestL1CacheRootFieldEntityListPopulation:
	// - That test starts with topProducts -> [Product] where Product IS an entity (@key(fields: "upc"))
	// - This test starts with topReviews -> [Review] where Review is NOT an entity (no @key)
	// - Both prove L1 entity caching works for nested User entities
	//
	// Query flow:
	// 1. topReviews -> reviews subgraph (root query, returns [Review] — NOT an entity)
	// 2. authorWithoutProvides -> accounts subgraph (entity fetch for Users, stored in L1)
	// 3. sameUserReviewers -> reviews subgraph (after username resolved via @requires)
	// 4. Entity resolution for sameUserReviewers -> accounts subgraph
	//    - All Users are 100% L1 HITs (already fetched in step 2)
	//    - THE ENTIRE ACCOUNTS CALL IS SKIPPED!

	query := `query {
		topReviews {
			body
			authorWithoutProvides {
				id
				username
				sameUserReviewers {
					id
					username
				}
			}
		}
	}`

	expectedResponse := `{"data":{"topReviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}},{"body":"This is the last straw. Hat you will wear. 11/10","authorWithoutProvides":{"id":"7777","username":"User 7777","sameUserReviewers":[{"id":"7777","username":"User 7777"}]}},{"body":"Perfect summer hat.","authorWithoutProvides":{"id":"5678","username":"User 5678","sameUserReviewers":[{"id":"5678","username":"User 5678"}]}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"id":"8888","username":"User 8888","sameUserReviewers":[{"id":"8888","username":"User 8888"}]}}]}}`

	t.Run("L1 enabled - sameUserReviewers fetch skipped via L1 cache", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: true,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames so the tracker can attribute calls per subgraph.
		reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL)
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		reviewsHost := reviewsURLParsed.Host
		accountsHost := accountsURLParsed.Host

		tracker.Reset()
		out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)

		assert.Equal(t, expectedResponse, string(out))

		// Query flow with L1 enabled:
		// 1. reviews subgraph: topReviews root query (Review is NOT an entity)
		// 2. accounts subgraph: User entity fetch for authorWithoutProvides (Users stored in L1)
		// 3. reviews subgraph: sameUserReviewers (returns [User] references)
		// 4. sameUserReviewers entity resolution: all Users are L1 HITs → accounts call SKIPPED!
		reviewsCalls := tracker.GetCount(reviewsHost)
		accountsCalls := tracker.GetCount(accountsHost)

		assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice (topReviews + sameUserReviewers)")
		// KEY ASSERTION: Only 1 accounts call! sameUserReviewers entity resolution skipped via L1.
		assert.Equal(t, 1, accountsCalls,
			"With L1 enabled: only 1 accounts call (sameUserReviewers entity fetch skipped via L1)")
	})

	t.Run("L1 disabled - more accounts calls without L1 optimization", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: false,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames so the tracker can attribute calls per subgraph.
		reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL)
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		reviewsHost := reviewsURLParsed.Host
		accountsHost := accountsURLParsed.Host

		tracker.Reset()
		out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)

		assert.Equal(t, expectedResponse, string(out))

		// Query flow with L1 disabled:
		// 1. reviews subgraph: topReviews root query
		// 2. accounts subgraph: User entity fetch for authorWithoutProvides
		// 3. reviews subgraph: sameUserReviewers
		// 4. accounts subgraph: User entity fetch for sameUserReviewers (no L1 → must fetch again!)
		reviewsCalls := tracker.GetCount(reviewsHost)
		accountsCalls := tracker.GetCount(accountsHost)

		assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice")
		// KEY ASSERTION: 2 accounts calls without L1 optimization
		assert.Equal(t, 2, accountsCalls,
			"With L1 disabled: 2 accounts calls (sameUserReviewers requires separate fetch)")
	})
}

// =============================================================================
// CACHE ERROR HANDLING TESTS
// =============================================================================
//
// These tests verify that caches are NOT populated when subgraphs return errors.
// The cache should only store successful responses to prevent caching error states.

func TestCacheNotPopulatedOnErrors(t *testing.T) {
	// Query that triggers an error in accounts subgraph via error-user
	// The reviewWithError field returns a review with author ID "error-user"
	// which causes FindUserByID to return an error
	errorQuery := `query {
		reviewWithError {
			body
			authorWithoutProvides {
				id
				username
			}
		}
	}`

	// Expected error response - data is null due to non-nullable username field error propagation
	expectedErrorResponse := `{"errors":[{"message":"Failed to fetch from Subgraph 'accounts' at Path 'reviewWithError.authorWithoutProvides'."},{"message":"Cannot return null for non-nullable field 'User.username'.","path":["reviewWithError","authorWithoutProvides","username"]}],"data":{"reviewWithError":null}}`

	t.Run("L1 only - error response prevents cache population", func(t *testing.T) {
		// This test verifies that L1 cache is NOT populated when an error occurs.
		// If L1 was erroneously populated, the second query would not call accounts.
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: true,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames so the tracker can attribute calls per subgraph.
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL)
		accountsHost := accountsURLParsed.Host
		reviewsHost := reviewsURLParsed.Host

		// First query - should get error from accounts
		tracker.Reset()
		resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t)

		// Verify exact error response
		assert.Equal(t, expectedErrorResponse, string(resp))

		reviewsCallsFirst := tracker.GetCount(reviewsHost)
		accountsCallsFirst := tracker.GetCount(accountsHost)
		assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph once")
		assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph once")

		// Second query - L1 should NOT have cached the error, so accounts should be called again
		tracker.Reset()
		resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t)

		// Same error should be returned
		assert.Equal(t, expectedErrorResponse, string(resp))

		accountsCallsSecond := tracker.GetCount(accountsHost)
		// KEY ASSERTION: If L1 incorrectly cached the error, this would be 0
		assert.Equal(t, 1, accountsCallsSecond, "Second query should call accounts again (L1 should NOT cache errors)")
	})

	t.Run("L2 only - error response prevents cache population", func(t *testing.T) {
		// This test verifies that L2 cache is NOT populated when an error occurs.
		defaultCache := NewFakeLoaderCache()
		caches := map[string]resolve.LoaderCache{
			"default": defaultCache,
		}

		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		// Configure L2 caching for User entities
		subgraphCachingConfigs := engine.SubgraphCachingConfigs{
			{
				SubgraphName: "accounts",
				EntityCaching: plan.EntityCacheConfigurations{
					{TypeName: "User", CacheName: "default", TTL: 30 * time.Second},
				},
			},
		}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: false,
			EnableL2Cache: true,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withCachingLoaderCache(caches),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
			withSubgraphEntityCachingConfigs(subgraphCachingConfigs),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames so the tracker can attribute calls per subgraph.
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		accountsHost := accountsURLParsed.Host

		// First query - should get error from accounts
		defaultCache.ClearLog()
		tracker.Reset()
		resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t)

		// Verify exact error response
		assert.Equal(t, expectedErrorResponse, string(resp))

		accountsCallsFirst := tracker.GetCount(accountsHost)
		assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph once")

		// Verify exact cache log: only "get" with miss, NO "set"
		// Since the fetch had an error, cache population should be skipped entirely
		wantCacheLog := []CacheLogEntry{
			{
				Operation: "get",
				Keys:      []string{`{"__typename":"User","key":{"id":"error-user"}}`},
				Hits:      []bool{false},
			},
			// NO "set" entry - this is the key assertion
		}
		assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Cache log should only have 'get' miss, no 'set'")

		// Second query - L2 should NOT have cached the error, so accounts should be called again
		defaultCache.ClearLog()
		tracker.Reset()
		resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t)

		// Same error should be returned
		assert.Equal(t, expectedErrorResponse, string(resp))

		accountsCallsSecond := tracker.GetCount(accountsHost)
		assert.Equal(t, 1, accountsCallsSecond, "Second query should call accounts again (L2 should NOT cache errors)")

		// Second query should also have same cache log pattern (get miss, no set)
		assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Second query cache log should also have 'get' miss, no 'set'")
	})

	t.Run("L1 and L2 - error response prevents both caches", func(t *testing.T) {
		// This test verifies that both L1 and L2 caches are NOT populated when an error occurs.
		defaultCache := NewFakeLoaderCache()
		caches := map[string]resolve.LoaderCache{
			"default": defaultCache,
		}

		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		// Configure L2 caching for User entities
		subgraphCachingConfigs := engine.SubgraphCachingConfigs{
			{
				SubgraphName: "accounts",
				EntityCaching: plan.EntityCacheConfigurations{
					{TypeName: "User", CacheName: "default", TTL: 30 * time.Second},
				},
			},
		}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: true,
			EnableL2Cache: true,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withCachingLoaderCache(caches),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
			withSubgraphEntityCachingConfigs(subgraphCachingConfigs),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames so the tracker can attribute calls per subgraph.
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		accountsHost := accountsURLParsed.Host

		// First query - should get error from accounts
		defaultCache.ClearLog()
		tracker.Reset()
		resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t)

		// Verify exact error response
		assert.Equal(t, expectedErrorResponse, string(resp))

		accountsCallsFirst := tracker.GetCount(accountsHost)
		assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph once")

		// Verify exact cache log: only "get" with miss, NO "set"
		wantCacheLog := []CacheLogEntry{
			{
				Operation: "get",
				Keys:      []string{`{"__typename":"User","key":{"id":"error-user"}}`},
				Hits:      []bool{false},
			},
		}
		assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Cache log should only have 'get' miss, no 'set'")

		// Second query - neither L1 nor L2 should have cached the error
		defaultCache.ClearLog()
		tracker.Reset()
		resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t)

		// Same error should be returned
		assert.Equal(t, expectedErrorResponse, string(resp))

		accountsCallsSecond := tracker.GetCount(accountsHost)
		assert.Equal(t, 1, accountsCallsSecond, "Second query should call accounts again (neither L1 nor L2 should cache errors)")

		// Second query should also have same cache log pattern
		assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Second query cache log should also have 'get' miss, no 'set'")
	})

	t.Run("error does not pollute cache for subsequent success queries", func(t *testing.T) {
		// This test verifies that an error query doesn't pollute the cache
		// and that subsequent successful queries still work correctly.
		defaultCache := NewFakeLoaderCache()
		caches := map[string]resolve.LoaderCache{
			"default": defaultCache,
		}

		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		// Configure L2 caching for User entities
		subgraphCachingConfigs := engine.SubgraphCachingConfigs{
			{
				SubgraphName: "accounts",
				EntityCaching: plan.EntityCacheConfigurations{
					{TypeName: "User", CacheName: "default", TTL: 30 * time.Second},
				},
			},
		}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: true,
			EnableL2Cache: true,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withCachingLoaderCache(caches),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
			withSubgraphEntityCachingConfigs(subgraphCachingConfigs),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames so the tracker can attribute calls per subgraph.
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		accountsHost := accountsURLParsed.Host

		// First: Query that triggers an error
		defaultCache.ClearLog()
		tracker.Reset()
		resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t)

		// Verify exact error response
		assert.Equal(t, expectedErrorResponse, string(resp))

		accountsCallsError := tracker.GetCount(accountsHost)
		assert.Equal(t, 1, accountsCallsError, "Error query should call accounts")

		// Verify error-user was NOT cached (only get, no set)
		wantErrorCacheLog := []CacheLogEntry{
			{
				Operation: "get",
				Keys:      []string{`{"__typename":"User","key":{"id":"error-user"}}`},
				Hits:      []bool{false},
			},
		}
		assert.Equal(t, wantErrorCacheLog, defaultCache.GetLog(), "Error query cache log should only have 'get' miss, no 'set'")

		// Second: Query a successful user (User 1234 via me query)
		// Note: "me" is a root query, not an entity fetch, so it doesn't use L2 entity caching
		successQuery := `query {
			me {
				id
				username
			}
		}`
		expectedSuccessResponse := `{"data":{"me":{"id":"1234","username":"Me"}}}`

		defaultCache.ClearLog()
		tracker.Reset()
		resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, successQuery, nil, t)

		// Should succeed with exact expected response
		assert.Equal(t, expectedSuccessResponse, string(resp))

		// Note: Root queries (me) don't use L2 entity caching by default,
		// so the cache log should be empty for this query.
		// The important thing is that the previous error didn't pollute the cache.
		assert.Equal(t, 0, len(defaultCache.GetLog()), "Root query should not use L2 entity cache")

		// Third: Query the error user again - should still fail (not cached)
		defaultCache.ClearLog()
		tracker.Reset()
		resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t)

		assert.Equal(t, expectedErrorResponse, string(resp))
		accountsCallsErrorAgain := tracker.GetCount(accountsHost)
		assert.Equal(t, 1, accountsCallsErrorAgain, "Error query should call accounts again (error was not cached)")

		// Verify cache log still shows only get miss, no set
		assert.Equal(t, wantErrorCacheLog, defaultCache.GetLog(), "Third query cache log should still have 'get' miss, no 'set'")
	})
}

// TestL1CacheOptimizationReducesSubgraphCalls tests that the L1 cache optimization
// postprocessor (optimizeL1Cache) correctly identifies which fetches can benefit
// from L1 caching and sets UseL1Cache appropriately.
//
// The key insight is that L1 is only useful when:
// 1. A prior fetch can provide cached data (READ benefit)
// 2. A later fetch can consume cached data (WRITE benefit)
//
// This test verifies the end-to-end effect: when L1 optimization identifies
// matching entity types between fetches, it enables L1 caching, resulting in
// fewer subgraph calls.
func TestL1CacheOptimizationReducesSubgraphCalls(t *testing.T) {
	// This query demonstrates L1 optimization:
	// - Query.me returns User entity
	// - User.sameUserReviewers returns [User] entities
	// When L1 is enabled and optimized correctly:
	// - First User fetch (me) populates L1 cache
	// - Second User fetch (sameUserReviewers) hits L1 cache, SKIPS subgraph call
	//
	// The optimizeL1Cache postprocessor:
	// - Sets UseL1Cache=true on User fetches (they share the same entity type)
	// - Sets UseL1Cache=false on fetches with no matching entity types

	query := `query {
		me {
			id
			username
			sameUserReviewers {
				id
				username
			}
		}
	}`

	expectedResponse := `{"data":{"me":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}}`

	t.Run("L1 optimization enables cache hit between same entity type fetches", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: true,
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames so the tracker can attribute calls per subgraph.
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL)
		accountsHost := accountsURLParsed.Host
		reviewsHost := reviewsURLParsed.Host

		tracker.Reset()
		out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)

		assert.Equal(t, expectedResponse, string(out))

		// Query flow with L1 optimization:
		// 1. accounts subgraph: Query.me (root query, returns User 1234)
		//    - L1 cache populated with User 1234
		// 2. reviews subgraph: User.sameUserReviewers (returns [User 1234])
		// 3. accounts subgraph: User entity fetch for sameUserReviewers
		//    - User 1234 is 100% L1 HIT! This call is SKIPPED!
		accountsCalls := tracker.GetCount(accountsHost)
		reviewsCalls := tracker.GetCount(reviewsHost)

		// KEY ASSERTION: Only 1 accounts call!
		// Without L1 optimization, there would be 2 calls:
		// - First: Query.me
		// - Second: User entity resolution for sameUserReviewers
		// With L1 optimization, the second call is skipped because User 1234 is in L1 cache.
		assert.Equal(t, 1, accountsCalls,
			"L1 optimization: only 1 accounts call (sameUserReviewers resolved from L1 cache)")
		assert.Equal(t, 1, reviewsCalls,
			"Should call reviews subgraph once for User.sameUserReviewers")
	})

	t.Run("Without L1, same query requires more subgraph calls", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		cachingOpts := resolve.CachingOptions{
			EnableL1Cache: false, // L1 disabled
			EnableL2Cache: false,
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(cachingOpts),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Extract hostnames so the tracker can attribute calls per subgraph.
		accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL)
		reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL)
		accountsHost := accountsURLParsed.Host
		reviewsHost := reviewsURLParsed.Host

		tracker.Reset()
		out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)

		assert.Equal(t, expectedResponse, string(out))

		// Query flow WITHOUT L1:
		// 1. accounts subgraph: Query.me (root query)
		// 2. reviews subgraph: User.sameUserReviewers
		// 3. accounts subgraph: User entity fetch (NO L1 cache → must fetch!)
		accountsCalls := tracker.GetCount(accountsHost)
		reviewsCalls := tracker.GetCount(reviewsHost)

		// KEY ASSERTION: 2 accounts calls without L1!
		// This proves L1 optimization saves a subgraph call.
		assert.Equal(t, 2, accountsCalls,
			"Without L1: 2 accounts calls (sameUserReviewers requires separate fetch)")
		assert.Equal(t, 1, reviewsCalls,
			"Should call reviews subgraph once for User.sameUserReviewers")
	})
}

// withCacheAnalytics returns an option that enables cache analytics collection.
// NOTE(review): this comment line documents a function that is not adjacent to
// it — it appears to be a leftover; confirm the helper exists elsewhere.

// parseCacheAnalytics extracts and parses the X-Cache-Analytics JSON header,
// failing the test immediately if the header is missing or not valid JSON.
// NOTE(review): uses json.Unmarshal — confirm "encoding/json" is present in
// the file's import block (it is not visible in this chunk's imports).
func parseCacheAnalytics(t *testing.T, headers http.Header) resolve.CacheAnalyticsSnapshot {
	t.Helper()
	raw := headers.Get("X-Cache-Analytics")
	require.NotEmpty(t, raw, "X-Cache-Analytics header should be present")
	var snap resolve.CacheAnalyticsSnapshot
	err := json.Unmarshal([]byte(raw), &snap)
	require.NoError(t, err, "X-Cache-Analytics header should be valid JSON")
	return snap
}

// normalizeSnapshot makes a CacheAnalyticsSnapshot deterministically comparable by
// sorting EntityTypes, L1Reads, L2Reads, L1Writes, L2Writes, FieldHashes,
// ShadowComparisons, and MutationEvents, and by clearing run-dependent fields.
-func normalizeSnapshot(snap resolve.CacheAnalyticsSnapshot) resolve.CacheAnalyticsSnapshot { - // Sort EntityTypes by TypeName - if snap.EntityTypes != nil { - sorted := make([]resolve.EntityTypeInfo, len(snap.EntityTypes)) - copy(sorted, snap.EntityTypes) - sort.Slice(sorted, func(i, j int) bool { - return sorted[i].TypeName < sorted[j].TypeName - }) - snap.EntityTypes = sorted - } - - // Sort L1Reads and zero out non-deterministic CacheAgeMs - if snap.L1Reads != nil { - sorted := make([]resolve.CacheKeyEvent, len(snap.L1Reads)) - copy(sorted, snap.L1Reads) - for i := range sorted { - sorted[i].CacheAgeMs = 0 - } - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].CacheKey != sorted[j].CacheKey { - return sorted[i].CacheKey < sorted[j].CacheKey - } - return sorted[i].Kind < sorted[j].Kind - }) - snap.L1Reads = sorted - } - - // Sort L2Reads and zero out non-deterministic CacheAgeMs - if snap.L2Reads != nil { - sorted := make([]resolve.CacheKeyEvent, len(snap.L2Reads)) - copy(sorted, snap.L2Reads) - for i := range sorted { - sorted[i].CacheAgeMs = 0 - } - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].CacheKey != sorted[j].CacheKey { - return sorted[i].CacheKey < sorted[j].CacheKey - } - return sorted[i].Kind < sorted[j].Kind - }) - snap.L2Reads = sorted - } - - // Sort L1Writes - if snap.L1Writes != nil { - sorted := make([]resolve.CacheWriteEvent, len(snap.L1Writes)) - copy(sorted, snap.L1Writes) - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].CacheKey != sorted[j].CacheKey { - return sorted[i].CacheKey < sorted[j].CacheKey - } - return sorted[i].CacheLevel < sorted[j].CacheLevel - }) - snap.L1Writes = sorted - } - - // Sort L2Writes - if snap.L2Writes != nil { - sorted := make([]resolve.CacheWriteEvent, len(snap.L2Writes)) - copy(sorted, snap.L2Writes) - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].CacheKey != sorted[j].CacheKey { - return sorted[i].CacheKey < sorted[j].CacheKey - } - return sorted[i].CacheLevel < 
sorted[j].CacheLevel - }) - snap.L2Writes = sorted - } - - // Sort FieldHashes for deterministic comparison - if snap.FieldHashes != nil { - sorted := make([]resolve.EntityFieldHash, len(snap.FieldHashes)) - copy(sorted, snap.FieldHashes) - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].EntityType != sorted[j].EntityType { - return sorted[i].EntityType < sorted[j].EntityType - } - if sorted[i].FieldName != sorted[j].FieldName { - return sorted[i].FieldName < sorted[j].FieldName - } - if sorted[i].KeyRaw != sorted[j].KeyRaw { - return sorted[i].KeyRaw < sorted[j].KeyRaw - } - if sorted[i].KeyHash != sorted[j].KeyHash { - return sorted[i].KeyHash < sorted[j].KeyHash - } - return sorted[i].FieldHash < sorted[j].FieldHash - }) - snap.FieldHashes = sorted - } - - // Sort ShadowComparisons by CacheKey and zero out non-deterministic CacheAgeMs - if snap.ShadowComparisons != nil { - sorted := make([]resolve.ShadowComparisonEvent, len(snap.ShadowComparisons)) - copy(sorted, snap.ShadowComparisons) - for i := range sorted { - sorted[i].CacheAgeMs = 0 - } - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].CacheKey != sorted[j].CacheKey { - return sorted[i].CacheKey < sorted[j].CacheKey - } - return sorted[i].EntityType < sorted[j].EntityType - }) - snap.ShadowComparisons = sorted - } - - // Sort MutationEvents for deterministic comparison - if snap.MutationEvents != nil { - sorted := make([]resolve.MutationEvent, len(snap.MutationEvents)) - copy(sorted, snap.MutationEvents) - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].MutationRootField != sorted[j].MutationRootField { - return sorted[i].MutationRootField < sorted[j].MutationRootField - } - return sorted[i].EntityCacheKey < sorted[j].EntityCacheKey - }) - snap.MutationEvents = sorted - } - - // Zero out non-deterministic FetchTimings (DurationMs varies between runs) - snap.FetchTimings = nil - - // Normalize empty slices to nil for consistent comparison - // (JSON unmarshalling produces empty 
slices, expected literals produce nil)
	if len(snap.L1Reads) == 0 {
		snap.L1Reads = nil
	}
	if len(snap.L2Reads) == 0 {
		snap.L2Reads = nil
	}
	if len(snap.L1Writes) == 0 {
		snap.L1Writes = nil
	}
	if len(snap.L2Writes) == 0 {
		snap.L2Writes = nil
	}
	if len(snap.EntityTypes) == 0 {
		snap.EntityTypes = nil
	}
	if len(snap.FieldHashes) == 0 {
		snap.FieldHashes = nil
	}
	if len(snap.ErrorEvents) == 0 {
		snap.ErrorEvents = nil
	}
	if len(snap.ShadowComparisons) == 0 {
		snap.ShadowComparisons = nil
	}
	if len(snap.MutationEvents) == 0 {
		snap.MutationEvents = nil
	}

	return snap
}

// TestCacheAnalyticsE2E runs the federation gateway end-to-end and asserts the
// exact cache analytics snapshot (L1/L2 reads, writes, field hashes, entity
// types) emitted for L2-only, L1-only, combined L1+L2, and root-field caching
// configurations.
func TestCacheAnalyticsE2E(t *testing.T) {
	// Common cache key constants used across subtests
	const (
		keyProductTop1 = `{"__typename":"Product","key":{"upc":"top-1"}}`
		keyProductTop2 = `{"__typename":"Product","key":{"upc":"top-2"}}`
		keyTopProducts = `{"__typename":"Query","field":"topProducts"}`
		keyUser1234    = `{"__typename":"User","key":{"id":"1234"}}`
		keyMe          = `{"__typename":"Query","field":"me"}`
		dsAccounts     = "accounts"
		dsProducts     = "products"
		dsReviews      = "reviews"
	)

	// Field hash constants — xxhash of the rendered scalar field values.
	// These are deterministic because xxhash is seeded identically each time.
	const (
		hashProductNameTrilby uint64 = 1032923585965781586 // xxhash("Trilby")
		hashProductNameFedora uint64 = 2432227032303632641 // xxhash("Fedora")
		hashUserUsernameMe    uint64 = 4957449860898447395 // xxhash("Me")
	)

	// Entity key constants for field hash assertions
	const (
		entityKeyProductTop1 = `{"upc":"top-1"}`
		entityKeyProductTop2 = `{"upc":"top-2"}`
		entityKeyUser1234    = `{"id":"1234"}`
	)

	// Byte sizes of cached entities (measured from actual JSON marshalling)
	const (
		byteSizeProductTop1  = 177 // Product top-1 entity (reviews subgraph response)
		byteSizeProductTop2  = 233 // Product top-2 entity (reviews subgraph response)
		byteSizeTopProducts  = 127 // Query.topProducts root field (products subgraph response)
		byteSizeUser1234     = 49  // User 1234 entity (accounts subgraph response)
		byteSizeUser1234Full = 105 // User 1234 entity from L1 (includes sameUserReviewers data)
		byteSizeQueryMe      = 56  // Query.me root field (accounts subgraph response)
	)

	// Shared field hashes for the multi-upstream query (topProducts with reviews).
	// Product.name: 2 products (Trilby, Fedora) → 2 distinct hashes
	// User.username: 2 reviews both by "Me" → 2 identical hashes
	// All FieldSourceSubgraph by default (overridden in specific tests)
	multiUpstreamFieldHashes := []resolve.EntityFieldHash{
		{EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceSubgraph},
		{EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceSubgraph},
		{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph},
		{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph},
	}

	// L2 hit field hashes — same data but all sourced from L2 cache
	multiUpstreamFieldHashesL2 := []resolve.EntityFieldHash{
		{EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2},
		{EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2},
		{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2},
		{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2},
	}

	multiUpstreamEntityTypes := []resolve.EntityTypeInfo{
		{TypeName: "Product", Count: 2, UniqueKeys: 2},
		{TypeName: "User", Count: 2, UniqueKeys: 1},
	}

	// Standard subgraph caching configs used by L2 and L1+L2 tests
	multiUpstreamCachingConfigs := engine.SubgraphCachingConfigs{
		{
			SubgraphName: "products",
			RootFieldCaching: plan.RootFieldCacheConfigurations{
				{TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second},
			},
		},
		{
			SubgraphName: "reviews",
			EntityCaching: plan.EntityCacheConfigurations{
				{TypeName: "Product", CacheName: "default", TTL: 30 * time.Second},
			},
		},
		{
			SubgraphName: "accounts",
			EntityCaching: plan.EntityCacheConfigurations{
				{TypeName: "User", CacheName: "default", TTL: 30 * time.Second},
			},
		},
	}

	expectedResponseBody := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`

	t.Run("L2 miss then hit with analytics", func(t *testing.T) {
		defaultCache := NewFakeLoaderCache()
		caches := map[string]resolve.LoaderCache{
			"default": defaultCache,
		}

		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withCachingLoaderCache(caches),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}),
			withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// First query — all L2 misses, populates L2 cache
		tracker.Reset()
		resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, expectedResponseBody, string(resp))

		expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{
			L2Reads: []resolve.CacheKeyEvent{
				{CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first request, cache empty
				{CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first request, cache empty
				{CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts},  // L2 miss: root field not yet cached
				{CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts},      // L2 miss: User entity not yet cached (second review's User 1234 deduplicated in batch)
			},
			L2Writes: []resolve.CacheWriteEvent{
				{CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written after subgraph fetch on miss
				{CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written after subgraph fetch on miss
				{CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second},  // Root field written to L2 after fetch
				{CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second},         // User entity written after accounts fetch
			},
			FieldHashes: multiUpstreamFieldHashes,
			EntityTypes: multiUpstreamEntityTypes,
		})
		assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers)))

		// Second query — all L2 hits from populated cache
		tracker.Reset()
		resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, expectedResponseBody, string(resp))

		expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{
			L2Reads: []resolve.CacheKeyEvent{
				{CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // L2 hit: populated by Request 1
				{CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // L2 hit: populated by Request 1
				{CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts},  // L2 hit: root field cached by Request 1
				{CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234},         // L2 hit: User entity cached by Request 1 (second review's User 1234 deduplicated)
			},
			// No L2Writes: all served from cache, no fetches needed
			FieldHashes: multiUpstreamFieldHashesL2,
			EntityTypes: multiUpstreamEntityTypes,
		})
		assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers)))
	})

	t.Run("L1 cache analytics with entity reuse", func(t *testing.T) {
		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(resolve.CachingOptions{
				EnableL1Cache:        true,
				EnableL2Cache:        false,
				EnableCacheAnalytics: true,
			}),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Query that triggers L1 entity reuse:
		// 1. Query.me -> accounts subgraph -> returns User 1234 -> populates L1
		// 2. User.sameUserReviewers -> reviews subgraph -> returns [User 1234]
		// 3. Entity fetch for User 1234 -> L1 HIT (no subgraph call)
		query := `query {
			me {
				id
				username
				sameUserReviewers {
					id
					username
				}
			}
		}`

		tracker.Reset()
		resp, headers := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t)
		assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}}`, string(resp))

		expected := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{
			L1Reads: []resolve.CacheKeyEvent{
				// L1 hit: User 1234 was populated by Query.me root fetch, reused for sameUserReviewers
				{CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234Full},
			},
			L1Writes: []resolve.CacheWriteEvent{
				// Query.me root field written to L1 after accounts subgraph fetch
				{CacheKey: keyMe, EntityType: "Query", ByteSize: byteSizeQueryMe, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL1},
			},
			FieldHashes: []resolve.EntityFieldHash{
				// Both username entries show L1 source because the entity key resolves to
				// the L1 source recorded during the entity fetch L1 HIT
				{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL1}, // me.username: entity came from L1
				{EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL1}, // sameUserReviewers[0].username: same L1 entity
			},
			EntityTypes: []resolve.EntityTypeInfo{
				{TypeName: "User", Count: 2, UniqueKeys: 1}, // 2 User instances, but only 1 unique key (1234)
			},
		})
		assert.Equal(t, expected, normalizeSnapshot(parseCacheAnalytics(t, headers)))
	})

	t.Run("L1+L2 combined analytics", func(t *testing.T) {
		defaultCache := NewFakeLoaderCache()
		caches := map[string]resolve.LoaderCache{
			"default": defaultCache,
		}

		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withCachingLoaderCache(caches),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(resolve.CachingOptions{
				EnableL1Cache:        true,
				EnableL2Cache:        true,
				EnableCacheAnalytics: true,
			}),
			withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// First query — L2 misses (L1 is per-request, always fresh)
		tracker.Reset()
		resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, expectedResponseBody, string(resp))

		expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{
			L2Reads: []resolve.CacheKeyEvent{
				{CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first request, cache empty
				{CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first request, cache empty
				{CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts},  // L2 miss: root field not yet cached
				{CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts},      // L2 miss: User entity not yet cached (second review's User 1234 hits L1 after this fetch)
			},
			L2Writes: []resolve.CacheWriteEvent{
				{CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written after reviews subgraph fetch
				{CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written after reviews subgraph fetch
				{CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second},  // Root field written after products fetch
				{CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second},         // User entity written after accounts fetch
			},
			FieldHashes: multiUpstreamFieldHashes,
			EntityTypes: multiUpstreamEntityTypes,
		})
		assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers)))

		// Second query — L2 hits (L1 is per-request, reset between requests)
		tracker.Reset()
		resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, expectedResponseBody, string(resp))

		expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{
			L2Reads: []resolve.CacheKeyEvent{
				{CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // L2 hit: populated by Request 1
				{CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // L2 hit: populated by Request 1
				{CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts},  // L2 hit: root field cached by Request 1
				{CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234},         // L2 hit: User entity cached by Request 1 (second review's User 1234 hits L1)
			},
			// No L2Writes: all entities served from L2 cache
			FieldHashes: multiUpstreamFieldHashesL2,
			EntityTypes: multiUpstreamEntityTypes,
		})
		assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers)))
	})

	t.Run("root field with args - L2 analytics", func(t *testing.T) {
		// Tests that root field caching with arguments properly records L2 analytics events.
		// This covers the root field path in tryL2CacheLoad (no L1 keys branch).
		defaultCache := NewFakeLoaderCache()
		caches := map[string]resolve.LoaderCache{
			"default": defaultCache,
		}

		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		rootFieldArgsCachingConfigs := engine.SubgraphCachingConfigs{
			{
				SubgraphName: "accounts",
				RootFieldCaching: plan.RootFieldCacheConfigurations{
					{TypeName: "Query", FieldName: "user", CacheName: "default", TTL: 30 * time.Second},
				},
			},
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withCachingLoaderCache(caches),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}),
			withSubgraphEntityCachingConfigs(rootFieldArgsCachingConfigs),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		const (
			keyUserById1234  = `{"__typename":"Query","field":"user","args":{"id":"1234"}}`
			keyUserById5678  = `{"__typename":"Query","field":"user","args":{"id":"5678"}}`
			dsAccountsLocal  = "accounts"
			byteSizeUser1234 = 38 // {"user":{"id":"1234","username":"Me"}}
			byteSizeUser5678 = 45 // {"user":{"id":"5678","username":"User 5678"}}

			hashUsernameMeLocal    uint64 = 4957449860898447395  // xxhash("Me")
			hashUsername5678Local  uint64 = 15512417390573333165 // xxhash("User 5678")
			entityKeyUser1234Local        = `{"id":"1234"}`
			entityKeyUser5678Local        = `{"id":"5678"}`
		)

		// Check the parse error instead of discarding it — a malformed test
		// server URL should fail loudly, not produce an empty host.
		accountsURLParsed, err := url.Parse(setup.AccountsUpstreamServer.URL)
		require.NoError(t, err)
		accountsHost := accountsURLParsed.Host

		// First query (id=1234) — L2 miss, populates cache
		tracker.Reset()
		resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t)
		assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp))
		assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts subgraph")

		expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{
			L2Reads: []resolve.CacheKeyEvent{
				{CacheKey: keyUserById1234, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsAccountsLocal}, // L2 miss: first request, cache empty
			},
			L2Writes: []resolve.CacheWriteEvent{
				{CacheKey: keyUserById1234, EntityType: "Query", ByteSize: byteSizeUser1234, DataSource: dsAccountsLocal, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written after accounts fetch
			},
			FieldHashes: []resolve.EntityFieldHash{
				{EntityType: "User", FieldName: "username", FieldHash: hashUsernameMeLocal, KeyRaw: entityKeyUser1234Local, Source: resolve.FieldSourceSubgraph}, // User returned by root field, data from subgraph
			},
			EntityTypes: []resolve.EntityTypeInfo{
				{TypeName: "User", Count: 1, UniqueKeys: 1}, // 1 User entity from root field response
			},
		})
		assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers)))

		// Second query (same id=1234) — L2 hit
		tracker.Reset()
		resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t)
		assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp))
		assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts (cache hit)")

		expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{
			L2Reads: []resolve.CacheKeyEvent{
				{CacheKey: keyUserById1234, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsAccountsLocal, ByteSize: byteSizeUser1234}, // L2 hit: populated by first request
			},
			// No L2Writes: data served from cache
			FieldHashes: []resolve.EntityFieldHash{
				// Source is FieldSourceSubgraph (default) because entity source tracking operates at
				// entity cache level, not root field cache level — no entity caching configured for User
				{EntityType: "User", FieldName: "username", FieldHash: hashUsernameMeLocal, KeyRaw: entityKeyUser1234Local, Source: resolve.FieldSourceSubgraph},
			},
			EntityTypes: []resolve.EntityTypeInfo{
				{TypeName: "User", Count: 1, UniqueKeys: 1},
			},
		})
		assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers)))

		// Third query (different id=5678) — L2 miss (different args = different cache key)
		tracker.Reset()
		resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "5678"}, t)
		assert.Equal(t, `{"data":{"user":{"id":"5678","username":"User 5678"}}}`, string(resp))
		assert.Equal(t, 1, tracker.GetCount(accountsHost), "Third query should call accounts (different args)")

		expected3 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{
			L2Reads: []resolve.CacheKeyEvent{
				{CacheKey: keyUserById5678, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsAccountsLocal}, // L2 miss: different args, not cached
			},
			L2Writes: []resolve.CacheWriteEvent{
				{CacheKey: keyUserById5678, EntityType: "Query", ByteSize: byteSizeUser5678, DataSource: dsAccountsLocal, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // New args written to L2
			},
			FieldHashes: []resolve.EntityFieldHash{
				{EntityType: "User", FieldName: "username", FieldHash: hashUsername5678Local, KeyRaw: entityKeyUser5678Local, Source: resolve.FieldSourceSubgraph}, // User 5678 data from subgraph
			},
			EntityTypes: []resolve.EntityTypeInfo{
				{TypeName: "User", Count: 1, UniqueKeys: 1},
			},
		})
		assert.Equal(t, expected3, normalizeSnapshot(parseCacheAnalytics(t, headers)))
	})

	t.Run("root field only - L2 analytics without entity caching", func(t *testing.T) {
		// Tests root field caching analytics in isolation — only root field caching configured,
		// no entity caching. Verifies that only root field events appear in analytics.
		defaultCache := NewFakeLoaderCache()
		caches := map[string]resolve.LoaderCache{
			"default": defaultCache,
		}

		tracker := newSubgraphCallTracker(http.DefaultTransport)
		trackingClient := &http.Client{Transport: tracker}

		// Only configure root field caching for products — no entity caching at all
		rootOnlyConfigs := engine.SubgraphCachingConfigs{
			{
				SubgraphName: "products",
				RootFieldCaching: plan.RootFieldCacheConfigurations{
					{TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second},
				},
			},
		}

		setup := federationtesting.NewFederationSetup(addCachingGateway(
			withCachingEnableART(false),
			withCachingLoaderCache(caches),
			withHTTPClient(trackingClient),
			withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}),
			withSubgraphEntityCachingConfigs(rootOnlyConfigs),
		))
		t.Cleanup(setup.Close)

		gqlClient := NewGraphqlClient(http.DefaultClient)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		// Check parse errors instead of discarding them — a malformed test
		// server URL should fail loudly, not produce an empty host.
		productsURLParsed, err := url.Parse(setup.ProductsUpstreamServer.URL)
		require.NoError(t, err)
		productsHost := productsURLParsed.Host
		reviewsURLParsed, err := url.Parse(setup.ReviewsUpstreamServer.URL)
		require.NoError(t, err)
		reviewsHost := reviewsURLParsed.Host
		accountsURLParsed, err := url.Parse(setup.AccountsUpstreamServer.URL)
		require.NoError(t, err)
		accountsHost := accountsURLParsed.Host

		const (
			keyTopProductsLocal = `{"__typename":"Query","field":"topProducts"}`
			dsProductsLocal     = "products"
			byteSizeTP          = 127 // Query.topProducts root field response
		)

		// First query — L2 miss for root field, no events for entities (not configured)
		tracker.Reset()
		resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, expectedResponseBody, string(resp))

		// Products subgraph called (root field miss), reviews + accounts always called (no entity caching)
		assert.Equal(t, 1, tracker.GetCount(productsHost), "First query should call products subgraph")
		assert.Equal(t, 1, tracker.GetCount(reviewsHost), "First query should call reviews subgraph")
		assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts subgraph")

		expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{
			L2Reads: []resolve.CacheKeyEvent{
				{CacheKey: keyTopProductsLocal, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProductsLocal}, // L2 miss: first request, cache empty
			},
			L2Writes: []resolve.CacheWriteEvent{
				{CacheKey: keyTopProductsLocal, EntityType: "Query", ByteSize: byteSizeTP, DataSource: dsProductsLocal, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written after products fetch
			},
			// Only entity types tracked during resolution (not caching-dependent)
			FieldHashes: multiUpstreamFieldHashes,
			EntityTypes: multiUpstreamEntityTypes,
		})
		assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers)))

		// Second query — L2 hit for root field, entities still fetched (not cached)
		tracker.Reset()
		resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t)
		assert.Equal(t, expectedResponseBody, string(resp))

		// Products subgraph skipped (root field cache hit), reviews + accounts still called
		assert.Equal(t, 0, tracker.GetCount(productsHost), "Second query should skip products (root field cache hit)")
		assert.Equal(t, 1, tracker.GetCount(reviewsHost), "Second query should call reviews (no entity caching)")
		assert.Equal(t, 1, tracker.GetCount(accountsHost), "Second query should call accounts (no entity caching)")

		expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{
			L2Reads: []resolve.CacheKeyEvent{
				{CacheKey: keyTopProductsLocal, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProductsLocal, ByteSize: byteSizeTP}, // L2 hit: root field cached by first request
			},
			// No L2Writes: root field served from cache, entities have no caching configured
			FieldHashes: multiUpstreamFieldHashes, // Entity field hashes still tracked (resolution, not caching)
			EntityTypes: multiUpstreamEntityTypes,
		})
		assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers)))
	})
}

func TestShadowCacheE2E(t *testing.T) {
	// Cache key constants (same as TestCacheAnalyticsE2E — same federation setup)
	const (
		keyProductTop1 = `{"__typename":"Product","key":{"upc":"top-1"}}`
		keyProductTop2 = `{"__typename":"Product","key":{"upc":"top-2"}}`
		keyTopProducts = `{"__typename":"Query","field":"topProducts"}`
		keyUser1234    = `{"__typename":"User","key":{"id":"1234"}}`
		dsAccounts     = "accounts"
		dsProducts     = "products"
		dsReviews      = "reviews"
	)

	// Field hash constants
	const (
		hashProductNameTrilby uint64 = 1032923585965781586
		hashProductNameFedora uint64 = 2432227032303632641
		hashUserUsernameMe    uint64 = 4957449860898447395
	)

	// Entity key constants
	const (
		entityKeyProductTop1 = `{"upc":"top-1"}`
		entityKeyProductTop2 = `{"upc":"top-2"}`
		entityKeyUser1234    = `{"id":"1234"}`
	)

	// Byte sizes
	const (
		byteSizeProductTop1 = 177
		byteSizeProductTop2 = 233
		byteSizeTopProducts = 127
		byteSizeUser1234    = 49
	)

	// Shadow comparison hash constants
	const (
		shadowHashProductTop1  uint64 = 8656108128396512717
		shadowHashProductTop2  uint64 = 4671066427758823003
		shadowHashUser1234     uint64 = 188937276969638005
		shadowBytesProductTop1        = 124
		shadowBytesProductTop2        = 180
		shadowBytesUser1234           = 17
	)

	// Shadow cached field hash constants (ProvidesData fields hashed from cached value during shadow comparison)
	const (
		shadowFieldHashProductReviewsTop1 uint64 =
13894521258004960943 // xxhash of Product reviews field for top-1 - shadowFieldHashProductReviewsTop2 uint64 = 3182276346310063647 // xxhash of Product reviews field for top-2 - ) - - // Field hashes when all data comes from subgraph (first request, all misses) - fieldHashesSubgraph := []resolve.EntityFieldHash{ - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceSubgraph}, - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceSubgraph}, - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph}, - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph}, - } - - // Field hashes when all data comes from L2 (second request, all hits — no shadow entities) - fieldHashesL2 := []resolve.EntityFieldHash{ - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2}, - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2}, - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, - } - - // Field hashes when all entities are in shadow mode (second request): - // L2 source hashes from resolution + ShadowCached hashes from compareShadowValues - fieldHashesL2AllShadow := []resolve.EntityFieldHash{ - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2}, - {EntityType: "Product", FieldName: "name", 
FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2}, - {EntityType: "Product", FieldName: "reviews", FieldHash: shadowFieldHashProductReviewsTop1, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceShadowCached}, // Cached Product reviews field for per-field staleness detection - {EntityType: "Product", FieldName: "reviews", FieldHash: shadowFieldHashProductReviewsTop2, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceShadowCached}, // Cached Product reviews field for per-field staleness detection - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username for per-field staleness detection - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username (second review) - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, - } - - // Field hashes when only User is in shadow mode (mixed mode, second request): - // Product/root L2 source hashes + User L2 + User ShadowCached hashes - fieldHashesL2MixedShadow := []resolve.EntityFieldHash{ - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2}, - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2}, - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username for per-field staleness detection - {EntityType: "User", FieldName: "username", FieldHash: 
hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username (second review) - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, - {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, - } - - entityTypes := []resolve.EntityTypeInfo{ - {TypeName: "Product", Count: 2, UniqueKeys: 2}, - {TypeName: "User", Count: 2, UniqueKeys: 1}, - } - - expectedResponseBody := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}` - - t.Run("shadow all entities - always fetches", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{"default": defaultCache} - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - // Shadow mode for all entity types, real caching for root fields - shadowConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "products", - RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - { - SubgraphName: "reviews", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, - }, - }, - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, - }, - }, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - 
withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(shadowConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) - productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) - reviewsHost := mustParseHost(setup.ReviewsUpstreamServer.URL) - - // Request 1: All L2 misses → all 3 subgraphs called - tracker.Reset() - resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - - assert.Equal(t, 1, tracker.GetCount(productsHost), "request 1: should call products exactly once") - assert.Equal(t, 1, tracker.GetCount(reviewsHost), "request 1: should call reviews exactly once") - assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 1: should call accounts exactly once") - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews, Shadow: true}, // Shadow L2 miss: cache empty, subgraph fetched - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews, Shadow: true}, // Shadow L2 miss: cache empty, subgraph fetched - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // Real L2 miss: root field not shadow, fetched normally - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts, Shadow: true}, // Shadow L2 miss: User not yet cached - }, - L2Writes: []resolve.CacheWriteEvent{ - {CacheKey: keyProductTop1, 
EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written to L2 even in shadow (populates for comparison) - {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Written to L2 even in shadow - {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written normally (not shadow) - {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // User entity written for future shadow comparison - }, - // No ShadowComparisons: nothing cached yet to compare against - FieldHashes: fieldHashesSubgraph, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, headers))) - - // Request 2: Entity L2 hits (shadow) → entity subgraphs STILL called - // Root field L2 hit → products NOT called (real caching) - tracker.Reset() - resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - - assert.Equal(t, 0, tracker.GetCount(productsHost), "request 2: products should NOT be called (root field real cache hit)") - assert.Equal(t, 1, tracker.GetCount(reviewsHost), "request 2: reviews should be called (Product entity shadow)") - assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 2: accounts should be called (User entity shadow)") - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1, Shadow: true}, // Shadow L2 hit: cached by 
Req 1, but subgraph still called - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2, Shadow: true}, // Shadow L2 hit: cached by Req 1, but subgraph still called - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // Real L2 hit: root field served from cache (not shadow) - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234, Shadow: true}, // Shadow L2 hit: accounts still called for comparison - }, - L2Writes: []resolve.CacheWriteEvent{ - // Only shadow entities re-written (refreshed from subgraph); root field NOT re-written (real cache hit) - {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write: fresh data from subgraph - {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write: fresh data from subgraph - {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write: fresh User from accounts - }, - ShadowComparisons: []resolve.ShadowComparisonEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", IsFresh: true, CachedHash: shadowHashProductTop1, FreshHash: shadowHashProductTop1, CachedBytes: shadowBytesProductTop1, FreshBytes: shadowBytesProductTop1, DataSource: dsReviews, ConfiguredTTL: 30 * time.Second}, // Fresh: cached matches subgraph (data unchanged) - {CacheKey: keyProductTop2, EntityType: "Product", IsFresh: true, CachedHash: shadowHashProductTop2, FreshHash: shadowHashProductTop2, CachedBytes: shadowBytesProductTop2, FreshBytes: shadowBytesProductTop2, DataSource: 
dsReviews, ConfiguredTTL: 30 * time.Second}, // Fresh: cached matches subgraph (data unchanged) - {CacheKey: keyUser1234, EntityType: "User", IsFresh: true, CachedHash: shadowHashUser1234, FreshHash: shadowHashUser1234, CachedBytes: shadowBytesUser1234, FreshBytes: shadowBytesUser1234, DataSource: dsAccounts, ConfiguredTTL: 30 * time.Second}, // Fresh: cached User matches subgraph (no mutation) - }, - FieldHashes: fieldHashesL2AllShadow, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, headers))) - }) - - t.Run("mixed mode - shadow User, real cache Product", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{"default": defaultCache} - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - // Shadow mode for User only, real caching for Product and root fields - mixedConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "products", - RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - { - SubgraphName: "reviews", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, // real caching - }, - }, - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, // shadow - }, - }, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(mixedConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - 
accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) - productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) - reviewsHost := mustParseHost(setup.ReviewsUpstreamServer.URL) - - // Request 1: All L2 misses → all 3 subgraphs called - tracker.Reset() - resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - - assert.Equal(t, 1, tracker.GetCount(productsHost), "request 1: should call products exactly once") - assert.Equal(t, 1, tracker.GetCount(reviewsHost), "request 1: should call reviews exactly once") - assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 1: should call accounts exactly once") - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // Real L2 miss: Product entity not yet cached - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // Real L2 miss: Product entity not yet cached - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // Real L2 miss: root field not yet cached - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts, Shadow: true}, // Shadow L2 miss: User entity not yet cached - }, - L2Writes: []resolve.CacheWriteEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Product written for real caching - {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Product written for real caching - {CacheKey: keyTopProducts, EntityType: "Query", 
ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written for real caching - {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // User written (shadow still populates L2) - }, - FieldHashes: fieldHashesSubgraph, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, headers))) - - // Request 2: Product real cache hit, User shadow → still fetched - tracker.Reset() - resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - - assert.Equal(t, 0, tracker.GetCount(productsHost), "request 2: products should NOT be called (root field real cache hit)") - assert.Equal(t, 0, tracker.GetCount(reviewsHost), "request 2: reviews should NOT be called (Product entity real cache hit)") - assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 2: accounts SHOULD be called (User entity shadow)") - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // Real L2 hit: Product served from cache (no subgraph call) - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // Real L2 hit: Product served from cache (no subgraph call) - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // Real L2 hit: root field served from cache - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234, Shadow: true}, // Shadow L2 hit: accounts still called 
for comparison - }, - L2Writes: []resolve.CacheWriteEvent{ - // Only User re-written (shadow always fetches fresh); Product/root NOT re-written (real hit) - {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write: fresh data from accounts - }, - ShadowComparisons: []resolve.ShadowComparisonEvent{ - // Only User has shadow comparisons; Product uses real caching - {CacheKey: keyUser1234, EntityType: "User", IsFresh: true, CachedHash: shadowHashUser1234, FreshHash: shadowHashUser1234, CachedBytes: shadowBytesUser1234, FreshBytes: shadowBytesUser1234, DataSource: dsAccounts, ConfiguredTTL: 30 * time.Second}, // Fresh: cached User matches subgraph - }, - FieldHashes: fieldHashesL2MixedShadow, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, headers))) - }) - - t.Run("shadow mode without analytics - safety only", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{"default": defaultCache} - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - shadowConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "products", - RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, - }, - }, - } - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), // analytics NOT enabled - withSubgraphEntityCachingConfigs(shadowConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := 
NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) - - // Request 1: Populate cache - tracker.Reset() - resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - // No stats when analytics is disabled - assert.Empty(t, headers.Get("X-Cache-Analytics"), "analytics header should not be set when analytics disabled") - - // Request 2: Shadow mode — accounts still fetched (data not served from cache) - tracker.Reset() - resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 2: accounts should be called (shadow mode)") - // No stats when analytics is disabled - assert.Empty(t, headers.Get("X-Cache-Analytics"), "analytics header should not be set when analytics disabled") - }) - - t.Run("graduation - shadow to real", func(t *testing.T) { - // Same FakeLoaderCache shared across both engine setups - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{"default": defaultCache} - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - // Phase 1: Shadow mode for User - shadowConfigs := engine.SubgraphCachingConfigs{ - {SubgraphName: "products", RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, - }}, - {SubgraphName: "reviews", EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, - }}, - {SubgraphName: "accounts", EntityCaching: 
plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, - }}, - } - - setup1 := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(shadowConfigs), - )) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsHost1 := mustParseHost(setup1.AccountsUpstreamServer.URL) - - // Phase 1, Request 1: Populate L2 cache - tracker.Reset() - resp, headers := gqlClient.QueryWithHeaders(ctx, setup1.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // Real L2 miss: first request, cache empty - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // Real L2 miss: first request, cache empty - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // Real L2 miss: root field not yet cached - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts, Shadow: true}, // Shadow L2 miss: User not yet cached - }, - L2Writes: []resolve.CacheWriteEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Product written for real caching - {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: 
resolve.CacheLevelL2, TTL: 30 * time.Second}, // Product written for real caching - {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written for real caching - {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // User written (shadow still populates L2) - }, - FieldHashes: fieldHashesSubgraph, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, headers))) - - // Phase 1, Request 2: Shadow — accounts still called - tracker.Reset() - resp, headers = gqlClient.QueryWithHeaders(ctx, setup1.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - assert.Equal(t, 1, tracker.GetCount(accountsHost1), "phase 1 request 2: accounts should be called (shadow mode)") - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // Real L2 hit: Product served from cache - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // Real L2 hit: Product served from cache - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // Real L2 hit: root field from cache - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234, Shadow: true}, // Shadow L2 hit: cached but accounts still called - }, - L2Writes: []resolve.CacheWriteEvent{ - // Only shadow User re-written; Product/root use real caching (no re-write on hit) - {CacheKey: keyUser1234, 
EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Shadow re-write with fresh data from accounts - }, - ShadowComparisons: []resolve.ShadowComparisonEvent{ - {CacheKey: keyUser1234, EntityType: "User", IsFresh: true, CachedHash: shadowHashUser1234, FreshHash: shadowHashUser1234, CachedBytes: shadowBytesUser1234, FreshBytes: shadowBytesUser1234, DataSource: dsAccounts, ConfiguredTTL: 30 * time.Second}, // Fresh: cached User matches subgraph (safe to graduate) - }, - FieldHashes: fieldHashesL2MixedShadow, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, headers))) - - setup1.Close() - - // Phase 2: Graduated to real caching (same cache, new engine) - realConfigs := engine.SubgraphCachingConfigs{ - {SubgraphName: "products", RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, - }}, - {SubgraphName: "reviews", EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, - }}, - {SubgraphName: "accounts", EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, // No ShadowMode! 
- }}, - } - - tracker2 := newSubgraphCallTracker(http.DefaultTransport) - trackingClient2 := &http.Client{Transport: tracker2} - - setup2 := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), // SAME cache - withHTTPClient(trackingClient2), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(realConfigs), - )) - t.Cleanup(setup2.Close) - - accountsHost2 := mustParseHost(setup2.AccountsUpstreamServer.URL) - - // Phase 2, Request 3: Real L2 hit — accounts NOT called - tracker2.Reset() - resp, headers = gqlClient.QueryWithHeaders(ctx, setup2.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) - assert.Equal(t, expectedResponseBody, string(resp)) - assert.Equal(t, 0, tracker2.GetCount(accountsHost2), "phase 2: accounts should NOT be called (real L2 hit)") - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // Real L2 hit: cached by Phase 1 - {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // Real L2 hit: cached by Phase 1 - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // Real L2 hit: root field cached by Phase 1 - {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234}, // Real L2 hit: graduated from shadow, no longer calls accounts - }, - // No L2Writes: all real cache hits, no fetches needed - // No ShadowComparisons: User is no longer in shadow mode - FieldHashes: fieldHashesL2, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, 
headers))) - }) -} - -func TestMutationImpactE2E(t *testing.T) { - accounts.ResetUsers() - t.Cleanup(accounts.ResetUsers) - - // Configure entity caching for User on accounts subgraph - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - } - - mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` - - // Uses a simple query that causes an entity fetch for User 1234 - // me { id username } triggers: accounts root fetch for Query.me, no entity fetch - // We need a query that triggers entity caching for User - topProducts with reviews + authorWithoutProvides - entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` - - t.Run("mutation with prior cache shows stale entity", func(t *testing.T) { - accounts.ResetUsers() - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{"default": defaultCache} - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Request 1: Query to populate L2 cache with User entity - tracker.Reset() - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) - assert.Contains(t, string(resp), `"username":"Me"`) - - // Request 2: Mutation — should detect stale cached entity - tracker.Reset() - respMut, 
headersMut := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) - assert.Contains(t, string(respMut), `"UpdatedMe"`) - - snap := normalizeSnapshot(parseCacheAnalytics(t, headersMut)) - require.NotNil(t, snap.MutationEvents, "should have mutation impact events") - require.Equal(t, 1, len(snap.MutationEvents), "should have exactly 1 mutation impact event") - - event := snap.MutationEvents[0] - assert.Equal(t, "updateUsername", event.MutationRootField) - assert.Equal(t, "User", event.EntityType) - assert.Equal(t, `{"__typename":"User","key":{"id":"1234"}}`, event.EntityCacheKey) - assert.Equal(t, true, event.HadCachedValue, "should have found cached value") - assert.Equal(t, true, event.IsStale, "cached value should be stale (username changed)") - - // Record discovered values for exact assertion - t.Logf("MutationImpact event: %+v", event) - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - FieldHashes: []resolve.EntityFieldHash{ - // Hash of "UpdatedMe" (post-mutation username) - {EntityType: "User", FieldName: "username", FieldHash: 16932466035575627600, KeyRaw: `{"id":"1234"}`}, - }, - EntityTypes: []resolve.EntityTypeInfo{ - {TypeName: "User", Count: 1, UniqueKeys: 1}, // Mutation returned 1 User entity - }, - MutationEvents: []resolve.MutationEvent{ - { - MutationRootField: "updateUsername", - EntityType: "User", - EntityCacheKey: `{"__typename":"User","key":{"id":"1234"}}`, - HadCachedValue: true, // L2 had cached value from Request 1 query - IsStale: true, // Cached "Me" differs from fresh "UpdatedMe" - CachedHash: event.CachedHash, - FreshHash: event.FreshHash, - CachedBytes: event.CachedBytes, - FreshBytes: event.FreshBytes, - }, - }, - }), snap) - }) - - t.Run("mutation without prior cache shows no-cache event", func(t *testing.T) { - accounts.ResetUsers() - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{"default": defaultCache} - - tracker := 
newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // NO prior query — L2 cache is empty - // Send mutation directly - tracker.Reset() - respMut, headersMut := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) - assert.Contains(t, string(respMut), `"UpdatedMe"`) - - snap := normalizeSnapshot(parseCacheAnalytics(t, headersMut)) - require.NotNil(t, snap.MutationEvents, "should have mutation impact events") - require.Equal(t, 1, len(snap.MutationEvents), "should have exactly 1 mutation impact event") - - event := snap.MutationEvents[0] - assert.Equal(t, "updateUsername", event.MutationRootField) - assert.Equal(t, "User", event.EntityType) - assert.Equal(t, `{"__typename":"User","key":{"id":"1234"}}`, event.EntityCacheKey) - assert.Equal(t, false, event.HadCachedValue, "should NOT have found cached value") - assert.Equal(t, false, event.IsStale, "cannot be stale without cached value") - assert.Equal(t, uint64(0), event.CachedHash, "no cached value = no hash") - assert.Equal(t, 0, event.CachedBytes, "no cached value = no bytes") - - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - FieldHashes: []resolve.EntityFieldHash{ - // Hash of "UpdatedMe" (post-mutation username) - {EntityType: "User", FieldName: "username", FieldHash: 16932466035575627600, KeyRaw: `{"id":"1234"}`}, - }, - EntityTypes: []resolve.EntityTypeInfo{ - {TypeName: "User", Count: 1, UniqueKeys: 1}, // Mutation returned 1 
User entity - }, - MutationEvents: []resolve.MutationEvent{ - { - MutationRootField: "updateUsername", - EntityType: "User", - EntityCacheKey: `{"__typename":"User","key":{"id":"1234"}}`, - HadCachedValue: false, // No prior query, L2 cache was empty - IsStale: false, // Cannot be stale without a cached value to compare - FreshHash: event.FreshHash, - FreshBytes: event.FreshBytes, - }, - }, - }), snap) - }) -} - -func TestMutationCacheInvalidationE2E(t *testing.T) { - accounts.ResetUsers() - t.Cleanup(accounts.ResetUsers) - - // Configure entity caching for User AND mutation invalidation for updateUsername - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, - }, - MutationCacheInvalidation: plan.MutationCacheInvalidationConfigurations{ - {FieldName: "updateUsername"}, - }, - }, - } - - // Query that triggers entity caching for User via authorWithoutProvides (no @provides) - entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` - mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` - - t.Run("mutation deletes L2 cache entry", func(t *testing.T) { - accounts.ResetUsers() - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{"default": defaultCache} - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - 
t.Cleanup(cancel) - - accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) - - // Request 1: Query to populate L2 cache with User entity - tracker.Reset() - defaultCache.ClearLog() - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) - assert.Contains(t, string(resp), `"username":"Me"`) - assert.Equal(t, 1, tracker.GetCount(accountsHost), "should call accounts subgraph once to populate cache") - - // Request 2: Same query — should hit L2 cache, no accounts call - tracker.Reset() - defaultCache.ClearLog() - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) - assert.Contains(t, string(resp), `"username":"Me"`) - assert.Equal(t, 0, tracker.GetCount(accountsHost), "should NOT call accounts subgraph (L2 hit)") - - // Request 3: Mutation — should delete the L2 cache entry - tracker.Reset() - defaultCache.ClearLog() - respMut := gqlClient.QueryString(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) - assert.Contains(t, string(respMut), `"UpdatedMe"`) - - // Verify the cache log contains a delete operation - mutationLog := defaultCache.GetLog() - hasDelete := false - for _, entry := range mutationLog { - if entry.Operation == "delete" { - hasDelete = true - assert.Equal(t, 1, len(entry.Keys), "delete should have exactly 1 key") - assert.Contains(t, entry.Keys[0], `"__typename":"User"`) - assert.Contains(t, entry.Keys[0], `"id":"1234"`) - } - } - assert.True(t, hasDelete, "mutation should trigger a cache delete operation") - - // Request 4: Same query again — should miss L2 (entry deleted), re-fetch from subgraph - tracker.Reset() - defaultCache.ClearLog() - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) - assert.Contains(t, string(resp), `"username":"UpdatedMe"`) - assert.Equal(t, 1, tracker.GetCount(accountsHost), "should call accounts subgraph again (L2 entry was deleted)") - }) - - t.Run("mutation without invalidation config does not delete", func(t *testing.T) { 
- accounts.ResetUsers() - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{"default": defaultCache} - - // Config WITHOUT MutationCacheInvalidation - noInvalidationConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, - }, - // No MutationCacheInvalidation — mutation should NOT delete cache - }, - } - - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), - withSubgraphEntityCachingConfigs(noInvalidationConfigs), - )) - t.Cleanup(setup.Close) - - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) - - // Request 1: Query to populate L2 cache - tracker.Reset() - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) - assert.Contains(t, string(resp), `"username":"Me"`) - - // Request 2: Mutation — should NOT delete L2 cache entry - tracker.Reset() - defaultCache.ClearLog() - respMut := gqlClient.QueryString(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) - assert.Contains(t, string(respMut), `"UpdatedMe"`) - - // Verify no delete operation in cache log - mutationLog := defaultCache.GetLog() - for _, entry := range mutationLog { - assert.NotEqual(t, "delete", entry.Operation, "should not have any delete operations without invalidation config") - } - - // Request 3: Same query — should still hit L2 cache (stale but not deleted) - tracker.Reset() - _ = gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) - assert.Equal(t, 
0, tracker.GetCount(accountsHost), "should NOT call accounts subgraph (L2 entry still present)") - }) -} - -func mustParseHost(rawURL string) string { - parsed, err := url.Parse(rawURL) - if err != nil { - panic("failed to parse URL \"" + rawURL + "\": " + err.Error()) - } - return parsed.Host -} - -func TestFederationCachingAliases(t *testing.T) { - // Helper to create a standard setup for alias caching tests - setupAliasCachingTest := func(t *testing.T) ( - *federationtesting.FederationSetup, - *GraphqlClient, - context.Context, - context.CancelFunc, - *subgraphCallTracker, - *FakeLoaderCache, - string, // accountsHost - ) { - t.Helper() - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{ - Transport: tracker, - } - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "products", - RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - { - SubgraphName: "reviews", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - } - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := 
accountsURLParsed.Host - return setup, gqlClient, ctx, cancel, tracker, defaultCache, accountsHost - } - - t.Run("L2 hit - alias then no alias", func(t *testing.T) { - setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) - - // Request 1: Use alias userName for username - defaultCache.ClearLog() - tracker.Reset() - query1 := `query { topProducts { name reviews { body authorWithoutProvides { userName: username } } } }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"userName":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"userName":"Me"}}]}]}}`, - string(resp)) - - accountsCalls1 := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") - - // Request 2: No alias (original field name) - defaultCache.ClearLog() - tracker.Reset() - query2 := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, - string(resp)) - - accountsCalls2 := tracker.GetCount(accountsHost) - assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit from normalized cache)") - }) - - t.Run("L2 hit - two different aliases for same field", func(t *testing.T) { - setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := 
setupAliasCachingTest(t) - - // Request 1: alias u1 for username - defaultCache.ClearLog() - tracker.Reset() - query1 := `query { topProducts { name reviews { body authorWithoutProvides { u1: username } } } }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"u1":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"u1":"Me"}}]}]}}`, - string(resp)) - - accountsCalls1 := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") - - // Request 2: alias u2 for username - defaultCache.ClearLog() - tracker.Reset() - query2 := `query { topProducts { name reviews { body authorWithoutProvides { u2: username } } } }` - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"u2":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"u2":"Me"}}]}]}}`, - string(resp)) - - accountsCalls2 := tracker.GetCount(accountsHost) - assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying field)") - }) - - t.Run("no collision - alias matches another field name", func(t *testing.T) { - setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) - - // Request 1: alias realName for username (realName is another real field on User) - // This triggers an accounts entity fetch for username, stores normalized {"username":"Me"} in L2 - defaultCache.ClearLog() - tracker.Reset() - query1 := `query { 
topProducts { name reviews { body authorWithoutProvides { realName: username } } } }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"realName":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"realName":"Me"}}]}]}}`, - string(resp)) - - accountsCalls1 := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once for username") - - // Request 2: actual username field (no alias) - same underlying field - // Should be an L2 hit because both resolve username from accounts - defaultCache.ClearLog() - tracker.Reset() - query2 := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, - string(resp)) - - accountsCalls2 := tracker.GetCount(accountsHost) - assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying field username)") - }) - - t.Run("no collision - field name used as alias for another field", func(t *testing.T) { - setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) - - // Request 1: username field (no alias) - triggers accounts entity fetch for username - defaultCache.ClearLog() - tracker.Reset() - query1 := `query { topProducts { name reviews { body authorWithoutProvides { username } 
} } }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, - string(resp)) - - accountsCalls1 := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") - - // Request 2: different alias (u1) for same field (username) - // Should be an L2 hit because the underlying field is the same - defaultCache.ClearLog() - tracker.Reset() - query2 := `query { topProducts { name reviews { body authorWithoutProvides { u1: username } } } }` - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"u1":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"u1":"Me"}}]}]}}`, - string(resp)) - - accountsCalls2 := tracker.GetCount(accountsHost) - assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying field)") - }) - - t.Run("L2 hit - multiple fields some aliased some not", func(t *testing.T) { - setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) - - // Request 1: alias username and include realName (realName comes from reviews, not accounts) - defaultCache.ClearLog() - tracker.Reset() - query1 := `query { topProducts { name reviews { body authorWithoutProvides { userName: username realName } } } }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, - 
`{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"userName":"Me","realName":"User Usington"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"userName":"Me","realName":"User Usington"}}]}]}}`, - string(resp)) - - accountsCalls1 := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") - - // Request 2: no alias on username, different alias on realName - // accounts entity cache should be L2 hit (same username field) - defaultCache.ClearLog() - tracker.Reset() - query2 := `query { topProducts { name reviews { body authorWithoutProvides { username name: realName } } } }` - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","name":"User Usington"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","name":"User Usington"}}]}]}}`, - string(resp)) - - accountsCalls2 := tracker.GetCount(accountsHost) - assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying username field)") - }) - - t.Run("L1 hit within single request with aliases", func(t *testing.T) { - // Tests L1 cache with aliased fields across entity fetches within the same request. - // Flow: - // 1. topProducts -> products - // 2. reviews -> reviews (entity fetch for Products) - // 3. authorWithoutProvides -> accounts (entity fetch for User 1234, aliased userName: username) - // -> User 1234 stored in L1 with normalized field names - // 4. sameUserReviewers -> reviews (returns [User 1234] reference) - // 5. 
Entity resolution for sameUserReviewers -> accounts - // -> User 1234 is L1 HIT (already fetched in step 3), entire accounts call skipped - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: false}), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - // Query with alias on username - sameUserReviewers returns same user, - // should be L1 hit from the first entity fetch - tracker.Reset() - query := `query { - topProducts { - reviews { - authorWithoutProvides { - id - userName: username - sameUserReviewers { - id - userName: username - } - } - } - } - }` - resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]}]}}`, - string(resp)) - - // With L1 enabled: first accounts call fetches User 1234 for authorWithoutProvides - // sameUserReviewers entity resolution hits L1 -> accounts call skipped - accountsCalls := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls, "Should call accounts subgraph once (sameUserReviewers skipped via L1)") - }) - - t.Run("L1 hit within single request with mixed alias and no alias", func(t *testing.T) { - // Same as above, but the nested sameUserReviewers uses the original field name (no alias) - // while the outer 
authorWithoutProvides uses an alias. L1 cache stores normalized data, - // so the nested fetch should still hit L1 despite the different field naming. - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: false}), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) - accountsHost := accountsURLParsed.Host - - // Outer authorWithoutProvides uses alias "userName: username" - // Nested sameUserReviewers uses plain "username" (no alias) - // L1 should still hit because cache stores normalized (original) field names - tracker.Reset() - query := `query { - topProducts { - reviews { - authorWithoutProvides { - id - userName: username - sameUserReviewers { - id - username - } - } - } - } - }` - resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}`, - string(resp)) - - // With L1 enabled: first accounts call fetches User 1234 for authorWithoutProvides - // sameUserReviewers entity resolution hits L1 -> accounts call skipped - accountsCalls := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls, "Should call accounts subgraph once (sameUserReviewers skipped via L1)") - }) - - t.Run("L2 hit - aliased root field then original root field", func(t *testing.T) { - setup, gqlClient, ctx, _, 
tracker, defaultCache, _ := setupAliasCachingTest(t) - productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) - - // Request 1: alias the root field topProducts as tp - defaultCache.ClearLog() - tracker.Reset() - query1 := `query { tp: topProducts { name } }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, - `{"data":{"tp":[{"name":"Trilby"},{"name":"Fedora"}]}}`, - string(resp)) - - productsCalls1 := tracker.GetCount(productsHost) - assert.Equal(t, 1, productsCalls1, "Request 1 should call products subgraph once") - - // Request 2: same root field without alias — should L2 hit (same cache key) - defaultCache.ClearLog() - tracker.Reset() - query2 := `query { topProducts { name } }` - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"name":"Trilby"},{"name":"Fedora"}]}}`, - string(resp)) - - productsCalls2 := tracker.GetCount(productsHost) - assert.Equal(t, 0, productsCalls2, "Request 2 should skip products (L2 hit from aliased root field)") - }) - - t.Run("L2 hit - two different root field aliases", func(t *testing.T) { - setup, gqlClient, ctx, _, tracker, defaultCache, _ := setupAliasCachingTest(t) - productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) - - // Request 1: alias p1 for topProducts - defaultCache.ClearLog() - tracker.Reset() - query1 := `query { p1: topProducts { name } }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, - `{"data":{"p1":[{"name":"Trilby"},{"name":"Fedora"}]}}`, - string(resp)) - - productsCalls1 := tracker.GetCount(productsHost) - assert.Equal(t, 1, productsCalls1, "Request 1 should call products subgraph once") - - // Request 2: different alias p2 for same root field - defaultCache.ClearLog() - tracker.Reset() - query2 := `query { p2: topProducts { name } }` - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) - 
assert.Equal(t, - `{"data":{"p2":[{"name":"Trilby"},{"name":"Fedora"}]}}`, - string(resp)) - - productsCalls2 := tracker.GetCount(productsHost) - assert.Equal(t, 0, productsCalls2, "Request 2 should skip products (L2 hit - same underlying root field)") - }) - - t.Run("L1+L2 combined - alias entity caching across both layers", func(t *testing.T) { - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "accounts", - EntityCaching: plan.EntityCacheConfigurations{ - {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - } - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: true}), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) - - // Request 1: alias on username, sameUserReviewers triggers L1 hit within request - // L2 is also populated on the first entity fetch - defaultCache.ClearLog() - tracker.Reset() - query1 := `query { - topProducts { - reviews { - authorWithoutProvides { - id - userName: username - sameUserReviewers { - id - userName: username - } - } - } - } - }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, - 
`{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]}]}}`, - string(resp)) - - accountsCalls1 := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls1, "Request 1: accounts called once (sameUserReviewers skipped via L1)") - - // Request 2: same query without alias — L2 hit for User entity, no accounts calls - defaultCache.ClearLog() - tracker.Reset() - query2 := `query { - topProducts { - reviews { - authorWithoutProvides { - id - username - sameUserReviewers { - id - username - } - } - } - } - }` - resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}`, - string(resp)) - - accountsCalls2 := tracker.GetCount(accountsHost) - assert.Equal(t, 0, accountsCalls2, "Request 2: accounts skipped (L2 hit from normalized cache)") - }) - - t.Run("L2 analytics - aliased root field", func(t *testing.T) { - const ( - keyTopProducts = `{"__typename":"Query","field":"topProducts"}` - dsProducts = "products" - byteSizeTopProducts = 53 - hashProductNameTrilby = uint64(1032923585965781586) - hashProductNameFedora = uint64(2432227032303632641) - ) - - defaultCache := NewFakeLoaderCache() - caches := map[string]resolve.LoaderCache{ - "default": defaultCache, - } - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - subgraphCachingConfigs := engine.SubgraphCachingConfigs{ - { - SubgraphName: "products", - RootFieldCaching: plan.RootFieldCacheConfigurations{ - {TypeName: "Query", FieldName: 
"topProducts", CacheName: "default", TTL: 30 * time.Second}, - }, - }, - } - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withCachingLoaderCache(caches), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), - withSubgraphEntityCachingConfigs(subgraphCachingConfigs), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Shared field hashes: Product.name for Trilby and Fedora from root field response - // Products are not entity-resolved (no @key fetch), so KeyRaw is empty - fieldHashes := []resolve.EntityFieldHash{ - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: "{}"}, // xxhash("Trilby"), no entity key (root field) - {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: "{}"}, // xxhash("Fedora"), no entity key (root field) - } - entityTypes := []resolve.EntityTypeInfo{ - {TypeName: "Product", Count: 2, UniqueKeys: 1}, // 2 products from root field, no entity keys - } - - // Request 1: aliased root field — L2 miss, populates cache - tracker.Reset() - query1 := `query { tp: topProducts { name } }` - resp, headers := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query1, nil, t) - assert.Equal(t, `{"data":{"tp":[{"name":"Trilby"},{"name":"Fedora"}]}}`, string(resp)) - - // Cache key must use original field name "topProducts", NOT the alias "tp" - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // L2 miss: first request, cache empty - }, - L2Writes: []resolve.CacheWriteEvent{ - {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, 
CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, // Root field written after products fetch - }, - FieldHashes: fieldHashes, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, headers))) - - // Request 2: original root field (no alias) — L2 hit from Request 1 - tracker.Reset() - query2 := `query { topProducts { name } }` - resp, headers = gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query2, nil, t) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby"},{"name":"Fedora"}]}}`, string(resp)) - - // Same cache key hit regardless of alias difference - assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ - L2Reads: []resolve.CacheKeyEvent{ - {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // L2 hit: populated by aliased Request 1 - }, - // No L2Writes: served from cache - FieldHashes: fieldHashes, - EntityTypes: entityTypes, - }), normalizeSnapshot(parseCacheAnalytics(t, headers))) - }) - - t.Run("L1 dedup - two aliases for same entity field in single request", func(t *testing.T) { - tracker := newSubgraphCallTracker(http.DefaultTransport) - trackingClient := &http.Client{Transport: tracker} - - setup := federationtesting.NewFederationSetup(addCachingGateway( - withCachingEnableART(false), - withHTTPClient(trackingClient), - withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: false}), - )) - t.Cleanup(setup.Close) - gqlClient := NewGraphqlClient(http.DefaultClient) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) - - // Two aliases (a1, a2) for the same entity field (authorWithoutProvides) - // Both resolve the same User 1234 — second should be L1 hit - tracker.Reset() - query := `query { - topProducts { - reviews { - a1: authorWithoutProvides { - id - username - } - a2: authorWithoutProvides { - 
id - username - } - } - } - }` - resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query, nil, t) - assert.Equal(t, - `{"data":{"topProducts":[{"reviews":[{"a1":{"id":"1234","username":"Me"},"a2":{"id":"1234","username":"Me"}}]},{"reviews":[{"a1":{"id":"1234","username":"Me"},"a2":{"id":"1234","username":"Me"}}]}]}}`, - string(resp)) - - accountsCalls := tracker.GetCount(accountsHost) - assert.Equal(t, 1, accountsCalls, "Should call accounts once (second alias L1 hit for same User entity)") - }) -} diff --git a/v2/pkg/engine/resolve/cache_analytics.go b/v2/pkg/engine/resolve/cache_analytics.go index 52e0f3c6f8..ccf0e8171d 100644 --- a/v2/pkg/engine/resolve/cache_analytics.go +++ b/v2/pkg/engine/resolve/cache_analytics.go @@ -60,12 +60,15 @@ type CacheWriteEvent struct { // FetchTimingEvent records the duration of a subgraph fetch or cache lookup. type FetchTimingEvent struct { - DataSource string // subgraph name - EntityType string // entity type (empty for root fetches) - DurationMs int64 // time spent on this operation in milliseconds - Source FieldSource // what handled this: Subgraph (fetch), L2 (cache GET) - ItemCount int // number of entities in this fetch/lookup - IsEntityFetch bool // true for _entities, false for root field + DataSource string // subgraph name + EntityType string // entity type (empty for root fetches) + DurationMs int64 // time spent on this operation in milliseconds + Source FieldSource // what handled this: Subgraph (fetch), L2 (cache GET) + ItemCount int // number of entities in this fetch/lookup + IsEntityFetch bool // true for _entities, false for root field + HTTPStatusCode int // HTTP status code from subgraph response (0 for cache hits) + ResponseBytes int // response body size in bytes (0 for cache hits) + TTFBMs int64 // time to first byte in milliseconds (0 when unavailable) } // SubgraphErrorEvent records a subgraph error for analytics. 
diff --git a/v2/pkg/engine/resolve/loader.go b/v2/pkg/engine/resolve/loader.go index 895fc3c201..563188cd77 100644 --- a/v2/pkg/engine/resolve/loader.go +++ b/v2/pkg/engine/resolve/loader.go @@ -444,6 +444,7 @@ func (l *Loader) resolveSingle(item *FetchItem) error { return err } } + l.mergeResultAnalytics(res) err = l.mergeResult(item, res, items) l.callOnFinished(res) return err @@ -460,6 +461,7 @@ func (l *Loader) resolveSingle(item *FetchItem) error { return errors.WithStack(err) } } + l.mergeResultAnalytics(res) err = l.mergeResult(item, res, items) l.callOnFinished(res) return err @@ -475,6 +477,7 @@ func (l *Loader) resolveSingle(item *FetchItem) error { return errors.WithStack(err) } } + l.mergeResultAnalytics(res) err = l.mergeResult(item, res, items) l.callOnFinished(res) return err @@ -483,6 +486,21 @@ func (l *Loader) resolveSingle(item *FetchItem) error { } } +// mergeResultAnalytics merges analytics events accumulated on a result into the collector. +// In resolveParallel, this happens in bulk after all goroutines complete. +// In resolveSingle, we must call this per-result since there's no bulk merge phase. 
+func (l *Loader) mergeResultAnalytics(res *result) { + if !l.ctx.cacheAnalyticsEnabled() { + return + } + if len(res.l2FetchTimings) > 0 { + l.ctx.cacheAnalytics.MergeL2FetchTimings(res.l2FetchTimings) + } + if len(res.l2ErrorEvents) > 0 { + l.ctx.cacheAnalytics.MergeL2Errors(res.l2ErrorEvents) + } +} + func (l *Loader) callOnFinished(res *result) { if l.ctx.LoaderHooks != nil && res.loaderHookContext != nil { l.ctx.LoaderHooks.OnFinished(res.loaderHookContext, res.ds, newResponseInfo(res, l.ctx.subgraphErrors)) @@ -2263,12 +2281,14 @@ func (l *Loader) executeSourceLoad(ctx context.Context, fetchItem *FetchItem, so isEntityFetch = info.OperationType == ast.OperationTypeQuery && (entityType != "Query" && entityType != "Mutation" && entityType != "Subscription") } res.l2FetchTimings = append(res.l2FetchTimings, FetchTimingEvent{ - DataSource: res.ds.Name, - EntityType: entityType, - DurationMs: time.Since(fetchStart).Milliseconds(), - Source: FieldSourceSubgraph, - ItemCount: 1, - IsEntityFetch: isEntityFetch, + DataSource: res.ds.Name, + EntityType: entityType, + DurationMs: time.Since(fetchStart).Milliseconds(), + Source: FieldSourceSubgraph, + ItemCount: 1, + IsEntityFetch: isEntityFetch, + HTTPStatusCode: res.statusCode, + ResponseBytes: len(res.out), }) }